keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | john-drago/fluoro | code/datacomp/coord_change.py | .py | 19,493 | 577 | '''
This script is meant to accomplish four things, all about transforming between coordinate systems.
1) Transform from local coordinate system to global coordinate system.
- func: Local2Global_Coord
2) Transform from global coordinate system to local coordinate system.
- func: Global2Local_Coord
3) If given a basis of unit vectors, which can describe coordinate change --> be able to produce the three angular rotations (Tait-Bryan angles): phi, theta, psi
- func: Basis2Angles
4) If given three Tait-Bryan angles: be able to produce the "rotation matrix", comprising the basis matrix for rotation between two coordinate systems.
- func: Angles2Basis
For reference: https://www.youtube.com/watch?v=meibWcbGqt4
'''
import autograd.numpy as np
from autograd import grad
def Global2Local_Coord(rot_mat, trans_vector, points_in_global):
    '''
    func Global2Local_Coord(rot_mat, trans_vector, points_in_global)
    - Takes "rotation matrix", whereby the columns form an orthonormal basis,
      describing the axes of the new coordinate system in terms of the global
      coordinate system. Should be of form 3x3; square and invertible:
        [ e_1 e_2 e_3 ]
    - Takes translation vector of size 3, which describes translation from the
      global origin to the new local origin (global origin ----> local origin).
    - Takes points defined in the global coordinate frame.
    - Returns positions (originally defined in the global coordinate frame)
      expressed in the new local coordinate frame.
    '''
    if rot_mat.shape[0] != rot_mat.shape[1]:
        raise ValueError('Rotation Matrix should be square')
    # NOTE(review): the translation-vector shape check used in
    # Local2Global_Coord is deliberately left disabled here, matching the
    # original code -- confirm whether any caller relies on other shapes.
    # elif trans_vector.shape != (3,) and trans_vector.shape != (1, 3):
    #     raise ValueError('Translation Matrix should be an array of size 3 or 1x3 matrix')
    # Undo the translation first (shift the local origin back to the global
    # origin), then undo the rotation. Solving R x = p with np.linalg.solve
    # avoids forming an explicit matrix inverse, which is both slower and less
    # numerically stable than solving the linear system directly.
    translated_points = points_in_global - trans_vector
    points_in_local = np.transpose(np.linalg.solve(rot_mat, np.transpose(translated_points)))
    return points_in_local
def Local2Global_Coord(rot_mat, trans_vector, points_in_local):
    '''
    function Local2Global_Coord(rot_mat, trans_vector, points_in_local)
    - "rot_mat": columns form an orthonormal basis expressing the local axes in
      global coordinates (3x3, invertible):  [ e_1 e_2 e_3 ]
    - "trans_vector": size-3 vector, translation from the global origin to the
      new local origin (global origin ----> local origin).
    - "points_in_local": points expressed in the local coordinate frame.
    Returns the same points expressed in the global coordinate frame.
    '''
    n_rows, n_cols = rot_mat.shape
    if n_rows != n_cols:
        raise ValueError('Rotation Matrix should be square')
    if trans_vector.shape not in ((3,), (1, 3)):
        raise ValueError('Translation Matrix should be an array of size 3 or 1x3 matrix')
    # Rotate into the global orientation, then shift by the origin offset.
    # (R @ P^T)^T is written here as P @ R^T -- element-wise identical.
    rotated_points = np.matmul(points_in_local, np.transpose(rot_mat))
    return rotated_points + trans_vector
def Basis2Angles(rot_mat):
    '''
    function Basis2Angles(rot_mat)
    Takes a "rotation matrix" whose columns form an orthonormal basis
    describing the axes of the new coordinate system in terms of the global
    coordinate system (3x3, invertible):  [ e_1 e_2 e_3 ]
    Assumes the matrix is the composition (order matters):
        R_rot = R_z * R_y * R_x
    Returns [theta, phi, psi] where:
    - theta rotates about x:
              [ 1      0           0       ]
        R_x = [ 0  cos(theta)  -sin(theta) ]
              [ 0  sin(theta)   cos(theta) ]
    - phi rotates about y:
              [  cos(phi)  0  sin(phi) ]
        R_y = [     0      1     0     ]
              [ -sin(phi)  0  cos(phi) ]
    - psi rotates about z:
              [ cos(psi)  -sin(psi)  0 ]
        R_z = [ sin(psi)   cos(psi)  0 ]
              [    0          0      1 ]
    '''
    epsilon = 0.000009
    # First candidate: phi in [-pi/2, pi/2] from the (2, 0) entry (-sin(phi)).
    phi = np.arcsin(-rot_mat[2, 0])
    psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
    if rot_mat[0, 0] / np.cos(phi) < 0:
        # cos(psi) is negative, so psi lies in the other half of the circle.
        psi = np.pi - psi
    theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
    if rot_mat[2, 2] / np.cos(phi) < 0:
        theta = np.pi - theta
    # Verify the candidate by rebuilding the matrix. Compare the ABSOLUTE
    # error: the original `error < epsilon` accepted arbitrarily large
    # negative residuals, silently passing wrong angle sets.
    rot_mat_guess = Angles2Basis([theta, phi, psi])
    error_binary = (np.abs(rot_mat_guess - rot_mat) < epsilon)
    if not error_binary.all():
        # Second candidate: flip phi into the other quadrant pair and re-derive.
        phi = np.pi - phi
        psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
        if rot_mat[0, 0] / np.cos(phi) < 0:
            psi = np.pi - psi
        theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
        rot_mat_guess = Angles2Basis([theta, phi, psi])
        error_binary = (np.abs(rot_mat_guess - rot_mat) < epsilon)
    assert error_binary.all()
    return [theta, phi, psi]
def Angles2Basis(rot_ang_array):
    '''
    function Angles2Basis([theta, phi, psi])
    Computes the orthonormal basis ("rotation matrix") produced by applying
    the elemental rotations in order (order matters):
        R_rot = R_z * R_y * R_x
    where:
              [ 1      0           0       ]
        R_x = [ 0  cos(theta)  -sin(theta) ]
              [ 0  sin(theta)   cos(theta) ]
              [  cos(phi)  0  sin(phi) ]
        R_y = [     0      1     0     ]
              [ -sin(phi)  0  cos(phi) ]
              [ cos(psi)  -sin(psi)  0 ]
        R_z = [ sin(psi)   cos(psi)  0 ]
              [    0          0      1 ]
    (The original docstring showed R_y's middle entry as cos(theta) and R_z's
    (1, 0) entry as sin(phi); the correct entries are 1 and sin(psi), which is
    what the code below implements.)
    Returns the 3x3 rotation matrix of the form:
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    '''
    theta = rot_ang_array[0]
    phi = rot_ang_array[1]
    psi = rot_ang_array[2]
    # Hoist the trig evaluations; each appears several times below.
    ct, st = np.cos(theta), np.sin(theta)
    cphi, sphi = np.cos(phi), np.sin(phi)
    cpsi, spsi = np.cos(psi), np.sin(psi)
    # First column: image of the local x-axis (U).
    u_x = cphi * cpsi
    u_y = cphi * spsi
    u_z = -sphi
    # Second column: image of the local y-axis (V).
    v_x = cpsi * st * sphi - ct * spsi
    v_y = ct * cpsi + st * sphi * spsi
    v_z = cphi * st
    # Third column: image of the local z-axis (W).
    w_x = ct * cpsi * sphi + st * spsi
    w_y = ct * sphi * spsi - cpsi * st
    w_z = ct * cphi
    rot_mat = np.array([
        [u_x, v_x, w_x],
        [u_y, v_y, w_y],
        [u_z, v_z, w_z]
    ])
    return rot_mat
def Basis2Angles_GD(rot_mat):
    '''
    function Basis2Angles_GD(rot_mat)
    Gradient-descent recovery of the Tait-Bryan angles from a "rotation
    matrix" whose columns form an orthonormal basis describing the local axes
    in global coordinates (3x3, invertible):  [ e_1 e_2 e_3 ]
    Assumes the matrix is the composition (order matters):
        R_rot = R_z * R_y * R_x
    Returns [theta, phi, psi], the rotation angles about x, y and z
    respectively (see Angles2Basis for the elemental matrices).
    '''
    # Analytic initial guess (exact when the matrix is a clean rotation with
    # phi in the principal branch).
    phi = np.arcsin(-rot_mat[2, 0])
    psi = np.arcsin((rot_mat[1, 0]) / (np.cos(np.arcsin(-rot_mat[2, 0]))))
    theta = np.arcsin((rot_mat[2, 1]) / (np.cos(np.arcsin(-rot_mat[2, 0]))))

    def loss_fn(angle_array):
        # Sum of squared residuals over all nine matrix entries.
        loss = (rot_mat[0, 0] - u_x(angle_array))**2 + (rot_mat[1, 0] - u_y(angle_array))**2 + (rot_mat[2, 0] - u_z(angle_array))**2 + (rot_mat[0, 1] - v_x(angle_array))**2 + (rot_mat[1, 1] - v_y(angle_array))**2 + (rot_mat[2, 1] - v_z(angle_array))**2 + (rot_mat[0, 2] - w_x(angle_array))**2 + (rot_mat[1, 2] - w_y(angle_array))**2 + (rot_mat[2, 2] - w_z(angle_array))**2
        return loss
    grad_loss = grad(loss_fn)
    epsilon = 1e-12
    learning_rate = 0.01

    def learning_rate_scheduler(i):
        learning_rate_update = learning_rate  # * (2 / np.sqrt(i))
        return learning_rate_update
    i = 0
    while loss_fn([theta, phi, psi]) > epsilon:
        print('Iteration:\t', i + 1, '\t\tLoss:\t', loss_fn([theta, phi, psi]))
        i = i + 1
        # Evaluate the gradient ONCE per iteration and update all three angles
        # simultaneously. The original recomputed grad_loss after each partial
        # update (3x the cost) and mixed stale and freshly-updated parameter
        # values within a single "step", which is not gradient descent.
        g = grad_loss([theta, phi, psi])
        if i < 50:
            step = learning_rate
        else:
            # After 50 iterations, scale the step up to speed convergence.
            step = (i / 2) * learning_rate_scheduler(i)
        theta = theta - step * g[0]
        phi = phi - step * g[1]
        psi = psi - step * g[2]
    return [theta, phi, psi]
def u_x(angle_array):
    '''
    Entry (0, 0) of R_rot = R_z * R_y * R_x: the x component of the rotated
    x-axis (U column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(phi) * np.cos(psi)
def u_y(angle_array):
    '''
    Entry (1, 0) of R_rot = R_z * R_y * R_x: the y component of the rotated
    x-axis (U column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(phi) * np.sin(psi)
def u_z(angle_array):
    '''
    Entry (2, 0) of R_rot = R_z * R_y * R_x: the z component of the rotated
    x-axis (U column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    phi = angle_array[1]
    return -np.sin(phi)
def v_x(angle_array):
    '''
    Entry (0, 1) of R_rot = R_z * R_y * R_x: the x component of the rotated
    y-axis (V column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(psi) * np.sin(theta) * np.sin(phi) - np.cos(theta) * np.sin(psi)
def v_y(angle_array):
    '''
    Entry (1, 1) of R_rot = R_z * R_y * R_x: the y component of the rotated
    y-axis (V column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(theta) * np.cos(psi) + np.sin(theta) * np.sin(phi) * np.sin(psi)
def v_z(angle_array):
    '''
    Entry (2, 1) of R_rot = R_z * R_y * R_x: the z component of the rotated
    y-axis (V column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    return np.cos(phi) * np.sin(theta)
def w_x(angle_array):
    '''
    Entry (0, 2) of R_rot = R_z * R_y * R_x: the x component of the rotated
    z-axis (W column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(theta) * np.cos(psi) * np.sin(phi) + np.sin(theta) * np.sin(psi)
def w_y(angle_array):
    '''
    Entry (1, 2) of R_rot = R_z * R_y * R_x: the y component of the rotated
    z-axis (W column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(theta) * np.sin(phi) * np.sin(psi) - np.cos(psi) * np.sin(theta)
def w_z(angle_array):
    '''
    Entry (2, 2) of R_rot = R_z * R_y * R_x: the z component of the rotated
    z-axis (W column).
            [ U_x V_x W_x ]
    R_rot = [ U_y V_y W_y ]
            [ U_z V_z W_z ]
    angle_array holds [theta, phi, psi] (rotations about x, y, z).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    return np.cos(theta) * np.cos(phi)
# def Basis2Angles(rot_mat):
# '''
# function Basis2Angles(rot_mat)
# This function will take a "rotation matrix", whereby the columns for an orthonormal basis. The "rotation matrix" should describe the axes of the new coordinate system in terms of the global coordinate system. Matrix should be 3x3 and invertible.
# [ e_1 e_2 e_3 ]
# Returns a vector of size 3, which containes the following angles in order:
# '''
# # phi = np.arcsin(-rot_mat[2, 0])
# # psi = np.arcsin((rot_mat[1, 0]) / (np.cos(np.arcsin(-rot_mat[2, 0]))))
# # theta = np.arctan(rot_mat[2, 1] / rot_mat[2, 2])
# # theta = np.arcsin((rot_mat[2, 1]) / (np.cos(np.arcsin(-rot_mat[2, 0]))))
# # theta = np.arccos((rot_mat[2, 2]) / (np.cos(np.arcsin(-rot_mat[2, 0]))))
# # phi = np.arcsin(-rot_mat[2, 0])
# # psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
# # if rot_mat[2, 0] / np.cos(phi) < 0:
# # psi = pi - psi
# # theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
# # if rot_mat[2, 2] / np.cos(phi) < 0:
# # theta = pi - theta
# # rot_mat_guess = Angles2Basis([theta, phi, psi])
# # error = rot_mat_guess - rot_mat
# # epsilon = 0.000009
# # error_binary = (error < epsilon)
# # if not error_binary.all():
# # phi = np.pi - phi
# phi = np.arcsin(-rot_mat[2, 0])
# psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
# if rot_mat[0, 0] / np.cos(phi) < 0:
# psi = np.pi - psi
# theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
# if rot_mat[2, 2] / np.cos(phi) < 0:
# theta = np.pi - theta
# rot_mat_guess = Angles2Basis([theta, phi, psi])
# error = rot_mat_guess - rot_mat
# epsilon = 0.000009
# error_binary = (error < epsilon)
# if not error_binary.all():
# phi = np.pi - phi
# psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
# if rot_mat[0, 0] / np.cos(phi) < 0:
# psi = np.pi - psi
# theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
# # if rot_mat[2, 2] / np.cos(phi) < 0:
# # theta = np.pi - theta
# rot_mat_guess = Angles2Basis([theta, phi, psi])
# error = rot_mat_guess - rot_mat
# epsilon = 0.000009
# error_binary = (error < epsilon)
# assert error_binary.all()
# return [theta, phi, psi]
| Python |
3D | john-drago/fluoro | code/datacomp/data_compile_from_source.py | .py | 12,354 | 336 |
'''
The purpose of this file is to organize the matched frames by compiling: (1) two .png fluoroscopic images and (2) the .mat file
with the results of the matching.
To accomplish this, first adjust the following variables at the top of the file:
- new_dir_name
- activity_to_copy
- dir_parse_list
- replacement_laterality
- wanted_files_specific
- wanted_files_base_CAD
Next, place this file into the "CR TKA Matching" directory, and then run the "master_file_mover" function included in this file.
Following this, run the "did_all_files_transfer" function to determine if the files transferred.
'''
import os
import shutil
# Directory the script is launched from; root for all relative path building.
parent_dir = os.getcwd()
# Name of the directory the compiled matching data is copied into.
new_dir_name = 'compiled_matching_data'
# Activity sub-directories to copy data from.
activity_to_copy = [
    'Gait Updated',
    'Kneeling',
    'Step-Up',
    'STS'
]
# Files to parse through to extract our matching data
dir_parse_list = [
    'CR 01',
    'CR 02',
    'CR 03',
    'CR 04',
    'CR 05',
    'CR 06',
    'CR 07',
    'CR 08',
    'CR 09',
    'CR 10',
    'CR 11',
    'CR 12',
    'CR 13',
    'CR 14',
    'CR 15'
]
# Knee laterality sub-directories ('Rt'/'Lt' -- presumably right/left; verify
# against the source data layout).
replacement_laterality = [
    'Rt',
    'Lt'
]
# Specific extra file names to copy (currently none).
wanted_files_specific = [
]
# Base CAD model file names (femur/tibia STL meshes).
wanted_files_base_CAD = [
    'LFemur.stl',
    'LTibia.stl',
    'RFemur.stl',
    'RTibia.stl'
]
# Prefix identifying calibration-file references inside DFIS_Settings.txt.
cali_base = 'reg2fl'
###
# Let's first make a function that can identify the files that we are going to want to copy
# Specifically the files that have been matched and now end in ".mat"
def files_that_end_in_type_identifier(file_type, exclusion='t.mat', *args):
    '''
    Return the file names in the current working directory that end in
    ``file_type`` but do NOT end in ``exclusion``.

    With the defaults ('.mat', 't.mat') this selects matched-result files such
    as '*_results.mat' while rejecting names ending in 't.mat'.

    Fix: the original compared a fixed-length slice
    (file[-(len(file_type) + 1):] == exclusion), which only behaves as an
    "ends with exclusion" test when len(exclusion) == len(file_type) + 1;
    for any other exclusion length the filter silently never matched.
    str.endswith expresses the intended check for every exclusion length.
    '''
    return [
        entry for entry in os.listdir()
        if entry.endswith(file_type) and not entry.endswith(exclusion)
    ]
###
# Next we are going to make a function that can identify what the specific frames that have been matched are and then create a dictionary of the corresponding data from a given frame, organized by frame
def matched_png_finder(desired_frames_list, *args):
    '''
    Takes a list of the desired frames that have output results.
    Function will make a dictionary with each frame, as a four digit number, as the key and the value as the three associated file names that share the same frame number
    '''
    frames_dict = {}
    dir_list_of_files = os.listdir()
    # Every results file is expected to end with this suffix; the 4-digit
    # frame number sits immediately before it.
    stereotyped_ending = '_results.mat'
    for matched_frame in desired_frames_list:
        list_of_files = []
        # print('\nmatched_frame: ', matched_frame)
        for assoc_file in dir_list_of_files:
            # print('same base: ', (assoc_file[0:(len(matched_frame) - len(stereotyped_ending))] == matched_frame[:-len(stereotyped_ending)]))
            # print('different base: ', (assoc_file[0:(len(matched_frame) - len(stereotyped_ending))] == matched_frame[0:1] + '2' + matched_frame[2:-len(stereotyped_ending)]))
            # File shares the results file's name stem (suffix stripped).
            same_base = (assoc_file[0:(len(matched_frame) - len(stereotyped_ending))] == matched_frame[:-len(stereotyped_ending)])
            # Stem identical except the second character replaced with '2' --
            # NOTE(review): presumably the second fluoroscope's image of the
            # same frame; confirm against the source naming convention.
            diff_base = (assoc_file[0:(len(matched_frame) - len(stereotyped_ending))] == matched_frame[0:1] + '2' + matched_frame[2:-len(stereotyped_ending)])
            if same_base or diff_base:
                # print('assoc_file:', assoc_file)
                list_of_files.append(assoc_file)
        # print('list_of_files ', list_of_files)
        # Key on the 4 characters just before the '_results.mat' suffix.
        frames_dict[matched_frame[(-len(stereotyped_ending) - 4):-len(stereotyped_ending)]] = list_of_files
    return frames_dict
def create_dir_path(*name):
    '''
    Join a series of directory names left to right into one normalized path,
    e.g. create_dir_path('foo', 'bar', 'egg', 'fyegg') -> 'foo/bar/egg/fyegg'
    (separators normalized for the host OS by os.path.normpath).
    '''
    # '/'.join reproduces the original manual ticker loop exactly: components
    # joined with '/', no trailing separator, then normalized.
    return os.path.normpath('/'.join(name))
def file_copier(where_files_located, where_files_moved, files_to_move, *args):
    '''
    Copy each file named in ``files_to_move`` from directory
    ``where_files_located`` into ``where_files_moved``.

    Fix: the original os.chdir'd into the source directory inside the loop and
    only restored the working directory after the loop, so an exception from
    shutil.copy left the process cwd mutated. Joining the source directory
    onto each file name needs no chdir at all (and os.path.join passes
    absolute file names through unchanged, matching the old behavior).
    '''
    for file_name in files_to_move:
        shutil.copy(os.path.join(where_files_located, file_name), where_files_moved)
def directory_maker(file_name, *args):
    '''
    Create ``file_name`` as a sub-directory of the current working directory
    unless an entry with that name already exists there. Always announces the
    current directory on stdout.
    '''
    cur_dir = os.getcwd()
    print('Current directory:')
    print(cur_dir)
    already_present = file_name in os.listdir(cur_dir)
    if not already_present:
        print('Making new directory named: ' + file_name + ' in ' + cur_dir, '\n')
        os.makedirs("./" + file_name, exist_ok=True)
# Accumulates the relative paths (new_dir_name/activity/patient/side) touched
# by master_file_mover; did_all_files_transfer walks this list afterwards.
tracking_changes_list = []
def master_file_mover(activity_to_copy=activity_to_copy, dir_parse_list=dir_parse_list, replacement_laterality=replacement_laterality, new_dir_name=new_dir_name, parent_dir=parent_dir):
    '''
    Put this function in "CR-TKA-Matching".
    This function, after specifying the activity, directories to parse through, laterality of the knee, where to put the new data, and where this is, will transfer all of the data to the specified location. It will organize the data by frame.

    Side effects: changes the process working directory (and leaves it changed),
    creates directories under new_dir_name, copies files, and appends to the
    module-level tracking_changes_list.
    '''
    if not os.path.isdir(new_dir_name):
        os.makedirs("./" + new_dir_name, exist_ok=True)
    os.chdir(parent_dir + '/' + new_dir_name)
    # Walk every patient / laterality / activity combination.
    for pt in dir_parse_list:
        for side in replacement_laterality:
            for act in activity_to_copy:
                # top_level_dir = os.getcwd()
                # print('\n\ntop_level_dir: ', os.getcwd())
                file_location = create_dir_path(parent_dir, pt, side, act)
                os.chdir(file_location)
                print('Now in: ', file_location)
                # print('os.getcwd(): ', os.getcwd())
                # Matched results plus their associated frame files in this dir.
                mat_files = files_that_end_in_type_identifier('.mat')
                dict_of_matched_frames = matched_png_finder(mat_files)
                if not (len(dict_of_matched_frames) == 0):
                    # NOTE(review): 'tracking_changes_list2' is never defined or
                    # used anywhere; the append below targets the module-level
                    # tracking_changes_list, so this global statement looks
                    # like a leftover typo.
                    global tracking_changes_list2
                    tracking_changes_list.append(create_dir_path(new_dir_name, act, pt, side))
                    for frame in dict_of_matched_frames.keys():
                        print('\t--->', pt, side, act, frame)
                        # One destination directory per matched frame.
                        path_to_frame = create_dir_path(parent_dir, new_dir_name, act, pt, side, frame)
                        # print('Path to new frame location: ', path_to_frame)
                        # print('os.getcwd(): ', os.getcwd())
                        os.makedirs(path_to_frame, exist_ok=True)
                        for file in dict_of_matched_frames[frame]:
                            print(file)
                            # print('os.getcwd(): ', os.getcwd())
                            shutil.copy(file, path_to_frame)
                # print('os.getcwd(): ', os.getcwd())
                # print('top_level_dir: ', top_level_dir)
                # os.chdir(top_level_dir)
def did_all_files_transfer(activity_to_copy=activity_to_copy, dir_parse_list=dir_parse_list, replacement_laterality=replacement_laterality, new_dir_name=new_dir_name, parent_dir=parent_dir, number_of_files=3):
    '''
    This function will check to see if for each frame for each side for each patient, there are three files in the child directory, after the data has been copied.
    It will determine if there are three files. If there are not three files, it will return a list of the directories where there are not at least three files.

    Relies on the module-level tracking_changes_list populated by a prior
    master_file_mover run; changes (and does not restore) the process cwd.
    '''
    os.chdir(parent_dir + '/' + new_dir_name)
    running_transfer_tracker = []
    for dr in tracking_changes_list:
        patient_and_laterality_dir = create_dir_path(parent_dir, dr)
        os.chdir(patient_and_laterality_dir)
        for frame in os.listdir():
            frame_dir = create_dir_path(patient_and_laterality_dir, frame)
            os.chdir(frame_dir)
            # Flag any frame directory that does not hold exactly
            # number_of_files entries (incomplete transfer).
            if len(os.listdir()) != number_of_files:
                running_transfer_tracker.append(frame_dir)
    return running_transfer_tracker
def moving_cali_files(activity_to_copy=activity_to_copy, dir_parse_list=dir_parse_list, replacement_laterality=replacement_laterality, new_dir_name=new_dir_name, parent_dir=parent_dir, number_of_files=3, cali_base=cali_base, cali_file='DFIS_Settings.txt'):
    '''
    This function will move the calibration files from the "cali" folder under
    "CR **", according to the settings contained in the DFIS_Settings.txt
    document.

    Fixes: the settings file handle was opened but never closed (now a
    context manager), and the prefix test sliced a fixed 6 characters
    (line[0:6]), which only matched when cali_base had length 6 -- replaced
    with str.startswith.
    '''
    if not os.path.isdir(new_dir_name):
        os.makedirs("./" + new_dir_name, exist_ok=True)
    os.chdir(parent_dir + '/' + new_dir_name)
    for pt in dir_parse_list:
        for side in replacement_laterality:
            for act in activity_to_copy:
                settings_loc = create_dir_path(parent_dir, pt, side, act)
                file_text_holder = []
                # Context manager guarantees the settings file is closed even
                # when we break out of the loop or an exception propagates.
                with open(settings_loc + '/' + cali_file) as DFIS_file:
                    print('\n', '\n', 'Now searching: ', settings_loc)
                    for line in DFIS_file:
                        file_text_holder.append(line.rstrip())  # rstrip() removes the '\n' from the end of the line
                        # Calibration references start with the cali_base prefix.
                        if line.rstrip().lower().startswith(cali_base):
                            try:
                                # Copy the referenced calibration file and the
                                # settings file next to the compiled data.
                                new_dir_cali_drop = create_dir_path(parent_dir, new_dir_name, pt, act, side)
                                os.makedirs(new_dir_cali_drop, exist_ok=True)
                                shutil.copy(create_dir_path(parent_dir, pt, 'cali', line.rstrip().lower()), new_dir_cali_drop)
                                shutil.copy(create_dir_path(parent_dir, pt, side, act, cali_file), new_dir_cali_drop)
                            except (FileNotFoundError):
                                # Referenced calibration file is missing; give
                                # up on the rest of this settings file.
                                break
                            # Only the first matching reference is used.
                            break
# for act in activity_to_copy:
# for pt in dir_parse_list:
# for side in replacement_laterality:
# patient_and_laterality_dir = create_dir_path(parent_dir, new_dir_name, act, pt, side)
# os.chdir(patient_and_laterality_dir)
# for frame in os.listdir():
# frame_dir = create_dir_path(patient_and_laterality_dir, frame)
# os.chdir(frame_dir)
# if len(os.listdir()) != number_of_files:
# running_transfer_tracker.append(frame_dir)
# def data_compile_folder_creator(activity_to_copy=activity_to_copy, dir_parse_list=dir_parse_list, replacement_laterality=replacement_laterality, new_dir_name=new_dir_name, parent_dir=parent_dir):
# pass
# def directory_parser(dir_list, end_destination=None, *vars):
# cur_dir = os.getcwd()
# for dir in dir_list:
# if dir not in cur_dir:
# print('Directory not in current directory: break')
# break
# directory_maker(new_dir_name)
| Python |
3D | john-drago/fluoro | code/datacomp/h5py_multidimensional_array.py | .py | 27,688 | 742 | '''
This file is developed to help deal with saving multidimensional arrays (4-D) that have variable last three-dimensions. This file is meant to help store variable voxel data sets with a variety of sizes.
'''
import numpy as np
import h5py
import math
import os
import tempfile
def matrix_unwrapper_3d(matrix):
    '''
    Flatten a 3-dimensional array into a 1-D array in a reproducible order.
    input:
        matrix --> numpy array 3D
    output:
        array --> unravelled in C order: along columns, then rows, then z_dir
        shape --> the original shape (needed later to rewrap)
    '''
    original_shape = matrix.shape
    depth, n_rows, n_cols = original_shape
    flat = np.reshape(matrix, depth * n_rows * n_cols)
    return [flat, original_shape]
def matrix_rewrapper_3d(array, shape):
    '''
    Rewrap a 1-D array into a 3-D matrix, inverting matrix_unwrapper_3d.
    input:
        array --> numpy array 1D
        shape --> target shape as a tuple (z_dir, rows, columns)
    output:
        matrix with dimensions (z_dir, rows, columns)
    '''
    depth = shape[0]
    n_rows = shape[1]
    n_cols = shape[2]
    return np.reshape(array, (depth, n_rows, n_cols))
def variable_matrix_storer(vox_data_var, file_name_w_path, dset_name='vox_dset', save_as_type='uint8'):
    '''
    This function will create a new h5py file and dataset when given a 4D dataset containing several instances of 3D voxel data that is variable length.
    It will first reshape the individual 3D arrays into 1D arrays with matrix_unwrapper_3d. It will then store each matrix as a 1D array in a numpy array under the name dset_name.
    An attribute of dset_name will be a matrix of shapes, corresponding to the original shape of the matrices before they were reshaped. This will be important for when the 3D matrices are regenerated.
    input:
        vox_data_var --> this function will take the 4D matrix as an argument, can handle 4D matrices of voxel data sets, where the last three-dimensions are variable, expects numpy array of 3D numpy arrays
        file_name_w_path --> takes string argument describing the path and filename (ending in .h5py) where the dataset is to be stored
        dset_name --> the name of the dataset for indexing purposes
        save_as_type --> element dtype used for the variable-length storage
    output:
        vox_dset --> will return the voxel dataset from the closed file. In the process, the file will be created according to file_name_w_path, with a corresponding data set (dset_name), with 'shapes' attribute
    NOTE(review): the dataset is returned AFTER vox_file.close(); reading data
    through a dataset of a closed file fails in h5py -- confirm callers only
    use the return value as a handle/sentinel.
    '''
    num_of_inst = len(vox_data_var)
    # One (z, rows, cols) shape row per instance, for later rewrapping.
    shapes_matrix = np.zeros((num_of_inst, 3))
    array_list = [0] * num_of_inst
    for inst in range(num_of_inst):
        array_list[inst], shapes_matrix[inst] = matrix_unwrapper_3d(vox_data_var[inst])
    array_list = np.array(array_list)
    vox_file = h5py.File(file_name_w_path, 'w')
    # Need to create a special datatype in h5py because of the "ragged" nature of the variable length input
    var_dtype = h5py.special_dtype(vlen=save_as_type)
    vox_dset = vox_file.create_dataset(dset_name, data=array_list, dtype=var_dtype, compression='gzip')
    shapes_matrix = shapes_matrix.astype('uint16')
    # creating an attribute for the data set so we will be able to regenerate shapes later on
    vox_dset.attrs['shapes'] = shapes_matrix
    vox_file.close()
    return vox_dset
def max_shape_determination(vox_mat):
    '''
    Determine the element-wise maximum shape across the 3D voxel datasets
    contained in a 4D matrix.
    input:
        vox_mat --> unpadded 4D matrix: 1-D numpy (object) array of 3D numpy arrays of varying size
    output:
        max_shape_vector --> list of length 3 describing the maximum extent along each of the three dimensions
    '''
    max_shape_vector = [0, 0, 0]
    for idx in range(vox_mat.shape[0]):
        current_shape = vox_mat[idx].shape
        for axis in range(3):
            if current_shape[axis] > max_shape_vector[axis]:
                max_shape_vector[axis] = current_shape[axis]
    return max_shape_vector
def _symmetric_pad_widths(current_shape, target_shape):
    '''
    Per-axis (before, after) pad widths that centre current_shape inside
    target_shape; an odd difference puts the extra layer on the "before" side.
    Returns a (3, 2) float array (caller casts to int for np.pad).
    '''
    pad = np.zeros((3, 2))
    for axis in range(3):
        diff = target_shape[axis] - current_shape[axis]
        if diff > 0:
            pad[axis, 0] = diff // 2 + (diff % 2)
            pad[axis, 1] = diff // 2
    return pad


def variable_matrix_padder(vox_mat, known_max_shape_vector=None):
    '''
    Take a 4D matrix containing many 3D voxel datasets of varying size, find
    the longest size amongst all dimensions, and return a new 4D matrix with
    the 3D voxel datasets symmetrically zero-padded to a common size.
    input:
        vox_mat --> unpadded 4D matrix: 1-D numpy (object) array of 3D numpy arrays (varying size)
        known_max_shape_vector --> optional precomputed length-3 target shape;
            when omitted it is derived with max_shape_determination
    output:
        vox_mat_pad --> padded 4D numpy array with all voxel datasets the same size
    '''
    # The original repeated the same odd/even pad arithmetic three times, once
    # per axis; _symmetric_pad_widths performs the identical computation once.
    if known_max_shape_vector:
        max_shape_vector = known_max_shape_vector
    else:
        max_shape_vector = max_shape_determination(vox_mat)
    vox_mat_pad_shape = [vox_mat.shape[0]] + list(max_shape_vector)
    vox_mat_pad = np.zeros(vox_mat_pad_shape)
    for item in range(vox_mat.shape[0]):
        pad_mat = _symmetric_pad_widths(vox_mat[item].shape, max_shape_vector)
        vox_mat_pad[item] = np.pad(vox_mat[item], pad_width=pad_mat.astype(int), mode='constant')
    return vox_mat_pad
def iterative_matrix_padder(vox_mat, size_of_save_obj=50, save_as_type=np.dtype('uint8'), storage_file_path=os.path.expanduser('~/fluoro/data/compilation/'), storage_file_name=None):
    '''
    Pad a 4D collection of varying-size 3D voxel datasets to a common shape and store the result in an h5py file.

    Useful when the padded 4D matrix is too big for RAM: chunks of the input are padded and staged in
    anonymous temporary files (which vanish when closed), the input is released, and each padded chunk
    is then written into its slice of the output dataset.

    input:
        - vox_mat --> the 4D dataset of varying size 3D voxel datasets to complete padding on
        - size_of_save_obj --> number of instances staged per temporary file
        - save_as_type --> numpy dtype used for the stored dataset
        - storage_file_path / storage_file_name --> location of the output h5py file
          (name defaults to 'voxels_pad')
    output:
        - store_dset --> h5py dataset stored at storage_file_path under key 'vox_dset'.
          NOTE(review): the file is closed before returning, so the handle must be
          reopened by the caller to read the data.
    '''
    temp_file_base_name = 'tempfile'
    max_shape_vector = max_shape_determination(vox_mat)
    vox_mat_pad_shape = list(max_shape_vector)
    vox_mat_pad_shape.insert(0, vox_mat.shape[0])
    vox_mat_pad = np.zeros(vox_mat_pad_shape)
    vox_mat_pad = vox_mat_pad.astype(save_as_type)
    dset_dict = {}
    if not storage_file_name:
        storage_file_name = 'voxels_pad'
    store_file = h5py.File(os.path.abspath(os.path.join(storage_file_path, storage_file_name + '.h5py')), 'w')
    store_dset = store_file.create_dataset('vox_dset', data=vox_mat_pad, dtype=save_as_type)
    for item in range(math.ceil(vox_mat_pad_shape[0] / size_of_save_obj)):
        print('\nuploading item:\t', item)
        print('\n')
        # Always take the current front chunk: vox_mat is consumed from the
        # front below, so its memory footprint shrinks as we stage chunks.
        sub_mat = variable_matrix_padder(vox_mat[0:size_of_save_obj], known_max_shape_vector=max_shape_vector)
        print('sub_mat.shape', sub_mat.shape)
        # BUG FIX: the original np.delete call omitted axis=0; without an axis,
        # np.delete flattens a multi-dimensional array, corrupting every chunk
        # after the first. Delete along the instance axis (this also matches
        # sequential_iterative_matrix_padder).
        vox_mat = np.delete(vox_mat, np.s_[:size_of_save_obj:1], axis=0)
        print('vox_mat.shape', vox_mat.shape)
        dset_dict[temp_file_base_name + '_' + str(item)] = tempfile.TemporaryFile()
        np.save(dset_dict[temp_file_base_name + '_' + str(item)], sub_mat)
    # Release the (now fully consumed) input before unpacking.
    vox_mat = None
    print('\n\n\n')
    for item in range(math.ceil(vox_mat_pad.shape[0] / size_of_save_obj)):
        print('unpacking item:\t', item)
        dset_dict[temp_file_base_name + '_' + str(item)].seek(0)
        temp_data = np.load(dset_dict[temp_file_base_name + '_' + str(item)], allow_pickle=True)
        store_dset[item * size_of_save_obj:item * size_of_save_obj + size_of_save_obj] = temp_data.astype(save_as_type)
    store_file.close()
    vox_mat_pad = None
    return store_dset
def sequential_iterative_matrix_padder(vox_mat, size_of_save_obj=50, save_as_type=np.dtype('uint8'), storage_file_path=os.path.expanduser('~/fluoro/data/compilation/'), storage_file_name=None, compression=None):
    '''
    Pad a collection of varying-size 3D voxel datasets to a common shape and
    stream the result into an h5py file, one chunk at a time.

    Designed for inputs too large for RAM: raw (unpadded) chunks are staged in
    anonymous temporary files while the in-memory input is consumed; each chunk
    is then reloaded, padded to the global maximum shape, and written directly
    into its slice of the output dataset. In contrast to iterative_matrix_padder
    (which pads on the way IN to the temp files), padding here happens on the
    way OUT, so only one padded chunk exists in memory at a time.
    *** Would typically want to use sequential_iterative_matrix_padder before iterative_matrix_padder, as it can handle larger files without getting out of memory errors.

    input:
        - vox_mat --> the 4D dataset of varying size 3D voxel datasets to complete padding on
        - size_of_save_obj --> number of instances staged per temporary file
        - save_as_type --> numpy dtype of the stored dataset
        - storage_file_path / storage_file_name --> location of the output h5py
          file (name defaults to 'voxels_pad')
        - compression --> optional h5py compression filter (e.g. 'lzf')
    output:
        - store_dset --> h5py dataset stored at storage_file_path under key
          'vox_dset'. NOTE(review): the file is closed before returning, so the
          returned handle is no longer readable; reopen the file to access data.
    '''
    temp_file_base_name = 'tempfile'
    # Largest extent along each axis over all instances; every volume is padded
    # up to this shape.
    max_shape_vector = max_shape_determination(vox_mat)
    # print(vox_mat.shape)
    # Output shape: (num_instances, *max_shape_vector).
    vox_mat_pad_shape = list(max_shape_vector)
    vox_mat_pad_shape.insert(0, vox_mat.shape[0])
    # print(vox_mat_pad_shape)
    # vox_mat_pad = np.zeros(vox_mat_pad_shape)
    # vox_mat_pad = vox_mat_pad.astype(save_as_type)
    dset_dict = {}
    if not storage_file_name:
        storage_file_name = 'voxels_pad'
    store_file = h5py.File(os.path.abspath(os.path.join(storage_file_path, storage_file_name + '.h5py')), 'w')
    # Pre-allocate the output dataset; data is filled in slice-by-slice below.
    if compression:
        store_dset = store_file.create_dataset('vox_dset', shape=tuple(vox_mat_pad_shape), dtype=save_as_type, compression=compression)
    else:
        store_dset = store_file.create_dataset('vox_dset', shape=tuple(vox_mat_pad_shape), dtype=save_as_type)
    # Stage the input chunk by chunk into anonymous temp files, consuming
    # vox_mat from the front so its memory footprint shrinks as we go.
    for item in range(math.ceil(vox_mat_pad_shape[0] / size_of_save_obj)):
        print('\nuploading item:\t', item)
        print('\n')
        dset_dict[temp_file_base_name + '_' + str(item)] = tempfile.TemporaryFile()
        np.save(dset_dict[temp_file_base_name + '_' + str(item)], vox_mat[0:size_of_save_obj])
        # print('Sub mat\t', vox_mat[0:size_of_save_obj].shape)
        vox_mat = np.delete(vox_mat, np.s_[:size_of_save_obj:1], axis=0)
        print('vox_mat.shape\t', vox_mat.shape)
    # Drop the (now empty) input before reloading chunks.
    vox_mat = None
    print('\n\n\n')
    print(dset_dict.keys())
    print('\n\n\n')
    # Reload each staged chunk, pad it to max_shape_vector, and write it into
    # its slice of the output dataset.
    for item in range(math.ceil(vox_mat_pad_shape[0] / size_of_save_obj)):
        print('unpacking item:\t', item)
        dset_dict[temp_file_base_name + '_' + str(item)].seek(0)
        temp_data = np.load(dset_dict[temp_file_base_name + '_' + str(item)], allow_pickle=True)
        # Closing the anonymous TemporaryFile deletes it.
        dset_dict[temp_file_base_name + '_' + str(item)].close()
        upload_data = variable_matrix_padder(temp_data, max_shape_vector)
        store_dset[item * size_of_save_obj:item * size_of_save_obj + size_of_save_obj] = upload_data.astype(save_as_type)
        upload_data = None
    store_file.close()
    return store_dset
def variable_matrix_loader(file_name_w_path, dset_name='vox_dset', index_of_instances=[None, None], padding=False, known_max_shape_vector=None):
    '''
    Load a ragged voxel dataset from an h5py file and reshape each instance to 3D.

    Assumes foo.h5py already exists and contains a dataset under 'dset_name'
    holding one flattened (1D) voxel array per instance, plus a 'shapes'
    attribute giving the 3D shape used to reshape each instance.

    input:
        - file_name_w_path --> path to (and name of) the h5py file holding the data
        - dset_name --> key of the dataset inside the file
        - index_of_instances --> [start, stop] indices (standard Python slicing);
          [None, None] loads the entire dataset.
          NOTE(review): the mutable default list is never mutated here, so the
          shared-default pitfall does not bite; kept for interface compatibility.
        - padding --> if True, pad the loaded volumes to a common shape via
          variable_matrix_padder
        - known_max_shape_vector --> optional precomputed target shape for padding
    output:
        - mat_3d_vox --> numpy array of the reshaped 3D voxel volumes (padded
          to equal sizes when padding=True)
    '''
    # Use a context manager so the file is closed even if reshape/read raises
    # (the original leaked the handle on any exception before close()).
    with h5py.File(file_name_w_path, 'r') as vox_file:
        vox_dset = vox_file[dset_name]
        vox_dset_shapes = vox_dset.attrs['shapes']
        start, stop = index_of_instances[0], index_of_instances[1]
        if start is None and stop is None:
            vox_dset_subset = vox_dset
            vox_shapes_subset = vox_dset_shapes
        else:
            vox_dset_subset = vox_dset[start:stop]
            vox_shapes_subset = vox_dset_shapes[start:stop]
        # Reshape every flattened instance back to 3D while the file is open.
        mat_3d_vox = [np.reshape(vox_dset_subset[inst], vox_shapes_subset[inst])
                      for inst in range(vox_dset_subset.shape[0])]
    if padding:
        if known_max_shape_vector:
            return variable_matrix_padder(np.array(mat_3d_vox), known_max_shape_vector)
        return variable_matrix_padder(np.array(mat_3d_vox))
    return np.array(mat_3d_vox)
if __name__ == '__main__':
    # Driver script: load the raw ragged voxel file from the external drive,
    # then pad and recompress it to a fixed-shape dataset on the same drive.
    print('\n')
    # import sys
    # sys.path.append(os.path.abspath(os.path.expanduser('~/fluoro/code')))
    # load_path = '~/fluoro/data/compilation/voxels.h5py'
    # Hard-coded source path; adjust per machine.
    load_path = '/Volumes/Seagate/fluoro/voxels_mark_origin.h5py'
    # [None, None] selects every instance in the dataset.
    vox_mat = variable_matrix_loader(load_path, index_of_instances=[None, None])
    print('loaded max matrix')
    # Maximum extent per axis over all volumes (informational here; the
    # padder recomputes it internally).
    shape_mat = max_shape_determination(vox_mat)
    # Pad everything to a common shape and write the LZF-compressed result in
    # chunks of 100 instances.
    try1 = sequential_iterative_matrix_padder(vox_mat, size_of_save_obj=100, save_as_type=np.dtype('int8'), storage_file_path='/Volumes/Seagate/fluoro', storage_file_name='voxels_mark_origin_comp', compression='lzf')
    # try1 = sequential_iterative_matrix_padder(vox_mat, size_of_save_obj=100, save_as_type=np.dtype('int8'), storage_file_path='/Volumes/Seagate/fluoro', storage_file_name='voxels_pad', compression=None)
    # try1 = sequential_iterative_matrix_padder(vox_mat, size_of_save_obj=100, save_as_type=np.dtype('int8'), storage_file_path=os.path.expanduser('~/fluoro/data/compilation'), storage_file_name=None, compression='lzf')
    # mat1 = variable_matrix_padder(vox_mat)
# ------------------------------------------
# BACKUP ITERATIVE MATRIX PADDER 2: temp file, pad on input
# def iterative_matrix_padder(vox_mat, size_of_save_obj=750, save_as_type=np.dtype('uint8'), save_as_h5py=False, storage_file_path=os.path.expanduser('~/fluoro/data/compilation/'), storage_file_name=None):
# '''
# This function takes a 4D matrix as an argument, and it will use symmetric padding to generate a new 4D matrix that has 3D voxel datasets that are all equivalent sizes.
# This function is specifically useful for when the 3D matrix will not fit in RAM, as it is too big. This function will create a temporary file and then the temporary file will be deleted after the 4D matrix is padded.
# input:
# - vox_mat --> the 4D dataset of varying size 3D voxel datasets to complete padding on
# output:
# - vox_mat_pad --> padded 4D numpy array with padded 3D voxel datasets all of the same size
# '''
# temp_file_base_name = 'tempfile'
# var_dtype = h5py.special_dtype(vlen=save_as_type)
# temp_file = h5py.File(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')), 'w')
# max_shape_vector = max_shape_determination(vox_mat)
# # print(vox_mat.shape)
# vox_mat_pad_shape = list(max_shape_vector)
# vox_mat_pad_shape.insert(0, vox_mat.shape[0])
# # print(vox_mat_pad_shape)
# vox_mat_pad = np.zeros(vox_mat_pad_shape)
# vox_mat_pad = vox_mat_pad.astype(save_as_type)
# temp_file.close()
# dset_dict = {}
# for item in range(math.ceil(vox_mat_pad_shape[0] / size_of_save_obj)):
# print('uploading item:\t', item)
# temp_file = h5py.File(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')), 'a')
# sub_mat = vox_mat[item * size_of_save_obj:item * size_of_save_obj + size_of_save_obj]
# num_of_inst = len(sub_mat)
# shapes_matrix = np.zeros((num_of_inst, 3))
# array_list = [0] * num_of_inst
# for sub_inst in range(num_of_inst):
# array_list[sub_inst], shapes_matrix[sub_inst] = matrix_unwrapper_3d(sub_mat[sub_inst])
# # print(array_list[sub_inst].shape)
# # print(shapes_matrix[sub_inst])
# array_list = np.array(array_list)
# dset_dict[temp_file_base_name + '_' + str(item)] = temp_file.create_dataset(temp_file_base_name + '_' + str(item), data=array_list, dtype=var_dtype, compression='gzip')
# shapes_matrix = shapes_matrix.astype('uint16')
# dset_dict[temp_file_base_name + '_' + str(item)].attrs['shapes'] = shapes_matrix
# # print(temp_dset[:].shape)
# temp_file.close()
# vox_mat = None
# print('\n\n\n')
# for item in range(math.ceil(vox_mat_pad.shape[0] / size_of_save_obj)):
# print('unpacking item:\t', item)
# temp_data = variable_matrix_loader(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')), dset_name=temp_file_base_name + '_' + str(item), index_of_instances=[None, None], padding=True, known_max_shape_vector=max_shape_vector)
# # print('Temp Data:\t', temp_data[:].shape, '\t', type(temp_data[:]), '\t', temp_data[:].dtype)
# vox_mat_pad[item * size_of_save_obj:item * size_of_save_obj + size_of_save_obj] = temp_data.astype(save_as_type)
# os.remove(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')))
# vox_mat_pad = vox_mat_pad.astype(save_as_type)
# if not storage_file_name:
# storage_file_name = 'voxels_pad'
# if save_as_h5py:
# print('vox_mat_pad.dtype', '\t', vox_mat_pad.dtype)
# store_file = h5py.File(os.path.abspath(os.path.join(storage_file_path, storage_file_name + '.h5py')), 'w')
# store_dset = store_file.create_dataset('vox_dset', data=vox_mat_pad, dtype=save_as_type, compression='gzip')
# store_file.close()
# vox_mat_pad = None
# return store_dset
# else:
# return vox_mat_pad
# ------------------------------------------
# ------------------------------------------
# BACKUP ITERATIVE MATRIX PADDER 1: make new datasets in h5py file
# def iterative_matrix_padder(vox_mat, size_of_save_obj=750, save_as_type=np.dtype('uint8'), save_as_h5py=False, storage_file_path=os.path.expanduser('~/fluoro/data/compilation/'), storage_file_name=None):
# '''
# This function takes a 4D matrix as an argument, and it will use symmetric padding to generate a new 4D matrix that has 3D voxel datasets that are all equivalent sizes.
# This function is specifically useful for when the 3D matrix will not fit in RAM, as it is too big. This function will create a temporary file and then the temporary file will be deleted after the 4D matrix is padded.
# input:
# - vox_mat --> the 4D dataset of varying size 3D voxel datasets to complete padding on
# output:
# - vox_mat_pad --> padded 4D numpy array with padded 3D voxel datasets all of the same size
# '''
# temp_file_base_name = 'tempfile'
# var_dtype = h5py.special_dtype(vlen=save_as_type)
# temp_file = h5py.File(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')), 'w')
# max_shape_vector = max_shape_determination(vox_mat)
# # print(vox_mat.shape)
# vox_mat_pad_shape = list(max_shape_vector)
# vox_mat_pad_shape.insert(0, vox_mat.shape[0])
# # print(vox_mat_pad_shape)
# vox_mat_pad = np.zeros(vox_mat_pad_shape)
# vox_mat_pad = vox_mat_pad.astype(save_as_type)
# temp_file.close()
# dset_dict = {}
# for item in range(math.ceil(vox_mat_pad_shape[0] / size_of_save_obj)):
# print('uploading item:\t', item)
# temp_file = h5py.File(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')), 'a')
# sub_mat = vox_mat[item * size_of_save_obj:item * size_of_save_obj + size_of_save_obj]
# num_of_inst = len(sub_mat)
# shapes_matrix = np.zeros((num_of_inst, 3))
# array_list = [0] * num_of_inst
# for sub_inst in range(num_of_inst):
# array_list[sub_inst], shapes_matrix[sub_inst] = matrix_unwrapper_3d(sub_mat[sub_inst])
# # print(array_list[sub_inst].shape)
# # print(shapes_matrix[sub_inst])
# array_list = np.array(array_list)
# dset_dict[temp_file_base_name + '_' + str(item)] = temp_file.create_dataset(temp_file_base_name + '_' + str(item), data=array_list, dtype=var_dtype, compression='gzip')
# shapes_matrix = shapes_matrix.astype('uint16')
# dset_dict[temp_file_base_name + '_' + str(item)].attrs['shapes'] = shapes_matrix
# # print(temp_dset[:].shape)
# temp_file.close()
# vox_mat = None
# print('\n\n\n')
# for item in range(math.ceil(vox_mat_pad.shape[0] / size_of_save_obj)):
# print('unpacking item:\t', item)
# temp_data = variable_matrix_loader(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')), dset_name=temp_file_base_name + '_' + str(item), index_of_instances=[None, None], padding=True, known_max_shape_vector=max_shape_vector)
# # print('Temp Data:\t', temp_data[:].shape, '\t', type(temp_data[:]), '\t', temp_data[:].dtype)
# vox_mat_pad[item * size_of_save_obj:item * size_of_save_obj + size_of_save_obj] = temp_data.astype(save_as_type)
# os.remove(os.path.abspath(os.path.join(os.getcwd(), temp_file_base_name + '.h5py')))
# vox_mat_pad = vox_mat_pad.astype(save_as_type)
# if not storage_file_name:
# storage_file_name = 'voxels_pad'
# if save_as_h5py:
# print('vox_mat_pad.dtype', '\t', vox_mat_pad.dtype)
# store_file = h5py.File(os.path.abspath(os.path.join(storage_file_path, storage_file_name + '.h5py')), 'w')
# store_dset = store_file.create_dataset('vox_dset', data=vox_mat_pad, dtype=save_as_type, compression='gzip')
# store_file.close()
# vox_mat_pad = None
# return store_dset
# else:
# return vox_mat_pad
# ------------------------------------------
# if index_of_instances[0] == None and index_of_instances[1] == None:
# instance_index = [0, vox_dset[0]]
# elif index_of_instances[0] != None and index_of_instances[1] != None:
# instance_index = index_of_instances
# elif index_of_instances[0] == None:
# if index_of_instances[1] <0:
# instance_index = [0, vox_dset[0]+index_of_instances[1]]
# elif index_of_instances[1] >0:
# instance_index = [0, index_of_instances[1]]
# else:
# assert ValueError
# elif index_of_instances[1] == None:
# if index_of_instances[0] <0:
# instance_index[]
#
#
| Python |
3D | john-drago/fluoro | code/datacomp/voxel_graph.py | .py | 2,370 | 69 | '''
This file will allow us to roughly graph voxel data for visualization using mayavi.
'''
import mayavi.mlab as mlab
# from data_organization import extract_stl_femur_tib
import numpy as np
import h5py
import h5py_multidimensional_array
# path_to_dir = '/Users/johndrago/fluoro/data/Gait Updated/CR 01/Lt'
# fib_tib_pair = extract_stl_femur_tib(path_to_dir)
# fib_bin = fib_tib_pair[0]
# tib_bin = fib_tib_pair[1]
def simple_voxel_graph(bin_voxel):
    '''
    Plot a 3D scatter of the occupied vertices of a binary voxel volume.

    input:
        - bin_voxel --> 3D numpy array; nonzero entries mark occupied voxels
    output:
        - (figure_vox, vox_verts) --> the mayavi figure and the points3d actor
    raises:
        - ValueError if bin_voxel is not 3-dimensional
    '''
    if len(bin_voxel.shape) != 3:
        # BUG FIX: the original used `assert ValueError(...)`, which never
        # fires (a ValueError instance is always truthy); actually raise.
        raise ValueError('Need 3D voxel dataset')
    # np.argwhere yields the (x, y, z) indices of nonzero voxels in the same
    # lexicographic order as the original triple loop, without Python-level
    # iteration over every voxel.
    vertex_matrix = np.argwhere(bin_voxel).astype(float)
    figure_vox = mlab.figure()
    # vox_verts = mlab.points3d(vertex_matrix[:, 0], vertex_matrix[:, 1], vertex_matrix[:, 2], color=(0.2, 0.2, 0.7), opacity=0.45, mode='point')
    vox_verts = mlab.points3d(vertex_matrix[:, 0], vertex_matrix[:, 1], vertex_matrix[:, 2], color=(0.2, 0.2, 0.7), opacity=0.45, mode='sphere', scale_factor=0.25)
    return figure_vox, vox_verts
if __name__ == '__main__':
    # Visual sanity check: plot one randomly chosen instance of the padded
    # voxel dataset.
    # For testing voxel_graph, see below and uncomment:
    # path_to_dir = '/Users/johndrago/fluoro/data/Gait Updated/CR 01/Lt'
    # fib_tib_pair = extract_stl_femur_tib(path_to_dir)
    # fib_bin = fib_tib_pair[0]
    # tib_bin = fib_tib_pair[1]
    # simple_voxel_graph(fib_bin)
    # 6364 presumably matches the dataset's instance count — TODO confirm.
    random_numb = np.random.randint(0, 6364)
    # vox_data = h5py_multidimensional_array.variable_matrix_loader('/Users/johndrago/fluoro/data/compilation/voxels.h5py', 'vox_dset', [random_numb, random_numb + 1])
    vox_file = h5py.File('/Users/johndrago/fluoro/data/compilation/voxels_pad.h5py', 'r')
    vox_init = vox_file['vox_dset']
    # vox_data = h5py_multidimensional_array.variable_matrix_loader('/Users/johndrago/fluoro/data/compilation/voxels.h5py', 'vox_dset', [random_numb, random_numb + 1])
    simple_voxel_graph(vox_init[random_numb])
    vox_file.close()
| Python |
3D | john-drago/fluoro | code/datacomp/data_augmentation.py | .py | 56,997 | 1,479 | '''
This function will perform data augmentation on our current data set. Basically, we will do small translations and rotations on our voxel dataset to increase the number of instances we are currently training with.
'''
import os
import scipy.io as sio
import skimage
import numpy as np
import trimesh
import pandas as pd
import h5py
import pickle
import psutil
import datetime
# ---------------------------------------------------------------
# Before we get started, need to first define some top-level variables, which future functions will make reference to.
top_level_dir = os.path.expanduser('~/fluoro/data')
save_dir = os.path.abspath('/Volumes/Seagate/fluoro')
# ---------------------------------------------------------------
# These are some functions, which will be useful for changing the data for the augmentation.
def Global2Local_Coord(rot_mat, trans_vector, points_in_global):
    '''
    func Global2Local_Coord(rot_mat, trans_vector, points_in_global)
    - Takes "rotation matrix", whereby the columns form an orthonormal basis, describing the axes of the new coordinate system in terms of the global coordinate system: Should be of form 3x3. Matrix should be square and invertible.
        [ e_1 e_2 e_3 ]
    - Takes translation vector of size 3, which describes translation from global origin to new local origin (global origin ----> local origin).
    - Takes points defined in the global coordinate frame.
    - Returns positions (which were originally defined in the global coordinate frame) in new local coordinate frame.
    - Raises ValueError for a non-square rot_mat or a mis-shaped trans_vector.
    '''
    if rot_mat.shape[0] != rot_mat.shape[1]:
        raise ValueError('Rotation Matrix should be square')
    # Validation restored (was commented out) for consistency with the active
    # check in Local2Global_Coord and the documented size-3 contract.
    elif trans_vector.shape != (3,) and trans_vector.shape != (1, 3):
        raise ValueError('Translation Matrix should be an array of size 3 or 1x3 matrix')
    # Inverse transform: p_local = R^-1 @ (p_global - t)
    translated_points = points_in_global - trans_vector
    points_in_local = np.transpose(np.matmul(np.linalg.inv(rot_mat), np.transpose(translated_points)))
    return points_in_local
def Local2Global_Coord(rot_mat, trans_vector, points_in_local):
    '''
    Convert points expressed in a local coordinate frame into the global frame.

    - rot_mat: 3x3 invertible matrix whose columns [ e_1 e_2 e_3 ] are the
      local axes expressed in global coordinates.
    - trans_vector: translation of size 3 (or 1x3) from the global origin to
      the local origin (global origin ----> local origin).
    - points_in_local: points defined in the local coordinate frame.
    Returns the same points expressed in the global frame.
    Raises ValueError for a non-square rot_mat or a mis-shaped trans_vector.
    '''
    if rot_mat.shape[0] != rot_mat.shape[1]:
        raise ValueError('Rotation Matrix should be square')
    if trans_vector.shape != (3,) and trans_vector.shape != (1, 3):
        raise ValueError('Translation Matrix should be an array of size 3 or 1x3 matrix')
    # Forward transform: p_global = R @ p_local + t
    rotated = np.transpose(np.matmul(rot_mat, np.transpose(points_in_local)))
    return rotated + trans_vector
def Basis2Angles(rot_mat):
    '''
    Recover the Tait-Bryan angles [theta, phi, psi] from a rotation matrix.

    rot_mat is a 3x3 orthonormal basis [ e_1 e_2 e_3 ] assumed to factor as
        R_rot = R_z(psi) * R_y(phi) * R_x(theta)   (order matters)
    where theta, phi, psi are the rotations about the x, y and z axes.
    Returns np.array([theta, phi, psi]) such that Angles2Basis reproduces
    rot_mat to within a small tolerance; raises AssertionError otherwise.
    '''
    epsilon = 0.000009
    # First candidate solution: phi from the (2, 0) entry (-sin(phi)).
    phi = np.arcsin(-rot_mat[2, 0])
    psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
    if rot_mat[0, 0] / np.cos(phi) < 0:
        psi = np.pi - psi
    theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
    if rot_mat[2, 2] / np.cos(phi) < 0:
        theta = np.pi - theta
    # Check the candidate by rebuilding the basis. BUG FIX: compare absolute
    # errors — the original signed comparison (error < epsilon) wrongly
    # accepted arbitrarily large NEGATIVE errors.
    error = Angles2Basis([theta, phi, psi]) - rot_mat
    if not (np.abs(error) < epsilon).all():
        # arcsin is two-valued: retry with the supplementary angle for phi.
        phi = np.pi - phi
        psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
        if rot_mat[0, 0] / np.cos(phi) < 0:
            psi = np.pi - psi
        theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
        # BUG FIX: the original called Angles2Basis(np.array(theta, phi, psi)),
        # which passes phi as the dtype argument and raises TypeError; pass a
        # sequence of the three angles instead (as the first branch does).
        error = Angles2Basis([theta, phi, psi]) - rot_mat
        # NOTE(review): assert is stripped under -O; kept for interface
        # compatibility (callers may rely on AssertionError).
        assert (np.abs(error) < epsilon).all()
    return np.array([theta, phi, psi])
def u_x(angle_array):
    '''
    U_x entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively; U_x = cos(phi) * cos(psi).
    '''
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(phi) * np.cos(psi)
def u_y(angle_array):
    '''
    U_y entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively; U_y = cos(phi) * sin(psi).
    '''
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(phi) * np.sin(psi)
def u_z(angle_array):
    '''
    U_z entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively; U_z = -sin(phi).
    '''
    phi = angle_array[1]
    return -np.sin(phi)
def v_x(angle_array):
    '''
    V_x entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively;
    V_x = cos(psi) * sin(theta) * sin(phi) - cos(theta) * sin(psi).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(psi) * np.sin(theta) * np.sin(phi) - np.cos(theta) * np.sin(psi)
def v_y(angle_array):
    '''
    V_y entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively;
    V_y = cos(theta) * cos(psi) + sin(theta) * sin(phi) * sin(psi).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(theta) * np.cos(psi) + np.sin(theta) * np.sin(phi) * np.sin(psi)
def v_z(angle_array):
    '''
    V_z entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively; V_z = cos(phi) * sin(theta).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    return np.cos(phi) * np.sin(theta)
def w_x(angle_array):
    '''
    W_x entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively;
    W_x = cos(theta) * cos(psi) * sin(phi) + sin(theta) * sin(psi).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(theta) * np.cos(psi) * np.sin(phi) + np.sin(theta) * np.sin(psi)
def w_y(angle_array):
    '''
    W_y entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively;
    W_y = cos(theta) * sin(phi) * sin(psi) - cos(psi) * sin(theta).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    psi = angle_array[2]
    return np.cos(theta) * np.sin(phi) * np.sin(psi) - np.cos(psi) * np.sin(theta)
def w_z(angle_array):
    '''
    W_z entry of the rotation matrix R_rot = R_z * R_y * R_x (columns [U V W]).

    angle_array holds the Tait-Bryan angles [theta, phi, psi] for rotations
    about the x, y and z axes respectively; W_z = cos(theta) * cos(phi).
    '''
    theta = angle_array[0]
    phi = angle_array[1]
    return np.cos(theta) * np.cos(phi)
def Angles2Basis(rot_ang_array):
    '''
    Build the rotation matrix from Tait-Bryan angles [theta, phi, psi].

    theta, phi, psi are the rotations about the x, y and z axes; the result is
        R_rot = R_z(psi) * R_y(phi) * R_x(theta)   (order matters)
    returned as the 3x3 matrix whose columns are the rotated basis vectors:
        [ U_x  V_x  W_x ]
        [ U_y  V_y  W_y ]
        [ U_z  V_z  W_z ]
    '''
    theta = rot_ang_array[0]
    phi = rot_ang_array[1]
    psi = rot_ang_array[2]
    sin, cos = np.sin, np.cos
    # Closed-form entries of R_z @ R_y @ R_x, row by row ([U V W] columns).
    basis = np.array([
        [cos(phi) * cos(psi),
         cos(psi) * sin(theta) * sin(phi) - cos(theta) * sin(psi),
         cos(theta) * cos(psi) * sin(phi) + sin(theta) * sin(psi)],
        [cos(phi) * sin(psi),
         cos(theta) * cos(psi) + sin(theta) * sin(phi) * sin(psi),
         cos(theta) * sin(phi) * sin(psi) - cos(psi) * sin(theta)],
        [-sin(phi),
         cos(phi) * sin(theta),
         cos(theta) * cos(phi)]
    ])
    # squeeze keeps the historical behavior when the angles arrive as
    # single-element arrays (collapses a trailing length-1 axis).
    return np.squeeze(basis)
# ---------------------------------------------------------------
# We are first going to copy some old useful functions into this file to make them available for later calls. These functions will primarily focus on identifying where the data is located.
def generate_dict_of_acts_with_patients():
    '''
    Map each activity directory under top_level_dir to its patient directories.

    Assumes the layout */fluoro/data/<activity>/<patient>/...; the entries
    '.DS_Store', 'compilation' and 'prediction' at the top level are not
    activities and are skipped, as are '.DS_Store' entries inside each
    activity folder.
    Returns {activity_name: [patient_name, ...]}.
    '''
    non_activity_entries = ('.DS_Store', 'compilation', 'prediction')
    pt_dict = {}
    for activity in os.listdir(os.path.abspath(top_level_dir)):
        if activity in non_activity_entries:
            continue
        patients = [
            entry
            for entry in os.listdir(os.path.join(top_level_dir, activity))
            if entry != '.DS_Store'
        ]
        pt_dict[activity] = patients
    return pt_dict
def generate_dict_path_to_frames(dict_of_act_pts):
    '''
    Map each frame-holding directory to the list of frames it contains.

    input:
        - dict_of_act_pts --> {activity: [patient, ...]} as produced by
          generate_dict_of_acts_with_patients
    output:
        - {path_to_frames_dir: [frame_name, ...]} where '.DS_Store' and 'stl'
          subdirectories are skipped, and '.DS_Store'/'cali' entries are
          excluded from each frame list
    '''
    path_to_frames_dict = {}
    for act, pt_list in dict_of_act_pts.items():
        for pt in pt_list:
            pt_dir = os.path.join(top_level_dir, act, pt)
            for sub_dir in os.listdir(pt_dir):
                if sub_dir == '.DS_Store' or sub_dir == 'stl':
                    continue
                frames_dir = os.path.join(top_level_dir, act, pt, sub_dir)
                frames = [
                    frame
                    for frame in os.listdir(frames_dir)
                    if frame != '.DS_Store' and frame != 'cali'
                ]
                path_to_frames_dict[frames_dir] = frames
    return path_to_frames_dict
def generate_comprehensive_list_of_frames(dict_path_to_frames):
    '''
    Flatten the directory->frames mapping into one sorted list of full paths.

    Parameters:
        dict_path_to_frames: dict whose keys are frame-directory paths and
            whose values are lists of frame names inside each directory.

    Returns:
        list: every os.path.join(directory, frame) combination, sorted.
    '''
    return sorted(
        os.path.join(frame_dir, frame_name)
        for frame_dir, frame_names in dict_path_to_frames.items()
        for frame_name in frame_names
    )
# ---------------------------------------------------------------
def extract_calibration_data(path_to_cali):
    '''
    Load the fluoroscope calibration (R12 rotation, V12 translation) for a frame set.

    Looks inside `path_to_cali`/cali for the first file whose name starts with
    'reg2fl' (case-insensitive) and loads it as a MATLAB .mat file.

    input: path to the directory that contains the 'cali' subdirectory
    output: list of the form [ R12, V12 ] as loaded by scipy.io.loadmat

    Raises:
        FileNotFoundError: if no 'reg2fl*' file exists in the cali directory.
            (Previously this fell through to an unbound-local NameError.)
    '''
    cali_dir = os.path.join(path_to_cali, 'cali')
    for fle in os.listdir(cali_dir):
        if fle[0:6].lower() == 'reg2fl':
            # Only the first matching file is used, as before.
            fluoro_file = sio.loadmat(os.path.join(cali_dir, fle))
            return [fluoro_file['R12'], fluoro_file['V12']]
    raise FileNotFoundError("No 'reg2fl*' calibration file found in " + cali_dir)
def extract_image_data(path_to_frame, resize_shape=(128, 128)):
    '''
    Load the two fluoroscope images ('F1*.png', 'F2*.png') for one frame.

    Each image is resized from its native resolution to `resize_shape` and
    converted to grayscale.

    input:
        path_to_frame: directory containing the frame's image files
        resize_shape: target (height, width) of each image
    output: np.array of the form [ image1, image2 ] (grayscale, resized)
    '''
    # Map the two-character filename prefix to its slot in the output pair.
    prefix_to_slot = {'F1': 0, 'F2': 1}
    image_array = [0, 0]
    for fle in os.listdir(os.path.normpath(path_to_frame)):
        slot = prefix_to_slot.get(fle[0:2])
        if slot is None or fle[-4:] != '.png':
            continue
        raw_image = skimage.io.imread(os.path.join(path_to_frame, fle))
        resized = skimage.transform.resize(raw_image, resize_shape, anti_aliasing=True)
        image_array[slot] = skimage.color.rgb2gray(resized)
    return np.array(image_array)
def extract_labels_rot_trans_femur_tib_data(path_to_frame):
    '''
    This function will take in the path to the frame where the data for each registration has occurred. It will return the rotation matrix (converted to three angles) and translation vector for both the FEMUR and the TIBIA.
    function extract_femur_tib_cup_data(path_to_frames)
    input:
        path to frames
    output:
        2x6 array (one row per bone):
            [ [ R_angles of femur, V_trans of femur ],
              [ R_angles of tibia, V_trans of tibia ] ]
        If the directory contains no .mat file the array stays all zeros.
    '''
    # NOTE(review): despite the femur/tibia naming, the .mat keys are
    # 'Cup_RV' / 'Stem_RV' — presumably the registration software's own
    # component names; confirm the mapping against the data files.
    femur_keyword = 'Cup_RV'
    tibia_keyword = 'Stem_RV'
    # femur_tib_data = [[0, 0], [0, 0]]
    femur_tib_data = np.zeros((2, 6))
    for fle in os.listdir(os.path.normpath(path_to_frame)):
        if fle[-4:] == '.mat':
            results_file = sio.loadmat(os.path.join(path_to_frame, fle))
            femur_data = results_file[femur_keyword]
            tibia_data = results_file[tibia_keyword]
            # [0][0][0] and [0][0][1] index into the loadmat representation of
            # a MATLAB struct: element 0 is the rotation basis (fed through
            # Basis2Angles to get the three Tait-Bryan angles), element 1 the
            # translation vector.
            femur_rot = np.array(Basis2Angles(femur_data[0][0][0]))
            femur_trans = femur_data[0][0][1]
            femur_tib_data[0, 0:3] = femur_rot.reshape(3)
            femur_tib_data[0, 3:6] = femur_trans.reshape(3)
            tibia_rot = np.array(Basis2Angles(tibia_data[0][0][0]))
            tibia_trans = tibia_data[0][0][1]
            femur_tib_data[1, 0:3] = tibia_rot.reshape(3)
            femur_tib_data[1, 3:6] = tibia_trans.reshape(3)
            # Only the first .mat file found in the directory is used.
            break
    return femur_tib_data
def voxel_from_array(mesh_vertices, spacing=0.5, mark_origin=False, location_of_origin=np.array([0, 0, 0]), origin_value=2):
    '''
    Rasterize a cloud of mesh vertices into a binary int8 voxel grid.

    The grid's bounding box is the axis-aligned box around the vertices,
    expanded if necessary so it always contains the global origin. Each
    vertex marks the voxel(s) spanning floor..ceil of its scaled coordinate;
    indexing is done with slices, so vertices that land exactly on the upper
    boundary fall outside the grid and are silently clipped.

    input:
        mesh_vertices --> np.array of vertex locations, shape (n, 3)
        spacing --> voxel edge length in mm
        mark_origin --> if True, write `origin_value` into the voxel that
            contains `location_of_origin`
    output:
        int8 voxel matrix with 1's where vertices fall (and `origin_value`
        at the origin voxel when requested)
    '''
    # Clamp the bounding box so it always includes the global origin.
    lower_corner = np.minimum(np.min(mesh_vertices, axis=0), 0)
    upper_corner = np.maximum(np.max(mesh_vertices, axis=0), 0)
    shifted_verts = mesh_vertices - lower_corner
    bin_counts = np.ceil((upper_corner - lower_corner) / spacing)
    grid = np.zeros(bin_counts.astype('int32'))
    scaled = shifted_verts / spacing
    low_idx = np.floor(scaled).astype(int)
    high_idx = np.ceil(scaled).astype(int)
    for row in range(shifted_verts.shape[0]):
        grid[low_idx[row, 0]:high_idx[row, 0] + 1,
             low_idx[row, 1]:high_idx[row, 1] + 1,
             low_idx[row, 2]:high_idx[row, 2] + 1] = 1
    if mark_origin:
        shifted_origin = location_of_origin - lower_corner
        grid[int(np.floor(shifted_origin[0] / spacing)),
             int(np.floor(shifted_origin[1] / spacing)),
             int(np.floor(shifted_origin[2] / spacing))] = origin_value
    return grid.astype('int8')
def extract_stl_to_meshpoints(mesh_obj, PTS_file, voxelize_dim=0.5, random_disp=False, random_seed=None):
    '''
    Express an STL mesh's vertices in the local anatomic frame defined by a PTS landmark file.

    The PTS file must contain at least four landmark points: rows 0/1 define
    the local X axis, rows 2/3 a provisional Z axis, and the midpoint of rows
    0/1 is the local origin. Because X and the provisional Z are not
    guaranteed orthogonal, Y is built by cross product and Z re-derived from
    X and Y to obtain a true orthonormal basis.

    Bug fix: `random_seed` previously defaulted to
    `np.random.randint(low=0, high=2**32)`, which Python evaluates ONCE at
    function-definition time — every call relying on the default reused the
    same seed (and reseeded the global RNG even when random_disp=False), so
    the "random" displacement was identical across calls. The default is now
    None, meaning "do not reseed"; passing an explicit seed reproduces a
    specific displacement exactly as before.

    input:
        mesh_obj --> trimesh-style object exposing .vertices
        PTS_file --> array-like of landmark points (>= 4 rows of xyz)
        voxelize_dim --> unused here; kept for interface compatibility
        random_disp --> if True, apply a random rigid rotation to the result
        random_seed --> optional int to seed np.random for reproducibility
    output:
        array of vertices in the local coordinate frame (randomly rotated
        when random_disp is True)
    '''
    if random_seed is not None:
        np.random.seed(random_seed)
    PTS_file = np.array(PTS_file)
    X_vec = np.array(PTS_file[0, :] - PTS_file[1, :])
    Z_vec_pre = np.array(PTS_file[2, :] - PTS_file[3, :])
    # Cross twice: Y is orthogonal to both inputs, then Z is rebuilt from X
    # and Y so the basis is exactly orthonormal.
    Y_vec = np.cross(Z_vec_pre, X_vec)
    Z_vec = np.cross(X_vec, Y_vec)
    x_unit = X_vec / np.linalg.norm(X_vec)
    y_unit = Y_vec / np.linalg.norm(Y_vec)
    z_unit = Z_vec / np.linalg.norm(Z_vec)
    # Columns of rot_mat are the local axes expressed in global coordinates.
    rot_mat = np.column_stack((x_unit, y_unit, z_unit))
    origin_mesh_local = np.array(PTS_file[0:2, :].mean(axis=0))
    verts_local_coord = Global2Local_Coord(rot_mat, origin_mesh_local, mesh_obj.vertices)
    if random_disp:
        return random_rotation_translation(verts_local_coord, rotation=True)
    return verts_local_coord
# ---------------------------------------------------------------
# These are the two main functions, which will be used for data augmentation.
def random_rotation_translation(mesh_vertices, rotation=True, translation=False):
    '''
    This function will take in array of the mesh vertices. It will then apply a random rotation and/or translation to the mesh_vertices. This function will output a new list array of the mesh vertices, which have been rotated and/or translated.
    input:
        mesh_vertices --> the input array of mesh vertices, which should be in their local coordinate frame, such that the local origin is [0,0,0].
        rotation --> whether or not to apply a rotation to the dataset
        translation --> whether or not to apply a random translation to the dataset
    output:
        updated_mesh_vertices --> the updated mesh vertices, which have been randomly transformed
    '''
    if rotation:
        # Each angle is a uniform draw from [0, 2*pi) as a shape-(1,) array,
        # so rotation_angles below ends up shape (3, 1); with rotation=False
        # it is a plain shape-(3,) array of zeros.
        # NOTE(review): Angles2Basis apparently accepts both shapes — confirm.
        random_theta = np.random.rand(1) * 2 * np.pi
        random_phi = np.random.rand(1) * 2 * np.pi
        random_psi = np.random.rand(1) * 2 * np.pi
    else:
        random_theta = 0
        random_phi = 0
        random_psi = 0
    rotation_angles = np.array([random_theta, random_phi, random_psi])
    if translation:
        # Translations are drawn from [0, 3) per axis (non-negative only).
        random_x = np.random.rand(1) * 3
        random_y = np.random.rand(1) * 3
        random_z = np.random.rand(1) * 3
    else:
        random_x = 0
        random_y = 0
        random_z = 0
    translation_vecs = np.array([random_x, random_y, random_z])
    # Convert the Tait-Bryan angles to a rotation basis, then apply the
    # rigid transform via the local->global coordinate change.
    random_rot_mat = Angles2Basis(rotation_angles)
    # print('random_rot_mat', random_rot_mat)
    new_rand_positions = Local2Global_Coord(random_rot_mat, translation_vecs, mesh_vertices)
    return new_rand_positions
def random_samples_selector(list_of_paths_incl_frames, numb_of_new_instances):
    '''
    Extend a list of frame paths with randomly re-sampled duplicates.

    Samples `numb_of_new_instances` paths (with replacement) from the input
    list and appends them, producing the path list used for data
    augmentation. The original paths keep their order at the front.

    input:
        list_of_paths_incl_frames --> list of all paths where frame data is held
        numb_of_new_instances --> how many augmentation entries to append
    returns:
        list --> original paths followed by the randomly sampled extras
    '''
    extra_paths = np.random.choice(list_of_paths_incl_frames, numb_of_new_instances, replace=True)
    return list_of_paths_incl_frames + list(extra_paths)
def determine_voxel_max_shape_from_mesh_vertices(list_of_mesh_vertices, spacing=0.5):
    '''
    Compute the per-axis maximum voxel-grid size over a list of meshes.

    Each mesh's bounding box is clamped to include the origin (vertices are
    expected in local coordinates with the origin at [0,0,0]), converted to
    a bin count at the given spacing, and the element-wise maximum across
    all meshes is accumulated.

    input:
        list_of_mesh_vertices --> iterable of (n, 3) vertex arrays
        spacing --> voxel edge length in mm
    output:
        shape-(3,) float array of the largest bin count seen on each axis
    '''
    max_shape_vector = np.zeros(3)
    for mesh_verts in list_of_mesh_vertices:
        lower_corner = np.minimum(np.min(mesh_verts, axis=0), 0)
        upper_corner = np.maximum(np.max(mesh_verts, axis=0), 0)
        bins_vec = np.ceil((upper_corner - lower_corner) / spacing)
        max_shape_vector = np.maximum(max_shape_vector, bins_vec)
    return max_shape_vector
def matrix_padder_to_size(vox_mat, max_shape_vector):
    '''
    Zero-pad a 3-D voxel matrix up to a target shape, centering the content.

    For each axis whose current size is below the target, the deficit is
    split evenly between the two sides; an odd deficit puts the extra plane
    on the leading side. Axes already at (or above) the target are left
    unpadded. The original version repeated the identical logic for each of
    the three axes; this version loops over them.

    input:
        vox_mat --> 3-D array to pad
        max_shape_vector --> length-3 target size per axis (int or float)
    output:
        np.ndarray padded with zeros (np.pad mode='constant')
    '''
    pad_mat = np.zeros((3, 2))
    for axis in range(3):
        deficit = max_shape_vector[axis] - vox_mat.shape[axis]
        if deficit <= 0:
            continue
        if deficit % 2 == 1:
            # Odd deficit: the extra plane goes on the leading side.
            pad_mat[axis, 0] = deficit // 2 + 1
            pad_mat[axis, 1] = deficit // 2
        else:
            pad_mat[axis, :] = deficit / 2
    return np.pad(vox_mat, pad_width=pad_mat.astype(int), mode='constant')
# ---------------------------------------------------------------
# This part of the file will deal with creating functions that will store the massive amounts of data
def generate_cali_storage_mat(list_of_path_to_frames, path_to_save_dir, save_file_name='cali_aug', compression=None):
    '''
    Write the fluoroscope calibration (3 rotation angles + 3 translation
    components per row) for every frame to an HDF5 dataset named 'cali_dset'.

    Each frame contributes TWO identical rows — one per bone (femur, tibia) —
    so the row indexing lines up with the label and image datasets.

    input:
        list_of_path_to_frames --> paths to the individual frame directories
        path_to_save_dir --> directory where the .h5py file is written
        save_file_name --> base name of the output file
        compression --> h5py compression filter (or None)
    '''
    os.makedirs(path_to_save_dir, exist_ok=True)
    list_of_bones = ['Femur', 'Tibia']
    total_rows = len(list_of_path_to_frames) * len(list_of_bones)
    calibration_data = np.zeros((total_rows, 6))
    print('Calibration data shape: ', calibration_data.shape)
    calibration_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
    cali_dset = calibration_file.create_dataset('cali_dset', data=calibration_data, compression=compression)
    for indx, frame in enumerate(list_of_path_to_frames):
        # The frame's parent directory (laterality level) holds the 'cali' data.
        parent_dir = os.path.abspath(os.sep.join(os.path.normpath(frame).split(os.sep)[:-1]))
        cali_rot_basis, cali_trans = extract_calibration_data(parent_dir)
        row = np.hstack((Basis2Angles(cali_rot_basis), np.reshape(cali_trans, 3)))
        cali_dset[2 * indx: 2 * indx + 2] = np.array([row, row])
    calibration_file.close()
    return None
def generate_label_storage_mat(list_of_path_to_frames, path_to_save_dir, save_file_name='label_aug', compression=None):
    '''
    Write the registration labels (rotation angles + translations) for every
    frame to an HDF5 dataset named 'label_dset'.

    Each frame contributes two consecutive rows — femur first, tibia second —
    as returned by extract_labels_rot_trans_femur_tib_data, so the dataset
    has shape (2 * n_frames, 6).

    input:
        list_of_path_to_frames --> paths to the individual frame directories
        path_to_save_dir --> directory where the .h5py file is written
        save_file_name --> base name of the output file
        compression --> h5py compression filter (or None)
    '''
    os.makedirs(path_to_save_dir, exist_ok=True)
    list_of_bones = ['Femur', 'Tibia']
    total_rows = len(list_of_path_to_frames) * len(list_of_bones)
    label_data = np.zeros((total_rows, 6))
    print('Label data shape: ', label_data.shape)
    label_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
    label_dset = label_file.create_dataset('label_dset', data=label_data, compression=compression)
    for indx, frame in enumerate(list_of_path_to_frames):
        label_dset[2 * indx: 2 * indx + 2] = extract_labels_rot_trans_femur_tib_data(os.path.abspath(frame))
    label_file.close()
    return None
def generate_image_storage_mat(list_of_path_to_frames, path_to_save_dir, save_file_name='images_aug', compression=None):
    '''
    Write the paired fluoroscope images (2 x 128 x 128 per frame) for every
    frame to an HDF5 dataset named 'image_dset'.

    Two rows are reserved per frame (one per bone: femur, tibia); the
    frame's (2, 128, 128) image pair is assigned across both rows (NumPy
    broadcasting duplicates it), keeping the row indexing aligned with the
    calibration and label datasets.

    input:
        list_of_path_to_frames --> paths to the individual frame directories
        path_to_save_dir --> directory where the .h5py file is written
        save_file_name --> base name of the output file
        compression --> h5py compression filter (or None)
    '''
    os.makedirs(path_to_save_dir, exist_ok=True)
    list_of_bones = ['Femur', 'Tibia']
    total_rows = len(list_of_path_to_frames) * len(list_of_bones)
    image_data = np.zeros((total_rows, 2, 128, 128))
    print('Image data shape: ', image_data.shape)
    image_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
    image_dset = image_file.create_dataset('image_dset', data=image_data, compression=compression)
    for indx, frame in enumerate(list_of_path_to_frames):
        image_dset[2 * indx: 2 * indx + 2] = extract_image_data(os.path.abspath(frame))
        # Progress report every 500 frames.
        if indx % 500 == 0:
            print("Image: ", indx)
    image_file.close()
    return None
def generate_voxel_storage_mat(list_of_path_to_frames, path_to_save_dir, augmented_frames_number, save_file_name='voxels_aug', upload_set_size=35, compression='lzf', save_as_type='int8'):
    '''
    Build the voxelized bone dataset and stream it into an HDF5 file.

    Two passes over `list_of_path_to_frames`:
      1) load every mesh (femur + tibia per frame) once, only to determine
         the maximal voxel-grid shape, so every voxel matrix can be padded
         to a common size;
      2) re-load and voxelize the meshes in chunks of `upload_set_size`
         frames, pad each to the common shape, and write each chunk into
         the 'vox_dset' dataset.

    The LAST `augmented_frames_number` entries of the path list are treated
    as augmentation copies: they receive a random rigid displacement seeded
    from `random_seed_array`, so both passes reproduce the same displacement
    for a given entry. Dataset rows are interleaved femur/tibia: row 2*i is
    the femur, row 2*i + 1 the tibia, of frame i.

    input:
        list_of_path_to_frames --> frame directories (originals first, then
            augmentation duplicates)
        path_to_save_dir --> directory where the .h5py file is written
        augmented_frames_number --> how many trailing entries are duplicates
        upload_set_size --> frames per HDF5 write chunk
        compression --> h5py compression filter
        save_as_type --> dtype used for storage
    '''
    process_id = psutil.Process(os.getpid())  # for memory/CPU progress reports
    date_time = datetime.datetime
    import time
    spacing = 0.5  # voxel edge length in mm
    total_number_of_frames = len(list_of_path_to_frames)
    # Entries at index >= original_number_frames are augmentation copies.
    original_number_frames = total_number_of_frames - augmented_frames_number
    # Two seeds per augmented frame (femur, tibia) so both passes generate
    # identical random displacements for the same entry.
    random_seed_array = np.random.randint(low=0, high=2**32, size=augmented_frames_number * 2)
    max_shape_vector = np.zeros(3)
    ticker = -1
    # ---- pass 1: find the common (maximal) voxel-grid shape ----
    for path in list_of_path_to_frames:
        load_mesh_path_time = time.time()
        ticker += 1
        # Parent of the frame directory is the laterality directory ('lt'/'rt');
        # the replace/expanduser dance remaps paths pickled on another machine.
        lat = os.path.abspath(os.sep.join(os.path.normpath(os.path.expanduser(path.replace('/Users/johndrago', '~', 1))).split(os.sep)[:-1]))
        lat_split = lat.split(os.sep)[-1]
        return_vox_tib_fib = [0, 0]
        if lat_split.lower() == 'lt':
            # The 'stl' directory lives next to the laterality directory.
            new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
            # Left Femur
            LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
            LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
            # Left Tibia
            LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
            LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
            if ticker >= original_number_frames:
                seed_indexer = ticker - original_number_frames
                # print('Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
            else:
                # print('Not Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
        if lat_split.lower() == 'rt':
            new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
            # Right Femur
            RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
            RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
            # Right Tibia
            RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
            RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
            if ticker >= original_number_frames:
                seed_indexer = ticker - original_number_frames
                # print('Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
            else:
                # print('Not Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
        # Track the largest per-axis bin count over every bone seen so far
        # (same clamped-bounding-box logic as voxel_from_array).
        for bone in return_vox_tib_fib:
            mesh_min_vec = np.min(bone, axis=0)
            mesh_min_vec = np.where(mesh_min_vec > 0, 0, mesh_min_vec)
            mesh_max_vec = np.max(bone, axis=0)
            mesh_max_vec = np.where(mesh_max_vec < 0, 0, mesh_max_vec)
            range_vec = mesh_max_vec - mesh_min_vec
            bins_vec = np.ceil(range_vec / spacing)
            if bins_vec[0] > max_shape_vector[0]:
                max_shape_vector[0] = int(bins_vec[0])
            if bins_vec[1] > max_shape_vector[1]:
                max_shape_vector[1] = int(bins_vec[1])
            if bins_vec[2] > max_shape_vector[2]:
                max_shape_vector[2] = int(bins_vec[2])
        time_to_load_mesh_path = time.time() - load_mesh_path_time
        # Progress report every 200 frames.
        if ticker % 200 == 0:
            print('----------------------------------------------------------------')
            print("Voxel mesh load: ", ticker)
            print('Time to load mesh path: ', round(time_to_load_mesh_path, 4), 'secs')
            if ticker >= original_number_frames:
                print('Random:', ticker, 'Seed: ', seed_indexer)
            else:
                print('Not Random: ', ticker)
            print('Memory Usage: ')
            print('\t', 'RSS: ', round(process_id.memory_info()[0] / 1e9, 3), 'GB')
            print('\t', 'VMS: ', round(process_id.memory_info()[1] / 1e9, 3), 'GB')
    # Final dataset shape: two rows (femur, tibia) per frame, padded to the
    # maximal grid shape found in pass 1.
    vox_mat_shape = np.array([total_number_of_frames * 2, int(max_shape_vector[0]), int(max_shape_vector[1]), int(max_shape_vector[2])]).astype('int16')
    # vox_mat_shape = np.array([20000, int(max_shape_vector[0]), int(max_shape_vector[1]), int(max_shape_vector[2])]).astype('int16')
    print('\n' * 3)
    print('----------------------------------------------------------------')
    print('Voxel data shape: ', vox_mat_shape)
    print('----------------------------------------------------------------')
    print('\n' * 3)
    vox_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
    vox_dset = vox_file.create_dataset('vox_dset', shape=vox_mat_shape, dtype=save_as_type, compression=compression)
    ticker1 = -1
    ticker3 = -1  # global frame counter across all chunks
    process_id.cpu_percent()
    upload_times = []
    # ---- pass 2: voxelize + pad, buffering upload_set_size frames per write ----
    for path_indx in range(int(np.ceil(len(list_of_path_to_frames) / upload_set_size))):
        ticker1 += 1
        num_sub_frames = len(list_of_path_to_frames[path_indx * upload_set_size: path_indx * upload_set_size + upload_set_size])
        # In-memory buffer for this chunk (2 rows per frame).
        vox_mat_sub = np.zeros((2 * num_sub_frames, vox_mat_shape[1], vox_mat_shape[2], vox_mat_shape[3])).astype(save_as_type)
        ticker2 = -1  # index within the current chunk
        sequence_time = time.time()
        for path in list_of_path_to_frames[path_indx * upload_set_size: path_indx * upload_set_size + upload_set_size]:
            ticker2 += 1
            ticker3 += 1
            path_upload_time = time.time()
            lat = os.path.abspath(os.sep.join(os.path.normpath(os.path.expanduser(path.replace('/Users/johndrago', '~', 1))).split(os.sep)[:-1]))
            lat_split = lat.split(os.sep)[-1]
            return_vox_tib_fib = [0, 0]
            if lat_split.lower() == 'lt':
                new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
                # Left Femur
                LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
                LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
                # Left Tibia
                LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
                LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
                if ticker3 >= original_number_frames:
                    # Same seeds as pass 1, so the displacement is reproduced.
                    seed_indexer = ticker3 - original_number_frames
                    # print('Random:', ticker3, 'Seed: ', seed_indexer)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
                else:
                    # print('Not Random: ', ticker3)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
            if lat_split.lower() == 'rt':
                new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
                # Right Femur
                RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
                RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
                # Right Tibia
                RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
                RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
                if ticker3 >= original_number_frames:
                    seed_indexer = ticker3 - original_number_frames
                    # print('Random:', ticker3, 'Seed: ', seed_indexer)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
                else:
                    # print('Not Random: ', ticker3)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
            # Voxelize and pad both bones into the chunk buffer: femur in the
            # even row, tibia in the odd row.
            vox_dset_mat_1 = voxel_from_array(return_vox_tib_fib[0], spacing=spacing, mark_origin=True)
            vox_mat_sub[2 * ticker2] = matrix_padder_to_size(vox_dset_mat_1.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
            vox_dset_mat_2 = voxel_from_array(return_vox_tib_fib[1], spacing=spacing, mark_origin=True)
            vox_mat_sub[2 * ticker2 + 1] = matrix_padder_to_size(vox_dset_mat_2.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
            return_vox_tib_fib = None
            # Fires on the first frame of each chunk (ticker3 = 0, 35, 70, ...).
            if ticker3 % upload_set_size == 0:
                print('----------------------------------------------------------------')
                print('Voxel: ', ticker3)
                if ticker3 >= original_number_frames:
                    print('Random:', ticker3, 'Seed: ', seed_indexer)
                else:
                    print('Not Random: ', ticker3)
                print('Voxel pad creation time: ', round(time.time() - path_upload_time, 4), 'secs')
        # Write the whole chunk into the HDF5 dataset in one assignment.
        vox_mat_sub_time = time.time()
        vox_dset[2 * path_indx * upload_set_size: 2 * path_indx * upload_set_size + 2 * num_sub_frames] = vox_mat_sub.astype(save_as_type)
        vox_mat_sub_time_finish = time.time()
        print('--')
        print('Time to upload vox_mat_sub per instance: ', round((vox_mat_sub_time_finish - vox_mat_sub_time) / (num_sub_frames * 2), 4), 'secs')
        print('Time to upload upload_size_set total: ', round((vox_mat_sub_time_finish - vox_mat_sub_time), 4), 'secs')
        print('Total time to create and upload upload_size_set: ', round((vox_mat_sub_time_finish - sequence_time), 4), 'secs')
        upload_times.append(vox_mat_sub_time_finish - sequence_time)
        print('Average +/- SD upload times: ', round(np.mean(upload_times), 3), '+/-', round(np.std(upload_times), 3), 'secs')
        # If a chunk took very long (> 400 s), close and reopen the file —
        # presumably to flush buffers / let the system recover before the
        # next chunk.
        if (vox_mat_sub_time_finish - sequence_time) > 400:
            vox_file.close()
            vox_mat_sub = None
            print('\n' * 1)
            print('Pausing 30 seconds')
            time.sleep(30)
            print('Re-opening vox_file')
            vox_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'r+')
            vox_dset = vox_file['vox_dset']
            print('\n' * 1)
        vox_mat_sub = None
        print('--')
        print('Memory Usage: ')
        print('\t', 'RSS: ', round(process_id.memory_info()[0] / 1e9, 3), 'GB')
        print('\t', 'VMS: ', round(process_id.memory_info()[1] / 1e9, 3), 'GB')
        print('CPU percent for process: ', process_id.cpu_percent(), '%')
        print('CPU percent per cpu: ', psutil.cpu_percent(percpu=True))
        print('--')
        print('Date/Time: ', date_time.now())
    vox_file.close()
    return None
# ---------------------------------------------------------------
# ---------------------------------------------------------------
if __name__ == '__main__':
    # One-time setup (already run, kept for reference): build the frame list,
    # sample extra paths for augmentation, and pickle the result so every
    # storage pass sees the exact same path ordering.
    # dict_of_acts = generate_dict_of_acts_with_patients()
    # dict_of_paths = generate_dict_path_to_frames(dict_of_acts)
    # list_of_frames = generate_comprehensive_list_of_frames(dict_of_paths)
    # list_of_frames = sorted(list_of_frames)
    # augmented_frames_number = 10000
    # data_aug_list_of_frames = random_samples_selector(list_of_frames, augmented_frames_number)
    # aug_dict = {}
    # aug_dict['all_paths'] = data_aug_list_of_frames
    # aug_dict['numb_aug_frames'] = augmented_frames_number
    # aug_dict['list_of_original_frames'] = list_of_frames
    # aug_file = open(os.path.join(save_dir, 'aug_data' + '.pkl'), 'wb')
    # pickle.dump(aug_dict, aug_file)
    # aug_file.close()
    # Reload the pickled augmentation bookkeeping produced by the block above.
    save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/data/compilation')))
    aug_file = open(os.path.join(save_dir, 'aug_data' + '.pkl'), 'rb')
    aug_dict = pickle.load(aug_file)
    data_aug_list_of_frames = aug_dict['all_paths']
    augmented_frames_number = aug_dict['numb_aug_frames']
    aug_file.close()
    # The calibration/label/image passes were run previously; only the voxel
    # pass is currently active.
    # generate_cali_storage_mat(data_aug_list_of_frames, save_dir)
    # generate_label_storage_mat(data_aug_list_of_frames, save_dir)
    # generate_image_storage_mat(data_aug_list_of_frames, save_dir)
    generate_voxel_storage_mat(data_aug_list_of_frames, save_dir, augmented_frames_number)
    # generate_voxel_storage_mat_test(data_aug_list_of_frames[:1000], save_dir, augmented_frames_number=900, save_file_name='voxels_test2')
# ---------------------------------------------------------------
# def generate_voxel_storage_mat(list_of_path_to_frames, path_to_save_dir, augmented_frames_number, save_file_name='voxels_aug', compression='lzf', save_as_type='int8'):
# import time
# spacing = 0.5
# total_number_of_frames = len(list_of_path_to_frames)
# original_number_frames = total_number_of_frames - augmented_frames_number
# random_seed_array = np.random.randint(low=0, high=2**32, size=augmented_frames_number * 2)
# max_shape_vector = np.zeros(3)
# ticker = -1
# for path in list_of_path_to_frames:
# load_mesh_path_time = time.time()
# ticker += 1
# lat = os.path.abspath(os.sep.join(os.path.normpath(path).split(os.sep)[:-1]))
# lat_split = lat.split(os.sep)[-1]
# return_vox_tib_fib = [0, 0]
# if lat_split.lower() == 'lt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Left Femur
# LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
# LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
# # Left Tibia
# LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
# LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
# if ticker >= original_number_frames:
# seed_indexer = ticker - original_number_frames
# # print('Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# # print('Not Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
# if lat_split.lower() == 'rt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Right Femur
# RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
# RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
# # Right Tibia
# RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
# RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
# if ticker >= original_number_frames:
# seed_indexer = ticker - original_number_frames
# # print('Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# # print('Not Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
# for bone in return_vox_tib_fib:
# mesh_min_vec = np.min(bone, axis=0)
# mesh_min_vec = np.where(mesh_min_vec > 0, 0, mesh_min_vec)
# mesh_max_vec = np.max(bone, axis=0)
# mesh_max_vec = np.where(mesh_max_vec < 0, 0, mesh_max_vec)
# range_vec = mesh_max_vec - mesh_min_vec
# bins_vec = np.ceil(range_vec / spacing)
# if bins_vec[0] > max_shape_vector[0]:
# max_shape_vector[0] = int(bins_vec[0])
# if bins_vec[1] > max_shape_vector[1]:
# max_shape_vector[1] = int(bins_vec[1])
# if bins_vec[2] > max_shape_vector[2]:
# max_shape_vector[2] = int(bins_vec[2])
# time_to_load_mesh_path = time.time() - load_mesh_path_time
# if ticker % 200 == 0:
# print('\n')
# print("Voxel mesh load: ", ticker)
# print('Time to load mesh path: ', time_to_load_mesh_path)
# if ticker >= original_number_frames:
# print('Random')
# else:
# print('Not Random')
# vox_mat_shape = np.array([total_number_of_frames * 2, int(max_shape_vector[0]), int(max_shape_vector[1]), int(max_shape_vector[2])]).astype('int16')
# print('Voxel data shape: ', vox_mat_shape)
# vox_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
# vox_dset = vox_file.create_dataset('vox_dset', shape=vox_mat_shape, dtype=save_as_type, compression=None)
# ticker2 = -1
# for path in list_of_path_to_frames:
# time_to_vox_pad = time.time()
# ticker2 += 1
# lat = os.path.abspath(os.sep.join(os.path.normpath(path).split(os.sep)[:-1]))
# lat_split = lat.split(os.sep)[-1]
# return_vox_tib_fib = [0, 0]
# if lat_split.lower() == 'lt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Left Femur
# LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
# LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
# # Left Tibia
# LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
# LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
# if ticker2 >= original_number_frames:
# seed_indexer = ticker2 - original_number_frames
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
# if lat_split.lower() == 'rt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Right Femur
# RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
# RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
# # Right Tibia
# RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
# RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
# if ticker2 >= original_number_frames:
# seed_indexer = ticker2 - original_number_frames
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
# time_vox_mat = time.time()
# vox_dset_mat_1 = voxel_from_array(return_vox_tib_fib[0], spacing=spacing, mark_origin=True)
# print('Make voxel array 1: ', time.time() - time_vox_mat)
# vox_dset[2 * ticker2] = matrix_padder_to_size(vox_dset_mat_1.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
# print('Upload voxel array 1: ', time.time() - time_vox_mat)
# time_vox_mat = time.time()
# vox_dset_mat_2 = voxel_from_array(return_vox_tib_fib[1], spacing=spacing, mark_origin=True)
# print('Make voxel array 2: ', time.time() - time_vox_mat)
# vox_dset[2 * ticker2 + 1] = matrix_padder_to_size(vox_dset_mat_2.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
# print('Upload voxel array 2: ', time.time() - time_vox_mat)
# print('\n')
# return_vox_tib_fib = [0, 0]
# time_to_vox_pad_load = time.time() - time_to_vox_pad
# if ticker2 % 20 == 0:
# print("Voxel: ", ticker2)
# print('Time to vox pad load: ', time_to_vox_pad_load)
# vox_file.close()
# return None
| Python |
3D | john-drago/fluoro | code/datacomp/mesh_voxelization.py | .py | 6,116 | 165 | '''
This file will be used to generate voxel data sets from the meshes by voxelizing the vertices.
The high level overview is that we need to supply a list of directory paths to where the stl files are housed. We will then extract the vertices data, and we will create voxels in this file.
'''
import os
from coord_change import Global2Local_Coord
import scipy.io as sio
import skimage
import numpy as np
import trimesh
import pandas as pd
import sys
from scipy import ndimage
import h5py
import pickle
def stl_load_to_vertex_array(path_to_stl, bone):
    '''
    Load the STL mesh for the requested bone and laterality.

    input:
        path_to_stl --> path to the laterality directory
                        ~/data/activity/patient/laterality ; the last path
                        component must be 'LT' or 'RT' (case-insensitive)
        bone --> either 'Tibia' or 'Femur' (case-insensitive)
    output:
        mesh --> trimesh mesh object loaded from the matching STL file
    raises:
        ValueError --> if the laterality component or the bone name is not
                       recognized. (The original silently fell through and
                       crashed with UnboundLocalError on `mesh` instead.)
    '''
    last_path_split = path_to_stl.split(os.sep)[-1]
    # Replace the trailing 2-char laterality component (plus separator) with
    # the sibling 'stl' directory that holds the mesh files.
    new_path = os.path.join(os.path.normpath(path_to_stl[:-3]), 'stl')

    laterality = last_path_split.lower()
    if laterality == 'lt':
        prefix = 'L'
    elif laterality == 'rt':
        prefix = 'R'
    else:
        raise ValueError("Expected path to end in 'LT' or 'RT', got: %r" % last_path_split)

    if bone.lower() == 'femur':
        filename = prefix + 'Femur.stl'
    elif bone.lower() == 'tibia':
        filename = prefix + 'Tibia.stl'
    else:
        raise ValueError("bone must be 'Femur' or 'Tibia', got: %r" % bone)

    return trimesh.load(os.path.join(new_path, filename))
def voxel_from_array(mesh_vertices, spacing=0.5):
    '''
    Convert a point cloud of mesh vertices into a binary voxel grid.

    A voxel is set to 1 when at least one vertex maps into it; because each
    vertex marks the floor..ceil index range along every axis, a vertex that
    falls between grid planes marks both neighboring voxels.

    input:
        mesh_vertices --> (N, 3) array-like of vertex locations
        spacing --> voxel edge length in mm
    output:
        bin_mat --> int8 binary voxel matrix with 1's where vertices landed
    '''
    mesh_vertices = np.asarray(mesh_vertices)
    mesh_min_vec = np.min(mesh_vertices, axis=0)
    # Shift so the minimum corner of the cloud sits at the grid origin.
    shifted = mesh_vertices - mesh_min_vec
    range_vec = np.max(mesh_vertices, axis=0) - mesh_min_vec
    bins_vec = np.ceil(range_vec / spacing)
    # +2 leaves a one-voxel border so the ceil()+1 slices never run off-grid.
    bin_mat = np.zeros(bins_vec.astype('int32') + 2)
    # Precompute the per-vertex low/high voxel indices instead of re-deriving
    # floor/ceil six times per vertex inside the loop.
    scaled = shifted / spacing
    lo = np.floor(scaled).astype(int)
    hi = np.ceil(scaled).astype(int)
    for (x0, y0, z0), (x1, y1, z1) in zip(lo, hi):
        bin_mat[x0:x1 + 1, y0:y1 + 1, z0:z1 + 1] = 1
    return bin_mat.astype('int8')
def extract_stl_to_voxel(mesh_obj, PTS_file, voxelize_dim=0.5):
    '''
    Voxelize an STL mesh after re-expressing it in the anatomical local frame.

    Steps:
    1) derive the local coordinate frame from the PTS landmark rows
       (X axis: row 1 -> row 0; provisional Z axis: row 3 -> row 2)
    2) shift/rotate the mesh vertex cloud into that frame
    3) binarize the vertices into a voxel grid of edge `voxelize_dim`

    input:
        mesh_obj --> loaded trimesh mesh object (stl file)
        PTS_file --> loaded pandas/array of anatomical landmark points
        voxelize_dim --> voxel edge length for the output grid
    output:
        int8 binary 3D voxel array

    The new origin is the midpoint of the two landmarks that demarcate the
    x-axis. Because X and the raw Z are not guaranteed orthogonal, Y is built
    as cross(Z_raw, X) and Z is then rebuilt as cross(X, Y) so the three unit
    vectors form a true orthonormal basis.
    '''
    pts = np.array(PTS_file)
    x_axis = np.array(pts[0, :] - pts[1, :])
    z_axis_raw = np.array(pts[2, :] - pts[3, :])
    y_axis = np.cross(z_axis_raw, x_axis)
    z_axis = np.cross(x_axis, y_axis)

    x_unit = x_axis / np.linalg.norm(x_axis)
    y_unit = y_axis / np.linalg.norm(y_axis)
    z_unit = z_axis / np.linalg.norm(z_axis)

    # Columns of the rotation matrix are the local-frame unit vectors.
    rot_mat = np.stack((x_unit, y_unit, z_unit), axis=1)
    origin_mesh_local = np.array(pts[0:2, :].mean(axis=0))

    verts_local_coord = Global2Local_Coord(rot_mat, origin_mesh_local, mesh_obj.vertices)
    bin_mat = voxel_from_array(verts_local_coord, spacing=voxelize_dim)
    return bin_mat.astype('int8')
if __name__ == '__main__':
    # Load the pickled directory-listing objects produced by an earlier
    # pipeline step; presumably maps activities/patients to file paths —
    # TODO confirm against the writer of vox_fluoro_hist_objects.pkl.
    # NOTE(review): pickle.load on an untrusted file executes arbitrary code;
    # fine for this local artifact, but do not point it at external input.
    dir_file = open('vox_fluoro_hist_objects.pkl', 'rb')
    dir_dict = pickle.load(dir_file)
    dir_file.close()
| Python |
3D | john-drago/fluoro | code/scratch/unsup_segmentation_draft.py | .py | 6,619 | 136 | import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
import keras
import sys
# Experiment bookkeeping: name the run after this script file (strip '.py').
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts for this run are collected under
# ~/fluoro/code/scratch/unsup_seg/<expr_name>.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/scratch/unsup_seg'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp(number_of_samples=None):
    '''
    Load up to `number_of_samples` fluoro images and split them into
    train / validation sets.

    input:
        number_of_samples --> slice bound on the HDF5 dataset; None loads all
    output:
        (image_train_sub, image_val) --> training and validation image arrays
    '''
    # BUG FIX: h5py and train_test_split are not imported at module level in
    # this script (unlike the sibling unsup_segmentation.py), so the original
    # raised NameError when called. Import locally to keep the function
    # self-contained.
    import h5py
    from sklearn.model_selection import train_test_split

    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    image_mat = image_init[:number_of_samples]
    image_file.close()

    # Hold out 20% as a final test set, then carve 20% of the remainder off
    # for validation. The fixed random_state keeps both splits reproducible.
    image_train_cum, image_test = train_test_split(image_mat, shuffle=True, test_size=0.2, random_state=42)
    image_train_sub, image_val = train_test_split(image_train_cum, shuffle=True, test_size=0.2, random_state=42)

    print('Image sub size:', image_train_sub.shape)
    print('Image val size:', image_val.shape)

    return image_train_sub, image_val
# -----------------------------------------------------------------
# Keras image-data layout and network input geometry.
channel_order = 'channels_last'
input_size = (128, 128, 1)  # single-channel 128x128 fluoro image
# vox_input_shape = (198, 162, 564, 1)
# cali_input_shape = (6,)
inputs = Input(input_size)
# Contracting path (standard U-Net encoder): two 3x3 ReLU convs per stage,
# filters doubling 64 -> 1024, with 2x2 max-pooling between stages and
# dropout on the two deepest stages.
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
# Expansive path (decoder): 2x upsample + 2x2 conv, concatenate the matching
# encoder feature map (skip connection), then two 3x3 convs per stage.
up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
# Single-channel sigmoid head: per-pixel foreground probability.
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
# BUG FIX / consistency: `Model(input=..., output=...)` uses keyword names
# that were deprecated and later removed from Keras; the sibling script
# already uses the supported `inputs=`/`outputs=` spelling, so match it here.
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
# model.summary()
# Save a diagram of the architecture next to the other experiment artifacts.
keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# (Example fit/save calls removed; see the sibling unsup_segmentation.py for
# the full training driver.)
| Python |
3D | john-drago/fluoro | code/scratch/graphical_rotation_of_frame.py | .py | 1,203 | 41 | import numpy as np
from numpy import *
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
    """A FancyArrowPatch drawn between two 3-D points on a mplot3d axes.

    xs, ys, zs are each a 2-element sequence: (start, end) along that axis.
    """

    def __init__(self, xs, ys, zs, *args, **kwargs):
        # The 2-D endpoints passed to the base class are placeholders; the
        # real screen positions are recomputed at every draw() call.
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # Project the stored 3-D endpoints through the axes' current view
        # transform (renderer.M) to get 2-D display coordinates.
        # NOTE(review): renderer.M / proj_transform are matplotlib internals
        # and were changed in newer matplotlib releases — confirm the
        # installed version still supports this pattern.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
####################################################
# This part is just for reference if
# you are interested where the data is
# coming from
# The plot is at the bottom
#####################################################
# Draw the origin plus three unit arrows for the x (red-ish), y (green-ish)
# and z (blue-ish) axes of the global frame.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(0, 0, 0, c='black', s=50)  # origin marker
x = Arrow3D([0, 1], [0, 0], [0, 0], color=(0.5, 0, 0))
ax.add_artist(x)
y = Arrow3D([0, 0], [0, 1], [0, 0], color=(0, 0.5, 0))
ax.add_artist(y)
z = Arrow3D([0, 0], [0, 0], [0, 1], color=(0, 0, 0.5))
ax.add_artist(z)
plt.show()
| Python |
3D | john-drago/fluoro | code/scratch/unsup_segmentation.py | .py | 24,536 | 483 | import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import sys
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
# Experiment bookkeeping: name the run after this script file (strip '.py').
expr_name = sys.argv[0][:-3]
expr_no = '2'
# All artifacts for this run are collected under
# ~/fluoro/code/scratch/unsup_seg/<expr_name>.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/scratch/unsup_seg'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp(number_of_samples=None):
    '''
    Load up to `number_of_samples` fluoro images and split them into
    train / validation sets.

    input:
        number_of_samples --> slice bound on the HDF5 dataset; None loads all
    output:
        (image_train_sub, image_val) --> training and validation image arrays
    '''
    h5_path = os.path.expanduser('~/fluoro/data/compilation/images.h5py')
    image_file = h5py.File(h5_path, 'r')
    image_mat = image_file['image_dset'][:number_of_samples]
    image_file.close()

    # 80/20 train/test, then 80/20 train/val on the remainder; the fixed
    # random_state keeps both splits reproducible run to run.
    image_train_cum, image_test = train_test_split(image_mat, shuffle=True, test_size=0.2, random_state=42)
    image_train_sub, image_val = train_test_split(image_train_cum, shuffle=True, test_size=0.2, random_state=42)

    print('Image sub size:', image_train_sub.shape)
    print('Image val size:', image_val.shape)

    return image_train_sub, image_val
# -----------------------------------------------------------------
class KMeansLayer(keras.layers.Layer):
    """Non-trainable layer that clusters each frame's per-pixel feature
    vectors with scikit-learn KMeans and emits, per pixel, the mean feature
    value of its assigned cluster (a single-channel map).

    NOTE(review): the clustering runs on CPU through tf.py_func and has no
    gradient; the `+ kernel - kernel` term in call() only keeps the op
    attached to the trainable graph.
    """

    def __init__(self, clusters=8, n_init=5, trainable=False, **kwargs):
        super(KMeansLayer, self).__init__(**kwargs)
        self.clusters = clusters  # number of KMeans clusters per frame
        self.n_init = n_init      # KMeans restarts per frame

    def build(self, input_shape):
        # Output keeps the spatial size but collapses channels to 1.
        self.output_s = (input_shape[0], input_shape[1], input_shape[2], 1)
        self.depth = input_shape[3]
        # BUG FIX: call() references self.kernel, but the original build()
        # never created it, so invoking the layer raised AttributeError.
        # A zero-valued dummy weight keeps the graph connected end-to-end.
        self.kernel = self.add_weight(name='kernel', shape=(1,), initializer='zeros', trainable=True)
        self.built = True

    def call(self, inputs):
        def KMeansFunc(input_tens, clusters=self.clusters, n_init=self.n_init):
            # input_tens: numpy array (batch, H, W, C) handed over by tf.py_func.
            base_mat = np.zeros((input_tens.shape[0], input_tens.shape[1], input_tens.shape[2]))
            for frame in range(input_tens.shape[0]):
                flat = np.reshape(input_tens[frame], (input_tens.shape[1] * input_tens.shape[2], input_tens.shape[3]))
                pixel_vals = np.zeros(input_tens.shape[1] * input_tens.shape[2])
                class_pred = KMeans(n_clusters=clusters, n_init=n_init).fit_predict(flat)
                # BUG FIX: the original looped over range(8) regardless of the
                # configured cluster count, and assigned each cluster twice
                # (the axis=1 mean was immediately overwritten by the scalar
                # mean, so only the scalar-mean behavior is kept).
                for clust in range(clusters):
                    pixel_vals[class_pred == clust] = np.mean(flat[class_pred == clust], None)
                base_mat[frame] = np.reshape(pixel_vals, (input_tens.shape[1], input_tens.shape[2]))
            return np.expand_dims(base_mat, axis=-1).astype('float32')

        # Also removes the original's dead tf.Variable placeholder that was
        # hard-coded to shape (6, 128, 128, 1) and immediately overwritten.
        return tf.py_func(KMeansFunc, [inputs], tf.float32) + self.kernel - self.kernel

    def compute_output_shape(self, input_shape):
        return self.output_s
# -----------------------------------------------------------------
# Keras image-data layout and network input geometry for this script.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)  # single-channel 128x128 fluoro image
# vox_input_shape = (198, 162, 564, 1)
# cali_input_shape = (6,)
def root_mean_squared_error(y_true, y_pred):
    """Return sqrt(mean((y_pred - y_true)^2)) as a Keras backend tensor."""
    squared_err = keras.backend.square(y_pred - y_true)
    return keras.backend.sqrt(keras.backend.mean(squared_err))
# Hyper-parameter dictionary for the encoder/decoder network built below.
# Keys follow the pattern '<layer>_<stage>_<idx>_<attribute>' and are looked
# up by name in the layer definitions, so renaming a key here requires
# updating the matching layer call.
params = {
    # 2D CONV — encoder stages 1-4 (conv/dropout/pool) and bottleneck stage 5
    'conv_1_1_filters': 64,
    'conv_1_1_kernel': 5,
    'conv_1_1_strides': 1,
    'conv_1_1_pad': 'same',
    'spatial_drop_rate_1_1': 0.5,
    'conv_1_2_filters': 64,
    'conv_1_2_kernel': 3,
    'conv_1_2_strides': 1,
    'conv_1_2_pad': 'same',
    'spatial_drop_rate_1_2': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_1_filters': 128,
    'conv_2_1_kernel': 3,
    'conv_2_1_strides': 1,
    'conv_2_1_pad': 'same',
    'spatial_drop_rate_2_1': 0.5,
    'conv_2_2_filters': 128,
    'conv_2_2_kernel': 3,
    'conv_2_2_strides': 1,
    'conv_2_2_pad': 'same',
    'spatial_drop_rate_2_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_1_filters': 256,
    'conv_3_1_kernel': 3,
    'conv_3_1_strides': 1,
    'conv_3_1_pad': 'same',
    'spatial_drop_rate_3_1': 0.5,
    'conv_3_2_filters': 256,
    'conv_3_2_kernel': 3,
    'conv_3_2_strides': 1,
    'conv_3_2_pad': 'same',
    'spatial_drop_rate_3_2': 0.5,
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'conv_4_1_filters': 512,
    'conv_4_1_kernel': 3,
    'conv_4_1_strides': 1,
    'conv_4_1_pad': 'same',
    'spatial_drop_rate_4_1': 0.5,
    'conv_4_2_filters': 512,
    'conv_4_2_kernel': 3,
    'conv_4_2_strides': 1,
    'conv_4_2_pad': 'same',
    'spatial_drop_rate_4_2': 0.5,
    'pool_4_size': 2,
    'pool_4_pad': 'same',
    'conv_5_1_filters': 1024,
    'conv_5_1_kernel': 3,
    'conv_5_1_strides': 1,
    'conv_5_1_pad': 'same',
    'conv_5_2_filters': 1024,
    'conv_5_2_kernel': 3,
    'conv_5_2_strides': 1,
    'conv_5_2_pad': 'same',
    # Decoder: up_conv_N feeds a bilinear UpSampling2D (up_N), then conv_6..9
    'up_conv_1_filters': 512,
    'up_conv_1_kernel': 2,
    'up_conv_1_strides': 1,
    'up_conv_1_pad': 'same',
    'up_1_size': 2,
    'up_1_int': 'bilinear',
    'conv_6_1_filters': 512,
    'conv_6_1_kernel': 3,
    'conv_6_1_strides': 1,
    'conv_6_1_pad': 'same',
    'conv_6_2_filters': 512,
    'conv_6_2_kernel': 3,
    'conv_6_2_strides': 1,
    'conv_6_2_pad': 'same',
    'up_conv_2_filters': 256,
    'up_conv_2_kernel': 2,
    'up_conv_2_strides': 1,
    'up_conv_2_pad': 'same',
    'up_2_size': 2,
    'up_2_int': 'bilinear',
    'conv_7_1_filters': 256,
    'conv_7_1_kernel': 3,
    'conv_7_1_strides': 1,
    'conv_7_1_pad': 'same',
    'conv_7_2_filters': 256,
    'conv_7_2_kernel': 3,
    'conv_7_2_strides': 1,
    'conv_7_2_pad': 'same',
    'up_conv_3_filters': 128,
    'up_conv_3_kernel': 2,
    'up_conv_3_strides': 1,
    'up_conv_3_pad': 'same',
    'up_3_size': 2,
    'up_3_int': 'bilinear',
    'conv_8_1_filters': 128,
    'conv_8_1_kernel': 3,
    'conv_8_1_strides': 1,
    'conv_8_1_pad': 'same',
    'conv_8_2_filters': 128,
    'conv_8_2_kernel': 3,
    'conv_8_2_strides': 1,
    'conv_8_2_pad': 'same',
    'up_conv_4_filters': 64,
    'up_conv_4_kernel': 2,
    'up_conv_4_strides': 1,
    'up_conv_4_pad': 'same',
    'up_4_size': 2,
    'up_4_int': 'bilinear',
    'conv_9_1_filters': 64,
    'conv_9_1_kernel': 3,
    'conv_9_1_strides': 1,
    'conv_9_1_pad': 'same',
    'conv_9_2_filters': 64,
    # NOTE(review): 64 is an unusually large kernel size — every sibling conv
    # uses 3. Possibly a typo for 3; confirm before training.
    'conv_9_2_kernel': 64,
    'conv_9_2_strides': 1,
    'conv_9_2_pad': 'same',
    # Head convs feeding the KMeans output stage
    'conv_k_1_filters': 20,
    'conv_k_1_kernel': 3,
    'conv_k_1_strides': 1,
    'conv_k_1_pad': 'same',
    'conv_k_2_filters': 3,
    'conv_k_2_kernel': 1,
    'conv_k_2_strides': 1,
    'conv_k_2_pad': 'same',
    # General Housekeeping
    'regularizer_l1': 0.1,
    'regularizer_l2': 0.25,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # NOTE: the optimizer is instantiated here at import time.
    'model_opt': keras.optimizers.RMSprop(),
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 6,
    'model_loss': 'mse',
    'model_metric': 'mse'
}
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
input_fluoro_1 = keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
# -----------------------------------------------------------------
# d
bn_1 = keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = keras.layers.Conv2D(filters=params['conv_1_1_filters'], kernel_size=params['conv_1_1_kernel'], strides=params['conv_1_1_strides'], padding=params['conv_1_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(bn_1)
spat_1_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1_1'])(conv_1_1)
conv_1_2 = keras.layers.Conv2D(filters=params['conv_1_2_filters'], kernel_size=params['conv_1_2_kernel'], strides=params['conv_1_2_strides'], padding=params['conv_1_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_1_1)
spat_1_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1_2'])(conv_1_2)
pool_1 = keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
conv_2_1 = keras.layers.SeparableConv2D(filters=params['conv_2_1_filters'], kernel_size=params['conv_2_1_kernel'], strides=params['conv_2_1_strides'], padding=params['conv_2_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_1)
spat_2_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2_1'])(conv_2_1)
conv_2_2 = keras.layers.SeparableConv2D(filters=params['conv_2_2_filters'], kernel_size=params['conv_2_2_kernel'], strides=params['conv_2_2_strides'], padding=params['conv_2_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_2_1)
spat_2_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2_2'])(conv_2_2)
pool_2 = keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
conv_3_1 = keras.layers.SeparableConv2D(filters=params['conv_3_1_filters'], kernel_size=params['conv_3_1_kernel'], strides=params['conv_3_1_strides'], padding=params['conv_3_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_2)
spat_3_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3_1'])(conv_3_1)
conv_3_2 = keras.layers.SeparableConv2D(filters=params['conv_3_2_filters'], kernel_size=params['conv_3_2_kernel'], strides=params['conv_3_2_strides'], padding=params['conv_3_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_3_1)
spat_3_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3_2'])(conv_3_2)
pool_3 = keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(spat_3_2)
conv_4_1 = keras.layers.SeparableConv2D(filters=params['conv_4_1_filters'], kernel_size=params['conv_4_1_kernel'], strides=params['conv_4_1_strides'], padding=params['conv_4_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_3)
spat_4_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4_1'])(conv_4_1)
conv_4_2 = keras.layers.SeparableConv2D(filters=params['conv_4_2_filters'], kernel_size=params['conv_4_2_kernel'], strides=params['conv_4_2_strides'], padding=params['conv_4_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_4_1)
spat_4_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4_2'])(conv_4_2)
pool_4 = keras.layers.MaxPooling2D(pool_size=params['pool_4_size'], padding=params['pool_4_pad'], data_format=channel_order)(spat_4_2)
conv_5_1 = keras.layers.SeparableConv2D(filters=params['conv_5_1_filters'], kernel_size=params['conv_5_1_kernel'], strides=params['conv_5_1_strides'], padding=params['conv_5_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_4)
conv_5_2 = keras.layers.SeparableConv2D(filters=params['conv_5_2_filters'], kernel_size=params['conv_5_2_kernel'], strides=params['conv_5_2_strides'], padding=params['conv_5_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_5_1)
up_conv_1 = keras.layers.SeparableConv2D(filters=params['up_conv_1_filters'], kernel_size=params['up_conv_1_kernel'], strides=params['up_conv_1_strides'], padding=params['up_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_5_2)
up_1 = keras.layers.UpSampling2D(size=(params['up_1_size'], params['up_1_size']), interpolation=params['up_1_int'])(up_conv_1)
conv_6_1 = keras.layers.SeparableConv2D(filters=params['conv_6_1_filters'], kernel_size=params['conv_6_1_kernel'], strides=params['conv_6_1_strides'], padding=params['conv_6_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_1)
conv_6_2 = keras.layers.SeparableConv2D(filters=params['conv_6_2_filters'], kernel_size=params['conv_6_2_kernel'], strides=params['conv_6_2_strides'], padding=params['conv_6_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_6_1)
up_conv_2 = keras.layers.SeparableConv2D(filters=params['up_conv_2_filters'], kernel_size=params['up_conv_2_kernel'], strides=params['up_conv_2_strides'], padding=params['up_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_6_2)
up_2 = keras.layers.UpSampling2D(size=(params['up_2_size'], params['up_2_size']), interpolation=params['up_2_int'])(up_conv_2)
conv_7_1 = keras.layers.SeparableConv2D(filters=params['conv_7_1_filters'], kernel_size=params['conv_7_1_kernel'], strides=params['conv_7_1_strides'], padding=params['conv_7_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_2)
conv_7_2 = keras.layers.SeparableConv2D(filters=params['conv_7_2_filters'], kernel_size=params['conv_7_2_kernel'], strides=params['conv_7_2_strides'], padding=params['conv_7_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_7_1)
up_conv_3 = keras.layers.SeparableConv2D(filters=params['up_conv_3_filters'], kernel_size=params['up_conv_3_kernel'], strides=params['up_conv_3_strides'], padding=params['up_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_7_2)
up_3 = keras.layers.UpSampling2D(size=(params['up_3_size'], params['up_3_size']), interpolation=params['up_3_int'])(up_conv_3)
conv_8_1 = keras.layers.SeparableConv2D(filters=params['conv_8_1_filters'], kernel_size=params['conv_8_1_kernel'], strides=params['conv_8_1_strides'], padding=params['conv_8_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_3)
conv_8_2 = keras.layers.SeparableConv2D(filters=params['conv_8_2_filters'], kernel_size=params['conv_8_2_kernel'], strides=params['conv_8_2_strides'], padding=params['conv_8_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_8_1)
up_conv_4 = keras.layers.SeparableConv2D(filters=params['up_conv_4_filters'], kernel_size=params['up_conv_4_kernel'], strides=params['up_conv_2_strides'], padding=params['up_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_8_2)
up_4 = keras.layers.UpSampling2D(size=(params['up_4_size'], params['up_4_size']), interpolation=params['up_4_int'])(up_conv_4)
conv_9_1 = keras.layers.SeparableConv2D(filters=params['conv_9_1_filters'], kernel_size=params['conv_9_1_kernel'], strides=params['conv_9_1_strides'], padding=params['conv_9_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_4)
conv_9_2 = keras.layers.SeparableConv2D(filters=params['conv_9_2_filters'], kernel_size=params['conv_9_2_kernel'], strides=params['conv_9_2_strides'], padding=params['conv_9_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_9_1)
# Head: reduce to a small feature stack, then cluster per-pixel features.
conv_k_1 = keras.layers.SeparableConv2D(filters=params['conv_k_1_filters'], kernel_size=params['conv_k_1_kernel'], strides=params['conv_k_1_strides'], padding=params['conv_k_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_9_2)
conv_k_2 = keras.layers.SeparableConv2D(filters=params['conv_k_2_filters'], kernel_size=params['conv_k_2_kernel'], strides=params['conv_k_2_strides'], padding=params['conv_k_2_pad'], data_format=channel_order, activation='linear', kernel_initializer=params['kern_init'])(conv_k_1)
# BUG FIX: the original wrapped `KMeansFunc` in a Lambda layer, but no
# module-level `KMeansFunc` exists (it is only defined inside
# KMeansLayer.call and in commented-out code), so building the graph raised
# NameError here. Use the KMeansLayer defined above instead, as the adjacent
# commented-out line indicated was the intent.
kmeans_out = KMeansLayer(clusters=8, n_init=5)(conv_k_2)
# def KMeansFunc(x):
# # batch_mat = np.zeros((x.shape[0],x.shape[1],x.shape[2]))
# def inner_fn(x, clusters=8,n_init=5):
# batch_mat = np.zeros((x.shape[0],x.shape[1],x.shape[2]))
# for frame in range(keras.backend.shape(x)[0]):
# input_mat = x[frame]
# init_mat = np.zeros((input_mat.shape[0]*input_mat.shape[1]))
# kmeans_init = KMeans(n_clusters=clusters,n_init=n_init)
# reshape_mat = np.reshape(input_mat,(input_mat.shape[0]*input_mat.shape[1],input_mat.shape[2]))
# class_pred = kmeans_init.fit_predict(reshape_mat)
# for clust in range(clusters):
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],axis=1)
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],None)
# batch_mat[frame] = np.reshape(init_mat,(input_mat.shape[0],input_mat.shape[1])).astype(np.float32)
# batch_mat = np.expand_dims(batch_mat,axis=-1)
# return batch_mat.astype(np.float32)
# return tf.py_func(inner_fn,[x],tf.float32)
# def KMeansFunc_outputshape(input_shape):
# return (input_shape[0],input_shape[1], input_shape[2],1)
# kmeans_out = keras.layers.Lambda(KMeansFunc)(conv_k_2)
# print(dir(kmeans_out))
# print(kmeans_out.graph)
# -----------------------------------------------------------------
# Main Output
# main_output = keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(conv_k_1)
# -----------------------------------------------------------------
# Model Housekeeping
model = keras.Model(inputs=[input_fluoro_1], outputs=kmeans_out)
keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.compile(optimizer=params['model_opt'], loss=params['model_loss'], metrics=[params['model_metric']])
# image_train_sub, image_val = data_comp(200)
# result = model.fit(x={'input_vox': np.expand_dims(vox_train_sub, axis=-1), 'input_fluoro_1': np.expand_dims(image_train_sub[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_train_sub[:, 1, :, :], axis=-1), 'input_cali': cali_train_sub}, y=label_train_sub, validation_data=([np.expand_dims(vox_val, axis=-1), np.expand_dims(image_val[:, 0, :, :], axis=-1), np.expand_dims(image_val[:, 1, :, :], axis=-1), cali_val], label_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# model.save(os.path.join(os.getcwd(), 'test_model_save.h5'))
# def KMeansFunc(input_tens,clusters=8,n_init=5):
# base_mat = np.zeros((1,128,128,1))
# global xaaa
# xaaa = 0
# def KMeans_base(input_tens,base_mat=base_mat):
# global xaaa
# xaaa +=1
# init_mat = np.zeros((input_tens.shape[0]*input_tens.shape[1]))
# print(init_mat.shape)
# reshape_mat = np.reshape(input_tens[frame],(input_tens.shape[0]*input_tens.shape[1],input_tens.shape[2]))
# print(reshape_mat.shape)
# kmeans_init = KMeans(n_clusters=clusters, n_init=n_init)
# class_pred = kmeans_init.fit_predict(reshape_mat)
# for clust in range(8):
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],axis=1)
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],None)
# print(base_mat.shape)
# base_mat[frame]=np.reshape(init_mat,(input_tens.shape[1],input_tens.shape[2]))
# return np.expand_dims(base_mat,axis=-1).astype('float32')
# class KMeansLayer(keras.layers.Layer):
# def __init__(self, clusters, n_init, trainable=False, **kwargs):
# super(KMeansLayer, self).__init__(**kwargs)
# self.clusters = clusters
# self.n_init = n_init
# def build(self, input_shape):
# # self.input_shape = input_shape
# input_shape = input_shape
# self.output_s = (input_shape[0],input_shape[1], input_shape[2],1)
# self.depth = input_shape[3]
# super(KMeansLayer, self).build(input_shape)
# def call(self, inputs, **kwargs):
# def KMeansFunc(input_tens):
# batch_mat = np.zeros((input_tens.shape[0],input_tens.shape[1],input_tens.shape[2]))
# for frame in range(input_tens.shape[0]):
# input_mat = input_tens[frame]
# init_mat = np.zeros((input_mat.shape[0]*input_mat.shape[1]))
# kmeans_init = KMeans(n_clusters=self.clusters,n_init=self.n_init)
# reshape_mat = np.reshape(input_mat,(input_mat.shape[0]*input_mat.shape[1],input_mat.shape[2]))
# class_pred = kmeans_init.fit_predict(reshape_mat)
# for clust in range(clusters):
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],axis=1)
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],None)
# batch_mat[frame] = np.reshape(init_mat,(input_mat.shape[0],input_mat.shape[1])).astype(np.float32)
# batch_mat = np.expand_dims(batch_mat,axis=-1)
# return tf.convert_to_tensor(batch_mat.astype(np.float32))
# return tf.py_func(KMeansFunc,[inputs],tf.float32)
# def compute_output_shape(self, input_shape):
# return self.output_s | Python |
3D | aAbdz/CylShapeDecomposition | CSD/coord_conv.py | .py | 267 | 15 | # -*- coding: utf-8 -*-
import numpy as np
def cart2pol(x, y):
    """Convert Cartesian coordinates to polar: (radius, angle in degrees)."""
    radius = np.hypot(x, y)
    angle_deg = np.degrees(np.arctan2(y, x))
    return (radius, angle_deg)
def pol2cart(rho, phi):
    """Convert polar coordinates (radius, angle in degrees) to Cartesian (x, y)."""
    angle_rad = np.deg2rad(phi)
    return (rho * np.cos(angle_rad), rho * np.sin(angle_rad))
3D | aAbdz/CylShapeDecomposition | CSD/skeleton_decomposition.py | .py | 11,339 | 376 | # -*- coding: utf-8 -*-
import numpy as np
from skeleton3D import get_line_length
import collections
def skeleton_main_branch(skel):
    """Decompose a branched skeleton into its main (longest, acyclic) paths.

    ``skel`` is a list of branches, each an (n, 3) array of points.  Repeatedly
    picks the longest remaining branch, extends it to leaves in both
    directions, and carves that path (plus its decomposing nodes) out of the
    endpoint graph.  Returns a list of dicts with keys 'skeleton' (the
    parametrized path, an (m, 3) array) and 'dec_nodes' (coordinates of the
    nodes where the path should be cut from the rest of the object).
    """
    main_skeletons = []
    n_branch = len(skel)
    nodes_coord, branch_length = skeleton_info(skel)
    longest_branch = np.argmax(np.array(branch_length))
    graph = form_graph(nodes_coord)
    # graph_e: branch -> its two endpoint indices; graph_v: endpoint -> its branch
    graph_e = {i: [i*2, i*2+1] for i in range(n_branch)}
    graph_v = {i: i//2 for i in range(2*n_branch)}
    graph = rearrange_graph(graph, graph_v, graph_e)
    # endpoints connected to more than two neighbours are junctions
    dec_junction = [i for i in graph if len(graph[i])>2]
    dec_leaf = set()
    while len(graph)!=0:
        path = set()
        obj = {}
        # branches already removed from graph_e must never be picked again
        for i in range(n_branch):
            if i not in graph_e:
                branch_length[i] = -np.inf
        path0 = np.argmax(np.array(branch_length))
        path.add(path0)
        root1, root2 = graph_e[path0]
        # grow the path from both endpoints of the seed branch out to leaves
        path1, dec_leaf1 = path_to_leaf(graph, root1, root2, graph_v, graph_e, skel, nodes_coord)
        path2, dec_leaf2 = path_to_leaf(graph, root2, root1, graph_v, graph_e, skel, nodes_coord)
        path.update(set(path1)); path.update(set(path2))
        dec_leaf.update(set(dec_leaf1)); dec_leaf.update(set(dec_leaf2))
        dec_nodes = detect_decomposing_nodes(path, dec_junction, list(dec_leaf), graph_e)
        # split graph into the removed sub-graph (rgraph*) and the remainder;
        # note: update_graphs mutates graph/graph_v/graph_e in place
        graph, rgraph, rgraph_e, rgraph_v = update_graphs(graph, graph_v, graph_e, path)
        if cyclic_graph(rgraph, rgraph_v, rgraph_e) == False:
            parametrized_skel = skeleton_parametrization(skel, rgraph, rgraph_v, rgraph_e, nodes_coord)
            coord_dec_nodes = pair_dec_nodes(rgraph, dec_nodes, nodes_coord)
            obj['skeleton'] = parametrized_skel
            obj['dec_nodes'] = coord_dec_nodes
            # keep multi-branch paths; a single-branch path is kept only if it
            # is the globally longest branch
            if len(path) > 1:
                main_skeletons.append(obj)
            elif (len(path) == 1) & (list(path)[0] == longest_branch):
                main_skeletons.append(obj)
    return main_skeletons
def cyclic_graph(g, rgraph_v, rgraph_e):
    """Return True when some node of ``g`` is adjacent to both endpoints of a
    single branch, i.e. the removed sub-graph contains a cycle."""
    for node, neighbours in g.items():
        neighbour_set = set(neighbours)
        for nb in neighbours:
            branch_ends = set(rgraph_e[rgraph_v[nb]])
            if len(branch_ends & neighbour_set) == 2:
                return True
    return False
def pair_dec_nodes(g, dec_nodes, nodes_coord):
p, visited = [], set()
leaf = [i for i in g if len(g[i])==1]
for i in dec_nodes:
if i not in visited:
visited.add(i)
if i in leaf:
p.append([i])
elif len(set(g[i]) & set(dec_nodes)) == 0:
p.append([i])
else:
for neighbour in g[i]:
if neighbour in dec_nodes:
if neighbour not in visited:
p.append([i, neighbour])
visited.add(neighbour)
coord_dec_nodes = [[nodes_coord[j] for j in i] for i in p]
return coord_dec_nodes
def skeleton_parametrization(skel, n_graph, graph_v, graph_e, nodes_coord):
parametrized_skel = np.empty((0,3))
visited = set()
leaf = [i for i in n_graph if len(n_graph[i])==1]
st_node = leaf.pop()
visited.add(st_node)
branch = graph_v[st_node]
o_branch = order_branch(skel[branch], nodes_coord[st_node], 'ascend')
parametrized_skel = np.append(parametrized_skel, o_branch, axis=0)
while st_node not in leaf:
for neighbour in n_graph[st_node]:
if neighbour not in visited:
visited.add(neighbour)
st_node = neighbour
if neighbour not in graph_e[branch]:
branch = graph_v[st_node]
o_branch = order_branch(skel[branch], nodes_coord[st_node], 'ascend')
parametrized_skel = np.append(parametrized_skel, o_branch, axis=0)
return parametrized_skel
def detect_decomposing_nodes(path, dec_junction, dec_leaf, graph_e):
dec_nodes = set()
for i in path:
intersect_j = set(graph_e[i]) & set(dec_junction); dec_nodes.update(intersect_j)
intersect_l = set(graph_e[i]) & set(dec_leaf); dec_nodes.update(intersect_l)
return list(dec_nodes)
def update_graphs(graph, graph_v, graph_e, path):
all_path = set(graph_e.keys()); path_comp = all_path - path
path_v = [j for i in path for j in graph_e[i]]
path_comp_v = [j for i in path_comp for j in graph_e[i]]
rgraph = {i:graph.pop(i) for i in path_v}
rgraph_v = {i:graph_v.pop(i) for i in path_v}
rgraph_e = {i:graph_e.pop(i) for i in path}
graph = {i:list(set(graph[i]) - set(path_v)) for i in graph}
rgraph = {i:list(set(rgraph[i]) - set(path_comp_v)) for i in rgraph}
return graph, rgraph, rgraph_e, rgraph_v
def path_to_leaf(graph, st_node, counter_st_node, graph_v, graph_e, skel, nodes_coord):
path = []
dec_leaf = []
leaf = [i for i in graph if len(graph[i])==1]
visited = set([counter_st_node])
while st_node not in leaf:
st_node_old = st_node
st_node, p, visited = detect_next_node(st_node, graph, visited, graph_v, graph_e, skel, nodes_coord)
if st_node == st_node_old:
leaf.append(st_node)
ngbh = list(set(graph[st_node]) - set(graph_e[graph_v[st_node]]))
dec_leaf.append(st_node)
for i in ngbh:
dec_leaf.append(i)
else:
path.append(p)
return path, dec_leaf
def detect_next_node(st_node, graph, visited, graph_v, graph_e, skel, nodes_coord):
orientation_diff = []
comparing_branches = []
for neighbour in graph[st_node]:
if neighbour not in visited:
visited.add(neighbour)
branch_ref, branch_comp = graph_v[st_node], graph_v[neighbour]
skel_ref = skel[branch_ref]
ord_skelRef = order_branch(skel_ref, nodes_coord[st_node], 'ascend')
vs_ref = tangent_vector_sum(ord_skelRef)
skel_comp = skel[branch_comp]
ord_skelComp = order_branch(skel_comp, nodes_coord[neighbour], 'ascend')
vs_comp = tangent_vector_sum(ord_skelComp)
angle = np.arccos(np.clip(np.dot(vs_ref, vs_comp), -1.0, 1.0))
angle = angle*(180/np.pi)
if angle > 90:
orientation_diff.append(angle)
comparing_branches.append(branch_comp)
if len(comparing_branches) != 0:
next_branch = comparing_branches[np.argmax(np.array(orientation_diff))]
next_st_node = list(set(graph_e[next_branch]) - visited)
if len(next_st_node) != 0:
next_st_node = next_st_node[0]
else:
next_st_node = st_node
else:
next_branch = []
next_st_node = st_node
return next_st_node, next_branch, visited
def detect_junction_coordinates(end_points_coord, junctions_as_endpoint):
junction_coordinates = []
for junction in junctions_as_endpoint:
if len(junction)>2:
junction_coordinate = np.array([end_points_coord[i] for i in list(junction)])
junction_coordinates.append(junction_coordinate)
return junction_coordinates
def branch_endpoints_inx(branch_inx):
ep_inx = set([branch_inx*2, branch_inx*2 + 1])
return ep_inx
def skeleton_info(skel):
end_point=[]
branch_length=[]
for branch in skel:
end_point.append(branch[0])
end_point.append(branch[-1])
branch_length.append(get_line_length(branch))
return end_point, branch_length
def end_points_cross_distance(end_points):
l=len(end_points)
dist=np.empty((l,0))
for ep in end_points:
euclidean_dist=np.expand_dims(np.sum((end_points-ep)**2,axis=1)**0.5,axis=1)
dist=np.append(dist,euclidean_dist,axis=1)
return dist
def form_graph(end_points):
ep_cross_dist = end_points_cross_distance(end_points)
sz = ep_cross_dist.shape
ep_cross_dist[np.diag_indices(sz[0])] = np.inf
for i in range(1, sz[0], 2):
ep_cross_dist[i,i-1] = np.inf
ep_cross_dist[i-1,i] = np.inf
graph = detect_fully_connected_graph(ep_cross_dist)
return graph
def detect_fully_connected_graph(ep_cross_dist):
num_eps = ep_cross_dist.shape[0]
junction_eps = np.empty((0,2), dtype=np.int32)
visited = set()
while len(visited) != num_eps:
conn_dist = np.min(ep_cross_dist)
if conn_dist == np.inf:
visited, graph = fully_connected_tree(junction_eps, num_eps)
else:
min_ind = np.where(ep_cross_dist==conn_dist)
ep_cross_dist[min_ind] = np.inf
junction_ep = np.array(min_ind).T
junction_eps = np.append(junction_eps, junction_ep, axis=0)
visited, graph = fully_connected_tree(junction_eps, num_eps)
return graph
def fully_connected_tree(junction_eps, num_eps):
graph = {i: [i+1] for i in range(0, num_eps, 2)}
graph_odd = {i: [i-1] for i in range(1, num_eps, 2)}
graph.update(graph_odd)
for i in junction_eps:
graph[i[0]].append(i[1])
visited = breadth_first_search(graph, root = 0)
return visited, graph
def breadth_first_search(graph, root):
visited, queue = set(), collections.deque([root])
while queue:
vertex = queue.popleft()
for neighbour in graph[vertex]:
if neighbour not in visited:
visited.add(neighbour)
queue.append(neighbour)
return visited
def rearrange_graph(graph, graph_v, graph_e):
for i in graph:
ngb = set(graph[i])
if len(ngb)>1:
l_ngb = -1
while len(ngb) > l_ngb:
l_ngb = len(ngb)
nngb = ngb - set(graph_e[graph_v[i]])
for n in nngb:
ngb |= set(graph[n]) - set(graph_e[graph_v[n]]) - set([i])
graph[i] = ngb
return graph
def order_branch(branch, junction, order):
e1 = np.sum(branch[0]-junction)**2
e2 = np.sum(branch[-1]-junction)**2
if order=='descend':
if e1<e2:
branch = np.flip(branch, axis=0)
elif order=='ascend':
if e1>e2:
branch = np.flip(branch, axis=0)
return branch
def unique(mylist):
unique_list = []
for x in mylist:
if x not in unique_list:
unique_list.append(x)
return unique_list
def tangent_vector_sum(branch):
d=np.gradient(branch,axis=0)
ds=np.sum((d**2),axis=1)**0.5
ds=np.repeat(np.expand_dims(ds,axis=1),3,axis=1)
ds[ds==0] = 1e-5
nTangVec=d/ds
vec_sum = np.sum(nTangVec,axis=0)
vec_sum = vec_sum / np.linalg.norm(vec_sum)
return vec_sum
| Python |
3D | aAbdz/CylShapeDecomposition | CSD/shape_decomposition.py | .py | 19,224 | 573 | # -*- coding: utf-8 -*-
import numpy as np
import plane_rotation as pr
from scipy.interpolate import RegularGridInterpolator as rgi
from unit_tangent_vector import unit_tangent_vector
from hausdorff_distance import hausdorff_distance
from skimage.measure import label, regionprops
from skeleton_decomposition import skeleton_main_branch
import pylab as plt
from polar_parametrization import polar_parametrization
from polar_interpolation import polar_interpolation
import skfmm
from scipy.interpolate import interp1d
from matplotlib import path
from coord_conv import cart2pol
def tangent_planes_to_zone_of_interest(cropAx, parametrized_skel,
        s_inx, e_inx, g_radius, g_res, shift_impose, direction, H_th):
    """Scan skeleton samples from ``s_inx`` toward ``e_inx`` (step
    ``direction`` = +1/-1), extracting the object's cross-section in the
    plane normal to the local tangent, and stop when the boundary stops
    resembling the running mean curve (Hausdorff ratio >= ``H_th``).

    Returns (index of last accepted sample, its boundary curve, and the X/Y
    centroid shifts applied to it).  ``cropAx`` is the binary volume;
    ``g_radius``/``g_res`` define the sampling grid in the cutting plane.
    """
    p_inx, p_bound, p_shiftX, p_shiftY = s_inx, [], 0 , 0
    sz = cropAx.shape
    # sampling grid in the local cutting plane (z = 0 before rotation)
    x, y = np.mgrid[-g_radius:g_radius:g_res, -g_radius:g_radius:g_res]
    z = np.zeros_like(x)
    c_mesh = (2*g_radius)/(2*g_res)  # grid-center index
    xyz = np.array([np.ravel(x), np.ravel(y), np.ravel(z)]).T
    tangent_vecs = unit_tangent_vector(parametrized_skel)
    interpolating_func = rgi((range(sz[0]),range(sz[1]),range(sz[2])),
        cropAx,bounds_error=False,fill_value=0)
    # tiny disc around the grid center used to pick the central component
    cent_ball = (x**2+y**2)<g_res*1
    count = 1
    while s_inx != e_inx:
        #print s_inx
        point = parametrized_skel[s_inx]
        utv = tangent_vecs[s_inx]
        # degenerate tangent: skip this sample
        if np.array_equal(utv, np.array([0, 0, 0])):
            s_inx = s_inx+direction
            continue
        # rotate the sampling plane so its normal aligns with the tangent
        rot_axis = pr.unit_normal_vector(utv, np.array([0,0,1]))
        theta = pr.angle(utv, np.array([0,0,1]))
        rot_mat = pr.rotation_matrix_3D(rot_axis, theta)
        rotated_plane = np.squeeze(pr.rotate_vector(xyz, rot_mat))
        cross_section_plane = rotated_plane+point
        cross_section = interpolating_func(cross_section_plane)
        bw_cross_section = cross_section>=0.5
        bw_cross_section = np.reshape(bw_cross_section, x.shape)
        label_cross_section, nn = label(bw_cross_section, connectivity=1, return_num=True)
        # keep only the connected component under the grid center
        main_lbl = np.unique(label_cross_section[cent_ball])
        main_lbl = main_lbl[np.nonzero(main_lbl)]
        if len(main_lbl)!=1:
            s_inx = s_inx+direction
            continue
        bw_cross_section = label_cross_section==main_lbl
        # reject degenerate (too thin) cross-sections
        nz_X = np.count_nonzero(np.sum(bw_cross_section, axis=0))
        nz_Y = np.count_nonzero(np.sum(bw_cross_section, axis=1))
        if (nz_X<4) | (nz_Y<4):
            s_inx = s_inx+direction
            continue
        if shift_impose:
            # re-center the component on the grid center by rolling
            props = regionprops(bw_cross_section.astype(np.int64))
            y0, x0 = props[0].centroid
            shiftX = np.round(c_mesh-x0).astype(np.int64)
            shiftY = np.round(c_mesh-y0).astype(np.int64)
            p = max(abs(shiftX), abs(shiftY))
            if p != 0:
                bw_cross_section = np.pad(bw_cross_section, p, mode='constant')
                bw_cross_section = np.roll(bw_cross_section,shiftY,axis=0)
                bw_cross_section = np.roll(bw_cross_section,shiftX,axis=1)
                bw_cross_section = bw_cross_section[p:-p, p:-p]
                label_cross_section, nn = label(bw_cross_section,
                    connectivity=1, return_num=True)
                if nn != 1:
                    main_lbl = np.unique(label_cross_section[cent_ball])
                    main_lbl = main_lbl[np.nonzero(main_lbl)]
                    if len(main_lbl)!=1:
                        s_inx = s_inx+direction
                        continue
                    bw_cross_section = label_cross_section==main_lbl
        bound = boundary_parametrization(bw_cross_section)
        # boundary must wind around the grid center to be parametrizable
        if test_boundary_parametrization(bound, c_mesh) == False:
            s_inx = s_inx+direction
            continue
        #fig, ax=plt.subplots()
        #ax.plot(bound[:,1], bound[:,0], '-', linewidth=2, color='black')
        if count==1:
            # first accepted section seeds the running mean curve
            m_curve = mean_curve(bound, bound, 2, c_mesh, 0)
            max_radius = np.max(np.sum((m_curve-np.array(x.shape)/2)**2, axis=1)**0.5)
            p_inx = s_inx
            p_bound = bound
            p_shiftX = shiftX
            p_shiftY = shiftY
            count = count+1
            s_inx = s_inx+direction
        else:
            # accept while the new boundary stays Hausdorff-close to the mean
            H_dist = hausdorff_distance(bound, m_curve, len(m_curve))
            d_ratio = np.true_divide(H_dist, (H_dist+max_radius))
            #print d_ratio
            if d_ratio<H_th:
                m_curve = mean_curve(bound, m_curve, count, c_mesh, 0)
                max_radius = g_res*np.max(np.sum((m_curve-np.array(x.shape)/2)**2, axis=1)**0.5)
                p_inx = s_inx
                p_bound = bound
                p_shiftX = shiftX
                p_shiftY = shiftY
                count = count+1
                s_inx = s_inx+direction
            else:
                break
    return p_inx, p_bound, p_shiftX, p_shiftY
def test_boundary_parametrization(bound, c_mesh):
flag = True
p_bound = polar_parametrization(bound, c_mesh)
r,phi = cart2pol(p_bound[:,1]-c_mesh, p_bound[:,0]-c_mesh)
s_phi = phi; s_phi[1:] = phi[1:] + 0.0001
sign_change = np.where((s_phi[1:]*s_phi[:-1])<0)[0]
if len(sign_change) == 0:
flag = False
return flag
def find_junction_in_skeleton(parametrized_skel, junction_coordinate):
flag = False
main_junction_coordinates = []
for jc in junction_coordinate:
if jc in parametrized_skel:
flag = True
main_junction_coordinates.append(jc)
return flag, main_junction_coordinates
def crop_image(bw, point, rect):
sR = max(point[0]-rect[0], 0)
eR = min(point[0]+rect[0], bw.shape[0])
sC = max(point[1]-rect[1], 0)
eC = min(point[1]+rect[1], bw.shape[1])
sH = max(point[2]-rect[2], 0)
eH = min(point[2]+rect[2], bw.shape[2])
bbw = bw[sR:eR,sC:eC,sH:eH]
new_point = point - np.array([sR,sC,sH])
return bbw, new_point
def zone_of_interest(cropAx, parametrized_skel, junction_coordinate):
mean_junction_coordinate = np.mean(junction_coordinate, axis=0)
dist2junc = np.sqrt(np.sum((parametrized_skel-mean_junction_coordinate)**2, axis=1))
min_dist_inx = np.argmin(dist2junc)
min_dist = np.min(dist2junc)
l_skel = len(parametrized_skel)-1
try:
sd = max(50, min_dist); rect = np.array([sd,sd,sd])
while True:
maximal_ball_radius_lb = maximal_inner_sphere(cropAx, parametrized_skel, junction_coordinate, rect)
if maximal_ball_radius_lb < sd:
break
else:
rect = rect+10
except:
maximal_ball_radius_lb = 2
maximal_ball_radius_lb = max(maximal_ball_radius_lb, min_dist, 4)
ss_inx = np.array([0, l_skel])
ee_inx = ss_inx
ub = 20; lb = 2
coeff_ub = np.linspace(ub, lb, 40)
maximal_ball_radius_ub = lb*maximal_ball_radius_lb
e_inx = dist2junc > maximal_ball_radius_ub
e_inx = np.where(np.logical_xor(e_inx[1:], e_inx[:-1]))[0]
if len(e_inx) == 2:
if e_inx[0] <= min_dist_inx <= e_inx[1]:
ee_inx = e_inx
elif len(e_inx) == 1:
if e_inx > min_dist_inx:
ee_inx[1] = e_inx
else:
ee_inx[0] = e_inx
for coeff in coeff_ub:
maximal_ball_radius_ub = coeff*maximal_ball_radius_lb
s_inx = dist2junc > maximal_ball_radius_ub
s_inx = np.where(np.logical_xor(s_inx[1:], s_inx[:-1]))[0]
if len(s_inx) == 2:
if s_inx[0] <= min_dist_inx <= s_inx[1]:
ss_inx = s_inx
break
if len(s_inx) == 1:
if s_inx > min_dist_inx:
ss_inx[1] = s_inx
else:
ss_inx[0] = s_inx
if ~(ss_inx[0] <= ee_inx[0] <= min_dist_inx):
ss_inx[0] = 0
ee_inx[0] = 0
if ~(min_dist_inx <= ee_inx[1] <= ss_inx[1]):
ss_inx[1] = l_skel
ee_inx[1] = l_skel
if ~(ss_inx[0] <= ee_inx[0] <= min_dist_inx <= ee_inx[1] <= ss_inx[1]):
ss_inx = [0, l_skel]
ee_inx = ss_inx
return ss_inx, ee_inx
def obj_ends_conditions(dist2junc):
if dist2junc[0] < dist2junc[-1]:
s_dist = min(max(dist2junc), 20)
s_inx = [0]
s_inx.append(np.argmin(np.abs(dist2junc - s_dist)))
s_dist = min(max(dist2junc), 5)
e_inx = [0]
e_inx.append(np.argmin(np.abs(dist2junc - s_dist)))
else:
s_dist = min(max(dist2junc), 20)
s_inx = [np.argmin(np.abs(dist2junc - s_dist))]
s_inx.append(len(dist2junc)-1)
s_dist = min(max(dist2junc), 5)
e_inx = [np.argmin(np.abs(dist2junc - s_dist))]
e_inx.append(len(dist2junc)-1)
return s_inx, e_inx
def maximal_inner_sphere(cropAx, parametrized_skel, junction_coordinate, rect):
mean_junction_coordinate = np.mean(junction_coordinate, axis=0)
f_jc = tuple(np.floor(mean_junction_coordinate).astype(np.int64))
if cropAx[f_jc] != 1:
dist2junc = np.sqrt(np.sum((parametrized_skel-mean_junction_coordinate)**2, axis=1))
min_dist_inx = np.argmin(dist2junc)
l = min(min_dist_inx, len(dist2junc)-min_dist_inx)
for i in range(l):
f_jc = parametrized_skel[min_dist_inx+i]
f_jc = tuple(np.floor(f_jc).astype(np.int64))
if cropAx[f_jc] == 1:
break
else:
f_jc = parametrized_skel[min_dist_inx-i]
f_jc = tuple(np.floor(f_jc).astype(np.int64))
if cropAx[f_jc] == 1:
break
crop_obj, njc = crop_image(cropAx, f_jc, rect)
D = skfmm.distance(crop_obj)
boundary = ((D!=0)==(D<=1))
im_one = np.ones_like(crop_obj)
im_one[tuple(njc)] = 0
D = skfmm.travel_time(im_one, crop_obj)
dist_on_boundary = D[boundary]
maximal_ball_radius_lb = np.min(dist_on_boundary)
return maximal_ball_radius_lb
def corresponding_skel(im,final_skeleton,main_branches):
c_skel=[]
for i in main_branches:
main_branch=np.floor(final_skeleton[i]).astype(np.int64)
count=0
for coord in main_branch:
if im[tuple(coord)]==1:
count += 1
if np.true_divide(count,len(main_branch))>0.8:
c_skel.append(final_skeleton[i])
return c_skel
def detect_main_obj(obj, corrected_skeleton):
count = 0
flag = False
f_skel = np.floor(corrected_skeleton).astype(np.int64)
f_skel = np.unique(f_skel, axis=0)
for point in f_skel:
if obj[tuple(point)]:
count = count+1
if np.true_divide(count, len(f_skel)) > 0.6:
flag = True
return flag
def boundary_parametrization(bw):
sz=np.array(bw.shape)+2
p_bw=np.zeros(sz)
p_bw[1:-1,1:-1]=bw
f_0=np.array(np.unravel_index(np.argmax(p_bw),sz))
Cor=f_0
nCor=f_0+1
bound=[f_0]
x=[0,-1, 0,1]
y=[1, 0,-1,0]
move=np.array((x,y)).T
direc=2
while np.any(nCor!=f_0):
Temp_dir=np.mod(direc+3,4)
for i in range(4):
nCor=Cor+move[Temp_dir]
if p_bw[tuple(nCor)]:
direc=Temp_dir
Cor=nCor
bound.append(nCor)
break
Temp_dir=Temp_dir+1
if Temp_dir==4:
Temp_dir=0
bound=np.array(bound)-1
return bound
def mean_curve(curve1, curve2, num_samp, c_mesh, vis):
curve1 = polar_parametrization(curve1, c_mesh)
curve1 = polar_interpolation(curve1, c_mesh)
if num_samp==2:
curve2=polar_parametrization(curve2, c_mesh)
curve2=polar_interpolation(curve2, c_mesh)
m_curve=np.true_divide(np.sum((curve1,(num_samp-1)*curve2),axis=0),num_samp)
if vis:
fig, ax=plt.subplots()
ax.plot(curve1[:,1], curve1[:,0], '-', linewidth=2, color='black')
ax.plot(curve2[:,1], curve2[:,0], '-', linewidth=2, color='red')
ax.plot(m_curve[:,1], m_curve[:,0], '-', linewidth=2, color='blue')
ax.set_xlim([0,120])
ax.set_ylim([120,0])
return m_curve
def interpolated_super_tube(curve1, curve2, num_steps):
polygons = []
curve1_coeff = np.linspace(1,0,num_steps)
curve2_coeff = 1 - curve1_coeff
for i in range(len(curve1_coeff)):
polygon = np.sum((curve1_coeff[i]*curve1, curve2_coeff[i]*curve2), axis=0)
#fig, ax=plt.subplots()
#ax.plot(polygon[:,1], polygon[:,0], '-', linewidth=2, color='black')
#ax.set_xlim([0,120])
#ax.set_ylim([120,0])
polygons.append(polygon)
return polygons
def curve_interp(curve,c_sampling):
sz=curve.shape
interpolated_curve=np.empty((c_sampling,0))
x=range(sz[0])
xnew=np.linspace(0,len(curve)-1,c_sampling)
for i in range(sz[1]):
y=curve[:,i]
f=interp1d(x,y)
interp_f=np.expand_dims(f(xnew),axis=1)
interpolated_curve=np.append(interpolated_curve,interp_f,axis=1)
return interpolated_curve
def object_decomposition(obj, interpolated_skel, filled_cs, g_radius=15, g_res=0.25):
sz = obj.shape
x, y = np.mgrid[-g_radius:g_radius:g_res, -g_radius:g_radius:g_res]
z = np.zeros_like(x)
xyz = np.array([np.ravel(x), np.ravel(y), np.ravel(z)]).T
tangent_vecs = unit_tangent_vector(interpolated_skel)
for i in range(len(interpolated_skel)):
point = interpolated_skel[i]
utv = tangent_vecs[i]
if np.array_equal(utv, [0, 0, 0]):
continue
rot_axis = pr.unit_normal_vector(utv, np.array([0,0,1]))
theta = pr.angle(utv, np.array([0,0,1]))
rot_mat = pr.rotation_matrix_3D(rot_axis, theta)
rotated_plane = pr.rotate_vector(xyz, rot_mat)
cross_section_plane = rotated_plane+point
cs = np.ravel(filled_cs[i])
discrete_coordinates = np.round(cross_section_plane).astype(np.int64)
for ii in range(len(discrete_coordinates)):
inx = discrete_coordinates[ii]
if np.all(inx>=0) and np.all(inx<=np.array(sz)-1):
if cs[ii]==0:
obj[tuple(inx)] = 0
else:
obj[tuple(inx)] = 1
return obj
def filling_cross_sections(st_cross_sections, g_radius, g_res):
ep = np.array(2*g_radius/g_res, dtype=np.int64)
x, y = np.mgrid[0:ep, 0:ep]
filled_cs = []
for cs in st_cross_sections:
p = path.Path(cs)
f_cs = p.contains_points(np.hstack((np.ravel(x)[:,np.newaxis], np.ravel(y)[:,np.newaxis])))
f_cs = np.reshape(f_cs, (ep,ep))
filled_cs.append(f_cs)
return filled_cs
def junction_correction(cropAx, parametrized_skel, main_junction_coordinates,
        g_radius, g_res, H_th, shift_impose, Euler_step_size):
    """Replace the skeleton segment spanning a junction with a straight
    interpolated "super-tube".

    Finds stable cross-sections on both sides of the junction, blends their
    boundary curves and endpoints linearly across the gap, and splices the
    interpolated points into the skeleton.  Returns (interpolated
    cross-section curves, interpolated skeleton points, corrected skeleton).
    """
    s_inxs, e_inxs = zone_of_interest(cropAx, parametrized_skel, main_junction_coordinates)
    # scan forward (+1) from the near side of the junction
    s_inx = s_inxs[0]
    e_inx = e_inxs[0]
    p_inx1, bound1, shiftX1, shiftY1 = tangent_planes_to_zone_of_interest(cropAx, parametrized_skel,
        s_inx, e_inx, g_radius, g_res, shift_impose, +1, H_th)
    point1 = parametrized_skel[p_inx1]
    # scan backward (-1) from the far side of the junction
    s_inx = s_inxs[1]
    e_inx = e_inxs[1]
    p_inx2, bound2, shiftX2, shiftY2 = tangent_planes_to_zone_of_interest(cropAx, parametrized_skel,
        s_inx, e_inx, g_radius, g_res, shift_impose, -1, H_th)
    point2 = parametrized_skel[p_inx2]
    c_mesh = (2*g_radius)/(2*g_res)
    # NOTE(review): under Python 3 this division yields a float, and
    # np.ones((c_mesh, 2)) below would reject it — presumably this code ran
    # under Python 2 integer division; verify before reuse.
    if len(bound1) == 0:
        # no valid cross-section found: degenerate point-curve at the center
        curve1 = c_mesh * np.ones((c_mesh,2), dtype=np.int64)
    else:
        curve1 = polar_parametrization(bound1, c_mesh)
        curve1 = polar_interpolation(curve1, c_mesh)
    if len(bound2) == 0:
        curve2 = c_mesh * np.ones((c_mesh,2), dtype=np.int64)
    else:
        curve2 = polar_parametrization(bound2, c_mesh)
        curve2 = polar_interpolation(curve2, c_mesh)
    if shift_impose:
        # undo the centroid re-centering applied during the scans
        curve1 = curve1 - np.array([shiftY1, shiftX1])
        curve2 = curve2 - np.array([shiftY2, shiftX2])
    # number of blend steps proportional to the gap length
    num_steps = np.floor(np.sqrt(np.sum((point2-point1)**2)) / Euler_step_size).astype(np.int64)
    st_cross_sections = interpolated_super_tube(curve1, curve2, num_steps)
    interpolated_skel = interpolated_super_tube(point1, point2, num_steps)
    # splice: head up to p_inx1, the interpolated span, tail after p_inx2
    corrected_skeleton = parametrized_skel[:p_inx1]
    corrected_skeleton = np.append(corrected_skeleton, interpolated_skel, axis=0)
    corrected_skeleton = np.append(corrected_skeleton, parametrized_skel[p_inx2+1:], axis=0)
    return st_cross_sections, interpolated_skel, corrected_skeleton
def object_analysis(obj, skel):
decomposed_objs = []
decomposed_skeletons = []
sub_skeletons = skeleton_main_branch(skel)
for s in sub_skeletons:
junction_coordinates = s['dec_nodes']
sub_skeleton = s['skeleton']
rec_obj = obj.copy()
for junction_coordinate in junction_coordinates:
st_cross_sections, interpolated_skel, sub_skeleton = junction_correction(rec_obj, sub_skeleton, junction_coordinate,
g_radius=15, g_res=0.25, H_th=0.7, shift_impose=1, Euler_step_size=0.5)
interpolated_skel = np.array(interpolated_skel)
filled_cs = filling_cross_sections(st_cross_sections, g_radius=15, g_res=0.25)
rec_obj = object_decomposition(rec_obj, interpolated_skel, filled_cs, g_radius=15, g_res=0.25)
labeled_obj = label(rec_obj, connectivity=1)
for region in regionprops(labeled_obj):
if region.area>=len(sub_skeleton):
dec_obj = np.zeros(labeled_obj.shape, dtype=bool)
for coordinates in region.coords:
dec_obj[coordinates[0], coordinates[1], coordinates[2]] = True
if detect_main_obj(dec_obj, sub_skeleton):
decomposed_objs.append(dec_obj)
decomposed_skeletons.append(sub_skeleton)
break
return decomposed_objs, decomposed_skeletons
| Python |
3D | aAbdz/CylShapeDecomposition | CSD/skeleton3D.py | .py | 7,991 | 304 | # -*- coding: utf-8 -*-
import numpy as np
import skfmm
import sys
def discrete_shortest_path(D,start_point):
sz = D.shape
x = [0, 1,-1, 0, 0, 1, 1,-1,-1, 0, 1,-1, 0, 0, 1, 1,-1,-1, 1,-1, 0, 0, 1, 1,-1,-1]
y = [0, 0, 0, 1,-1, 1,-1, 1,-1, 0, 0, 0, 1,-1, 1,-1, 1,-1, 0, 0, 1,-1, 1,-1, 1,-1]
z = [1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0]
path = [start_point]
min_v = np.inf
while(min_v!=0):
neighbor_inx = np.array((x,y,z)).T
ngb = start_point + neighbor_inx
valid_ngb_inx = np.where(np.all((np.all(ngb>=0,axis=1), np.all(ngb<sz,axis=1)), axis=0))
ngb = ngb[valid_ngb_inx]
ngb_value = [D[tuple(i)] for i in ngb]
min_ind = np.argmin(ngb_value)
min_v = ngb_value[min_ind]
start_point = ngb[min_ind]
path.append(start_point)
path = np.array(path)
return path
def pointmin(D):
sz = D.shape
max_D = np.max(D)
Fx = np.zeros(sz)
Fy = np.zeros(sz)
Fz = np.zeros(sz)
J = max_D * np.ones(np.array(sz)+2)
J[1:-1,1:-1,1:-1] = D
x = [0, 1,-1, 0, 0, 1, 1,-1,-1, 0, 1,-1, 0, 0, 1, 1,-1,-1, 1,-1, 0, 0, 1, 1,-1,-1]
y = [0, 0, 0, 1,-1, 1,-1, 1,-1, 0, 0, 0, 1,-1, 1,-1, 1,-1, 0, 0, 1,-1, 1,-1, 1,-1]
z = [1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0]
#x = [1,-1, 0, 0, 0, 0]
#y = [0, 0, 1,-1, 0, 0]
#z = [0, 0, 0, 0, 1,-1]
for i in range(26):
In = J[1+x[i]:1+sz[0]+x[i], 1+y[i]:1+sz[1]+y[i], 1+z[i]:1+sz[2]+z[i]]
check = In<D
D[check] = In[check]
den = (x[i]**2 + y[i]**2 + z[i]**2)**0.5
Fx[check]= x[i]/den
Fy[check]= y[i]/den
Fz[check]= z[i]/den
return Fx, Fy, Fz
def euler_shortest_path(D,source_point,start_point,step_size):
Fx, Fy, Fz = pointmin(D)
Fx = -Fx
Fy = -Fy
Fz = -Fz
itr = 0
path = start_point
while True:
end_point = Euler_path(Fx,Fy,Fz,start_point,step_size)
dist_endpoint_to_all = np.sum((source_point-end_point)**2,axis=1)**0.5
distance_to_endpoint = min(dist_endpoint_to_all)
if(itr>=10):
Movement = np.sum((end_point-path[itr-10])**2)**0.5
else:
Movement = step_size+1
if(np.all(end_point==0) or Movement<step_size):
break
itr = itr+1
path = np.append(path,end_point,axis=0)
if(distance_to_endpoint<10*step_size):
source_inx = source_point[np.argmin(dist_endpoint_to_all)]
path = np.append(path,np.array(source_inx,ndmin=2),axis=0)
break
start_point = end_point
return path
def Euler_path(Fx,Fy,Fz,start_point,step_size):
f_start_point = np.floor(start_point).astype(int)
sz = Fx.shape
x = [0, 0, 0, 0, 1, 1, 1, 1]
y = [0, 0, 1, 1, 0, 0, 1, 1]
z = [0, 1, 0, 1, 0, 1, 0, 1]
neighbor_inx = np.array((x,y,z)).T
base = f_start_point + neighbor_inx
base[base<0] = 0
xbase=base[:,0]; xbase[xbase>=sz[0]]=sz[0]-1
ybase=base[:,1]; ybase[ybase>=sz[1]]=sz[1]-1
zbase=base[:,2]; zbase[zbase>=sz[2]]=sz[2]-1
base=np.array((xbase,ybase,zbase)).T
dist2f=np.squeeze(start_point-f_start_point)
dist2c=1-dist2f
perc = np.array(( dist2c[0]*dist2c[1]*dist2c[2],
dist2c[0]*dist2c[1]*dist2f[2],
dist2c[0]*dist2f[1]*dist2c[2],
dist2c[0]*dist2f[1]*dist2f[2],
dist2f[0]*dist2c[1]*dist2c[2],
dist2f[0]*dist2c[1]*dist2f[2],
dist2f[0]*dist2f[1]*dist2c[2],
dist2f[0]*dist2f[1]*dist2f[2] ))
gradient_valueX=[Fx[tuple(i)] for i in base]*perc
gradient_valueY=[Fy[tuple(i)] for i in base]*perc
gradient_valueZ=[Fz[tuple(i)] for i in base]*perc
gradient_value=np.array((gradient_valueX,gradient_valueY,gradient_valueZ))
sum_g=np.sum(gradient_value,axis=1)
gradient=sum_g/((np.sum(sum_g**2)+0.000001)**0.5)
end_point = start_point - step_size*gradient
if (np.any(end_point<0) or end_point[0,0]>sz[0] or end_point[0,1]>sz[1] or end_point[0,2]>sz[2]):
end_point=np.zeros((1,3))
return end_point
def get_line_length(L):
dist = np.sum(np.sum((L[1:] - L[:-1])**2,axis=1)**0.5)
return dist
def organize_skeleton(skel_seg,length_th):
    """Split skeleton segments where other segments' endpoints touch them,
    and drop pieces shorter than ``length_th``.

    :param skel_seg: list of (K x 3) point arrays, one per skeleton branch
    :param length_th: float, minimum length a (sub)segment must have to be kept
    :return: list of ndarray segments forming the cleaned skeleton
    """
    final_skeleton = []
    n = len(skel_seg)
    # Both endpoints of every segment, two rows per segment.
    end_points = np.zeros((n*2,3))
    l = 0
    for i in range(n):
        ss = skel_seg[i]
        # NOTE(review): `l` tracks the longest segment length but is never
        # used below -- confirm whether it is leftover code.
        l = max(l,len(ss))
        end_points[i*2] = ss[0]
        end_points[i*2+1] = ss[-1]
    # Threshold on *squared* distance, i.e. Euclidean distance < sqrt(2).
    connecting_distance = 2
    for i in range(n):
        ss = np.asarray(skel_seg[i])
        # D: pairwise squared distances between every endpoint (rows) and
        # every point of the current segment (columns).
        ex = np.reshape(end_points[:,0],(-1,1)); ex = np.repeat(ex,len(ss),axis=1)
        sx = np.reshape(ss[:,0],(1,-1)); sx = np.repeat(sx,len(end_points),axis=0)
        ey = np.reshape(end_points[:,1],(-1,1)); ey = np.repeat(ey,len(ss),axis=1)
        sy = np.reshape(ss[:,1],(1,-1)); sy = np.repeat(sy,len(end_points),axis=0)
        ez = np.reshape(end_points[:,2],(-1,1)); ez = np.repeat(ez,len(ss),axis=1)
        sz = np.reshape(ss[:,2],(1,-1)); sz = np.repeat(sz,len(end_points),axis=0)
        D = (ex-sx)**2 + (ey-sy)**2 + (ez-sz)**2
        # Endpoints that come close to this segment ...
        check = np.amin(D, axis=1) < connecting_distance
        # ... excluding this segment's own two endpoints.
        check[i*2] = False
        check[i*2+1] = False
        cut_skel = [0,len(ss)]
        if(any(check)):
            for ii in range(len(check)):
                if(check[ii]):
                    line = D[ii]
                    min_ind = np.ma.argmin(line)
                    # Only cut well away from the segment's own ends.
                    if((min_ind>2) and (min_ind<(len(line)-2))):
                        cut_skel.append(min_ind)
        # Break the segment at the collected indices; keep pieces long enough.
        cut_skel = sorted(cut_skel)
        for j in range(len(cut_skel)-1):
            skel_breaked_seg = ss[cut_skel[j]:cut_skel[j+1]]
            length_skel_seg = get_line_length(skel_breaked_seg)
            if(length_skel_seg >= length_th):
                final_skeleton.append(skel_breaked_seg)
    return final_skeleton
def skeleton(Ax):
    """Extract a curve skeleton from a volume using fast marching.

    Repeatedly traces the gradient-descent path from the farthest point of a
    travel-time map back to the growing source set, accepting each path as a
    skeleton branch until new branches fall below a length threshold.

    :param Ax: 3D array accepted by skfmm.distance (zero contour convention
        per scikit-fmm) -- presumably a signed/binary object map; confirm.
    :return: list of (K x 3) float arrays, the organized skeleton segments
    """
    # Distance to the object boundary; its maximum is the most interior voxel.
    boundary_dist=skfmm.distance(Ax)
    source_point=np.unravel_index(np.argmax(boundary_dist), boundary_dist.shape)
    maxD=boundary_dist[source_point]
    # Speed image favoring voxels deep inside the object.
    speed_im=(boundary_dist/maxD)**1.5
    # Ax is re-used as the travel-time zero contour: source voxels are 0.
    Ax=np.ones(Ax.shape)
    Ax[source_point]=0
    flag=True
    skeleton_segments=[]
    source_point = np.array(source_point,ndmin=2)
    while True:
        # Travel time from the current source set; the farthest reachable
        # point is the tip of the next candidate branch.
        D=skfmm.travel_time(Ax,speed_im)
        end_point=np.unravel_index(np.ma.argmax(D), D.shape)
        max_dist=D[end_point]
        D=np.ma.MaskedArray.filled(D,max_dist)
        end_point = np.array(end_point,ndmin=2)
        # Trace the minimal path from the tip back to the source set.
        shortest_line=euler_shortest_path(D,source_point,end_point,step_size=0.1)
        #shortest_line = discrete_shortest_path(D,end_point)
        line_length=get_line_length(shortest_line)
        print(line_length)
        if flag:
            # Threshold fixed once, from the first (longest) branch.
            length_threshold=min(40*maxD, 0.18*line_length)
            flag=False
        if(line_length<=length_threshold):
            break
        # Accept the branch: its points join the source set for the next pass.
        source_point=np.append(source_point,shortest_line,axis=0)
        skeleton_segments.append(shortest_line)
        shortest_line=np.floor(shortest_line).astype(int)
        for i in shortest_line:
            Ax[tuple(i)]=0
    if len(skeleton_segments)!=0:
        final_skeleton=organize_skeleton(skeleton_segments,length_threshold)
    else:
        final_skeleton=[]
    return final_skeleton
if __name__ == "__main__":
    # NOTE(review): skeleton() operates on an array, but sys.argv[1] is a
    # string path -- a volume-loading step appears to be missing; confirm.
    skeleton(sys.argv[1])
| Python |
3D | aAbdz/CylShapeDecomposition | CSD/plane_rotation.py | .py | 1,117 | 41 | # -*- coding: utf-8 -*-
import numpy as np
def rotate_vector(vector, rot_mat):
    """Rotate a (row) vector by right-multiplying it with a rotation matrix."""
    return vector @ rot_mat
def rotation_matrix_3D(vector, theta):
    """Rotation matrix for a counterclockwise rotation of ``theta`` radians
    about the unit axis ``vector``, built via the Euler-Rodrigues formula:
    https://en.wikipedia.org/wiki/Euler-Rodrigues_formula
    """
    half = theta / 2.0
    a = np.cos(half)
    b, c, d = -np.sin(half) * vector
    # Quaternion-style products reused across matrix entries.
    rot_mat = np.array([
        [a*a + b*b - c*c - d*d, 2.0*(b*c + a*d),       2.0*(b*d - a*c)],
        [2.0*(b*c - a*d),       a*a + c*c - b*b - d*d, 2.0*(c*d + a*b)],
        [2.0*(b*d + a*c),       2.0*(c*d - a*b),       a*a + d*d - b*b - c*c],
    ])
    return rot_mat
def unit_normal_vector(vec1, vec2):
    """Unit vector normal to the plane spanned by vec1 and vec2.

    Falls back to vec1 when the cross product is zero (parallel inputs);
    the norm is floored at 1e-5 to avoid division by zero.
    """
    normal = np.cross(vec1, vec2)
    if not normal.any():
        normal = vec1
    norm = np.sqrt(np.dot(normal, normal))
    return normal / max(norm, 1e-5)
def angle(vec1, vec2):
    """Angle in radians between vec1 and vec2, from the normalised dot product."""
    cos_theta = np.dot(vec1, vec2) / (np.sqrt(np.dot(vec1, vec1)) * np.sqrt(np.dot(vec2, vec2)))
    return np.arccos(cos_theta)
| Python |
3D | aAbdz/CylShapeDecomposition | CSD/hausdorff_distance.py | .py | 425 | 13 | # -*- coding: utf-8 -*-
import numpy as np
from scipy.spatial.distance import directed_hausdorff
def hausdorff_distance(curve1,curve2,n_sampling):
    """Symmetric Hausdorff distance between two curves, each subsampled at
    ``n_sampling`` (approximately) evenly spaced points."""
    idx1 = np.floor(np.linspace(0, len(curve1) - 1, n_sampling)).astype(int)
    idx2 = np.floor(np.linspace(0, len(curve2) - 1, n_sampling)).astype(int)
    sub1, sub2 = curve1[idx1], curve2[idx2]
    # Symmetrise the directed distance by taking the larger direction.
    forward = directed_hausdorff(sub1, sub2)[0]
    backward = directed_hausdorff(sub2, sub1)[0]
    return max(forward, backward)
3D | aAbdz/CylShapeDecomposition | CSD/polar_interpolation.py | .py | 956 | 40 | # -*- coding: utf-8 -*-
import numpy as np
from coord_conv import cart2pol, pol2cart
from scipy.interpolate import interp1d
def polar_interpolation(curve, c_mesh):
    """Resample a closed curve at fixed 6-degree polar-angle intervals.

    The curve (N x 2, (row, col) coordinates) is converted to polar
    coordinates about the mesh centre ``c_mesh``, reordered across the
    angular sign change into one monotonic sweep, linearly interpolated on a
    0..354-degree grid, and converted back to Cartesian coordinates.

    Assumes cart2pol/pol2cart work in degrees (phi is shifted by 360 below
    and the grid is range(0, 360, 6)) -- confirm against coord_conv.
    """
    r, phi = cart2pol(curve[:, 1] - c_mesh, curve[:, 0] - c_mesh)
    # Perturb all angles but the first away from exact zero so the
    # sign-change search below cannot miss a crossing.  Work on a copy:
    # the original code aliased phi here (`s_phi = phi`), mutating phi in
    # place and leaking the 0.0001 offset into the interpolation grid.
    s_phi = phi.copy()
    s_phi[1:] = phi[1:] + 0.0001
    sign_change = np.where((s_phi[1:] * s_phi[:-1]) < 0)[0]
    # Split at the first sign change; unwrap the leading part by +360 so the
    # two halves concatenate into a single monotonic angular sweep.
    phi1 = phi[:sign_change[0] + 1] + 360
    rho1 = r[:sign_change[0] + 1]
    phi1 = np.flip(phi1, axis=0)
    rho1 = np.flip(rho1, axis=0)
    phi2 = phi[sign_change[0] + 1:]
    rho2 = r[sign_change[0] + 1:]
    phi2 = np.flip(phi2, axis=0)
    rho2 = np.flip(rho2, axis=0)
    nnphi = np.append(phi2, phi1)
    nnr = np.append(rho2, rho1)
    interp_phi = np.array(range(0, 360, 6))
    # Clamp (rather than extrapolate) outside the sampled angular range.
    f = interp1d(nnphi, nnr, bounds_error=False, fill_value=tuple([nnr[0], nnr[-1]]))
    interp_rho = f(interp_phi)
    x, y = pol2cart(interp_rho, interp_phi)
    curve = np.array([y + c_mesh, x + c_mesh]).T
    return curve
| Python |
3D | aAbdz/CylShapeDecomposition | CSD/polar_parametrization.py | .py | 1,161 | 40 | # -*- coding: utf-8 -*-
import numpy as np
from coord_conv import cart2pol, pol2cart
def polar_parametrization(curve, c_mesh):
    """Re-parametrize a closed curve as a single monotonic polar sweep about
    the mesh centre ``c_mesh``.

    The curve (N x 2, (row, col)) is converted to polar angles, rotated so
    it starts at the negative angle closest to zero, cleaned of points that
    backtrack in angle, and truncated once the sweep passes its starting
    angle again.

    Assumes cart2pol returns angles in degrees (see the 350-degree wrap test
    below) -- TODO confirm against coord_conv.
    """
    r,phi = cart2pol(curve[:,1]-c_mesh,curve[:,0]-c_mesh)
    # Rotate the curve so it starts at the negative angle closest to zero.
    s=phi<0
    s_inx=np.where(s)[0]
    s_inx=s_inx[np.argmin(abs(phi[s_inx]))]
    nphi=np.append(phi[s_inx:],phi[:s_inx])
    nr=np.append(r[s_inx:],r[:s_inx])
    # Up to three passes of removing points that reverse the sweep direction.
    for i in range(3):
        d_ang=np.diff(nphi)
        d_ang=np.append(nphi[0],d_ang)
        cw_direction=np.sign(d_ang)>=0
        if sum(cw_direction)>(len(cw_direction)/2):
            # NOTE(review): this bare string is a no-op; the "mostly
            # reversed" case is silently left unhandled -- confirm intent.
            'error'
        else:
            # Drop reversing points, but keep jumps >= 350 degrees, which
            # are the +/-180-degree wrap rather than real backtracking.
            cw_dir=np.where(cw_direction)[0]
            cw_dir=cw_dir[abs(d_ang[cw_direction])<350]
            nr=np.delete(nr,cw_dir)
            nphi=np.delete(nphi,cw_dir)
    # Truncate points that sweep past the starting angle a second time.
    sign_change=np.where((nphi[1:]*nphi[:-1])<0)[0]
    if len(sign_change)>1:
        over_st_point=np.where(nphi<nphi[0])[0]
        over_st_point=over_st_point[over_st_point>sign_change[1]]
        nr=np.delete(nr,over_st_point)
        nphi=np.delete(nphi,over_st_point)
    x,y=pol2cart(nr,nphi)
    curve=np.array([y+c_mesh,x+c_mesh]).T
    return curve
3D | aAbdz/CylShapeDecomposition | CSD/unit_tangent_vector.py | .py | 298 | 14 | # -*- coding: utf-8 -*-
import numpy as np
def unit_tangent_vector(curve):
d_curve = np.gradient(curve, axis=0)
ds = np.expand_dims((np.sum(d_curve**2, axis=1))**0.5, axis=1)
ds[ds==0] = 1e-5
u_tang_vec = d_curve/np.repeat(ds, curve.shape[1], axis=1)
return u_tang_vec
| Python |
3D | kkhuang1990/PlaqueDetection | lr_scheduler.py | .py | 572 | 18 | # _*_ coding: utf-8 _*_
""" define custom learning rate scheduler """
from __future__ import print_function
from torch.optim.lr_scheduler import _LRScheduler
class PolyLR(_LRScheduler):
    """Polynomial-decay learning-rate scheduler.

    Each step yields ``base_lr * (1 - last_epoch / max_iter) ** power``.
    """
    def __init__(self, optimizer, max_iter=100, power=0.9, last_epoch=-1):
        # max_iter: epoch at which the learning rate reaches zero
        # power: decay exponent of the polynomial schedule
        self.max_iter = max_iter
        self.power = power
        super(PolyLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        # Shared decay factor applied to every parameter group's base lr.
        decay = (1.0 - self.last_epoch / self.max_iter) ** self.power
        return [base_lr * decay for base_lr in self.base_lrs]
3D | kkhuang1990/PlaqueDetection | snake.py | .py | 3,786 | 95 | # _*_ coding: utf-8 _*_
""" use morphological operations and Snake to obtain single-pixel contour
from prediction results
"""
import matplotlib as mpl
mpl.use('Agg')
import torch
from torch.autograd import Variable
import warnings
warnings.filterwarnings('ignore', category=RuntimeWarning, module='scipy')
import numpy as np
from skimage.segmentation import morphological_geodesic_active_contour, inverse_gaussian_gradient
from multiprocessing import Pool
from utils import lslist2bound
from skimage import img_as_float
import cv2 as cv2
def probmap2bound_slicewise(i, prob_map_b, thres=0.7, ks=9):
    """ obtain boundary from probability map for each slice
    :param i: int, slice index; kept only for Pool.starmap argument order,
        not referenced in the body
    :param prob_map_b: ndarray of size [B, H, W], probability map
    :param thres: float, thres for filtering out pixels with prob lower than given thres
    :param ks: int, kernel size for the morphological closing
    :return: reg: combined bound map built from the per-channel contours
    """
    n_channel, height, width = prob_map_b.shape
    iter_max = 30
    lses = []
    for bound_inx in range(1, n_channel): # inner and outer bound
        prob_map_bb = prob_map_b[bound_inx]
        # Binarize the probability map at the given threshold.
        pred_filter = (prob_map_bb >= thres).astype(np.uint8)
        kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ks, ks))
        for inx in range(iter_max):
            # Grow the closing until an acceptable contour appears.
            image_close = cv2.morphologyEx(pred_filter, cv2.MORPH_CLOSE, kernel_close, iterations=inx+1)
            # NOTE(review): the 3-value findContours return is the OpenCV 3.x
            # API; OpenCV 4.x returns (contours, hierarchy) -- confirm version.
            _, contours, _ = cv2.findContours(image_close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            # Keep contours with enough points and area-per-point >= 4.
            contours = [contour for contour in contours
                        if len(contour) > 4 and cv2.contourArea(contour) / len(contour) >= 4.0]
            if len(contours) > 0: # find the optimal number of iterations
                break
        if len(contours) > 0:
            # Rasterize the accepted contours as a filled mask.
            mask = np.zeros(image_close.shape[:2], np.uint8)
            ls = cv2.drawContours(mask, contours, -1, 1, -1)
        else: # use Snake to find contours if cv2.findContours doesn't work well
            if bound_inx == 1:
                gimage = inverse_gaussian_gradient(img_as_float(image_close), alpha=100, sigma=5.0)
            else:
                gimage = inverse_gaussian_gradient(img_as_float(image_close), alpha=100, sigma=3.0)
            # Start from a box slightly inside the image and shrink (balloon=-1).
            init = np.zeros(gimage.shape, dtype=np.int8)
            init[5:-5, 5:-5] = 1
            ls = morphological_geodesic_active_contour(gimage, 100, init, smoothing=1,
                                                       balloon=-1, threshold='auto')
        lses.append(ls)
    reg = lslist2bound(lses)
    return reg
def probmap2bound(prob_map, n_workers=32, thres=0.7, kernel_size=9):
    """ calculate constrained boundary from probmap
    this function accepts both ndarray and tensor inputs
    :param prob_map: Tensor/Ndarray of size [B, C, D, H, W], probability map as the Network output
    :param n_workers: int, size of the multiprocessing pool for slice-wise work
    :param thres: float, probability threshold forwarded to probmap2bound_slicewise
    :param kernel_size: int, closing kernel size forwarded to probmap2bound_slicewise
    :return: bounds_cuda: Tensor/Ndarray of size [B, D, H, W], obtained closed contour
    """
    if not isinstance(prob_map, np.ndarray): # convert tensor into ndarray
        # prob_map = F.softmax(prob_map, 1)
        if prob_map.dim() == 5: # 3D volume: fold the depth dim into the batch dim
            prob_map = prob_map.permute(0, 2, 1, 3, 4)
            prob_map = prob_map.contiguous().view(-1, *prob_map.size()[2:]) # combine first 2 dims
    # NOTE(review): despite the docstring, a plain ndarray input would fail
    # at .data.cpu() below -- only the tensor path appears exercised; confirm.
    prob_map_np = prob_map.data.cpu().numpy() # [B', C, H, W]
    batch_size, n_channel, height, width = prob_map_np.shape
    # One pool task per (possibly depth-folded) slice.
    args = []
    for b in range(batch_size):
        args.append((b, prob_map_np[b], thres, kernel_size))
    pool = Pool(processes=n_workers)
    bounds = pool.starmap(probmap2bound_slicewise, args)
    pool.close()
    bounds = np.stack(bounds).astype(np.uint8)
    bounds_cuda = Variable(torch.from_numpy(bounds).cuda()).long() # convert ndarray into tensor
    return bounds_cuda
3D | kkhuang1990/PlaqueDetection | loss.py | .py | 54,878 | 1,255 | # _*_ coding: utf-8 _*_
""" define custom loss functions """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.nn import CrossEntropyLoss
from skimage import io
from utils import gray2mask, gray2innerouterbound, gray2bound
import torch
from matplotlib import pyplot as plt
from torch import nn
from torch.autograd import Function
torch.set_default_dtype(torch.float32)
# from mpl import mpl
import math
import numpy as np
from sklearn.preprocessing import label_binarize
from scipy import ndimage
torch.set_default_dtype(torch.float32)
def _assert_no_grad(variables):
    """Assert that no tensor in ``variables`` requires gradients (loss targets must not)."""
    message = ("nn criterions don't compute the gradient w.r.t. targets - please "
               "mark these variables as volatile or not requiring gradients")
    for variable in variables:
        assert not variable.requires_grad, message
class DiceLoss(Function):
    """ Normal Dice Loss for multi-class segmentation.

    Per-class soft Dice is computed from softmax probabilities against a
    one-hot encoding of the target; classes absent from the GT are skipped
    (or, with ``cal_zerogt``, scored on the complement mask instead).

    :param weight: optional per-class weight vector; if None, derived from
        ``weight_type`` or defaults to all ones (allocated on CUDA below)
    :param ignore_index: optional label excluded from the loss
    :param weight_type: None, 'nlf' (negative log of class frequency) or
        'mfb' (median frequency balancing), used only when weight is None
    :param reduce: bool, return a scalar instead of per-sample/per-class loss
    :param cal_zerogt: bool, whether calculate Dice for case of all GT pixels are zero
    """
    def __init__(self, weight=None, ignore_index=None, weight_type=None, reduce=True, cal_zerogt=False):
        self.weight = weight
        self.ignore_index = ignore_index
        self.weight_type = weight_type
        self.reduce = reduce
        self.cal_zerogt = cal_zerogt # whether calculate Dice for case of all GT pixels are zero
    def __call__(self, output, target):
        # output : N x C x *, Variable of float tensor (* means any dimensions)
        # target : N x *, Variable of long tensor (* means any dimensions)
        # weights : C, float tensor
        # ignore_index : int, class index to ignore from loss (0 for background)
        smooth = 1.0e-9
        output = F.softmax(output, dim=1)
        n_classes = output.size(1)
        # When target is not already one-hot (size differs), one-hot encode it.
        if output.size() != target.size():
            target = target.data
            encoded_target = output.data.clone().zero_() # make output size array and initialize with zeros
            if self.ignore_index is not None:
                # Map ignored pixels to class 0 for the scatter, then zero them out.
                mask = (target == self.ignore_index)
                target = target.clone()
                target[mask] = 0
                encoded_target.scatter_(1, target.unsqueeze(1), 1)
                mask = mask.unsqueeze(1).expand_as(encoded_target)
                encoded_target[mask] = 0
            else:
                unseq = target.long()
                encoded_target.scatter_(1, unseq.unsqueeze(1), 1)
            encoded_target = Variable(encoded_target, requires_grad = False)
        else:
            encoded_target = target
        # calculate gt, t and p from perspective of 1
        intersection = output * encoded_target
        numerator = 2 * torch.sum(intersection.view(*intersection.size()[:2], -1), 2) + smooth
        denominator1 = torch.sum(output.view(*output.size()[:2], -1), 2)
        denominator2 = torch.sum(encoded_target.view(*encoded_target.size()[:2], -1), 2)
        denominator = denominator1 + denominator2 + smooth
        # Classes with no GT pixels in a sample, [N, C].
        mask_gt = (denominator2 == 0)
        if self.weight is None:
            if self.weight_type is None:
                # NOTE(review): allocated on CUDA -- CPU-only use requires an
                # explicit weight or weight_type.
                weight = Variable(torch.ones(n_classes).cuda(), requires_grad=False)
            else:
                # Class frequencies from the GT one-hot counts.
                tmp = denominator2.sum(0)
                tmp = tmp / tmp.sum()
                if self.weight_type == 'nlf':
                    weight = -1.0 * torch.log(tmp + smooth)
                elif self.weight_type == 'mfb':
                    weight = torch.median(tmp) / (tmp + smooth)
                weight = weight.detach()
        else: # prior weight is setting manually
            weight = self.weight
        loss_per_channel = weight * (1.0 - (numerator / denominator))
        # calculate Dice for special case of all GT pixels are zero
        if self.cal_zerogt:
            output_com = 1.0 - output
            encoded_target_com = 1 - encoded_target
            intersection_com = output_com * encoded_target_com
            numerator_com = 2 * torch.sum(intersection_com.view(*intersection_com.size()[:2], -1), 2) + smooth
            denominator1_com = torch.sum(output_com.view(*output_com.size()[:2], -1), 2)
            denominator2_com = torch.sum(encoded_target_com.view(*encoded_target_com.size()[:2], -1), 2)
            denominator_com = denominator1_com + denominator2_com + smooth
            loss_per_channel_com = weight * (1.0 - (numerator_com / denominator_com))
        loss_per_channel = loss_per_channel.clone()
        # if all GT pixels are zero, use the complementary pixels for calculation
        if self.cal_zerogt:
            loss_per_channel[mask_gt] = loss_per_channel_com[mask_gt]
        else:
            loss_per_channel[mask_gt] = 0
        if self.reduce:
            if self.cal_zerogt:
                dice_loss = loss_per_channel.mean()
            else:
                # NOTE(review): .sum() (scalar) divided by a [N] vector of
                # per-sample class counts -- dice_score below uses .sum(1)
                # here; confirm whether .sum(1) was intended.
                loss_ave_class = loss_per_channel.sum() / (mask_gt == 0).sum(1).float()
                dice_loss = loss_ave_class.mean()
        else:
            dice_loss = loss_per_channel # [N, C]
        return dice_loss
class MaxPoolLoss(Function):
    """ loss max-pooling defined in 'Loss Max-Pooling for Semantic Image Segmentation'

    Wraps a per-element criterion and pools its unreduced loss map.
    """
    def __init__(self, criterion, ratio=0.3, p=1.3):
        # criterion: a loss whose reduction can be disabled (reduce attr is
        # forced to False in __call__ so a per-pixel map is produced)
        self.criterion = criterion
        # NOTE(review): at module level `mpl` is bound to matplotlib and the
        # custom `from mpl import mpl` import is commented out, so
        # mpl.MaxPoolingLoss would raise AttributeError here -- confirm the
        # intended `mpl` module is importable.
        self.mpl = mpl.MaxPoolingLoss(ratio, p, reduce=True)
    def __call__(self, output, target):
        self.criterion.reduce = False
        # NLLLoss expects log-probabilities, so apply log_softmax first.
        if isinstance(self.criterion, nn.NLLLoss):
            output = F.log_softmax(output, dim=1)
        loss = self.criterion(output, target)
        loss = self.mpl(loss)
        return loss
class WeightedKLDivLoss(Function):
    """ KL-divergence loss with optional per-class weights for batches with
    imbalanced class distribution.

    The target is a probability map with the same shape as the raw network
    output; the output is log-softmax-normalised internally.
    """
    def __init__(self, weight=None, size_average=True):
        # weight: optional per-class weights, broadcast over the class dim
        # size_average: mean over all elements when True, sum otherwise
        self.weight = weight
        self.size_average = size_average
    def __call__(self, output, target):
        """ forward propagation
        :param output: Variable of output [N x C x *]
        :param target: Variable of GT prob [N x C x *]
        """
        eps = 1.0e-9
        log_prob = F.log_softmax(output, 1)
        # Element-wise KL(target || softmax(output)); eps guards log(0).
        divergence = target * (torch.log(target + eps) - log_prob)
        # Move the class dim last so an optional [C] weight broadcasts over it.
        divergence = divergence.permute(0, *range(2, divergence.dim()), 1)
        if self.weight is not None:
            divergence = self.weight * divergence
        return divergence.mean() if self.size_average else divergence.sum()
def dice_score(output, target, ignore_index=None, weight=None):
    """ Batch-wise mean Dice score of the argmax prediction against the target.

    :param output: Variable [N x C x *], raw network scores
    :param target: Variable [N x *], integer ground-truth labels
    :param ignore_index: optional label masked out of the ground truth
    :param weight: optional per-class weights applied to the per-class Dice
    :return: float, Dice averaged over classes present in the GT, then over batch
    """
    eps = 1.0e-9
    target = target.data
    probs = F.softmax(output, dim=1)
    pred = torch.max(probs.data, 1)[1]
    # One-hot encode prediction and ground truth.
    onehot_pred = probs.data.clone().zero_()
    onehot_gt = probs.data.clone().zero_()
    onehot_pred.scatter_(1, pred.unsqueeze(1), 1)
    if ignore_index is None:
        onehot_gt.scatter_(1, target.long().unsqueeze(1), 1)
    else:
        # Map ignored pixels to class 0 for the scatter, then zero them out.
        ignored = (target == ignore_index)
        target = target.clone()
        target[ignored] = 0
        onehot_gt.scatter_(1, target.unsqueeze(1), 1)
        onehot_gt[ignored.unsqueeze(1).expand_as(onehot_gt)] = 0
    overlap = onehot_pred * onehot_gt
    num = 2 * torch.sum(overlap.view(*overlap.size()[:2], -1), 2) + eps
    den_pred = torch.sum(onehot_pred.view(*onehot_pred.size()[:2], -1), 2)
    den_gt = torch.sum(onehot_gt.view(*onehot_gt.size()[:2], -1), 2)
    # Classes absent from the GT are excluded from the per-sample mean.
    absent = (den_gt == 0)
    per_class = num / (den_pred + den_gt + eps)
    if weight is not None:
        per_class = weight * per_class
    per_class = per_class.clone()
    per_class[absent] = 0
    per_sample = per_class.sum(1) / (absent == 0).sum(1).float()
    return per_sample.mean().item()
def dice_score_slicewise(output, target, ignore_index=None, weight=None):
    """ Mean Dice score computed slice-wisely over a 3D volume.

    :param output: Variable [N x C x D x H x W], raw network scores
    :param target: Variable [N x D x H x W], integer ground-truth labels
    :param ignore_index: optional label masked out of the ground truth
    :param weight: optional per-class weights applied to the per-class Dice
    :return: float, Dice averaged over classes present per slice, then over slices
    """
    eps = 1.0e-9
    target = target.data
    probs = F.softmax(output, dim=1)
    pred = torch.max(probs.data, 1)[1]
    # One-hot encode prediction and ground truth.
    onehot_pred = probs.data.clone().zero_()
    onehot_gt = probs.data.clone().zero_()
    onehot_pred.scatter_(1, pred.unsqueeze(1), 1)
    if ignore_index is None:
        onehot_gt.scatter_(1, target.long().unsqueeze(1), 1)
    else:
        # Map ignored pixels to class 0 for the scatter, then zero them out.
        ignored = (target == ignore_index)
        target = target.clone()
        target[ignored] = 0
        onehot_gt.scatter_(1, target.unsqueeze(1), 1)
        onehot_gt[ignored.unsqueeze(1).expand_as(onehot_gt)] = 0
    overlap = onehot_pred * onehot_gt
    # Reduce over H and W only, keeping [N, C, D] per-slice statistics.
    num = 2 * overlap.sum(4).sum(3) + eps
    den_gt = onehot_gt.sum(4).sum(3)
    den = onehot_pred.sum(4).sum(3) + den_gt + eps
    absent = (den_gt == 0)  # classes with no GT pixels in a slice
    per_class = num / den
    if weight is not None:
        per_class = weight * per_class
    per_class = per_class.clone()
    per_class[absent] = 0
    per_slice = per_class.sum(1) / (absent == 0).sum(1).float()  # [N, D]
    return per_slice.mean().item()
class GeneralizedDiceLoss(Function):
    """ generalized dice score for multi-class segmentation defined in
    Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
    loss function for highly unbalanced segmentations. DLMIA 2017
    weight for each class is calculated from the distribution of Ground
    Truth pixel/voxel belonging to each class
    """
    def __init__(self, weight=None, ignore_index=None, weight_type='inv_square',
                 alpha=0.5):
        """
        :param weight: optional explicit per-class weight vector
        :param ignore_index: optional label excluded from the loss
        :param weight_type: str, in which way to calculate class weight
        here we consider 3 strategies for deciding proper class weight
        (1) inv_square class weight is set as inverse of summing up square of
            ground truth for each class
        (2) others_one_pred: class weight is set as others_over_one ratio of predicted
            probabilities for each class
        (3) others_one_gt: class weight is set as others_over_one ratio of ground truth
            for each class
        :param alpha: float, ratio of false positive
        :param beta: float, ratio of false negative
        increase alpha if you care more about false positive and beta otherwise
        """
        self.ignore_index = ignore_index
        self.weight = weight
        self.weight_type = weight_type
        self.alpha= alpha
        self.beta = 1- self.alpha
    def __call__(self, output, target):
        # output : N x C x *, Variable of float tensor (* means any dimensions)
        # target : N x *, Variable of long tensor (* means any dimensions)
        # weights : C, float tensor
        # ignore_index : int, class index to ignore from loss (0 for background)
        # back propagation is checked to be correct
        smooth = 1.0e-9
        output = F.softmax(output, 1)
        n_pixels = output[:, 0].numel()
        n_classes = output.size(1)
        if output.size() != target.size():
            # for normal input: one-hot encode the integer label map
            target = target.data
            encoded_target = output.data.clone().zero_() # make output size array and initialize with zeros
            if self.ignore_index is not None:
                # Map ignored pixels to class 0 for the scatter, then zero them out.
                mask = (target == self.ignore_index)
                target = target.clone()
                target[mask] = 0
                encoded_target.scatter_(1, target.unsqueeze(1), 1)
                mask = mask.unsqueeze(1).expand_as(encoded_target)
                encoded_target[mask] = 0
            else:
                unseq = target.long()
                encoded_target.scatter_(1, unseq.unsqueeze(1), 1)
            encoded_target = Variable(encoded_target, requires_grad=False)
        else:
            # for BC learning input: target is already a probability map
            encoded_target = target
        # Soft true positives, false positives and false negatives per element.
        tp = output * encoded_target
        fp = output * (1-encoded_target)
        fn = (1.0 - output) * encoded_target
        # add along all dimensions except the first (n_batch) and the second (n_class) dim
        gt_sum = torch.sum(encoded_target.view(*encoded_target.size()[:2], -1), 2).sum(0)
        mask_gt = (gt_sum == 0)
        tp_sum = torch.sum(tp.view(*tp.size()[:2], -1), 2).sum(0)
        fp_sum = torch.sum(fp.view(*fp.size()[:2], -1), 2).sum(0)
        fn_sum = torch.sum(fn.view(*fn.size()[:2], -1), 2).sum(0)
        # Tversky-style numerator/denominator; alpha/beta trade FP vs FN.
        numerator = tp_sum
        denominator = tp_sum + self.alpha * fp_sum + self.beta * fn_sum
        if self.weight is None:
            if self.weight_type is None:
                # NOTE(review): allocated on CUDA -- CPU-only use requires an
                # explicit weight or weight_type.
                weight = Variable(torch.ones(n_classes).cuda(), requires_grad=False)
            else:
                if self.weight_type == 'inv_square':
                    weight = 1.0 / (gt_sum.pow(2) + smooth)
                    weight[gt_sum==0] = 0.0
                elif self.weight_type == 'others_one_pred':
                    prob_sum_per_class = torch.sum(output.view(*output.size()[:2], -1), 2).sum(0)
                    weight = (n_pixels - prob_sum_per_class) / prob_sum_per_class
                elif self.weight_type == 'others_one_gt':
                    weight = (n_pixels - gt_sum) / (gt_sum + smooth)
                    weight[gt_sum==0] = 0.0
                weight = weight.detach()
        else:
            weight = self.weight
        # One global ratio of weighted overlaps, as in the generalized Dice paper.
        loss = 1.0 - (weight * numerator).sum() / (weight * denominator).sum()
        return loss
class WeightedCrossEntropy(CrossEntropyLoss):
    """ weighted cross entropy for multi-class semantic segmentation defined in
    Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
    loss function for highly unbalanced segmentations. DLMIA 2017
    """
    def __init__(self, weight=None, size_average=True, ignore_index=-100, reduce=True,
                 weight_type='log_inv_freq'):
        # weight_type: 'others_over_one' or 'log_inv_freq'; only used when no
        # explicit weight vector is given.
        super(WeightedCrossEntropy, self).__init__(weight, size_average, ignore_index, reduce)
        self.weight_type = weight_type
    def forward(self, output, target):
        """ weighted cross entropy where weight is calculated from input data
        :param output: Variable, N x C x *, probabilities for each class
        :param target: Variable, N x *, GT labels
        """
        if self.weight is None:
            # Class weights derived from the *predicted* probability mass per class.
            output_prob = F.softmax(output, 1)
            prob_per_class = torch.sum(output_prob.view(*output_prob.size()[:2], -1), 2).sum(0)
            prob_sum = prob_per_class.sum()
            if self.weight_type == 'others_over_one':
                weight = (prob_sum - prob_per_class)/prob_per_class
            elif self.weight_type == 'log_inv_freq':
                weight = torch.log(prob_sum/prob_per_class)
        else:
            weight = self.weight
        # NOTE(review): the positional size_average/ignore_index/reduce args
        # match the pre-1.0 F.cross_entropy signature; newer torch replaces
        # them with `reduction` -- confirm the installed version.
        return F.cross_entropy(output, target, weight, self.size_average,
                               self.ignore_index, self.reduce)
class FocalLoss(Function):
    """ focal loss for multi-class object detection defined in
    Tsung-Yi Lin et. al. Focal Loss for Dense Object Detection. CVPR 2017

    Applied channel-wise with sigmoid activations over a one-hot target.
    """
    def __init__(self, alpha=0.25, gamma=2.0):
        # alpha: balance factor between positive and negative targets
        # gamma: focusing exponent that down-weights easy examples
        self.alpha = alpha
        self.gamma = gamma
    def __call__(self, output, target):
        """ focal loss where the per-element weight is derived from the input
        :param output: Variable, N x C x *, raw scores for each class
        :param target: Variable, N x *, GT labels
        """
        onehot = output.data.clone().zero_()
        onehot.scatter_(1, target.unsqueeze(1), 1)
        onehot = Variable(onehot, requires_grad=False)
        prob = output.sigmoid()
        # pt: the model's probability assigned to the true label per element.
        pt = prob * onehot + (1 - prob) * (1 - onehot)
        # alpha-balanced weight, scaled by the (1-pt)^gamma focusing term.
        w = self.alpha * onehot + (1 - self.alpha) * (1 - onehot)
        w = w * (1 - pt).pow(self.gamma)
        # since loss decay very fast, sum is returned instead of average
        return F.binary_cross_entropy_with_logits(output, onehot, w, size_average=False)
def bound_weight_withdiff(target, sigmas=[5.0, 5.0], ws=[20.0, 10.0], n_classes=3, bound_output=True):
    """ calculate boundary weight of each pixel given GT mask
    this is a GPU version implementation (all buffers are created with .cuda())
    inner and outer bounds get separate Gaussian parameters (index 0 = outer,
    index 1 = inner, per the loop comment below)
    For more information, please refer to the original paper of U-Net
    :param target: tensor, [N, H, W], target
    :param sigmas: list of float, variance of Gaussian pdf per bound type
    :param ws: list of float, amplitude of Gaussian pdf per bound type
    :param n_classes: int, # of classes (only 3 and 5 are handled; any other
        value leaves `bounds` unassigned -- NameError below; confirm)
    :param bound_output: bool, whether target is already a bound map
    :return: weights, tensor, [N, H, W], weights for each pixel
    """
    h, w = target.size()[1:]
    if n_classes == 3: # target is boundary map [1--inner bound, 2--outer bound]
        if bound_output:
            inner_bound = (target == 1)
            outer_bound = (target == 2)
            bounds = torch.stack([outer_bound, inner_bound], dim=1)
        else: # target is background + central_part + outline
            # Detect label transitions with a 3x3 box filter over the padded
            # one-hot mask: fully-interior pixels sum to 9 and are zeroed.
            t_pad = F.pad(target, (1, 1, 1, 1))
            mask = torch.zeros(t_pad.size(0), n_classes, *t_pad.size()[1:]).cuda()
            mask.scatter_(1, t_pad.unsqueeze(1), 1)
            conv_filter = torch.ones(1, 1, 3, 3).cuda()
            y = torch.zeros(target.size(0), n_classes, h, w).cuda()
            for i in range(n_classes):
                tmp = mask[:, i].unsqueeze(1)
                y[:,i] = F.conv2d(tmp, conv_filter, padding=0).squeeze(1)
            y = y.long() # [N, C, H, W]
            y[y == 9] = 0 # all pixels within the kernel are equal to 1
            bounds = y[:, :2].long() # outer bound and inner bound
    elif n_classes == 5: # treat inner bound and outer bound differently
        inner_bound = (target == 3)
        outer_bound = (target == 4)
        bounds = torch.stack([outer_bound, inner_bound], dim=1)
    # Coordinates of every pixel, stacked to shape [2, H, W].
    pixel_cords = torch.meshgrid([torch.arange(h).cuda(), torch.arange(w).cuda()]) # 2, H, W
    pixel_cords = torch.stack(pixel_cords).float()
    weights = target.clone().zero_().float()
    for ib, bound in enumerate(bounds):
        for ic, bound_c in enumerate(bound): # bound in different channel [outer, inner]
            b_cords = torch.nonzero(bound_c).float() # N, 2
            if len(b_cords) > 0: # in case of no inner bound
                # Distance from every pixel to every boundary point ...
                tmp = pixel_cords.repeat(len(b_cords), 1, 1, 1).permute(2, 3, 0, 1).float()
                # print(tmp.size())
                tmp = (tmp - b_cords).norm(dim=-1)
                tmp = torch.min(tmp, dim=-1)[0] # shortest distance between bound and each cord
                # ... accumulated as a Gaussian bump around the boundary.
                weights[ib] += ws[ic] * torch.exp(-0.5 * tmp ** 2 / sigmas[ic] ** 2) # [H, W]
    return weights
def bound_weight_wodiff(target, sigma=5.0, w0=10.0, n_classes=2, k=2):
    """ calculate boundary weight of each pixel given GT mask
    this is a GPU version implementation (all buffers are created with .cuda())
    inner bounds and outer bounds are treated as the same
    For more information, please refer to the original paper of U-Net
    :param target: tensor, [N, H, W], target
    :param sigma: float, variance of Gaussian pdf
    :param w0: float, aptitude of Gaussian pdf
    :param n_classes: int, # of classes
    :param k: int, top k shortest distances to calculate
    :return: weights, tensor, [N, H, W], weights for each pixel
    """
    h, w = target.size()[1:]
    if n_classes == 2: # boundary detection: target is already the bound map
        bounds = target
    elif n_classes == 3:
        # Detect label transitions with a 3x3 box filter over the padded
        # one-hot mask: fully-interior pixels sum to 9 and are zeroed.
        t_pad = F.pad(target, (1, 1, 1, 1))
        mask = torch.zeros(t_pad.size(0), n_classes, *t_pad.size()[1:]).cuda()
        mask.scatter_(1, t_pad.unsqueeze(1), 1)
        conv_filter = torch.ones(1, 1, 3, 3).cuda()
        y = torch.zeros(target.size(0), n_classes, h, w).cuda()
        for i in range(n_classes):
            tmp = mask[:, i].unsqueeze(1)
            y[:,i] = F.conv2d(tmp, conv_filter, padding=0).squeeze(1)
        y = y.long()
        y[y == 9] = 0 # all pixels within the kernel are equal to 1
        bounds = y.sum(1).long()
    elif n_classes >= 4:
        # Labels 3 and 4 mark the (inner/outer) bounds.
        bounds = (target == 3) | (target == 4)
    # Coordinates of every pixel, stacked to shape [2, H, W].
    pixel_cords = torch.meshgrid([torch.arange(h).cuda(), torch.arange(w).cuda()]) # 2, H, W
    pixel_cords = torch.stack(pixel_cords).float()
    weights = target.clone().zero_().float()
    for i, b in enumerate(bounds):
        b_cords = torch.nonzero(b).float() # N, 2
        # print(b_cords.size())
        # Distance from every pixel to every boundary point.
        tmp = pixel_cords.repeat(len(b_cords), 1, 1, 1).permute(2, 3, 0, 1).float()
        # print(tmp.size())
        tmp = (tmp - b_cords).norm(dim=-1)
        # Sum of the k shortest distances (cf. the U-Net d1+d2 weighting).
        tmp = torch.topk(tmp, k, largest=False, dim=-1)[0]
        weights[i] = w0 * torch.exp(-0.5 * torch.sum(tmp, dim=-1) ** 2 / sigma ** 2) # [H, W]
    return weights
class CrossEntropyBoundLoss(Function):
    """ define a new cross entropy loss considering boundaries
    pixels which are closer to the boundaries, higher weight is assigned
    """
    def __init__(self, sigmas=[5.0, 5.0], ws=[20.0, 10.0], n_classes=3,
                 weight=None, ignore_index=None, bound_output=True, k=2):
        # sigmas/ws: Gaussian parameters forwarded to the bound-weight helpers
        # weight: optional per-class prior weights added to boundary weights
        # k: top-k distances parameter for bound_weight_wodiff (n_classes==2)
        self.sigmas = sigmas
        self.ws = ws
        self.n_classes = n_classes
        self.weight = weight
        self.ignore_index = ignore_index
        self.bound_output = bound_output
        self.k = k
    def __call__(self, output, target):
        """ cross entropy loss considering boundaries
        :param output: Variable, N x C x *, probabilities for each class
        :param target: Variable, N x *, GT labels
        """
        # make sure to calculate the bound weight before target changes
        # NOTE(review): only n_classes in {2, 3} assign w_bound; any other
        # value raises NameError two lines below -- confirm intended range.
        if self.n_classes == 2:
            w_bound = bound_weight_wodiff(target, self.sigmas[0], self.ws[0], self.n_classes, self.k)
        elif self.n_classes == 3:
            w_bound = bound_weight_withdiff(target, self.sigmas, self.ws, self.n_classes, self.bound_output) # [N, H, W]
        # Broadcast the per-pixel weight across all class channels.
        weight = w_bound.repeat(self.n_classes, 1, 1, 1).permute(1, 0, 2, 3) # [N, C, H, W]
        encoded_target = output.data.clone().zero_()
        if self.ignore_index is not None:
            # Map ignored pixels to class 0 for the scatter, then zero them out.
            mask = (target == self.ignore_index)
            target = target.clone()
            target[mask] = 0
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
            mask = mask.unsqueeze(1).expand_as(encoded_target)
            encoded_target[mask] = 0
        else:
            unseq = target.long()
            encoded_target.scatter_(1, unseq.unsqueeze(1), 1)
        if self.weight is not None:
            # Add class-prior weights on top of the boundary weights.
            w_prior = encoded_target.permute(0, 2, 3, 1) * self.weight # [N, H, W, C]
            weight = weight + w_prior.permute(0, 3, 1, 2)
        weight = weight.detach()
        encoded_target = Variable(encoded_target, requires_grad=False)
        # NOTE(review): reduction="elementwise_mean" is the pre-1.0 spelling
        # of reduction="mean" -- confirm the installed torch accepts it.
        return F.binary_cross_entropy_with_logits(output, encoded_target, weight, reduction="elementwise_mean")
############### weighted Hausdorff distance loss ###############
# probability of each class is included so that the loss can be back-propagated
def cdist(x, y):
    """ Pairwise Euclidean distances between two point sets.
    :param x: Tensor of size Nxd
    :param y: Tensor of size Mxd
    :return dist: NxM matrix where dist[i,j] is the norm between x[i,:] and y[j,:]
        i.e. dist[i,j] = ||x[i,:]-y[j,:]||
    """
    # Broadcast to [N, M, d] and reduce over the coordinate dim.
    deltas = x.unsqueeze(1) - y.unsqueeze(0)
    return deltas.pow(2).sum(-1).sqrt()
class WeightedHausdorffDistanceLoss(Function):
    """ weighted HausdorffDistanceLoss defined in
    "Weighted Hausdorff Distance: A Loss Function For Object Localization". Javier Ribera.
    for more information, please refer to the original paper.
    """
    def __init__(self, return_2_terms=False, alpha=4, beta=2):
        """
        :param return_2_terms: bool, Whether to return the 2 terms of the WHD instead of their sum. Default: False.
        :param alpha: int, decay factor for the probability-weighted distances
        :param beta: int, exponent applied to the probabilities in term 1
        """
        self.return_2_terms = return_2_terms
        self.alpha = alpha
        self.beta = beta
    def __call__(self, prob_map, gt):
        """ Compute the Weighted Hausdorff Distance function between the estimated probability map
        and ground truth points. The output is the WHD averaged through all the batch.
        :param prob_map: (B x H x W) or (B x D x H x W), Tensor of the probability map of the estimation.
        :param gt: (B x H x W) or (B x D x H x W), Tensor of the GT annotation
        """
        eps = 1e-6
        alpha = self.alpha
        beta = self.beta
        _assert_no_grad(gt)
        # assert prob_map.dim() == 3, 'The probability map must be (B x H x W)'
        if prob_map.dim() == 4 and gt.dim() == 4:
            # Fold the depth dim into the batch so slices are handled independently.
            prob_map = prob_map.contiguous().view(prob_map.size(0) * prob_map.size(1), *prob_map.size()[2:]) # combine first 2 dims
            gt = gt.contiguous().view(gt.size(0) * gt.size(1), *gt.size()[2:]) # [B*D, H, W]
        batch_size, height, width = prob_map.size()
        assert batch_size == len(gt), 'prob map and GT must have the same size'
        max_dist = math.sqrt(height ** 2 + width ** 2)
        n_pixels = height * width
        # Coordinates of every pixel, flattened to [H*W, 2] (requires CUDA).
        all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
        all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
        terms_1 = []
        terms_2 = []
        for b in range(batch_size):
            prob_map_b, gt_b = prob_map[b], gt[b]
            gt_pts = torch.nonzero(gt_b).float() # N, 2
            n_gt_pts = gt_pts.size()[0]
            if n_gt_pts > 0:
                d_matrix = cdist(all_img_locations, gt_pts)
                p = prob_map_b.view(prob_map_b.numel())
                n_est_pts = (p**beta).sum()
                p_replicated = p.view(-1, 1).repeat(1, n_gt_pts)
                # term 1: probability-weighted distance from each pixel to its
                # nearest GT point.
                term_1 = (1 / (n_est_pts + eps)) * torch.sum(p**beta * torch.min(d_matrix, 1)[0])
                # term 2: for each GT point, the smallest distance inflated by
                # low predicted probability, clamped to the image diagonal.
                d_div_p = torch.min((d_matrix + eps) /
                                    (p_replicated**alpha + eps / max_dist), 0)[0]
                d_div_p = torch.clamp(d_div_p, 0, max_dist)
                term_2 = torch.mean(d_div_p)
            # NOTE(review): these appends sit outside the `if n_gt_pts > 0`
            # block, so a slice with no GT points re-appends the previous
            # slice's terms (or raises NameError on the first slice) --
            # confirm whether they should be indented into the if-body.
            terms_1.append(term_1)
            terms_2.append(term_2)
        terms_1 = torch.stack(terms_1)
        terms_2 = torch.stack(terms_2)
        if self.return_2_terms:
            res = terms_1.mean(), terms_2.mean()
        else:
            res = terms_1.mean() + terms_2.mean()
        return res
class WeightedHausdorffDistanceDoubleBoundLoss(Function):
    """ Weighted Hausdorff distance computed per boundary class (inner/outer) and
    combined with a configurable ratio. Used as a plain callable, not an
    autograd Function (no forward/backward)."""
    def __init__(self, return_boundwise_loss=False, alpha=4, beta=1, ratio=0.5):
        """ whd loss for inner and outer bound separately
        inner bound -- 1, outer bound -- 2
        :param return_boundwise_loss: bool, whether to also return the per-bound losses. Default: False.
        :param alpha: int, decay factor
        :param beta: int, exponent applied to probabilities in term 1
        :param ratio: float, ratio of inner bound, default is 0.5
        """
        self.return_boundwise_loss = return_boundwise_loss
        self.alpha = alpha
        self.beta = beta
        self.ratio = ratio
    def __call__(self, prob_map, gt):
        """ Compute the Weighted Hausdorff Distance function between the estimated probability map
        and ground truth points. The output is the WHD averaged through all the batch.
        :param prob_map: (B x C x H x W) Tensor of estimated probability map with multiple channels
        :param gt: (B x H x W) Tensor of the GT annotation
        :return: scalar loss, or (loss, per-bound losses) when return_boundwise_loss is True
        """
        eps = 1e-6
        alpha = self.alpha
        beta = self.beta
        _assert_no_grad(gt)
        # assert prob_map.dim() == 4, 'The probability map must be (B x C x H x W)'
        # prob_map size [B, C, T, H, W] or [B, C, H, W] | gt size [B, T, H, W] or [B, H, W]
        if prob_map.dim() == 5: # 3D volume
            # Fold depth into the batch axis so each slice is scored independently.
            prob_map = prob_map.permute(0, 2, 1, 3, 4)
            prob_map = prob_map.contiguous().view(-1, *prob_map.size()[2:]) # combine first 2 dims
            gt = gt.contiguous().view(-1, *gt.size()[2:]) # combine first 2 dims
        batch_size, n_channel, height, width = prob_map.size()
        assert batch_size == len(gt), 'prob map and GT must have the same size'
        max_dist = math.sqrt(height ** 2 + width ** 2)  # image diagonal, caps term 2
        n_pixels = height * width
        # NOTE(review): hard-coded .cuda() — fails on CPU-only machines.
        all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
        all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
        # here we consider inner bound and outer bound respectively
        # Index 0 of this list stays unused; bounds are indexed 1..n_channel-1.
        res_bounds_lst = [[] for _ in range(0, n_channel)]
        for b in range(batch_size):
            prob_map_b, gt_b = prob_map[b], gt[b]
            for bound_inx in range(1, n_channel):
                gt_bb = (gt_b == bound_inx) # for different bounds (1 - inner, 2 - outer)
                gt_pts = torch.nonzero(gt_bb).float() # N, 2
                n_gt_pts = gt_pts.size()[0]
                prob_map_bb = prob_map_b[bound_inx]
                # NOTE(review): if a bound class never appears in the batch,
                # torch.stack below receives an empty list and raises.
                if n_gt_pts > 0:
                    d_matrix = cdist(all_img_locations, gt_pts)
                    p = prob_map_bb.view(prob_map_bb.numel())
                    n_est_pts = (p**beta).sum()
                    p_replicated = p.view(-1, 1).repeat(1, n_gt_pts)
                    # term 1: probability-weighted mean distance to the nearest GT point
                    term_1 = (1 / (n_est_pts + eps)) * torch.sum(p**beta * torch.min(d_matrix, 1)[0])
                    # term 2: per-GT-point minimum of distance scaled by p**alpha, clamped to the diagonal
                    d_div_p = torch.min((d_matrix + eps) /
                                        (p_replicated**alpha + eps / max_dist), 0)[0]
                    d_div_p = torch.clamp(d_div_p, 0, max_dist)
                    term_2 = torch.mean(d_div_p)
                    # set different ratio for inner and outer bound
                    res_bounds_lst[bound_inx].append(term_1 + term_2)
        res_bounds = [torch.stack(res_bounds_lst[i]) for i in range(1, n_channel)]
        res_bounds_mean = [res_bound.mean() for res_bound in res_bounds]
        res_boundwise = torch.stack(res_bounds_mean) # convert list into torch array
        # ratio: inner bound ratio
        # NOTE(review): assumes exactly two bound classes (indices 0 and 1 of res_boundwise).
        res = res_boundwise[0] * self.ratio + res_boundwise[1] * (1.0 - self.ratio)
        if self.return_boundwise_loss: # return inner bound loss and outer bound loss respectively
            return res, res_boundwise
        else:
            return res
class WeightedHausdorffDistanceDoubleBoundLossWithSnake(Function):
    """ Weighted Hausdorff distance evaluated twice per sample: once against the
    GT boundary annotation and once against a snake (regularization) annotation;
    the two are averaged. Used as a plain callable, not an autograd Function."""
    def __init__(self, return_multi_loss=False, alpha=4, beta=1, ratio=0.5, eps=1e-6):
        """ whd loss for inner and outer bound separately
        inner bound -- 1, outer bound -- 2
        snake constraint is applied as a second reference annotation
        :param return_multi_loss: bool, whether to also return the (GT, snake) losses. Default: False.
        :param alpha: int, decay factor
        :param beta: int, exponent applied to probabilities in term 1
        :param ratio: float, ratio of inner bound, default is 0.5
        :param eps: float, numerical-stability constant
        """
        self.return_multi_loss = return_multi_loss
        self.alpha = alpha
        self.beta = beta
        self.ratio = ratio
        self.eps = eps
    def __call__(self, prob_map, gt, snake):
        """ Compute the Weighted Hausdorff Distance function between the estimated probability map
        and ground truth points. The output is the WHD averaged through all the batch.
        :param prob_map: (B x C x H x W) Tensor of estimated probability map with multiple channels
        :param gt: (B x H x W) Tensor of the GT annotation
        :param snake: (B x H x W) Tensor of snake annotation
        :return: scalar loss, or (loss, [gt_loss, snake_loss]) when return_multi_loss is True
        """
        _assert_no_grad(gt)
        _assert_no_grad(snake)
        # prob_map size [B, C, T, H, W] or [B, C, H, W] | gt size [B, T, H, W] or [B, H, W]
        if prob_map.dim() == 5: # 3D volume
            prob_map = prob_map.permute(0, 2, 1, 3, 4)
            prob_map = prob_map.contiguous().view(-1, *prob_map.size()[2:]) # combine first 2 dims
            gt = gt.contiguous().view(-1, *gt.size()[2:]) # combine first 2 dims
        batch_size, n_channel, height, width = prob_map.size()
        assert batch_size == gt.size(0), 'prob map and GT must have the same size'
        assert batch_size == snake.size(0), 'prob_map and snake must have the same size'
        n_pixels = height * width
        # NOTE(review): hard-coded .cuda() — fails on CPU-only machines.
        all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
        all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
        # here we consider GT annotation and snake respectively
        # NOTE(review): the list is sized n_channel-1 but only slots 0 (GT) and 1 (snake)
        # are used below — this implicitly assumes n_channel == 3; TODO confirm.
        res_bounds_lst = [[] for _ in range(0, n_channel-1)]
        for b in range(batch_size):
            prob_map_b, gt_b, snake_b = prob_map[b], gt[b], snake[b]
            res_gt = self.weighted_hausdorff_distance(gt_b, prob_map_b, all_img_locations)
            res_snake = self.weighted_hausdorff_distance(snake_b, prob_map_b, all_img_locations)
            res_bounds_lst[0].append(res_gt)
            res_bounds_lst[1].append(res_snake)
        res_bounds = [torch.stack(res_bounds_lst[i]) for i in range(0, n_channel-1)]
        res_bounds_mean = [res_bound.mean() for res_bound in res_bounds]
        res_boundwise = torch.stack(res_bounds_mean) # convert list into torch array
        # mean of pred loss and reg loss
        res = res_boundwise.mean()
        if self.return_multi_loss: # return inner bound loss and outer bound loss respectively
            return res, res_boundwise
        else:
            return res
    def weighted_hausdorff_distance(self, gt_b, prob_map_b, all_img_locations):
        """ calculate weighted Hausdorff distance for one sample
        :param gt_b: long tensor of size [H, W], label map (1 - inner, 2 - outer)
        :param prob_map_b: float tensor of size [C, H, W], per-class probability map
        :param all_img_locations: float tensor of size [H*W, 2], pixel coordinates
        :return: scalar tensor, ratio-weighted combination of the per-bound WHDs
        """
        n_channel, height, width = prob_map_b.size()
        max_dist = math.sqrt(height ** 2 + width ** 2)  # image diagonal, caps term 2
        res_bounds_lst = []
        for bound_inx in range(1, n_channel):
            gt_bb = (gt_b == bound_inx) # for different bounds (1 - inner, 2 - outer)
            gt_pts = torch.nonzero(gt_bb).float() # N, 2
            n_gt_pts = gt_pts.size()[0]
            prob_map_bb = prob_map_b[bound_inx]
            # print("# of GT points: {}".format(n_gt_pts))
            if n_gt_pts > 0:
                # loss1(GT bound and pred) and loss2(GT snake and pred) are calculated
                d_matrix = cdist(all_img_locations, gt_pts)
                p = prob_map_bb.view(prob_map_bb.numel())
                n_est_pts = (p ** self.beta).sum()
                p_replicated = p.view(-1, 1).repeat(1, n_gt_pts)
                # term 1: probability-weighted mean distance to the nearest GT point
                term_1 = (1 / (n_est_pts + self.eps)) * torch.sum(p ** self.beta * torch.min(d_matrix, 1)[0])
                # term 2: per-GT-point minimum of distance scaled by p**alpha, clamped to the diagonal
                d_div_p = torch.min((d_matrix + self.eps) /
                                    (p_replicated ** self.alpha + self.eps / max_dist), 0)[0]
                d_div_p = torch.clamp(d_div_p, 0, max_dist)
                term_2 = torch.mean(d_div_p)
                res_bounds_lst.append(term_1 + term_2)
        # NOTE(review): if NO bound has GT points, res_bounds_lst is empty and
        # the indexing below raises IndexError.
        if len(res_bounds_lst) >= 2:
            res = res_bounds_lst[0] * self.ratio + res_bounds_lst[1] * (1.0 - self.ratio)
        else:
            res = res_bounds_lst[0]
        return res
class WeightedMaximumHausdorffDistanceDoubleBoundLoss(Function):
    """ Variant of the double-bound WHD loss that takes the MAXIMUM over pixels /
    GT points in each term instead of the average. Used as a plain callable."""
    def __init__(self, return_boundwise_loss=False, alpha=4, beta=1, ratio=0.5):
        """ whd loss for inner and outer bound separately
        instead of averaged whd, maximum whd is used
        inner bound -- 1, outer bound -- 2
        :param return_boundwise_loss: bool, whether to also return the per-bound losses. Default: False.
        :param alpha: int, decay factor
        :param beta: int, exponent applied to probabilities in term 1
        :param ratio: float, ratio of inner bound, default is 0.5
        """
        self.return_boundwise_loss = return_boundwise_loss
        self.alpha = alpha
        self.beta = beta
        self.ratio = ratio
    def __call__(self, prob_map, gt):
        """ Compute the Weighted Hausdorff Distance function between the estimated probability map
        and ground truth points. The output is the WHD averaged through all the batch.
        :param prob_map: (B x C x H x W) Tensor of estimated probability map with multiple channels
        :param gt: (B x H x W) Tensor of the GT annotation
        :return: scalar loss, or (loss, per-bound losses) when return_boundwise_loss is True
        """
        eps = 1e-6
        alpha = self.alpha
        beta = self.beta
        _assert_no_grad(gt)
        # assert prob_map.dim() == 4, 'The probability map must be (B x C x H x W)'
        # prob_map size [B, C, T, H, W] or [B, C, H, W] | gt size [B, T, H, W] or [B, H, W]
        if prob_map.dim() == 5: # 3D volume
            prob_map = prob_map.permute(0, 2, 1, 3, 4)
            prob_map = prob_map.contiguous().view(-1, *prob_map.size()[2:]) # combine first 2 dims
            gt = gt.contiguous().view(-1, *gt.size()[2:]) # combine first 2 dims
        batch_size, n_channel, height, width = prob_map.size()
        assert batch_size == len(gt), 'prob map and GT must have the same size'
        max_dist = math.sqrt(height ** 2 + width ** 2)  # image diagonal, caps term 2
        n_pixels = height * width
        # NOTE(review): hard-coded .cuda() — fails on CPU-only machines.
        all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
        all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
        # here we consider inner bound and outer bound respectively
        # Index 0 of this list stays unused; bounds are indexed 1..n_channel-1.
        res_bounds_lst = [[] for _ in range(0, n_channel)]
        for b in range(batch_size):
            prob_map_b, gt_b = prob_map[b], gt[b]
            for bound_inx in range(1, n_channel):
                gt_bb = (gt_b == bound_inx) # for different bounds (1 - inner, 2 - outer)
                gt_pts = torch.nonzero(gt_bb).float() # N, 2
                n_gt_pts = gt_pts.size()[0]
                prob_map_bb = prob_map_b[bound_inx]
                if n_gt_pts > 0:
                    d_matrix = cdist(all_img_locations, gt_pts)
                    p = prob_map_bb.view(prob_map_bb.numel())
                    p_replicated = p.view(-1, 1).repeat(1, n_gt_pts)
                    # term 1: worst (largest) probability-weighted nearest-GT distance over pixels
                    term_1 = torch.max(p**beta * torch.min(d_matrix, 1)[0])
                    d_div_p = torch.min((d_matrix + eps) /
                                        (p_replicated**alpha + eps / max_dist), 0)[0]
                    d_div_p = torch.clamp(d_div_p, 0, max_dist)
                    # term 2: worst GT point after probability scaling (max instead of mean)
                    term_2 = torch.max(d_div_p)
                    # set different ratio for inner and outer bound
                    res_bounds_lst[bound_inx].append(term_1 + term_2)
        # NOTE(review): if a bound class never appears in the batch, torch.stack raises.
        res_bounds = [torch.stack(res_bounds_lst[i]) for i in range(1, n_channel)]
        res_bounds_mean = [res_bound.mean() for res_bound in res_bounds]
        res_boundwise = torch.stack(res_bounds_mean) # convert list into torch array
        # ratio: inner bound ratio
        # NOTE(review): assumes exactly two bound classes (indices 0 and 1).
        res = res_boundwise[0] * self.ratio + res_boundwise[1] * (1.0 - self.ratio)
        if self.return_boundwise_loss: # return inner bound loss and outer bound loss respectively
            return res, res_boundwise
        else:
            return res
class ModifiedWeightedHausdorffDistanceDoubleBoundLoss(Function):
    """ Double-bound WHD loss restricted to predicted pixels whose probability is
    at least ``thres``; GT-to-prediction distances are computed only against those
    confident pixels. Used as a plain callable."""
    def __init__(self, return_boundwise_loss=False, alpha=4, beta=1, ratio=0.5, thres=0.5):
        """ whd loss for inner and outer bound separately
        the loss is modified to only calculate distances between GT pixels and predicted boundary pixels with
        probability higher than pre-set threshold
        inner bound -- 1, outer bound -- 2
        :param return_boundwise_loss: bool, whether to also return the per-bound losses. Default: False.
        :param alpha: int, decay factor
        :param beta: int, exponent applied to probabilities in term 1
        :param ratio: float, ratio of inner bound, default is 0.5
        :param thres: float, probability threshold for keeping predicted pixels
        """
        self.return_boundwise_loss = return_boundwise_loss
        self.alpha = alpha
        self.beta = beta
        self.ratio = ratio
        self.thres = thres
    def __call__(self, prob_map, gt):
        """ Compute the Weighted Hausdorff Distance function between the estimated probability map
        and ground truth points. The output is the WHD averaged through all the batch.
        :param prob_map: (B x C x H x W) Tensor of estimated probability map with multiple channels
        :param gt: (B x H x W) Tensor of the GT annotation
        :return: scalar loss, or (loss, per-bound losses) when return_boundwise_loss is True
        """
        eps = 1e-6
        alpha = self.alpha
        beta = self.beta
        _assert_no_grad(gt)
        # assert prob_map.dim() == 4, 'The probability map must be (B x C x H x W)'
        # prob_map size [B, C, T, H, W] or [B, C, H, W] | gt size [B, T, H, W] or [B, H, W]
        if prob_map.dim() == 5: # 3D volume
            prob_map = prob_map.permute(0, 2, 1, 3, 4)
            prob_map = prob_map.contiguous().view(-1, *prob_map.size()[2:]) # combine first 2 dims
            gt = gt.contiguous().view(-1, *gt.size()[2:]) # combine first 2 dims
        batch_size, n_channel, height, width = prob_map.size()
        assert batch_size == len(gt), 'prob map and GT must have the same size'
        max_dist = math.sqrt(height ** 2 + width ** 2)  # image diagonal, caps term 2
        n_pixels = height * width
        # NOTE(review): hard-coded .cuda() — fails on CPU-only machines.
        all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
        all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
        # here we consider inner bound and outer bound respectively
        # Index 0 of this list stays unused; bounds are indexed 1..n_channel-1.
        res_bounds_lst = [[] for _ in range(0, n_channel)]
        for b in range(batch_size):
            prob_map_b, gt_b = prob_map[b], gt[b]
            for bound_inx in range(1, n_channel):
                gt_bb = (gt_b == bound_inx) # for different bounds (1 - inner, 2 - outer)
                gt_pts = torch.nonzero(gt_bb).float() # N, 2
                n_gt_pts = gt_pts.size()[0]
                prob_map_bb = prob_map_b[bound_inx]
                # filter out estimated probs those are lower than threshold
                p = prob_map_bb.view(prob_map_bb.numel())
                p_sel = p[p >= self.thres]
                all_img_locations_sel = all_img_locations[p >= self.thres]
                # print("# img locations : {}, # GT locations : {}".format(len(all_img_locations_sel), n_gt_pts))
                # Both GT points and confident prediction pixels must exist for this bound.
                if n_gt_pts > 0 and len(all_img_locations_sel) > 0:
                    d_matrix = cdist(all_img_locations_sel, gt_pts)
                    n_est_pts = (p_sel**beta).sum()
                    p_replicated = p_sel.view(-1, 1).repeat(1, n_gt_pts)
                    # term 1: probability-weighted mean distance over the kept pixels only
                    term_1 = (1 / (n_est_pts + eps)) * torch.sum(p_sel**beta * torch.min(d_matrix, 1)[0])
                    d_div_p = torch.min((d_matrix + eps) /
                                        (p_replicated**alpha + eps / max_dist), 0)[0]
                    d_div_p = torch.clamp(d_div_p, 0, max_dist)
                    term_2 = torch.mean(d_div_p)
                    # set different ratio for inner and outer bound
                    res_bounds_lst[bound_inx].append(term_1 + term_2)
        # NOTE(review): if a bound accumulated no entries (all filtered out),
        # torch.stack below receives an empty list and raises.
        res_bounds = [torch.stack(res_bounds_lst[i]) for i in range(1, n_channel)]
        res_bounds_mean = [res_bound.mean() for res_bound in res_bounds]
        res_boundwise = torch.stack(res_bounds_mean) # convert list into torch array
        # ratio: inner bound ratio
        res = res_boundwise[0] * self.ratio + res_boundwise[1] * (1.0 - self.ratio)
        if self.return_boundwise_loss: # return inner bound loss and outer bound loss respectively
            return res, res_boundwise
        else:
            return res
# class ModifiedWeightedHausdorffDistanceDoubleBoundLoss(Function):
#
# def __init__(self, return_2_terms=False, alpha=4, beta=1, ratio=0.5, thres=0.5):
# """ modified whd loss for inner and outer bound separately in which
# d(x, y) in term1 is modified as exp{d(x, y)} - 1
# inner bound -- 1, outer bound -- 2
# :param return_2_terms: bool, Whether to return the 2 terms of the WHD instead of their sum. Default: False.
# :param resized_height: int, height after resize
# :param resized_width: int, width after resize
# :param alpha: int, decay factor
# :param ratio: float, ratio of inner bound, default is 0.5
# """
# self.return_2_terms = return_2_terms
# self.alpha = alpha
# self.beta = beta
# self.ratio = ratio
# self.thres = thres
#
# def __call__(self, prob_map, gt):
# """ Compute the Weighted Hausdorff Distance function between the estimated probability map
# and ground truth points. The output is the WHD averaged through all the batch.
# :param prob_map: (B x C x H x W) Tensor of estimated probability map with multiple channels
# :param gt: (B x H x W) Tensor of the GT annotation
# """
# eps = 1e-6
# alpha = self.alpha
# beta = self.beta
# _assert_no_grad(gt)
#
# # assert prob_map.dim() == 4, 'The probability map must be (B x C x H x W)'
# # prob_map size [B, C, T, H, W] or [B, C, H, W] | gt size [B, T, H, W] or [B, H, W]
# if prob_map.dim() == 5: # 3D volume
# prob_map = prob_map.permute(0, 2, 1, 3, 4)
# prob_map = prob_map.contiguous().view(-1, *prob_map.size()[2:]) # combine first 2 dims
# gt = gt.contiguous().view(-1, *gt.size()[2:]) # combine first 2 dims
#
# batch_size, n_channel, height, width = prob_map.size()
# assert batch_size == len(gt), 'prob map and GT must have the same size'
#
# max_dist = math.sqrt(height ** 2 + width ** 2)
# n_pixels = height * width
# all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
# all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
#
# terms_1 = []
# terms_2 = []
# for b in range(batch_size):
# prob_map_b, gt_b = prob_map[b], gt[b]
# for bound_inx in range(1, n_channel):
# gt_bb = (gt_b == bound_inx) # for different bounds (1 - inner, 2 - outer)
# gt_pts = torch.nonzero(gt_bb).float() # N, 2
# n_gt_pts = gt_pts.size(0)
# prob_map_bb = prob_map_b[bound_inx]
#
# # filter out estimated probs those are lower than threshold
# p = prob_map_bb.view(prob_map_bb.numel())
# p_sel = p[p >= self.thres]
# all_img_locations_sel = all_img_locations[p >= self.thres]
#
# if n_gt_pts > 0 and len(all_img_locations_sel) > 0:
# d_matrix = cdist(all_img_locations_sel, gt_pts)
# n_est_pts = (p_sel ** beta).sum()
# p_replicated = p_sel.view(-1, 1).repeat(1, n_gt_pts)
#
# term_1 = (1 / (n_est_pts + eps)) * torch.sum(p_sel ** beta * torch.min(d_matrix, 1)[0])
# d_div_p = torch.min((d_matrix + eps) /
# (p_replicated ** alpha + eps / max_dist), 0)[0]
# d_div_p = torch.clamp(d_div_p, 0, max_dist)
# term_2 = torch.mean(d_div_p)
# # set different ratio for inner and outer bound
# ratio = 2 * self.ratio if bound_inx == 1 else 2 * (1.0 - self.ratio)
#
# terms_1.append(ratio * term_1)
# terms_2.append(ratio * term_2)
#
# terms_1 = torch.stack(terms_1)
# terms_2 = torch.stack(terms_2)
#
# if self.return_2_terms:
# res = terms_1.mean(), terms_2.mean()
# else:
# res = terms_1.mean() + terms_2.mean()
#
# return res
class ModifiedWeightedHausdorffDistanceLoss(Function):
    """ WHD variant whose term 1 zeroes out the contribution of pixels below a
    probability threshold (term 2 still uses all pixels). Used as a plain callable."""
    def __init__(self, return_2_terms=False, alpha=4, thres=0.5):
        """ modified weighted Hausdorff Distance based on pixels whose prob are higher than pre-set threshold
        in experiment, this modification doesn't work well
        :param return_2_terms: bool, Whether to return the 2 terms of the WHD instead of their sum. Default: False.
        :param alpha: int, decay factor
        :param thres: float, probability threshold below which pixels are zeroed in term 1
        """
        self.return_2_terms = return_2_terms
        self.alpha = alpha
        self.thres = thres
    def __call__(self, prob_map, gt):
        """ Compute modified Weighted Hausdorff Distance function between the estimated probability map
        and ground truth points. The output is the WHD averaged through all the batch.
        :param prob_map: (B x H x W) Tensor of the probability map of the estimation.
        :param gt: (B x H x W) Tensor of the GT annotation
        :return: scalar loss, or a (term_1, term_2) pair when return_2_terms is True
        """
        eps = 1e-6
        alpha = self.alpha
        _assert_no_grad(gt)
        assert prob_map.dim() == 3, 'The probability map must be (B x H x W)'
        batch_size, height, width = prob_map.size()
        assert batch_size == len(gt), 'prob map and GT must have the same size'
        max_dist = math.sqrt(height ** 2 + width ** 2)  # image diagonal, caps term 2
        n_pixels = height * width
        # NOTE(review): hard-coded .cuda() — fails on CPU-only machines.
        all_img_locations = torch.meshgrid([torch.arange(height).cuda(), torch.arange(width).cuda()])
        all_img_locations = torch.stack(all_img_locations).permute(1, 2, 0).view(n_pixels, -1).float() # H*W, 2
        terms_1 = []
        terms_2 = []
        for b in range(batch_size):
            prob_map_b, gt_b = prob_map[b], gt[b]
            gt_pts = torch.nonzero(gt_b).float() # N, 2
            n_gt_pts = gt_pts.size()[0]
            if n_gt_pts > 0:
                p = prob_map_b.view(prob_map_b.numel())
                d_matrix = cdist(all_img_locations, gt_pts)
                mask = (p <= self.thres)
                px = p.clone()
                px[mask] = 0.0 # filter out pixels whose prob are lower than pre-set threshold
                n_est_pts = px.sum()
                # NOTE(review): term 2 intentionally uses the UNthresholded p.
                p_replicated = p.view(-1, 1).repeat(1, n_gt_pts)
                # term 1
                term_1 = (1 / (n_est_pts + eps)) * torch.sum(px * torch.min(d_matrix, 1)[0])
                d_div_p = torch.min((d_matrix + eps) /
                                    (p_replicated**alpha + eps / max_dist), 0)[0]
                d_div_p = torch.clamp(d_div_p, 0, max_dist)
                term_2 = torch.mean(d_div_p)
                terms_1.append(term_1)
                terms_2.append(term_2)
        # NOTE(review): if no slice has GT points, torch.stack raises on the empty lists.
        terms_1 = torch.stack(terms_1)
        terms_2 = torch.stack(terms_2)
        if self.return_2_terms:
            res = terms_1.mean(), terms_2.mean()
        else:
            res = terms_1.mean() + terms_2.mean()
        return res
def grad_check():
    """ Debug helper: compare the autograd gradient of GeneralizedDiceLoss with
    an analytically computed one and print their absolute difference.

    NOTE(review): ``g2`` comes from ``gdl.cal_loss`` — presumably that method
    returns the analytic gradient rather than a loss value; verify against the
    GeneralizedDiceLoss implementation. ``weight`` below is created but unused.
    """
    # prepare weight and output
    weight = Variable(torch.ones(5))
    output = torch.rand(20, 5, 96, 96).float()
    output = F.softmax(output, 1)
    output1 = Variable(output, requires_grad=True)
    # output2 = Variable(output, requires_grad=True)
    # prepare target
    # encoded_target = output.data.clone().zero_()
    target = torch.randint(0, 5, (20, 96, 96)).long()
    encoded_target = target
    # encoded_target.scatter_(1, target.unsqueeze(1), 1)
    # encoded_target = torch.rand(2, 5, 10, 10).float()
    encoded_target = Variable(encoded_target, requires_grad=False)
    # print(target)
    gdl = GeneralizedDiceLoss()
    g2 = gdl.cal_loss(output1, encoded_target)
    loss = gdl(output1, encoded_target)
    loss.backward()
    # g1: gradient obtained via autograd
    g1 = output1.grad
    # g2 = output2.grad
    diff = torch.abs(g1 - g2).sum()
    g1_sum = torch.abs(g1).sum()
    print(g1_sum)
    print("grad difference: {}".format(diff.data.item()))
    # # kl_div = nn.KLDivLoss()(output1, encoded_target)
    # print(my_kl_div)
    # print(kl_div)
    #
    # kl_div.backward()
    # g1 = output1.grad
    # my_kl_div.backward()
def check_bound_weight():
    """ Smoke-test the boundary-weight / WHD-loss pipeline on one mask slice.

    Loads a hard-coded mask image, crops the central 160x160 region, converts a
    fake batch of 10 copies to boundary labels, and evaluates
    WeightedHausdorffDistanceLoss against a random probability map, printing
    the resulting loss value.

    NOTE(review): requires CUDA and a machine-local data path; this is a
    debugging helper, not part of the library API.
    """
    # w1/w2 and sigma1/sigma2 are only consumed by the commented-out
    # bound_weight_withdiff check kept below for reference.
    w1, w2 = 50.0, 10.0
    sigma1, sigma2 = 1.0, 1.0
    img_dir = "/data/ugui0/antonio-t/CPR_multiview_interp2/S218801d0c_S2052ee2457ad29_20160809" \
              "/I10/applicate/mask/038.tiff"
    image = io.imread(img_dir)
    image = image[176:336, 176:336]  # central 160x160 crop
    images = np.tile(image, (10, 1, 1))  # fake a batch of 10 identical slices
    bounds = [gray2bounds(image, width=2) for image in images]
    bounds = np.stack(bounds)
    target = torch.from_numpy(bounds).long().cuda()
    # prepare prob map
    prob = torch.rand(10, 160, 160).float().cuda()
    # BUG FIX: WeightedHausdorffDistanceLoss.__init__ accepts only
    # (return_2_terms, alpha, beta) — passing resized_height/resized_width
    # raised TypeError on every call.
    whd_loss = WeightedHausdorffDistanceLoss()
    loss = whd_loss(prob, target)
    print("loss = {}".format(loss.item()))
    # weights = bound_weight_withdiff(target, sigmas=[sigma1, sigma2], ws=[w1, w2], n_classes=3, bound_output=True)
    # plt.figure()
    # plt.imshow(weights[0].cpu().numpy(), cmap='seismic')
    # plt.savefig("./bound_weight/debug_{:.1f}_{:.1f}_{:.1f}_{:.1f}.pdf".format(w1, w2, sigma1, sigma2))
if __name__ == "__main__":
    # Ad-hoc manual checks; earlier experiments are kept commented out for reference.
    # img_dir = "../mask/038.tiff"
    # image = io.imread(img_dir)
    # image[image == 76] = 255
    # image[image == 151] = 255
    # image = gray2mask(image)
    # image = image[176:336, 176:336]
    # plt.figure()
    # plt.imshow(image)
    # plt.savefig("input.png")
    # # plt.show()
    # image = np.tile(image, (128, 1, 1))
    # target = torch.from_numpy(image).long().cuda()
    #
    # x = torch.FloatTensor(128, 3, 160, 160).float().cuda()
    #
    # weight = torch.from_numpy(np.load('nlf_weight_all_3.npy')).cuda().float()
    # loss = CrossEntropyBoundLoss(n_classes=3, weight=weight)(x, target)
    # print(loss.data.item())
    #
    # x = torch.rand(10, 20, 20).float()
    # y = x.repeat(1, 5, 1, 1)
    # print(y.size())
    check_bound_weight()
# _*_ coding: utf-8 _*_
""" metrics used to evaluate the performance of our approach """
from sklearn.preprocessing import label_binarize
from sklearn.metrics import f1_score
import warnings
warnings.filterwarnings('ignore', module='sklearn') # omit sklearn warning
import torch
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from medpy.metric.binary import hd95, asd
from medpy.metric.binary import ravd
torch.set_default_dtype(torch.float32)
def _assert_no_grad(variables):
for var in variables:
assert not var.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as volatile or not requiring gradients"
def cdist(x, y):
    """ All-pairs Euclidean distance matrix.
    :param x: N x d Tensor
    :param y: M x d Tensor
    :return: N x M Tensor; entry [i, j] equals ||x[i, :] - y[j, :]||
    """
    pairwise_diff = x.unsqueeze(1) - y.unsqueeze(0)  # (N, M, d)
    return torch.norm(pairwise_diff, dim=-1)
def volumewise_ahd(preds, targets, return_slicewise_hdf=False, n_classes=3):
    """ Volume-wise averaged Hausdorff distance between predictions and targets.
    Slices whose target contains no foreground are skipped.
    :param preds: Array/Tensor with size [B, D, H, W] or [B, H, W]
    :param targets: Array/Tensor with size [B, D, H, W] or [B, H, W]
    :param return_slicewise_hdf: bool, also return the per-slice AHD list
    :param n_classes: int, number of classes evaluated by the slice metric
    """
    if not isinstance(preds, np.ndarray):
        # Tensors: move to host memory and convert to numpy first.
        preds = preds.data.cpu().numpy()
        targets = targets.data.cpu().numpy()
    assert len(preds) == len(targets), \
        "length of preds should be equal to that of targets, but got {} and {}".format(len(preds), len(targets))
    if preds.ndim == 4 and targets.ndim == 4:
        # Volumes: flatten [B, D, H, W] into a stack of 2D slices.
        preds = np.reshape(preds, (preds.shape[0]*preds.shape[1], *preds.shape[2:]))
        targets = np.reshape(targets, (targets.shape[0]*targets.shape[1], *targets.shape[2:]))
    batch_res = [slicewise_ahd(p, t, n_classes)
                 for p, t in zip(preds, targets) if np.sum(t) != 0]
    mean_ahd = sum(batch_res) / len(batch_res)
    return (mean_ahd, batch_res) if return_slicewise_hdf else mean_ahd
def volumewise_hd95(preds, targets, return_slicewise_hdf=False, n_classes=3):
    """ Volume-wise 95th-percentile Hausdorff distance.
    Slices whose target contains no foreground are skipped.
    :param preds: Array/Tensor with size [B, D, H, W] or [B, H, W]
    :param targets: Array/Tensor with size [B, D, H, W] or [B, H, W]
    :param return_slicewise_hdf: bool, also return the per-slice hd95 list
    :param n_classes: int, number of classes evaluated by the slice metric
    """
    if not isinstance(preds, np.ndarray):
        # Tensors: move to host memory and convert to numpy first.
        preds = preds.data.cpu().numpy()
        targets = targets.data.cpu().numpy()
    assert len(preds) == len(targets), \
        "length of preds should be equal to that of targets, but got {} and {}".format(len(preds), len(targets))
    if preds.ndim == 4 and targets.ndim == 4:
        # Volumes: flatten [B, D, H, W] into a stack of 2D slices.
        preds = np.reshape(preds, (preds.shape[0]*preds.shape[1], *preds.shape[2:]))
        targets = np.reshape(targets, (targets.shape[0]*targets.shape[1], *targets.shape[2:]))
    batch_res = [slicewise_hd95(p, t, n_classes)
                 for p, t in zip(preds, targets) if np.sum(t) != 0]
    mean_hd95 = sum(batch_res) / len(batch_res)
    return (mean_hd95, batch_res) if return_slicewise_hdf else mean_hd95
def slicewise_ahd(pred, target, n_classes=3):
    """ Averaged Hausdorff distance for one slice, averaged over foreground classes.
    :param pred: ndarray with size [H, W], predicted bound
    :param target: ndarray with size [H, W], ground truth
    :param n_classes: int, # of classes (class 0 / background is skipped)
    """
    max_ahd = 2 * target.shape[0]  # penalty when a GT class is entirely missed
    per_class = []
    for c_inx in range(1, n_classes):
        pred_pts = np.array(np.where(pred == c_inx)).transpose()
        gt_pts = np.array(np.where(target == c_inx)).transpose()
        if len(gt_pts) == 0:
            continue  # class absent from GT: nothing to score
        if len(pred_pts) == 0:
            per_class.append(max_ahd)
        else:
            dists = pairwise_distances(pred_pts, gt_pts, metric='euclidean')
            # Sum of the two directional average distances (Ribera's AHD).
            per_class.append(np.average(np.min(dists, axis=0)) +
                             np.average(np.min(dists, axis=1)))
    return sum(per_class) / len(per_class)
def slicewise_hd95(pred, target, n_classes=3):
    """ 95th-percentile Hausdorff distance for one slice, averaged over foreground classes.
    :param pred: ndarray with size [H, W], predicted bound
    :param target: ndarray with size [H, W], ground truth
    :param n_classes: int, # of classes (class 0 / background is skipped)
    """
    max_hd95 = 2 * target.shape[0]  # penalty when a GT class is entirely missed
    per_class = []
    for c_inx in range(1, n_classes):
        pred_mask = (pred == c_inx)
        gt_mask = (target == c_inx)
        if np.sum(gt_mask) == 0:
            continue  # class absent from GT: nothing to score
        per_class.append(max_hd95 if np.sum(pred_mask) == 0
                         else hd95(pred_mask, gt_mask))
    return sum(per_class) / len(per_class)
def channelwise_ahd(pred, target):
    """ Channel-wise averaged Hausdorff distance for multi-channel boundary maps.
    :param pred: ndarray with size [C, H, W], predicted boundary
    :param target: ndarray with size [C, H, W], GT boundary
    :return: float, AHD averaged over channels that contain GT pixels
    """
    max_ahd = 2 * target.shape[1]  # penalty when a GT channel is entirely missed
    per_channel = []
    for c in range(len(pred)):
        pred_pts = np.array(np.where(pred[c])).transpose()
        gt_pts = np.array(np.where(target[c])).transpose()
        if len(gt_pts) == 0:
            continue  # empty GT channel: nothing to score
        if len(pred_pts) == 0:
            per_channel.append(max_ahd)
        else:
            dists = pairwise_distances(pred_pts, gt_pts, metric='euclidean')
            per_channel.append(np.average(np.min(dists, axis=0)) +
                               np.average(np.min(dists, axis=1)))
    return sum(per_channel) / len(per_channel)
def channelwise_hd95(pred, target):
    """ Channel-wise 95th-percentile symmetric Hausdorff distance.
    :param pred: ndarray with size [C, H, W], predicted boundary
    :param target: ndarray with size [C, H, W], GT boundary
    :return: float, hd95 averaged over channels that contain GT pixels
    """
    worst = 2 * target.shape[1]  # penalty when a GT channel is entirely missed
    scores = []
    for c in range(len(pred)):
        if np.sum(target[c]) == 0:
            continue  # empty GT channel: nothing to score
        scores.append(worst if np.sum(pred[c]) == 0
                      else hd95(pred[c], target[c]))
    return sum(scores) / len(scores)
def channelwise_asd(pred, target):
    """ Channel-wise average symmetric surface distance.
    :param pred: ndarray with size [C, H, W], predicted boundary
    :param target: ndarray with size [C, H, W], GT boundary
    :return: float, ASD averaged over channels that contain GT pixels
    """
    worst = 2 * target.shape[1]  # penalty when a GT channel is entirely missed
    scores = []
    for c in range(len(pred)):
        if np.sum(target[c]) == 0:
            continue  # empty GT channel: nothing to score
        scores.append(worst if np.sum(pred[c]) == 0
                      else asd(pred[c], target[c]))
    return sum(scores) / len(scores)
def slicewise_asd(pred, target, n_classes=3):
    """ Average symmetric surface distance for one slice, averaged over foreground classes.
    :param pred: ndarray with size [H, W], predicted bound
    :param target: ndarray with size [H, W], ground truth
    :param n_classes: int, # of classes (class 0 / background is skipped)
    """
    max_asd = 2 * target.shape[0]  # penalty when a GT class is entirely missed
    per_class = []
    for c_inx in range(1, n_classes):
        pred_mask = (pred == c_inx)
        gt_mask = (target == c_inx)
        if np.sum(gt_mask) == 0:
            continue  # class absent from GT: nothing to score
        per_class.append(max_asd if np.sum(pred_mask) == 0
                         else asd(pred_mask, gt_mask))
    return sum(per_class) / len(per_class)
def volumewise_asd(preds, targets, n_classes=3):
    """ Volume-wise average symmetric surface distance
    (mean over slices whose target has foreground). """
    scores = [slicewise_asd(p, t, n_classes)
              for p, t in zip(preds, targets) if np.sum(t) != 0]
    return sum(scores) / len(scores)
def slicewise_ravd(pred, target):
    """Absolute relative volume difference for a single slice.

    :raises IOError: if the target has no foreground pixel (ravd is undefined
        in that case, so callers must filter such slices out beforehand)
    """
    if np.sum(target) != 0:
        return abs(ravd(pred, target))
    # in this case ravd should not be calculated for this slice
    raise IOError("target should contain at least one nonzero pixel")
def volumewise_ravd(preds, targets):
    """ calculate volume-wise ravd (absolute relative volume difference)

    Slices with an empty target are skipped, because ravd is undefined for
    them (slicewise_ravd raises on an all-zero target).

    :param preds: iterable of 2D prediction slices
    :param targets: iterable of 2D GT slices, parallel to preds
    :return: float, mean |ravd| over the evaluated slices
    """
    ravds = []
    for pred, target in zip(preds, targets):
        if np.sum(target) != 0:
            ravds.append(slicewise_ravd(pred, target))
    # BUGFIX: average over the slices actually evaluated, not len(preds);
    # dividing by len(preds) silently counted every skipped slice as ravd == 0
    # (volumewise_asd already divides by the number of evaluated slices).
    return (sum(ravds) / len(ravds))
def cal_f_score(preds, labels, n_class=5, return_class_f1= False, return_slice_f1=False):
    """ calculate average f1_score of given output and target batch

    Each slice is binarized per class and scored with sklearn's f1_score;
    classes absent from a slice's GT are excluded from that slice's average.

    :param preds: torch tensor or ndarray (n_batches, *), predicted labels
    :param labels: torch tensor or ndarray (n_batches, *), GT labels
    :param n_class: int, number of classes; for n_class == 2 the one-hot
        columns are built by hand (label_binarize collapses binary input
        into a single column)
    :param return_class_f1: bool, also return per-class F1 sums and the
        per-class count of slices whose GT contains that class
    :param return_slice_f1: bool, also return the list of per-slice average F1
    :return: ave_f_score_batch (float), plus extras depending on the flags
    """
    # accept either torch tensors or numpy arrays
    if not isinstance(preds, np.ndarray):
        n_batch = preds.size(0)
        preds_np, labels_np = preds.cpu().numpy(), labels.data.cpu().numpy()
    else:
        n_batch = preds.shape[0]
        preds_np, labels_np = preds, labels
    f_scores = np.zeros(n_class, dtype=np.float32)         # per-class F1 sums over the batch
    n_effect_samples = np.zeros(n_class, dtype=np.uint32)  # per-class #slices with that class in GT
    ave_f_score_batch = 0.0
    f_scores_batch = []
    for pred, label in zip(preds_np, labels_np):
        if n_class > 2:
            label_binary = label_binarize(label.flatten(), classes=range(n_class))
            pred_binary = label_binarize(pred.flatten(), classes=range(n_class))
        else:
            # binary case: build the two-column one-hot encoding manually
            label_binary = np.stack([1 - label.flatten(), label.flatten()], axis=1)
            pred_binary = np.stack([1 - pred.flatten(), pred.flatten()], axis=1)
        f_score = np.zeros(n_class, dtype=np.float32)
        effect_class_per_slice = np.ones(n_class, dtype=np.uint8)
        for i in range(n_class):
            if np.sum(label_binary[:, i]) == 0:
                # class not present in this slice's GT: excluded from its average
                f_score[i] = 0.0
                effect_class_per_slice[i] = 0
            else:
                n_effect_samples[i] += 1
                f_score[i] = f1_score(label_binary[:, i], pred_binary[:, i])
        f_scores += f_score
        if n_class > 2:
            ave_f_score_per_slice = f_score.sum() / effect_class_per_slice.sum()
        else:
            # binary: report only the foreground-class F1
            ave_f_score_per_slice = f_score[1]
        # calculate f1 score for each batch to do hard mining
        f_scores_batch.append(ave_f_score_per_slice)
        ave_f_score_batch += ave_f_score_per_slice
    ave_f_score_batch = ave_f_score_batch / n_batch
    if return_class_f1 and return_slice_f1:
        return ave_f_score_batch, f_scores, n_effect_samples, f_scores_batch
    elif return_class_f1:
        return ave_f_score_batch, f_scores, n_effect_samples
    elif return_slice_f1:
        return ave_f_score_batch, f_scores_batch
    else:
        return ave_f_score_batch
def cal_f_score_slicewise(preds, labels, n_class=5, return_class_f1=False, return_slice_f1=False):
    """ calculate average f1_score of given output and target batch, slice by slice

    Volumes are iterated slice-wise; the batch average is taken over the
    total number of slices, while f_scores_batch holds one mean F1 per volume.

    :param preds: tensor/ndarray (n_batches, D, H, W), predicted labels
    :param labels: tensor/ndarray (n_batches, D, H, W), GT labels
    :param n_class: int, number of classes
    :param return_class_f1: bool, whether return class-wise F1 score or not
    :param return_slice_f1: bool, whether return slice-wise/volume-wise F1 score or not
    """
    # if tensor data, convert to numpy first
    if not isinstance(preds, np.ndarray):
        n_batch = preds.size(0)
        preds_np, labels_np = preds.cpu().numpy(), labels.data.cpu().numpy()
    else:
        n_batch = preds.shape[0]
        preds_np, labels_np = preds, labels
    # print("preds shape: {}".format(preds_np.shape))
    f_scores = np.zeros(n_class, dtype=np.float32)         # per-class F1 sums over all slices
    n_effect_samples = np.zeros(n_class, dtype=np.uint32)  # per-class #slices with that class in GT
    ave_f_score_batch = 0.0
    f_scores_batch = []
    n_slices = 0
    for pred_vol, label_vol in zip(preds_np, labels_np):
        n_slices += len(pred_vol)
        # calculate average F1 score for a volume
        f_scores_vol = 0.0
        for pred, label in zip(pred_vol, label_vol):
            label_binary = label_binarize(label.flatten(), classes=range(n_class))
            pred_binary = label_binarize(pred.flatten(), classes=range(n_class))
            f_score = np.zeros(n_class, dtype=np.float32)
            effect_class_per_slice = np.ones(n_class, dtype=np.uint8)
            for i in range(n_class):
                if np.sum(label_binary[:, i]) == 0:
                    # class absent from this slice's GT: excluded from its average
                    f_score[i] = 0.0
                    effect_class_per_slice[i] = 0
                else:
                    n_effect_samples[i] += 1
                    f_score[i] = f1_score(label_binary[:, i], pred_binary[:, i])
            f_scores += f_score
            ave_f_score_per_slice = f_score.sum() / effect_class_per_slice.sum()
            f_scores_vol += ave_f_score_per_slice
            ave_f_score_batch += ave_f_score_per_slice
        # one mean-F1 entry per volume
        f_scores_batch.append(f_scores_vol/len(pred_vol))
    ave_f_score_batch = ave_f_score_batch / n_slices
    if return_class_f1 and return_slice_f1:
        return ave_f_score_batch, f_scores, n_effect_samples, f_scores_batch
    elif return_class_f1:
        return ave_f_score_batch, f_scores, n_effect_samples
    elif return_slice_f1:
        return ave_f_score_batch, f_scores_batch
    else:
        return ave_f_score_batch
def slicewise_multiclass_f1(pred, label, n_class=3):
    """ calculate slice-wise F1 score with multi-class outputs

    :param pred: ndarray of size [H, W], predicted seg result
    :param label: ndarray of size [H, W], GT seg annotation
    :param n_class: int, number of classes
    :return: ave_slice_f1, mean F1 over the classes present in the GT
    """
    label_onehot = label_binarize(label.flatten(), classes=range(n_class))
    pred_onehot = label_binarize(pred.flatten(), classes=range(n_class))
    scores = np.zeros(n_class, dtype=np.float32)
    n_present = 0
    for cls in range(n_class):
        if np.sum(label_onehot[:, cls]) == 0:
            # class missing from GT: contributes nothing to the average
            scores[cls] = 0.0
        else:
            n_present += 1
            scores[cls] = f1_score(label_onehot[:, cls], pred_onehot[:, cls])
    return np.sum(scores) / n_present
# if __name__ == "__main__":
# label = torch.zeros(1, 64, 64, 16).long()
# label[0:8, 0:32, 0:32, 0:3] = 1
# pred = torch.zeros(1, 64, 64, 16).long()
# # pred[9:12, 16:48, 16:48, 2:7] = 1
# print("Dice score: {}".format(bin_dice_score(label, pred)))
# print("F1 score: {}".format(bin_f_score(label, pred))) | Python |
3D | kkhuang1990/PlaqueDetection | __init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | vision.py | .py | 42,317 | 920 | # _*_ coding: utf-8 _*_
""" functions for visualization """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from utils import mask2rgb
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from metric import cal_f_score
import numpy as np
import _pickle as pickle
from sklearn.preprocessing import label_binarize
from sklearn.metrics import f1_score
import os
import os.path as osp
import itertools
from sklearn.metrics import confusion_matrix
from utils import gray2rgb, gray2rgb, mask2outerbound, mask2innerouterbound
from sklearn.metrics import auc
from sklearn.metrics.pairwise import pairwise_distances
from metric import slicewise_hd95
import matplotlib.animation as animation
# for customed colormap
from matplotlib import cm
from matplotlib.colors import ListedColormap
def sample_stack(stack, rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name = None):
    """Plot a grid of gray-scale slices sampled from an image stack.

    Every `show_every`-th slice starting at `start_with` is drawn into a
    rows x cols grid; when fig_name is given the figure is saved as
    '<fig_name>.png'.

    Args:
        stack: numpy ndarray, input stack to plot
    """
    _, axes = plt.subplots(rows, cols, figsize=[scale*cols, scale*rows])
    for cell in range(rows * cols):
        slice_inx = start_with + cell * show_every
        if slice_inx >= len(stack):
            continue  # ran out of slices: leave the remaining cells blank
        panel = axes[cell // cols, cell % cols]
        panel.set_title('slice %d' % slice_inx)
        panel.imshow(stack[slice_inx], cmap=plt.cm.gray)
        panel.axis('off')
    if fig_name:
        plt.savefig(fig_name+'.png')
    plt.close()
    # plt.show()
def sample_stack_color(stack, metrics, rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name = None):
    """Plot a grid of RGB-converted slices, titling each with its F1 score.

    Args:
        stack: numpy ndarray, input stack to plot (converted via gray2rgb)
        metrics: sequence of per-slice F1 values, indexed like stack
    """
    _, axes = plt.subplots(rows, cols, figsize=[scale*cols, scale*rows])
    for cell in range(rows * cols):
        slice_inx = start_with + cell * show_every
        if slice_inx >= len(stack):
            continue  # no more slices for the remaining grid cells
        panel = axes[cell // cols, cell % cols]
        panel.set_title('F1= {:.4f}'.format(metrics[slice_inx]))
        panel.imshow(gray2rgb(stack[slice_inx]))
        panel.axis('off')
    if fig_name:
        plt.savefig(fig_name+'.png')
    plt.close()
def plot_metrics(metrics, labels, fig_dir):
    """Plot per-phase curves for each metric and save them under fig_dir.

    Args:
        metrics: list, with each element a dict mapping phase -> value list
        labels: list, label for each metric (used as title/filename)
        fig_dir: str, output directory (created when missing)
    """
    if fig_dir and not osp.exists(fig_dir):
        os.makedirs(fig_dir)
    for curve, metric_name in zip(metrics, labels):
        plt.figure()
        for phase, values in curve.items():
            plt.plot(values, label=phase)
        plt.title("{} at different epoches".format(metric_name))
        plt.xlabel("epoch")
        plt.ylabel(metric_name)
        plt.legend()
        plt.savefig("./{}/{}.png".format(fig_dir, metric_name))
        plt.close()
def plot_class_f1(metrics, fig_dir):
    """Plot class-wise F1 curves over epochs, one figure per phase.

    :param metrics: dict, phase -> list of per-epoch class-F1 sequences
    :param fig_dir: str, directory where the figures are written
    """
    class_names = ['background', 'central part', 'outline', 'cal plaque', 'non-cal plaque']
    for phase, history in metrics.items():
        plt.figure()
        # transpose: list of per-epoch class vectors -> per-class epoch series
        per_class = [[epoch_f1[c] for epoch_f1 in history] for c in range(len(history[0]))]
        for class_name, series in zip(class_names, per_class):
            plt.plot(series, label=class_name)
        plt.title("class-wise F1-score at different epoches")
        plt.xlabel("epoch")
        plt.ylabel("F1-score")
        plt.legend()
        plt.savefig("./{}/class-wise_F1_{}.png".format(fig_dir, phase))
        plt.close()
def sample_list2(data_list, rows=15, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0, n_class=5):
    """ show sample of a list of data
    this function is mainly for plotting outputs, predictions as well as average F1 scores

    Columns per row: one column per input channel | GT | prediction |
    per-class F1 scatter (titled with the slice's average F1).

    Args:
        data_list: list, list of data in which each element is a dictionary
            with keys 'input' (1 or 2 channels), 'GT' and 'pred'
        start_inx: int, starting slice index for current figure
    """
    input_cols = len(data_list[0]['input'])
    if input_cols == 1:
        input_names = ['input']
    elif input_cols == 2:
        input_names = ['input(cal)', 'input(non-cal)']
    cols = 4 + input_cols - 1
    # n_class = data_list[0]['pred'].shape[0]
    n_batch = len(data_list)
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for ind in range(n_batch):
        # read data and calculate average precision
        input = data_list[ind]['input']
        # output = data_list[ind]['output']
        label = data_list[ind]['GT']
        pred = data_list[ind]['pred']
        # calculate F score and average precision
        # output = np.transpose(output, (1, 2, 0))
        # output = np.reshape(output, (-1, n_class))
        label_binary = label_binarize(label.flatten(), classes=range(n_class))
        pred_binary = label_binarize(pred.flatten(), classes=range(n_class))
        f_score = np.zeros(n_class, dtype=np.float32)
        slice_effect_class = 0
        for i in range(n_class):
            if np.sum(label_binary[:,i]) == 0:
                # class absent from GT: excluded from the slice average
                f_score[i] = 0.0
            else:
                slice_effect_class += 1
                f_score[i] = f1_score(label_binary[:,i], pred_binary[:,i])
        ave_f_score = np.sum(f_score)/slice_effect_class
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                for col in range(input_cols):
                    ax[i, col].imshow(input[col], cmap='gray')
                    ax[i, col].set_title("Slice {} : {}".format(ind+start_inx, input_names[col]))
                    ax[i, col].axis('off')
                ax[i, input_cols].imshow(mask2rgb(label))
                ax[i, input_cols].set_title('Slice %d : %s' % (ind+start_inx, 'ground truth'))
                ax[i, input_cols].axis('off')
                ax[i, input_cols+1].imshow(mask2rgb(pred))
                ax[i, input_cols+1].set_title('Slice %d : %s' % (ind+start_inx, 'prediction'))
                ax[i, input_cols+1].axis('off')
                ax[i, input_cols+2].scatter(range(0,n_class), f_score)
                ax[i, input_cols+2].set_title('Slice %d : Ave F-score = %0.2f' % (ind+start_inx, ave_f_score))
                ax[i, input_cols+2].set_ylabel('F score')
                ax[i, input_cols+2].set_ylim([-0.1, 1.1])
    # plt.show()
    if fig_name:
        plt.savefig(fig_name + '.png')
    plt.close()
def sample_list_hdf(data_list, rows=15, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0, n_class=5):
    """ show results as a list with Hausdorff distance calculated from each slice

    Columns per row: input | GT | prediction (titled with hd95) |
    GT/pred overlap | one prob-map column per non-background output channel.

    Args:
        data_list: list, list of data in which each element is a dictionary
            with keys 'input', 'GT', 'pred' and 'output' ([C, H, W])
        start_inx: int, starting slice index for current figure
    """
    output_cols = len(data_list[0]['output']) # whether single or multiple channels
    cols = 5 + output_cols - 1
    n_batch = len(data_list)
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for ind in range(n_batch):
        input = data_list[ind]['input']
        label = data_list[ind]['GT']
        pred = data_list[ind]['pred']
        output = data_list[ind]['output'] # [C, H, W]
        # slice-wise 95th-percentile Hausdorff distance between pred and GT
        hdf = slicewise_hd95(pred, label, n_class)
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                ax[i, 0].imshow(input, cmap='gray') # we don't consider multiple inputs here
                ax[i, 0].set_title("Slice {} : {}".format(ind+start_inx, 'input'))
                ax[i, 0].axis('off')
                ax[i, 1].imshow(mask2rgb(label))
                ax[i, 1].set_title('Slice %d : %s' % (ind+start_inx, 'ground truth'))
                ax[i, 1].axis('off')
                ax[i, 2].imshow(mask2rgb(pred))
                ax[i, 2].set_title("Slice {:d} : prediction (hdf={:.4f})".format(ind+start_inx, hdf))
                ax[i, 2].axis('off')
                # plot overlapping between pred and GT annotation
                overlap = pred.copy()
                overlap[label != 0] = 4  # GT pixels drawn in the class-4 color on top of pred
                ax[i, 3].imshow(mask2rgb(overlap))
                ax[i, 3].set_title("Slice {:d} : {}".format(ind + start_inx, 'overlap of GT and pred'))
                ax[i, 3].axis('off')
                # plot prob map for different channels
                # if more than 3 channels, plot all channels which are not equal to 0
                output_title = ['prob map (inner bound)', 'prob map (outer bound)'] if output_cols >= 3 else ['prob map']
                for c_inx in range(1, output_cols):
                    ax[i, 3 + c_inx].imshow(output[c_inx], cmap='seismic')
                    ax[i, 3 + c_inx].set_title("Slice {:d} : {}".format(ind + start_inx, output_title[c_inx-1]))
                    ax[i, 3 + c_inx].axis('off')
    # plt.show()
    if fig_name:
        plt.savefig(fig_name + '.pdf')
    plt.close()
def sample_seg_with_hfd(data_list, rows=15, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0,
                        n_class=5, width=1):
    """ show segmentation result with bound and corresponding hdf calculated
    plot input, annotation, prediction, bounds and F1 scores

    Columns per row: input | GT | prediction | bound overlap (titled with
    hd95 between GT and pred bounds) | per-class F1 scatter.

    :param data_list: list, list of data in which each element is a dictionary
        with keys 'input', 'GT' and 'pred'
    :param start_inx: int, starting slice index for current figure
    :param width: int, bound width used by mask2innerouterbound """
    cols = 5
    n_batch = len(data_list)
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for ind in range(n_batch):
        input = data_list[ind]['input']
        # print("input shape: {}".format(input.shape))
        label = data_list[ind]['GT']
        pred = data_list[ind]['pred']
        # calculate average F1 score
        label_binary = label_binarize(label.flatten(), classes=range(n_class))
        pred_binary = label_binarize(pred.flatten(), classes=range(n_class))
        f_score = np.zeros(n_class, dtype=np.float32)
        slice_effect_class = 0
        for i in range(n_class):
            if np.sum(label_binary[:,i]) == 0:
                # class absent from GT: excluded from the slice average
                f_score[i] = 0.0
            else:
                slice_effect_class += 1
                f_score[i] = f1_score(label_binary[:,i], pred_binary[:,i])
        ave_f_score = np.sum(f_score)/slice_effect_class
        # calculate HDF between pred and GT bound
        label_bound = mask2innerouterbound(label, width=width)
        pred_bound = mask2innerouterbound(pred, width=width)
        hdf = slicewise_hd95(pred_bound, label_bound, n_class)
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                ax[i, 0].imshow(input, cmap='gray')
                ax[i, 0].set_title("Slice {} : {}".format(ind+start_inx, 'input'))
                ax[i, 0].axis('off')
                ax[i, 1].imshow(mask2rgb(label))
                ax[i, 1].set_title('Slice %d : %s' % (ind+start_inx, 'ground truth'))
                ax[i, 1].axis('off')
                ax[i, 2].imshow(mask2rgb(pred))
                ax[i, 2].set_title('Slice %d : %s' % (ind+start_inx, 'prediction'))
                ax[i, 2].axis('off')
                # print("# of non-cal pixels in label: {}, in pred: {}".format(np.sum(label == 4), np.sum(pred == 4)))
                # plot overlapping between pred_bound and label_bound
                overlap = pred_bound.copy()
                overlap[label_bound != 0] = 4  # GT bound drawn in the class-4 color
                ax[i, 3].imshow(mask2rgb(overlap))
                ax[i, 3].set_title("Slice {:d} : bound hdf={:.4f}".format(ind + start_inx, hdf))
                ax[i, 3].axis('off')
                ax[i, 4].scatter(range(0, n_class), f_score)
                ax[i, 4].set_title('Slice %d : Ave F-score = %0.2f' % (ind+start_inx, ave_f_score))
                ax[i, 4].set_ylabel('F score')
                ax[i, 4].set_ylim([-0.1, 1.1])
    if fig_name:
        plt.savefig(fig_name + '.pdf')
    plt.close()
#####################################################################################
## input | GT seg | pred seg | bound (overlap with GT bound) | inner probmap | outer probmap | F1 score
#####################################################################################
def sample_wnet(data_list, rows=15, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0,
                n_class=5, width=1):
    """ show segmentation result with bound and corresponding hdf calculated
    plot input, annotation, prediction, bounds and F1 scores

    Columns per row: input | GT | prediction | bound overlap (titled with
    hd95) | one prob-map column per non-background bound channel |
    per-class F1 scatter.

    :param data_list: list, list of data in which each element is a dictionary
        with keys 'input', 'GT', 'pred' and 'bound' (predicted bound probmap)
    :param start_inx: int, starting slice index for current figure """
    n_probmaps = data_list[0]['bound'].shape[0] # number of bounds
    cols = 5 + n_probmaps - 1
    n_batch = len(data_list)
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for ind in range(n_batch):
        input = data_list[ind]['input']
        # print("input shape: {}".format(input.shape))
        label = data_list[ind]['GT']
        pred = data_list[ind]['pred']
        bound_probmap = data_list[ind]['bound'] # predicted bound probmap
        # calculate average F1 score
        label_binary = label_binarize(label.flatten(), classes=range(n_class))
        pred_binary = label_binarize(pred.flatten(), classes=range(n_class))
        f_score = np.zeros(n_class, dtype=np.float32)
        slice_effect_class = 0
        for i in range(n_class):
            if np.sum(label_binary[:,i]) == 0:
                # class absent from GT: excluded from the slice average
                f_score[i] = 0.0
            else:
                slice_effect_class += 1
                f_score[i] = f1_score(label_binary[:,i], pred_binary[:,i])
        ave_f_score = np.sum(f_score)/slice_effect_class
        # calculate average HFD
        label_bound = mask2innerouterbound(label, width=width)
        pred_bound = mask2innerouterbound(pred, width=width)
        hdf = slicewise_hd95(pred_bound, label_bound, n_class)
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                ax[i, 0].imshow(input, cmap='gray')
                ax[i, 0].set_title("Slice {} : {}".format(ind+start_inx, 'input'))
                ax[i, 0].axis('off')
                ax[i, 1].imshow(mask2rgb(label))
                ax[i, 1].set_title('Slice %d : %s' % (ind+start_inx, 'ground truth'))
                ax[i, 1].axis('off')
                ax[i, 2].imshow(mask2rgb(pred))
                ax[i, 2].set_title('Slice %d : %s' % (ind+start_inx, 'prediction'))
                ax[i, 2].axis('off')
                # plot overlapping between pred_bound and label_bound
                overlap = pred_bound.copy()
                overlap[label_bound != 0] = 4  # GT bound drawn in the class-4 color
                ax[i, 3].imshow(mask2rgb(overlap))
                ax[i, 3].set_title("Slice {:d} : bound hdf={:.4f}".format(ind + start_inx, hdf))
                ax[i, 3].axis('off')
                # plot prob maps for intermediate bounds
                output_title = ['prob map (inner bound)', 'prob map (outer bound)'] if n_probmaps >= 3 else ['prob map']
                for c_inx in range(1, n_probmaps):
                    ax[i, 3 + c_inx].imshow(bound_probmap[c_inx], cmap='seismic')
                    ax[i, 3 + c_inx].set_title("Slice {:d} : {}".format(ind + start_inx, output_title[c_inx - 1]))
                    ax[i, 3 + c_inx].axis('off')
                ax[i, 3 + n_probmaps].scatter(range(0, n_class), f_score)
                ax[i, 3 + n_probmaps].set_title('Slice %d : Ave F-score = %0.2f' % (ind+start_inx, ave_f_score))
                ax[i, 3 + n_probmaps].set_ylabel('F score')
                ax[i, 3 + n_probmaps].set_ylim([-0.1, 1.1])
    if fig_name:
        plt.savefig(fig_name + '.pdf')
    plt.close()
def plaque_detection_rate(labels, preds, n_classes=5, thres=0):
    """ calculate calcified and non-calcified plaque detection accuracy
    as well as slice-wise recall/precision/F1

    A slice is GT-positive for a plaque class when its GT contains at least
    one pixel of that class, and predicted-positive when the prediction
    contains at least `thres` pixels of it (note: thres == 0 marks every
    slice predicted-positive, since sum >= 0 always holds).

    :param labels: ndarray or torch tensor, [N, H, W] (2D) or [N, D, H, W] (volumes)
    :param preds: same shape as labels, predicted class maps
    :param n_classes: int, 5-class segmentation uses classes (3, 4) as
        (cal, non-cal) plaques; any other value uses classes (0, 1)
    :param thres: int, minimum predicted pixel count to call a slice positive
    :return: (cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp) counts
    """
    if not isinstance(labels, np.ndarray):
        labels = labels.data.cpu().numpy()
        preds = preds.cpu().numpy()
    # 5-class segmentation or 2-class segmentation
    plaques = [3, 4] if n_classes == 5 else [0, 1]
    pgt_cnt = [0, 0]  # slices whose GT contains the plaque
    pp_cnt = [0, 0]   # slices predicted positive
    tp_cnt = [0, 0]   # slices both GT-positive and predicted-positive

    def _count_slice(label, pred):
        # accumulate GT-positive / pred-positive / true-positive per plaque type
        for inx, plaque in enumerate(plaques):
            gt_pos = np.sum(label == plaque) != 0
            pred_pos = np.sum(pred == plaque) >= thres
            if gt_pos:
                pgt_cnt[inx] += 1
            if pred_pos:
                pp_cnt[inx] += 1
            if gt_pos and pred_pos:
                tp_cnt[inx] += 1

    if labels.ndim == 3:  # batch of 2D slices
        for label, pred in zip(labels, preds):
            _count_slice(label, pred)
    elif labels.ndim == 4:  # batch of volumes: counted slice-wise
        for label_vol, pred_vol in zip(labels, preds):
            for label, pred in zip(label_vol, pred_vol):
                _count_slice(label, pred)
    return pgt_cnt[0], pp_cnt[0], tp_cnt[0], pgt_cnt[1], pp_cnt[1], tp_cnt[1]
def plot_risk_confusion_matrix(y_test, y_pred, root_fig_path):
    """Plot raw and row-normalized confusion matrices for the 4 risk classes.

    :param y_test: array-like, ground-truth risk labels
    :param y_pred: array-like, predicted risk labels
    :param root_fig_path: str, directory where the two figures are saved
    """
    def plot_confusion_matrix(cm, classes,
                              normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        The figure is saved to '<title>.png' when title is given.
        """
        if normalize:
            # normalize each row (true class) so it sums to 1
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            print("Normalized confusion matrix")
        else:
            print('Confusion matrix, without normalization')
        print(cm)
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title.split('/')[-1])
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)
        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        # annotate every cell, switching to white text on dark backgrounds
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        # plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        if title:
            plt.savefig(title+'.png')
        plt.close()
    if not osp.exists(root_fig_path):
        os.makedirs(root_fig_path)
    # Compute confusion matrix
    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
    class_names = ['1', '2', '3', '4']
    # Plot non-normalized confusion matrix
    plt.figure()
    title = root_fig_path + '/' + 'Confusion_matrix_without_normalization'
    plot_confusion_matrix(cnf_matrix, classes=class_names,
                          title=title)
    # Plot normalized confusion matrix
    plt.figure()
    title = root_fig_path + '/' + 'Normalized_confusion_matrix'
    plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                          title=title)
# sample data for assignment from Harada sensei
def sample_list3(data_list, rows=15, cols=4, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0):
    """ show sample of a list of data
    here we plot slice, label, hu0050, overlap
    this function is mainly for plotting outputs, predictions as well as average F1 scores

    NOTE(review): the body indexes columns 0..5, so callers must pass
    cols >= 6 — the default cols=4 would raise IndexError; confirm intended
    call sites.

    Args:
        data_list: list, list of data in which each element is a dictionary
            with keys 'slice1', 'slice2', 'label', 'hu0050', 'overlap', 'f1',
            'mix_overlap', 'noncal_eval' and 'file_path'
        start_inx: int, starting slice index for current figure
    """
    n_batch = len(data_list)
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for ind in range(n_batch):
        # read data and calculate average precision
        input1 = data_list[ind]['slice1']
        input2 = data_list[ind]['slice2']
        label = data_list[ind]['label']
        hu0050 = data_list[ind]['hu0050']
        overlap = data_list[ind]['overlap']
        f_score = data_list[ind]['f1']
        mix_overlap = data_list[ind]['mix_overlap']
        noncal_eval = data_list[ind]['noncal_eval']
        file_path = data_list[ind]['file_path']
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                ax[i, 0].imshow(input1, cmap='gray')
                ax[i, 0].set_title("Slice {} ({}) \n {}".format(ind + start_inx, file_path, 'Input with HU(-100~155)'), loc='left')
                ax[i, 0].axis('off')
                ax[i, 1].imshow(input2, cmap='gray')
                ax[i, 1].set_title("{}".format('Input with HU(200~1200)'))
                ax[i, 1].axis('off')
                ax[i, 2].imshow(gray2rgb(label))
                ax[i, 2].set_title('{}'.format('Label'))
                ax[i, 2].axis('off')
                ax[i, 3].imshow(gray2rgb(hu0050))
                ax[i, 3].set_title('{}'.format('Mask HU(0~50)'))
                ax[i, 3].axis('off')
                ax[i, 4].imshow(gray2rgb(overlap))
                ax[i, 4].set_title('{} (F1= {:.4f})'.format('Overlap', f_score))
                ax[i, 4].axis('off')
                # not all red pixels are within HU range 0~50
                if(np.sum(overlap == 76)) != 0:
                    # noncal_eval layout: [n_above50, n_below0, top-5 HU, bottom-5 HU]
                    n_above50, n_below0, topk, buttomk = noncal_eval[0], noncal_eval[1], noncal_eval[2:7], noncal_eval[7:12]
                    ax[i, 4].text(5, 30, "top5 HU: {}".format(topk), color='red')
                    ax[i, 4].text(5, 60, "but5 HU: {}".format(buttomk), color='red')
                    ax[i, 4].text(5, 90, "Num of pixels HU>50: {}".format(n_above50), color='red')
                    ax[i, 4].text(5, 120, "Num of pixels HU<0: {}".format(n_below0), color='red')
                ax[i, 5].imshow(gray2rgb(mix_overlap))
                ax[i, 5].set_title('{} (F1= {:.4f})'.format('Label+Overlap', f_score))
                ax[i, 5].axis('off')
                # ax[i, 3].scatter(range(0, n_class), f_score)
                # ax[i, 3].set_title('Slice %d : Ave F-score = %0.2f' % (ind + start_inx, ave_f_score))
                # ax[i, 3].set_ylabel('F score')
                # ax[i, 3].set_ylim([-0.1, 1.1])
    # plt.show()
    if fig_name:
        plt.savefig(fig_name + '.pdf')
    plt.close()
def plot_slice_wise_measures(labels, preds, args):
    """ In test phase, plot various measures such as ROC, AUC, PR, RC, F1 et al

    Sweeps the pixel-count threshold from 500 down to 0 (step 5), computing
    slice-wise precision/recall/F1 and ROC points for calcified and
    non-calcified plaques, then saves ROC and PR/RC/F1 curves under
    args.fig_dir.

    :param labels: GT slices, as accepted by plaque_detection_rate
    :param preds: predicted slices, same shape as labels
    :param args: namespace with attribute fig_dir (output directory)
    """
    cal_roc = [[], []]
    cal_prrcf1 = [[], [], []] # save PR, RC, F1 respectively
    noncal_prrcf1 = [[], [], []]
    thres_all = []
    noncal_roc = [[], []]
    n_slices = len(labels)
    for thres in range(500, -1, -5):
        print("[Threshold # of pixels: {}]".format(thres))
        thres_all.append(thres)
        cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = \
            plaque_detection_rate(labels, preds, thres=thres)
        cal_prrcf1[0].append(float(cal_tp) / cal_pp if cal_pp != 0 else 0.0)
        cal_prrcf1[1].append(float(cal_tp) / cal_pgt)
        cal_prrcf1[2].append(2.0 * cal_tp / (cal_pgt + cal_pp))
        noncal_prrcf1[0].append(float(noncal_tp) / noncal_pp if noncal_pp != 0 else 0.0)
        noncal_prrcf1[1].append(float(noncal_tp) / noncal_pgt)
        noncal_prrcf1[2].append(2.0 * noncal_tp / (noncal_pgt + noncal_pp))
        cal_roc[0].append((cal_pp - cal_tp) / (n_slices - cal_pgt)) # false positive rate: FP / negatives
        cal_roc[1].append(cal_tp / cal_pgt) # true positive ratio
        noncal_roc[0].append((noncal_pp - noncal_tp) / (n_slices - noncal_pgt)) # false positive rate: FP / negatives
        noncal_roc[1].append(noncal_tp / noncal_pgt) # true positive ratio
        print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(
            cal_prrcf1[0][-1], cal_prrcf1[1][-1], cal_prrcf1[2][-1],
            noncal_prrcf1[0][-1], noncal_prrcf1[1][-1], noncal_prrcf1[2][-1]))
        print('Cal: fpr - {:.4f} tpr - {:.4f} Noncal: fpr - {:.4f} tpr - {:.4f}'.format(
            cal_roc[0][-1], cal_roc[1][-1], noncal_roc[0][-1], noncal_roc[1][-1]))
    # plot the roc curve and calculate AUC
    fig_names = ['calcified', 'non-calcified']
    for plq_metrics, fig_name in zip([cal_roc, noncal_roc], fig_names):
        plt.figure()
        lw = 2
        auc_metric = auc(plq_metrics[0], plq_metrics[1])
        print("{} : {}".format(fig_name, auc_metric))
        plt.plot(plq_metrics[0], plq_metrics[1], color='darkorange',
                 lw=lw, label='ROC curve (area = %0.2f)' % auc_metric)
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('slice-wise ROC curve of {} plaques'.format(fig_name))
        plt.legend(loc="lower right")
        plt.savefig("./{}/{}_roc.png".format(args.fig_dir, fig_name))
    for plq_metrics, fig_name in zip([cal_prrcf1, noncal_prrcf1], fig_names):
        plt.figure()
        lw = 2
        plt.plot(thres_all, plq_metrics[0], color='r', lw=lw, label='precision')
        plt.plot(thres_all, plq_metrics[1], color='g', lw=lw, label='recall')
        plt.plot(thres_all, plq_metrics[2], color='b', lw=lw, label='f1')
        plt.xlim([min(thres_all), max(thres_all)])
        plt.ylim([0.0, 1.05])
        plt.xlabel('Threshold Number of Pixels')
        plt.title('{} measures under different thresholds'.format(fig_name))
        plt.legend(bbox_to_anchor=(1, 0.95), loc="upper right")
        plt.savefig("./{}/{}_prrcf1.png".format(args.fig_dir, fig_name))
def plot_seg_bound_comparison(data_list, rows, start_with, show_every, start_inx, n_class, fig_name=None, width=2, scale=4):
    """ plot result comparison between seg and bound detection

    Columns per row: input | GT seg | GT bound | bound converted from the seg
    prediction | 2D bound prediction | 3D bound prediction — the last three
    overlaid on the GT bound and titled with their hd95.

    :param data_list: list of dicts with keys 'input', 'GT_seg', 'pred_seg',
        'GT_bound', 'pred_2d_bound' and 'pred_3d_bound'
    :param width: int, bound width for mask2outerbound conversion
    """
    cols = 6 # [input, label_seg, label_bound, pred_bound(converted), pred_bound_2d, pred_bound_3d]
    n_batch = len(data_list)
    # print("number of slices: {}".format(n_batch))
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for ind in range(n_batch):
        input = data_list[ind]['input']
        label_seg = data_list[ind]['GT_seg']
        pred_seg = data_list[ind]['pred_seg'] # seg prediction is not plotted here
        pred_bound_conv = mask2outerbound(pred_seg, width=width) # convert seg to inner-outer bound
        label_bound = data_list[ind]['GT_bound']
        pred_bound_2d = data_list[ind]['pred_2d_bound']
        pred_bound_3d = data_list[ind]['pred_3d_bound']
        # print("input: {}, seg: {}, pred_seg: {}, label_bound: {}, pred_bound_2d: {}, pred_bound_3d: {}".format(input.shape,
        #     label_seg.shape, pred_seg.shape, label_bound.shape, pred_bound_2d.shape, pred_bound_3d.shape))
        # print()
        # # calculate average F1 score
        # label_binary = label_binarize(label_seg.flatten(), classes=range(n_class))
        # pred_binary = label_binarize(pred_seg.flatten(), classes=range(n_class))
        #
        # f_score = np.zeros(n_class, dtype=np.float32)
        # slice_effect_class = 0
        # for i in range(n_class):
        #     if np.sum(label_binary[:,i]) == 0:
        #         f_score[i] = 0.0
        #     else:
        #         slice_effect_class += 1
        #         f_score[i] = f1_score(label_binary[:,i], pred_binary[:,i])
        #
        # ave_f_score = np.sum(f_score) / slice_effect_class
        # calculate average HFD for each of the three bound sources
        hdf_seg = slicewise_hd95(pred_bound_conv, label_bound, n_class)
        hdf_bound_2d = slicewise_hd95(pred_bound_2d, label_bound, n_class)
        hdf_bound_3d = slicewise_hd95(pred_bound_3d, label_bound, n_class)
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                ax[i, 0].imshow(input, cmap='gray')
                ax[i, 0].set_title("Slice {} : {}".format(ind+start_inx, 'input'))
                ax[i, 0].axis('off')
                ax[i, 1].imshow(mask2rgb(label_seg))
                ax[i, 1].set_title('Slice %d : %s' % (ind+start_inx, 'label_seg'))
                ax[i, 1].axis('off')
                label_bound_cp = label_bound.copy()
                label_bound_cp[label_bound != 0] = 4  # recolor GT bound into the class-4 color
                ax[i, 2].imshow(mask2rgb(label_bound_cp))
                ax[i, 2].set_title('Slice %d : %s' % (ind + start_inx, 'label_bound'))
                ax[i, 2].axis('off')
                # plot overlapping between pred_bound_conv and label_bound
                overlap_seg = pred_bound_conv.copy()
                overlap_seg[label_bound != 0] = 4
                ax[i, 3].imshow(mask2rgb(overlap_seg))
                ax[i, 3].set_title("Slice {:d} : bound from seg (hdf={:.4f})".format(ind + start_inx, hdf_seg))
                ax[i, 3].axis('off')
                overlap_bound_2d = pred_bound_2d.copy()
                overlap_bound_2d[label_bound != 0] = 4
                ax[i, 4].imshow(mask2rgb(overlap_bound_2d))
                ax[i, 4].set_title("Slice {:d} : 2D bound (hdf={:.4f})".format(ind + start_inx, hdf_bound_2d))
                ax[i, 4].axis('off')
                overlap_bound_3d = pred_bound_3d.copy()
                overlap_bound_3d[label_bound != 0] = 4
                ax[i, 5].imshow(mask2rgb(overlap_bound_3d))
                ax[i, 5].set_title("Slice {:d} : 3D bound (hdf={:.4f})".format(ind + start_inx, hdf_bound_3d))
                ax[i, 5].axis('off')
    if fig_name:
        plt.savefig(fig_name + '.pdf')
    plt.close()
def seg_bound_comparison(orig_label_path, seg_data_path, bound_data_2d_path, bound_data_3d_path, fig_save_dir, sample_stack_rows=50):
    """ compare segmentation and bound detection results and plot them into a single graph

    For every sample/artery under seg_data_path, loads the pickled results of
    the original labels, the segmentation model and the 2D/3D boundary models,
    then renders them in chunks of sample_stack_rows slices via
    plot_seg_bound_comparison.

    :param orig_label_path: str, root of the original segmentation labels
    :param seg_data_path: str, segmentation result path
    :param bound_data_2d_path: str, 2D boundary detection result path
    :param bound_data_3d_path: str, 3D boundary detection result path
    :param fig_save_dir: str, to where to save the result comparison
    :param sample_stack_rows: int, number of slices rendered per output figure
    """
    for sample in os.listdir(seg_data_path):
        if not sample.startswith('.') and osp.isdir(osp.join(seg_data_path, sample)):
            sample_path = osp.join(seg_data_path, sample)
            for artery in os.listdir(sample_path):
                orig_label_pick_path = osp.join(orig_label_path, sample, artery, 'data.pkl')
                seg_pick_path = osp.join(seg_data_path, sample, artery, 'data.pkl')
                bound_2d_pick_path = osp.join(bound_data_2d_path, sample, artery, 'data.pkl')
                bound_3d_pick_path = osp.join(bound_data_3d_path, sample, artery, 'data.pkl')
                artery_save_dir = osp.join(fig_save_dir, sample, artery)
                if not osp.exists(artery_save_dir):
                    os.makedirs(artery_save_dir)
                # load original segmentation label
                with open(orig_label_pick_path, 'rb') as reader:
                    labels_gt = pickle.load(reader)['label']
                with open(seg_pick_path, 'rb') as reader:
                    data_seg = pickle.load(reader)
                # inputs_seg here is a list of length 1 (not modified yet)
                inputs_seg, labels_seg, preds_seg = data_seg['input'], data_seg['label'], data_seg['pred']
                start, n_class, width = data_seg['start'], data_seg['n_class'], data_seg['width']
                with open(bound_2d_pick_path, 'rb') as reader:
                    data_bound = pickle.load(reader)
                # inputs_bound here is a list of length 1 (not modified yet)
                inputs_bound_2d, labels_bound_2d, preds_bound_2d, outputs_bound_2d = \
                    data_bound['input'], data_bound['label'], data_bound['pred'], data_bound['output']
                with open(bound_3d_pick_path, 'rb') as reader:
                    data_bound = pickle.load(reader)
                # inputs_bound here is a list of length 1 (not modified yet)
                inputs_bound_3d, labels_bound_3d, preds_bound_3d, outputs_bound_3d = data_bound['input'], \
                    data_bound['label'], data_bound['pred'], data_bound['output']
                print("# of slices in total: {}".format(len(inputs_seg[0]))) # number of slices
                for inx in range(0, len(inputs_seg[0]), sample_stack_rows):
                    # chunk boundaries: [inx, over) slices go into one figure
                    over = min(inx + sample_stack_rows, len(inputs_seg[0]))
                    input_plot, label_gt_plot, label_bound_2d_plot, pred_seg_plot, pred_bound_2d_plot, pred_bound_3d_plot\
                        = inputs_seg[0][inx:over], labels_gt[inx:over], labels_bound_2d[inx:over], preds_seg[inx:over], \
                        preds_bound_2d[inx:over], preds_bound_3d[inx:over]
                    # for result check
                    print("input: {}, label_seg: {}, label_bound_2d: {}, pred_seg: {}, pred_bound_2d: {}, pred_bound_3d: {}".format(
                        input_plot.shape, label_gt_plot.shape, label_bound_2d_plot.shape, pred_seg_plot.shape, pred_bound_2d_plot.shape,
                        pred_bound_3d_plot.shape))
                    data_list = [{"input": input, "GT_seg": label_seg, "pred_seg": pred_seg, "GT_bound": label_bound, "pred_2d_bound": pred_bound_2d,
                                  "pred_3d_bound" : pred_bound_3d} for (input, label_seg, pred_seg, label_bound, pred_bound_2d, pred_bound_3d)
                                 in zip(input_plot, label_gt_plot, pred_seg_plot, label_bound_2d_plot, pred_bound_2d_plot, pred_bound_3d_plot)]
                    # print("# of slices in batch: {}".format(len(data_list)))
                    file_name = "{}/{:03d}".format(artery_save_dir, inx + start)
                    plot_seg_bound_comparison(data_list, rows=over - inx, start_with=0, show_every=1, start_inx=inx + start,
                                              n_class=n_class, fig_name=file_name, width=width, scale=4)
def gif_generation(orig_label_path, bound_data_path):
    """Render an animation for every artery found under *bound_data_path*.

    Walks each sample directory inside the boundary-detection results and,
    for every artery it contains, hands the pickled original label and the
    pickled boundary result to :func:`save_gif_artery`.

    :param orig_label_path: str, root directory of the original labels
    :param bound_data_path: str, root directory of the boundary detection results
    """
    for entry in os.listdir(bound_data_path):
        sample_dir = osp.join(bound_data_path, entry)
        # skip hidden entries and anything that is not a sample directory
        if entry.startswith('.') or not osp.isdir(sample_dir):
            continue
        for artery in os.listdir(sample_dir):
            label_pickle = osp.join(orig_label_path, entry, artery, 'data.pkl')
            bound_pickle = osp.join(bound_data_path, entry, artery, 'data.pkl')
            # save the result of each artery into an animation file
            save_gif_artery(label_pickle, bound_pickle)
def save_gif_artery(orig_label_pick_path, bound_pick_path):
    """Build an animated per-slice comparison movie for a single artery.

    Loads the pickled original segmentation labels and the pickled boundary
    detection results, draws eight panels per slice and saves the animation
    as ``artery.mp4`` next to the boundary pickle.

    :param orig_label_pick_path: str, path of original segmentation label pickle
    :param bound_pick_path: str, path of boundary detection result pickle

    figures are arranged in the order of
        input          | GT_seg         | GT_bound            | pred_bound
        heatmap[0-256] | heatmap[0-100] | inner bound probmap | outer bound probmap
    besides, we only consider heatmap with range of
    0~(70)~256, namely 0.38438 ~ 0.41066 ~ 0.48048 and
    0~(50)~100, namely 0.38438 ~ 0.40315 ~ 0.42192 respectively
    (the 0.38xxx values are presumably the normalized intensities the HU
    limits map to -- confirm against the normalization used upstream)
    """
    # output directory = directory containing the boundary pickle
    gif_save_dir = '/'.join(bound_pick_path.split('/')[:-1])
    print("Processing {}".format(gif_save_dir))
    # load original segmentation label
    with open(orig_label_pick_path, 'rb') as reader:
        data_seg = pickle.load(reader)
        labels_seg, start_seg = data_seg["label"], data_seg["start"]
    # load boundary detection result: inputs, GT, predictions, start index, probability maps
    with open(bound_pick_path, 'rb') as reader:
        data_bound = pickle.load(reader)
        inputs_bound, labels_bound, preds_bound, start_bound, probmaps = \
            data_bound['input'], data_bound['label'], data_bound['pred'], data_bound['start'], data_bound['output']
    assert len(inputs_bound) == len(labels_bound) == len(preds_bound), "inputs, GT and preds should have the " \
                                                                       "same number of slices"
    print(len(inputs_bound), len(labels_seg), start_bound, start_seg)
    # 2 x 4 grid of panels, each roughly scale x scale inches
    scale, rows, cols = 4, 2, 4
    fig = plt.figure(figsize=[scale * cols, scale * rows])
    artery_name = '/'.join(gif_save_dir.split('/')[-2:])
    # add subplots for each figure
    ax1 = fig.add_subplot(rows, cols, 1)
    ax2 = fig.add_subplot(rows, cols, 2)
    ax3 = fig.add_subplot(rows, cols, 3)
    ax4 = fig.add_subplot(rows, cols, 4)
    ax5 = fig.add_subplot(rows, cols, 5)
    ax6 = fig.add_subplot(rows, cols, 6)
    ax7 = fig.add_subplot(rows, cols, 7)
    ax8 = fig.add_subplot(rows, cols, 8)
    # create customed colormap: 70 blue entries (reversed) stacked below 186 red entries
    top = cm.get_cmap('Reds', 186)
    bottom = cm.get_cmap('Blues', 70)
    newcolors = np.vstack((bottom(np.linspace(1, 0, 70)),
                           top(np.linspace(0, 1, 186))))
    bluered = ListedColormap(newcolors, name='BlueReds')
    # align segmentation labels with the boundary results, which may start later
    labels_seg_cal = labels_seg[(start_bound-start_seg):] # seg labels after calibration
    lines = []  # one list of artists per animation frame
    for i in range(len(inputs_bound)):
        input, label_seg, label_bound, pred_bound, probmap = \
            inputs_bound[i], labels_seg_cal[i], labels_bound[i], preds_bound[i], probmaps[i]
        # calculate HDF distance between GT bound and pred bound
        hdf_bound = slicewise_hd95(pred_bound, label_bound, n_classes=3)
        ax1.set_title("{} \n {}".format(artery_name, 'Input'), loc='left')
        ax1.axis('off')
        line1 = ax1.imshow(input, cmap='gray', animated=True)
        line1_text = ax1.text(48, -3, "Slice {}".format(i + start_bound), color='red', fontsize=10)
        ax2.set_title('label_seg')
        ax2.axis('off')
        line2 = ax2.imshow(mask2rgb(label_seg), animated=True)
        ax3.set_title('label_bound')
        ax3.axis('off')
        line3 = ax3.imshow(mask2rgb(label_bound), animated=True)
        ax4.set_title("pred_bound", loc='left')
        ax4.axis('off')
        line4 = ax4.imshow(mask2rgb(pred_bound), animated=True)
        # NOTE(review): the Hdf caption is drawn on ax1 (at x=400), not ax4 --
        # looks like it may be a placement trick, but confirm the intended axis
        line4_text = ax1.text(400, -3, "Hdf: {:.4f}".format(hdf_bound), color='black', fontsize=10)
        # plot inputs with range [0~256] in colormap
        ax5.set_title("input colormap HU[0~250]")
        ax5.axis('off')
        line5 = ax5.imshow(input, cmap=bluered, vmin=0.38438, vmax=0.48048, animated=True) # crop HU range 0~255
        # plot inputs with range [0~100] in colormap
        ax6.set_title("input colormap HU[0~100]")
        ax6.axis('off')
        line6 = ax6.imshow(input, cmap=bluered, vmin=0.38438, vmax=0.42192, animated=True) # crop HU range 0~100
        # inner bound probability map (channel 1 of the network output)
        ax7.set_title("inner bound probmap")
        ax7.axis('off')
        line7 = ax7.imshow(probmap[1], cmap='seismic', animated=True)
        # outer bound probability map (channel 2 of the network output)
        ax8.set_title("outer bound probmap")
        ax8.axis('off')
        line8 = ax8.imshow(probmap[2], cmap='seismic', animated=True)
        lines.append([line1, line1_text, line2, line3, line4, line4_text, line5, line6, line7, line8])
    # Build the animation using ArtistAnimation function
    ani = animation.ArtistAnimation(fig, lines, interval=50, blit=True)
    # save into gif and mp4 respectively
    # ani.save('{}/artery.gif'.format(gif_save_dir), writer="imagemagick")
    ani.save('{}/artery.mp4'.format(gif_save_dir), writer="ffmpeg", codec='mpeg4', fps=10)
# if __name__ == "__main__":
# # file_name = 'test_result_2.pickle'
# # print("file name: {}".format(file_name))
# # with open(file_name, 'rb') as reader:
# # data = pickle.load(reader)
# # labels = data['GT'] # [N, H, W]
# # outputs = data['output'] # [N, C, H, W]
# #
# # binary_class_slice_wise_pr(labels, outputs, fig_name= 'test_2_binary_pr')
# # multi_class_slice_wise_pr(labels, outputs, fig_name='test_2_multi_pr_micro')
# # average_precision(labels, outputs)
# # path of original annotation
# orig_label_path = "./PlaqueSegmentation/OrigAnnotation/2d_res_unet_dp_0.001_0.90_0.9_theta-1.0-0.0_100_2_10_dice_Adam_" \
# "r-True_flip-True_w-True_rcp-True_tr-False_ns-Falseptr-False_mv-False_sl-False_ds-2_a-0.5_lr-StepLR_" \
# "wt-None_o-5_b-False_cal0gt-False_cf-config_dp-0.0_ig-None_w0-10.0_sg-5.0_96_wt-1_mo_False"
#
# # seg_data_path = "/home/mil/huang/CPR_Segmentation_ver7/PlaqueSegmentation/Experiment23/2d_res_unet_dp_0.0001_0.90_" \
# # "0.9_theta-1.0-0.0_100_100_10_ceb_Adam_r-True_flip-True_w-True_rcp-True_tr-False_ns-Falseptr-False_mv" \
# # "-False_sl-False_ds-2_a-0.5_lr-StepLR_wt-None_o-3_b-False_cal0gt-False_cf-config_dp-0.0_ig-None_w0-10.0_" \
# # "sg-5.0_96_wt-1_mo_False"
# # bound_data_2d_path = "/home/mil/huang/CPR_Segmentation_ver7/PlaqueBound/Experiment3/2d_res_unet_dp_0.001_0.0_100_100_10" \
# # "_whd_Adam_r-True_flip-True_w-False_ptr-False_mv-False_sl-False_lr-StepLR_wt-None_o-2_b-True_cf-config" \
# # "_dp-0.0_w1-10.0_w2-10.0_sg1-5.0_sg2-5.0_rs-96_wt-2_bt-outer_whda-4_whdb-1"
# # bound_data_3d_path = "./BoundDetection/Experiment4/3d_res_unet_0.001_100_100_whd_Adam_w-False_sl-True_lr-StepLR_wt-None_o" \
# # "-2_b-True_cf-config_dp-0.0_rs-96_cc-192_wt-2_bt-outer_whda-4_whdb-1_whdr-0.5"
# # fig_save_dir = "/home/mil/huang/CPR_Segmentation_ver7/PlaqueDetection_20181127/ResultsComparison/seg_bound_comp_debug3"
#
# # seg_bound_comparison(orig_label_path, seg_data_path, bound_data_2d_path, bound_data_3d_path, fig_save_dir, sample_stack_rows=50)
#
# bound_data_path = "./BoundDetection/Experiment7/HybridResUNet_ds1int15_0.167"
# gif_generation(orig_label_path, bound_data_path)
# _*_ coding: utf-8 _*_
""" calculate risk statistics and HU value statistics of the whole data set
this part is not directly used in training our network
"""
import matplotlib as mpl
mpl.use('Agg')
from image.transforms import Intercept
import matplotlib.pyplot as plt
import os
import os.path as osp
from os import listdir
import numpy as np
from skimage import io
from multiprocessing import Pool
from functools import reduce
from sklearn.metrics import f1_score
from vision import sample_stack_color, sample_list3
from image.transforms import HU2Gray, RandomCentralCrop, Rescale, Gray2Triple, RandomRotation, RandomFlip
from torchvision import transforms
from utils import hu2lut
import random
def risk_statistic(data_dir):
    """Count the number of annotated slices per risk class over the data set.

    Walks every ``IRB*`` sample under *data_dir*, takes the first series of
    each sample and, for every phase that has both images and masks, reads
    ``sliceinfo.txt`` via :func:`get_slice_info` and accumulates the per-risk
    slice counts (per sample and overall).

    :param data_dir: str, root directory of the raw CPR data
    """
    # data_dir = "/data/ugui0/antonio-t/CPR_20180518/20180518"
    n_slices_risk = {0: 0, 1: 0, 2: 0, 3: 0}
    samples = [sample for sample in listdir(data_dir) if sample.startswith('IRB')]
    for sample in samples:
        sample_path = osp.join(data_dir, sample)
        series = [s for s in listdir(sample_path) if not s.startswith('.')]
        # series names carry a numeric suffix after the first character;
        # keep only the earliest series
        series = sorted(series, key=lambda x: int(x[1:]))
        series_path = osp.join(sample_path, series[0])
        exclusions = ['.tiff', 'conf']
        phases = [phase for phase in sorted(listdir(series_path))
                  if not any([phase.endswith(ex) for ex in exclusions]) and not phase.startswith('.')]
        n_slices_per_sample = {0: 0, 1: 0, 2: 0, 3: 0}
        for phase in phases:
            # load masks
            mask_dir = osp.join(series_path, phase + 'conf')
            mask_files = [file for file in listdir(mask_dir) if file.startswith('I0') and file.endswith('.tiff')]
            mask_files = sorted(mask_files, key=lambda x: int(x.split('.')[0][2:]))
            # load images
            img_dir = osp.join(series_path, phase)
            img_files = [file for file in listdir(img_dir) if file.startswith('I0')]
            img_files = sorted(img_files, key=lambda x: int(x.split('.')[0][2:]))
            if len(mask_files) > 0 and len(img_files) > 0:
                slice_info_file = osp.join(mask_dir, 'sliceinfo.txt')
                start, end, risk, _, _, _ = get_slice_info(slice_info_file)
                inxs, cnts = np.unique(risk, return_counts=True)
                for i, cnt in zip(inxs, cnts):
                    # accumulate via .get so an unexpected risk class (the
                    # docstring mentions risks up to 4 while only 0..3 are
                    # pre-seeded) cannot raise KeyError -- it is just tallied
                    n_slices_risk[i] = n_slices_risk.get(i, 0) + cnt
                    n_slices_per_sample[i] = n_slices_per_sample.get(i, 0) + cnt
        print("{}: {} - {}".format(sample, n_slices_per_sample.keys(), n_slices_per_sample.values()))
    print("Overall: {} - {}".format(n_slices_risk.keys(), n_slices_risk.values()))
########################################################################################
# Shared augmentation/preprocessing pipeline used by hist_mean_var_statistic():
# random central crop -> Intercept (presumably HU clipping; verify in
# image.transforms) -> random rotation -> random flip -> rescale to 96x96.
# Each transform receives and returns an (image, label) pair.
trans = transforms.Compose([RandomCentralCrop(),
                            Intercept(),
                            RandomRotation(),
                            RandomFlip(),
                            Rescale(96)])
def hist_mean_var_statistic(sample_path):
    """Compute pixel-intensity statistics and histograms for one patient sample.

    Applies the module-level ``trans`` pipeline to every slice of every
    artery, prints max/min/mean/std and 5th/95th percentiles per artery,
    saves a per-artery histogram figure and returns the sample-wide stats.

    :param sample_path: str, directory of one patient sample
    :return: tuple (mean, std) over all transformed slices of the sample
    """
    sample = sample_path.split('/')[-1]
    # num_slices_per_class = np.zeros(5, dtype=np.uint16)
    imgs_samp = []  # one (n_slices, 96, 96) float32 array per artery
    for artery in sorted(listdir(sample_path)):
        mask_path = osp.join(sample_path, artery, 'applicate', 'mask')
        img_path = osp.join(sample_path, artery, 'applicate', 'image')
        # extract label files (visible .tiff masks only)
        files = sorted(
            [file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
        imgs_trans = np.zeros((len(files), 96, 96), dtype=np.float32)
        for f_inx, file in enumerate(files):
            slice = io.imread(osp.join(img_path, file))
            label = io.imread(osp.join(mask_path, file))
            # trans returns an (image, label) pair; only the image is kept
            imgs_trans[f_inx], _ = trans((slice, label))
        imgs_samp.append(imgs_trans)
    print("sample: {}".format(sample))
    arteries = sorted(listdir(sample_path))
    # NOTE(review): plt.subplots(1, n) returns a scalar Axes when n == 1, so
    # axes[a_inx] would fail for a single-artery sample -- confirm inputs
    fig, axes = plt.subplots(1, len(arteries))
    for a_inx in range(len(arteries)):
        artery = arteries[a_inx]
        imgs_artery = imgs_samp[a_inx]
        print(artery)
        print("# of slices: {}".format(len(imgs_artery)))
        print("Max: {}, Min: {}, Mean: {}, Std: {}".format(imgs_artery.max(), imgs_artery.min(), imgs_artery.mean(), imgs_artery.std()))
        per95 = np.percentile(imgs_artery.flatten(), 95)
        per5 = np.percentile(imgs_artery.flatten(), 5)
        print("Percentile 95: {}, Percentile 5: {}".format(per95, per5))
        axes[a_inx].hist(imgs_artery.flatten(), bins=2000)
        axes[a_inx].set_title("Histogram of {}".format(artery))
        axes[a_inx].set_xlabel("HU value")
        # axes[a_inx].set_ylabel("count")
        # anchor the annotation texts at the left edge, stacked down from the
        # histogram's peak count
        x_pos = int(imgs_artery.min())
        # print(x_pos)
        hist, hist_edge = np.histogram(imgs_artery.flatten(), bins=2000)
        y_pos = np.max(hist)
        # print(y_pos)
        axes[a_inx].text(x_pos, int(y_pos*1.0), "# of slices: {:4d}".format(len(imgs_artery)), color="red")
        axes[a_inx].text(x_pos, int(y_pos*0.95), "Max: {:4.2f}".format(imgs_artery.max()), color="red")
        axes[a_inx].text(x_pos, int(y_pos*0.9), "Min: {:4.2f}".format(imgs_artery.min()), color="red")
        axes[a_inx].text(x_pos, int(y_pos*0.85), "Mean: {:4.2f}".format(imgs_artery.mean()), color="red")
        axes[a_inx].text(x_pos, int(y_pos*0.8), "Std: {:4.2f}".format(imgs_artery.std()), color="red")
        axes[a_inx].text(x_pos, int(y_pos*0.75), "Per95: {:4.2f}".format(per95), color="red")
        axes[a_inx].text(x_pos, int(y_pos*0.7), "Per5: {:4.2f}".format(per5), color="red")
    plt.savefig("./samples_hist/pixel_hist_per_artery/{}.png".format(sample))
    imgs_samp = np.concatenate(imgs_samp, axis=0)
    return (imgs_samp.mean(), imgs_samp.std())
def hist_mean_var_statistic_multi_preocess(num_workers=24):
    """Run :func:`hist_mean_var_statistic` over all configured samples in parallel.

    Collects the per-sample (mean, std) pairs and saves an error-bar plot of
    the per-sample pixel statistics.

    :param num_workers: int, number of worker processes
    """
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    args = []
    for mode in ['train', 'val', 'test']:
        with open(osp.join('./configs/config_35', mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            args.append(osp.join(data_dir, sample))
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    results = pool.map(hist_mean_var_statistic, args)
    # release the worker processes -- the sibling *_multi_preocess helpers in
    # this module all close their pools after mapping; this one leaked them
    pool.close()
    # results is a list of (mean, std) tuples; split into parallel lists
    means, stds = [[result[i] for result in results] for i in range(len(results[0]))]
    plt.figure()
    plt.errorbar(range(1, len(means)+1), means, yerr=stds, fmt='-o')
    for i in range(len(means)):
        # annotate each point with its (integer) standard deviation
        plt.text(i+1, means[i]+stds[i]+1, "{:d}".format(int(stds[i])), color="red",
                 horizontalalignment="center", verticalalignment="center")
    plt.ylabel("standard variance")
    plt.xlabel("index of sample")
    plt.title("variance bar for each sample")
    plt.savefig("./samples_hist/pixel_hist_per_sample/varbar.png")
########################################################################################
# plaque statistics
def plaque_statistic_multi_preocess(num_workers=24):
    """Aggregate per-class slice counts over the train/val/test splits.

    For each split, maps :func:`plaque_statistic` over the configured samples
    with a process pool, sums the per-sample count vectors and appends the
    totals to ``./num_slices_per_class.txt``.

    :param num_workers: int, number of worker processes
    """
    data_dir = "/data/ugui0/antonio-t/CPR_multiview"
    for mode in ['train', 'val', 'test']:
        # record which split the following totals belong to
        with open("./num_slices_per_class.txt", "a") as writer:
            writer.write("{}\n".format(mode))
        with open(osp.join('./config', mode + '.txt'), 'r') as reader:
            sample_names = [line.strip('\n') for line in reader.readlines()]
        sample_paths = [osp.join(data_dir, name) for name in sample_names]
        worker_pool = Pool(processes=num_workers)
        print("{} CPUs are used".format(num_workers))
        per_sample_counts = worker_pool.map(plaque_statistic, sample_paths)
        worker_pool.close()
        # element-wise sum of the per-sample count vectors
        totals = per_sample_counts[0]
        for counts in per_sample_counts[1:]:
            totals = totals + counts
        print("total slices for each class: {}".format(totals))
        with open("./num_slices_per_class.txt", "a") as writer:
            writer.write("total slices for each class: {}\n".format(totals))
def plaque_statistic(sample_path):
    """Count, per class, how many slices of one sample contain that class.

    A slice counts once for a class if any mask pixel carries that class's
    grayscale ([0, 29, 255, 151, 76]; the last two are cal and noncal).
    Appends the per-sample counts to ``./num_slices_per_class.txt``.

    :param sample_path: str, directory of one patient sample
    :return: np.ndarray of shape (5,), slice counts per class
    """
    sample = sample_path.split('/')[-1]
    num_slices_per_class = np.zeros(5, dtype=np.uint16)
    grayscales = [0, 29, 255, 151, 76]
    for artery in sorted(listdir(sample_path)):
        mask_dir = osp.join(sample_path, artery, 'applicate', 'mask')
        # visible .tiff mask files only
        tiff_names = sorted(
            [name for name in listdir(mask_dir) if name.endswith('.tiff') and not name.startswith('.')])
        for name in tiff_names:
            mask = io.imread(osp.join(mask_dir, name))
            for cls_inx, gray in enumerate(grayscales):
                # np.any(...) is True (-> +1) when at least one pixel matches
                num_slices_per_class[cls_inx] += np.any(mask == gray)
    print("{} -- # of slices for each class: {}".format(sample, num_slices_per_class))
    with open("./num_slices_per_class.txt", "a") as writer:
        writer.write("{} -- # of slices for each class: {}\n".format(sample, num_slices_per_class))
    return num_slices_per_class
# plaque statistics
def hu_statistic_multi_preocess(num_workers=24):
    """Aggregate HU-value histograms per tissue class over the training split.

    Maps :func:`hu_statistic` over all training samples, sums the per-sample
    histograms per class, and writes/plots: the windowed count-difference
    profile for 'background' and 'outline', the per-class histogram over the
    thresholded HU range, and a before/after comparison of adding the noncal
    pixels to the outline histogram.

    :param num_workers: int, number of worker processes
    """
    # data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    data_dir = "/data/ugui0/antonio-t/CPR_multiview"
    thres = 10     # minimum bin count for a HU value to be considered present
    interval = 8   # window width for the moving-average count difference
    for mode in ['train']:
        with open(osp.join('./configs/config', mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        args = [osp.join(data_dir, sample) for sample in samples]
        pool = Pool(processes=num_workers)
        print("{} CPUs are used".format(num_workers))
        hus_all_samples = pool.map(hu_statistic, args)
        pool.close()
        # class order must match the cnt_list order returned by hu_statistic
        names = ['background', 'central_part', 'outline', 'cal', 'noncal']
        if osp.exists("./samples_hist/hist_alldata/hist.txt"):
            os.remove("./samples_hist/hist_alldata/hist.txt")
        # joint list is returned; zip(*...) regroups histograms by class
        for name, val in zip(names, zip(*hus_all_samples)):
            datas = reduce(lambda x, y: x + y, val) # add up all columns
            if name == 'outline':
                # bins 1500:1551 correspond to HU 0 ~ 50 (offset +1500)
                ratio = datas[1500:1551].sum()/ datas.sum()
                print("ratio of pixels with HU range 0~50 in outline: {:.4f}".format(ratio))
                # keep the outline histogram for the final comparison plot;
                # NOTE(review): relies on 'outline' preceding 'noncal' in names
                tmp = datas
            if name == 'noncal':
                ratio = datas[1500:1551].sum() / datas.sum()
                print("ratio of pixels with HU range 0~50 in noncal: {:.4f}".format(ratio))
            print("{} pixels in {}".format(datas.sum(), name))
            if name == 'outline' or name == 'background':
                data_list = []
                with open("./samples_hist/hist_alldata/hist.txt", 'a') as writer:
                    writer.write("{}\n".format(name))
                    for inx in range(len(datas)):
                        # moving-average count at inx minus the one at inx-1;
                        # for small inx both fall back to the running prefix mean
                        curr = datas[inx-interval:inx].mean() if inx>interval else datas[:inx+1].mean()
                        prev = datas[inx-1-interval:inx-1].mean() if inx>interval else datas[:inx+1].mean()
                        hu_diff = curr - prev
                        writer.write("{} : {}\n".format(inx - 1500, int(hu_diff)))
                        data_list.append(abs(int(hu_diff)))
                # plot the absolute HU difference (bins 500:1300 = HU -1000 ~ -200)
                plt.figure()
                plt.plot(np.arange(-1000, -200), data_list[500:1300], 'g')
                plt.xlabel("HU value")
                plt.ylabel("count difference")
                plt.title("Histogram of count difference for {}".format(name))
                plt.savefig('./samples_hist/hist_alldata/{}_diff.jpg'.format(name))
            # restrict the plotted range to HU values whose count reaches thres
            min_hu = np.array(np.where(datas >= thres)).min() - 1500
            max_hu = np.array(np.where(datas >= thres)).max() - 1500
            mean_hu = (np.arange(-1500, 2500) * datas).sum() / datas.sum()
            print("min : {} max : {} ave : {}".format(min_hu, max_hu, mean_hu))
            plt.figure()
            plt.plot(np.arange(min_hu, max_hu), datas[min_hu+1500:max_hu+1500], 'g')
            plt.xlabel("HU value")
            plt.ylabel("count")
            plt.title("Histogram of {}".format(name))
            plt.savefig('./samples_hist/hist_alldata/' + name + '.jpg')
        # after the loop, `datas` holds the last class's histogram (noncal) and
        # `tmp` the outline histogram -- both leak out of the loop on purpose
        plt.figure()
        plt.xlabel("HU value")
        plt.ylabel("count")
        plt.title("Histogram comparison before/after adding noncal pixels")
        plt.plot(np.arange(-1500, 2500), tmp, 'b', label="before adding noncal")
        datas = datas + tmp
        plt.plot(np.arange(-1500, 2500), datas, 'r', label="after adding noncal")
        plt.legend()
        plt.savefig("./samples_hist/hist_alldata/hist_comparison.jpg")
def hu_statistic(sample_path):
    """Accumulate per-class HU-value histograms for one patient sample.

    For each artery, stacks all slices, crops the central 256x256 region and
    counts HU occurrences separately for the five mask grayscales
    [0, 29, 255, 151, 76].  Bins cover 4000 values with HU value v stored at
    index v + 1500.

    :param sample_path: str, directory of one patient sample
    :return: list of five length-4000 int64 histograms
    """
    # one histogram per class: background, central_part, outline, cal, noncal
    histograms = [np.zeros(4000, dtype=np.int64) for _ in range(5)]
    sample_name = sample_path.split('/')[-1]
    print("Processing ", sample_name)
    for artery in sorted(listdir(sample_path)):
        mask_dir = osp.join(sample_path, artery, 'applicate', 'mask')
        img_dir = osp.join(sample_path, artery, 'applicate', 'image')
        # extract label files (visible .tiff masks only)
        tiff_names = sorted(
            [name for name in listdir(mask_dir) if name.endswith('.tiff') and not name.startswith('.')])
        volume = np.stack([io.imread(osp.join(img_dir, name)) for name in tiff_names])
        labels = np.stack([io.imread(osp.join(mask_dir, name)) for name in tiff_names])
        # central crop (256x256) keeps the computation tractable
        volume = volume[:, 128:384, 128:384]
        labels = labels[:, 128:384, 128:384]
        for cls_inx, grayscale in enumerate([0, 29, 255, 151, 76]):
            values, counts = np.unique(volume[labels == grayscale], return_counts=True)
            # np.unique guarantees unique indices, so fancy-index accumulation
            # is equivalent to adding each (value, count) pair one by one
            histograms[cls_inx][values + 1500] += counts
    return histograms
# plaque statistics
def outline_noncal_overlap_statistic_multi_preocess(num_workers=24, step=100):
    """Aggregate noncal/outline[HU 0~50] overlap statistics over all samples.

    Maps :func:`outline_noncal_overlap_statistic` over the training samples,
    concatenates the per-sample maps, then saves batched overlap figures,
    mean heatmaps and an F1 histogram under ``./overlap_map``.

    :param num_workers: int, number of worker processes
    :param step: int, number of slices per batched overlap figure
    """
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    for mode in ['train']:
        with open(osp.join('./config', mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        args = [osp.join(data_dir, sample) for sample in samples]
        pool = Pool(processes=num_workers)
        print("{} CPUs are used".format(num_workers))
        results = pool.map(outline_noncal_overlap_statistic, args)
        # transpose the list of per-sample 4-tuples into per-field lists
        f1s_all_samples, noncal_all_maps, outline0050_all_maps, overlap_maps = \
            [[result[i] for result in results] for i in range(len(results[0]))]
        pool.close()
        # flatten the F1 lists and stack the per-sample map arrays
        f1s_all_samples = reduce(lambda x, y: x + y, f1s_all_samples)
        noncal_all_maps = np.concatenate(noncal_all_maps, axis=0)
        outline0050_all_maps = np.concatenate(outline0050_all_maps, axis=0)
        overlap_maps = np.concatenate(overlap_maps, axis=0)
        print(noncal_all_maps.shape, outline0050_all_maps.shape, overlap_maps.shape)
        noncal_map_ave = np.mean(noncal_all_maps, axis=0)
        outline0050_map_ave = np.mean(outline0050_all_maps, axis=0)
        save_dir = "./overlap_map"
        if not osp.exists(save_dir):
            os.makedirs(save_dir)
        # dump overlap maps in batches of `step` slices per figure
        for i in range(0, len(overlap_maps), step):
            end = min(i + step, len(overlap_maps))
            data = overlap_maps[i:end]
            metrics = f1s_all_samples[i:end]
            fig_name = osp.join(save_dir,"{:03d}".format(i+1))
            sample_stack_color(data, metrics, rows=step//10, cols=10, start_with=0, show_every=1,
                               scale=4, fig_name=fig_name)
        # plot noncal heatmap
        plt.figure()
        plt.title("heatmap of noncalcified plaque")
        plt.imshow(noncal_map_ave)
        plt.colorbar()
        plt.savefig(osp.join(save_dir, "noncal_heatmap.jpg"))
        # plot outline 0050 heatmap
        plt.figure()
        plt.title("heatmap of outline with HU range 0 ~ 50")
        plt.imshow(outline0050_map_ave)
        plt.colorbar()
        plt.savefig(osp.join(save_dir, "outline0050_heatmap.jpg"))
        plt.figure()
        plt.hist(f1s_all_samples, bins=100)
        plt.xlabel("overlapping (measured in F1)")
        plt.ylabel("histogram")
        ave_f1 = sum(f1s_all_samples) / len(f1s_all_samples)
        plt.title("Histogram of overlap between noncal and outline[0~50]: {:.4f}".format(ave_f1))
        plt.savefig(osp.join(save_dir, "hist_overlap.jpg"))
        print("average f1 score between noncal and outline with HU range 0 ~ 50 for all noncal slices: {}".format(ave_f1))
def outline_noncal_overlap_statistic(sample_path):
    """Measure per-slice overlap between noncal plaque and outline pixels in HU 0~50.

    For every slice of *sample_path* that contains non-calcified plaque
    (grayscale 76), computes the F1 overlap between the noncal mask and the
    outline pixels (grayscales 76/255/151) whose HU value lies in [0, 50],
    and builds a color-coded overlap map (76 = noncal only, 150 =
    outline[0~50] only, 226 = both).

    :param sample_path: str, directory of one patient sample
    :return: (f1s, noncal_maps, outline0050_maps, overlap_maps) where the
        maps are stacked along axis 0 (empty arrays when no noncal slice)
    """
    f1s = []
    # collect per-slice maps in lists and stack once at the end instead of
    # np.concatenate inside the loop, which is quadratic in slice count
    noncal_list, outline0050_list, overlap_list = [], [], []
    sample = sample_path.split('/')[-1]
    print("Processing ", sample)
    label = None  # remembers the last slice shape for the empty-result case
    for artery in sorted(listdir(sample_path)):
        mask_path = osp.join(sample_path, artery, 'applicate', 'mask')
        img_path = osp.join(sample_path, artery, 'applicate', 'image')
        # extract label files
        label_files = sorted(
            [file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
        for label_file in label_files:
            label_path = osp.join(mask_path, label_file)
            slice_path = osp.join(img_path, label_file)
            label = io.imread(label_path)
            slice = io.imread(slice_path)
            if np.sum(label == 76) != 0:  # only slices containing noncal plaque
                overlap_map = np.zeros(label.shape, dtype=np.uint8)
                # noncal map
                mask_noncal = (label == 76)
                # outline = union of grayscales 76/255/151, restricted to HU [0, 50]
                mask_hu0050 = np.logical_and(slice <= 50, slice >= 0)
                mask_outline = np.logical_or(label == 76, label == 255)
                mask_outline = np.logical_or(mask_outline, label == 151)
                mask_outline_hu0050 = np.logical_and(mask_outline, mask_hu0050)
                try:
                    f1s.append(f1_score(mask_noncal.flatten(), mask_outline_hu0050.flatten()))
                # narrowed from a bare `except:` -- keeps the best-effort
                # behavior but no longer swallows KeyboardInterrupt/SystemExit
                except Exception:
                    print(label_path)
                overlap_map[mask_noncal] = 76
                overlap_map[mask_outline_hu0050] = 150
                overlap_map[np.logical_and(mask_noncal, mask_outline_hu0050)] = 226  # yellow for overlap
                noncal_list.append(mask_noncal)
                outline0050_list.append(mask_outline_hu0050)
                overlap_list.append(overlap_map)
    if noncal_list:
        noncal_maps = np.stack(noncal_list, axis=0)
        outline0050_maps = np.stack(outline0050_list, axis=0)
        overlap_maps = np.stack(overlap_list, axis=0)
    else:
        # no noncal slice found: return empty stacks; fall back to (0, 0) when
        # the sample had no label files at all (the old code raised NameError)
        shape = label.shape if label is not None else (0, 0)
        noncal_maps = np.empty((0, *shape), dtype=np.uint8)
        outline0050_maps = np.empty((0, *shape), dtype=np.uint8)
        overlap_maps = np.empty((0, *shape), dtype=np.uint8)
    return f1s, noncal_maps, outline0050_maps, overlap_maps
def get_slice_info(slice_info_path):
    """Parse ``sliceinfo.txt`` and return the effective slice range plus annotations.

    Each comma-separated line carries the risk class and three boolean flags
    in fields 1..4 and an is-effective flag in the last field.  Scanning
    stops at the first "false" line that follows an effective run, so only
    the first contiguous block of 'good' slices is captured.
    (Be careful with samples whose slices are all effective or all non-effective.)

    :param slice_info_path: str, path to the slice-info file
    :return: (start_inx, end_inx, risk, sig_stenosis, pos_remodeling,
        napkin_ring) -- the last four are numpy arrays over effective slices
    """
    with open(slice_info_path, "rb") as reader:
        lines = reader.readlines()
    risk_list = []
    sig_stenosis_list = []
    pos_remodeling_list = []
    napkin_ring_list = []
    # defaults cover the "no effective slice" / "never turns false" cases
    start_inx = len(lines)
    end_inx = len(lines)
    inside_effective_run = False
    for l_inx, raw in enumerate(lines):
        fields = raw.decode('utf8').strip().split(',')
        is_effective = fields[-1]
        risk, sig_stenosis, pos_remodeling, napkin_ring = fields[1:5]
        if is_effective == "true":
            risk_list.append(int(risk))
            sig_stenosis_list.append(int(sig_stenosis == "true"))
            pos_remodeling_list.append(int(pos_remodeling == "true"))
            napkin_ring_list.append(int(napkin_ring == "true"))
            if not inside_effective_run:
                start_inx = l_inx
                inside_effective_run = True
        # note: the run only ends on an explicit "false" flag
        if inside_effective_run and is_effective == "false":
            end_inx = l_inx
            break
    end_inx = min(end_inx, len(lines))
    print("{}: start-{} end-{}".format(slice_info_path.split('/')[-4:], start_inx, end_inx))
    return start_inx, end_inx, np.array(risk_list), np.array(sig_stenosis_list), \
        np.array(pos_remodeling_list), np.array(napkin_ring_list)
# except UnboundLocalError:
# print("errors happened in {}".format(slice_info_path.split('/')[-4:]))
def overall_statistic_multi_preocess(method, num_workers=24, step=10):
    """Run a per-sample statistic *method* in parallel and plot the aggregates.

    *method* (e.g. :func:`noncal_statistic`) must return the 11-tuple
    (noncal_evals, f1s, file_paths, noncal_maps, outline0050_maps,
    overlap_maps, labels, slices1, slices2, hu0050_maps, mix_overlap_maps).
    Saves heatmaps, an F1 histogram and batched per-slice comparison figures
    under ``./noncal_map_9``.

    :param method: callable mapped over the sample directories
    :param num_workers: int, number of worker processes
    :param step: int, number of slices per comparison figure
    """
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    for mode in ['train']:
        with open(osp.join('./config', mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        args = [osp.join(data_dir, sample) for sample in samples]
        pool = Pool(processes=num_workers)
        print("{} CPUs are used".format(num_workers))
        results = pool.map(method, args)
        # transpose the list of per-sample 11-tuples into per-field lists
        noncal_evals, f1s_samples, file_paths, noncal_maps, outline0050_maps, overlap_maps, labels, slices1, slices2, \
            hu0050_maps, mix_overlap_maps = [[result[i] for result in results] for i in range(len(results[0]))]
        pool.close()
        # flatten the python lists; stack the numpy fields sample-wise
        f1s_samples = reduce(lambda x, y: x + y, f1s_samples)
        file_paths = reduce(lambda x, y: x + y, file_paths)
        noncal_evals = np.concatenate(noncal_evals, axis=0)
        noncal_maps = np.concatenate(noncal_maps, axis=0)
        outline0050_maps = np.concatenate(outline0050_maps, axis=0)
        overlap_maps = np.concatenate(overlap_maps, axis=0)
        labels = np.concatenate(labels, axis=0)
        slices1 = np.concatenate(slices1, axis=0)
        slices2 = np.concatenate(slices2, axis=0)
        hu0050_maps = np.concatenate(hu0050_maps, axis=0)
        mix_overlap_maps = np.concatenate(mix_overlap_maps, axis=0)
        print(noncal_maps.shape, outline0050_maps.shape, overlap_maps.shape,
              labels.shape, slices1.shape, hu0050_maps.shape)
        save_dir = "./noncal_map_9"
        if not osp.exists(save_dir):
            os.makedirs(save_dir)
        # save file paths
        with open(osp.join(save_dir, 'noncal_paths.txt'), 'w') as writer:
            for inx, file_path in enumerate(file_paths):
                writer.write("{} : {}\n".format(inx + 1, file_path))
        # calculate mean noncal map and mean outline_hu0050 map
        noncal_map_ave = np.mean(noncal_maps, axis=0)
        outline0050_map_ave = np.mean(outline0050_maps, axis=0)
        # calculate average F1
        ave_f1 = sum(f1s_samples) / len(f1s_samples)
        print("average f1 score between noncal and outline with HU range 0 ~ 50 for all noncal slices: {}".format(ave_f1))
        plt.figure()
        plt.title("heatmap of noncalcified plaque")
        plt.imshow(noncal_map_ave)
        plt.colorbar()
        plt.savefig(osp.join(save_dir, "noncal_heatmap.jpg"))
        # plot outline 0050 heatmap
        plt.figure()
        plt.title("heatmap of outline with HU range 0 ~ 50")
        plt.imshow(outline0050_map_ave)
        plt.colorbar()
        plt.savefig(osp.join(save_dir, "outline0050_heatmap.jpg"))
        plt.figure()
        plt.hist(f1s_samples, bins=100)
        plt.xlabel("overlapping (measured in F1)")
        plt.ylabel("histogram")
        plt.title("Histogram of overlap between noncal and outline[0~50]: {:.4f}".format(ave_f1))
        plt.savefig(osp.join(save_dir, "hist_overlap.jpg"))
        # bundle everything slice-wise for the batched comparison figures
        datas = [{'slice1':slice1, 'slice2':slice2, 'label':label, 'hu0050':hu0050, 'overlap': overlap, 'f1': f1,
                  'mix_overlap':mix_overlap, 'noncal_eval':noncal_eval, 'file_path':file_path}
                 for (slice1, slice2, label, hu0050, overlap, f1, mix_overlap, noncal_eval, file_path)
                 in zip(slices1, slices2, labels, hu0050_maps, overlap_maps, f1s_samples, mix_overlap_maps, noncal_evals, file_paths)]
        for i in range(0, len(datas), step):
            end = min(i + step, len(datas))
            data_batch = datas[i:end]
            fig_name = osp.join(save_dir,"{:03d}".format(i+1))
            sample_list3(data_batch, rows=step, cols=6, start_with=0,
                         show_every=1, scale=4, fig_name=fig_name, start_inx=i+1)
def noncal_statistic(sample_path):
    """Collect per-slice diagnostics for every noncal-containing slice of a sample.

    For each slice containing non-calcified plaque (grayscale 76) this
    gathers: an evaluation vector (counts of noncal pixels above HU 50 /
    below HU 0 plus the top-k and bottom-k noncal HU values), the F1 overlap
    between the noncal mask and outline pixels in HU [0, 50], two
    LUT-windowed views of the slice, and several color-coded overlap maps.

    :param sample_path: str, directory of one patient sample
    :return: 11-tuple (noncal_evals, f1s, file_paths, noncal_maps,
        outline0050_maps, overlap_maps, labels, slices1, slices2,
        hu0050_maps, mix_overlap_maps)
    """
    k= 5  # number of extreme noncal HU values kept in the evaluation vector
    f1s = []
    file_paths = [] # save paths of slice with non-calcified plaque
    noncal_flag = False  # becomes True once the first noncal slice is stored
    np.random.seed(42)  # deterministic per-sample sampling seeds
    sample = sample_path.split('/')[-1]
    print("Processing ", sample)
    for artery in sorted(listdir(sample_path)):
        mask_path = osp.join(sample_path, artery, 'applicate', 'mask')
        img_path = osp.join(sample_path, artery, 'applicate', 'image')
        # extract label files
        label_files = sorted(
            [file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
        rand_seeds = np.random.uniform(0.0, 1.0, len(label_files))
        for inx, label_file in enumerate(label_files):
            label_path = osp.join(mask_path, label_file)
            slice_path = osp.join(img_path, label_file)
            label = io.imread(label_path)
            slice = io.imread(slice_path)
            if np.sum(label == 76) != 0: # noncal-76, cal-151
                # NOTE(review): seeds are uniform in [0, 1) so the 2.0
                # threshold keeps every noncal slice; lower it to subsample
                if rand_seeds[inx] < 2.0:
                    # save file path
                    file_path = '/'.join([sample, artery, label_file])
                    file_paths.append(file_path)
                    # calculate noncal evaluations
                    n_above50 = np.sum(np.logical_and(label==76, slice>50))
                    n_below0 = np.sum(np.logical_and(label==76, slice<0))
                    # NOTE(review): this check duplicates the enclosing
                    # condition, so the else branch below is unreachable here
                    if np.sum(label == 76) != 0:
                        noncal_pxiels_sort = sorted(slice[label == 76].flatten())
                        topk = noncal_pxiels_sort[-k:]
                        buttomk = noncal_pxiels_sort[:k]
                    else:
                        topk = [51 for _ in range(k)]
                        buttomk = [-1 for _ in range(k)]
                    noncal_eval = np.array([n_above50, n_below0, *topk, *buttomk]).astype(np.int16)
                    # hu0050 map: pixels whose HU value lies in [0, 50]
                    mask_hu0050 = np.logical_and(slice <= 50, slice >= 0)
                    hu0050_map = np.zeros(label.shape, dtype=np.uint8)
                    hu0050_map[mask_hu0050] = 150
                    slice1 = hu2lut(slice, window=255, level=27.5) # only extract HU range [-100, 155]
                    slice2 = hu2lut(slice, window=1000, level=700) # for calcification
                    # noncal map and outline (union of grayscales 76/255/151)
                    mask_noncal = (label == 76)
                    mask_outline = np.logical_or(label == 76, label == 255)
                    mask_outline = np.logical_or(mask_outline, label == 151)
                    mask_outline_hu0050 = np.logical_and(mask_outline, mask_hu0050)
                    # calculate F1 score
                    f1s.append(f1_score(mask_noncal.flatten(), mask_outline_hu0050.flatten()))
                    # calculate overlap
                    overlap_map = np.zeros(label.shape, dtype=np.uint8)
                    overlap_map[mask_noncal] = 76
                    overlap_map[mask_outline_hu0050] = 150
                    overlap_map[np.logical_and(mask_noncal, mask_outline_hu0050)] = 226 # yellow for overlap
                    # combine overlap with GT label
                    mix_overlap = label.copy()
                    mix_overlap[mask_outline_hu0050] = 150
                    mix_overlap[np.logical_and(mask_noncal, mask_outline_hu0050)] = 226 # yellow for overlap
                    # first noncal slice initializes the stacks; later ones are
                    # concatenated (NOTE(review): O(n^2) growth -- a list plus
                    # a single final stack would be linear; left unchanged)
                    if not noncal_flag:
                        noncal_evals = noncal_eval[np.newaxis, :]
                        labels = label[np.newaxis, :, :]
                        slices1 = slice1[np.newaxis, :, :]
                        slices2 = slice2[np.newaxis, :, :]
                        hu0050_maps = hu0050_map[np.newaxis, :, :]
                        overlap_maps = overlap_map[np.newaxis, :, :]
                        noncal_maps = mask_noncal[np.newaxis, :, :]
                        outline0050_maps = mask_outline_hu0050[np.newaxis, :, :]
                        mix_overlap_maps = mix_overlap[np.newaxis, :, :]
                        noncal_flag = True
                    else:
                        noncal_evals = np.concatenate([noncal_evals, noncal_eval[np.newaxis, :]])
                        labels = np.concatenate([labels, label[np.newaxis, :, :]], axis=0)
                        slices1 = np.concatenate([slices1, slice1[np.newaxis, :, :]], axis=0)
                        slices2 = np.concatenate([slices2, slice2[np.newaxis, :, :]], axis=0)
                        hu0050_maps = np.concatenate([hu0050_maps, hu0050_map[np.newaxis, :, :]], axis=0)
                        noncal_maps = np.concatenate((noncal_maps, mask_noncal[np.newaxis, :, :]), axis=0)
                        outline0050_maps = np.concatenate((outline0050_maps, mask_outline_hu0050[np.newaxis, :, :]), axis=0)
                        overlap_maps = np.concatenate([overlap_maps, overlap_map[np.newaxis, :, :]], axis=0)
                        mix_overlap_maps = np.concatenate([mix_overlap_maps, mix_overlap[np.newaxis, :, :]], axis=0)
    if not noncal_flag:
        # no noncal slice found: return empty stacks
        # NOTE(review): relies on `label` from the last processed file; a
        # sample with no label files at all would raise NameError here
        noncal_evals = np.empty((0, 2*k+2), dtype=np.int16)
        labels = np.empty((0, *label.shape), dtype=np.uint8)
        slices1 = np.empty((0, *label.shape), dtype=np.uint8)
        slices2 = np.empty((0, *label.shape), dtype=np.uint8)
        noncal_maps = np.empty((0, *label.shape), dtype=np.uint8)
        outline0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)
        hu0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        mix_overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)
    print(f1s)
    return noncal_evals, f1s, file_paths, noncal_maps, outline0050_maps, overlap_maps, labels, slices1, slices2, hu0050_maps, mix_overlap_maps
def sample_hist_statistic_multi_preocess(method, num_workers=24, step=80):
    """ calculate overlapping between noncal and outline with HU range 0 ~ 50

    Runs *method* (e.g. sample_hist_statistic) over every training sample via a
    multiprocessing pool, aggregates the per-sample results, then saves summary
    heatmaps / histograms and per-slice montage figures under ``save_dir``.

    Args:
        method: callable taking one sample directory path and returning the
            11-tuple produced by sample_hist_statistic.
        num_workers: int, number of worker processes for the pool.
        step: int, number of slices drawn per montage figure.

    NOTE(review): function name keeps the historical typo ("preocess") because
    callers may reference it; renaming would break them.
    """
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    for mode in ['train']:
        with open(osp.join('./configs/config', mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        args = [osp.join(data_dir, sample) for sample in samples]
        pool = Pool(processes=num_workers)
        print("{} CPUs are used".format(num_workers))
        results = pool.map(method, args)
        # transpose the list of per-sample 11-tuples into 11 per-field lists
        noncal_evals, f1s_samples, file_paths, noncal_maps, outline0050_maps, overlap_maps, labels, slices1, slices2, \
        hu0050_maps, mix_overlap_maps = [[result[i] for result in results] for i in range(len(results[0]))]
        pool.close()
        # flatten per-sample lists / stack per-sample arrays
        f1s_samples = reduce(lambda x, y: x + y, f1s_samples)
        file_paths = reduce(lambda x, y: x + y, file_paths)
        noncal_evals = np.concatenate(noncal_evals, axis=0)
        noncal_maps = np.concatenate(noncal_maps, axis=0)
        outline0050_maps = np.concatenate(outline0050_maps, axis=0)
        overlap_maps = np.concatenate(overlap_maps, axis=0)
        labels = np.concatenate(labels, axis=0)
        slices1 = np.concatenate(slices1, axis=0)
        slices2 = np.concatenate(slices2, axis=0)
        hu0050_maps = np.concatenate(hu0050_maps, axis=0)
        mix_overlap_maps = np.concatenate(mix_overlap_maps, axis=0)
        print(noncal_maps.shape, outline0050_maps.shape, overlap_maps.shape,
              labels.shape, slices1.shape, hu0050_maps.shape)
        save_dir = "./samples_hist/hu_lower-800_224"
        if not osp.exists(save_dir):
            os.makedirs(save_dir)
        # save file paths
        with open(osp.join(save_dir, 'noncal_paths.txt'), 'w') as writer:
            for inx, file_path in enumerate(file_paths):
                writer.write("{} : {}\n".format(inx + 1, file_path))
        # calculate mean noncal map and mean outline_hu0050 map
        noncal_map_ave = np.mean(noncal_maps, axis=0)
        outline0050_map_ave = np.mean(outline0050_maps, axis=0)
        # calculate average F1
        ave_f1 = sum(f1s_samples) / len(f1s_samples)
        print("average f1 score between noncal and outline with HU range 0 ~ 50 for all noncal slices: {}".format(ave_f1))
        plt.figure()
        plt.title("heatmap of noncalcified plaque")
        plt.imshow(noncal_map_ave)
        plt.colorbar()
        plt.savefig(osp.join(save_dir, "noncal_heatmap.jpg"))
        # plot outline 0050 heatmap
        plt.figure()
        plt.title("heatmap of outline with HU range 0 ~ 50")
        plt.imshow(outline0050_map_ave)
        plt.colorbar()
        plt.savefig(osp.join(save_dir, "outline0050_heatmap.jpg"))
        plt.figure()
        plt.hist(f1s_samples, bins=100)
        plt.xlabel("overlapping (measured in F1)")
        plt.ylabel("histogram")
        plt.title("Histogram of overlap between noncal and outline[0~50]: {:.4f}".format(ave_f1))
        plt.savefig(osp.join(save_dir, "hist_overlap.jpg"))
        # bundle everything slice-wise for the montage plots
        datas = [{'slice1':slice1, 'slice2':slice2, 'label':label, 'hu0050':hu0050, 'overlap': overlap, 'f1': f1,
                  'mix_overlap':mix_overlap, 'noncal_eval':noncal_eval, 'file_path':file_path}
                 for (slice1, slice2, label, hu0050, overlap, f1, mix_overlap, noncal_eval, file_path)
                 in zip(slices1, slices2, labels, hu0050_maps, overlap_maps, f1s_samples, mix_overlap_maps, noncal_evals, file_paths)]
        # plot `step` slices per figure
        for i in range(0, len(datas), step):
            end = min(i + step, len(datas))
            data_batch = datas[i:end]
            fig_name = osp.join(save_dir,"{:03d}".format(i+1))
            sample_list3(data_batch, rows=step, cols=6, start_with=0,
                         show_every=1, scale=4, fig_name=fig_name, start_inx=i+1)
def sample_hist_statistic(sample_path):
    """ calculate overlapping between noncal and outline within a HU threshold

    For ~5% randomly sampled slices of one sample, compares the annotated
    non-calcified plaque (gray value 76) against the artery outline restricted
    to a HU threshold (currently HU <= -800), recording slice-wise F1 scores,
    diagnostic maps and extreme noncal HU statistics.

    Args:
        sample_path: str, directory of one sample (contains per-artery folders).
    Returns:
        tuple (noncal_evals, f1s, file_paths, noncal_maps, outline0050_maps,
        overlap_maps, labels, slices1, slices2, hu0050_maps, mix_overlap_maps);
        array entries are stacked along axis 0 over all sampled slices.
    """
    k = 5  # number of extreme (top/bottom) noncal HU values to record
    f1s = []
    file_paths = []  # save paths of slices with non-calcified plaque
    np.random.seed(42)  # fixed seed so the 5% sampling is reproducible
    # per-slice accumulators, stacked once at the end; the original repeatedly
    # np.concatenate-d per slice, which is O(n^2) in the number of slices
    noncal_eval_list, label_list = [], []
    slice1_list, slice2_list, hu0050_list = [], [], []
    overlap_list, noncal_list, outline_list, mix_overlap_list = [], [], [], []
    label = None  # keeps the last-read label to shape empty outputs
    sample = sample_path.split('/')[-1]
    print("Processing ", sample)
    for artery in sorted(listdir(sample_path)):
        mask_path = osp.join(sample_path, artery, 'applicate', 'mask')
        img_path = osp.join(sample_path, artery, 'applicate', 'image')
        # extract label files
        label_files = sorted(
            [file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
        rand_seeds = np.random.uniform(0.0, 1.0, len(label_files))
        for inx, label_file in enumerate(label_files):
            # central 224x224 crop of annotation and HU image
            label = io.imread(osp.join(mask_path, label_file))[144:368, 144:368]
            image = io.imread(osp.join(img_path, label_file))[144:368, 144:368]
            if rand_seeds[inx] >= 0.05:
                continue  # keep ~5% of the slices
            # save file path
            file_paths.append('/'.join([sample, artery, label_file]))
            # noncal evaluation: counts outside [0, 50] plus k extreme HU values
            n_above50 = np.sum(np.logical_and(label == 76, image > 50))
            n_below0 = np.sum(np.logical_and(label == 76, image < 0))
            if np.sum(label == 76) != 0:
                noncal_pixels_sorted = sorted(image[label == 76].flatten())
                topk = noncal_pixels_sorted[-k:]
                bottomk = noncal_pixels_sorted[:k]
            else:
                # sentinels just outside [0, 50] when the slice has no noncal
                topk = [51 for _ in range(k)]
                bottomk = [-1 for _ in range(k)]
            noncal_eval_list.append(
                np.array([n_above50, n_below0, *topk, *bottomk]).astype(np.int16))
            # HU-threshold map (names kept from the historical [0, 50] range)
            # mask_hu0050 = np.logical_and(image <= -800, image >= -1000)
            mask_hu0050 = (image <= -800)
            hu0050_map = np.zeros(label.shape, dtype=np.uint8)
            hu0050_map[mask_hu0050] = 150
            slice1 = image  # raw HU slice
            slice2 = hu2lut(image, window=1000, level=700)  # windowed for calcification
            # noncal mask and outline (noncal + wall + cal) within the HU threshold
            mask_noncal = (label == 76)
            mask_outline = np.logical_or(label == 76, label == 255)
            mask_outline = np.logical_or(mask_outline, label == 151)
            mask_outline_hu0050 = np.logical_and(mask_outline, mask_hu0050)
            # slice-wise F1 between noncal annotation and thresholded outline
            f1s.append(f1_score(mask_noncal.flatten(), mask_outline_hu0050.flatten()))
            # overlap visualization
            overlap_map = np.zeros(label.shape, dtype=np.uint8)
            overlap_map[mask_noncal] = 76
            overlap_map[mask_outline_hu0050] = 150
            overlap_map[np.logical_and(mask_noncal, mask_outline_hu0050)] = 226  # yellow for overlap
            # combine overlap with GT label
            mix_overlap = label.copy()
            mix_overlap[mask_outline_hu0050] = 150
            mix_overlap[np.logical_and(mask_noncal, mask_outline_hu0050)] = 226  # yellow for overlap
            label_list.append(label)
            slice1_list.append(slice1)
            slice2_list.append(slice2)
            hu0050_list.append(hu0050_map)
            overlap_list.append(overlap_map)
            noncal_list.append(mask_noncal)
            outline_list.append(mask_outline_hu0050)
            mix_overlap_list.append(mix_overlap)
    if noncal_eval_list:
        noncal_evals = np.stack(noncal_eval_list)
        labels = np.stack(label_list)
        slices1 = np.stack(slice1_list)
        slices2 = np.stack(slice2_list)
        hu0050_maps = np.stack(hu0050_list)
        overlap_maps = np.stack(overlap_list)
        noncal_maps = np.stack(noncal_list)
        outline0050_maps = np.stack(outline_list)
        mix_overlap_maps = np.stack(mix_overlap_list)
    else:
        # no slice was sampled: return empty arrays shaped like the last label
        # (like the original, this fails if the sample had no slices at all)
        noncal_evals = np.empty((0, 2*k+2), dtype=np.int16)
        labels = np.empty((0, *label.shape), dtype=np.uint8)
        slices1 = np.empty((0, *label.shape), dtype=np.uint8)
        slices2 = np.empty((0, *label.shape), dtype=np.uint8)
        noncal_maps = np.empty((0, *label.shape), dtype=np.uint8)
        outline0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)
        hu0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        mix_overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)
    print(f1s)
    return noncal_evals, f1s, file_paths, noncal_maps, outline0050_maps, overlap_maps, labels, slices1, slices2, hu0050_maps, mix_overlap_maps
| Python |
3D | kkhuang1990/PlaqueDetection | utils.py | .py | 12,990 | 395 | # _*_ coding: utf-8 _*_
""" Often used functions for data loading and visualisation """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore', category=RuntimeWarning, module='scipy')
import numpy as np
from sklearn.preprocessing import label_binarize
from scipy import ndimage
import cv2
def count_parameters(model):
    """ Return the total number of trainable (requires_grad) parameters. """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def rgb2gray(rgb):
    """ Convert an RGB image to a grayscale one.

    Args:
        rgb: ndarray, either [H, W] (already grayscale, cast to uint8 and
            returned) or [H, W, 3] RGB image.
    Returns:
        ndarray of dtype uint8, shape [H, W] (ITU-R BT.601 luma weights).
    Raises:
        ValueError: if the input is neither 2- nor 3-dimensional.
    """
    if rgb.ndim == 2:
        img_gray = rgb
    elif rgb.ndim == 3:
        img_gray = np.dot(rgb, [0.299, 0.587, 0.114])
    else:
        # the original fell through and crashed on an undefined variable
        # for any other rank; fail loudly instead
        raise ValueError("expected a 2D grayscale or 3D RGB image, got ndim={}".format(rgb.ndim))
    return img_gray.astype(np.uint8)
def denormalize(image, v=182.7666473388672, m=-4.676876544952393):
    """ Map a normalized image back to its original Hounsfield-unit range.

    Inverts the ``(hu - m) / v`` normalization; *v* and *m* are the dataset's
    scale/offset constants. Returns an int16 ndarray.
    """
    restored = image * v + m
    return restored.astype(np.int16)
def rgb2mask(rgb):
    """ convert rgb image into mask
    red - (255, 0, 0) : low-density plaque --> 4
    black - (0, 0, 0) : background --> 0
    orange - (255, 128, 0) : calcification --> 3
    white - (255, 255, 255) : Border of the artery (small in healthy patients) --> 2
    blue - (0, 0, 255) : inside of the artery --> 1
    """
    color_to_class = (
        ([255, 0, 0], 4),
        ([255, 128, 0], 3),
        ([255, 255, 255], 2),
        ([0, 0, 255], 1),
    )
    mask = np.zeros(rgb.shape[:2], dtype=np.uint8)
    for color, class_inx in color_to_class:
        mask[np.all(rgb == color, axis=2)] = class_inx
    return mask
def gray2rgb(gray):
    """ Convert a grayscale annotation to RGB for a fixed set of discrete values. """
    value_to_color = {
        29: [0, 0, 255],
        255: [255, 255, 255],
        151: [255, 128, 0],
        76: [255, 0, 0],
        226: [255, 255, 0],
        150: [0, 255, 0],
    }
    h, w = gray.shape[:2]
    rgb = np.zeros((h, w, 3), dtype=np.uint8)
    for value, color in value_to_color.items():
        rgb[gray == value] = color
    return rgb
def mask2rgb(mask):
    """ Convert a class mask (0..4) into an RGB image. """
    class_to_color = {
        1: [0, 0, 255],      # artery interior
        2: [255, 255, 255],  # artery border
        3: [255, 128, 0],    # calcification
        4: [255, 0, 0],      # low-density plaque
    }
    h, w = mask.shape[:2]
    rgb = np.zeros((h, w, 3), dtype=np.uint8)
    for class_inx, color in class_to_color.items():
        rgb[mask == class_inx] = color
    return rgb
def mask2gray(mask):
    """ Convert a class mask (0..4) into the grayscale annotation encoding. """
    class_to_value = {1: 29, 2: 255, 3: 151, 4: 76}
    h, w = mask.shape[:2]
    gray = np.zeros((h, w), dtype=np.uint8)
    for class_inx, value in class_to_value.items():
        gray[mask == class_inx] = value
    return gray
def gray2mask(gray):
    """ convert gray-scale image to 2D mask
    red - 76 : low-density plaque --> 4
    black - 0 : background --> 0
    orange - 151 : calcification --> 3
    white - 255 : Border of the artery (small in healthy patients) --> 2
    blue - 29 : inside of the artery --> 1
    """
    value_to_class = {76: 4, 151: 3, 255: 2, 29: 1}
    mask = np.zeros_like(gray, dtype=np.uint8)
    for value, class_inx in value_to_class.items():
        mask[gray == value] = class_inx
    return mask
def central_crop(image, patch_size):
    """ Crop the central region of *image*.

    Args:
        image: numpy ndarray [H, W, ...], input image
        patch_size: int or (h, w) tuple, size of the crop; the margins
            (H - h) and (W - w) must both be even
    Returns:
        the centered view of shape patch_size
    """
    assert isinstance(patch_size, (int, tuple)), "size must be int or tuple"
    size = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size
    h, w = image.shape[:2]
    assert (h - size[0]) % 2 == 0 and (w - size[1]) % 2 == 0, \
        "new image size must match with the input image size"
    top = (h - size[0]) // 2
    left = (w - size[1]) // 2
    return image[top:top + size[0], left:left + size[1]]
def dcm2hu(dcm):
    """ Convert a DICOM dataset's pixel data into Hounsfield units (HU)
    using its RescaleSlope / RescaleIntercept tags. Returns int16. """
    raw = dcm.pixel_array
    hu = dcm.RescaleSlope * raw + dcm.RescaleIntercept
    return np.array(hu, dtype=np.int16)
def hu2gray(image, hu_max=1640.0, hu_min=-1024.0):
    """ Linearly rescale HU values from [hu_min, hu_max] onto [0, 255] (float). """
    factor = float(255) / (hu_max - hu_min)
    return (image - hu_min) * factor
def gray2m11range(image):
    """ Rescale grayscale values from [0, 255] into the [-1, 1] range. """
    doubled = 2.0 * image
    return doubled / 255.0 - 1.0
def hu2lut(data, window, level):
    """ Apply a DICOM-style linear VOI LUT (window/level) to HU data.

    Values at or below ``level - 0.5 - (window - 1) / 2`` map to 0, values
    above ``level - 0.5 + (window - 1) / 2`` map to 255, and values in between
    are mapped linearly (the DICOM PS3.3 C.11.2.1.2 windowing formula).

    Returns a float32 array.

    NOTE(review): np.piecewise allocates its output with *data*'s dtype, so for
    integer input the linear ramp is truncated to integers before the final
    float32 cast — confirm this truncation is intended before refactoring.
    """
    lut = np.piecewise(data, [data <= (level - 0.5 - (window - 1) / 2),
                              data > (level - 0.5 + (window - 1) / 2)],
                       [0, 255, lambda data: ((data - (level - 0.5)) / (window - 1) + 0.5) * (255 - 0)])
    return lut.astype(np.float32)
def hu2norm(image, hu_max=1440.0, hu_min=-1024.0):
    """ scale into (0.0 1.0) """
    factor = 1.0 / (hu_max - hu_min)
    return (image - hu_min) * factor
def shuffle_backward(l, order):
    """ Undo a permutation: element i of *l* is placed back at position order[i]. """
    restored = np.zeros_like(l)
    for src_inx, dst_inx in enumerate(order):
        restored[dst_inx] = l[src_inx]
    return restored
def gray2dboulebound(gray, width=2):
    """ Convert a grayscale annotation into inner and outer artery boundaries.

    Plaque values (76 low-density, 151 calcification) are merged into the
    artery wall (255) before boundary extraction so plaques do not break the
    outline. A boundary pixel is one whose taxicab distance to the region
    border lies in [1, width].

    Args:
        gray: ndarray [H, W], grayscale annotation (values 0/29/76/151/255).
        width: int, boundary thickness in pixels.
    Returns:
        (inner_bound, outer_bound): binary ndarrays of shape [H, W].
    """
    h, w = gray.shape[:2]
    # work on a copy so the caller's annotation is not mutated
    # (consistent with gray2innerouterbound)
    gray_cp = gray.copy()
    gray_cp[gray == 76] = 255
    gray_cp[gray == 151] = 255
    label = gray2mask(gray_cp)
    label_binary = label_binarize(label.flatten(), classes=range(0, 3))
    label_binary = np.reshape(label_binary, (h, w, -1))
    bound_binary = np.zeros_like(label_binary)
    for i in range(3):  # one distance transform per class channel
        tmp = ndimage.distance_transform_cdt(label_binary[:, :, i], 'taxicab')
        cdt = np.logical_and(tmp >= 1, tmp <= width)
        bound_binary[:, :, i] = cdt
    inner_bound, outer_bound = bound_binary[:, :, 1], bound_binary[:, :, 0]
    return inner_bound, outer_bound
def gray2bound(gray, n_classes=3, width=2):
    """ Convert a grayscale annotation into a single binary boundary map.

    For n_classes <= 3 the plaque values (76, 151) are merged into the artery
    wall (255) first. A boundary pixel is one whose taxicab distance to the
    region border lies in [1, width]; boundaries of all classes are OR-ed.

    Args:
        gray: ndarray [H, W], grayscale annotation.
        n_classes: int, number of classes binarized before edge detection.
        width: int, boundary thickness in pixels.
    Returns:
        uint8 ndarray [H, W], 1 on any class boundary, 0 elsewhere.
    """
    h, w = gray.shape[:2]
    if n_classes <= 3:  # if n_classes less than 3, cal and noncal are not considered
        # copy so the caller's annotation is not mutated
        # (consistent with gray2innerouterbound)
        gray = gray.copy()
        gray[gray == 76] = 255
        gray[gray == 151] = 255
    label = gray2mask(gray)
    label_binary = label_binarize(label.flatten(), classes=range(0, n_classes))
    label_binary = np.reshape(label_binary, (h, w, -1))
    bound_binary = np.zeros_like(label_binary)
    for i in range(n_classes):  # number of classes before edge detection
        tmp = ndimage.distance_transform_cdt(label_binary[:, :, i], 'taxicab')
        cdt = np.logical_and(tmp >= 1, tmp <= width)
        bound_binary[:, :, i] = cdt
    bound = np.any(bound_binary, axis=2).astype(np.uint8)
    return bound
def gray2triplewithbound(gray, n_classes=4, width=1):
    """ Convert a grayscale annotation to a 3-class segmentation plus boundary labels.

    Produces mask values: 1 = artery interior (29), 2 = wall (255, with plaque
    values 76/151 merged in), and boundary pixels overwritten as 3 (n_classes=4,
    single boundary class) or 3/4 (n_classes=5, inner/outer separately).

    Args:
        gray: ndarray [H, W], grayscale annotation.
        n_classes: int, 4 for one merged boundary class, 5 for inner + outer.
        width: int, boundary thickness in pixels.
    Returns:
        uint8 ndarray [H, W].
    """
    h, w = gray.shape[:2]
    # work on a copy so the caller's annotation is not mutated
    # (consistent with gray2innerouterbound)
    gray_cp = gray.copy()
    gray_cp[gray == 76] = 255
    gray_cp[gray == 151] = 255
    label = gray2mask(gray_cp)
    label_binary = label_binarize(label.flatten(), classes=range(0, 3))
    label_binary = np.reshape(label_binary, (h, w, -1))
    bound_binary = np.zeros_like(label_binary)
    for i in range(3):  # number of classes before edge detection
        tmp = ndimage.distance_transform_cdt(label_binary[:, :, i], 'taxicab')
        cdt = np.logical_and(tmp >= 1, tmp <= width)
        bound_binary[:, :, i] = cdt
    bound = np.any(bound_binary, axis=2)
    mask = np.zeros((h, w), dtype=np.uint8)
    mask[gray_cp == 255] = 2
    mask[gray_cp == 29] = 1
    if n_classes == 4:
        mask[bound] = 3
    elif n_classes == 5:
        mask[bound_binary[:, :, 1] == 1] = 3  # inner boundary
        mask[bound_binary[:, :, 0] == 1] = 4  # outer boundary
    return mask
def gray2innerbound(gray, width):
    """ Convert a grayscale annotation to the inner (lumen) boundary only.

    Args:
        gray: ndarray [H, W], grayscale annotation.
        width: int, boundary thickness in pixels (taxicab distance range [1, width]).
    Returns:
        uint8 ndarray [H, W], 1 on the inner boundary.
    """
    h, w = gray.shape[:2]
    # work on a copy so the caller's annotation is not mutated
    # (consistent with gray2innerouterbound)
    gray_cp = gray.copy()
    gray_cp[gray == 76] = 255
    gray_cp[gray == 151] = 255
    label = gray2mask(gray_cp)
    label_binary = label_binarize(label.flatten(), classes=range(0, 3))
    label_binary = np.reshape(label_binary, (h, w, -1))
    # channel 1 is the artery interior; its border is the inner boundary
    tmp = ndimage.distance_transform_cdt(label_binary[:, :, 1], 'taxicab')
    inner_bound = np.logical_and(tmp >= 1, tmp <= width).astype(np.uint8)
    return inner_bound
def gray2outerbound(gray, width):
    """ Convert a grayscale annotation to the outer artery boundary only.

    Args:
        gray: ndarray [H, W], grayscale annotation.
        width: int, boundary thickness in pixels (taxicab distance range [1, width]).
    Returns:
        uint8 ndarray [H, W], 1 on the outer boundary.
    """
    h, w = gray.shape[:2]
    # work on a copy so the caller's annotation is not mutated
    # (consistent with gray2innerouterbound)
    gray_cp = gray.copy()
    gray_cp[gray == 76] = 255
    gray_cp[gray == 151] = 255
    label = gray2mask(gray_cp)
    label_binary = label_binarize(label.flatten(), classes=range(0, 3))
    label_binary = np.reshape(label_binary, (h, w, -1))
    # channel 0 is the background; its border is the outer boundary
    tmp = ndimage.distance_transform_cdt(label_binary[:, :, 0], 'taxicab')
    outer_bound = np.logical_and(tmp >= 1, tmp <= width).astype(np.uint8)
    return outer_bound
def gray2innerouterbound(gray, width):
    """ Convert a grayscale annotation into inner/outer boundary labels,
    where the inner boundary is marked 1 and the outer boundary 2
    (inner wins where the two overlap).
    """
    h, w = gray.shape[:2]
    merged = gray.copy()
    merged[gray == 76] = 255
    merged[gray == 151] = 255
    label = gray2mask(merged)
    onehot = np.reshape(label_binarize(label.flatten(), classes=range(0, 3)), (h, w, -1))
    bound_onehot = np.zeros_like(onehot)
    for class_inx in range(3):  # number of classes before edge detection
        dist = ndimage.distance_transform_cdt(onehot[:, :, class_inx], 'taxicab')
        bound_onehot[:, :, class_inx] = np.logical_and(dist >= 1, dist <= width)
    bound = np.zeros_like(gray, dtype=np.uint8)
    bound[bound_onehot[:, :, 0] != 0] = 2  # outer bound marked as 2
    bound[bound_onehot[:, :, 1] != 0] = 1  # inner bound marked as 1
    return bound
def mask2innerouterbound(mask, width):
    """ Convert a class mask into inner/outer boundary labels,
    where the inner boundary is marked 1 and the outer boundary 2
    (inner wins where the two overlap).
    """
    h, w = mask.shape[:2]
    merged = mask.copy()
    merged[mask == 3] = 2
    merged[mask == 4] = 2
    onehot = np.reshape(label_binarize(merged.flatten(), classes=range(0, 3)), (h, w, -1))
    bound_onehot = np.zeros_like(onehot)
    for class_inx in range(3):  # number of classes before edge detection
        dist = ndimage.distance_transform_cdt(onehot[:, :, class_inx], 'taxicab')
        bound_onehot[:, :, class_inx] = np.logical_and(dist >= 1, dist <= width)
    bound = np.zeros_like(merged, dtype=np.uint8)
    bound[bound_onehot[:, :, 0] != 0] = 2  # outer bound marked as 2
    bound[bound_onehot[:, :, 1] != 0] = 1  # inner bound marked as 1
    return bound
def innerouterbound2mask(innerouter, n_classes=3):
    """ transform innerouter bound to mask segmentation, cv2.drawContours is used for transformation
    :param innerouter: ndarray of size [H, W], 1 - inner, 2 - outer
    :param n_classes: int, number of classes
    :return: mask: ndarray of size [H, W]
    """
    # only apply to situation with n_classes = 3
    filled = np.zeros(innerouter.shape[:2], np.uint8)
    # fill the outer contour first, then overwrite with the inner one
    for class_inx in range(n_classes - 1, 0, -1):
        coords = np.array(np.where(innerouter == class_inx)).transpose()  # [N, 2]
        contour = np.expand_dims(np.flip(coords, axis=1), axis=1)  # [N, 2] --> [N, 1, 2]
        filled = cv2.drawContours(filled, [contour], -1, class_inx, thickness=cv2.FILLED)
    return filled
def mask2outerbound(mask, width):
    """ Convert a class mask into the outer artery boundary only.

    Args:
        mask: ndarray [H, W], class mask (plaque classes 3/4 merged into wall 2).
        width: int, boundary thickness in pixels (taxicab distance range [1, width]).
    Returns:
        uint8 ndarray [H, W] with the outer bound marked as 1.
    """
    h, w = mask.shape[:2]
    # work on a copy so the caller's mask is not mutated
    # (consistent with mask2innerouterbound)
    mask_np = mask.copy()
    mask_np[mask == 3] = 2
    mask_np[mask == 4] = 2
    bound = np.zeros_like(mask_np, dtype=np.uint8)
    label_binary = label_binarize(mask_np.flatten(), classes=range(0, 3))
    label_binary = np.reshape(label_binary, (h, w, -1))
    bound_binary = np.zeros_like(label_binary)
    for i in range(3):  # number of classes before edge detection
        tmp = ndimage.distance_transform_cdt(label_binary[:, :, i], 'taxicab')
        cdt = np.logical_and(tmp >= 1, tmp <= width)
        bound_binary[:, :, i] = cdt
    bound[bound_binary[:, :, 0] != 0] = 1  # outer bound marked as 1
    return bound
def mask2bounds(mask, width=2, n_classes=3):
    """ Convert a class mask (values 0 .. n_classes-1) into a binary boundary
    map; analogous to gray2bound but starting from class indices. """
    h, w = mask.shape[:2]
    onehot = np.reshape(label_binarize(mask.flatten(), classes=range(0, n_classes)), (h, w, -1))
    bound_onehot = np.zeros_like(onehot)
    for class_inx in range(n_classes):  # number of classes before edge detection
        dist = ndimage.distance_transform_cdt(onehot[:, :, class_inx], 'taxicab')
        bound_onehot[:, :, class_inx] = np.logical_and(dist >= 1, dist <= width)
    return np.any(bound_onehot, axis=2).astype(np.uint8)
def ls2bound(ls, width=1):
    """ Extract the boundary ring (taxicab distance in [1, width]) of a
    morphological-snake / level-set result. """
    dist = ndimage.distance_transform_cdt(ls, 'taxicab')
    return np.logical_and(dist >= 1, dist <= width)
def lslist2bound(ls_list):
    """ Merge a list of level-set masks into one boundary label map;
    the boundary of mask i is labelled i + 1. """
    h, w = ls_list[0].shape
    bound = np.zeros((h, w), dtype=np.uint8)
    for label_value, ls in enumerate(ls_list, start=1):
        bound[ls2bound(ls, width=1)] = label_value
    return bound
| Python |
3D | kkhuang1990/PlaqueDetection | playground.py | .py | 93 | 5 | # _*_ coding: utf-8 _*_
""" playground for debug
you can check functions freely here
""" | Python |
3D | kkhuang1990/PlaqueDetection | BoundDetection/train.py | .py | 27,444 | 555 | # _*_ coding: utf-8 _*_
""" define train and test functions here """
import matplotlib as mpl
mpl.use('Agg')
import imageio
import warnings
warnings.filterwarnings('ignore', module='imageio')
import sys
sys.path.append("..")
import matplotlib.pyplot as plt
from sklearn.metrics import auc
import copy
from collections import Counter
from vision import sample_list_hdf
import numpy as np
np.set_printoptions(precision=4)
from tqdm import tqdm
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
import os.path as osp
import os
import pickle
from metric import cal_f_score, cal_f_score_slicewise, volumewise_hd95, volumewise_asd, volumewise_ravd
from loss import WeightedKLDivLoss
from vision import plot_metrics, plaque_detection_rate, plot_class_f1
from vision import plot_slice_wise_measures
from image.models.deeplab_resnet import adjust_learning_rate, cal_loss
from snake import probmap2bound
from loss import WeightedHausdorffDistanceDoubleBoundLoss
from utils import innerouterbound2mask
from image.models.deeplab_resnet import adjust_learning_rate, cal_loss
def train_model(model, criterion, optimizer, scheduler, args):
    """ train the model
    Args:
        model: model inheriting from nn.Module class
        criterion: criterion class, loss function used
        optimizer: optimizer, optimization strategy
        scheduler: lr scheduler
        args: parser arguments

    Trains for up to args.num_train_epochs with optional periodic hard mining,
    tracks loss / accuracy / F1 / HD95 / ASD / VD per phase, early-stops after
    the smoothed validation loss worsens 10 epochs in a row, and saves the best
    model (lowest smoothed validation loss) to args.model_save_name.
    """
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 1.0e9
    loss_keep = 0 # check how many times the val loss has decreased continuously
    epoch_loss_prev = 1.0e9 # loss at the previous epoch
    # metrics for each epoch
    epoch_acc = {'train': [], 'val': [], 'test': []}
    epoch_loss = {'train': [], 'val': [], 'test': []}
    epoch_loss_boundwise = {'train': [], 'val': [], 'test': []} # for prediction and regularization respectively
    epoch_hdist = {'train': [], 'val': [], 'test': []}
    epoch_reghdf = {'train': [], 'val': [], 'test': []}
    epoch_asd = {'train': [], 'val': [], 'test': []}
    epoch_vd = {'train': [], 'val': [], 'test': []}
    epoch_f1_score = {'train': [], 'val': [], 'test': []}
    epoch_f1_score_class = {'train': [], 'val': [], 'test': []}
    # for hard mining
    metric_prev_epoch = None
    phases_prev_epoch = None
    # start training
    for epoch in range(args.num_train_epochs):
        print("{}/{}".format(epoch+1, args.num_train_epochs))
        # re-mine hard samples every n_epoch_hardmining epochs (not on epoch 0)
        if epoch != 0 and epoch % args.n_epoch_hardmining == 0:
            is_hard_mining = True
        else:
            is_hard_mining = False
        if args.model_type == '2d':
            from image.dataloader import read_train_data
            if args.only_plaque: # only use samples containing plaques for training
                dataloaders = read_train_data(args.data_dir, args.compose, 'train', None, None, True,
                    is_hard_mining, args.num_workers, args.batch_size, args.percentile, args.multi_view,
                    args.only_plaque, args.config, args.bc_learning)
            else:
                dataloaders = read_train_data(args.data_dir, args.compose, 'train', metric_prev_epoch, phases_prev_epoch, True,
                    is_hard_mining, args.num_workers, args.batch_size, args.percentile, args.multi_view,
                    args.only_plaque, args.config, args.bc_learning)
        else: # parameters of dataloader for 2.5D and 3D is the same
            if args.model_type == '3d':
                from volume.dataloader import read_train_data
            elif args.model_type == '2.5d':
                from hybrid.dataloader import read_train_data
            dataloaders = read_train_data(args.data_dir, metric_prev_epoch, phases_prev_epoch, args.compose, 'train',
                is_hard_mining, args.percentile, args.multi_view, args.interval, args.down_sample,
                args.batch_size, args.num_workers, True, args.config)
        # during hard mining, if # of training samples is lower than threshold, stop training
        if len(dataloaders['train'].dataset.phases) <= 20:
            break
        dataset_sizes = {'train': 0, 'val': 0, 'test': 0}
        for phase in ['train', 'val', 'test']:
            if phase == 'train':
                scheduler.step()
                if args.model == 'deeplab_resnet':
                    adjust_learning_rate(optimizer, scheduler)
                model.train()  # Set model to training mode
                slicewise_metric_epoch = [] # for hard mining
            else:
                model.eval()  # Set model to evaluate mode
            # running accumulators over the whole phase, divided by
            # dataset_sizes[phase] at the end of the epoch
            running_loss = 0.0
            running_hdist, running_reghdf, running_asd, running_vd = 0.0, 0.0, 0.0, 0.0
            running_corrects, running_f1_score = 0.0, 0.0
            running_fscores = np.zeros(args.output_channel, dtype=np.float32) # class-wise F1 score
            # record # of effective samples for each class for segmentation
            running_effect_samples = np.zeros(args.output_channel, dtype=np.uint32)
            if args.criterion.startswith('whddb') or args.criterion == 'mwhddb': # record inner and outer bound respectively
                running_boundwise_loss = np.zeros(args.output_channel-1)
            dl_pbar = tqdm(dataloaders[phase])
            for sample_inx, sample in enumerate(dl_pbar):
                dl_pbar.update(100)
                inputs, labels = sample
                patch_size = len(inputs)
                dataset_sizes[phase] += patch_size
                # wrap them in Variable
                if args.use_gpu:
                    inputs = Variable(inputs.cuda()).float()
                    labels = Variable(labels.cuda()).long()
                else:
                    inputs = Variable(inputs).float()
                    labels = Variable(labels).long()
                optimizer.zero_grad()
                outputs = model(inputs)
                # snake constraint
                # NOTE(review): probmap2bound runs for every model/criterion even
                # when the snake loss is unused — presumably needed for the
                # reg-based metrics (reghdf/asd/F1/VD) computed below; confirm.
                regs = probmap2bound(F.softmax(outputs, 1), n_workers=32, thres=0.7, kernel_size=9)
                if args.model == 'deeplab_resnet':
                    loss = cal_loss(outputs, labels, args.criterion, criterion)
                    outputs = outputs[-1]  # max fusion output is saved
                    outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs)
                elif args.model_type == "2.5d" and args.model == "res_unet_reg": # Hybrid res-unet with regularization
                    prob_map, reg = outputs
                    # regression head predicts the number of ground-truth boundary points
                    n_gt_pts = torch.sum(labels.view(labels.size(0), -1) != 0, 1).float()
                    criterion_reg = nn.SmoothL1Loss()
                    loss_reg = criterion_reg(reg, n_gt_pts)
                    assert (args.criterion.startswith('whddb') or args.criterion == 'mwhddb'), \
                        "Hybrid Res-UNet should match with WHD loss"
                    loss_whd, loss_boundwise = criterion(F.softmax(prob_map, dim=1), labels)
                    loss = loss_whd + 1.0 * loss_reg
                    outputs = prob_map
                else: # with single output
                    if args.criterion == 'nll' and not args.mpl:
                        loss = criterion(F.log_softmax(outputs, dim=1), labels)
                    elif args.criterion == 'whd':
                        loss = criterion(F.softmax(outputs, dim=1)[:, 1], labels)
                    elif args.criterion == 'mwhd':
                        loss = criterion(F.softmax(outputs, dim=1)[:, 1], labels)
                    # whddb series loss
                    elif args.criterion == 'whddb' or args.criterion == 'mwhddb' or args.criterion == "whddbmax":
                        loss, loss_boundwise = criterion(F.softmax(outputs, dim=1), labels)
                    # snake constrained whddb loss
                    elif args.criterion == "whddbsnake":
                        # warm-up: plain WHD loss for the first 10 epochs,
                        # snake-constrained loss afterwards
                        if epoch <= 10:
                            criterion_base = WeightedHausdorffDistanceDoubleBoundLoss(return_boundwise_loss=True,
                                alpha=args.whd_alpha, beta=args.whd_beta, ratio=args.whd_ratio)
                            loss, loss_boundwise = criterion_base(F.softmax(outputs, dim=1), labels)
                        else:
                            # reg has already been calculated before
                            loss, loss_boundwise = criterion(F.softmax(outputs, dim=1), labels, regs)
                    elif args.criterion == 'whddb_cereg':
                        loss_whd, loss_boundwise = criterion(F.softmax(outputs, dim=1), labels)
                        loss_ce = nn.CrossEntropyLoss(ignore_index=0)(outputs, labels)
                        loss = 0.2 * loss_whd + 0.8 * loss_ce
                    else: # dice, ce, gdl1, gdl2, ceb
                        loss = criterion(outputs, labels)
                _, preds = torch.max(outputs.data, 1)
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                if args.criterion.startswith('whddb') or args.criterion == 'mwhddb':
                    running_boundwise_loss += loss_boundwise.data.cpu().numpy() * patch_size
                # various metrics
                running_loss += loss.data.item() * patch_size
                running_corrects += float(torch.sum(preds == labels.data)) / preds[0].numel()
                # calculate hd95 and asd
                preds_bound_np, labels_bound_np = preds.cpu().numpy(), labels.data.cpu().numpy()
                regs_np = regs.data.cpu().numpy()
                mean_hdf, batch_hdf = volumewise_hd95(preds_bound_np, labels_bound_np, return_slicewise_hdf=True)
                mean_reghdf = volumewise_hd95(regs_np, labels_bound_np, return_slicewise_hdf=False)
                mean_asd = volumewise_asd(regs_np, labels_bound_np, n_classes=3)
                running_hdist += mean_hdf * patch_size
                running_reghdf += mean_reghdf * patch_size
                running_asd += mean_asd * patch_size
                # calculate F1, VD
                # boundaries are filled back into region masks before scoring
                preds_np = np.stack([innerouterbound2mask(r, args.output_channel) for r in regs_np])
                labels_np = np.stack([innerouterbound2mask(label, args.output_channel) for label in labels_bound_np])
                cal_f1 = cal_f_score_slicewise if args.model_type == '3d' else cal_f_score
                _, f_scores, n_effect_samples = cal_f1(preds_np, labels_np, n_class=args.output_channel,
                    return_slice_f1=False, return_class_f1=True)
                running_fscores += f_scores
                running_effect_samples += n_effect_samples
                mean_vd = volumewise_ravd(preds_np, labels_np)
                running_vd += mean_vd * patch_size
                if phase == 'train':
                    # slice-wise HD95 drives next epoch's hard mining
                    slicewise_metric_epoch += batch_hdf
            dl_pbar.close()
            print()
            if args.criterion.startswith('whddb') or args.criterion == 'mwhddb':
                epoch_loss_boundwise[phase].append(running_boundwise_loss / dataset_sizes[phase])
            epoch_loss[phase].append(running_loss / dataset_sizes[phase])
            epoch_acc[phase].append(float(running_corrects) / dataset_sizes[phase])
            epoch_hdist[phase].append(running_hdist / dataset_sizes[phase])
            epoch_asd[phase].append(running_asd / dataset_sizes[phase])
            epoch_vd[phase].append(running_vd / dataset_sizes[phase])
            epoch_reghdf[phase].append(running_reghdf / dataset_sizes[phase])
            running_f1_class = running_fscores / running_effect_samples
            epoch_f1_score_class[phase].append(running_f1_class) # f1 score for each class
            epoch_f1_score[phase].append(running_f1_class.mean())
            if args.criterion.startswith('whddb') or args.criterion == 'mwhddb':
                print("[{:5s}({} samples)] Loss: {:.4f} Loss_boundwise: {} Acc: {:.4f} Ave_F1: {:.4f} class-wise F1: {} "
                      "Ave_hdf: {:.4f} Ave_reghdf: {:.4f} Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(phase, len(dataloaders[phase].dataset.phases),
                    epoch_loss[phase][-1], epoch_loss_boundwise[phase][-1], epoch_acc[phase][-1], epoch_f1_score[phase][-1],
                    running_f1_class, epoch_hdist[phase][-1], epoch_reghdf[phase][-1], epoch_asd[phase][-1], epoch_vd[phase][-1]))
            else:
                print("[{:5s}({} samples)] Loss: {:.4f} Acc: {:.4f} Ave_F1: {:.4f} class-wise F1: {} Ave_hdf: {:.4f} "
                      "Ave_reghdf: {:.4f} Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(phase, len(dataloaders[phase].dataset.phases),
                    epoch_loss[phase][-1], epoch_acc[phase][-1], epoch_f1_score[phase][-1], running_f1_class,
                    epoch_hdist[phase][-1], epoch_reghdf[phase][-1], epoch_asd[phase][-1], epoch_vd[phase][-1]))
            # update metric_prev_epoch and phases_prev_epoch
            if phase == 'train':
                metric_prev_epoch = np.array(slicewise_metric_epoch)
                phases_prev_epoch = dataloaders['train'].dataset.phases
            # save the learnt best model evaluated on validation data
            if phase == 'val':
                # smoothed validation loss: mean over the last (up to) 5 epochs
                val_loss_bf = sum(epoch_loss['val'][-5:]) / len(epoch_loss['val'][-5:])
                if val_loss_bf <= best_loss:
                    best_loss = val_loss_bf
                    best_model_wts = copy.deepcopy(model.state_dict())
                    torch.save(model, args.model_save_name)
                if val_loss_bf > epoch_loss_prev:
                    loss_keep += 1
                else:
                    loss_keep = 0
                epoch_loss_prev = val_loss_bf
            # plot temporal loss, acc, f1_score for train, val and test respectively.
            if (epoch+1) % 5 == 0 and phase == 'test':
                metrics = [epoch_loss, epoch_acc, epoch_f1_score, epoch_asd, epoch_vd, epoch_hdist, epoch_reghdf]
                labels = ['total_loss', 'pixel_acc', 'F1_score', 'asd', 'vd', 'hd95_pred', 'hd95_reg']
                plot_metrics(metrics, labels, fig_dir=args.fig_dir)
                plot_class_f1(epoch_f1_score_class, args.fig_dir)
                ## for plot innerbound and outerbound loss respectively
                # if args.criterion.startswith('whddb') or args.criterion == 'mwhddb':
                #     metrics= [{k:v[i] for k, v in epoch_loss_boundwise.items()} for i in range(args.output_channel-1)]
                #     labels = ['innerbound_loss', 'outerbound_loss']
                #     plot_metrics(metrics, labels, fig_dir=args.fig_dir)
        # early stop: validation loss rose 10 epochs in a row
        if loss_keep == 10:
            break
    metrics = [epoch_loss, epoch_acc, epoch_f1_score, epoch_asd, epoch_vd, epoch_hdist, epoch_reghdf]
    labels = ['total_loss', 'pixel_acc', 'F1_score', 'asd', 'vd', 'hd95_pred', 'hd95_reg']
    plot_metrics(metrics, labels, fig_dir=args.fig_dir)
    plot_class_f1(epoch_f1_score_class, args.fig_dir)
    # if args.criterion.startswith('whddb') or args.criterion == 'mwhddb':
    #     metrics = [{k: v[i] for k, v in epoch_loss_boundwise.items()} for i in range(args.output_channel - 1)]
    #     labels = ['innerbound_loss', 'outerbound_loss']
    #     plot_metrics(metrics, labels, fig_dir=args.fig_dir)
    print('Best val loss: {:4f}'.format(best_loss))
    model.load_state_dict(best_model_wts)
    torch.save(model, args.model_save_name)
def model_reference(args, sample_stack_rows=50):
    """ Run inference with the saved model and plot the segmentation results.

    Part 1 evaluates the saved model on the test split and prints pixel
    accuracy, per-class/average F1, HD95 (for both raw predictions and the
    regularized boundaries), ASD and VD.
    Part 2 (only when args.do_plot) re-runs inference without augmentation
    and saves per-sample predictions/figures through plot_save_result.

    Args:
        args: parser arguments (model/data/plot configuration)
        sample_stack_rows: int, how many slices to plot per image
    """
    #############################################################################################
    # Part 1: model reference and metric evaluations
    #############################################################################################
    # load the whole model object; the identity map_location keeps tensors on
    # CPU at load time (moved to GPU just below when requested)
    model = torch.load(args.model_save_name, map_location=lambda storage, loc: storage)
    if args.use_gpu:
        model = model.cuda()
    # running sums of per-batch metrics, weighted by patch count, averaged at the end
    dataset_sizes = 0
    running_hdist, running_reghdf, running_asd, running_vd = 0.0, 0.0, 0.0, 0.0
    running_corrects, running_f1, running_dice_score = 0.0, 0.0, 0.0
    running_fscores = np.zeros(args.output_channel, dtype=np.float32)
    running_effect_samples = np.zeros(args.output_channel, dtype=np.uint32)
    # NOTE(review): probmap2bound / innerouterbound2mask / volumewise_ravd are
    # module-level imports not visible in this chunk — verify they are imported.
    if args.model_type == '2d':
        from image.dataloader import read_train_data
        dataloaders = read_train_data(args.data_dir, args.compose, 'test', None, None, True,
                                  False, args.num_workers, args.batch_size, args.percentile,
                                  args.multi_view, args.only_plaque, args.config)
    else: # parameters of dataloader for 2.5D and 3D is the same
        if args.model_type == '3d':
            from volume.dataloader import read_train_data
        elif args.model_type == '2.5d':
            from hybrid.dataloader import read_train_data
        dataloaders = read_train_data(args.data_dir, None, None, args.compose, 'test',
                            False, args.percentile, args.multi_view, args.interval, args.down_sample,
                            args.batch_size, args.num_workers, True, args.config)
    for samp_inx, sample in enumerate(dataloaders['test']):
        inputs, labels = sample
        patch_size = len(inputs)
        dataset_sizes += patch_size
        # wrap them in Variable
        if args.use_gpu:
            inputs = Variable(inputs.cuda()).float()
            labels = Variable(labels.cuda()).long()
        else:
            inputs = Variable(inputs).float()
            labels = Variable(labels).long()
        outputs = model(inputs) # outputs can be tensor, tuple or list based on model we choose
        # regs: regularized boundary maps derived from the softmax probabilities
        # (presumably thresholding + morphology inside probmap2bound — confirm)
        regs = probmap2bound(F.softmax(outputs, 1), n_workers=32, thres=0.7, kernel_size=9)
        if args.model == 'deeplab_resnet':
            outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs[-1])
        elif args.model_type == "2.5d" and args.model == "res_unet_reg": # multiple outputs
            outputs = outputs[0]
        _, preds = torch.max(outputs.data, 1)
        # pixel accuracy accumulated per patch (normalized by pixels per slice)
        running_corrects += float(torch.sum(preds == labels.data)) / preds[0].numel()
        # calculate hd95 and asd
        preds_bound_np, labels_bound_np = preds.cpu().numpy(), labels.data.cpu().numpy()
        regs_np = regs.data.cpu().numpy()
        mean_reghdf = volumewise_hd95(regs, labels, return_slicewise_hdf=False)
        mean_hdf, batch_hdf = volumewise_hd95(preds_bound_np, labels_bound_np, return_slicewise_hdf=True)
        mean_asd = volumewise_asd(preds_bound_np, labels_bound_np, n_classes=3)
        running_hdist += mean_hdf * patch_size
        running_asd += mean_asd * patch_size
        running_reghdf += mean_reghdf * patch_size
        # calculate F1, VD
        # F1/VD are computed on filled masks reconstructed from the boundaries
        preds_np = np.stack([innerouterbound2mask(r, args.output_channel) for r in regs_np])
        labels_np = np.stack([innerouterbound2mask(label, args.output_channel) for label in labels_bound_np])
        cal_f1 = cal_f_score_slicewise if args.model_type == '3d' else cal_f_score
        _, f_scores, n_effect_samples = cal_f1(preds_np, labels_np, n_class=args.output_channel,
                                               return_slice_f1=False, return_class_f1=True)
        running_fscores += f_scores
        running_effect_samples += n_effect_samples
        mean_vd = volumewise_ravd(preds_np, labels_np)
        running_vd += mean_vd * patch_size
    epoch_acc = float(running_corrects) / dataset_sizes
    epoch_class_f1 = running_fscores / running_effect_samples
    epoch_f1 = epoch_class_f1.mean()
    epoch_hdist = running_hdist / dataset_sizes
    epoch_asd = running_asd / dataset_sizes
    epoch_vd = running_vd / dataset_sizes
    epoch_reghdf = running_reghdf / dataset_sizes
    # print various metrics
    print("Acc: {:.4f} Ave_F1: {:.4f} Ave_hdf: {:.4f}, Ave_reghdf: {:.4f}. Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(
        epoch_acc, epoch_f1, epoch_hdist, epoch_reghdf, epoch_asd, epoch_vd))
    for c_inx, each_f1 in enumerate(epoch_class_f1):
        print("Class-{}: F1-{:.4f}".format(c_inx, each_f1))
    ##########################################################################################
    # plot the prediction results
    ##########################################################################################
    if args.do_plot:
        plot_data = args.plot_data
        # reuse the deterministic test pipeline for whichever split is plotted
        args.compose[plot_data] = args.compose['test']
        if args.model_type == '2d':
            from image.dataloader import read_plot_data
            dataloaders = read_plot_data(args.data_dir, args.compose, plot_data, False, args.num_workers,
                                         args.batch_size, args.multi_view, args.config)
        else: # parameters of dataloader for 2.5D and 3D is the same
            if args.model_type == '3d':
                from volume.dataloader import read_plot_data
            elif args.model_type == '2.5d':
                from hybrid.dataloader import read_plot_data
            dataloaders = read_plot_data(args.data_dir, args.compose, plot_data, args.multi_view, args.interval,
                                         args.down_sample, args.num_workers, False, args.config)
        for samp_inx, sample in enumerate(dataloaders[plot_data]):
            # plot loader yields one whole sample (all patches) per iteration
            inputs_batch, labels, sample_name, start = sample
            sample_name, start = sample_name[0], start.item()
            inputs_batch = torch.squeeze(inputs_batch, dim=0) # [N, 1, T, H, W]
            labels = torch.squeeze(labels, dim=0) # [N, T, H, W]
            patch_size = len(inputs_batch)
            # process each mini-batch
            for mb_inx in range(0, patch_size, args.batch_size):
                end = min(mb_inx + args.batch_size, patch_size)
                inputs =inputs_batch[mb_inx:end]
                # wrap them in Variable
                if args.use_gpu:
                    inputs = Variable(inputs.cuda()).float()
                else:
                    inputs = Variable(inputs).float()
                outputs = model(inputs) # both outputs and preds are tensors
                if args.model == 'deeplab_resnet':
                    outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs[-1])
                elif args.model_type == "2.5d" and args.model == "res_unet_reg":
                    outputs = outputs[0]
                outputs_mb_np = F.softmax(outputs, dim=1).data.cpu().numpy() # don't forget the softmax here
                # outputs_mb_np = outputs_mb_np[:, 1] # only choose channel 1
                _, preds = torch.max(outputs.data, 1)
                preds_mb_np = preds.cpu().numpy()
                if mb_inx == 0:
                    # allocate the full-sample result buffers on the first mini-batch
                    preds_np = np.zeros((patch_size, *(preds_mb_np[0].shape)), dtype=preds_mb_np.dtype)
                    # outputs_np shape: [N * C * T * H * W] or [N * C * H * W]
                    outputs_np = np.zeros((patch_size, *(outputs_mb_np[0].shape)), dtype=outputs_mb_np.dtype)
                preds_np[mb_inx:end], outputs_np[mb_inx:end] = preds_mb_np, outputs_mb_np
            # convert into numpy
            labels_np = labels.cpu().numpy()
            if inputs_batch.size(1) == 1: # only one channel
                inputs_np = torch.squeeze(inputs_batch, dim=1).cpu().numpy() # [N, T, H, W]
            else: # if 3 channels, only select the first channel
                inputs_np = inputs_batch[:, 0].cpu().numpy()
            if args.model_type == '2.5d':
                # 2.5D predicts only the central slice of each input stack
                n_slices = inputs_np.shape[1]
                inputs_np = inputs_np[:, n_slices//2]
            # for 2D images, we can directly use it for plot, for 3D volume, transform is necessary
            if args.model_type == '3d':
                inputs_np, labels_np, preds_np, outputs_np = rearrange_volume(
                    inputs_np, labels_np, preds_np, outputs_np, args)
            if args.model_type == '2.5d': # shift start index if 2.5D model
                start += (args.interval // 2) * args.down_sample
            # save predictions into pickle and plot the results
            plot_save_result(labels_np, inputs_np, preds_np, outputs_np, start, sample_name, args.fig_dir,
                             sample_stack_rows, args.output_channel)
def rearrange_volume(inputs, labels, preds, outputs, args):
    """ Restore the anatomical slice order of patched volumes for plotting.

    Flattens the patch axis of every array into a single slice axis, then
    permutes the slices to undo the interval/down-sample interleaving used
    when the patches were built.

    :param inputs: ndarray (N, D, H, W) of image patches
    :param labels: ndarray (N, D, H, W) of ground-truth patches
    :param preds: ndarray (N, D, H, W) of predicted label patches
    :param outputs: ndarray (N, C, D, H, W) of per-class probability patches
    :param args: parser arguments; reads interval/down_sample, sets args.stride
    :return: tuple (inputs, labels, preds, outputs), each with a flat slice axis
    """
    def _flatten(arr):
        # collapse the leading (patch, depth) pair into one slice axis
        return np.reshape(arr, (-1, *(arr.shape[2:])))

    inputs = _flatten(inputs)
    labels = _flatten(labels)
    preds = _flatten(preds)
    # [N, C, T, H, W] --> [N, T, C, H, W] so the slice axis leads after flattening
    outputs = _flatten(outputs.transpose(0, 2, 1, 3, 4))
    num_slices = len(inputs)
    # stride between interleaved groups; stored on args since callers may read it
    args.stride = args.down_sample * args.interval
    indexes = [s_inx + i + j * args.interval
               for s_inx in range(0, num_slices, args.stride)
               for i in range(args.interval)
               for j in range(args.down_sample)
               if s_inx + i + j * args.interval < num_slices]
    return (inputs[indexes], labels[indexes], preds[indexes], outputs[indexes])
def plot_save_result(labels, inputs, preds, outputs, start, samp_art_name, root_fig_dir, sample_stack_rows, n_class):
    """ Persist segmentation results for one sample.

    Writes GIF animations of the input/label/prediction slice stacks, pickles
    all raw arrays for later re-rendering, and renders paged figure plots via
    sample_list_hdf (sample_stack_rows slices per page).
    """
    fig_dir = root_fig_dir + '/' + samp_art_name
    if not osp.exists(fig_dir):
        os.makedirs(fig_dir)
    data = {'input': inputs, 'label': labels, 'pred': preds, 'output': outputs,
            'sample_name': samp_art_name, 'start': start, 'n_class': n_class}
    # resave slices into gif animation
    for data_type in ("input", "label", "pred"):
        imageio.mimsave('{}/{}.gif'.format(fig_dir, data_type), list(data[data_type]))
    # dump everything needed to reproduce the figures later
    with open(osp.join(fig_dir, 'data.pkl'), 'wb') as writer:
        pickle.dump(data, writer, protocol=pickle.HIGHEST_PROTOCOL)
    # plot the inputs, ground truth, outputs and F1 scores page by page
    n_slices = len(inputs)
    for inx in range(0, n_slices, sample_stack_rows):
        over = min(inx + sample_stack_rows, n_slices)
        page = zip(inputs[inx:over], labels[inx:over], preds[inx:over], outputs[inx:over])
        data_list = [{"input": img, "GT": lab, "pred": prd, "output": out}
                     for img, lab, prd, out in page]
        file_name = "{}/{:03d}".format(fig_dir, inx + start)
        sample_list_hdf(data_list, rows=over - inx, start_with=0, show_every=1, fig_name=file_name,
                        start_inx=inx + start, n_class=n_class)
3D | kkhuang1990/PlaqueDetection | BoundDetection/main.sh | .sh | 4,401 | 110 | #!/bin/bash
# ---------------------------------------------------------------------------
# Training configuration for boundary detection (Hybrid Res-UNet, WHD-DB loss).
# Edit the variables below, then run:  bash main.sh
# NOTE: boolean options are passed to main.py as the literal strings
# 'True'/'False' (main.py parses them with lambda converters).
# ---------------------------------------------------------------------------
# input/output
OUTPUT_CHANNEL=3
BOUND_OUTPUT='True'
# For output_channel=2, [inner, outer, innerouter] is available, for output_channel=3, only innerouter is available
BOUND_TYPE='innerouter'
WIDTH=1 # boundary width
#DATA_DIR="/home/mil/huang/Dataset/CPR_multiview"
#DATA_DIR="/data/ugui0/antonio-t/CPR_multiview"
DATA_DIR="/data/ugui0/antonio-t/CPR_multiview_interp2_huang" # after interpolation with 2 pixels
# Experiment
EXPERIMENT="Experiment12"
SUB_FOLDER="HybridResUNet_int23_0.167_whddb"
# optimizer
LR_SCHEDULER='StepLR'
MOMENTUM=0.90
GAMMA=0.9
CRITERION='whddb' # weighted Hausdorff Distance of double boundaries
## only for Cross Entropy Bound loss which treat inner bounds and outer bounds differently
W1=10.0 # outer bound amplitude
W2=10.0 # inner bound amplitude
SIGMA1=5.0 # outer bound variance
SIGMA2=5.0 # inner bound variance
WHD_ALPHA=4 # only for WHD loss
WHD_BETA=1 # only for WHD loss
WHD_RATIO=0.167 # only for WHD loss
IGNORE_INDEX='None'
CAL_ZEROGT='False' # whether calculate GT with all pixels equal to zero (only for dice loss)
ALPHA=0.5
OPT='Adam'
WEIGHT='False'
MOD_OUTLINE='False' # modify outline weight to put higher importance on outline
WEIGHT_TYPE='None' # what type of weight to use 'None', 'nlf' or 'mfb'
LR=0.001
STEP_SIZE=10
W_DECAY=0.0005
MPL='False'
# training
SING_GPU_ID=5
ONLY_TEST='False'
NUM_WORKERS=16
BATCH_SIZE=32 # 32 for 15 slices and 16 for 31 slices
NUM_TRAIN_EPOCHS=50
USE_PRE_TRAIN='False'
PRE_TRAIN_PATH="./Experiment9/HybridResUNet_int15_ds1_baseline_new"
PERCENTILE=100 # no hard mining
N_EPOCH_HARDMINING=1
ONLY_PLAQUE='False'
CONFIG='config'
# pre-processing/augmentation
R_CENTRAL_CROP='False'
NOISE='False'
FLIP='True'
ROTATION='True'
RANDOM_TRANS='False'
CENTRAL_CROP=192
RESCALE=192
INTERVAL=23
DOWN_SAMPLE=1
MULTI_VIEW='False'
BC_LEARNING='False'
# models
MODEL_TYPE='2.5d'
MODEL='res_unet'
DROP_OUT=0.0 # drop_out rate for res_unet
THETA=1.0
WITH_SHALLOW_NET='True'
# visualization
DO_PLOT='True'
PLOT_DATA='test'
## create fig_dir to save log file and generated graphs
#FIG_DIR="${EXPERIMENT}/${MODEL_TYPE}_${MODEL}_${LR}__${PERCENTILE}_${NUM_TRAIN_EPOCHS}_\
#${STEP_SIZE}_${CRITERION}_${OPT}_r-${ROTATION}_flip-${FLIP}_w-${WEIGHT}_ptr-${USE_PRE_TRAIN}_mv-${MULTI_VIEW}_\
#sl-${WITH_SHALLOW_NET}_\
#lr-${LR_SCHEDULER}_wt-${WEIGHT_TYPE}_o-${OUTPUT_CHANNEL}_b-${BOUND_OUTPUT}_cf-${CONFIG}_dp-${DROP_OUT}_\
#w1-${W1}_w2-${W2}_sg1-${SIGMA1}_sg2-${SIGMA2}_rs-${RESCALE}_wt-${WIDTH}_bt-${BOUND_TYPE}_whda-${WHD_ALPHA}_whdb-${WHD_BETA}"
FIG_DIR="${EXPERIMENT}/${SUB_FOLDER}"
# create $FIG_DIR if it doesn't exist
if [ ! -d "./${FIG_DIR}" ]; then
    mkdir -p ./${FIG_DIR}
fi
# timestamped log file; mirror all stdout/stderr into it via tee
LOG="./${FIG_DIR}/train.`date +'%Y-%m-%d_%H-%M-%S'`.txt"
exec &> >(tee -a "$LOG")
echo "Logging output to $LOG"
# launch training on the selected GPU; flags mirror the variables above
CUDA_VISIBLE_DEVICES=${SING_GPU_ID} python main.py --central_crop ${CENTRAL_CROP} --rescale $RESCALE --output_channel ${OUTPUT_CHANNEL} \
    --num_train_epochs ${NUM_TRAIN_EPOCHS} --w_decay ${W_DECAY} --lr $LR --momentum $MOMENTUM \
    --step_size ${STEP_SIZE} --gamma $GAMMA --batch_size ${BATCH_SIZE} --num_workers ${NUM_WORKERS} \
    --criterion $CRITERION --opt $OPT --data_dir ${DATA_DIR} --interval ${INTERVAL} --model_type ${MODEL_TYPE}\
    --weight $WEIGHT --only_test ${ONLY_TEST} --rotation $ROTATION --flip $FLIP --r_central_crop ${R_CENTRAL_CROP} \
    --random_trans ${RANDOM_TRANS} --noise $NOISE --use_pre_train ${USE_PRE_TRAIN} \
    --pre_train_path ${PRE_TRAIN_PATH} --fig_dir ${FIG_DIR} --only_plaque ${ONLY_PLAQUE} \
    --with_shallow_net ${WITH_SHALLOW_NET} --do_plot ${DO_PLOT} --down_sample ${DOWN_SAMPLE}\
    --n_epoch_hardmining ${N_EPOCH_HARDMINING} --percentile ${PERCENTILE} --plot_data ${PLOT_DATA} \
    --multi_view ${MULTI_VIEW} --model ${MODEL} --theta ${THETA} --config ${CONFIG} --bc_learning ${BC_LEARNING} \
    --lr_scheduler ${LR_SCHEDULER} --weight_type ${WEIGHT_TYPE} --mpl ${MPL} --cal_zerogt ${CAL_ZEROGT} \
    --drop_out ${DROP_OUT} --ignore_index ${IGNORE_INDEX} --w1 ${W1} --w2 ${W2} --sigma1 ${SIGMA1} --sigma2 ${SIGMA2} \
    --bound_out ${BOUND_OUTPUT} --width ${WIDTH} --mod_outline ${MOD_OUTLINE} --bound_type ${BOUND_TYPE} \
--whd_alpha ${WHD_ALPHA} --whd_beta ${WHD_BETA} --whd_ratio ${WHD_RATIO} | Shell |
3D | kkhuang1990/PlaqueDetection | BoundDetection/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | BoundDetection/main.py | .py | 20,753 | 382 | # _*_ coding: utf-8 _*_
""" main code for train and test U-Net """
from __future__ import print_function
import sys
sys.path.append("..")
import numpy as np
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import argparse
import shutil
from loss import dice_score_slicewise, GeneralizedDiceLoss, WeightedKLDivLoss
from loss import WeightedCrossEntropy, FocalLoss, DiceLoss
from loss import MaxPoolLoss, CrossEntropyBoundLoss
from loss import WeightedHausdorffDistanceLoss, ModifiedWeightedHausdorffDistanceLoss
from loss import WeightedHausdorffDistanceDoubleBoundLoss, WeightedMaximumHausdorffDistanceDoubleBoundLoss
from loss import ModifiedWeightedHausdorffDistanceDoubleBoundLoss, WeightedHausdorffDistanceDoubleBoundLossWithSnake
import os.path as osp
from train import train_model, model_reference
from torchvision import transforms
from lr_scheduler import PolyLR
from image.models.deeplab_resnet import get_1x_lr_params_NOscale, get_10x_lr_params
from torch.optim import lr_scheduler
import matplotlib as mpl
mpl.use('Agg')
if __name__ == "__main__":
    # Command-line interface. NOTE: boolean flags are parsed with
    # `lambda x: x.lower()=='true'`, so they must be passed as the literal
    # strings 'True'/'False' (as done in main.sh); argparse's type=bool would
    # not work here.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--data_dir', type=str, help="from where to read data")
    parser.add_argument('--central_crop', type=int, default=192)
    parser.add_argument('--rescale', type=int, default=96)
    parser.add_argument('--output_channel', type=int, default=5, choices=(2, 3, 4, 5))
    parser.add_argument('--num_train_epochs', type=int, default=100)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--momentum', type=float, default=0.90)
    parser.add_argument('--w_decay', type=float, default=0.005)
    parser.add_argument('--step_size', type=int, default=20)
    parser.add_argument('--gamma', type=float, default=0.1)
    parser.add_argument('--use_gpu', type=bool, default=torch.cuda.is_available())
    parser.add_argument('--num_workers', type=int, default=12)
    parser.add_argument('--criterion', type=str, default='nll')
    parser.add_argument('--opt', type=str, default='Adam', help="optimizer")
    parser.add_argument('--weight', type=lambda x: True if x.lower()=='true' else None, default=True)
    parser.add_argument('--weight_type', type=lambda x: None if x.lower()=='none' else x, default=None)
    parser.add_argument('--only_test', type=lambda x: x.lower()=='true')
    parser.add_argument('--rotation', type=lambda x: x.lower()=='true')
    parser.add_argument('--flip', type=lambda x: x.lower()=='true')
    parser.add_argument('--r_central_crop', type=lambda x: x.lower()=='true')
    parser.add_argument('--random_trans', type=lambda x: x.lower()=='true')
    parser.add_argument('--noise', type= lambda x: x.lower()=='true', help="whether add Gaussian noise or not")
    parser.add_argument('--use_pre_train', type=lambda x: x.lower()=='true')
    parser.add_argument('--fig_dir', type=str, help="directory for saving segmentation results")
    parser.add_argument('--pre_train_path', type=str)
    parser.add_argument('--with_shallow_net', type= lambda x: x.lower()=='true')
    parser.add_argument('--n_epoch_hardmining', type=int, default=15, help="every how many epochs for hard mining")
    parser.add_argument('--percentile', type=int, default=85, help="how much percent samples to save for hard mining")
    parser.add_argument('--plot_data', type=str, default='test', help="what data to plot")
    parser.add_argument('--do_plot', type=lambda x: x.lower()=='true', help="whether plot test results or not")
    parser.add_argument('--multi_view', type=lambda x: x.lower()=='true', help="whether to use multi-view inputs")
    parser.add_argument('--model', type=str, choices=('tiramisu', 'unet', 'res_unet', 'hyper_tiramisu', 'deeplab_resnet',
                                                      'res_unet_dp', 'res_unet_reg'), help="which model to use")
    parser.add_argument('--theta', type=float, help="compression ratio for DenseNet")
    parser.add_argument('--only_plaque', type=lambda x: x.lower()=='true', help="whether only use plaque samples")
    parser.add_argument('--interval', type=int, help="interval of slices in volume")
    parser.add_argument('--down_sample', type=int, default=1, help="down sampling step")
    parser.add_argument('--model_type', type=str, default='2d', help="use 2D or 3D model")
    parser.add_argument('--config', type=str, default='config', help="config file name for train/val/test data split")
    parser.add_argument('--alpha', type=float, default=0.5, help="ratio of false positive in generalized dice loss")
    parser.add_argument('--bc_learning', type=lambda x: None if x.lower()=='false' else x, default=None)
    parser.add_argument('--lr_scheduler', type=str, default='StepLR', help="learning scheduler")
    parser.add_argument('--mpl', type=lambda x: x.lower()=='true', default=False, help="whether max-pooling loss or not")
    parser.add_argument('--cal_zerogt', type= lambda x: x.lower() == 'true', default=False,
                        help= "whether calculate F1 score for case of all GT pixels are zero")
    parser.add_argument('--drop_out', type=float, default=0.0,
                        help= "drop out rate for Res-UNet model")
    parser.add_argument('--ignore_index', type=lambda x: None if x.lower()=='none' else int(x),
                        help= "ignore index")
    parser.add_argument('--w1', type=float, default=20.0, help="outer bound amptitude")
    parser.add_argument('--sigma1', type=float, default=5.0, help="outer bound variance")
    parser.add_argument('--w2', type=float, default=10.0, help="inner bound amptitude")
    parser.add_argument('--sigma2', type=float, default=5.0, help="inner bound variance")
    parser.add_argument('--bound_out', type=lambda x: x.lower()=='true', default=False,
                        help="whether output with bound")
    parser.add_argument('--width', default=1, type=int, help="bound width")
    parser.add_argument('--mod_outline', default=False, type=lambda x: x.lower()=='true',
                        help="whether modify outline or not")
    parser.add_argument('--bound_type', default='innerouter', type=str,
                        help="what kind of bound to extract: inner or outer or both")
    parser.add_argument('--whd_alpha', default=4, type=int, help="alpha in WHD loss")
    parser.add_argument('--whd_beta', default=1, type=float, help="beta in WHD loss")
    parser.add_argument('--whd_ratio', default=0.5, type=float, help="ratio in WHD loss")
    args = parser.parse_args()
    shutil.copy('./main.sh', './{}'.format(args.fig_dir)) # save current bash file for replicating experiment results
    args.model_save_name = "./{}/model.pth".format(args.fig_dir)
    # transforms and augmentations
    # NOTE(review): the 2d and 2.5d/3d transform modules export overlapping but
    # not identical names — e.g. Gray2Triple is imported only in the 2d branch,
    # so output_channel==3 with bound_out=False would raise NameError for
    # 2.5d/3d models; verify before relying on that combination.
    if args.model_type == '2d':
        from image.transforms import Gray2TripleWithBound, Gray2Bound, Normalize, Gray2InnerOuterBound
        from image.transforms import Gray2InnerBound, Gray2OuterBound
        from image.transforms import CentralCrop, Rescale, Gray2Triple, Gray2Mask, ToTensor, Gray2Binary, Identical, HU2Gray, RandomFlip
        from image.transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, HU2GrayMultiStreamToTensor
    else: # 2.5D and 3D
        from volume.transforms import Gray2InnerBound, Gray2OuterBound, Gray2InnerOuterBound
        from volume.transforms import Gray2Bound, Gray2TripleWithBound
        from volume.transforms import CentralCrop, Rescale, Gray2Mask, ToTensor, Gray2Binary, Identical, HU2Gray, RandomFlip
        from volume.transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, HU2GrayMultiStreamToTensor
    # choose transforms of annotation under different settings
    if args.output_channel == 2: # 2 options: (1) binary class seg (2) bound detection
        if args.bound_out:
            if args.bound_type == 'inner':
                ToMask = Gray2InnerBound(width=args.width)
            elif args.bound_type == 'outer':
                ToMask = Gray2OuterBound(width=args.width)
            elif args.bound_type == 'innerouter':
                ToMask = Gray2Bound(width=args.width, n_classes=args.output_channel)
        else:
            ToMask = Gray2Binary()
    elif args.output_channel == 3: # 2 options: (1) triple class seg (2) inner bound + outer bound
        if args.bound_out:
            ToMask = Gray2InnerOuterBound(width=args.width)
        else:
            ToMask = Gray2Triple()
    elif args.output_channel == 4:
        ToMask = Gray2TripleWithBound(n_classes=4, width=args.width)
    elif args.output_channel == 5:
        if args.bound_out:
            ToMask = Gray2TripleWithBound(n_classes=5, width=args.width)
        else:
            ToMask = Gray2Mask()
    # train pipeline applies the augmentations enabled by flags;
    # test pipeline is deterministic (crop + mask conversion only)
    args.compose = {'train': transforms.Compose([HU2Gray(),
                                                 RandomRotation() if args.rotation else Identical(),
                                                 RandomFlip() if args.flip else Identical(),
                                                 RandomCentralCrop() if args.r_central_crop else CentralCrop(args.central_crop),
                                                 # Rescale((args.rescale)),
                                                 RandomTranslate() if args.random_trans else Identical(),
                                                 AddNoise() if args.noise else Identical(),
                                                 ToMask,
                                                 ToTensor(norm=True)]),
                    'test': transforms.Compose([HU2Gray(),
                                                CentralCrop(args.central_crop),
                                                # Rescale(args.rescale),
                                                ToMask,
                                                ToTensor(norm=True)])}
    # whether use pre_train model or not
    # ---- model construction: architecture chosen by --model_type / --model ----
    if args.use_pre_train:
        # resume from a previously saved whole-model snapshot (loaded on CPU)
        model = torch.load("{}/model.pth".format(args.pre_train_path),
                           map_location=lambda storage, loc: storage)
    else:
        args.color_channel = 3 if args.multi_view else 1
        if args.model_type == '2d':
            if args.model == 'unet':
                if args.with_shallow_net:
                    from image.models.unet import UNet18 as UNet
                else:
                    from image.models.unet import UNet28 as UNet
                model = UNet(args.color_channel, args.output_channel)
            elif args.model == 'res_unet':
                print("res_unet is called")
                if args.with_shallow_net:
                    from image.models.res_unet import ResUNet18 as ResUNet
                else:
                    from image.models.res_unet import ResUNet28 as ResUNet
                model = ResUNet(args.color_channel, args.output_channel)
            elif args.model == 'res_unet_dp':
                print("res_unet is called")
                if args.with_shallow_net:
                    from image.models.res_unet_dp import ResUNet18 as ResUNet
                else:
                    from image.models.res_unet_dp import ResUNet28 as ResUNet
                model = ResUNet(args.color_channel, args.output_channel, args.drop_out)
            elif args.model == 'tiramisu':
                if args.with_shallow_net:
                    from image.models.tiramisu import FCDenseNet43 as FCDenseNet
                else:
                    from image.models.tiramisu import FCDenseNet67 as FCDenseNet
                model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
            # elif args.model == 'hyper_tiramisu':
            #     if args.with_shallow_net:
            #         from image.models.hyper_tiramisu import FCDenseNet43 as FCDenseNet
            #     else:
            #         from image.models.hyper_tiramisu import FCDenseNet67 as FCDenseNet
            #
            #     model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
            elif args.model == 'deeplab_resnet':
                from image.models.deeplab_resnet import Res_Ms_Deeplab
                model = Res_Ms_Deeplab(args.color_channel, args.output_channel)
        elif args.model_type == '3d':
            if args.model == 'unet':
                if args.with_shallow_net:
                    from volume.models.unet import UNet18 as UNet
                else:
                    from volume.models.unet import UNet28 as UNet
                model = UNet(args.color_channel, args.output_channel)
            elif args.model == 'res_unet': # for 3D network, Res-UNet and Res-UNet with dropout is not distinguished
                print("res_unet is called")
                if args.with_shallow_net:
                    from volume.models.res_unet import ResUNet18 as ResUNet
                else:
                    from volume.models.res_unet import ResUNet28 as ResUNet
                model = ResUNet(args.color_channel, args.output_channel, args.drop_out)
            elif args.model == 'tiramisu':
                if args.with_shallow_net:
                    from volume.models.tiramisu import FCDenseNet43 as FCDenseNet
                else:
                    from volume.models.tiramisu import FCDenseNet67 as FCDenseNet
                model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
            # elif args.model == 'hyper_tiramisu':
            #     if args.with_shallow_net:
            #         from volume.models.hyper_tiramisu import FCDenseNet43 as FCDenseNet
            #     else:
            #         from volume.models.hyper_tiramisu import FCDenseNet67 as FCDenseNet
            #
            #     model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
        elif args.model_type == "2.5d": # Hybrid model with 3D input and 2D output
            if args.model == 'res_unet':
                print("Hybrid res_unet is called")
                if args.with_shallow_net:
                    from hybrid.models.hybrid_res_unet import ResUNet18 as ResUNet # 15 slices
                else:
                    from hybrid.models.hybrid_res_unet import ResUNet23 as ResUNet # 31 slices
                model = ResUNet(args.color_channel, args.output_channel, args.interval, args.rescale)
            elif args.model == 'res_unet_reg':
                # hybrid res-unet with regularization in original paper which introduced WHD loss
                print("Hybrid res_unet is called")
                if args.with_shallow_net:
                    from hybrid.models.hybrid_res_unet_reg import ResUNet18 as ResUNet # 15 slices
                else:
                    from hybrid.models.hybrid_res_unet_reg import ResUNet23 as ResUNet # 31 slices
                model = ResUNet(args.color_channel, args.output_channel, args.interval, args.rescale)
    # whether use gpu or not
    if args.use_gpu:
        model = model.cuda()
    # whether introduce prior weight into loss function or not
    # ---- class weights (precomputed .npy files) and loss function ----
    # NOTE(review): if args.weight is truthy but args.weight_type is not None,
    # `weight` is never assigned and the print below raises NameError — confirm
    # that combination is never used (main.sh passes WEIGHT_TYPE='None').
    if args.weight:
        if args.weight_type is None:
            if args.bound_out:
                weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_bound_{}.npy'.format(args.output_channel))).float()
            else:
                if args.output_channel == 5:
                    weight = torch.from_numpy(np.load('../class_weights/class_weight.npy')).float()
                else:
                    weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_{}.npy'.format(args.output_channel))).float()
            weight = Variable(weight.cuda())
    else: # no prior weight, especially for bound detection
        weight = args.weight
    print("weight: {}".format(weight))
    # criterion
    if args.criterion == 'nll':
        criterion = nn.NLLLoss(weight=weight)
    elif args.criterion == 'ce':
        criterion = nn.CrossEntropyLoss(weight=weight)
    elif args.criterion == 'dice':
        criterion = DiceLoss(weight=weight, ignore_index=None, weight_type=args.weight_type, cal_zerogt=args.cal_zerogt)
    elif args.criterion == 'gdl_none':
        criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type=None,
                                        alpha=args.alpha)
    elif args.criterion == 'focal':
        criterion = FocalLoss()
    elif args.criterion == 'wce':
        criterion = WeightedCrossEntropy()
    elif args.criterion == 'ceb': # cross entropy bound loss
        criterion = CrossEntropyBoundLoss(n_classes=args.output_channel, weight=weight, ignore_index=args.ignore_index,
                                          ws=[args.w1, args.w2], sigmas=[args.sigma1, args.sigma2], bound_output=args.bound_out)
    elif args.criterion == 'whd': # weighted Hausdorff distance
        criterion = WeightedHausdorffDistanceLoss(alpha=args.whd_alpha, beta=args.whd_beta)
    elif args.criterion == 'mwhd':
        criterion = ModifiedWeightedHausdorffDistanceLoss(alpha=args.whd_alpha, thres=0.5)
    elif args.criterion == 'whddb': # whd with double bounds
        criterion = WeightedHausdorffDistanceDoubleBoundLoss(return_boundwise_loss=True, alpha=args.whd_alpha,
                                                             beta=args.whd_beta, ratio=args.whd_ratio)
    elif args.criterion == 'whddbmax': # maximum whd with double bounds
        criterion = WeightedMaximumHausdorffDistanceDoubleBoundLoss(return_boundwise_loss=True, alpha=args.whd_alpha,
                                                                    beta=args.whd_beta, ratio=args.whd_ratio)
    elif args.criterion == 'mwhddb': # modified whd with double bounds
        criterion = ModifiedWeightedHausdorffDistanceDoubleBoundLoss(return_boundwise_loss=True, alpha=args.whd_alpha,
                                                                     beta=args.whd_beta, ratio=args.whd_ratio)
    elif args.criterion == "whddbsnake":
        criterion = WeightedHausdorffDistanceDoubleBoundLossWithSnake(return_multi_loss=True, alpha=args.whd_alpha,
                                                                      beta=args.whd_beta, ratio=args.whd_ratio)
    # whddb loss with regularization
    elif args.criterion == "whddb_cereg":
        criterion = WeightedHausdorffDistanceDoubleBoundLoss(return_boundwise_loss=True, alpha=args.whd_alpha, beta=args.whd_beta, ratio=args.whd_ratio)
    # criterion for BC learning
    if args.criterion.startswith('gdl'):
        args.criterion_bc = criterion
    else:
        args.criterion_bc = WeightedKLDivLoss(weight=weight)
    # optimizer
    # deeplab_resnet uses per-parameter-group LRs (backbone at lr, head at 10x lr)
    if args.opt == 'Adam':
        if args.model == 'deeplab_resnet':
            optimizer = optim.Adam([{'params': get_1x_lr_params_NOscale(model), 'lr': args.lr},
                                    {'params': get_10x_lr_params(model), 'lr': 10 * args.lr}],
                                   lr=args.lr, weight_decay=args.w_decay)
        else:
            optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay)
    elif args.opt == 'sgd':
        if args.model == 'deeplab_resnet':
            optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.lr},
                                   {'params': get_10x_lr_params(model), 'lr': 10 * args.lr}],
                                  lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
        else:
            optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
    # learning schedule
    if args.lr_scheduler == 'StepLR':
        my_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    elif args.lr_scheduler == 'PolyLR':
        my_lr_scheduler = PolyLR(optimizer, max_iter=args.num_train_epochs, power=0.9)
    # print arguments setting
    for arg in vars(args):
        print("{} : {}".format(arg, getattr(args, arg)))
    # plot samples used for train, val and test respectively
    # (prints the sample lists from the split config files for the log)
    print("Dataset:")
    for mode in ['train', 'val', 'test']:
        config_file = osp.join('../configs/{}'.format(args.config), mode+'.txt')
        print(mode)
        with open(config_file, 'r') as reader:
            for line in reader.readlines():
                print(line.strip('\n'))
    since = time.time()
    if not args.only_test:
        train_model(model, criterion, optimizer, my_lr_scheduler, args)
    # model reference
    # always run inference/plotting, even with --only_test True
    model_reference(args, sample_stack_rows=50)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60)) | Python |
3D | kkhuang1990/PlaqueDetection | volume/train.py | .py | 25,288 | 528 | # _*_ coding: utf-8 _*_
""" define train and test functions here """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from sklearn.metrics import auc
import copy
from collections import Counter
import numpy as np
np.set_printoptions(precision=4)
from tqdm import tqdm
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
import pickle
import os.path as osp
import os
from image.models.deeplab_resnet import outS
from metric import cal_f_score, cal_f_score_slicewise, volumewise_hd95, volumewise_asd
from loss import WeightedKLDivLoss
from utils import mask2innerouterbound
from vision import plot_metrics, plaque_detection_rate, plot_class_f1
from vision import plot_slice_wise_measures, sample_seg_with_hfd
from image.models.deeplab_resnet import adjust_learning_rate, cal_loss
from medpy.metric.binary import ravd
def train_model(model, criterion, optimizer, scheduler, args):
""" train the model
for obtain stable validation result, we use averaged val loss over several epochs
as the principle of choosing best model weights
Args:
model: model inheriting from nn.Module class
criterion: criterion class, loss function used
optimizer: optimizer, optimization strategy
scheduler: lr scheduler
args: parser arguments
"""
best_model_wts = copy.deepcopy(model.state_dict())
best_epoch = 0
best_loss = 1.0e9
loss_keep = 0 # check how many times the val loss has decreased continuously
epoch_loss_prev = 1.0e9 # loss at the previous epoch
epoch_acc = {'train': [], 'val': [], 'test': []}
epoch_f1_score = {'train': [], 'val': [], 'test': []}
epoch_f1_score_class = {'train': [], 'val': [], 'test': []}
epoch_loss = {'train': [], 'val': [], 'test': []}
epoch_hdist = {'train': [], 'val': [], 'test': []}
epoch_asd = {'train': [], 'val': [], 'test': []}
epoch_vd = {'train': [], 'val': [], 'test': []}
metric_prev_epoch = None
phases_prev_epoch = None
for epoch in range(args.num_train_epochs):
print("{}/{}".format(epoch+1, args.num_train_epochs))
if epoch != 0 and epoch % args.n_epoch_hardmining == 0:
is_hard_mining = True
else:
is_hard_mining = False
if args.model_type == '2d':
from image.dataloader import read_train_data
if args.onlyrisk:
dataloaders = read_train_data(args.data_dir, args.compose, 'train', None, None, True,
is_hard_mining, args.num_workers, args.batch_size, args.percentile, args.multi_view,
args.onlyrisk, args.config, args.bc_learning)
else:
dataloaders = read_train_data(args.data_dir, args.compose, 'train', metric_prev_epoch, phases_prev_epoch, True,
is_hard_mining, args.num_workers, args.batch_size, args.percentile, args.multi_view,
args.onlyrisk, args.config, args.bc_learning)
elif args.model_type == '3d':
from volume.dataloader import read_train_data
dataloaders = read_train_data(args.data_dir, metric_prev_epoch, phases_prev_epoch, args.compose, 'train',
is_hard_mining, args.percentile, args.multi_view, args.interval, args.down_sample,
args.batch_size, args.num_workers, True, args.config)
if len(dataloaders['train'].dataset.phases) <= 20:
break
dataset_sizes = {'train': 0, 'val': 0, 'test': 0}
for phase in ['train', 'val', 'test']:
# print("processing {}".format(phase))
if phase == 'train':
scheduler.step()
if args.model == 'deeplab_resnet':
adjust_learning_rate(optimizer, scheduler)
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
if phase == 'train':
f1_slices_epoch = []
running_loss = 0.0
running_hdist, running_asd, running_vd = 0.0, 0.0, 0.0
running_corrects, running_f1_score = 0.0, 0.0
running_fscores = np.zeros(args.output_channel, dtype=np.float32)
running_effect_samples = np.zeros(args.output_channel, dtype=np.uint32)
running_cal_pgt, running_cal_pp, running_cal_tp = 0, 0, 0
running_noncal_pgt, running_noncal_pp, running_noncal_tp = 0, 0, 0
dl_pbar = tqdm(dataloaders[phase])
for sample_inx, sample in enumerate(dl_pbar):
dl_pbar.update(100)
inputs, labels = sample
patch_size = len(inputs) if args.model != 'hyper_tiramisu' else len(inputs[0])
dataset_sizes[phase] += patch_size
# wrap them in Variable
if args.use_gpu:
if args.model == 'hyper_tiramisu':
inputs = [Variable(input.cuda()).float() for input in inputs]
else:
inputs = Variable(inputs.cuda()).float()
if phase == 'train' and args.bc_learning is not None:
labels = Variable(labels.cuda()).float()
else:
labels = Variable(labels.cuda()).long()
else:
if args.model == 'hyper_tiramisu':
inputs = [Variable(input).float() for input in inputs]
else:
inputs = Variable(inputs).float()
if phase == 'train' and args.bc_learning is not None:
labels = Variable(labels).float()
else:
labels = Variable(labels).long()
optimizer.zero_grad()
outputs = model(inputs)
if phase == 'train' and args.bc_learning is not None: # for bc learning
if args.model == 'deeplab_resnet':
loss = cal_loss(outputs, labels, args.criterion, args.criterion_bc)
outputs = outputs[-1] # max fusion output is saved
outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs)
else:
loss = args.criterion_bc(outputs, labels)
else:
if args.model == 'deeplab_resnet':
loss = cal_loss(outputs, labels, args.criterion, criterion)
outputs = outputs[-1] # max fusion output is saved
outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs)
else:
if args.criterion == 'nll' and not args.mpl:
loss = criterion(F.log_softmax(outputs, dim=1), labels)
else: # dice, ce, gdl1, gdl2
loss = criterion(outputs, labels)
_, preds = torch.max(outputs.data, 1)
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
if args.output_channel >= 5 and not args.bound_out:
# calculate calcified and non-calcified plaque detection rate
cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = plaque_detection_rate(labels, preds,
args.output_channel)
# accumulate gt, positive and true positive from each minibatch
running_cal_pgt += cal_pgt
running_cal_pp += cal_pp
running_cal_tp += cal_tp
running_noncal_pgt += noncal_pgt
running_noncal_pp += noncal_pp
running_noncal_tp += noncal_tp
if phase == 'train' and args.bc_learning is not None:
_, labels = torch.max(labels, 1)
running_loss += loss.data.item() * patch_size
running_corrects += float(torch.sum(preds == labels.data)) / preds[0].numel()
# calculate hd95 and asd
preds_np, labels_np = preds.cpu().numpy(), labels.data.cpu().numpy()
preds_bound_np = np.stack([mask2innerouterbound(pred, args.width) for pred in preds_np])
labels_bound_np = np.stack([mask2innerouterbound(label, args.width) for label in labels_np])
mean_hdf = volumewise_hd95(preds_bound_np , labels_bound_np, return_slicewise_hdf=False)
mean_asd = volumewise_asd(preds_bound_np, labels_bound_np, n_classes=3)
running_hdist += mean_hdf * patch_size
running_asd += mean_asd * patch_size
# calculate F1, VD
cal_f1 = cal_f_score if args.model_type == '2d' else cal_f_score_slicewise
_, f_scores, n_effect_samples, f1_slices_batch = cal_f1(preds_np, labels_np, n_class=args.output_channel,
return_slice_f1=True, return_class_f1=True)
running_fscores += f_scores
running_effect_samples += n_effect_samples
mean_vd = abs(ravd(preds_np, labels_np))
running_vd += mean_vd * patch_size
if phase == 'train':
f1_slices_epoch += f1_slices_batch
dl_pbar.close()
print()
epoch_loss[phase].append(running_loss / dataset_sizes[phase])
epoch_acc[phase].append(float(running_corrects) / dataset_sizes[phase])
epoch_hdist[phase].append(running_hdist / dataset_sizes[phase])
epoch_asd[phase].append(running_asd / dataset_sizes[phase])
epoch_vd[phase].append(running_vd / dataset_sizes[phase])
running_f1_class = running_fscores / running_effect_samples
epoch_f1_score_class[phase].append(running_f1_class) # f1 score for each class
epoch_f1_score[phase].append(running_f1_class.mean())
print("[{:5s}({} samples)] Loss: {:.4f} Acc: {:.4f} Ave_F1: {:.4f} class-wise F1: {} Ave_hdf: {:.4f} "
"Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(phase, len(dataloaders[phase].dataset.phases),
epoch_loss[phase][-1], epoch_acc[phase][-1], epoch_f1_score[phase][-1], running_f1_class,
epoch_hdist[phase][-1], epoch_asd[phase][-1], epoch_vd[phase][-1]))
# update metric_prev_epoch and phases_prev_epoch
if phase == 'train':
metric_prev_epoch = np.array(f1_slices_epoch)
phases_prev_epoch = dataloaders['train'].dataset.phases
# deep copy the model
if phase == 'val':
val_loss_bf = sum(epoch_loss['val'][-5:]) / len(epoch_loss['val'][-5:])
if val_loss_bf <= best_loss:
best_loss = val_loss_bf
best_epoch = epoch
# be careful when assign one tensor to another
best_model_wts = copy.deepcopy(model.state_dict())
torch.save(model, args.model_save_name)
if val_loss_bf > epoch_loss_prev:
loss_keep += 1
else:
loss_keep = 0
epoch_loss_prev = val_loss_bf
if args.output_channel >= 5 and not args.bound_out:
# calculate cal and non-cal detection rate for test data
epoch_cal_pr = float(running_cal_tp) / running_cal_pp if running_cal_pp != 0 else 0.0
epoch_cal_rc = float(running_cal_tp) / running_cal_pgt
epoch_cal_f1 = 2.0 * running_cal_tp / (running_cal_pgt + running_cal_pp)
epoch_noncal_pr = float(running_noncal_tp) / running_noncal_pp if running_noncal_pp != 0 else 0.0
epoch_noncal_rc = float(running_noncal_tp) / running_noncal_pgt
epoch_noncal_f1 = 2.0 * running_noncal_tp / (running_noncal_pgt + running_noncal_pp)
print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(
epoch_cal_pr, epoch_cal_rc, epoch_cal_f1, epoch_noncal_pr, epoch_noncal_rc, epoch_noncal_f1))
# plot temporal loss, acc, f1_score after test
if (epoch+1) % 5 == 0 and phase == 'test':
metrics = [epoch_loss, epoch_acc, epoch_f1_score, epoch_asd, epoch_vd, epoch_hdist]
labels = ['total_loss', 'pixel_acc', 'F1_score', 'asd', 'vd', 'hd95']
plot_metrics(metrics, labels, fig_dir=args.fig_dir)
plot_class_f1(epoch_f1_score_class, args.fig_dir)
if loss_keep == 10:
break
# plot loss, acc, f1, asd, vd, hd95
metrics = [epoch_loss, epoch_acc, epoch_f1_score, epoch_asd, epoch_vd, epoch_hdist]
labels = ['total_loss', 'pixel_acc', 'F1_score', 'asd', 'vd', 'hd95']
plot_metrics(metrics, labels, fig_dir=args.fig_dir)
plot_class_f1(epoch_f1_score_class, args.fig_dir)
print('Best val loss: {:4f}'.format(best_loss))
model.load_state_dict(best_model_wts)
torch.save(model, args.model_save_name)
def model_reference(args, sample_stack_rows=50):
""" model reference and plot the segmentation results
for model reference, several epochs are used to balance the risks and other metrics
for segmentation results plotting, only one epoch is used without data augmentation
Args:
model: model
dataloaders: DataLoader class, dataloader used to read test data
args: parser arguments
sample_stack_rows: int, how many slices to plot per image
"""
#############################################################################################
# Part 1: model reference and metric evaluations
#############################################################################################
model = torch.load(args.model_save_name, map_location=lambda storage, loc: storage)
if args.use_gpu:
model = model.cuda()
dataset_sizes = 0
running_hdist, running_asd, running_vd = 0.0, 0.0, 0.0
running_corrects, running_f1, running_dice_score = 0.0, 0.0, 0.0
# for class-wise F1 scores
running_num_samples_class = np.zeros(args.output_channel , dtype=np.uint32)
running_class_f1 = np.zeros(args.output_channel, dtype=np.float32)
if args.model_type == '2d':
from image.dataloader import read_train_data
dataloaders = read_train_data(args.data_dir, args.compose, 'test', None, None, True,
False, args.num_workers, args.batch_size, args.percentile,
args.multi_view, args.onlyrisk, args.config)
elif args.model_type == '3d':
from volume.dataloader import read_train_data
dataloaders = read_train_data(args.data_dir, None, None, args.compose, 'test',
False, args.percentile, args.multi_view, args.interval, args.down_sample,
args.batch_size, args.num_workers, True, args.config)
for samp_inx, sample in enumerate(dataloaders['test']):
inputs, labels = sample
patch_size = len(inputs) if args.model != 'hyper_tiramisu' else len(inputs[0])
dataset_sizes += patch_size
# wrap them in Variable
if args.use_gpu:
if args.model == 'hyper_tiramisu':
inputs = [Variable(input.cuda()).float() for input in inputs]
else:
inputs = Variable(inputs.cuda()).float()
labels = Variable(labels.cuda()).long()
else:
if args.model == 'hyper_tiramisu':
inputs = [Variable(input).float() for input in inputs]
else:
inputs = Variable(inputs).float()
labels = Variable(labels).long()
outputs = model(inputs)
if args.model == 'deeplab_resnet':
outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs[-1])
_, preds = torch.max(outputs.data, 1)
# calculate seg correct and risk correct within each minibatch
running_corrects += float(torch.sum(preds == labels.data)) / preds[0].numel()
# calculate hd95 and asd
preds_np, labels_np = preds.cpu().numpy(), labels.data.cpu().numpy()
preds_bound_np = np.stack([mask2innerouterbound(pred, args.width) for pred in preds_np])
labels_bound_np = np.stack([mask2innerouterbound(label, args.width) for label in labels_np])
mean_hdf = volumewise_hd95(preds_bound_np, labels_bound_np, return_slicewise_hdf=False)
mean_asd = volumewise_asd(preds_bound_np, labels_bound_np, n_classes=3)
running_hdist += mean_hdf * patch_size
running_asd += mean_asd * patch_size
# calculate F1 score
cal_f1 = cal_f_score if args.model_type == '2d' else cal_f_score_slicewise
batch_f1, batch_class_f1, n_effect_samples = cal_f1(preds_np, labels_np, n_class=args.output_channel,
return_class_f1=True)
running_class_f1 += batch_class_f1
running_num_samples_class += n_effect_samples
mean_vd = abs(ravd(preds_np, labels_np))
running_vd += mean_vd * patch_size
if args.output_channel >= 5 and not args.bound_out:
labels_np = labels.data.cpu().numpy()
preds_np = preds.cpu().numpy()
if samp_inx == 0:
labels_all = labels_np
preds_all = preds_np
else:
labels_all = np.concatenate([labels_all, labels_np], axis=0) # [N, H, W]
preds_all = np.concatenate([preds_all, preds_np], axis=0)
# plot slice-wise measurements under different thresholds
if args.output_channel >= 5 and not args.bound_out:
plot_slice_wise_measures(labels_all, preds_all, args)
epoch_acc = float(running_corrects) / dataset_sizes
epoch_class_f1 = running_class_f1 / running_num_samples_class
epoch_f1 = epoch_class_f1.mean()
epoch_hdist = running_hdist / dataset_sizes
epoch_asd = running_asd / dataset_sizes
epoch_vd = running_vd / dataset_sizes
# print various metrics
print("Acc: {:.4f} Ave_F1: {:.4f} Ave_hdf: {:.4f}, Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(
epoch_acc, epoch_f1, epoch_hdist, epoch_asd, epoch_vd))
for c_inx, each_f1 in enumerate(epoch_class_f1):
print("Class-{}: F1-{:.4f}".format(c_inx, each_f1))
if args.do_plot:
############################################################################################
# Part 2: plot segmentation results (这部分真蛋疼,已经写了无数遍了)
############################################################################################
plot_data = args.plot_data
args.compose[plot_data] = args.compose['test']
if args.model_type == '2d':
from image.dataloader import read_plot_data
dataloaders = read_plot_data(args.data_dir, args.compose, plot_data, False, args.num_workers,
args.batch_size, args.multi_view, args.config)
elif args.model_type == '3d':
from volume.dataloader import read_plot_data
dataloaders = read_plot_data(args.data_dir, args.compose, plot_data, args.multi_view, args.interval,
args.down_sample, args.num_workers, False, args.config)
for samp_inx, sample in enumerate(dataloaders[plot_data]):
inputs_batch, labels, sample_name, start = sample
# convert inputs into list of tensor no matter whether 'hyper_tiramisu' model or not
if args.model != 'hyper_tiramisu':
inputs_batch = [inputs_batch]
sample_name, start = sample_name[0], start.item()
inputs_batch = [torch.squeeze(input, dim=0) for input in inputs_batch] # [N, 1, T, H, W]
labels = torch.squeeze(labels, dim=0) # [N, T, H, W]
patch_size = len(inputs_batch[0])
for mb_inx in range(0, patch_size, args.batch_size):
end = min(mb_inx + args.batch_size, patch_size)
inputs = [input[mb_inx:end] for input in inputs_batch]
# wrap them in Variable
if args.use_gpu:
inputs = [Variable(input.cuda()).float() for input in inputs]
else:
inputs = [Variable(input).float() for input in inputs]
if args.model != 'hyper_tiramisu':
inputs = inputs[0]
outputs = model(inputs) # both outputs and preds are tensors
if args.model == 'deeplab_resnet':
outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs[-1])
outputs_mb_np = outputs.data.cpu().numpy()
_, preds = torch.max(outputs.data, 1)
preds_mb_np = preds.cpu().numpy()
if mb_inx == 0:
preds_np = np.zeros((patch_size, *(preds_mb_np[0].shape)), dtype=preds_mb_np.dtype)
outputs_np = np.zeros((patch_size, *(outputs_mb_np[0].shape)), dtype=outputs_mb_np.dtype)
preds_np[mb_inx:end], outputs_np[mb_inx:end] = preds_mb_np, outputs_mb_np
# convert into numpy
labels_np = labels.cpu().numpy()
if inputs_batch[0].size(1) == 1:
inputs_np = [torch.squeeze(input, dim=1).cpu().numpy() for input in inputs_batch]
else:
inputs_np = [input[:, 0].cpu().numpy() for input in inputs_batch]
# for 2D images, we can directly use it for plot, for 3D volume, transform is necessary
if args.model_type == '3d':
inputs_np, labels_np, preds_np = rearrange_volume(inputs_np, labels_np, preds_np, args)
plot_seg_save_risk(labels_np, inputs_np, preds_np, start, sample_name, args.fig_dir,
sample_stack_rows, args.output_channel, args.width)
def rearrange_volume(inputs, labels, preds, args):
""" rearrange volumes into the correct order
:param inputs: list of ndarrays (N, D, H, W)
:param labels: ndarray (N, D, H, W)
:param preds: ndarray (N, D, H, W)
:return:
"""
inputs = [np.reshape(input, (-1, *(input.shape[2:]))) for input in inputs]
labels = np.reshape(labels, (-1, *(labels.shape[2:])))
preds = np.reshape(preds, (-1, *(preds.shape[2:])))
num_slices = len(inputs[0])
indexes = []
args.stride = args.down_sample * args.interval
for s_inx in range(0, num_slices, args.stride):
for i in range(args.interval):
for j in range(args.down_sample):
inx = s_inx + i + j * args.interval
if inx < num_slices:
indexes.append(inx)
inputs = [input[indexes] for input in inputs]
labels = labels[indexes]
preds = preds[indexes]
return (inputs, labels, preds)
# this part varies for different task (segmentation or bound detection)
def plot_seg_save_risk(labels, inputs, preds, start, samp_art_name, root_fig_dir, sample_stack_rows, n_class,
width):
""" plot segmentation results """
fig_dir = root_fig_dir + '/' + samp_art_name
if not osp.exists(fig_dir):
os.makedirs(fig_dir)
data = {'input': inputs, 'label': labels, 'pred': preds,
'sample_name': samp_art_name, 'start': start, 'n_class': n_class, 'width': width}
with open(osp.join(fig_dir, 'data.pkl'), 'wb') as writer:
pickle.dump(data, writer, protocol=pickle.HIGHEST_PROTOCOL)
print("# of slices: {}".format(len(inputs[0]))) # number of input slices
# plot the inputs, ground truth, outputs and F1 scores with sample_stack2
for inx in range(0, len(inputs[0]), sample_stack_rows):
# print("# of slices: {}".format(len(inputs[0]))) # number of input slices
over = min(inx + sample_stack_rows, len(inputs[0]))
label_plot, input_plot, pred_plot = labels[inx:over], [inputs[i][inx:over] for i in range(len(inputs))], \
preds[inx:over]
# print("inputplot size: {}".format([stream.shape for stream in input_plot]))
input_plot = [input for input in zip(*[input_plot[i] for i in range(len(input_plot))])]
data_list = [{"input": input[0], "GT": label, "pred": pred}
for (input, label, pred) in zip(input_plot, label_plot, pred_plot)]
file_name = "{}/{:03d}".format(fig_dir, inx + start)
sample_seg_with_hfd(data_list, rows=over - inx, start_with=0, show_every=1, fig_name=file_name,
start_inx=inx + start, n_class=n_class, width=width) | Python |
3D | kkhuang1990/PlaqueDetection | volume/main.sh | .sh | 3,449 | 96 | #!/bin/bash
# input/output
OUTPUT_CHANNEL=3
BOUND_OUTPUT='False'
WIDTH=1 # boundary width
#DATA_DIR="/home/mil/huang/Dataset/CPR_multiview"
DATA_DIR="/data/ugui0/antonio-t/CPR_multiview_interp2_huang"
# Experiment
EXPERIMENT="Experiment1"
SUB_FOLDER="Res-UNet_CE_3class"
# optimizer
LR_SCHEDULER='StepLR'
MOMENTUM=0.90
GAMMA=0.9
CRITERION='ce' # cross entropy loss with bound weight
W0=10.0
SIGMA=5.0 # for more sharp boundaries
IGNORE_INDEX='None'
CAL_ZEROGT='False' # whether calculate GT with all pixels equal to zero (only for dice loss)
ALPHA=0.5
OPT='Adam'
WEIGHT='True'
MOD_OUTLINE='False' # modify outline weight to put higher importance on outline
WEIGHT_TYPE='None' # what type of weight to use 'None', 'nlf' or 'mfb'
LR=0.001
STEP_SIZE=10
W_DECAY=0.0005 # almost default setting for segmentation
MPL='False'
# training
SING_GPU_ID=2
ONLY_TEST='False'
NUM_WORKERS=16
BATCH_SIZE=256 # 6 for unet/res_unet
NUM_TRAIN_EPOCHS=100
USE_PRE_TRAIN='False'
PRE_TRAIN_PATH="./Experiment14/2d_res_unet_0.001_0.90_0.9_theta-1.0-0.0_85_200_10_dice_160_96_Adam_rot-True_\
flip-True_w-True_rcp-True_rtrans-False_noise-False_ptr-False_multiview-False_shallow-False_onlyrisk-False_int-32_ds\
-2_alpha-0.5_bc-False_lr-StepLR"
PERCENTILE=100
N_EPOCH_HARDMINING=10
ONLYRISK='False'
CONFIG='config'
# pre-processing/augmentation
R_CENTRAL_CROP='True'
NOISE='False'
FLIP='True'
ROTATION='True'
RANDOM_TRANS='False'
CENTRAL_CROP=192
RESCALE=96
INTERVAL=32
DOWN_SAMPLE=2
MULTI_VIEW='False'
BC_LEARNING='False'
# models
MODEL_TYPE='2d'
MODEL='res_unet_dp'
DROP_OUT=0.0 # drop_out rate for res_unet
THETA=1.0
WITH_SHALLOW_NET='True'
# visualization
DO_PLOT='True'
PLOT_DATA='test'
# create fig_dir to save log file and generated graphs
FIG_DIR="${EXPERIMENT}/${SUB_FOLDER}"
# create $FIG_DIR if it doesn't exist
if [ ! -d "./${FIG_DIR}" ]; then
mkdir -p ./${FIG_DIR}
fi
LOG="./${FIG_DIR}/train.`date +'%Y-%m-%d_%H-%M-%S'`.txt"
exec &> >(tee -a "$LOG")
echo "Logging output to $LOG"
CUDA_VISIBLE_DEVICES=${SING_GPU_ID} python main.py --central_crop ${CENTRAL_CROP} --rescale $RESCALE --output_channel ${OUTPUT_CHANNEL} \
--num_train_epochs ${NUM_TRAIN_EPOCHS} --w_decay ${W_DECAY} --lr $LR --momentum $MOMENTUM \
--step_size ${STEP_SIZE} --gamma $GAMMA --batch_size ${BATCH_SIZE} --num_workers ${NUM_WORKERS} \
--criterion $CRITERION --opt $OPT --data_dir ${DATA_DIR} --interval ${INTERVAL} --model_type ${MODEL_TYPE}\
--weight $WEIGHT --only_test ${ONLY_TEST} --rotation $ROTATION --flip $FLIP --r_central_crop ${R_CENTRAL_CROP} \
--random_trans ${RANDOM_TRANS} --noise $NOISE --use_pre_train ${USE_PRE_TRAIN} \
--pre_train_path ${PRE_TRAIN_PATH} --fig_dir ${FIG_DIR} --onlyrisk ${ONLYRISK} \
--with_shallow_net ${WITH_SHALLOW_NET} --do_plot ${DO_PLOT} --down_sample ${DOWN_SAMPLE}\
--n_epoch_hardmining ${N_EPOCH_HARDMINING} --percentile ${PERCENTILE} --plot_data ${PLOT_DATA} \
--multi_view ${MULTI_VIEW} --model ${MODEL} --theta ${THETA} --config ${CONFIG} --bc_learning ${BC_LEARNING} \
--lr_scheduler ${LR_SCHEDULER} --weight_type ${WEIGHT_TYPE} --mpl ${MPL} --cal_zerogt ${CAL_ZEROGT} \
--drop_out ${DROP_OUT} --ignore_index ${IGNORE_INDEX} --w0 ${W0} --sigma ${SIGMA} --bound_out ${BOUND_OUTPUT} \
--width ${WIDTH} --mod_outline ${MOD_OUTLINE} | Shell |
3D | kkhuang1990/PlaqueDetection | volume/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | volume/main.py | .py | 17,494 | 348 | # _*_ coding: utf-8 _*_
""" main code for train and test U-Net """
from __future__ import print_function
import os, sys
sys.path.append("..")
import numpy as np
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import argparse
import shutil
from loss import dice_score_slicewise, GeneralizedDiceLoss, WeightedKLDivLoss
from loss import WeightedCrossEntropy, FocalLoss, DiceLoss
from loss import MaxPoolLoss, CrossEntropyBoundLoss
import os.path as osp
from train import train_model, model_reference
from torchvision import transforms
from lr_scheduler import PolyLR
from image.models.deeplab_resnet import get_1x_lr_params_NOscale, get_10x_lr_params
from torch.optim import lr_scheduler
import matplotlib as mpl
mpl.use('Agg')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--data_dir', type=str, help="from where to read data")
parser.add_argument('--central_crop', type=int, default=160)
parser.add_argument('--rescale', type=int, default=96)
parser.add_argument('--output_channel', type=int, default=5, choices=(2, 3, 4, 5))
parser.add_argument('--num_train_epochs', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.90)
parser.add_argument('--w_decay', type=float, default=0.005)
parser.add_argument('--step_size', type=int, default=20)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--use_gpu', type=bool, default=torch.cuda.is_available())
parser.add_argument('--num_workers', type=int, default=12)
parser.add_argument('--criterion', type=str, default='nll')
parser.add_argument('--opt', type=str, default='Adam', help="optimizer")
parser.add_argument('--weight', type=lambda x: True if x.lower()=='true' else None, default=True)
parser.add_argument('--weight_type', type=lambda x: None if x.lower()=='none' else x, default=None)
parser.add_argument('--only_test', type=lambda x: x.lower()=='true')
parser.add_argument('--rotation', type=lambda x: x.lower()=='true')
parser.add_argument('--flip', type=lambda x: x.lower()=='true')
parser.add_argument('--r_central_crop', type=lambda x: x.lower()=='true')
parser.add_argument('--random_trans', type=lambda x: x.lower()=='true')
parser.add_argument('--noise', type= lambda x: x.lower()=='true', help="whether add Gaussian noise or not")
parser.add_argument('--use_pre_train', type=lambda x: x.lower()=='true')
parser.add_argument('--fig_dir', type=str, help="directory for saving segmentation results")
parser.add_argument('--pre_train_path', type=str)
parser.add_argument('--with_shallow_net', type= lambda x: x.lower()=='true')
parser.add_argument('--n_epoch_hardmining', type=int, default=15, help="every how many epochs for hard mining")
parser.add_argument('--percentile', type=int, default=85, help="how much percent samples to save for hard mining")
parser.add_argument('--plot_data', type=str, default='test', help="what data to plot")
parser.add_argument('--do_plot', type=lambda x: x.lower()=='true', help="whether plot test results or not")
parser.add_argument('--multi_view', type=lambda x: x.lower()=='true', help="whether to use multi-view inputs")
parser.add_argument('--model', type=str, choices=('tiramisu', 'unet', 'res_unet', 'hyper_tiramisu', 'deeplab_resnet',
'res_unet_dp'), help="which model to use")
parser.add_argument('--theta', type=float, help="compression ratio for DenseNet")
parser.add_argument('--onlyrisk', type=lambda x: x.lower()=='true', help="whether only use risk samples")
parser.add_argument('--interval', type=int, help="interval of slices in volume")
parser.add_argument('--down_sample', type=int, default=1, help="down sampling step")
parser.add_argument('--model_type', type=str, default='2d', help="use 2D or 3D model")
parser.add_argument('--config', type=str, default='config', help="config file name for train/val/test data split")
parser.add_argument('--alpha', type=float, default=0.5, help="ratio of false positive in generalized dice loss")
parser.add_argument('--bc_learning', type=lambda x: None if x.lower()=='false' else x, default=None)
parser.add_argument('--lr_scheduler', type=str, default='StepLR', help="learning scheduler")
parser.add_argument('--mpl', type=lambda x: x.lower()=='true', default=False, help="whether max-pooling loss or not")
parser.add_argument('--cal_zerogt', type= lambda x: x.lower() == 'true', default=False,
help= "whether calculate F1 score for case of all GT pixels are zero")
parser.add_argument('--drop_out', type=float, default=0.0,
help= "drop out rate for Res-UNet model")
parser.add_argument('--ignore_index', type=lambda x: None if x.lower()=='none' else int(x),
help= "ignore index")
parser.add_argument('--w0', type=float, default=10.0, help="bound loss amptitude")
parser.add_argument('--sigma', type=float, default=5.0, help="bound loss variance")
parser.add_argument('--bound_out', type=lambda x: x.lower()=='true', default=False,
help="whether output with bound")
parser.add_argument('--width', default=1, type=int, help="bound width")
parser.add_argument('--mod_outline', default=False, type=lambda x: x.lower()=='true',
help="whether modify outline or not")
args = parser.parse_args()
shutil.copy('./main.sh', './{}'.format(args.fig_dir)) # save current bash file for replicating experiment results
args.model_save_name = "./{}/model.pth".format(args.fig_dir)
# transforms and augmentations
if args.model_type == '2d':
from image.transforms import Gray2TripleWithBound
from image.transforms import CentralCrop, Rescale, Gray2Triple, Gray2Mask, ToTensor, Gray2Binary, Identical, HU2Gray, RandomFlip
from image.transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, HU2GrayMultiStreamToTensor
elif args.model_type == '3d':
from volume.transforms import CentralCrop, Rescale, Gray2Mask, ToTensor, Gray2Binary, Identical, HU2Gray, RandomFlip
from volume.transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, HU2GrayMultiStreamToTensor
# transforms
if args.output_channel == 2:
ToMask = Gray2Binary()
elif args.output_channel == 3:
ToMask = Gray2Triple()
elif args.output_channel == 4:
ToMask = Gray2TripleWithBound(n_classes=4, width=args.width)
elif args.output_channel == 5:
if args.bound_out:
ToMask = Gray2TripleWithBound(n_classes=5, width=args.width)
else:
ToMask = Gray2Mask()
args.compose = {'train': transforms.Compose([HU2Gray() if args.model != 'hyper_tiramisu' else Identical(),
RandomRotation() if args.rotation else Identical(),
RandomFlip() if args.flip else Identical(),
RandomCentralCrop() if args.r_central_crop else CentralCrop(args.central_crop),
Rescale((args.rescale)),
RandomTranslate() if args.random_trans else Identical(),
AddNoise() if args.noise else Identical(),
ToMask,
ToTensor() if args.model != 'hyper_tiramisu' else HU2GrayMultiStreamToTensor()]),
'test': transforms.Compose([HU2Gray() if args.model != 'hyper_tiramisu' else Identical(),
CentralCrop(args.central_crop),
Rescale(args.rescale),
ToMask,
ToTensor() if args.model != 'hyper_tiramisu' else HU2GrayMultiStreamToTensor()])}
# whether use pre_train model or not
if args.use_pre_train:
model = torch.load("{}/model.pth".format(args.pre_train_path),
map_location=lambda storage, loc: storage)
else:
args.color_channel = 3 if args.multi_view else 1
# ---- model construction -------------------------------------------------
# Lazily import and instantiate the network selected on the command line.
# '2d' variants live under image.models, '3d' under volume.models;
# `with_shallow_net` picks the smaller of the two depths each module offers.
if args.model_type == '2d':
    if args.model == 'unet':
        if args.with_shallow_net:
            from image.models.unet import UNet18 as UNet
        else:
            from image.models.unet import UNet28 as UNet
        model = UNet(args.color_channel, args.output_channel)
    elif args.model == 'res_unet':
        print("res_unet is called")
        if args.with_shallow_net:
            from image.models.res_unet import ResUNet18 as ResUNet
        else:
            from image.models.res_unet import ResUNet28 as ResUNet
        model = ResUNet(args.color_channel, args.output_channel)
    elif args.model == 'res_unet_dp':
        # residual U-Net variant with dropout (extra drop_out argument)
        print("res_unet is called")
        if args.with_shallow_net:
            from image.models.res_unet_dp import ResUNet18 as ResUNet
        else:
            from image.models.res_unet_dp import ResUNet28 as ResUNet
        model = ResUNet(args.color_channel, args.output_channel, args.drop_out)
    elif args.model == 'tiramisu':
        if args.with_shallow_net:
            from image.models.tiramisu import FCDenseNet43 as FCDenseNet
        else:
            from image.models.tiramisu import FCDenseNet67 as FCDenseNet
        model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
    elif args.model == 'hyper_tiramisu':
        if args.with_shallow_net:
            from image.models.hyper_tiramisu import FCDenseNet43 as FCDenseNet
        else:
            from image.models.hyper_tiramisu import FCDenseNet67 as FCDenseNet
        model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
    elif args.model == 'deeplab_resnet':
        # deeplab has no shallow/deep switch
        from image.models.deeplab_resnet import Res_Ms_Deeplab
        model = Res_Ms_Deeplab(args.color_channel, args.output_channel)
elif args.model_type == '3d':
    if args.model == 'unet':
        if args.with_shallow_net:
            from volume.models.unet import UNet18 as UNet
        else:
            from volume.models.unet import UNet28 as UNet
        model = UNet(args.color_channel, args.output_channel)
    elif args.model == 'res_unet':
        print("res_unet is called")
        if args.with_shallow_net:
            from volume.models.res_unet import ResUNet18 as ResUNet
        else:
            from volume.models.res_unet import ResUNet28 as ResUNet
        model = ResUNet(args.color_channel, args.output_channel)
    elif args.model == 'tiramisu':
        if args.with_shallow_net:
            from volume.models.tiramisu import FCDenseNet43 as FCDenseNet
        else:
            from volume.models.tiramisu import FCDenseNet67 as FCDenseNet
        model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
    elif args.model == 'hyper_tiramisu':
        if args.with_shallow_net:
            from volume.models.hyper_tiramisu import FCDenseNet43 as FCDenseNet
        else:
            from volume.models.hyper_tiramisu import FCDenseNet67 as FCDenseNet
        model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
# whether use gpu or not
if args.use_gpu:
    model = model.cuda()
# whether introduce prior weight into loss function or not
# NOTE(review): precomputed per-class weights are loaded from .npy files under
# ../class_weights; weight_type selects the balancing scheme (None / 'nlf' / 'mfb').
if args.weight:
    if args.weight_type is None:
        if args.bound_out:
            weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_bound_{}.npy'.format(args.output_channel))).float()
        else:
            if args.output_channel == 5:
                print(os.getcwd())
                weight = torch.from_numpy(np.load('../class_weights/class_weight.npy')).float()
            else:
                weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_{}.npy'.format(args.output_channel))).float()
        # NOTE(review): indentation reconstructed — confirm mod_outline is meant
        # to apply only when weight_type is None.
        if args.mod_outline:
            weight[2] = weight[2] + 5.0  # manually modify the weight for outline
    elif args.weight_type == 'nlf':
        if args.onlyrisk:
            weight = torch.from_numpy(np.load('../class_weights/nlf_weight_onlyrisk.npy')).float()
        else:
            weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_{}.npy'.format(args.output_channel))).float()
    elif args.weight_type == 'mfb':
        if args.onlyrisk:
            weight = torch.from_numpy(np.load('../class_weights/mfb_weight_onlyrisk.npy')).float()
        else:
            weight = torch.from_numpy(np.load('../class_weights/mfb_weight_all_{}.npy'.format(args.output_channel))).float()
    # NOTE(review): assumes CUDA is available and weight_type was one of the
    # handled values (otherwise `weight` is unbound here) — confirm.
    weight = Variable(weight.cuda())
else:
    weight = args.weight  # weight is None
print("weight: {}".format(weight))
# criterion
# Select the training loss; the gdl_* variants differ only in the per-class
# weighting scheme passed to GeneralizedDiceLoss.
if args.criterion == 'nll':
    criterion = nn.NLLLoss(weight=weight)
elif args.criterion == 'ce':
    criterion = nn.CrossEntropyLoss(weight=weight)
elif args.criterion == 'dice':
    criterion = DiceLoss(weight=weight, ignore_index=None, weight_type=args.weight_type, cal_zerogt=args.cal_zerogt)
elif args.criterion == 'gdl_inv_square':
    criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type='inv_square',
                                    alpha=args.alpha)
elif args.criterion == 'gdl_others_one_gt':
    criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type='others_one_gt',
                                    alpha=args.alpha)
elif args.criterion == 'gdl_others_one_pred':
    criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type='others_one_pred',
                                    alpha=args.alpha)
elif args.criterion == 'gdl_none':
    criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type=None,
                                    alpha=args.alpha)
elif args.criterion == 'focal':
    criterion = FocalLoss()
elif args.criterion == 'wce':
    criterion = WeightedCrossEntropy()
elif args.criterion == 'ceb':  # cross entropy bound loss
    criterion = CrossEntropyBoundLoss(n_classes=args.output_channel, weight=weight, ignore_index=args.ignore_index,
                                      w0=args.w0, sigma=args.sigma)
# elif args.criterion == 'ahdf':
#     criterion = AveragedHausdorffLoss(n_classes=args.output_channel)
# criterion for BC learning
# gdl losses are reused as-is for between-class learning; everything else
# falls back to a weighted KL divergence.
if args.criterion.startswith('gdl'):
    args.criterion_bc = criterion
else:
    args.criterion_bc = WeightedKLDivLoss(weight=weight)
# Loss Max-Pooling
if args.mpl:
    criterion = MaxPoolLoss(criterion)
# optimizer
# deeplab_resnet uses two parameter groups: the pretrained backbone at lr and
# the newly-initialized head at 10*lr.
if args.opt == 'Adam':
    if args.model == 'deeplab_resnet':
        optimizer = optim.Adam([{'params': get_1x_lr_params_NOscale(model), 'lr': args.lr},
                                {'params': get_10x_lr_params(model), 'lr': 10 * args.lr}],
                               lr=args.lr, weight_decay=args.w_decay)
    else:
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay)
elif args.opt == 'sgd':
    if args.model == 'deeplab_resnet':
        optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.lr},
                               {'params': get_10x_lr_params(model), 'lr': 10 * args.lr}],
                              lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
    else:
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
# learning schedule
if args.lr_scheduler == 'StepLR':
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
elif args.lr_scheduler == 'PolyLR':
    exp_lr_scheduler = PolyLR(optimizer, max_iter=args.num_train_epochs, power=0.9)
# save args to log file
for arg in vars(args):
    print("{} : {}".format(arg, getattr(args, arg)))
# plot samples used for train, val and test respectively
print("Dataset:")
for mode in ['train', 'val', 'test']:
    config_file = osp.join('../configs/{}'.format(args.config), mode+'.txt')
    print(mode)
    with open(config_file, 'r') as reader:
        for line in reader.readlines():
            print(line.strip('\n'))
since = time.time()
if not args.only_test:
    train_model(model, criterion, optimizer, exp_lr_scheduler, args)
# model reference
# (runs even with --only_test: evaluates/plots predictions of the saved model)
model_reference(args, sample_stack_rows=50)
time_elapsed = time.time() - since
print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
    time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
3D | kkhuang1990/PlaqueDetection | volume/dataloader.py | .py | 16,652 | 360 | # _*_ coding: utf-8 _*_
""" functions used to load images and masks """
import matplotlib as mpl
mpl.use('Agg')  # headless backend: figures can be saved without a display
import os
import os.path as osp
from os import listdir
import random
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import time
from skimage import io
from skimage import transform
from .transforms import ToTensor, RandomCrop, GaussianCrop, HU2Gray, CentralCrop, Gray2Binary, Gray2Mask
from .transforms import RandomRotation, RandomFlip, RandomCentralCrop, Rescale
from torchvision import transforms
from vision import sample_stack
class CPRPlaqueTrainDataset(Dataset):
    """ dataloader of train and validation dataset.
    Patches are randomly extracted within the central part of given volume.

    Each item is a fixed-length sub-volume of `interval` slices (optionally
    down-sampled) read from one artery; hard mining can restrict an epoch to
    the worst-scoring samples of the previous epoch.
    """

    def __init__(self, data_dir, metric_prev_epoch = None, phases_prev_epoch = None, transform = None, mode = 'train',
                 is_hard_mining = False, percentile = 85, multi_view = False, interval=32, down_sample=1, config='config'):
        """ read images from img_dir and save them into a list
        Args:
            data_dir: string, from where to read image
            transform: transform, what transforms to operate on input images
            is_hard_mining: bool, whether use bad (hard) mining or not
            metric_prev_epoch: numpy ndarray, metric obtained from the previous epoch
            phases_prev_epoch: list, phases of the previous epoch
            percentile: int, keep samples whose metric is <= this percentile when hard mining
            multi_view: whether use multi_view input or not
            interval: int, how many slices in one batch volume
            down_sample: int, down sampling rate (every how many slices)
            config: str, name of the config folder listing the sample ids
        """
        super(CPRPlaqueTrainDataset, self).__init__()
        self.interval = interval
        self.mode = mode
        self.data_dir = data_dir
        self.transform = transform
        self.down_sample = down_sample
        self.is_hard_mining = is_hard_mining
        self.percentile = percentile
        self.multi_view = multi_view  # whether to use multi-view inputs or not
        # number of raw slices spanned by one item (before down-sampling)
        self.slice_range = self.interval * self.down_sample
        self.config = config
        # initialize phases for different modes
        if self.mode == 'train':
            self.phases = self.update_phases(metric_prev_epoch, phases_prev_epoch)
        else:
            self.phases = self.get_phases()

    def update_phases(self, metric_prev_epoch, phases_prev_epoch):
        """ update the phases by mining the bad samples
        :return: phases: refined phases after mining the bad samples
        """
        if phases_prev_epoch is None:
            # first epoch: enumerate everything from the config file
            phases = self.get_phases()
        else:
            if self.is_hard_mining:
                # keep only the samples at or below the given metric percentile
                thres = np.percentile(metric_prev_epoch, self.percentile)
                phases = [phase for phase, metric in zip(phases_prev_epoch, metric_prev_epoch) if metric <= thres]
            else:
                phases = phases_prev_epoch
        return phases

    def __len__(self):
        return len(self.phases)

    def get_phases(self):
        """ Enumerate every possible (image_path, mask_path, start_index)
        sub-volume for the samples listed in ../configs/<config>/<mode>.txt. """
        phases = []
        with open(osp.join('../configs/{}'.format(self.config), self.mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
            for sample in samples:
                sample_path = osp.join(self.data_dir, sample)
                for artery in sorted(listdir(sample_path)):
                    # artery_path = osp.join(sample_path, artery)
                    image_path = osp.join(sample_path, artery, 'ordinate', 'image')
                    mask_path = osp.join(sample_path, artery, 'ordinate', 'mask')
                    # extract slice files (slices are named <index>.tiff)
                    slice_files = sorted(
                        [file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
                    start_file, end_file = slice_files[0], slice_files[-1]
                    start, end = int(start_file.split('.')[0]), int(end_file.split('.')[0])
                    # one phase per valid start index of a full sub-volume
                    for s_inx in range(start, end + 1 - self.slice_range + self.down_sample):
                        phases.append((image_path, mask_path, s_inx))
        print("{} : {} samples".format(self.mode, len(phases)))
        return phases

    def __getitem__(self, inx):
        sample = self.phases[inx]
        image_path, mask_path, rand_inx = sample
        if self.multi_view:
            axis_names = ['applicate', 'abscissa', 'ordinate']
        else:
            axis_names = ['applicate']
        for a_inx, axis_name in enumerate(axis_names):
            # each view lives in a sibling folder named after its axis
            image_path_axis = image_path.replace('ordinate', axis_name)
            mask_path_axis = mask_path.replace('ordinate', axis_name)
            slice_files_axis = [osp.join(image_path_axis, "{:03d}.tiff".format(i))
                                for i in range(rand_inx, rand_inx + self.slice_range, self.down_sample)]
            label_files_axis = [osp.join(mask_path_axis, "{:03d}.tiff".format(i))
                                for i in range(rand_inx, rand_inx + self.slice_range, self.down_sample)]
            image_axis = np.stack([io.imread(slice_file) for slice_file in slice_files_axis])
            mask_axis = np.stack([io.imread(label_file) for label_file in label_files_axis])
            if axis_name == 'applicate':
                # the applicate view (always first) fixes the output size and
                # provides the mask; other views are stacked as extra channels
                new_d, new_h, new_w = image_axis.shape
                image = np.zeros((*image_axis.shape, len(axis_names)), dtype=np.int16)
                image[:, :, :, a_inx] = image_axis
                mask = mask_axis
            else:
                # if slice size doesn't match with each other, resize them into the same as applicate slice
                for s_inx in range(new_d):
                    slice_axis = image_axis[s_inx]
                    if slice_axis.shape != (new_h, new_w):
                        slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                      preserve_range=True).astype(np.int16)
                    image[s_inx, :, :, a_inx] = slice_axis
        # transform 3D image and mask
        sample_img, sample_mask = self.transform((image, mask))
        return (sample_img, sample_mask)
class CPRPlaqueTestDataset(Dataset):
    """ dataloader for test dataset.
    The whole artery is extracted as consecutive sub-volumes with the given
    stride along the applicate axis, so one item covers one complete artery.
    """

    def __init__(self, data_dir, transform = None, mode = 'train', multi_view = False, interval=32, down_sample=1,
                 config='config'):
        """ read images from img_dir and save them into a list
        Args:
            data_dir: string, root folder of the dataset
            transform: transform applied to each (image, mask) sub-volume
            mode: str, which split file to read ('train'/'val'/'test')
            multi_view: bool, whether to stack the three axis views as channels
            interval: int, number of slices per sub-volume after down-sampling
            down_sample: int, down sampling rate (every how many slices)
            config: str, name of the config folder listing the sample ids
        """
        super(CPRPlaqueTestDataset, self).__init__()
        self.interval = interval
        self.mode = mode
        self.data_dir = data_dir
        self.transform = transform
        self.down_sample = down_sample
        self.multi_view = multi_view  # whether to use multi-view inputs or not
        # raw slices consumed per sub-volume
        self.stride = self.interval * self.down_sample
        self.config = config
        self.phases = self.get_phases()

    def get_phases(self):
        """ Collect one (image_path, mask_path) pair per artery listed in
        ../configs/<config>/<mode>.txt. """
        phases = []
        with open(osp.join('../configs/{}'.format(self.config), self.mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
            for sample in samples:
                sample_path = osp.join(self.data_dir, sample)
                for artery in sorted(listdir(sample_path)):
                    # artery_path = osp.join(sample_path, artery)
                    image_path = osp.join(sample_path, artery, 'ordinate', 'image')
                    mask_path = osp.join(sample_path, artery, 'ordinate', 'mask')
                    phases.append((image_path, mask_path))
        print("{} : {} samples".format(self.mode, len(phases)))
        return phases

    def __len__(self):
        return len(self.phases)

    def __getitem__(self, inx):
        sample = self.phases[inx]
        image_path, mask_path = sample
        # '<sample_id>/<artery_name>' used to identify the artery in outputs
        sample_name = '/'.join(image_path.split('/')[-4:-2])
        # extract slice files
        slice_files = sorted(
            [file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
        start_file, end_file = slice_files[0], slice_files[-1]
        start, end = int(start_file.split('.')[0]), int(end_file.split('.')[0])
        # total number of sub-volumes covering the artery
        n_sample = len(range(start, end + 2 - self.stride, self.stride)) * self.down_sample
        for s_inx in range(start, end + 2 - self.stride, self.stride):
            for shift in range(self.down_sample):
                if self.multi_view:
                    axis_names = ['applicate', 'abscissa', 'ordinate']
                else:
                    axis_names = ['applicate']
                for a_inx, axis_name in enumerate(axis_names):
                    image_path_axis = image_path.replace('ordinate', axis_name)
                    mask_path_axis = mask_path.replace('ordinate', axis_name)
                    slice_files_axis = [osp.join(image_path_axis, "{:03d}.tiff".format(i))
                                        for i in range(s_inx + shift, s_inx + shift + self.stride, self.down_sample)]
                    label_files_axis = [osp.join(mask_path_axis, "{:03d}.tiff".format(i))
                                        for i in range(s_inx + shift, s_inx + shift + self.stride, self.down_sample)]
                    image_axis = np.stack([io.imread(slice_file) for slice_file in slice_files_axis])
                    mask_axis = np.stack([io.imread(label_file) for label_file in label_files_axis])
                    if axis_name == 'applicate':
                        new_d, new_h, new_w = image_axis.shape
                        image = np.zeros((*image_axis.shape, len(axis_names)), dtype=np.int16)
                        image[:, :, :, a_inx] = image_axis
                        mask = mask_axis
                    else:
                        # if slice size doesn't match with each other, resize them into the same as applicate slice
                        for slice_inx in range(new_d):
                            slice_axis = image_axis[slice_inx]
                            if slice_axis.shape != (new_h, new_w):
                                slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                              preserve_range=True).astype(np.int16)
                            image[slice_inx, :, :, a_inx] = slice_axis
                image, mask = self.transform((image, mask))
                if s_inx == start and shift == 0:
                    # allocate output buffers lazily, once the transformed size is known
                    if isinstance(image, list):
                        # BUG FIX: `image.size()` was called on the list itself,
                        # which raises AttributeError for multi-stream transforms;
                        # size each buffer from its own stream tensor instead.
                        sample_img = [torch.zeros([n_sample, *list(view.size())]).float() for view in image]
                    else:
                        sample_img = torch.zeros([n_sample, *list(image.size())]).float()
                    sample_mask = torch.zeros([n_sample, *list(mask.size())]).long()
                # (s_inx-start)//interval advances by down_sample per stride step,
                # and `shift` interleaves the down-sampled sub-volumes in between
                if isinstance(image, list):
                    for i in range(len(image)):
                        sample_img[i][(s_inx-start) // self.interval + shift] = image[i]
                else:
                    sample_img[(s_inx - start) // self.interval + shift] = image
                sample_mask[(s_inx-start) // self.interval + shift] = mask
        return (sample_img, sample_mask, sample_name, start)
def read_train_data(data_dir, metric_prev_epoch = None, phases_prev_epoch = None, transform = None, mode = 'train',
                    is_hard_mining = False, percentile = 85, multi_view = False, interval=32, down_sample=1,
                    batch_size= 32, num_workers= 12, shuffle=True, config='config'):
    """Build one DataLoader per split for training/validation.

    In 'train' mode loaders for 'train', 'val' and 'test' are returned;
    otherwise only a 'test' loader is built. The 'val' split reuses the
    'test' transform.
    """
    transform['val'] = transform['test']
    splits = ['train', 'val', 'test'] if mode == 'train' else ['test']
    dataloaders = {}
    for split in splits:
        dataset = CPRPlaqueTrainDataset(data_dir, metric_prev_epoch, phases_prev_epoch, transform[split],
                                        split, is_hard_mining, percentile, multi_view, interval,
                                        down_sample, config)
        dataloaders[split] = DataLoader(dataset=dataset, shuffle=shuffle,
                                        num_workers=num_workers, batch_size=batch_size)
    return dataloaders
def read_plot_data(data_dir, transform, plot_data, multi_view=False, interval=16, down_sample=1,
                   num_workers= 16, shuffle=False, config='config'):
    """Build a single batch-size-1 DataLoader over whole arteries for plotting.

    `plot_data` names the split to read ('train'/'val'/'test'); the 'val'
    split reuses the 'test' transform.
    """
    transform['val'] = transform['test']
    dataset = CPRPlaqueTestDataset(data_dir, transform[plot_data], plot_data, multi_view,
                                   interval, down_sample, config)
    loader = DataLoader(dataset=dataset, shuffle=shuffle,
                        num_workers=num_workers, batch_size=1)
    return {plot_data: loader}
def show_dataloader():
    """ show each data sample to verify the correctness of dataloader

    Manual smoke test: iterates the train/val loaders, prints tensor sizes,
    dumps slice montages under ./data_samples/<i>/ and accumulates per-class
    pixel counts (the class-weight computation is left commented out).
    """
    since = time.time()
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    # data_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # data_dir = "/Users/AlbertHuang/CT_Anomaly_Detection/Plaque_CPR/20180213"
    trans_params = {
        'central_crop' : 160,
        'random_crop' : (64, 64),
        'rescale' : (64, 64),
        'output_channel' : 5
    }
    composed = {'train': transforms.Compose([HU2Gray(),
                                             RandomCentralCrop(),
                                             RandomRotation(),
                                             RandomFlip(),
                                             Rescale(trans_params['rescale']),
                                             Gray2Binary() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                             # AddNoise(),
                                             # RandomTranslate(),
                                             ToTensor()]),
                'test': transforms.Compose([HU2Gray(),
                                            CentralCrop(160),
                                            Rescale(trans_params['rescale']),
                                            Gray2Binary() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                            ToTensor()])}
    dataloaders = read_train_data(data_dir, None, None, composed, 'train', False, 85, True, interval=32,
                                  down_sample=1, batch_size=8, num_workers=8, shuffle=True)
    # dataloaders = read_test_data(data_dir, composed, 'test', True, interval=16, down_sample=1, num_workers=8, shuffle=False)
    num_pixel = np.zeros(5, dtype=np.uint32)  # per-class pixel counts over all batches
    for inx in range(1):
        datasizes = {'train':0, 'val':0}
        # datasizes = {'test': 0}
        for phase in ['train', 'val']:
            # for phase in ['test']:
            # des_path = osp.join(des_dir, phase, str(inx))
            # if not osp.exists(des_path):
            #     os.makedirs(des_path)
            for i, sample in enumerate(dataloaders[phase]):
                image, mask = sample
                print("image size: {}".format(image.size()))
                print("mask size: {}".format(mask.size()))
                # keep only the first channel for plotting multi-view inputs
                if image.size(1) == 1:
                    image_np = image.squeeze(1).numpy()
                else:
                    image_np = image[:, 0, ::].numpy()
                mask_np = mask.numpy()
                img_dir = "./data_samples/{}".format(i)
                if not osp.exists(img_dir):
                    os.makedirs(img_dir)
                image_name = "./data_samples/{}/image".format(i)
                mask_name = "./data_samples/{}/mask".format(i)
                sample_stack(image_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=image_name)
                sample_stack(mask_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=mask_name)
                # image, mask, _, _ = sample
                datasizes[phase] += image.size(0)
                # NOTE(review): this inner loop variable shadows the batch index `i` above
                for i, label in enumerate(mask.numpy()):
                    for j in range(trans_params['output_channel']):
                        num_pixel[j] += np.sum(label == j)
    # print("Train: {}, Val: {}".format(datasizes['train'], datasizes['val']))
    # class_freq = num_pixel / num_pixel.sum()
    # class_weight = np.median(class_freq) / class_freq
    # print("median frequency balancing: {}".format(class_weight))
    # np.save("./class_weight_mfb.npy", class_weight)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
if __name__ == "__main__":
    # manual smoke test of the dataloaders (prints sizes, saves montages)
    show_dataloader()
3D | kkhuang1990/PlaqueDetection | volume/transforms.py | .py | 14,554 | 440 | # _*_ coding: utf-8 _*_
""" transforms for 3D volume """
import torch
from skimage import transform
import numpy as np
import random
import warnings
import cv2
from scipy import ndimage
from sklearn.preprocessing import label_binarize
from utils import hu2lut, gray2mask, central_crop, hu2lut, hu2gray
from utils import gray2bound, gray2mask, rgb2mask, gray2triplewithbound
from utils import gray2innerbound, gray2outerbound, gray2innerouterbound
# Silence skimage UserWarnings (e.g. precision-loss notices during resize/rotate).
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
class Gray2TripleWithBound(object):
    """Convert each grayscale label slice into a triple segmentation plus
    boundary labels via `gray2triplewithbound` (from utils)."""

    def __init__(self, n_classes=4, width=1):
        self.n_classes = n_classes  # number of classes after conversion
        self.width = width          # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        mask = np.zeros_like(gray, dtype=np.uint8)
        for idx in range(len(gray)):
            mask[idx] = gray2triplewithbound(gray[idx], self.n_classes, self.width)
        return image, mask
class Gray2InnerBound(object):
    """Reduce every slice of a grayscale mask volume to its inner boundary
    via `gray2innerbound` (from utils)."""

    def __init__(self, width=1):
        self.width = width  # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        bound = np.zeros_like(gray)
        for idx in range(len(gray)):
            bound[idx] = gray2innerbound(gray[idx], self.width)
        return image, bound
class Gray2OuterBound(object):
    """ convert mask with grayscale value to outer bound

    FIX: docstring and local variable previously said "inner bound" (copy-paste
    from Gray2InnerBound) although the conversion uses gray2outerbound.
    """

    def __init__(self, width=1):
        self.width = width  # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        outer_bound = np.zeros_like(gray)
        for l_inx, label in enumerate(gray):
            outer_bound[l_inx] = gray2outerbound(label, self.width)
        return image, outer_bound
class Gray2InnerOuterBound(object):
    """Convert a grayscale mask volume to inner and outer boundaries, where
    the two boundaries are treated as distinct classes
    (via `gray2innerouterbound` from utils)."""

    def __init__(self, width=2):
        self.width = width  # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        bounds = np.zeros_like(gray)
        for idx in range(len(gray)):
            bounds[idx] = gray2innerouterbound(gray[idx], self.width)
        return image, bounds
class Gray2Bound(object):
    """Convert every slice of a grayscale mask volume to its boundary map
    via `gray2bound` (from utils)."""

    def __init__(self, n_classes=3, width=2):
        self.width = width          # boundary width in pixels
        self.n_classes = n_classes  # number of boundary classes

    def __call__(self, sample):
        image, gray = sample
        bounds = np.zeros_like(gray)
        for idx in range(len(gray)):
            bounds[idx] = gray2bound(gray[idx], self.n_classes, self.width)
        return image, bounds
class RandomRotation(object):
    """With probability `prob`, rotate all slices of image and mask by a
    random multiple of `angle` degrees (reflect padding; nearest-neighbour
    interpolation for the mask so label values are preserved)."""

    def __init__(self, angle=90, prob=0.8):
        self.angle = angle
        self.prob = prob

    def __call__(self, sample):
        image, mask = sample
        # draw the angle first, then decide whether to apply it
        # (same RNG consumption order as before)
        chosen_angle = random.randrange(0, 360, self.angle)
        if random.uniform(0, 1) <= self.prob:
            for idx in range(len(image)):
                image[idx] = transform.rotate(image[idx], chosen_angle,
                                              mode='reflect', preserve_range=True)
                mask[idx] = transform.rotate(mask[idx], chosen_angle,
                                             mode='reflect', preserve_range=True, order=0)
        return (image, mask)
class RandomFlip(object):
    """With probability `prob`, flip every slice of image and mask along a
    randomly chosen in-plane axis (0 = vertical, 1 = horizontal)."""

    def __init__(self, prob=0.8):
        self.prob = prob

    def __call__(self, sample):
        image, mask = sample
        draw = random.uniform(0, 1)
        if draw < self.prob:
            axis = random.randint(0, 1)
            for idx in range(len(image)):
                image[idx] = np.flip(image[idx], axis)
                mask[idx] = np.flip(mask[idx], axis)
        return (image, mask)
class Rescale(object):
    """Rescale the image in a sample to a given size with range preserved.

    Args:
        output_size (tuple or int): Desired output size. An int yields a
            square (size, size) output; a 2-tuple is used as (height, width).
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2, "output_size should be a 2-dimensional tuple"
            self.output_size = output_size

    def __call__(self, sample):
        image, mask = sample
        # __init__ always normalizes output_size to a 2-tuple, so the former
        # `isinstance(self.output_size, int)` branch here was dead code (removed).
        new_h, new_w = int(self.output_size[0]), int(self.output_size[1])
        if image.ndim == 3:
            new_image = np.zeros((len(image), new_h, new_w), dtype=image.dtype)
        elif image.ndim == 4:
            new_image = np.zeros((len(image), new_h, new_w, image.shape[3]), dtype=image.dtype)
        new_mask = np.zeros((len(mask), new_h, new_w), dtype=mask.dtype)
        for i, (slice, label) in enumerate(zip(image, mask)):
            new_image[i] = transform.resize(slice, (new_h, new_w), mode= 'reflect', preserve_range=True)
            # order=0 (nearest) so mask label values are not interpolated
            new_mask[i] = transform.resize(label, (new_h, new_w), mode= 'reflect', preserve_range=True, order=0)
        return new_image, new_mask
class RandomCrop(object):
    """Crop a random (crop_h, crop_w) window, shared across all slices.

    Args:
        output_size (tuple or int): Desired output size. If int, a square
            crop is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, mask = sample
        h, w = image.shape[1:3]
        crop_h, crop_w = self.output_size
        top = np.random.randint(0, h - crop_h)
        left = np.random.randint(0, w - crop_w)
        rows = slice(top, top + crop_h)
        cols = slice(left, left + crop_w)
        return image[:, rows, cols], mask[:, rows, cols]
class RGB2Mask(object):
    """ convert 3D rgb annotation to 3D mask
    red - (255, 0, 0) : low-density plaque --> 4
    black - (0, 0, 0) : background --> 0
    orange - (255, 128, 0) : calcification --> 3
    white - (255, 255, 255) : Border of the artery (small in healthy patients) --> 2
    blue - (0, 0, 255) : inside of the artery --> 1
    """

    def __call__(self, sample):
        image, rgb = sample
        mask = np.zeros(rgb.shape[:3], dtype=np.uint8)
        palette = (([255, 0, 0], 4),
                   ([255, 128, 0], 3),
                   ([255, 255, 255], 2),
                   ([0, 0, 255], 1))
        for color, label in palette:
            mask[np.all(rgb == color, axis=3)] = label
        return image, mask
class Gray2Mask(object):
    """ convert gray-scale image to 2D mask
    red - 76 : low-density plaque --> 4
    black - 0 : background --> 0
    orange - 151 : calcification --> 3
    white - 255 : Border of the artery (small in healthy patients) --> 2
    blue - 29 : inside of the artery --> 1
    """

    def __call__(self, sample):
        image, gray = sample
        mask = np.zeros_like(gray, dtype=np.uint8)
        for gray_value, label in ((76, 4), (151, 3), (255, 2), (29, 1)):
            mask[gray == gray_value] = label
        return image, mask
class Gray2Binary(object):
    """Convert a grayscale label image to a binary mask: any non-zero gray
    value becomes 1, background stays 0."""

    def __call__(self, sample):
        image, gray = sample
        binary = (gray != 0).astype(np.uint8)
        return image, binary
class HU2Gray(object):
    """Linearly map HU values from [hu_min, hu_max] onto the grayscale
    range [0, 255] (values outside the range are NOT clipped)."""

    def __init__(self, hu_max=1440.0, hu_min=-1024.0):
        self.hu_max = hu_max
        self.hu_min = hu_min
        self.scale = float(255) / (self.hu_max - self.hu_min)

    def __call__(self, sample):
        """ convert HU value to gray scale [0, 255]
        hu: numpy ndarray, Image of HU value, [H, W]
        """
        image, mask = sample
        rescaled = (image - self.hu_min) * self.scale
        return rescaled, mask
class HU2LUT(object):
    """Apply a window/level lookup to HU data: values below the window map
    to 0, above to 255, and in-window values are scaled linearly; the result
    is returned as uint8."""

    def __init__(self, window, level):
        self.window = window  # window width in HU
        self.level = level    # window centre in HU

    def __call__(self, sample):
        data, mask = sample
        centre = self.level - 0.5
        half = (self.window - 1) / 2
        lut = np.piecewise(data,
                           [data <= centre - half, data > centre + half],
                           [0, 255,
                            lambda d: ((d - centre) / (self.window - 1) + 0.5) * (255 - 0)])
        return lut.astype(np.uint8), mask
class ToTensor(object):
    """Convert ndarrays in sample to Tensors; optionally normalize the image
    from [0, 255] to [0, 1]."""

    def __init__(self, norm=True):
        self.norm = norm

    def __call__(self, sample):
        image, mask = sample
        # numpy stores channels last (D, H, W, C); torch wants them first
        if image.ndim == 4:
            image = np.transpose(image, (3, 0, 1, 2))
        elif image.ndim == 3:
            image = image[None, :, :, :]  # add a singleton channel axis
        image_tensor = torch.from_numpy(image).float()
        if self.norm:
            image_tensor = image_tensor / 255.0
        mask_tensor = torch.from_numpy(mask).long()
        return image_tensor, mask_tensor
class HU2GrayMultiStreamToTensor(object):
    """Convert an HU volume into several grayscale 'streams' (one per
    window-width/centre pair via `hu2lut`) and return them as a list of
    float tensors alongside the long-typed mask tensor."""

    def __init__(self, w_widths = [500.0, 100.0], w_centers = [250.0, 50.0], norm=True):
        self.w_widths = w_widths
        self.w_centers = w_centers
        self.norm = norm

    def __call__(self, sample):
        image, mask = sample
        # channels-last -> channels-first / add singleton channel axis
        if image.ndim == 4:
            image = image.transpose((3, 0, 1, 2))
        elif image.ndim == 3:
            image = image[np.newaxis, :, :, :]
        streams = []
        for width, centre in zip(self.w_widths, self.w_centers):
            windowed = torch.from_numpy(hu2lut(image, width, centre)).float()
            if self.norm:
                windowed = windowed / 255.0
            streams.append(windowed)
        return streams, torch.from_numpy(mask).long()
class Identical(object):
    """No-op transform: returns the sample unchanged (useful as a
    placeholder in a Compose pipeline)."""

    def __call__(self, sample):
        return sample
class CentralCrop(object):
    """Crop the central (size[0], size[1]) window out of every slice.

    The difference between input and crop size must be even on both axes so
    the crop can be perfectly centred.
    """

    def __init__(self, size):
        assert isinstance(size, (int, tuple))
        if isinstance(size, int):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        """ centre crop the given image
        Args:
            sample : (image, mask)
        """
        image, mask = sample
        h, w = image.shape[1:3]
        crop_h, crop_w = self.size
        assert (h - crop_h) % 2 == 0 and (w - crop_w) % 2 == 0, \
            "new image size must match with the input image size"
        top = (h - crop_h) // 2
        left = (w - crop_w) // 2
        rows = slice(top, top + crop_h)
        cols = slice(left, left + crop_w)
        return image[:, rows, cols], mask[:, rows, cols]
class GaussianCrop(object):
    """Crop a patch whose centre is drawn from a Gaussian around the image
    centre (relative std `sigma`), clamped so the patch stays inside."""

    def __init__(self, size, sigma=0.1):
        assert isinstance(size, (int, tuple))
        if isinstance(size, int):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.sigma = sigma

    def __call__(self, sample):
        """ centre crop the given image
        Args:
            sample : (image, mask)
        """
        image, mask = sample
        h, w = image.shape[1:3]
        p_h, p_w = self.size
        half_h, half_w = p_h // 2, p_w // 2
        # draw the crop centre, then clamp it into the valid range
        c_h = int(random.normalvariate(0.5, self.sigma) * h)
        if c_h < half_h:
            c_h = half_h
        elif c_h > h - half_h:
            c_h = h - half_h
        c_w = int(random.normalvariate(0.5, self.sigma) * w)
        if c_w < half_w:
            c_w = half_w
        elif c_w > w - half_w:
            c_w = w - half_w
        rows = slice(c_h - half_h, c_h + half_h)
        cols = slice(c_w - half_w, c_w + half_w)
        return image[:, rows, cols], mask[:, rows, cols]
class RandomCentralCrop(object):
    """Central crop with a size drawn uniformly from
    range(lower_size, upper_size, step); the bounds must be even so the
    crop stays centred."""

    def __init__(self, lower_size=160, upper_size=256, step=4):
        assert lower_size % 2 == 0 and upper_size % 2 == 0, "both lower and upper size should be even number"
        self.lower_size = lower_size
        self.upper_size = upper_size
        self.step = step

    def __call__(self, sample):
        crop_size = random.randrange(self.lower_size, self.upper_size, self.step)
        return CentralCrop(crop_size)(sample)
class AddNoise(object):
    """With probability `prob`, add i.i.d. Gaussian noise N(loc, scale)
    to the image in place; the mask is untouched."""

    def __init__(self, loc=0.0, scale=1.0, prob=0.5):
        self.loc = loc
        self.scale = scale
        self.prob = prob

    def __call__(self, sample):
        image, mask = sample
        if random.uniform(0, 1) <= self.prob:
            image += np.random.normal(self.loc, self.scale, image.shape)
        return image, mask
class RandomTranslate(object):
    """ random translate the given image

    With probability `prob`, shift every slice of image and mask by the same
    random offset drawn from [-W/4, W/4] x [-H/4, H/4].
    """

    def __init__(self, prob=0.5):
        self.prob = prob  # probability of applying the translation

    def __call__(self, sample):
        image, mask = sample
        [H, W] = image.shape[1:3]
        x = random.uniform(0, 1)
        if x <= self.prob:
            right = random.randint(int(-W/4), int(W/4))
            # BUG FIX: the lower bound was int(H/4), which made randint return
            # the constant H/4 (always shifting downward) instead of drawing
            # uniformly from [-H/4, H/4] like the horizontal shift.
            down = random.randint(int(-H/4), int(H/4))
            # 2x3 affine matrix encoding a pure translation
            M = np.float32([[1, 0, right], [0, 1, down]])
            for i, (slice, label) in enumerate(zip(image, mask)):
                image[i] = cv2.warpAffine(slice, M, (W, H))
                mask[i] = cv2.warpAffine(label, M, (W, H))
        return image, mask
3D | kkhuang1990/PlaqueDetection | volume/models/res_unet.py | .py | 5,131 | 144 | # coding = utf-8
""" define the U-Net structure """
import torch
from torch import nn
from .utils import _initialize_weights
def conv_333(in_channels, out_channels, stride=1):
    """3x3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv3d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=True)


class ResBlock(nn.Module):
    """Pre-activation 3D residual block: (BN -> ReLU -> conv) twice, then
    dropout, with a 1x1x1 projection shortcut whenever the stride or the
    channel count changes."""

    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        # keep submodule creation order identical so parameter init is stable
        self.downsample = downsample
        self.bn1 = nn.BatchNorm3d(in_channels)
        self.conv1 = conv_333(in_channels, out_channels, stride=stride)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.conv2 = conv_333(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout3d(p=p)
        if stride != 1 or in_channels != out_channels:
            # projection shortcut to match shape of the residual branch
            self.downsample = nn.Sequential(
                nn.Conv3d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm3d(out_channels))

    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.dp(out)
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return out
class UpConv(nn.Module):
""" up convolution """
def __init__(self, in_channels, out_channels):
super().__init__()
self.transconv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size=2,
stride=2, padding=0)
def forward(self, skip, x):
out = self.transconv(x)
out = torch.cat([skip, out], 1)
return out
class ResUNet(nn.Module):
    """ UNet class

    3D residual U-Net: a contracting path of ResBlocks (each stride-2 block
    halves the spatial resolution), a bottleneck, and an expansive path of
    UpConv upsampling + ResBlocks fed by skip connections, followed by a
    1x1x1 classification convolution. Returns raw per-class scores.
    """

    def __init__(self, in_channels=1, out_channels=5, down_blocks=[32, 64, 128, 256, 512],
                 up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024, p=0.5):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        # initial 3x3x3 convolution lifting the input to down_blocks[0] channels
        self.conv1 = nn.Conv3d(in_channels, self.down_blocks[0], 3, padding=1)
        # contract path
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks):
            output_channel = self.down_blocks[b_inx]
            if b_inx == 0:
                # first block keeps the input resolution (stride 1)
                input_channel = self.down_blocks[0]
                self.BlocksDown.append(ResBlock(input_channel, output_channel, stride=1, p=p))
            else:
                # later blocks halve the spatial resolution (stride 2)
                input_channel = self.down_blocks[b_inx-1]
                self.BlocksDown.append(ResBlock(input_channel, output_channel, stride=2, p=p))
        # bottleneck block
        self.bottleneck = ResBlock(self.down_blocks[-1], bottleneck, stride=2, p=p)
        # expansive path
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            # UpConv concatenates the skip (output_channel) with the upsampled
            # tensor (output_channel), giving input_channel channels again
            self.BlocksUp.append(ResBlock(input_channel, output_channel, stride=1, p=p))
        # final convolution layer
        self.fl = nn.Conv3d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights(self)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv1(x)
        # print(out.size())
        # record the output of every down block for the skip connections
        skip_connections = []
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            # print(out.size())
        out = self.bottleneck(out)
        # print(out.size())
        # consume the skips in reverse order (deepest first)
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
            # print(out.size())
        output = self.fl(out)
        # print(out.size())
        return output
def _res_unet(in_channels, out_channels, p, down, up, bottleneck):
    # Shared constructor for the fixed-size ResUNet variants below.
    return ResUNet(in_channels=in_channels, out_channels=out_channels,
                   down_blocks=down, up_blocks=up, bottleneck=bottleneck, p=p)


def ResUNet28(in_channels, out_channels, p=0.0):
    """Five-level ResUNet, bottleneck width 1024."""
    return _res_unet(in_channels, out_channels, p,
                     [32, 64, 128, 256, 512], [512, 256, 128, 64, 32], 1024)


def ResUNet23(in_channels, out_channels, p=0.0):
    """Four-level ResUNet, bottleneck width 512."""
    return _res_unet(in_channels, out_channels, p,
                     [32, 64, 128, 256], [256, 128, 64, 32], 512)


def ResUNet18(in_channels, out_channels, p=0.0):
    """Three-level ResUNet, bottleneck width 256."""
    return _res_unet(in_channels, out_channels, p,
                     [32, 64, 128], [128, 64, 32], 256)
if __name__ == "__main__":
    # Smoke test: print the architecture and push one dummy batch through it.
    unet = ResUNet28(1, 2, p=0.0)
    print(unet)
    dummy = torch.FloatTensor(6, 1, 32, 96, 96)
    _ = unet(dummy)
3D | kkhuang1990/PlaqueDetection | volume/models/unet.py | .py | 3,945 | 110 | # _*_ coding: utf-8 _*_
""" 3D U-Net for semantic segmentation """
import torch
from torch import nn
from .utils import _initialize_weights
class ConvBlock(nn.Sequential):
    """Two padded 3x3x3 convolutions, each followed by an in-place ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Module names are fixed ('conv1', ...) so existing checkpoints keep loading.
        stages = (
            ('conv1', nn.Conv3d(in_channels, out_channels, 3, stride=1, padding=1)),
            ('relu1', nn.ReLU(True)),
            ('conv2', nn.Conv3d(out_channels, out_channels, 3, stride=1, padding=1)),
            ('relu2', nn.ReLU(True)),
        )
        for name, module in stages:
            self.add_module(name, module)

    def forward(self, x):
        # nn.Sequential chains the sub-modules in registration order.
        return super().forward(x)
class UpConv(nn.Module):
    """Stride-2 transposed convolution that doubles the spatial size, fused with the skip tensor."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose3d(in_channels, out_channels,
                                            kernel_size=2, stride=2, padding=0)

    def forward(self, skip, x):
        grown = self.transconv(x)
        # concatenate along the channel axis, skip features first
        return torch.cat([skip, grown], 1)
class UNet(nn.Module):
    """ Plain 3-D U-Net: ConvBlock + max-pool down path, UpConv + ConvBlock up path.

    Channel widths must halve per level so the concatenated UpConv output
    (out_channels + skip channels) matches the next ConvBlock's input.
    Spatial dims should be divisible by 2**len(down_blocks) so the skip and
    upsampled shapes line up -- assumed, confirm with callers.
    """
    def __init__(self, in_channels=1, out_channels=5, down_blocks=[32, 64, 128, 256, 512],
                 up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024):
        """
        :param in_channels: int, channels of the input volume
        :param out_channels: int, number of output classes
        :param down_blocks: list of int, widths of the contracting path
        :param up_blocks: list of int, widths of the expansive path (mirror of down_blocks)
        :param bottleneck: int, width at the bottom of the U
        """
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        # contract path
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks):
            input_channel = in_channels if b_inx == 0 else self.down_blocks[b_inx-1]
            output_channel = self.down_blocks[b_inx]
            self.BlocksDown.append(ConvBlock(input_channel, output_channel))
        # bottleneck block
        self.bottleneck = ConvBlock(self.down_blocks[-1], bottleneck)
        # expansive path
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            # UpConv output = output_channel + skip channels == input_channel
            self.BlocksUp.append(ConvBlock(input_channel, output_channel))
        # final convolution layer
        self.fl = nn.Conv3d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights(self)
        # one pooling module shared by every down level (stateless)
        self.maxpool = nn.MaxPool3d(kernel_size=2, stride=2)
    def forward(self, x):
        """ :param x: (N, C, D, H, W) tensor; returns class scores with the
        same spatial shape. """
        out = x
        # print(out.size())
        skip_connections = []
        for down_block in self.BlocksDown:
            out = down_block(out)
            # features are stored BEFORE pooling so the skips keep full resolution
            skip_connections.append(out)
            out = self.maxpool(out)
            # print(out.size())
        out = self.bottleneck(out)
        # print(out.size())
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
            # print(out.size())
        output = self.fl(out)
        return output
def _unet(in_channels, out_channels, down, up, bottleneck):
    # Shared constructor for the fixed-size UNet variants below.
    return UNet(in_channels=in_channels, out_channels=out_channels,
                down_blocks=down, up_blocks=up, bottleneck=bottleneck)


def UNet28(in_channels, out_channels):
    """Five-level UNet, bottleneck width 1024."""
    return _unet(in_channels, out_channels,
                 [32, 64, 128, 256, 512], [512, 256, 128, 64, 32], 1024)


def UNet23(in_channels, out_channels):
    """Four-level UNet, bottleneck width 512."""
    return _unet(in_channels, out_channels,
                 [32, 64, 128, 256], [256, 128, 64, 32], 512)


def UNet18(in_channels, out_channels):
    """Three-level UNet, bottleneck width 256."""
    return _unet(in_channels, out_channels,
                 [32, 64, 128], [128, 64, 32], 256)
# if __name__ == "__main__":
# in_channels = 1
# out_channels = 5
# unet = UNet28(in_channels, out_channels)
# print(unet)
# x = torch.FloatTensor(6, 1, 32, 96, 96)
# y = unet(x) | Python |
3D | kkhuang1990/PlaqueDetection | volume/models/tiramisu.py | .py | 8,943 | 233 | # _*_ coding: utf-8 _*_
import torch
import torch.nn as nn
from .utils import _initialize_weights
class DenseLayer(nn.Sequential):
    """BN -> ReLU -> 3x3x3 conv -> Dropout3d(0.2); emits ``growth_rate`` new feature maps."""

    def __init__(self, in_channels, growth_rate):
        super().__init__()
        for name, module in (
                ('norm', nn.BatchNorm3d(in_channels)),
                ('relu', nn.ReLU(True)),
                ('conv', nn.Conv3d(in_channels, growth_rate, kernel_size=3,
                                   stride=1, padding=1, bias=True)),
                ('drop', nn.Dropout3d(0.2))):
            self.add_module(name, module)

    def forward(self, x):
        return super().forward(x)
class DenseBlock(nn.Module):
    """Stack of DenseLayers with dense (concatenative) connectivity.

    :param in_channels: int, number of input feature maps
    :param growth_rate: int, feature maps added by each layer
    :param n_layers: int, number of DenseLayers
    :param upsample: bool; if True return only the newly produced features
        (keeps the up path from exploding in width), otherwise return the
        input concatenated with every layer's output.
    """

    def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
        super().__init__()
        self.upsample = upsample
        self.layers = nn.ModuleList(
            [DenseLayer(in_channels + i * growth_rate, growth_rate)
             for i in range(n_layers)])

    def forward(self, x):
        if not self.upsample:
            # Down path: each layer sees everything produced so far,
            # and the input stays part of the output.
            for layer in self.layers:
                x = torch.cat([x, layer(x)], 1)
            return x
        # Up path: feed the running concatenation to every layer but
        # return only the fresh feature maps.
        fresh = []
        for layer in self.layers:
            out = layer(x)
            fresh.append(out)
            x = torch.cat([x, out], 1)
        return torch.cat(fresh, 1)
class TransitionDown(nn.Sequential):
    """Compress channels by factor ``theta`` with a 1x1x1 conv, then 2x max-pool."""

    def __init__(self, in_channels, theta=1.0):
        super().__init__()
        out_channels = int(theta * in_channels)
        self.add_module('norm', nn.BatchNorm3d(num_features=in_channels))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv3d(in_channels, out_channels,
                                          kernel_size=1, stride=1,
                                          padding=0, bias=True))
        self.add_module('drop', nn.Dropout3d(0.2))
        self.add_module('maxpool', nn.MaxPool3d(2))

    def forward(self, x):
        return super().forward(x)
class TransitionUp(nn.Module):
    """Stride-2 transposed conv, center-cropped to the skip's size, then concatenated with it."""

    def __init__(self, in_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose3d(
            in_channels=in_channels, out_channels=in_channels,
            kernel_size=3, stride=2, padding=0, bias=True)

    def forward(self, x, skip):
        up = self.convTrans(x)
        # kernel 3 / stride 2 overshoots the skip's size; crop back before fusing
        up = center_crop(up, skip.size(2), skip.size(3), skip.size(4))
        return torch.cat([up, skip], 1)
class Bottleneck(nn.Sequential):
    """DenseBlock at the bottom of the U; ``upsample=True`` so only new feature maps are emitted."""

    def __init__(self, in_channels, growth_rate, n_layers):
        super().__init__()
        # name kept as 'bottleneck' for checkpoint compatibility
        self.add_module('bottleneck',
                        DenseBlock(in_channels, growth_rate, n_layers, upsample=True))

    def forward(self, x):
        return super().forward(x)
def center_crop(layer, max_depth, max_height, max_width):
    """Crop a 5-D (N, C, D, H, W) tensor symmetrically to the requested spatial size."""
    _, _, depth, height, width = layer.size()
    d0 = (depth - max_depth) // 2
    h0 = (height - max_height) // 2
    w0 = (width - max_width) // 2
    return layer[:, :, d0:d0 + max_depth, h0:h0 + max_height, w0:w0 + max_width]
class FCDenseNet(nn.Module):
    """ 3-D fully convolutional DenseNet (Tiramisu) for semantic segmentation.

    Down path: DenseBlock (input kept) + TransitionDown.  Up path:
    TransitionUp (transposed conv + skip concat) + DenseBlock emitting only
    new features, except for the last block which also keeps its input so
    the classifier sees the full concatenation.
    """
    def __init__(self, in_channels=1, down_blocks=(5,5,5),
                 up_blocks=(5,5,5), bottleneck_layers=5,
                 growth_rate=16, out_chans_first_conv=32, n_classes=5, theta=1.0):
        """
        :param in_channels: int, channels of the input volume
        :param down_blocks: tuple of int, dense-layer counts on the down path
        :param up_blocks: tuple of int, dense-layer counts on the up path
        :param bottleneck_layers: int, dense layers in the bottleneck
        :param growth_rate: int, feature maps added per dense layer
        :param out_chans_first_conv: int, width of the stem convolution
        :param n_classes: int, number of segmentation classes
        :param theta: float, channel compression factor in TransitionDown
        """
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.theta = theta
        # running bookkeeping of the channel width entering each stage
        cur_channels_count = 0
        skip_connection_channel_counts = []
        ## First Convolution ##
        self.add_module('firstconv', nn.Conv3d(in_channels=in_channels,
                  out_channels=out_chans_first_conv, kernel_size=3,
                  stride=1, padding=1, bias=True))
        cur_channels_count = out_chans_first_conv
        ## Down-sampling path ##
        self.denseBlocksDown = nn.ModuleList([])
        self.transDownBlocks = nn.ModuleList([])
        for i in range(len(down_blocks)):
            self.denseBlocksDown.append(
                DenseBlock(cur_channels_count, growth_rate, down_blocks[i])) # both the input and output are saved
            cur_channels_count += (growth_rate*down_blocks[i])
            # skips are taken BEFORE TransitionDown; insert(0, ...) so index i
            # on the up path matches the deepest-first pop order
            skip_connection_channel_counts.insert(0,cur_channels_count)
            # TransitionDown compresses channels by theta (unchanged when theta == 1)
            self.transDownBlocks.append(TransitionDown(cur_channels_count, self.theta))
            cur_channels_count = int(self.theta * cur_channels_count)
        ## Bottleneck ##
        self.add_module('bottleneck',Bottleneck(cur_channels_count,
                                     growth_rate, bottleneck_layers))
        # upsample=True: the bottleneck emits only the new feature maps
        prev_block_channels = growth_rate*bottleneck_layers
        cur_channels_count += prev_block_channels
        ## Up-sampling path ##
        self.transUpBlocks = nn.ModuleList([])
        self.denseBlocksUp = nn.ModuleList([])
        for i in range(len(up_blocks)):
            self.transUpBlocks.append(TransitionUp(prev_block_channels))
            cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]
            if i != len(up_blocks) - 1:
                self.denseBlocksUp.append(DenseBlock(
                    cur_channels_count, growth_rate, up_blocks[i],
                    upsample=True))
            else:
                # last block keeps its input so the final conv sees everything
                self.denseBlocksUp.append(DenseBlock(
                    cur_channels_count, growth_rate, up_blocks[-1],
                    upsample=False))
            prev_block_channels = growth_rate*up_blocks[i]
        cur_channels_count += growth_rate*up_blocks[-1]
        ## final convolution ##
        self.finalConv = nn.Conv3d(in_channels=cur_channels_count,
               out_channels=n_classes, kernel_size=1, stride=1,
                   padding=0, bias=True)
        _initialize_weights(self)
    def forward(self, x):
        """ :param x: (N, C, D, H, W) tensor; returns unnormalized class scores. """
        out = self.firstconv(x)
        # print(out.size())
        skip_connections = []
        for i in range(len(self.down_blocks)):
            out = self.denseBlocksDown[i](out)
            skip_connections.append(out)
            out = self.transDownBlocks[i](out)
            # print(out.size())
        out = self.bottleneck(out)
        # print(out.size())
        for i in range(len(self.up_blocks)):
            # deepest skip first, matching skip_connection_channel_counts order
            skip = skip_connections.pop()
            out = self.transUpBlocks[i](out, skip)
            out = self.denseBlocksUp[i](out)
            # print(out.size())
        out = self.finalConv(out)
        # print(out.size())
        # out = self.softmax(out)
        return out
def _fcdensenet(in_channel, n_classes, theta, down, up, bn_layers, growth, first_conv):
    # Shared constructor for the fixed-size variants below.
    return FCDenseNet(
        in_channels=in_channel, down_blocks=down,
        up_blocks=up, bottleneck_layers=bn_layers,
        growth_rate=growth, out_chans_first_conv=first_conv,
        n_classes=n_classes, theta=theta)


def FCDenseNet36(in_channel, n_classes, theta):
    """Three-level variant: 4 layers per block, growth rate 12."""
    return _fcdensenet(in_channel, n_classes, theta, (4, 4, 4), (4, 4, 4), 4, 12, 32)


def FCDenseNet43(in_channel, n_classes, theta):
    """Three-level variant: 5 layers per block, growth rate 16."""
    return _fcdensenet(in_channel, n_classes, theta, (5, 5, 5), (5, 5, 5), 5, 16, 32)


def FCDenseNet52(in_channel, n_classes, theta):
    """Three-level variant with asymmetric blocks and a 12-layer bottleneck."""
    return _fcdensenet(in_channel, n_classes, theta, (4, 5, 7), (7, 5, 4), 12, 16, 32)


# networks the original paper provided
def FCDenseNet57(in_channel, n_classes, theta):
    """FC-DenseNet57 configuration (growth rate 12, stem width 48)."""
    return _fcdensenet(in_channel, n_classes, theta,
                       (4, 4, 4, 4, 4), (4, 4, 4, 4, 4), 4, 12, 48)


def FCDenseNet67(in_channel, n_classes, theta):
    """FC-DenseNet67 configuration (growth rate 16, stem width 48)."""
    return _fcdensenet(in_channel, n_classes, theta,
                       (5, 5, 5, 5, 5), (5, 5, 5, 5, 5), 5, 16, 48)


def FCDenseNet103(in_channel, n_classes, theta):
    """FC-DenseNet103 configuration (deepest variant)."""
    return _fcdensenet(in_channel, n_classes, theta,
                       (4, 5, 7, 10, 12), (12, 10, 7, 5, 4), 15, 16, 48)
if __name__ == "__main__":
    # Smoke test: run one dummy volume through a mid-sized network.
    net = FCDenseNet67(in_channel=1, n_classes=5, theta=0.5)
    dummy = torch.FloatTensor(1, 1, 32, 96, 96)
    _ = net(dummy)
3D | kkhuang1990/PlaqueDetection | volume/models/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | volume/models/utils.py | .py | 503 | 16 | # _*_ coding: utf-8 _*_
from torch import nn
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters of ``model``."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
def _initialize_weights(model):
""" model weight initialization """
for m in model.modules():
if isinstance(m, (nn.Conv3d, nn.ConvTranspose3d)):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_() | Python |
3D | kkhuang1990/PlaqueDetection | volume/models/hyper_tiramisu.py | .py | 11,797 | 305 | # _*_ coding: utf-8 _*_
""" implemented the Fully Convolution HyperDenseNet for semantic segmentation """
import torch
import torch.nn as nn
from .utils import _initialize_weights
class DenseLayer(nn.Sequential):
    """One dense layer: BatchNorm -> ReLU -> 3x3x3 conv -> Dropout3d(0.2).

    Maps ``in_channels`` feature maps to ``growth_rate`` new ones while
    keeping the spatial size (padding 1).
    """

    def __init__(self, in_channels, growth_rate):
        super().__init__()
        stages = (
            ('norm', nn.BatchNorm3d(in_channels)),
            ('relu', nn.ReLU(True)),
            ('conv', nn.Conv3d(in_channels, growth_rate, kernel_size=3,
                               stride=1, padding=1, bias=True)),
            ('drop', nn.Dropout3d(0.2)),
        )
        for name, module in stages:
            self.add_module(name, module)

    def forward(self, x):
        return super().forward(x)
class DenseBlock(nn.Module):
    """ Multi-stream (hyper-dense) block: every layer of every stream consumes
    the channel concatenation of ALL streams' features accumulated so far. """
    def __init__(self, n_streams, in_channels, growth_rate, n_layers, upsample=False):
        """
        :param n_streams: int, number of parallel streams
        :param in_channels: int, number of input channels PER STREAM
        :param growth_rate: int, feature maps each dense layer adds
        :param n_layers: int, number of dense layers per stream
        :param upsample: bool; if True return only the newly produced features
            per stream, otherwise return each stream's input plus its new features
        """
        super().__init__()
        self.upsample = upsample
        self.n_layers = n_layers
        self.n_streams = n_streams
        # layers[l][s]: layer l of stream s; its input width is the hyper
        # concatenation of all streams, hence the "* n_streams" factor
        self.layers = nn.ModuleList([nn.ModuleList([DenseLayer((in_channels + i * growth_rate) * n_streams, growth_rate)
                                                    for _ in range(n_streams)]) for i in range(n_layers)])
    def forward(self, x):
        """
        :param x: list of per-stream tensors (all assumed to share one shape -- confirm)
        :return: list of per-stream tensors
        """
        # combine list of tensors into a single tensor
        x_com = torch.cat(x, 1)
        # case of up sampling
        if self.upsample:
            # collect only the fresh feature maps of each stream
            new_features = [[None for _ in range(self.n_layers)] for _ in range(self.n_streams)]
            for l_inx in range(self.n_layers):
                outs = [None for _ in range(self.n_streams)]
                for s_inx in range(self.n_streams):
                    outs[s_inx] = self.layers[l_inx][s_inx](x_com)
                    new_features[s_inx][l_inx] = outs[s_inx]
                # every stream's output feeds the next layer of every stream
                x_com = torch.cat([x_com, *outs], 1)
            return [torch.cat(new_feature,1) for new_feature in new_features]
        # case of down sampling
        else:
            # NOTE(review): mutates the caller's list ``x`` in place and returns it
            for l_inx in range(self.n_layers):
                outs = [None for _ in range(self.n_streams)]
                for s_inx in range(self.n_streams):
                    outs[s_inx] = self.layers[l_inx][s_inx](x_com)
                    x[s_inx] = torch.cat([x[s_inx], outs[s_inx]], 1)
                x_com = torch.cat([x_com, *outs], 1)
            return x
class TransitionDown(nn.Sequential):
    """Channel compression (1x1x1 conv, factor ``theta``) followed by 2x max-pooling."""

    def __init__(self, in_channels, theta=1.0):
        super().__init__()
        compressed = int(theta * in_channels)
        for name, module in (
                ('norm', nn.BatchNorm3d(num_features=in_channels)),
                ('relu', nn.ReLU(inplace=True)),
                ('conv', nn.Conv3d(in_channels, compressed, kernel_size=1,
                                   stride=1, padding=0, bias=True)),
                ('drop', nn.Dropout3d(0.2)),
                ('maxpool', nn.MaxPool3d(2))):
            self.add_module(name, module)

    def forward(self, x):
        return super().forward(x)
class TransitionDownBlock(nn.Module):
    """Apply an independent TransitionDown to each stream of a list of tensors."""

    def __init__(self, n_streams, in_channels, theta):
        super().__init__()
        self.layers = nn.ModuleList(
            [TransitionDown(in_channels, theta) for _ in range(n_streams)])

    def forward(self, x):
        return [layer(stream) for stream, layer in zip(x, self.layers)]
class TransitionUp(nn.Module):
    """Transposed-conv upsampling (stride 2), cropped to and concatenated with a skip tensor."""

    def __init__(self, in_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose3d(
            in_channels=in_channels, out_channels=in_channels,
            kernel_size=3, stride=2, padding=0, bias=True)

    def forward(self, x, skip):
        upsampled = self.convTrans(x)
        # crop the overshoot of kernel-3/stride-2 back to the skip's spatial size
        upsampled = center_crop(upsampled, skip.size(2), skip.size(3), skip.size(4))
        return torch.cat([upsampled, skip], 1)
def center_crop(layer, max_depth, max_height, max_width):
    """Symmetrically crop a 5-D (N, C, D, H, W) tensor to the given spatial extents."""
    _, _, d, h, w = layer.size()
    off_d, off_h, off_w = (d - max_depth) // 2, (h - max_height) // 2, (w - max_width) // 2
    return layer[:, :,
                 off_d:off_d + max_depth,
                 off_h:off_h + max_height,
                 off_w:off_w + max_width]
class TransitionUpBlock(nn.Module):
    """Apply one TransitionUp per stream, pairing each input tensor with its skip tensor."""

    def __init__(self, n_streams, in_channels):
        super().__init__()
        self.layers = nn.ModuleList(
            [TransitionUp(in_channels) for _ in range(n_streams)])

    def forward(self, xs, skips):
        return [layer(x, skip) for x, skip, layer in zip(xs, skips, self.layers)]
class BottleneckBlock(nn.Sequential):
    """Multi-stream DenseBlock at the bottom of the U; emits only the new per-stream features."""

    def __init__(self, n_streams, in_channels, growth_rate, n_layers):
        super().__init__()
        # name kept as 'bottleneck' for checkpoint compatibility
        self.add_module('bottleneck',
                        DenseBlock(n_streams, in_channels, growth_rate, n_layers, upsample=True))

    def forward(self, x):
        return super().forward(x)
class FirstConvBlock(nn.Module):
    """Independent 3x3x3 stem convolution for each input stream (list in, list out)."""

    def __init__(self, n_streams, in_channels, out_channels):
        super().__init__()
        self.layers = nn.ModuleList(
            [nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                       kernel_size=3, stride=1, padding=1, bias=True)
             for _ in range(n_streams)])

    def forward(self, x):
        return [layer(stream) for stream, layer in zip(x, self.layers)]
class FinalConv(nn.Sequential):
    """1x1x1 convolution mapping the fused stream features to per-class scores."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.add_module('finalconv',
                        nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=1, stride=1, padding=0, bias=True))

    def forward(self, x):
        return super().forward(x)
class FCDenseNet(nn.Module):
    """ Hyper-dense multi-stream FC-DenseNet for semantic segmentation.

    Every stream keeps its own convolutions, but each DenseLayer consumes the
    concatenation of ALL streams' features (hyper-dense connectivity), so the
    streams exchange information at every layer.  forward() takes a list of
    ``n_streams`` tensors and returns a single fused score tensor.
    """
    def __init__(self, n_streams=2, in_channels=1, down_blocks=(5,5,5),
                 up_blocks=(5,5,5), bottleneck_layers=5, growth_rate=16,
                 out_chans_first_conv=32, n_classes=5, theta=1.0):
        """
        :param n_streams: int, number of parallel input streams
        :param in_channels: int, channels of each stream's input volume
        :param down_blocks: tuple of int, dense-layer counts on the down path
        :param up_blocks: tuple of int, dense-layer counts on the up path
        :param bottleneck_layers: int, dense layers in the bottleneck
        :param growth_rate: int, feature maps added per dense layer
        :param out_chans_first_conv: int, width of the stem convolution
        :param n_classes: int, number of segmentation classes
        :param theta: float, channel compression factor in TransitionDown
        """
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.theta = theta
        # cur_channels_count tracks PER-STREAM widths; the hyper concat
        # multiplies by n_streams only inside DenseBlock / FinalConv
        cur_channels_count = 0
        skip_connection_channel_counts = []
        ## First Convolution ##
        self.firstconv = FirstConvBlock(n_streams, in_channels, out_chans_first_conv)
        cur_channels_count = out_chans_first_conv
        ## Down-sampling path ##
        self.denseBlocksDown = nn.ModuleList([])
        self.transDownBlocks = nn.ModuleList([])
        for i in range(len(down_blocks)):
            self.denseBlocksDown.append(
                DenseBlock(n_streams, cur_channels_count, growth_rate, down_blocks[i], upsample=False))
            cur_channels_count += (growth_rate*down_blocks[i])
            # skips are taken BEFORE TransitionDown; insert(0, ...) so index i
            # on the up path matches the deepest-first pop order
            skip_connection_channel_counts.insert(0,cur_channels_count)
            self.transDownBlocks.append(TransitionDownBlock(n_streams, cur_channels_count, self.theta))
            cur_channels_count = int(self.theta * cur_channels_count)
        ## Bottleneck ##
        self.add_module('bottleneck',BottleneckBlock(n_streams, cur_channels_count,
                                     growth_rate, bottleneck_layers))
        # upsample=True: the bottleneck emits only the new feature maps
        prev_block_channels = growth_rate*bottleneck_layers
        cur_channels_count += prev_block_channels
        ## Up-sampling path ##
        self.transUpBlocks = nn.ModuleList([])
        self.denseBlocksUp = nn.ModuleList([])
        for i in range(len(up_blocks)):
            self.transUpBlocks.append(TransitionUpBlock(n_streams, prev_block_channels))
            cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]
            if i != len(up_blocks) - 1:
                self.denseBlocksUp.append(DenseBlock(n_streams,
                    cur_channels_count, growth_rate, up_blocks[i],
                    upsample=True))
            else:
                # last block keeps its input so the classifier sees everything
                self.denseBlocksUp.append(DenseBlock(n_streams,
                    cur_channels_count, growth_rate, up_blocks[i],
                    upsample=False))
            prev_block_channels = growth_rate*up_blocks[i]
        cur_channels_count += prev_block_channels
        ## final convolution
        # streams are fused (channel concat) right before the classifier
        self.finalconv = FinalConv(n_streams * cur_channels_count, n_classes)
        _initialize_weights(self)
        # self.softmax = nn.LogSoftmax(dim=1)
    def forward(self, x):
        """ Hyper input as a list of tensors; returns one (N, n_classes, D, H, W) tensor. """
        out = self.firstconv(x)
        # print([item.size() for item in out])
        skip_connections = []
        for i in range(len(self.down_blocks)):
            out = self.denseBlocksDown[i](out)
            skip_connections.append(out)
            out = self.transDownBlocks[i](out)
            # print([item.size() for item in out])
        out = self.bottleneck(out)
        # print([item.size() for item in out])
        for i in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.transUpBlocks[i](out, skip)
            out = self.denseBlocksUp[i](out)
            # print([item.size() for item in out])
        out = torch.cat(out, 1)
        out = self.finalconv(out)
        # print(out.size())
        # out = self.softmax(out)
        return out
def _hyper_fcdensenet(n_streams, in_channel, n_classes, theta, down, up, bn_layers, growth, first_conv):
    # Shared constructor for the fixed-size multi-stream variants below.
    return FCDenseNet(
        n_streams=n_streams, in_channels=in_channel, down_blocks=down,
        up_blocks=up, bottleneck_layers=bn_layers,
        growth_rate=growth, out_chans_first_conv=first_conv,
        n_classes=n_classes, theta=theta)


def FCDenseNet36(in_channel, n_classes, theta, n_streams=2):
    """Three-level variant: 4 layers per block, growth rate 12."""
    return _hyper_fcdensenet(n_streams, in_channel, n_classes, theta,
                             (4, 4, 4), (4, 4, 4), 4, 12, 32)


def FCDenseNet43(in_channel, n_classes, theta, n_streams=2):
    """Three-level variant: 5 layers per block, growth rate 16."""
    return _hyper_fcdensenet(n_streams, in_channel, n_classes, theta,
                             (5, 5, 5), (5, 5, 5), 5, 16, 32)


def FCDenseNet52(in_channel, n_classes, theta, n_streams=2):
    """Three-level variant with asymmetric blocks and a 12-layer bottleneck."""
    return _hyper_fcdensenet(n_streams, in_channel, n_classes, theta,
                             (4, 5, 7), (7, 5, 4), 12, 16, 32)


# networks the original paper provided
def FCDenseNet57(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet57 configuration (growth rate 12, stem width 48)."""
    return _hyper_fcdensenet(n_streams, in_channel, n_classes, theta,
                             (4, 4, 4, 4, 4), (4, 4, 4, 4, 4), 4, 12, 48)


def FCDenseNet67(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet67 configuration (growth rate 16, stem width 48)."""
    return _hyper_fcdensenet(n_streams, in_channel, n_classes, theta,
                             (5, 5, 5, 5, 5), (5, 5, 5, 5, 5), 5, 16, 48)


def FCDenseNet103(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet103 configuration (deepest variant)."""
    return _hyper_fcdensenet(n_streams, in_channel, n_classes, theta,
                             (4, 5, 7, 10, 12), (12, 10, 7, 5, 4), 15, 16, 48)
if __name__ == "__main__":
    # Smoke test: feed two dummy streams through a mid-sized network.
    n_streams = 2
    net = FCDenseNet67(n_streams=n_streams, in_channel=1, n_classes=5, theta=1.0)
    streams = [torch.FloatTensor(1, 1, 32, 96, 96) for _ in range(n_streams)]
    _ = net(streams)
| Python |
3D | kkhuang1990/PlaqueDetection | image/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | image/dataloader.py | .py | 26,076 | 560 | # _*_ coding: utf-8 _*_
""" Load data using hard mining, which means only load data whose segmentation accuracy is lower than the threshold
obtained from the previous epoch.
"""
import matplotlib as mpl
mpl.use('Agg')
import random
import os
import os.path as osp
from os import listdir
import numpy as np
import time
import torch
from torch.utils.data import Dataset, DataLoader
from skimage import io
from skimage import transform
from .transforms import CentralCrop, Gray2Mask, ToTensor, HU2Gray, Rescale, Gray2Binary, HU2GrayMultiStreamToTensor
from .transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, RandomFlip
from .transforms import Gray2Triple, Gray2TripleWithBound
from utils import hu2lut
from torchvision import transforms
from vision import sample_stack
from torch.autograd import Variable
torch.manual_seed(42) # for shuffle=True
class CPRPlaqueTrainDataset(Dataset):
    def __init__(self, data_dir, metric_prev_epoch=None, phases_prev_epoch=None, transform=None, mode='train',
                 is_hard_mining=False, percentile=85, multi_view=False, only_plaque=False, config='config',
                 bc_learning=None, n_classes=5):
        """ Dataset of CPR plaque slices with optional hard-example mining.

        Hard mining recursively keeps the 'hard' samples (metric below a
        percentile of the previous epoch) for the next training epoch.

        :param data_dir: str, root holding <sample>/<artery>/<axis>/{image,mask}
        :param metric_prev_epoch: ndarray or None, per-sample metric from the
            previous epoch (drives hard mining in 'train' mode)
        :param phases_prev_epoch: list or None, (slice_path, label_path) pairs
            of the previous epoch; None triggers a fresh scan of the data dir
        :param transform: callable applied to each (slice, label) pair
        :param mode: str, 'train'/'val'/'test'; hard mining only applies to 'train'
        :param is_hard_mining: bool, whether to mine hard samples between epochs
        :param percentile: int, metric percentile at or below which samples are kept
        :param multi_view: bool, load all three axes instead of a single view
        :param only_plaque: bool, in 'train' mode keep all plaque slices but
            subsample the plaque-free ones
        :param config: str, sub-directory of ../configs holding <mode>.txt sample lists
        :param bc_learning: None | 'bc' | 'bc_plus', between-class learning mixing mode
        :param n_classes: int, number of annotation classes (used for one-hot mixing)
        """
        super(CPRPlaqueTrainDataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.mode = mode
        self.is_hard_mining = is_hard_mining
        self.percentile = percentile
        self.multi_view = multi_view # whether to use multi-view inputs or not
        self.only_plaque = only_plaque
        self.config = config
        self.bc_learning = bc_learning # BC-learning strategy: None, 'bc' or 'bc_plus'
        self.n_classes = n_classes
        # initialize phases for different modes: only 'train' goes through
        # the hard-mining update; other modes always load the full list
        if self.mode == 'train':
            self.phases = self.update_phases(metric_prev_epoch, phases_prev_epoch)
        else:
            self.phases = self.get_phases()
def update_phases(self, metric_prev_epoch, phases_prev_epoch, thres_num_smps=300):
""" update the phases by mining the hard samples
:param thres_num_smps: int, when length of phases is blow given threshold, hard mining is not operated any nore
:return phases: refined phases after mining the hard samples
"""
if phases_prev_epoch is None:
if self.only_plaque:
phases = self.get_phases_plaque_samples()
else:
phases = self.get_phases()
else:
if self.is_hard_mining and len(phases_prev_epoch) >= thres_num_smps:
thres = np.percentile(metric_prev_epoch, self.percentile)
phases = [phase for phase, metric in zip(phases_prev_epoch, metric_prev_epoch) if metric <= thres]
else:
phases = phases_prev_epoch
return phases
    def get_phases(self):
        """ Enumerate every (slice_path, label_path) pair listed for this mode.

        Reads ../configs/<config>/<mode>.txt for the sample names, then walks
        <data_dir>/<sample>/<artery>/ordinate/{image,mask} and pairs the
        sorted .tiff files one-to-one.

        :return: list of (slice_path, label_path) tuples
        """
        phases = []
        with open(osp.join("../configs/{}".format(self.config), self.mode+'.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            sample_path = osp.join(self.data_dir, sample)
            for artery in sorted(listdir(sample_path)):
                artery_path = osp.join(sample_path, artery)
                # since there are less slices along ordinate/abscissa axis, we extract along ordinate axis
                image_path = osp.join(artery_path, 'ordinate', 'image')
                mask_path = osp.join(artery_path, 'ordinate', 'mask')
                # extract slice files; hidden files (e.g. .DS_Store) are skipped
                slice_files = sorted([file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
                label_files = sorted([file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
                # sorting keeps image and mask files aligned by their shared naming -- assumed, confirm
                for slice_file, label_file in zip(slice_files, label_files):
                    slice_path = osp.join(image_path, slice_file)
                    label_path = osp.join(mask_path, label_file)
                    phases.append((slice_path, label_path))
        return phases
# def get_phases_risk_samples(self, ratio=0.1):
# """ choose all samples with non-zero risk plus randomly choose 10% samples with zero risk """
#
# phases = []
# with open(osp.join("./{}".format(self.config), self.mode+'.txt'), 'r') as reader:
# samples = [line.strip('\n') for line in reader.readlines()]
#
# for sample in samples:
# sample_path = osp.join(self.data_dir, sample)
#
# for artery in sorted(listdir(sample_path)):
# artery_path = osp.join(sample_path, artery)
# # since there are less slices along ordinate/abscissa axis, we extract along ordinate axis
# image_path = osp.join(artery_path, 'ordinate', 'image')
# mask_path = osp.join(artery_path, 'ordinate', 'mask')
#
# # extract slice files
# slice_files = sorted([file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
# label_files = sorted([file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
# # extract slice information
#
# risks = np.genfromtxt(osp.join(artery_path, "risk_labels.txt")).astype(np.uint8)
#
# for slice_file, label_file, risk in zip(slice_files, label_files, risks):
# slice_path = osp.join(image_path, slice_file)
# label_path = osp.join(mask_path, label_file)
# # for not 'train' mode, all the samples are loaded
# if self.mode != 'train':
# phases.append((slice_path, label_path))
# # for 'train' mode
# else:
# if risk == 0:
# x = random.uniform(0, 1)
# if x < ratio:
# phases.append((slice_path, label_path))
# else:
# phases.append((slice_path, label_path))
#
# return phases
    def get_phases_plaque_samples(self, ratio=0.05):
        """ Keep every slice containing a plaque; subsample plaque-free slices.

        In 'train' mode a slice with neither a non-calcified nor a calcified
        plaque label is kept only with probability ``ratio`` (default 5%);
        in any other mode all slices are returned.

        :param ratio: float, sampling probability for plaque-free slices
        :return: list of (slice_path, label_path) tuples
        """
        phases = []
        # statistics only: counts of slices carrying each plaque type
        num_noncal, num_cal = 0, 0
        with open(osp.join("../configs/{}".format(self.config), self.mode+'.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            sample_path = osp.join(self.data_dir, sample)
            for artery in sorted(listdir(sample_path)):
                artery_path = osp.join(sample_path, artery)
                # since there are less slices along ordinate/abscissa axis, we extract along ordinate axis
                image_path = osp.join(artery_path, 'ordinate', 'image')
                mask_path = osp.join(artery_path, 'ordinate', 'mask')
                # extract slice files; hidden files are skipped
                slice_files = sorted([file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
                label_files = sorted([file for file in listdir(mask_path) if file.endswith('.tiff') and not file.startswith('.')])
                # per-slice plaque indicators stored alongside the artery
                noncals = np.genfromtxt(osp.join(artery_path, "non_calcified_plaque_labels.txt")).astype(np.uint8)
                cals = np.genfromtxt(osp.join(artery_path, "calcified_plaque_labels.txt")).astype(np.uint8)
                for slice_file, label_file, noncal, cal in zip(slice_files, label_files, noncals, cals):
                    slice_path = osp.join(image_path, slice_file)
                    label_path = osp.join(mask_path, label_file)
                    # not in 'train' mode, all the samples are loaded
                    if self.mode != 'train':
                        phases.append((slice_path, label_path))
                    # in 'train' mode, load all abnormal slices and only a ratio of normal slices
                    else:
                        if noncal == 0 and cal == 0:
                            x = random.uniform(0, 1)
                            if x < ratio:
                                phases.append((slice_path, label_path))
                        else:
                            phases.append((slice_path, label_path))
                    # counters run over ALL slices, including sampled-out normals
                    if noncal != 0:
                        num_noncal += 1
                    if cal != 0:
                        num_cal += 1
        print("{} non-cals {} cals/ {} samples".format(num_noncal, num_cal, len(phases)))
        return phases
def __len__(self):
return len(self.phases)
def __getitem__(self, inx):
if self.mode=='train' and self.bc_learning is not None: # BC learning
slice_path1, label_path1 = self.phases[random.randint(0, len(self.phases) - 1)]
slice1, label1 = self.read_multiview_sample(slice_path1, label_path1)
slice1, label1 = self.transform((slice1, label1))
slice_path2, label_path2 = self.phases[random.randint(0, len(self.phases) - 1)]
slice2, label2 = self.read_multiview_sample(slice_path2, label_path2)
slice2, label2 = self.transform((slice2, label2))
# Mix two images
r = random.uniform(0, 1)
if self.bc_learning == 'bc_plus':
g1 = slice1.std()
g2 = slice2.std()
p = 1.0 / (1 + g1 / g2 * (1 - r) / r)
sample_img = ((slice1 * p + slice2 * (1 - p)) / np.sqrt(p ** 2 + (1 - p) ** 2)).float()
elif self.bc_learning == 'bc':
sample_img = (slice1 * r + slice2 * (1 - r)).float()
# Mix two labels
encoded_label1 = torch.zeros(self.n_classes, *list(label1.size()))
encoded_label1.scatter_(0, label1.unsqueeze(0), 1)
encoded_label2 = torch.zeros(self.n_classes, *list(label1.size()))
encoded_label2.scatter_(0, label2.unsqueeze(0), 1)
sample_mask = encoded_label1 * r + encoded_label2 * (1 - r)
else:
slice_path, label_path = self.phases[inx]
slice, label = self.read_multiview_sample(slice_path, label_path)
sample_img, sample_mask = self.transform((slice, label))
return (sample_img, sample_mask)
    def read_multiview_sample(self, slice_path, label_path):
        """ read multiview sample (image and mask) from slice_path and label_path

        Returns (slice, label): slice is H x W x len(axis_names) int16 (one
        channel per view), label is the 2D mask taken from the 'applicate'
        view. Unreadable files fall back to 512 x 512 zero arrays.
        """
        if self.multi_view:
            axis_names = ['applicate', 'abscissa', 'ordinate']
        else:
            axis_names = ['applicate']
        # 'applicate' is always first, so `slice` and `label` are set before any
        # other axis reuses them
        for a_inx, axis_name in enumerate(axis_names):
            slice_path_axis = slice_path.replace('ordinate', axis_name)
            label_path_axis = label_path.replace('ordinate', axis_name)
            try:
                slice_axis = io.imread(slice_path_axis)
            except Exception as e:
                print("{} error happened in {}".format(e, slice_path))
                slice_axis = np.zeros((512, 512), dtype=np.int16)
            try:
                label_axis = io.imread(label_path_axis)
            except Exception as e:
                print("{} error happened in {}".format(e, label_path))
                label_axis = np.zeros((512, 512), dtype=np.uint8)
            if axis_name == 'applicate':
                # the applicate view fixes the target size and provides the label
                new_h, new_w = slice_axis.shape
                slice = np.zeros((*slice_axis.shape, len(axis_names)), dtype=np.int16)
                label = label_axis
            else:
                # if slice size doesn't match with each other, resize them into the same as applicate slice
                if slice_axis.shape != (new_h, new_w):
                    slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                  preserve_range=True).astype(np.int16)
            slice[:, :, a_inx] = slice_axis
        return slice, label
class CPRPlaquePlotDataset(Dataset):
    """ Dataset that yields one whole artery per item, used for plotting test
    results: every slice of the artery is stacked into a single tensor. """
    def __init__(self, data_dir, transform=None, mode='train', multi_view=False, config='config'):
        """ dataloader for plotting the test results

        :param data_dir: root directory of the CPR data
        :param transform: callable applied to each (slice, label) pair
        :param mode: split file to read (../configs/<config>/<mode>.txt)
        :param multi_view: whether to use multi-view inputs or not
        :param config: config folder name holding the split files
        """
        super(CPRPlaquePlotDataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.mode = mode
        self.multi_view = multi_view # whether to use multi-view inputs or not
        self.config = config
        self.phases = self.get_phases()
    def get_phases(self):
        """ Collect one (image dir, mask dir) pair per artery for this split. """
        phases = []
        with open(osp.join("../configs/{}".format(self.config), self.mode+'.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            sample_path = osp.join(self.data_dir, sample)
            for artery in sorted(listdir(sample_path)):
                artery_path = osp.join(sample_path, artery)
                # since there are less slices along ordinate/abscissa axis, we extract along ordinate axis
                image_path = osp.join(artery_path, 'ordinate', 'image')
                mask_path = osp.join(artery_path, 'ordinate', 'mask')
                phases.append((image_path, mask_path))
        return phases
    def __len__(self):
        # one item per artery, not per slice
        return len(self.phases)
    def __getitem__(self, inx):
        """ Load every slice of artery `inx` and return
        (stacked images, stacked masks, sample name, first slice index). """
        image_path, mask_path = self.phases[inx]
        # sample name is "<patient>/<artery>" taken from the directory path
        sample_name = '/'.join(image_path.split('/')[-4:-2])
        # extract slice files
        slice_files = sorted(
            [file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
        start_file, end_file = slice_files[0], slice_files[-1]
        # numeric slice range is encoded in the file names
        start, end = int(start_file.split('.')[0]), int(end_file.split('.')[0])
        for s_inx, slice_file in enumerate(slice_files):
            if self.multi_view:
                axis_names = ['applicate', 'abscissa', 'ordinate']
            else:
                axis_names = ['applicate']
            for a_inx, axis_name in enumerate(axis_names):
                slice_path_axis = osp.join(image_path.replace('ordinate', axis_name), slice_file)
                label_path_axis = osp.join(mask_path.replace('ordinate', axis_name), slice_file)
                slice_axis = io.imread(slice_path_axis)
                label_axis = io.imread(label_path_axis)
                if axis_name == 'applicate':
                    # the applicate view fixes the target size and provides the label
                    new_h, new_w = slice_axis.shape
                    slice = np.zeros((*slice_axis.shape, len(axis_names)), dtype=np.int16)
                    label = label_axis
                else:
                    if slice_axis.shape != (new_h, new_w):
                        slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                      preserve_range=True).astype(np.int16)
                slice[:, :, a_inx] = slice_axis
            slice, label = self.transform((slice, label))
            if s_inx == 0:
                # allocate the per-artery buffers once the first slice is known
                # for Hyper DenseNet input (this part can be omitted cause Hyper DenseNet doesn't work well)
                if isinstance(slice, list):
                    sample_img = [torch.zeros((len(slice_files), *list(slice[0].size())), dtype=slice[0].dtype)
                                  for _ in range(len(slice))]
                else:
                    sample_img = torch.zeros((len(slice_files), *list(slice.size())), dtype=slice.dtype)
                sample_mask = torch.zeros((len(slice_files), *list(label.size())), dtype=label.dtype)
            if isinstance(slice, list):
                for i in range(len(slice)):
                    sample_img[i][s_inx] = slice[i]
            else:
                sample_img[s_inx] = slice
            sample_mask[s_inx] = label
        return (sample_img, sample_mask, sample_name, start)
def dataloader(data_dir, transform, mode, metric_prev_epoch=None, phases_prev_epoch=None, shuffle=True,
               is_hard_mining=False, num_workders=8, batch_size= 128, percentile=85, multi_view=False, only_plaque=False,
               config='config', bc_learning=None):
    """ Wrap a CPRPlaqueTrainDataset for the given phase in a DataLoader.

    NOTE(review): `num_workders` is a typo for `num_workers` but is part of
    the public keyword interface, so it is kept.
    """
    dataset = CPRPlaqueTrainDataset(data_dir, metric_prev_epoch, phases_prev_epoch, transform, mode,
                                    is_hard_mining, percentile, multi_view, only_plaque, config, bc_learning)
    # each worker re-seeds the random module deterministically from its worker id
    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
                      num_workers=num_workders, worker_init_fn=lambda x: random.seed(x))
def read_train_data(data_dir, transform, mode='train', metric_prev_epoch=None, phases_prev_epoch=None, shuffle=True,
                    is_hard_mining=False, num_workers= 8, batch_size=128, percentile=85, multi_view=False, onlyrisk=False,
                    config='config', bc_learning=None):
    """ Build one DataLoader per phase.

    Train mode produces loaders for 'train', 'val' and 'test'; otherwise only
    'test'. The 'val' transform is aliased to the 'test' transform (this
    mutates the caller's `transform` dict, as before).
    """
    if mode == 'train':
        phase_names = ['train', 'val', 'test']
    else:
        phase_names = ['test']
    transform['val'] = transform['test']
    return {phase: dataloader(data_dir, transform[phase], phase, metric_prev_epoch, phases_prev_epoch,
                              shuffle, is_hard_mining, num_workers, batch_size, percentile, multi_view,
                              onlyrisk, config, bc_learning)
            for phase in phase_names}
def read_plot_data(data_dir, transform, plot_data, shuffle=False, num_workers= 16, batch_size=128, multi_view=False,
                   config='config'):
    """ Build the DataLoader used for test-time plotting.

    Note: `batch_size` is accepted for interface compatibility but the loader
    is fixed to batches of 1 (one whole artery per batch).
    """
    transform['val'] = transform['test']
    plot_set = CPRPlaquePlotDataset(data_dir, transform[plot_data], plot_data, multi_view, config)
    loader = DataLoader(dataset=plot_set, shuffle=shuffle, num_workers=num_workers,
                        batch_size=1, worker_init_fn=lambda x: random.seed(x))
    return {plot_data: loader}
def show_train_dataloader():
    """ Scan the train dataloader, plot data samples and boundary weights, and
    count the number of pixels per class. """
    start = time.time()
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    sample_dir = "./data_samples"
    bound_weight_dir = "./bound_weight"
    trans_params = {
        'rescale' : 96,
        'central_crop' : 160,
        'output_channel' : 3,
        'mode' : 'train',
        'num_workers' : 16,
        'batch_size' : 64,
        'bc_learning' : None,
        'do_plot' : True,
        'onlyrisk' : False
    }
    # pick the mask converter matching the number of output classes
    if trans_params['output_channel'] == 2:
        ToMask = Gray2Binary()
    elif trans_params['output_channel'] == 3:
        ToMask = Gray2Triple()
    elif trans_params['output_channel'] == 4:
        ToMask = Gray2TripleWithBound(n_classes=4)
    elif trans_params['output_channel'] == 5:
        ToMask = Gray2TripleWithBound(n_classes=5)
    composed = {'train': transforms.Compose([CentralCrop(trans_params['central_crop']),
                                        Rescale(trans_params['rescale']),
                                        ToMask,
                                        ToTensor()]),
                'test': transforms.Compose([CentralCrop(trans_params['central_crop']),
                                            Rescale(trans_params['rescale']),
                                            ToMask,
                                            ToTensor()])}
    dataloaders = read_train_data(data_dir, composed, trans_params['mode'], None, None, False, False,
            trans_params['num_workers'], trans_params['batch_size'], 85, False, trans_params['onlyrisk'], 'config', trans_params['bc_learning'])
    phases = ['train'] if trans_params['mode'] == 'train' else ['test']
    num_pixel = np.zeros(trans_params['output_channel'], dtype=np.uint32)
    # scan dataloader and calculate weight for each class
    for phase in phases:
        fig_phase = phase if trans_params['bc_learning'] is None else phase+'_bc'
        fig_dir = osp.join(sample_dir, fig_phase)
        bound_fig_dir = osp.join(bound_weight_dir, phase)
        if not osp.exists(bound_fig_dir):
            os.makedirs(bound_fig_dir)
        if not osp.exists(fig_dir):
            os.makedirs(fig_dir)
        for i, sample in enumerate(dataloaders[phase]):
            image, mask = sample
            print("image size: {}".format(image.shape))
            print("mask size: {}".format(mask.shape))
            # keep only the first channel for visualization
            if image.size(1) == 1:
                image = torch.squeeze(image, dim=1).numpy()
            else:
                image = image[:,0].numpy()
            mask_var = Variable(mask.cuda())
            # boundary-weight maps for the whole batch, plotted per batch
            weights = bound_weight(mask_var, w0=100.0, sigma=1.0, n_classes=trans_params['output_channel']).cpu().numpy()
            fig_name = bound_fig_dir + "/" + "{}".format(i)
            sample_stack(weights, rows=10, cols=10, start_with=0, show_every=1, scale=4, fig_name = fig_name)
            mask = mask.numpy() # [N, H, W]
            # for plot data samples
            if trans_params['do_plot']:
                if trans_params['bc_learning'] is None or phase != 'train':
                    data = np.concatenate([np.stack((input, label)) for (input, label) in zip(image, mask)], axis=0)
                    fig_name = fig_dir + "/" + "{}".format(i)
                    sample_stack(data, rows=100, cols=2, start_with=0, show_every=1, fig_name=fig_name)
                else:
                    # with BC learning the mask is one-hot, so plot each channel
                    data = np.concatenate([np.stack((input, *label)) for (input, label) in zip(image, mask)], axis=0)
                    fig_name = fig_dir + "/" + "{}".format(i)
                    sample_stack(data, rows=100, cols=1+trans_params['output_channel'], start_with=0, show_every=1, fig_name=fig_name)
            # accumulate per-class pixel counts over the whole split
            for i, label in enumerate(mask):
                for j in range(trans_params['output_channel']):
                    num_pixel[j] += np.sum(label == j)
    print("# of pixels for each class: {}".format(num_pixel))
    # nlf_weight = -1.0 * np.log(num_pixel / float(num_pixel.sum()))
    # print("negative log frequency weight: {}".format(nlf_weight))
    # if trans_params['onlyrisk']:
    #     np.save("./nlf_weight_onlyrisk.npy", nlf_weight)
    # else:
    #     np.save("./nlf_weight_all_{}.npy".format(trans_params['output_channel']), nlf_weight)
    #
    # class_freq = num_pixel / num_pixel.sum()
    # mfb_weight = np.median(class_freq) / class_freq
    # print("median frequency balancing weight: {}".format(mfb_weight))
    # if trans_params['onlyrisk']:
    #     np.save("./mfb_weight_onlyrisk.npy", mfb_weight)
    # else:
    #     np.save("./mfb_weight_all_{}.npy".format(trans_params['output_channel']), mfb_weight)
def cal_mean_std_dataloader():
    """ Scan the (un-normalized) train dataloader and print the mean and std
    over all pixels, for later use in Normalize. """
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    trans_params = {
        'rescale' : 96,
        'central_crop' : 160,
        'output_channel' : 5,
        'mode' : 'train',
        'num_workers' : 16,
        'batch_size' : 64,
        'bc_learning' : None,
        'do_plot' : False,
        'onlyrisk' : False
    }
    # pick the mask converter matching the number of output classes
    if trans_params['output_channel'] == 2:
        ToMask = Gray2Binary()
    elif trans_params['output_channel'] == 3:
        ToMask = Gray2Triple()
    elif trans_params['output_channel'] == 4:
        ToMask = Gray2TripleWithBound(n_classes=4)
    elif trans_params['output_channel'] == 5:
        ToMask = Gray2TripleWithBound(n_classes=5)
    # norm=False so statistics are computed on raw (unscaled) pixel values
    composed = {'train': transforms.Compose([RandomRotation(),
                                             RandomFlip(),
                                             RandomCentralCrop(),
                                            Rescale(trans_params['rescale']),
                                            ToMask,
                                            ToTensor(norm=False)]),
                'test': transforms.Compose([CentralCrop(trans_params['central_crop']),
                                            Rescale(trans_params['rescale']),
                                            ToMask,
                                            ToTensor(norm=False)])}
    dataloaders = read_train_data(data_dir, composed, trans_params['mode'], None, None, True, False,
            trans_params['num_workers'], trans_params['batch_size'], 85, False, trans_params['onlyrisk'], 'config', trans_params['bc_learning'])
    phases = ['train'] if trans_params['mode'] == 'train' else ['test']
    images = []
    # scan dataloader and calculate weight for each class
    for phase in phases:
        for i, sample in enumerate(dataloaders[phase]):
            image, mask = sample
            print("image size: {}".format(image.shape))
            print("mask size: {}".format(mask.shape))
            # keep only the first channel when the input is multi-view
            if image.size(1) == 1:
                image = torch.squeeze(image, dim=1).numpy()
            else:
                image = image[:,0].numpy()
            print("max HU: {}, min HU: {}, mean HU: {}".format(image.max(), image.min(), image.mean()))
            images.append(image)
    images = np.concatenate(images, axis=0)
    img_mean = images.mean()
    img_std = images.std()
    print("Mean of all pixels: {}".format(img_mean))
    print("Std of all pixels: {}".format(img_std))
if __name__ == "__main__":
    # script entry point: compute dataset mean/std over the training loader
    cal_mean_std_dataloader()
# plaque_statistic_multi_preocess(num_workers=18)
# _*_ coding: utf-8 _*_
""" different types of transforms """
from skimage import transform
import torch
import random
import warnings
import cv2
from os import listdir
import os
import os.path as osp
from skimage.transform import rotate
from skimage import io
import numpy as np
import shutil
from utils import hu2lut, gray2mask, central_crop, hu2lut, hu2gray
from utils import gray2innerouterbound, gray2bound, gray2mask, rgb2mask, gray2triplewithbound
from utils import gray2innerbound, gray2outerbound
from scipy import ndimage
from sklearn.preprocessing import label_binarize
from torchvision.transforms import ToTensor
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
class RandomRotation(object):
    """Rotate image and mask by a random multiple of `angle` with probability `prob`."""
    def __init__(self, angle=90, prob=0.8):
        self.angle = angle  # angular step: rotation drawn from {0, angle, 2*angle, ...} < 360
        self.prob = prob    # chance of applying the rotation at all
    def __call__(self, sample):
        image, mask = sample
        # draw the candidate angle first, then the coin flip (keeps the RNG call order stable)
        chosen_angle = random.randrange(0, 360, self.angle)
        coin = random.uniform(0, 1)
        if coin > self.prob:
            return (image, mask)
        rotated_image = rotate(image, chosen_angle, mode='reflect', preserve_range=True)
        # order=0 -> nearest neighbour so label values are not interpolated
        rotated_mask = rotate(mask, chosen_angle, mode='reflect', preserve_range=True, order=0)
        return (rotated_image, rotated_mask)
class RandomFlip(object):
    """Flip image and mask together along a random axis with probability `prob`."""
    def __init__(self, prob=0.8):
        self.prob = prob  # chance of flipping
    def __call__(self, sample):
        image, mask = sample
        if random.uniform(0, 1) < self.prob:
            axis = random.randint(0, 1)  # 0: flip rows, 1: flip columns
            image, mask = np.flip(image, axis), np.flip(mask, axis)
        return (image, mask)
class Normalize(object):
    """Standardize the image with fixed mean/std; the mask passes through unchanged."""
    def __init__(self, m=-4.676876544952393, v=182.7666473388672):
        self.m = m  # dataset-wide pixel mean
        self.v = v  # dataset-wide pixel std
    def __call__(self, sample):
        image, mask = sample
        return ((image - self.m) / self.v, mask)
class Rescale(object):
    """Rescale image and mask to a given size with the value range preserved.

    Args:
        output_size (tuple or int): desired output size. If tuple, output is
            exactly (h, w); if int, the smaller image edge is matched to it
            keeping the aspect ratio.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        image, mask = sample
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # match the smaller edge, keep aspect ratio (integer arithmetic)
            if h > w:
                target = (self.output_size * h // w, self.output_size)
            else:
                target = (self.output_size, self.output_size * w // h)
        else:
            target = self.output_size
        new_h, new_w = int(target[0]), int(target[1])
        image = transform.resize(image, (new_h, new_w), mode= 'reflect', preserve_range=True)
        # order=0 keeps the mask labels discrete
        mask = transform.resize(mask, (new_h, new_w), mode= 'reflect', preserve_range=True, order=0)
        return image, mask
class RandomCrop(object):
    """ Randomly crop image and mask to `output_size`.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size
    def __call__(self, sample):
        image, mask = sample
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        # bug fix: the upper bound must include the last valid offset. The
        # former `randint(0, h - new_h)` raised ValueError whenever the crop
        # size equalled the image size and could never pick the bottom/right
        # crop (np.random.randint's `high` is exclusive).
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[top: top + new_h, left: left + new_w]
        mask = mask[top: top + new_h, left: left + new_w]
        return image, mask
class RGB2Mask(object):
    """ convert rgb image to 2D mask """
    def __call__(self, sample):
        image, rgb = sample
        # colour -> class-index mapping is delegated to utils.rgb2mask
        mask = rgb2mask(rgb)
        return image, mask
class Gray2Mask(object):
    """ convert gray-scale image to 2D mask """
    def __call__(self, sample):
        image, gray = sample
        # grayscale -> class-index mapping is delegated to utils.gray2mask
        mask = gray2mask(gray)
        return image, mask
class Intercept(object):
    """Replace HU values at or below `intercept` with random values in [intercept, -200)."""
    def __init__(self, intercept=-450):
        self.intercept = intercept
    def __call__(self, sample):
        hu, gray = sample
        # NOTE(review): the random field is sized from the mask — assumes hu
        # and gray share H, W; confirm with callers
        h, w = gray.shape[:2]
        low = hu <= self.intercept
        # draw a full random field once, then substitute only the masked pixels in place
        noise = np.random.randint(self.intercept, -200, (h, w), dtype=hu.dtype)
        hu[low] = noise[low]
        return hu, gray
class Gray2TripleWithBound(object):
    """ convert grayscale value into triple segmentation + bound """
    def __init__(self, n_classes=4, width=1): # number of classes after conversion
        self.n_classes = n_classes  # number of output classes (including bound class)
        self.width = width          # boundary width in pixels
    def __call__(self, sample):
        image, gray = sample
        # conversion logic lives in utils.gray2triplewithbound
        mask = gray2triplewithbound(gray, self.n_classes, self.width)
        return image, mask
class Gray2Bound(object):
    """ convert mask with grayscale value to boundary """
    def __init__(self, n_classes=3, width=2):
        self.width = width # boundary width
        self.n_classes = n_classes  # number of classes in the source mask
    def __call__(self, sample):
        image, gray = sample
        # conversion logic lives in utils.gray2bound
        bound = gray2bound(gray, self.n_classes, self.width)
        return image, bound
class Gray2InnerOuterBound(object):
    """ convert mask with grayscale value to inner and outer boundaries
        where inner and outer boundaries are treated as different classes
    """
    def __init__(self, width=2):
        self.width = width # boundary width
    def __call__(self, sample):
        image, gray = sample
        # conversion logic lives in utils.gray2innerouterbound
        bound = gray2innerouterbound(gray, self.width)
        return image, bound
class Gray2InnerBound(object):
    """ convert mask with grayscale value to inner bound
        this is to test whether the WHD (weighted Hausdorff Distance) loss works well or not
    """
    def __init__(self, width=1):
        self.width = width # boundary width
    def __call__(self, sample):
        image, gray = sample
        # conversion logic lives in utils.gray2innerbound
        inner_bound = gray2innerbound(gray, self.width)
        return image, inner_bound
class Gray2OuterBound(object):
    """ convert mask with grayscale value to outer bound
        this is to test whether the WHD (weighted Hausdorff Distance) loss works well or not
    """
    def __init__(self, width=1):
        self.width = width # boundary width
    def __call__(self, sample):
        image, gray = sample
        # conversion logic lives in utils.gray2outerbound
        outer_bound = gray2outerbound(gray, self.width)
        return image, outer_bound
class Gray2Triple(object):
    """Collapse the grayscale annotation to three classes:
    0 background, 1 artery interior (29), 2 outline/plaque (76, 151, 255)."""
    def __call__(self, sample):
        image, gray = sample
        mask = np.zeros_like(gray, dtype=np.uint8)
        # 76 (low-density plaque), 151 (calcification) and 255 (artery border)
        # are merged into a single outline class
        mask[np.isin(gray, (76, 151, 255))] = 2
        mask[gray == 29] = 1
        return image, mask
class Mask2Gray(object):
    """Map class indices back to the annotation grayscale values.

    4 -> 76  (red: low-density plaque)
    3 -> 151 (orange: calcification)
    2 -> 255 (white: border of the artery, small in healthy patients)
    1 -> 29  (blue: inside of the artery)
    0 -> 0   (black: background)
    """
    def __call__(self, sample):
        image, mask = sample
        h, w = image.shape[:2]
        gray = np.zeros((h, w), dtype=np.uint8)
        for class_idx, gray_value in ((4, 76), (3, 151), (2, 255), (1, 29)):
            gray[mask == class_idx] = gray_value
        return image, gray
class Gray2Binary(object):
    """Binary plaque mask: 1 where the annotation marks calcified (151) or
    low-density (76) plaque, 0 elsewhere."""
    def __call__(self, sample):
        image, gray = sample
        mask = np.isin(gray, (151, 76)).astype(np.uint8)
        return image, mask
class HU2Gray(object):
    """ Linearly map HU values in [hu_min, hu_max] to gray scale [0, 255]. """
    def __init__(self, hu_max=1640.0, hu_min=-1024.0):
        self.hu_max = hu_max  # HU value mapped to 255
        self.hu_min = hu_min  # HU value mapped to 0
        # precomputed scale factor; the actual mapping is done in utils.hu2gray
        self.scale = float(255) / (self.hu_max - self.hu_min)
    def __call__(self, sample):
        """ convert HU value to gray scale [0, 255] """
        image, mask = sample
        image = hu2gray(image, self.hu_max, self.hu_min)
        return image, mask
class HU2LUT(object):
    """ Apply a window/level lookup to the HU image (utils.hu2lut). """
    def __init__(self, window, level):
        self.window = window  # window width
        self.level = level    # window center
    def __call__(self, sample):
        image, mask = sample
        new_image = hu2lut(image, self.window, self.level)
        return new_image, mask
class ToTensor(object):
    """Convert (image, mask) ndarrays to torch tensors.

    The image becomes a float tensor in channel-first layout, optionally
    scaled by 1/255; the mask becomes a long tensor.
    NOTE(review): this intentionally shadows torchvision.transforms.ToTensor
    imported at the top of the file — confirm that is the desired name.
    """
    def __init__(self, norm=True):
        self.norm = norm  # divide the image by 255 after conversion
    def __call__(self, sample):
        image, mask = sample
        # numpy layout H x W (x C) -> torch layout C x H x W
        if image.ndim == 3:
            image = image.transpose((2, 0, 1))
        elif image.ndim == 2:
            image = image[np.newaxis, :, :]
        tensor_img = torch.from_numpy(image).float()
        if self.norm:
            tensor_img = tensor_img / 255.0
        return tensor_img, torch.from_numpy(mask).long()
class HU2GrayMultiStreamToTensor(object):
    """ convert HU value to grayscale for different windows + ToTensor

    Produces one float tensor per (window width, window center) pair, so the
    sample image becomes a list of tensors (one stream per window).
    """
    def __init__(self, w_widths = [500.0, 100.0], w_centers = [250.0, 50.0], norm=True):
        self.w_widths = w_widths    # window widths, one per stream
        self.w_centers = w_centers  # window centers, one per stream
        self.norm = norm            # divide each stream by 255 after conversion
    def __call__(self, sample):
        image, mask = sample
        # numpy layout H x W (x C) -> torch layout C x H x W
        if image.ndim == 3:
            image = image.transpose((2, 0, 1))
        elif image.ndim == 2:
            image = image[np.newaxis, :, :]
        sample_img = []
        for w_w, w_c in zip(self.w_widths, self.w_centers):
            # windowing itself is delegated to utils.hu2lut
            stream = hu2lut(image, w_w, w_c)
            stream = torch.from_numpy(stream).float()
            if self.norm:
                stream = stream / 255.0
            sample_img.append(stream)
        sample_mask = torch.from_numpy(mask).long()
        return sample_img, sample_mask
class Identical(object):
    """ No-op transform: returns the sample unchanged (useful as a placeholder). """
    def __call__(self, sample):
        return sample
class GaussianCrop(object):
    """Crop a patch whose centre is drawn from a Gaussian around the image centre."""
    def __init__(self, size, sigma=0.1):
        assert isinstance(size, (int, tuple))
        self.size = (int(size), int(size)) if isinstance(size, int) else size
        self.sigma = sigma  # std of the centre position, as a fraction of the image size
    def __call__(self, sample):
        """ centre crop the given image
        Args:
            sample : (image, mask)
        """
        image, mask = sample
        full_h, full_w = image.shape[:2]
        patch_h, patch_w = self.size
        def pick_center(full, patch):
            # draw the centre, then clamp so the patch stays inside the image
            c = int(random.normalvariate(0.5, self.sigma) * full)
            if c < patch // 2:
                c = patch // 2
            elif c > full - patch // 2:
                c = full - patch // 2
            return c
        center_h = pick_center(full_h, patch_h)  # height first: keeps RNG order identical
        center_w = pick_center(full_w, patch_w)
        rows = slice(center_h - patch_h // 2, center_h + patch_h // 2)
        cols = slice(center_w - patch_w // 2, center_w + patch_w // 2)
        return image[rows, cols], mask[rows, cols]
class CentralCrop(object):
    """ central crop """
    def __init__(self, size):
        self.size = size  # int or tuple, new image size
    def __call__(self, sample):
        """ centre crop the given image
        :param sample: (image, mask)
        :param size: int or tuple, new image size
        """
        image, mask = sample
        # cropping itself is delegated to utils.central_crop
        new_image = central_crop(image, self.size)
        new_mask = central_crop(mask, self.size)
        return new_image, new_mask
class RandomCentralCrop(object):
    """Central crop with a size drawn uniformly from
    {lower_size, lower_size + step, ...} below upper_size."""
    def __init__(self, lower_size=192, upper_size=256, step=4):
        assert lower_size%2 ==0 and upper_size%2==0, "both lower and upper size should be even number"
        self.lower_size = lower_size
        self.upper_size = upper_size
        self.step = step
    def __call__(self, sample):
        crop_size = random.randrange(self.lower_size, self.upper_size, self.step)
        # delegate the actual cropping to CentralCrop
        return CentralCrop(crop_size)(sample)
class AddNoise(object):
    """Add i.i.d. Gaussian noise to the image with probability `prob`; the mask is untouched."""
    def __init__(self, loc=0.0, scale=1.0, prob=0.5):
        self.loc = loc      # noise mean
        self.scale = scale  # noise std
        self.prob = prob    # chance of adding noise
    def __call__(self, sample):
        image, mask = sample
        if random.uniform(0, 1) <= self.prob:
            # in-place add keeps the caller's array object (as the original did)
            image += np.random.normal(self.loc, self.scale, image.shape)
        return image, mask
class RandomTranslate(object):
    """ Randomly translate image and mask together with probability `prob`.

    The horizontal shift is drawn from [-W/4, W/4] and the vertical shift
    from [-H/4, H/4].
    """
    def __init__(self, prob=0.5):
        self.prob = prob  # chance of applying the translation
    def __call__(self, sample):
        image, mask = sample
        [H, W] = image.shape[:2]
        x = random.uniform(0, 1)
        if x <= self.prob:
            right = random.randint(int(-W/4), int(W/4))
            # bug fix: the lower bound was +H/4, so the vertical shift was
            # always exactly H//4 downward and never upward
            down = random.randint(int(-H/4), int(H/4))
            M = np.float32([[1, 0, right], [0, 1, down]])
            image = cv2.warpAffine(image, M, (W, H))
            mask = cv2.warpAffine(mask, M, (W, H))
        return image, mask
# composed = transforms.Compose([CentralCrop((128, 128)),
# Rescale((388, 388)),
# MirrorPadding(((92, 92), (92, 92))),
# Gray2Mask(),
# ToTensor()])
#
# if __name__ == "__main__":
# x = np.random.normal(0, 0.5, (100, 100))
# plt.figure(1)
# plt.imshow(x)
#
# y, _ = RandomTranslate()((x, x))
# plt.figure(2)
# plt.imshow(y)
#
# plt.show()
# _*_ coding: utf-8 _*_
""" implement DeepLab v2 in pytorch """
import torch.nn as nn
import torch
import numpy as np
affine_par = True
import torch.nn.functional as F
from torchvision.models import ResNet
def outS(i):
    """ Map a DeepLab input spatial size to the network output size.

    Mirrors the three stride-2 stages of the backbone (conv1, the ceil-mode
    max-pool, and layer2). Bug fix: the Python-2 era `/` became float
    division under Python 3, so this helper leaked float sizes (e.g. 41.0);
    integer division restores the original semantics and returns an int.
    """
    i = int(i)
    i = (i + 1) // 2
    i = int(np.ceil((i + 1) / 2.0))
    i = (i + 1) // 2
    return i
def conv3x3(in_planes, out_planes, stride=1):
    """Return a padded, bias-free 3x3 convolution (the standard ResNet conv)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, padding=1, stride=stride, bias=False)
def get_1x_lr_params_NOscale(model):
    """
    Yield every trainable parameter of the backbone (conv1, bn1, layer1-4),
    i.e. everything except the final classification layer. Batchnorm
    parameters have requires_grad=False set elsewhere in this file, so the
    requires_grad check filters them out.

    NOTE(review): iterating .modules() and then each submodule's *recursive*
    .parameters() yields parameters of nested modules more than once — kept
    as-is to preserve the original behavior.
    """
    backbone = (model.Scale.conv1, model.Scale.bn1, model.Scale.layer1,
                model.Scale.layer2, model.Scale.layer3, model.Scale.layer4)
    for section in backbone:
        for submodule in section.modules():
            for param in submodule.parameters():
                if param.requires_grad:
                    yield param
def get_10x_lr_params(model):
    """
    Yield the parameters of the final classification layer (layer5), which
    is trained with a 10x learning rate (see adjust_learning_rate).
    """
    for param in model.Scale.layer5.parameters():
        yield param
def adjust_learning_rate(optimizer, lr_scheduler):
    """ only for DeepLab model: keep optimizer param group 1 (the
    classification head, see get_10x_lr_params) at 10x the scheduled rate """
    lr = lr_scheduler.get_lr()[0]
    optimizer.param_groups[1]['lr'] = lr * 10
def cal_loss(output, target, loss_name, criterion):
    """ calculate loss for DeepLab resnet with multiple outputs
    :param output: list of Float Variable, model output
    :param target: Long Variable, ground truth label
    :param criterion: loss function, criterion used for evaluation
    :param loss_name: str, loss function name
    :return: loss: float tensor, final loss (mean over the output heads)
    """
    # NLL expects log-probabilities, so apply log-softmax per head first
    if loss_name == 'nll':
        output = [F.log_softmax(out, dim=1) for out in output]
    # target is [N, H, W] or one-hot [N, C, H, W]; recover the spatial size
    if len(target.size()) == 3:
        h, w = target.size()[1:]
    elif len(target.size()) == 4:
        h, w = target.size()[2:]
    # NOTE(review): nn.Upsample is deprecated in newer torch in favour of
    # F.interpolate — confirm the torch version before changing
    interpo_out = nn.Upsample(size=(h, w), mode='bilinear') # interpolation for output
    loss = 0.0
    for out in output:
        # upsample each head to the target resolution before scoring
        tmp = criterion(interpo_out(out), target)
        loss += tmp
    loss = loss / len(output)
    return loss
class BasicBlock(nn.Module):
    """ Standard two-conv ResNet residual block (expansion 1).

    NOTE(review): `_make_layer` passes a `dilation` keyword to its block
    type, which BasicBlock does not accept — this class appears unusable
    with the dilated layers in this file; confirm before relying on it.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # project the identity branch when channels/stride change
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """ 1x1 -> 3x3 (optionally dilated) -> 1x1 ResNet bottleneck block.

    All batchnorm parameters are frozen (requires_grad=False), matching the
    DeepLab fine-tuning recipe; see also get_1x_lr_params_NOscale.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        # note: the stride is applied in the 1x1 reduction conv ("change"
        # w.r.t. torchvision, which strides the 3x3 conv)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        # padding equals dilation so the 3x3 conv keeps the spatial size
        padding = dilation
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
                               padding=padding, bias=False, dilation = dilation)
        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
        for i in self.bn3.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # project the identity branch when channels/stride change
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Classifier_Module(nn.Module):
    """ASPP-style classification head: parallel dilated 3x3 convolutions over
    the 2048-channel feature map, with the branch outputs summed."""
    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            branch = nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                               padding=padding, dilation=dilation, bias=True)
            self.conv2d_list.append(branch)
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)  # biases keep their default init
    def forward(self, x):
        out = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            out = out + branch(x)
        return out
class Residual_Covolution(nn.Module):
    """ Residual convolution unit of the refinement head: emits intermediate
    segmentation logits `seg` plus a residual-corrected feature map.
    (The original "Covolution" spelling is kept since callers reference it.)
    """
    def __init__(self, icol, ocol, num_classes):
        super(Residual_Covolution, self).__init__()
        # icol: channel count of the residual path; ocol: hidden width
        self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        dow1 = self.conv1(x)
        dow1 = self.relu(dow1)
        seg = self.conv2(dow1)  # intermediate segmentation logits
        inc1 = self.conv3(seg)
        add1 = dow1 + self.relu(inc1)
        inc2 = self.conv4(add1)
        out = x + self.relu(inc2)  # residual correction of the input features
        return out, seg
class Residual_Refinement_Module(nn.Module):
    """ Two chained Residual_Covolution units; returns the first segmentation
    and the sum of both (coarse prediction plus its refinement). """
    def __init__(self, num_classes):
        super(Residual_Refinement_Module, self).__init__()
        self.RC1 = Residual_Covolution(2048, 512, num_classes)
        self.RC2 = Residual_Covolution(2048, 512, num_classes)
    def forward(self, x):
        x, seg1 = self.RC1(x)
        _, seg2 = self.RC2(x)
        # seg2 acts as a residual correction on top of seg1
        return [seg1, seg1+seg2]
class ResNet_Refine(nn.Module):
    """ Dilated ResNet backbone topped with a Residual_Refinement_Module.

    layer3/layer4 use stride 1 with dilation 2/4, so the output stride stays
    at 8 as in DeepLab. forward returns the refinement module's output (a
    list of two segmentation maps).
    """
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet_Refine, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # the stem batchnorm is frozen, like every BN in this model
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = Residual_Refinement_Module(num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # for i in m.parameters():
        #     i.requires_grad = False
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """ Build one residual stage; a projection shortcut (with frozen BN)
        is added whenever channels, stride or dilation change. """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        return x
class ResNet(nn.Module):
    """Dilated ResNet backbone (output stride 8) topped with an ASPP-style
    Classifier_Module built over dilations/paddings [6, 12, 18, 24].

    ``block`` is the residual block class (e.g. Bottleneck, defined elsewhere
    in this file); ``layers`` lists the block count of each of the four stages.
    """
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # Freeze the stem BN (kept fixed when fine-tuning pretrained weights).
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fixed-std Gaussian init. (The original computed a fan-out
                # value ``n`` here but never used it; dead code removed.)
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage of ``blocks`` blocks; a 1x1 projection
        shortcut (with frozen BN) is created whenever the shape changes or
        dilation is in use."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, affine = affine_par))
        # NOTE: assumes ``downsample`` was created above — true for every call
        # made from __init__ (the first stage already changes channel count).
        for i in downsample._modules['1'].parameters():
            i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def _make_pred_layer(self, block, dilation_series, padding_series, num_classes):
        """Instantiate the prediction head (ASPP classifier)."""
        return block(dilation_series, padding_series, num_classes)

    def forward(self, x):
        # Stem: 7x7 conv /2, BN, ReLU, maxpool /2 -> 1/4 input resolution.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # ASPP classifier head -> per-class logits at 1/8 resolution.
        x = self.layer5(x)
        return x
class MS_Deeplab(nn.Module):
    """Multi-scale DeepLab: runs the shared backbone at 1.0x, 0.75x and 0.5x
    input scale and also returns the element-wise max fusion.

    Returns ``[output, output75, output5, out_max]`` where the smaller-scale
    predictions are upsampled back to the 1.0x prediction size.
    """
    def __init__(self, block, num_classes):
        super(MS_Deeplab, self).__init__()
        self.Scale = ResNet(block, [3, 4, 23, 3], num_classes)  # changed to fix #4

    def forward(self, x):
        # FIX: the original assigned the Upsample modules to ``self`` inside
        # forward(), re-registering submodules on every call. Plain locals
        # behave identically (Upsample has no parameters) and avoid mutating
        # the module during the forward pass.
        output = self.Scale(x)  # for original scale
        output_size = output.size()[2]
        input_size = x.size()[2]
        interp1 = nn.Upsample(size=(int(input_size*0.75)+1, int(input_size*0.75)+1), mode='bilinear')
        interp2 = nn.Upsample(size=(int(input_size*0.5)+1, int(input_size*0.5)+1), mode='bilinear')
        interp3 = nn.Upsample(size=(output_size, output_size), mode='bilinear')
        x75 = interp1(x)
        output75 = interp3(self.Scale(x75))  # for 0.75x scale
        x5 = interp2(x)
        output5 = interp3(self.Scale(x5))  # for 0.5x scale
        out_max = torch.max(torch.max(output, output75), output5)
        return [output, output75, output5, out_max]
def Res_Ms_Deeplab(input_channels=1, output_channels=5, pretrain=True):
    """Build a multi-scale DeepLab (ResNet-101 backbone), optionally loading
    VOC-pretrained weights from ``./MS_DeepLab_resnet_trained_VOC.pth`` and
    then adapting the stem conv / classifier head to the requested channel
    counts. Adapted layers are freshly initialized (not pretrained)."""
    model = MS_Deeplab(Bottleneck,21)
    if pretrain: # load pretrained model (checkpoint expected in the working dir)
        model.load_state_dict(torch.load("./MS_DeepLab_resnet_trained_VOC.pth",
                              map_location=lambda storage, loc: storage))
    if output_channels != 21: # different # of output classes: rebuild the classifier head
        model.Scale.layer5 = ResNet(Bottleneck, [3, 4, 23, 3], output_channels).layer5
    if input_channels != 3: # different # of input channels: replace the stem conv
        model.Scale.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
                  bias=False)
    return model
# def Res_Ms_Deeplab(num_classes=21):
# model = MS_Deeplab(Bottleneck, num_classes)
# return model
def Res_Deeplab(num_classes=21, is_refine=False):
    """Factory for a single-scale DeepLab ResNet-101.

    ``is_refine`` selects the Residual-Refinement head variant instead of
    the plain ASPP classifier.
    """
    arch = ResNet_Refine if is_refine else ResNet
    return arch(Bottleneck, [3, 4, 23, 3], num_classes)
3D | kkhuang1990/PlaqueDetection | image/models/res_unet.py | .py | 4,967 | 146 | # coding = utf-8
""" define the U-Net structure """
import torch
from torch import nn
from .utils import _initialize_weights
import torch.nn.functional as F
def conv_33(in_channels, out_channels, stride=1):
    """3x3 same-padding convolution; bias is omitted because BN follows."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class ResBlock(nn.Module):
    """Pre-activation residual block: (BN -> ReLU -> 3x3 conv) twice.

    When the identity path cannot match the residual path (channel count or
    stride changes), a 1x1 conv + BN projection is used as the shortcut.
    ``p`` is accepted for interface compatibility but unused here (no dropout
    in this variant).
    """

    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = conv_33(in_channels, out_channels, stride=stride)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv_33(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU(inplace=True)
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        return out + shortcut
class UpConv(nn.Module):
    """2x upsampling via transposed convolution, then skip concatenation.

    ``forward(skip, x)`` upsamples ``x`` and concatenates the encoder skip
    tensor in front of it along the channel axis.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose2d(in_channels, out_channels,
                                            kernel_size=2, stride=2, padding=0)

    def forward(self, skip, x):
        upsampled = self.transconv(x)
        return torch.cat([skip, upsampled], 1)
class ResUNet(nn.Module):
    """U-Net built from pre-activation residual blocks.

    Encoder: 3x3 conv + BN + ReLU stem, then one stride-2 ResBlock per entry
    of ``down_blocks[1:]``; a stride-2 ResBlock bottleneck; decoder: one
    transposed-conv upsample + skip concatenation + stride-1 ResBlock per
    entry of ``up_blocks``; a final 1x1 conv maps to ``out_channels`` logits.

    NOTE(review): in this module ResBlock ignores ``p`` (no dropout — see
    res_unet_dp for the dropout variant), and ``self.relu`` is created but
    unused in forward (F.relu is used instead).
    """
    def __init__(self, in_channels=1, out_channels=5, down_blocks=[32, 64, 128, 256, 512],
                 up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024, p=0.5):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.conv1 = nn.Conv2d(in_channels, self.down_blocks[0], 3, padding=1)
        self.bn1 = nn.BatchNorm2d(self.down_blocks[0])
        # contract path: each ResBlock halves the spatial size (stride=2)
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks[1:]):
            b_inx += 1
            output_channel = self.down_blocks[b_inx]
            input_channel = self.down_blocks[b_inx-1]
            self.BlocksDown.append(ResBlock(input_channel, output_channel, stride=2, p=p))
        # bottleneck block (one more stride-2 reduction)
        self.bottleneck = ResBlock(self.down_blocks[-1], bottleneck, stride=2, p=p)
        # expansive path: UpConv halves channels and doubles the size; after skip
        # concatenation the channel count equals input_channel again
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            self.BlocksUp.append(ResBlock(input_channel, output_channel, stride=1, p=p))
        # final convolution layer: 1x1 conv to class logits
        self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights(self)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        # stem at full resolution; also serves as the outermost skip
        out = F.relu(self.bn1(self.conv1(x)))
        # print(out.size())
        skip_connections = []
        skip_connections.append(out)
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            # print(out.size())
        out = self.bottleneck(out)
        # print(out.size())
        # decoder consumes the skips in reverse (LIFO) order
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
            # print(out.size())
        output = self.fl(out)
        return output
def ResUNet28(in_channels, out_channels, p=0.0):
    """Five-stage ResUNet (bottleneck width 1024)."""
    return ResUNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128, 256, 512],
        up_blocks=[512, 256, 128, 64, 32],
        bottleneck=1024, p=p)

def ResUNet23(in_channels, out_channels, p=0.0):
    """Four-stage ResUNet (bottleneck width 512)."""
    return ResUNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128, 256],
        up_blocks=[256, 128, 64, 32],
        bottleneck=512, p=p)

def ResUNet18(in_channels, out_channels, p=0.0):
    """Three-stage ResUNet (bottleneck width 256)."""
    return ResUNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128],
        up_blocks=[128, 64, 32],
        bottleneck=256, p=p)
def resunet_debug():
    """Smoke test: push a dummy batch through ResUNet28.

    FIX: the original stored the model in a local named ``ResUNet``,
    shadowing the class of the same name; renamed to ``model``.
    """
    model = ResUNet28(1, 5)
    x = torch.rand(4, 1, 96, 96)
    y = model(x)
| Python |
3D | kkhuang1990/PlaqueDetection | image/models/unet.py | .py | 3,607 | 99 | # coding = utf-8
""" define the U-Net structure """
import torch
from torch import nn
from .utils import _initialize_weights
class ConvBlock(nn.Sequential):
    """Two 3x3 same-padding convolutions, each followed by an in-place ReLU.

    Maps ``in_channels -> out_channels`` while preserving spatial size.
    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.add_module('conv1', nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1))
        self.add_module('relu1', nn.ReLU(True))
        self.add_module('conv2', nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1))
        self.add_module('relu2', nn.ReLU(True))
class UpConv(nn.Module):
    """2x upsampling via transposed convolution, then skip concatenation.

    ``forward(skip, x)`` upsamples ``x`` and concatenates the encoder skip
    tensor in front of it along the channel axis.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose2d(in_channels, out_channels,
                                            kernel_size=2, stride=2, padding=0)

    def forward(self, skip, x):
        upsampled = self.transconv(x)
        return torch.cat([skip, upsampled], 1)
class UNet(nn.Module):
    """Plain U-Net: ConvBlock encoder stages joined by max-pooling, a
    ConvBlock bottleneck, and a transposed-conv decoder with skip
    concatenation; a final 1x1 conv maps to ``out_channels`` logits.
    """
    def __init__(self, in_channels=1, out_channels=5, down_blocks=[32, 64, 128, 256, 512],
                 up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        # contract path: one ConvBlock per encoder stage (pooling is applied
        # between stages in forward, not here)
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks):
            input_channel = in_channels if b_inx == 0 else self.down_blocks[b_inx-1]
            output_channel = self.down_blocks[b_inx]
            self.BlocksDown.append(ConvBlock(input_channel, output_channel))
        # bottleneck block
        self.bottleneck = ConvBlock(self.down_blocks[-1], bottleneck)
        # expansive path: UpConv halves channels and doubles the size; after skip
        # concatenation the channel count equals input_channel again
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            self.BlocksUp.append(ConvBlock(input_channel, output_channel))
        # final convolution layer: 1x1 conv to class logits
        self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights(self)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
    def forward(self, x):
        out = x
        skip_connections = []
        # skips are captured before pooling so they match decoder resolutions
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            out = self.maxpool(out)
        out = self.bottleneck(out)
        # decoder consumes the skips in reverse (LIFO) order
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
        output = self.fl(out)
        return output
def UNet28(in_channels, out_channels):
    """Five-stage U-Net (bottleneck width 1024)."""
    return UNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128, 256, 512],
        up_blocks=[512, 256, 128, 64, 32],
        bottleneck=1024)

def UNet23(in_channels, out_channels):
    """Four-stage U-Net (bottleneck width 512)."""
    return UNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128, 256],
        up_blocks=[256, 128, 64, 32],
        bottleneck=512)

def UNet18(in_channels, out_channels):
    """Three-stage U-Net (bottleneck width 256)."""
    return UNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128],
        up_blocks=[128, 64, 32],
        bottleneck=256)
3D | kkhuang1990/PlaqueDetection | image/models/wnet.py | .py | 3,624 | 89 | # coding = utf-8
""" define the U-Net structure """
import torch
from torch import nn
from .utils import _initialize_weights
import torch.nn.functional as F
class WNet(nn.Module):
    """Two-stage "W" network: a boundary-detection net feeds a segmentation net.

    The boundary net's softmax output (all channels except background) is
    concatenated with the raw input before it enters the segmentation net.

    Args (as used below):
        in_channel: channels of the raw input image.
        inter_channel: output channels of the boundary net.
        out_channel: output channels of the segmentation net.
        bound_net / seg_net: architecture names selecting each backbone.
        pre_train_bound_path: directory containing the boundary net's model.pth.
        pretrain: if True, would load a pretrained segmentation net
            (currently a no-op placeholder).
    """
    def __init__(self, in_channel, inter_channel, out_channel, bound_net, pre_train_bound_path,
                 seg_net='res_unet_dp', pretrain=False):
        super(WNet, self).__init__()
        # set boundary detection network structure
        if bound_net == 'unet':
            from .unet import UNet28 as UNet
            self.BoundNet = UNet(in_channel, inter_channel)
        elif bound_net == 'res_unet':
            from .res_unet import ResUNet28 as ResUNet
            self.BoundNet = ResUNet(in_channel, inter_channel)
        elif bound_net == 'res_unet_dp':
            from .res_unet_dp import ResUNet28 as ResUNet
            self.BoundNet = ResUNet(in_channel, inter_channel, p=0.0)
        elif bound_net == 'tiramisu':
            from .tiramisu import FCDenseNet67 as FCDenseNet
            self.BoundNet = FCDenseNet(in_channel, inter_channel, theta=1.0)
        elif bound_net == 'hyper_tiramisu':
            from .hyper_tiramisu import FCDenseNet67 as FCDenseNet
            self.BoundNet = FCDenseNet(in_channel, inter_channel, theta=1.0)
        elif bound_net == 'deeplab_resnet':
            from .deeplab_resnet import Res_Ms_Deeplab
            self.BoundNet = Res_Ms_Deeplab(in_channel, inter_channel)
        self.BoundNet.load_state_dict(torch.load("{}/model.pth".format(pre_train_bound_path),
                                                 map_location=lambda storage, loc: storage))
        # NOTE(review): the assignment below immediately replaces BoundNet with
        # the unpickled object from the same file, making the load_state_dict
        # call above dead code — presumably model.pth stores a whole module
        # rather than a state dict; confirm which checkpoint format is intended.
        self.BoundNet = torch.load("{}/model.pth".format(pre_train_bound_path),
                                   map_location=lambda storage, loc: storage)
        # set plaque segmentation network structure
        if pretrain:
            pass # define the pre_train_seg_path here
        else:
            if seg_net == 'unet':
                from .unet import UNet28 as UNet
                self.SegNet = UNet(inter_channel, out_channel)
            elif seg_net == 'res_unet':
                from .res_unet import ResUNet28 as ResUNet
                self.SegNet = ResUNet(inter_channel, out_channel)
            elif seg_net == 'res_unet_dp':
                from .res_unet_dp import ResUNet28 as ResUNet
                self.SegNet = ResUNet(inter_channel, out_channel, p=0.0)
            elif seg_net == 'tiramisu':
                from .tiramisu import FCDenseNet67 as FCDenseNet
                self.SegNet = FCDenseNet(inter_channel, out_channel, theta=1.0)
            elif seg_net == 'hyper_tiramisu':
                from .hyper_tiramisu import FCDenseNet67 as FCDenseNet
                self.SegNet = FCDenseNet(inter_channel, out_channel, theta=1.0)
            elif seg_net == 'deeplab_resnet':
                from .deeplab_resnet import Res_Ms_Deeplab
                self.SegNet = Res_Ms_Deeplab(inter_channel, out_channel)
    def forward(self, x):
        # size of bound_pred: [B, C_out, H, W]  size of x: [B, C_in, H, W]
        bound_pred = self.BoundNet(x) # for innerouter bound, out_channel=3 else 2
        # drop channel 0 (background) of the softmaxed boundary map and stack
        # the remaining probability channels onto the raw input
        x_bound = torch.cat([x, F.softmax(bound_pred, dim=1)[:, 1:]], dim=1)
        y = self.SegNet(x_bound)
        return bound_pred, y
def WNetPT(in_channel, inter_channel, out_channel, bound_net, pre_train_bound_path, seg_net='res_unet_dp'):
    """Convenience constructor for WNet (segmentation-net pretraining disabled)."""
    return WNet(in_channel, inter_channel, out_channel, bound_net,
                pre_train_bound_path, seg_net, pretrain=False)
3D | kkhuang1990/PlaqueDetection | image/models/tiramisu.py | .py | 8,578 | 225 | # _*_ coding: utf-8 _*_
import torch
import torch.nn as nn
import math
from .utils import _initialize_weights
class DenseLayer(nn.Sequential):
    """Basic DenseNet layer: BN -> ReLU -> 3x3 conv -> 2D dropout (p=0.2).

    Produces ``growth_rate`` feature maps at unchanged spatial size.
    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, growth_rate):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(in_channels))
        self.add_module('relu', nn.ReLU(True))
        self.add_module('conv', nn.Conv2d(in_channels, growth_rate, kernel_size=3,
                                          stride=1, padding=1, bias=True))
        self.add_module('drop', nn.Dropout2d(0.2))
class DenseBlock(nn.Module):
    """Stack of DenseLayers with dense (concatenating) connectivity.

    Each layer sees the concatenation of the block input and all previous
    layers' outputs. With ``upsample=True`` only the newly produced feature
    maps are returned (keeps channel growth bounded on the decoder path);
    otherwise the input is returned concatenated with every layer's output.
    """

    def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
        """
        :param in_channels: int, number of input channels
        :param growth_rate: int, output channels of each dense layer
        :param n_layers: int, number of dense layers
        :param upsample: bool, whether this block sits on the upsampling path
        """
        super().__init__()
        self.upsample = upsample
        self.layers = nn.ModuleList(
            [DenseLayer(in_channels + i * growth_rate, growth_rate)
             for i in range(n_layers)])

    def forward(self, x):
        if self.upsample:
            fresh = []
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1)
                fresh.append(out)
            # only the new feature maps, not the block input
            return torch.cat(fresh, 1)
        for layer in self.layers:
            x = torch.cat([x, layer(x)], 1)  # 1 = channel axis
        return x
class TransitionDown(nn.Sequential):
    """DenseNet transition-down: BN -> ReLU -> 1x1 conv (channel compression
    by factor ``theta``) -> dropout -> 2x2 max-pool.

    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, theta=1.0):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(num_features=in_channels))
        self.add_module('relu', nn.ReLU(inplace=True))
        out_channels = int(theta * in_channels)
        self.add_module('conv', nn.Conv2d(in_channels, out_channels,
                                          kernel_size=1, stride=1,
                                          padding=0, bias=True))
        self.add_module('drop', nn.Dropout2d(0.2))
        self.add_module('maxpool', nn.MaxPool2d(2))
class TransitionUp(nn.Module):
    """Stride-2 transposed convolution, center-cropped to the skip tensor's
    spatial size, then concatenated with the skip along channels."""

    def __init__(self, in_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose2d(
            in_channels=in_channels, out_channels=in_channels,
            kernel_size=3, stride=2, padding=0, bias=True)

    def forward(self, x, skip):
        upsampled = center_crop(self.convTrans(x), skip.size(2), skip.size(3))
        return torch.cat([upsampled, skip], 1)
class Bottleneck(nn.Sequential):
    """Bottleneck of the FC-DenseNet: a single upsample-mode DenseBlock, so
    only the newly produced feature maps are forwarded.

    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, growth_rate, n_layers):
        super().__init__()
        self.add_module('bottleneck', DenseBlock(
            in_channels, growth_rate, n_layers, upsample=True))
def center_crop(layer, max_height, max_width):
    """Symmetrically crop a (N, C, H, W) tensor to (max_height, max_width)."""
    _, _, h, w = layer.size()
    top = (h - max_height) // 2
    left = (w - max_width) // 2
    return layer[:, :, top:top + max_height, left:left + max_width]
class FCDenseNet(nn.Module):
    """Fully-Convolutional DenseNet (Tiramisu) for semantic segmentation.

    Structure: first 3x3 conv -> (DenseBlock + TransitionDown) per entry of
    ``down_blocks`` -> DenseBlock bottleneck (upsample mode) ->
    (TransitionUp + DenseBlock) per entry of ``up_blocks`` -> 1x1 conv to
    ``n_classes`` logits. ``theta`` is the transition-down channel
    compression factor; ``growth_rate`` is the per-layer feature increment.
    """
    def __init__(self, in_channels=1, down_blocks=(5,5,5),
                 up_blocks=(5,5,5), bottleneck_layers=5,
                 growth_rate=16, out_chans_first_conv=32, n_classes=5, theta=1.0):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.theta = theta
        # running count of feature maps entering the next stage
        cur_channels_count = 0
        skip_connection_channel_counts = []
        ## First Convolution ##
        self.add_module('firstconv', nn.Conv2d(in_channels=in_channels,
                  out_channels=out_chans_first_conv, kernel_size=3,
                  stride=1, padding=1, bias=True))
        cur_channels_count = out_chans_first_conv
        ## Down-sampling path ##
        self.denseBlocksDown = nn.ModuleList([])
        self.transDownBlocks = nn.ModuleList([])
        for i in range(len(down_blocks)):
            self.denseBlocksDown.append(
                DenseBlock(cur_channels_count, growth_rate, down_blocks[i])) # both the input and output are saved
            cur_channels_count += (growth_rate*down_blocks[i])
            # update the number of feature maps of skip connection
            # (inserted at the front so index i matches the i-th up stage)
            skip_connection_channel_counts.insert(0,cur_channels_count)
            # transition down compresses channels by factor theta
            self.transDownBlocks.append(TransitionDown(cur_channels_count, self.theta))
            cur_channels_count = int(self.theta * cur_channels_count)
        ## Bottleneck ##
        self.add_module('bottleneck',Bottleneck(cur_channels_count,
                                     growth_rate, bottleneck_layers))
        # upsample-mode blocks emit only the new feature maps
        prev_block_channels = growth_rate*bottleneck_layers
        cur_channels_count += prev_block_channels
        ## Up-sampling path ##
        self.transUpBlocks = nn.ModuleList([])
        self.denseBlocksUp = nn.ModuleList([])
        for i in range(len(up_blocks)):
            self.transUpBlocks.append(TransitionUp(prev_block_channels))
            cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]
            # the last up block keeps its input concatenated (upsample=False)
            # so the final conv sees input + new features
            if i != len(up_blocks) - 1:
                self.denseBlocksUp.append(DenseBlock(
                    cur_channels_count, growth_rate, up_blocks[i],
                    upsample=True))
            else:
                self.denseBlocksUp.append(DenseBlock(
                    cur_channels_count, growth_rate, up_blocks[-1],
                    upsample=False))
            prev_block_channels = growth_rate*up_blocks[i]
        cur_channels_count += growth_rate*up_blocks[-1]
        ## final convolution ##
        self.finalConv = nn.Conv2d(in_channels=cur_channels_count,
               out_channels=n_classes, kernel_size=1, stride=1,
                   padding=0, bias=True)
        # self.softmax = nn.LogSoftmax(dim=1)
        _initialize_weights(self)
    def forward(self, x):
        out = self.firstconv(x)
        skip_connections = []
        # encoder: save each dense block's output before downsampling
        for i in range(len(self.down_blocks)):
            out = self.denseBlocksDown[i](out)
            skip_connections.append(out)
            out = self.transDownBlocks[i](out)
        out = self.bottleneck(out)
        # decoder consumes the skips in reverse (LIFO) order
        for i in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.transUpBlocks[i](out, skip)
            out = self.denseBlocksUp[i](out)
        out = self.finalConv(out)
        return out
def FCDenseNet36(in_channel, n_classes, theta):
    """Compact 3-stage Tiramisu (4 layers per block, growth 12)."""
    return FCDenseNet(
        in_channels=in_channel, down_blocks=(4, 4, 4), up_blocks=(4, 4, 4),
        bottleneck_layers=4, growth_rate=12,
        out_chans_first_conv=32, n_classes=n_classes, theta=theta)

def FCDenseNet43(in_channel, n_classes, theta):
    """Compact 3-stage Tiramisu (5 layers per block, growth 16)."""
    return FCDenseNet(
        in_channels=in_channel, down_blocks=(5, 5, 5), up_blocks=(5, 5, 5),
        bottleneck_layers=5, growth_rate=16,
        out_chans_first_conv=32, n_classes=n_classes, theta=theta)

def FCDenseNet52(in_channel, n_classes, theta):
    """Compact 3-stage Tiramisu with growing block depths (4-5-7)."""
    return FCDenseNet(
        in_channels=in_channel, down_blocks=(4, 5, 7), up_blocks=(7, 5, 4),
        bottleneck_layers=12, growth_rate=16,
        out_chans_first_conv=32, n_classes=n_classes, theta=theta)

# Configurations from the original Tiramisu paper.
def FCDenseNet57(in_channel, n_classes, theta):
    """Paper's FC-DenseNet57 (5 stages, 4 layers per block, growth 12)."""
    return FCDenseNet(
        in_channels=in_channel, down_blocks=(4, 4, 4, 4, 4), up_blocks=(4, 4, 4, 4, 4),
        bottleneck_layers=4, growth_rate=12,
        out_chans_first_conv=48, n_classes=n_classes, theta=theta)

def FCDenseNet67(in_channel, n_classes, theta):
    """Paper's FC-DenseNet67 (5 stages, 5 layers per block, growth 16)."""
    return FCDenseNet(
        in_channels=in_channel, down_blocks=(5, 5, 5, 5, 5), up_blocks=(5, 5, 5, 5, 5),
        bottleneck_layers=5, growth_rate=16,
        out_chans_first_conv=48, n_classes=n_classes, theta=theta)

def FCDenseNet103(in_channel, n_classes, theta):
    """Paper's FC-DenseNet103 (5 stages with depths 4-5-7-10-12)."""
    return FCDenseNet(
        in_channels=in_channel, down_blocks=(4, 5, 7, 10, 12), up_blocks=(12, 10, 7, 5, 4),
        bottleneck_layers=15, growth_rate=16,
        out_chans_first_conv=48, n_classes=n_classes, theta=theta)
3D | kkhuang1990/PlaqueDetection | image/models/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | image/models/res_unet_dp.py | .py | 5,055 | 147 | # coding = utf-8
""" define the U-Net structure """
import torch
from torch import nn
from .utils import _initialize_weights
import torch.nn.functional as F
def conv_33(in_channels, out_channels, stride=1):
    """3x3 same-padding convolution; bias is omitted because BN follows."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class ResBlock(nn.Module):
    """Pre-activation residual block with 2D dropout after each conv.

    Path: (BN -> ReLU -> 3x3 conv -> Dropout2d(p)) twice, plus a 1x1 conv +
    BN projection shortcut whenever the identity path cannot match the
    residual path (channel count or stride changes).
    """

    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = conv_33(in_channels, out_channels, stride=stride)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv_33(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout2d(p=p)
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.dp(self.conv1(self.relu(self.bn1(x))))
        out = self.dp(self.conv2(self.relu(self.bn2(out))))
        return out + shortcut
class UpConv(nn.Module):
    """2x upsampling via transposed convolution, then skip concatenation.

    ``forward(skip, x)`` upsamples ``x`` and concatenates the encoder skip
    tensor in front of it along the channel axis.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose2d(in_channels, out_channels,
                                            kernel_size=2, stride=2, padding=0)

    def forward(self, skip, x):
        upsampled = self.transconv(x)
        return torch.cat([skip, upsampled], 1)
class ResUNet(nn.Module):
    """U-Net built from pre-activation residual blocks with dropout.

    Same topology as the plain ResUNet, but each ResBlock applies
    Dropout2d(p) after its convolutions. Encoder: conv + BN + ReLU stem, then
    one stride-2 ResBlock per entry of ``down_blocks[1:]``; stride-2 ResBlock
    bottleneck; decoder: transposed-conv upsample + skip concatenation +
    stride-1 ResBlock per entry of ``up_blocks``; final 1x1 conv to logits.

    NOTE(review): ``self.relu`` is created but unused in forward (F.relu is
    used instead).
    """
    def __init__(self, in_channels=1, out_channels=5, down_blocks=[32, 64, 128, 256, 512],
                 up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024, p=0.5):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.conv1 = nn.Conv2d(in_channels, self.down_blocks[0], 3, padding=1)
        self.bn1 = nn.BatchNorm2d(self.down_blocks[0])
        # contract path: each ResBlock halves the spatial size (stride=2)
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks[1:]):
            b_inx += 1
            output_channel = self.down_blocks[b_inx]
            input_channel = self.down_blocks[b_inx-1]
            self.BlocksDown.append(ResBlock(input_channel, output_channel, stride=2, p=p))
        # bottleneck block (one more stride-2 reduction)
        self.bottleneck = ResBlock(self.down_blocks[-1], bottleneck, stride=2, p=p)
        # expansive path: UpConv halves channels and doubles the size; after skip
        # concatenation the channel count equals input_channel again
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            self.BlocksUp.append(ResBlock(input_channel, output_channel, stride=1, p=p))
        # final convolution layer: 1x1 conv to class logits
        self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights(self)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        # stem at full resolution; also serves as the outermost skip
        out = F.relu(self.bn1(self.conv1(x)))
        # print(out.size())
        skip_connections = []
        skip_connections.append(out)
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            # print(out.size())
        out = self.bottleneck(out)
        # print(out.size())
        # decoder consumes the skips in reverse (LIFO) order
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
            # print(out.size())
        output = self.fl(out)
        return output
def ResUNet28(in_channels, out_channels, p=0.0):
    """Five-stage dropout ResUNet (bottleneck width 1024)."""
    return ResUNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128, 256, 512],
        up_blocks=[512, 256, 128, 64, 32],
        bottleneck=1024, p=p)

def ResUNet23(in_channels, out_channels, p=0.0):
    """Four-stage dropout ResUNet (bottleneck width 512)."""
    return ResUNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128, 256],
        up_blocks=[256, 128, 64, 32],
        bottleneck=512, p=p)

def ResUNet18(in_channels, out_channels, p=0.0):
    """Three-stage dropout ResUNet (bottleneck width 256)."""
    return ResUNet(
        in_channels=in_channels, out_channels=out_channels,
        down_blocks=[32, 64, 128],
        up_blocks=[128, 64, 32],
        bottleneck=256, p=p)
def resunet_debug():
    """Smoke test: push a dummy batch through ResUNet28.

    FIX: the original stored the model in a local named ``ResUNet``,
    shadowing the class of the same name; renamed to ``model``.
    """
    model = ResUNet28(1, 5)
    x = torch.rand(4, 1, 96, 96)
    y = model(x)
| Python |
3D | kkhuang1990/PlaqueDetection | image/models/utils.py | .py | 528 | 17 | # _*_ coding: utf-8 _*_
from torch import nn
import math
def _initialize_weights(model):
""" model weight initialization """
for m in model.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
| Python |
3D | kkhuang1990/PlaqueDetection | image/models/hyper_tiramisu.py | .py | 11,640 | 302 | # _*_ coding: utf-8 _*_
""" define the structure of Hyper Tiramisu for multi-stream input """
import torch
import torch.nn as nn
from .utils import _initialize_weights
class DenseLayer(nn.Sequential):
    """Basic DenseNet layer: BN -> ReLU -> 3x3 conv -> 2D dropout (p=0.2).

    Produces ``growth_rate`` feature maps at unchanged spatial size.
    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, growth_rate):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(in_channels))
        self.add_module('relu', nn.ReLU(True))
        self.add_module('conv', nn.Conv2d(in_channels, growth_rate, kernel_size=3,
                                          stride=1, padding=1, bias=True))
        self.add_module('drop', nn.Dropout2d(0.2))
class DenseBlock(nn.Module):
    """Multi-stream dense block: n_streams parallel stacks of DenseLayers
    where every layer of every stream sees the channel-concatenation of ALL
    streams' features so far (cross-stream dense connectivity)."""
    def __init__(self, n_streams, in_channels, growth_rate, n_layers, upsample=False):
        """
        :param n_streams: int, number of parallel streams
        :param in_channels: int, number of input channels per stream
        :param growth_rate: int, output channels of each dense layer
        :param n_layers: int, number of layers per stream
        :param upsample: bool, whether to do upsampling or not
        """
        super().__init__()
        self.upsample = upsample
        self.n_layers = n_layers
        self.n_streams = n_streams
        # layers[i][s] = layer i of stream s; the input width grows by
        # growth_rate per previous layer, times n_streams (all streams are
        # concatenated before each layer)
        self.layers = nn.ModuleList([nn.ModuleList([DenseLayer((in_channels + i * growth_rate) * n_streams, growth_rate)
                                     for _ in range(n_streams)]) for i in range(n_layers)])
    def forward(self, x):
        """
        :param x: list of tensors
        :return: list of tensors for each parallel DenseBlock
        """
        # combine list of tensors into a single tensor
        x_com = torch.cat(x, 1)
        # case of up sampling
        if self.upsample:
            # only the newly produced per-stream feature maps are returned
            # (the block input is not re-emitted), to bound feature growth
            new_features = [[None for _ in range(self.n_layers)] for _ in range(self.n_streams)]
            for l_inx in range(self.n_layers):
                outs = [None for _ in range(self.n_streams)]
                for s_inx in range(self.n_streams):
                    outs[s_inx] = self.layers[l_inx][s_inx](x_com)
                    new_features[s_inx][l_inx] = outs[s_inx]
                x_com = torch.cat([x_com, *outs], 1)
            return [torch.cat(new_feature,1) for new_feature in new_features]
        # case of down sampling
        else:
            # each stream's running concatenation (input + its own layer
            # outputs) is returned; x_com additionally accumulates all streams
            for l_inx in range(self.n_layers):
                outs = [None for _ in range(self.n_streams)]
                for s_inx in range(self.n_streams):
                    outs[s_inx] = self.layers[l_inx][s_inx](x_com)
                    x[s_inx] = torch.cat([x[s_inx], outs[s_inx]], 1)
                x_com = torch.cat([x_com, *outs], 1)
            return x
class TransitionDown(nn.Sequential):
    """DenseNet transition-down: BN -> ReLU -> 1x1 conv (channel compression
    by factor ``theta``) -> dropout -> 2x2 max-pool.

    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, theta=1.0):
        super().__init__()
        self.add_module('norm', nn.BatchNorm2d(num_features=in_channels))
        self.add_module('relu', nn.ReLU(inplace=True))
        out_channels = int(theta * in_channels)
        self.add_module('conv', nn.Conv2d(in_channels, out_channels,
                                          kernel_size=1, stride=1,
                                          padding=0, bias=True))
        self.add_module('drop', nn.Dropout2d(0.2))
        self.add_module('maxpool', nn.MaxPool2d(2))
class TransitionDownBlock(nn.Module):
    """Applies one independent TransitionDown per stream of a list input."""

    def __init__(self, n_streams, in_channels, theta):
        super().__init__()
        self.layers = nn.ModuleList(
            [TransitionDown(in_channels, theta) for _ in range(n_streams)])

    def forward(self, x):
        return [layer(stream) for stream, layer in zip(x, self.layers)]
class TransitionUp(nn.Module):
    """Stride-2 transposed convolution, center-cropped to the skip tensor's
    spatial size, then concatenated with the skip along channels."""

    def __init__(self, in_channels):
        super().__init__()
        self.convTrans = nn.ConvTranspose2d(
            in_channels=in_channels, out_channels=in_channels,
            kernel_size=3, stride=2, padding=0, bias=True)

    def forward(self, x, skip):
        upsampled = center_crop(self.convTrans(x), skip.size(2), skip.size(3))
        return torch.cat([upsampled, skip], 1)
def center_crop(layer, max_height, max_width):
    """Symmetrically crop a (N, C, H, W) tensor to (max_height, max_width)."""
    _, _, h, w = layer.size()
    top = (h - max_height) // 2
    left = (w - max_width) // 2
    return layer[:, :, top:top + max_height, left:left + max_width]
class TransitionUpBlock(nn.Module):
    """Applies one independent TransitionUp per (stream, skip) pair."""

    def __init__(self, n_streams, in_channels):
        super().__init__()
        self.layers = nn.ModuleList(
            [TransitionUp(in_channels) for _ in range(n_streams)])

    def forward(self, xs, skips):
        return [layer(x, skip) for x, skip, layer in zip(xs, skips, self.layers)]
class BottleneckBlock(nn.Sequential):
    """Bottleneck of the hyper FC-DenseNet: a single upsample-mode
    multi-stream DenseBlock, so only new feature maps are forwarded.

    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, n_streams, in_channels, growth_rate, n_layers):
        super().__init__()
        self.add_module('bottleneck', DenseBlock(
            n_streams, in_channels, growth_rate, n_layers, upsample=True))
class FirstConvBlock(nn.Module):
    """Per-stream entry point: one independent 3x3 same-padding convolution
    applied to each tensor of the input list."""

    def __init__(self, n_streams, in_channels, out_channels):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=3, stride=1, padding=1, bias=True)
            for _ in range(n_streams)
        ])

    def forward(self, x):
        return [layer(stream) for stream, layer in zip(x, self.layers)]
class FinalConv(nn.Sequential):
    """Final 1x1 convolution mapping fused features to class logits.

    FIX: the explicit ``forward`` override only delegated to
    ``nn.Sequential.forward`` and has been removed as redundant.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.add_module('finalconv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                               kernel_size=1, stride=1, padding=0, bias=True))
class FCDenseNet(nn.Module):
    """Multi-stream (hyper) FC-DenseNet: ``n_streams`` parallel Tiramisu
    paths with cross-stream dense connectivity inside each DenseBlock; the
    streams are concatenated just before the final 1x1 classifier.

    ``forward`` takes and propagates a LIST of tensors (one per stream).
    """
    def __init__(self, n_streams=2, in_channels=1, down_blocks=(5,5,5),
                 up_blocks=(5,5,5), bottleneck_layers=5, growth_rate=16,
                 out_chans_first_conv=32, n_classes=5, theta=1.0):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.theta = theta
        # running count of per-stream feature maps entering the next stage
        cur_channels_count = 0
        skip_connection_channel_counts = []
        ## First Convolution ##
        self.firstconv = FirstConvBlock(n_streams, in_channels, out_chans_first_conv)
        cur_channels_count = out_chans_first_conv
        ## Down-sampling path ##
        self.denseBlocksDown = nn.ModuleList([])
        self.transDownBlocks = nn.ModuleList([])
        for i in range(len(down_blocks)):
            self.denseBlocksDown.append(
                DenseBlock(n_streams, cur_channels_count, growth_rate, down_blocks[i], upsample=False))
            cur_channels_count += (growth_rate*down_blocks[i])
            # record the skip width (front-inserted so index i matches up stage i)
            skip_connection_channel_counts.insert(0,cur_channels_count)
            # transition down compresses channels by factor theta
            self.transDownBlocks.append(TransitionDownBlock(n_streams, cur_channels_count, self.theta))
            cur_channels_count = int(self.theta * cur_channels_count)
        ## Bottleneck ##
        self.add_module('bottleneck',BottleneckBlock(n_streams, cur_channels_count,
                                     growth_rate, bottleneck_layers))
        # upsample-mode blocks emit only the new feature maps
        prev_block_channels = growth_rate*bottleneck_layers
        cur_channels_count += prev_block_channels
        ## Up-sampling path ##
        self.transUpBlocks = nn.ModuleList([])
        self.denseBlocksUp = nn.ModuleList([])
        for i in range(len(up_blocks)):
            self.transUpBlocks.append(TransitionUpBlock(n_streams, prev_block_channels))
            cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]
            # the last up block keeps its input concatenated (upsample=False)
            # so the final conv sees input + new features
            if i != len(up_blocks) - 1:
                self.denseBlocksUp.append(DenseBlock(n_streams,
                    cur_channels_count, growth_rate, up_blocks[i],
                    upsample=True))
            else:
                self.denseBlocksUp.append(DenseBlock(n_streams,
                    cur_channels_count, growth_rate, up_blocks[i],
                    upsample=False))
            prev_block_channels = growth_rate*up_blocks[i]
        cur_channels_count += prev_block_channels
        ## final convolution: all streams concatenated -> class logits
        self.finalconv = FinalConv(n_streams * cur_channels_count, n_classes)
        # self.softmax = nn.LogSoftmax(dim=1)
        _initialize_weights(self)
    def forward(self, x):
        """ Hyper input as a list of tensors """
        out = self.firstconv(x)
        # print([item.size() for item in out])
        skip_connections = []
        # encoder: save each stage's per-stream outputs before downsampling
        for i in range(len(self.down_blocks)):
            out = self.denseBlocksDown[i](out)
            skip_connections.append(out)
            out = self.transDownBlocks[i](out)
            # print([item.size() for item in out])
        out = self.bottleneck(out)
        # print([item.size() for item in out])
        # decoder consumes the skips in reverse (LIFO) order
        for i in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            out = self.transUpBlocks[i](out, skip)
            out = self.denseBlocksUp[i](out)
            # print([item.size() for item in out])
        # fuse the streams along channels for the shared classifier
        out = torch.cat(out, 1)
        out = self.finalconv(out)
        # print(out.size())
        # out = self.softmax(out)
        return out
# Layer configurations of the FC-DenseNet variants, keyed by the nominal
# network depth used in the factory names below. Collapsing the six
# near-identical factories onto this table removes the duplicated argument
# lists and makes the per-variant hyper-parameters inspectable.
FCDENSENET_CONFIGS = {
    36: dict(down_blocks=(4, 4, 4), up_blocks=(4, 4, 4),
             bottleneck_layers=4, growth_rate=12, out_chans_first_conv=32),
    43: dict(down_blocks=(5, 5, 5), up_blocks=(5, 5, 5),
             bottleneck_layers=5, growth_rate=16, out_chans_first_conv=32),
    52: dict(down_blocks=(4, 5, 7), up_blocks=(7, 5, 4),
             bottleneck_layers=12, growth_rate=16, out_chans_first_conv=32),
    57: dict(down_blocks=(4, 4, 4, 4, 4), up_blocks=(4, 4, 4, 4, 4),
             bottleneck_layers=4, growth_rate=12, out_chans_first_conv=48),
    67: dict(down_blocks=(5, 5, 5, 5, 5), up_blocks=(5, 5, 5, 5, 5),
             bottleneck_layers=5, growth_rate=16, out_chans_first_conv=48),
    103: dict(down_blocks=(4, 5, 7, 10, 12), up_blocks=(12, 10, 7, 5, 4),
              bottleneck_layers=15, growth_rate=16, out_chans_first_conv=48),
}


def _make_fcdensenet(depth, in_channel, n_classes, theta, n_streams):
    """Instantiate an FCDenseNet from the configuration table above.

    Args:
        depth: int, key into FCDENSENET_CONFIGS
        in_channel: int, channels per input stream
        n_classes: int, number of segmentation classes
        theta: float, transition-down compression ratio
        n_streams: int, number of parallel input streams
    """
    cfg = FCDENSENET_CONFIGS[depth]
    return FCDenseNet(n_streams=n_streams, in_channels=in_channel,
                      n_classes=n_classes, theta=theta, **cfg)


def FCDenseNet36(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet-36: three 4-layer blocks per path, growth rate 12."""
    return _make_fcdensenet(36, in_channel, n_classes, theta, n_streams)


def FCDenseNet43(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet-43: three 5-layer blocks per path, growth rate 16."""
    return _make_fcdensenet(43, in_channel, n_classes, theta, n_streams)


def FCDenseNet52(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet-52: asymmetric (4,5,7)/(7,5,4) blocks, 12-layer bottleneck."""
    return _make_fcdensenet(52, in_channel, n_classes, theta, n_streams)


def FCDenseNet57(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet-57: five 4-layer blocks per path, growth rate 12."""
    return _make_fcdensenet(57, in_channel, n_classes, theta, n_streams)


def FCDenseNet67(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet-67: five 5-layer blocks per path, growth rate 16."""
    return _make_fcdensenet(67, in_channel, n_classes, theta, n_streams)


def FCDenseNet103(in_channel, n_classes, theta, n_streams=2):
    """FC-DenseNet-103: (4,5,7,10,12)/(12,10,7,5,4) blocks, 15-layer bottleneck."""
    return _make_fcdensenet(103, in_channel, n_classes, theta, n_streams)
# if __name__ == "__main__":
# n_streams = 2
# in_channels = 1
# fcdensenet43 = FCDenseNet43(in_channels, n_classes=5, theta=1.0, n_streams=n_streams)
# x = [torch.FloatTensor(12, 1, 64, 64) for _ in range(n_streams)]
# y = fcdensenet43(x) | Python |
3D | kkhuang1990/PlaqueDetection | PlaqueSegmentation/train.py | .py | 25,288 | 528 | # _*_ coding: utf-8 _*_
""" define train and test functions here """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from sklearn.metrics import auc
import copy
from collections import Counter
import numpy as np
np.set_printoptions(precision=4)
from tqdm import tqdm
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
import pickle
import os.path as osp
import os
from image.models.deeplab_resnet import outS
from metric import cal_f_score, cal_f_score_slicewise, volumewise_hd95, volumewise_asd
from loss import WeightedKLDivLoss
from utils import mask2innerouterbound
from vision import plot_metrics, plaque_detection_rate, plot_class_f1
from vision import plot_slice_wise_measures, sample_seg_with_hfd
from image.models.deeplab_resnet import adjust_learning_rate, cal_loss
from medpy.metric.binary import ravd
def train_model(model, criterion, optimizer, scheduler, args):
    """ Train the model, tracking metrics per phase and keeping the best weights.

    For a stable validation result, the validation loss averaged over the last
    (up to) 5 epochs is used to select the best model weights; training stops
    early once this averaged loss has increased for 10 consecutive epochs.
    The training set is re-read every epoch (hard mining every
    `args.n_epoch_hardmining` epochs), and loss/accuracy/F1/HD95/ASD/VD curves
    are plotted every 5 epochs and once more at the end.

    Args:
        model: model inheriting from nn.Module class
        criterion: criterion class, loss function used
        optimizer: optimizer, optimization strategy
        scheduler: lr scheduler
        args: parser arguments
    """
    best_model_wts = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_loss = 1.0e9
    loss_keep = 0 # check how many times the val loss has decreased continuously
    epoch_loss_prev = 1.0e9 # loss at the previous epoch
    # Per-phase metric histories, one list entry per epoch.
    epoch_acc = {'train': [], 'val': [], 'test': []}
    epoch_f1_score = {'train': [], 'val': [], 'test': []}
    epoch_f1_score_class = {'train': [], 'val': [], 'test': []}
    epoch_loss = {'train': [], 'val': [], 'test': []}
    epoch_hdist = {'train': [], 'val': [], 'test': []}
    epoch_asd = {'train': [], 'val': [], 'test': []}
    epoch_vd = {'train': [], 'val': [], 'test': []}
    # Previous epoch's slice-wise F1 and phases, fed back for hard mining.
    metric_prev_epoch = None
    phases_prev_epoch = None
    for epoch in range(args.num_train_epochs):
        print("{}/{}".format(epoch+1, args.num_train_epochs))
        # Re-sample hard examples every n_epoch_hardmining epochs (never on the first).
        if epoch != 0 and epoch % args.n_epoch_hardmining == 0:
            is_hard_mining = True
        else:
            is_hard_mining = False
        if args.model_type == '2d':
            from image.dataloader import read_train_data
            if args.onlyrisk:
                dataloaders = read_train_data(args.data_dir, args.compose, 'train', None, None, True,
                                is_hard_mining, args.num_workers, args.batch_size, args.percentile, args.multi_view,
                                args.onlyrisk, args.config, args.bc_learning)
            else:
                dataloaders = read_train_data(args.data_dir, args.compose, 'train', metric_prev_epoch, phases_prev_epoch, True,
                                is_hard_mining, args.num_workers, args.batch_size, args.percentile, args.multi_view,
                                args.onlyrisk, args.config, args.bc_learning)
        elif args.model_type == '3d':
            from volume.dataloader import read_train_data
            dataloaders = read_train_data(args.data_dir, metric_prev_epoch, phases_prev_epoch, args.compose, 'train',
                            is_hard_mining, args.percentile, args.multi_view, args.interval, args.down_sample,
                            args.batch_size, args.num_workers, True, args.config)
        # Stop if hard mining has whittled the training set down to too few phases.
        if len(dataloaders['train'].dataset.phases) <= 20:
            break
        dataset_sizes = {'train': 0, 'val': 0, 'test': 0}
        for phase in ['train', 'val', 'test']:
            # print("processing {}".format(phase))
            if phase == 'train':
                scheduler.step()
                if args.model == 'deeplab_resnet':
                    adjust_learning_rate(optimizer, scheduler)
                model.train() # Set model to training mode
            else:
                model.eval() # Set model to evaluate mode
            if phase == 'train':
                f1_slices_epoch = []
            # Running accumulators over minibatches of this phase.
            running_loss = 0.0
            running_hdist, running_asd, running_vd = 0.0, 0.0, 0.0
            running_corrects, running_f1_score = 0.0, 0.0
            running_fscores = np.zeros(args.output_channel, dtype=np.float32)
            running_effect_samples = np.zeros(args.output_channel, dtype=np.uint32)
            running_cal_pgt, running_cal_pp, running_cal_tp = 0, 0, 0
            running_noncal_pgt, running_noncal_pp, running_noncal_tp = 0, 0, 0
            dl_pbar = tqdm(dataloaders[phase])
            for sample_inx, sample in enumerate(dl_pbar):
                # NOTE(review): advances the bar by 100 per batch — confirm intended step.
                dl_pbar.update(100)
                inputs, labels = sample
                # hyper_tiramisu batches are lists of per-stream tensors.
                patch_size = len(inputs) if args.model != 'hyper_tiramisu' else len(inputs[0])
                dataset_sizes[phase] += patch_size
                # wrap them in Variable
                if args.use_gpu:
                    if args.model == 'hyper_tiramisu':
                        inputs = [Variable(input.cuda()).float() for input in inputs]
                    else:
                        inputs = Variable(inputs.cuda()).float()
                    # BC learning uses soft (float) labels during training.
                    if phase == 'train' and args.bc_learning is not None:
                        labels = Variable(labels.cuda()).float()
                    else:
                        labels = Variable(labels.cuda()).long()
                else:
                    if args.model == 'hyper_tiramisu':
                        inputs = [Variable(input).float() for input in inputs]
                    else:
                        inputs = Variable(inputs).float()
                    if phase == 'train' and args.bc_learning is not None:
                        labels = Variable(labels).float()
                    else:
                        labels = Variable(labels).long()
                optimizer.zero_grad()
                outputs = model(inputs)
                if phase == 'train' and args.bc_learning is not None: # for bc learning
                    if args.model == 'deeplab_resnet':
                        loss = cal_loss(outputs, labels, args.criterion, args.criterion_bc)
                        outputs = outputs[-1] # max fusion output is saved
                        outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs)
                    else:
                        loss = args.criterion_bc(outputs, labels)
                else:
                    if args.model == 'deeplab_resnet':
                        loss = cal_loss(outputs, labels, args.criterion, criterion)
                        outputs = outputs[-1] # max fusion output is saved
                        outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs)
                    else:
                        if args.criterion == 'nll' and not args.mpl:
                            loss = criterion(F.log_softmax(outputs, dim=1), labels)
                        else: # dice, ce, gdl1, gdl2
                            loss = criterion(outputs, labels)
                _, preds = torch.max(outputs.data, 1)
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                # statistics
                if args.output_channel >= 5 and not args.bound_out:
                    # calculate calcified and non-calcified plaque detection rate
                    cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = plaque_detection_rate(labels, preds,
                                                                                  args.output_channel)
                    # accumulate gt, positive and true positive from each minibatch
                    running_cal_pgt += cal_pgt
                    running_cal_pp += cal_pp
                    running_cal_tp += cal_tp
                    running_noncal_pgt += noncal_pgt
                    running_noncal_pp += noncal_pp
                    running_noncal_tp += noncal_tp
                # Soft BC labels are collapsed back to hard labels for the metrics below.
                if phase == 'train' and args.bc_learning is not None:
                    _, labels = torch.max(labels, 1)
                running_loss += loss.data.item() * patch_size
                running_corrects += float(torch.sum(preds == labels.data)) / preds[0].numel()
                # calculate hd95 and asd
                preds_np, labels_np = preds.cpu().numpy(), labels.data.cpu().numpy()
                preds_bound_np = np.stack([mask2innerouterbound(pred, args.width) for pred in preds_np])
                labels_bound_np = np.stack([mask2innerouterbound(label, args.width) for label in labels_np])
                mean_hdf = volumewise_hd95(preds_bound_np , labels_bound_np, return_slicewise_hdf=False)
                mean_asd = volumewise_asd(preds_bound_np, labels_bound_np, n_classes=3)
                running_hdist += mean_hdf * patch_size
                running_asd += mean_asd * patch_size
                # calculate F1, VD
                cal_f1 = cal_f_score if args.model_type == '2d' else cal_f_score_slicewise
                _, f_scores, n_effect_samples, f1_slices_batch = cal_f1(preds_np, labels_np, n_class=args.output_channel,
                                                                    return_slice_f1=True, return_class_f1=True)
                running_fscores += f_scores
                running_effect_samples += n_effect_samples
                mean_vd = abs(ravd(preds_np, labels_np))
                running_vd += mean_vd * patch_size
                if phase == 'train':
                    f1_slices_epoch += f1_slices_batch
            dl_pbar.close()
            print()
            epoch_loss[phase].append(running_loss / dataset_sizes[phase])
            epoch_acc[phase].append(float(running_corrects) / dataset_sizes[phase])
            epoch_hdist[phase].append(running_hdist / dataset_sizes[phase])
            epoch_asd[phase].append(running_asd / dataset_sizes[phase])
            epoch_vd[phase].append(running_vd / dataset_sizes[phase])
            # NOTE(review): assumes every class had at least one effective sample this epoch.
            running_f1_class = running_fscores / running_effect_samples
            epoch_f1_score_class[phase].append(running_f1_class) # f1 score for each class
            epoch_f1_score[phase].append(running_f1_class.mean())
            print("[{:5s}({} samples)] Loss: {:.4f} Acc: {:.4f} Ave_F1: {:.4f} class-wise F1: {} Ave_hdf: {:.4f} "
                  "Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(phase, len(dataloaders[phase].dataset.phases),
                epoch_loss[phase][-1], epoch_acc[phase][-1], epoch_f1_score[phase][-1], running_f1_class,
                epoch_hdist[phase][-1], epoch_asd[phase][-1], epoch_vd[phase][-1]))
            # update metric_prev_epoch and phases_prev_epoch
            if phase == 'train':
                metric_prev_epoch = np.array(f1_slices_epoch)
                phases_prev_epoch = dataloaders['train'].dataset.phases
            # deep copy the model
            if phase == 'val':
                # Average the val loss over the last (up to) 5 epochs for stability.
                val_loss_bf = sum(epoch_loss['val'][-5:]) / len(epoch_loss['val'][-5:])
                if val_loss_bf <= best_loss:
                    best_loss = val_loss_bf
                    best_epoch = epoch
                    # be careful when assign one tensor to another
                    best_model_wts = copy.deepcopy(model.state_dict())
                    torch.save(model, args.model_save_name)
                if val_loss_bf > epoch_loss_prev:
                    loss_keep += 1
                else:
                    loss_keep = 0
                epoch_loss_prev = val_loss_bf
            if args.output_channel >= 5 and not args.bound_out:
                # calculate cal and non-cal detection rate for test data
                epoch_cal_pr = float(running_cal_tp) / running_cal_pp if running_cal_pp != 0 else 0.0
                epoch_cal_rc = float(running_cal_tp) / running_cal_pgt
                epoch_cal_f1 = 2.0 * running_cal_tp / (running_cal_pgt + running_cal_pp)
                epoch_noncal_pr = float(running_noncal_tp) / running_noncal_pp if running_noncal_pp != 0 else 0.0
                epoch_noncal_rc = float(running_noncal_tp) / running_noncal_pgt
                epoch_noncal_f1 = 2.0 * running_noncal_tp / (running_noncal_pgt + running_noncal_pp)
                print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(
                    epoch_cal_pr, epoch_cal_rc, epoch_cal_f1, epoch_noncal_pr, epoch_noncal_rc, epoch_noncal_f1))
            # plot temporal loss, acc, f1_score after test
            if (epoch+1) % 5 == 0 and phase == 'test':
                metrics = [epoch_loss, epoch_acc, epoch_f1_score, epoch_asd, epoch_vd, epoch_hdist]
                # NOTE: 'labels' here shadows the minibatch labels; these are curve names for plotting.
                labels = ['total_loss', 'pixel_acc', 'F1_score', 'asd', 'vd', 'hd95']
                plot_metrics(metrics, labels, fig_dir=args.fig_dir)
                plot_class_f1(epoch_f1_score_class, args.fig_dir)
        # Early stop: averaged val loss increased 10 epochs in a row.
        if loss_keep == 10:
            break
    # plot loss, acc, f1, asd, vd, hd95
    metrics = [epoch_loss, epoch_acc, epoch_f1_score, epoch_asd, epoch_vd, epoch_hdist]
    labels = ['total_loss', 'pixel_acc', 'F1_score', 'asd', 'vd', 'hd95']
    plot_metrics(metrics, labels, fig_dir=args.fig_dir)
    plot_class_f1(epoch_f1_score_class, args.fig_dir)
    print('Best val loss: {:4f}'.format(best_loss))
    # Restore and persist the best-performing weights.
    model.load_state_dict(best_model_wts)
    torch.save(model, args.model_save_name)
def model_reference(args, sample_stack_rows=50):
    """ Run inference with the saved best model, report metrics, and optionally plot.

    Part 1 evaluates the model stored at `args.model_save_name` on the test
    set: pixel accuracy, class-wise F1, HD95, ASD and VD, plus (for >= 5
    output channels) slice-wise plaque measures. Part 2 (when `args.do_plot`)
    re-reads `args.plot_data` without augmentation and renders the
    segmentation results.

    Args:
        args: parser arguments (model path, data and plotting options)
        sample_stack_rows: int, how many slices to plot per image
    """
    #############################################################################################
    # Part 1: model reference and metric evaluations
    #############################################################################################
    model = torch.load(args.model_save_name, map_location=lambda storage, loc: storage)
    if args.use_gpu:
        model = model.cuda()
    dataset_sizes = 0
    running_hdist, running_asd, running_vd = 0.0, 0.0, 0.0
    running_corrects, running_f1, running_dice_score = 0.0, 0.0, 0.0
    # for class-wise F1 scores
    running_num_samples_class = np.zeros(args.output_channel , dtype=np.uint32)
    running_class_f1 = np.zeros(args.output_channel, dtype=np.float32)
    if args.model_type == '2d':
        from image.dataloader import read_train_data
        dataloaders = read_train_data(args.data_dir, args.compose, 'test', None, None, True,
                        False, args.num_workers, args.batch_size, args.percentile,
                        args.multi_view, args.onlyrisk, args.config)
    elif args.model_type == '3d':
        from volume.dataloader import read_train_data
        dataloaders = read_train_data(args.data_dir, None, None, args.compose, 'test',
                        False, args.percentile, args.multi_view, args.interval, args.down_sample,
                        args.batch_size, args.num_workers, True, args.config)
    for samp_inx, sample in enumerate(dataloaders['test']):
        inputs, labels = sample
        # hyper_tiramisu batches are lists of per-stream tensors.
        patch_size = len(inputs) if args.model != 'hyper_tiramisu' else len(inputs[0])
        dataset_sizes += patch_size
        # wrap them in Variable
        if args.use_gpu:
            if args.model == 'hyper_tiramisu':
                inputs = [Variable(input.cuda()).float() for input in inputs]
            else:
                inputs = Variable(inputs.cuda()).float()
            labels = Variable(labels.cuda()).long()
        else:
            if args.model == 'hyper_tiramisu':
                inputs = [Variable(input).float() for input in inputs]
            else:
                inputs = Variable(inputs).float()
            labels = Variable(labels).long()
        outputs = model(inputs)
        if args.model == 'deeplab_resnet':
            outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs[-1])
        _, preds = torch.max(outputs.data, 1)
        # calculate seg correct and risk correct within each minibatch
        running_corrects += float(torch.sum(preds == labels.data)) / preds[0].numel()
        # calculate hd95 and asd
        preds_np, labels_np = preds.cpu().numpy(), labels.data.cpu().numpy()
        preds_bound_np = np.stack([mask2innerouterbound(pred, args.width) for pred in preds_np])
        labels_bound_np = np.stack([mask2innerouterbound(label, args.width) for label in labels_np])
        mean_hdf = volumewise_hd95(preds_bound_np, labels_bound_np, return_slicewise_hdf=False)
        mean_asd = volumewise_asd(preds_bound_np, labels_bound_np, n_classes=3)
        running_hdist += mean_hdf * patch_size
        running_asd += mean_asd * patch_size
        # calculate F1 score
        cal_f1 = cal_f_score if args.model_type == '2d' else cal_f_score_slicewise
        batch_f1, batch_class_f1, n_effect_samples = cal_f1(preds_np, labels_np, n_class=args.output_channel,
                                                        return_class_f1=True)
        running_class_f1 += batch_class_f1
        running_num_samples_class += n_effect_samples
        mean_vd = abs(ravd(preds_np, labels_np))
        running_vd += mean_vd * patch_size
        # NOTE(review): labels_all/preds_all are only bound when this branch runs
        # at least once (output_channel >= 5) — confirm for smaller channel counts.
        if args.output_channel >= 5 and not args.bound_out:
            labels_np = labels.data.cpu().numpy()
            preds_np = preds.cpu().numpy()
            if samp_inx == 0:
                labels_all = labels_np
                preds_all = preds_np
            else:
                labels_all = np.concatenate([labels_all, labels_np], axis=0) # [N, H, W]
                preds_all = np.concatenate([preds_all, preds_np], axis=0)
    # plot slice-wise measurements under different thresholds
    if args.output_channel >= 5 and not args.bound_out:
        plot_slice_wise_measures(labels_all, preds_all, args)
    epoch_acc = float(running_corrects) / dataset_sizes
    epoch_class_f1 = running_class_f1 / running_num_samples_class
    epoch_f1 = epoch_class_f1.mean()
    epoch_hdist = running_hdist / dataset_sizes
    epoch_asd = running_asd / dataset_sizes
    epoch_vd = running_vd / dataset_sizes
    # print various metrics
    print("Acc: {:.4f} Ave_F1: {:.4f} Ave_hdf: {:.4f}, Ave_ASD: {:.4f} Ave_VD: {:.4f}".format(
        epoch_acc, epoch_f1, epoch_hdist, epoch_asd, epoch_vd))
    for c_inx, each_f1 in enumerate(epoch_class_f1):
        print("Class-{}: F1-{:.4f}".format(c_inx, each_f1))
    if args.do_plot:
        ############################################################################################
        # Part 2: plot segmentation results (original note, translated from
        # Chinese: "this part is a pain — it has been rewritten countless times")
        ############################################################################################
        plot_data = args.plot_data
        # Plotting uses the non-augmented test transforms.
        args.compose[plot_data] = args.compose['test']
        if args.model_type == '2d':
            from image.dataloader import read_plot_data
            dataloaders = read_plot_data(args.data_dir, args.compose, plot_data, False, args.num_workers,
                            args.batch_size, args.multi_view, args.config)
        elif args.model_type == '3d':
            from volume.dataloader import read_plot_data
            dataloaders = read_plot_data(args.data_dir, args.compose, plot_data, args.multi_view, args.interval,
                            args.down_sample, args.num_workers, False, args.config)
        for samp_inx, sample in enumerate(dataloaders[plot_data]):
            inputs_batch, labels, sample_name, start = sample
            # convert inputs into list of tensor no matter whether 'hyper_tiramisu' model or not
            if args.model != 'hyper_tiramisu':
                inputs_batch = [inputs_batch]
            sample_name, start = sample_name[0], start.item()
            inputs_batch = [torch.squeeze(input, dim=0) for input in inputs_batch] # [N, 1, T, H, W]
            labels = torch.squeeze(labels, dim=0) # [N, T, H, W]
            patch_size = len(inputs_batch[0])
            # Run the whole sample through the model in minibatches.
            for mb_inx in range(0, patch_size, args.batch_size):
                end = min(mb_inx + args.batch_size, patch_size)
                inputs = [input[mb_inx:end] for input in inputs_batch]
                # wrap them in Variable
                if args.use_gpu:
                    inputs = [Variable(input.cuda()).float() for input in inputs]
                else:
                    inputs = [Variable(input).float() for input in inputs]
                if args.model != 'hyper_tiramisu':
                    inputs = inputs[0]
                outputs = model(inputs) # both outputs and preds are tensors
                if args.model == 'deeplab_resnet':
                    outputs = nn.Upsample(size=(inputs.size(2), inputs.size(3)), mode='bilinear')(outputs[-1])
                outputs_mb_np = outputs.data.cpu().numpy()
                _, preds = torch.max(outputs.data, 1)
                preds_mb_np = preds.cpu().numpy()
                # Allocate full-sample buffers once, on the first minibatch.
                if mb_inx == 0:
                    preds_np = np.zeros((patch_size, *(preds_mb_np[0].shape)), dtype=preds_mb_np.dtype)
                    outputs_np = np.zeros((patch_size, *(outputs_mb_np[0].shape)), dtype=outputs_mb_np.dtype)
                preds_np[mb_inx:end], outputs_np[mb_inx:end] = preds_mb_np, outputs_mb_np
            # convert into numpy
            labels_np = labels.cpu().numpy()
            if inputs_batch[0].size(1) == 1:
                inputs_np = [torch.squeeze(input, dim=1).cpu().numpy() for input in inputs_batch]
            else:
                inputs_np = [input[:, 0].cpu().numpy() for input in inputs_batch]
            # for 2D images, we can directly use it for plot, for 3D volume, transform is necessary
            if args.model_type == '3d':
                inputs_np, labels_np, preds_np = rearrange_volume(inputs_np, labels_np, preds_np, args)
            plot_seg_save_risk(labels_np, inputs_np, preds_np, start, sample_name, args.fig_dir,
                        sample_stack_rows, args.output_channel, args.width)
def rearrange_volume(inputs, labels, preds, args):
    """Flatten batched volumes and reorder slices into acquisition order.

    The first two axes (N, D) of every array are collapsed into a single slice
    axis, then the slices are permuted so that, within each window of
    `interval * down_sample` slices, the interleaved/down-sampled slices are
    grouped back together. Also stores the window size on `args.stride`.

    :param inputs: list of ndarrays (N, D, H, W)
    :param labels: ndarray (N, D, H, W)
    :param preds: ndarray (N, D, H, W)
    :return: tuple (inputs, labels, preds) with shape (N*D, H, W) arrays
    """
    def _flatten(arr):
        # Collapse (N, D, ...) -> (N*D, ...).
        return np.reshape(arr, (-1, *(arr.shape[2:])))

    inputs = [_flatten(stream) for stream in inputs]
    labels = _flatten(labels)
    preds = _flatten(preds)
    num_slices = len(inputs[0])
    # Side effect kept from the original implementation: callers may read args.stride.
    args.stride = args.down_sample * args.interval
    order = [base + offset + step * args.interval
             for base in range(0, num_slices, args.stride)
             for offset in range(args.interval)
             for step in range(args.down_sample)
             if base + offset + step * args.interval < num_slices]
    inputs = [stream[order] for stream in inputs]
    return (inputs, labels[order], preds[order])
# this part varies for different task (segmentation or bound detection)
def plot_seg_save_risk(labels, inputs, preds, start, samp_art_name, root_fig_dir, sample_stack_rows, n_class,
                       width):
    """Dump raw arrays for one sample and render its segmentation in chunks.

    A pickle containing inputs/labels/preds plus metadata is written to the
    sample's figure directory, then the slices are plotted in chunks of
    `sample_stack_rows` via `sample_seg_with_hfd`.
    """
    fig_dir = root_fig_dir + '/' + samp_art_name
    if not osp.exists(fig_dir):
        os.makedirs(fig_dir)
    # Persist raw arrays so figures can be regenerated without rerunning inference.
    dump = {'input': inputs, 'label': labels, 'pred': preds,
            'sample_name': samp_art_name, 'start': start, 'n_class': n_class, 'width': width}
    with open(osp.join(fig_dir, 'data.pkl'), 'wb') as writer:
        pickle.dump(dump, writer, protocol=pickle.HIGHEST_PROTOCOL)
    num_slices = len(inputs[0])
    print("# of slices: {}".format(num_slices)) # number of input slices
    # Plot the inputs, ground truth and predictions chunk by chunk.
    for chunk_begin in range(0, num_slices, sample_stack_rows):
        chunk_end = min(chunk_begin + sample_stack_rows, num_slices)
        # Pair the per-stream inputs slice by slice; only the first stream is drawn.
        slicewise_streams = list(zip(*(stream[chunk_begin:chunk_end] for stream in inputs)))
        data_list = [{"input": streams[0], "GT": gt, "pred": pr}
                     for streams, gt, pr in zip(slicewise_streams,
                                                labels[chunk_begin:chunk_end],
                                                preds[chunk_begin:chunk_end])]
        file_name = "{}/{:03d}".format(fig_dir, chunk_begin + start)
        sample_seg_with_hfd(data_list, rows=chunk_end - chunk_begin, start_with=0, show_every=1,
                            fig_name=file_name, start_inx=chunk_begin + start, n_class=n_class, width=width)
3D | kkhuang1990/PlaqueDetection | PlaqueSegmentation/main.sh | .sh | 3,449 | 96 | #!/bin/bash
# Launcher for the plaque-segmentation experiment: every hyper-parameter is
# collected as a shell variable below and forwarded to main.py as CLI flags.
# Boolean flags are passed as the strings 'True'/'False' (parsed by main.py).
# input/output
OUTPUT_CHANNEL=3
BOUND_OUTPUT='False'
WIDTH=1 # boundary width
#DATA_DIR="/home/mil/huang/Dataset/CPR_multiview"
DATA_DIR="/data/ugui0/antonio-t/CPR_multiview_interp2_huang"
# Experiment
EXPERIMENT="Experiment1"
SUB_FOLDER="Res-UNet_CE_3class"
# optimizer
LR_SCHEDULER='StepLR'
MOMENTUM=0.90
GAMMA=0.9
CRITERION='ce' # cross entropy loss with bound weight
W0=10.0
SIGMA=5.0 # for more sharp boundaries
IGNORE_INDEX='None'
CAL_ZEROGT='False' # whether calculate GT with all pixels equal to zero (only for dice loss)
ALPHA=0.5
OPT='Adam'
WEIGHT='True'
MOD_OUTLINE='False' # modify outline weight to put higher importance on outline
WEIGHT_TYPE='None' # what type of weight to use 'None', 'nlf' or 'mfb'
LR=0.001
STEP_SIZE=10
W_DECAY=0.0005 # almost default setting for segmentation
MPL='False'
# training
SING_GPU_ID=2
ONLY_TEST='False'
NUM_WORKERS=16
BATCH_SIZE=256 # 6 for unet/res_unet
NUM_TRAIN_EPOCHS=100
USE_PRE_TRAIN='False'
# Path to a previously trained model directory (used only when USE_PRE_TRAIN='True').
PRE_TRAIN_PATH="./Experiment14/2d_res_unet_0.001_0.90_0.9_theta-1.0-0.0_85_200_10_dice_160_96_Adam_rot-True_\
flip-True_w-True_rcp-True_rtrans-False_noise-False_ptr-False_multiview-False_shallow-False_onlyrisk-False_int-32_ds\
-2_alpha-0.5_bc-False_lr-StepLR"
PERCENTILE=100
N_EPOCH_HARDMINING=10
ONLYRISK='False'
CONFIG='config'
# pre-processing/augmentation
R_CENTRAL_CROP='True'
NOISE='False'
FLIP='True'
ROTATION='True'
RANDOM_TRANS='False'
CENTRAL_CROP=192
RESCALE=96
INTERVAL=32
DOWN_SAMPLE=2
MULTI_VIEW='False'
BC_LEARNING='False'
# models
MODEL_TYPE='2d'
MODEL='res_unet_dp'
DROP_OUT=0.0 # drop_out rate for res_unet
THETA=1.0
WITH_SHALLOW_NET='True'
# visualization
DO_PLOT='True'
PLOT_DATA='test'
# create fig_dir to save log file and generated graphs
FIG_DIR="${EXPERIMENT}/${SUB_FOLDER}"
# create $FIG_DIR if it doesn't exist
if [ ! -d "./${FIG_DIR}" ]; then
    mkdir -p ./${FIG_DIR}
fi
LOG="./${FIG_DIR}/train.`date +'%Y-%m-%d_%H-%M-%S'`.txt"
# Mirror all stdout/stderr of this script into the timestamped log file.
exec &> >(tee -a "$LOG")
echo "Logging output to $LOG"
# Pin the run to one GPU and forward every variable above as a CLI flag.
CUDA_VISIBLE_DEVICES=${SING_GPU_ID} python main.py --central_crop ${CENTRAL_CROP} --rescale $RESCALE --output_channel ${OUTPUT_CHANNEL} \
    --num_train_epochs ${NUM_TRAIN_EPOCHS} --w_decay ${W_DECAY} --lr $LR --momentum $MOMENTUM \
    --step_size ${STEP_SIZE} --gamma $GAMMA --batch_size ${BATCH_SIZE} --num_workers ${NUM_WORKERS} \
    --criterion $CRITERION --opt $OPT --data_dir ${DATA_DIR} --interval ${INTERVAL} --model_type ${MODEL_TYPE}\
    --weight $WEIGHT --only_test ${ONLY_TEST} --rotation $ROTATION --flip $FLIP --r_central_crop ${R_CENTRAL_CROP} \
    --random_trans ${RANDOM_TRANS} --noise $NOISE --use_pre_train ${USE_PRE_TRAIN} \
    --pre_train_path ${PRE_TRAIN_PATH} --fig_dir ${FIG_DIR} --onlyrisk ${ONLYRISK} \
    --with_shallow_net ${WITH_SHALLOW_NET} --do_plot ${DO_PLOT} --down_sample ${DOWN_SAMPLE}\
    --n_epoch_hardmining ${N_EPOCH_HARDMINING} --percentile ${PERCENTILE} --plot_data ${PLOT_DATA} \
    --multi_view ${MULTI_VIEW} --model ${MODEL} --theta ${THETA} --config ${CONFIG} --bc_learning ${BC_LEARNING} \
    --lr_scheduler ${LR_SCHEDULER} --weight_type ${WEIGHT_TYPE} --mpl ${MPL} --cal_zerogt ${CAL_ZEROGT} \
    --drop_out ${DROP_OUT} --ignore_index ${IGNORE_INDEX} --w0 ${W0} --sigma ${SIGMA} --bound_out ${BOUND_OUTPUT} \
    --width ${WIDTH} --mod_outline ${MOD_OUTLINE}
3D | kkhuang1990/PlaqueDetection | PlaqueSegmentation/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | PlaqueSegmentation/main.py | .py | 17,494 | 348 | # _*_ coding: utf-8 _*_
""" main code for train and test U-Net """
from __future__ import print_function
import os, sys
sys.path.append("..")
import numpy as np
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import argparse
import shutil
from loss import dice_score_slicewise, GeneralizedDiceLoss, WeightedKLDivLoss
from loss import WeightedCrossEntropy, FocalLoss, DiceLoss
from loss import MaxPoolLoss, CrossEntropyBoundLoss
import os.path as osp
from train import train_model, model_reference
from torchvision import transforms
from lr_scheduler import PolyLR
from image.models.deeplab_resnet import get_1x_lr_params_NOscale, get_10x_lr_params
from torch.optim import lr_scheduler
import matplotlib as mpl
mpl.use('Agg')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--data_dir', type=str, help="from where to read data")
parser.add_argument('--central_crop', type=int, default=160)
parser.add_argument('--rescale', type=int, default=96)
parser.add_argument('--output_channel', type=int, default=5, choices=(2, 3, 4, 5))
parser.add_argument('--num_train_epochs', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.90)
parser.add_argument('--w_decay', type=float, default=0.005)
parser.add_argument('--step_size', type=int, default=20)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--use_gpu', type=bool, default=torch.cuda.is_available())
parser.add_argument('--num_workers', type=int, default=12)
parser.add_argument('--criterion', type=str, default='nll')
parser.add_argument('--opt', type=str, default='Adam', help="optimizer")
parser.add_argument('--weight', type=lambda x: True if x.lower()=='true' else None, default=True)
parser.add_argument('--weight_type', type=lambda x: None if x.lower()=='none' else x, default=None)
parser.add_argument('--only_test', type=lambda x: x.lower()=='true')
parser.add_argument('--rotation', type=lambda x: x.lower()=='true')
parser.add_argument('--flip', type=lambda x: x.lower()=='true')
parser.add_argument('--r_central_crop', type=lambda x: x.lower()=='true')
parser.add_argument('--random_trans', type=lambda x: x.lower()=='true')
parser.add_argument('--noise', type= lambda x: x.lower()=='true', help="whether add Gaussian noise or not")
parser.add_argument('--use_pre_train', type=lambda x: x.lower()=='true')
parser.add_argument('--fig_dir', type=str, help="directory for saving segmentation results")
parser.add_argument('--pre_train_path', type=str)
parser.add_argument('--with_shallow_net', type= lambda x: x.lower()=='true')
parser.add_argument('--n_epoch_hardmining', type=int, default=15, help="every how many epochs for hard mining")
parser.add_argument('--percentile', type=int, default=85, help="how much percent samples to save for hard mining")
parser.add_argument('--plot_data', type=str, default='test', help="what data to plot")
parser.add_argument('--do_plot', type=lambda x: x.lower()=='true', help="whether plot test results or not")
parser.add_argument('--multi_view', type=lambda x: x.lower()=='true', help="whether to use multi-view inputs")
parser.add_argument('--model', type=str, choices=('tiramisu', 'unet', 'res_unet', 'hyper_tiramisu', 'deeplab_resnet',
'res_unet_dp'), help="which model to use")
parser.add_argument('--theta', type=float, help="compression ratio for DenseNet")
parser.add_argument('--onlyrisk', type=lambda x: x.lower()=='true', help="whether only use risk samples")
parser.add_argument('--interval', type=int, help="interval of slices in volume")
parser.add_argument('--down_sample', type=int, default=1, help="down sampling step")
parser.add_argument('--model_type', type=str, default='2d', help="use 2D or 3D model")
parser.add_argument('--config', type=str, default='config', help="config file name for train/val/test data split")
parser.add_argument('--alpha', type=float, default=0.5, help="ratio of false positive in generalized dice loss")
parser.add_argument('--bc_learning', type=lambda x: None if x.lower()=='false' else x, default=None)
parser.add_argument('--lr_scheduler', type=str, default='StepLR', help="learning scheduler")
parser.add_argument('--mpl', type=lambda x: x.lower()=='true', default=False, help="whether max-pooling loss or not")
parser.add_argument('--cal_zerogt', type= lambda x: x.lower() == 'true', default=False,
help= "whether calculate F1 score for case of all GT pixels are zero")
parser.add_argument('--drop_out', type=float, default=0.0,
help= "drop out rate for Res-UNet model")
parser.add_argument('--ignore_index', type=lambda x: None if x.lower()=='none' else int(x),
help= "ignore index")
parser.add_argument('--w0', type=float, default=10.0, help="bound loss amptitude")
parser.add_argument('--sigma', type=float, default=5.0, help="bound loss variance")
parser.add_argument('--bound_out', type=lambda x: x.lower()=='true', default=False,
help="whether output with bound")
parser.add_argument('--width', default=1, type=int, help="bound width")
parser.add_argument('--mod_outline', default=False, type=lambda x: x.lower()=='true',
help="whether modify outline or not")
args = parser.parse_args()
shutil.copy('./main.sh', './{}'.format(args.fig_dir)) # save current bash file for replicating experiment results
args.model_save_name = "./{}/model.pth".format(args.fig_dir)
# transforms and augmentations
if args.model_type == '2d':
from image.transforms import Gray2TripleWithBound
from image.transforms import CentralCrop, Rescale, Gray2Triple, Gray2Mask, ToTensor, Gray2Binary, Identical, HU2Gray, RandomFlip
from image.transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, HU2GrayMultiStreamToTensor
elif args.model_type == '3d':
from volume.transforms import CentralCrop, Rescale, Gray2Mask, ToTensor, Gray2Binary, Identical, HU2Gray, RandomFlip
from volume.transforms import RandomTranslate, RandomCentralCrop, AddNoise, RandomRotation, HU2GrayMultiStreamToTensor
# transforms
if args.output_channel == 2:
ToMask = Gray2Binary()
elif args.output_channel == 3:
ToMask = Gray2Triple()
elif args.output_channel == 4:
ToMask = Gray2TripleWithBound(n_classes=4, width=args.width)
elif args.output_channel == 5:
if args.bound_out:
ToMask = Gray2TripleWithBound(n_classes=5, width=args.width)
else:
ToMask = Gray2Mask()
args.compose = {'train': transforms.Compose([HU2Gray() if args.model != 'hyper_tiramisu' else Identical(),
RandomRotation() if args.rotation else Identical(),
RandomFlip() if args.flip else Identical(),
RandomCentralCrop() if args.r_central_crop else CentralCrop(args.central_crop),
Rescale((args.rescale)),
RandomTranslate() if args.random_trans else Identical(),
AddNoise() if args.noise else Identical(),
ToMask,
ToTensor() if args.model != 'hyper_tiramisu' else HU2GrayMultiStreamToTensor()]),
'test': transforms.Compose([HU2Gray() if args.model != 'hyper_tiramisu' else Identical(),
CentralCrop(args.central_crop),
Rescale(args.rescale),
ToMask,
ToTensor() if args.model != 'hyper_tiramisu' else HU2GrayMultiStreamToTensor()])}
# whether use pre_train model or not
if args.use_pre_train:
model = torch.load("{}/model.pth".format(args.pre_train_path),
map_location=lambda storage, loc: storage)
else:
args.color_channel = 3 if args.multi_view else 1
if args.model_type == '2d':
if args.model == 'unet':
if args.with_shallow_net:
from image.models.unet import UNet18 as UNet
else:
from image.models.unet import UNet28 as UNet
model = UNet(args.color_channel, args.output_channel)
elif args.model == 'res_unet':
print("res_unet is called")
if args.with_shallow_net:
from image.models.res_unet import ResUNet18 as ResUNet
else:
from image.models.res_unet import ResUNet28 as ResUNet
model = ResUNet(args.color_channel, args.output_channel)
elif args.model == 'res_unet_dp':
print("res_unet is called")
if args.with_shallow_net:
from image.models.res_unet_dp import ResUNet18 as ResUNet
else:
from image.models.res_unet_dp import ResUNet28 as ResUNet
model = ResUNet(args.color_channel, args.output_channel, args.drop_out)
elif args.model == 'tiramisu':
if args.with_shallow_net:
from image.models.tiramisu import FCDenseNet43 as FCDenseNet
else:
from image.models.tiramisu import FCDenseNet67 as FCDenseNet
model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
elif args.model == 'hyper_tiramisu':
if args.with_shallow_net:
from image.models.hyper_tiramisu import FCDenseNet43 as FCDenseNet
else:
from image.models.hyper_tiramisu import FCDenseNet67 as FCDenseNet
model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
elif args.model == 'deeplab_resnet':
from image.models.deeplab_resnet import Res_Ms_Deeplab
model = Res_Ms_Deeplab(args.color_channel, args.output_channel)
elif args.model_type == '3d':
if args.model == 'unet':
if args.with_shallow_net:
from volume.models.unet import UNet18 as UNet
else:
from volume.models.unet import UNet28 as UNet
model = UNet(args.color_channel, args.output_channel)
elif args.model == 'res_unet':
print("res_unet is called")
if args.with_shallow_net:
from volume.models.res_unet import ResUNet18 as ResUNet
else:
from volume.models.res_unet import ResUNet28 as ResUNet
model = ResUNet(args.color_channel, args.output_channel)
elif args.model == 'tiramisu':
if args.with_shallow_net:
from volume.models.tiramisu import FCDenseNet43 as FCDenseNet
else:
from volume.models.tiramisu import FCDenseNet67 as FCDenseNet
model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
elif args.model == 'hyper_tiramisu':
if args.with_shallow_net:
from volume.models.hyper_tiramisu import FCDenseNet43 as FCDenseNet
else:
from volume.models.hyper_tiramisu import FCDenseNet67 as FCDenseNet
model = FCDenseNet(args.color_channel, args.output_channel, args.theta)
# whether use gpu or not
if args.use_gpu:
model = model.cuda()
# whether introduce prior weight into loss function or not
if args.weight:
if args.weight_type is None:
if args.bound_out:
weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_bound_{}.npy'.format(args.output_channel))).float()
else:
if args.output_channel == 5:
print(os.getcwd())
weight = torch.from_numpy(np.load('../class_weights/class_weight.npy')).float()
else:
weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_{}.npy'.format(args.output_channel))).float()
if args.mod_outline:
weight[2] = weight[2] + 5.0 # manually modify the weight for outline
elif args.weight_type == 'nlf':
if args.onlyrisk:
weight = torch.from_numpy(np.load('../class_weights/nlf_weight_onlyrisk.npy')).float()
else:
weight = torch.from_numpy(np.load('../class_weights/nlf_weight_all_{}.npy'.format(args.output_channel))).float()
elif args.weight_type == 'mfb':
if args.onlyrisk:
weight = torch.from_numpy(np.load('../class_weights/mfb_weight_onlyrisk.npy')).float()
else:
weight = torch.from_numpy(np.load('../class_weights/mfb_weight_all_{}.npy'.format(args.output_channel))).float()
weight = Variable(weight.cuda())
else:
weight = args.weight # weight is None
print("weight: {}".format(weight))
# criterion
if args.criterion == 'nll':
criterion = nn.NLLLoss(weight=weight)
elif args.criterion == 'ce':
criterion = nn.CrossEntropyLoss(weight=weight)
elif args.criterion == 'dice':
criterion = DiceLoss(weight=weight, ignore_index=None, weight_type=args.weight_type, cal_zerogt=args.cal_zerogt)
elif args.criterion == 'gdl_inv_square':
criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type='inv_square',
alpha=args.alpha)
elif args.criterion == 'gdl_others_one_gt':
criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type='others_one_gt',
alpha=args.alpha)
elif args.criterion == 'gdl_others_one_pred':
criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type='others_one_pred',
alpha=args.alpha)
elif args.criterion == 'gdl_none':
criterion = GeneralizedDiceLoss(weight=weight, ignore_index=None, weight_type=None,
alpha=args.alpha)
elif args.criterion == 'focal':
criterion = FocalLoss()
elif args.criterion == 'wce':
criterion = WeightedCrossEntropy()
elif args.criterion == 'ceb': # cross entropy bound loss
criterion = CrossEntropyBoundLoss(n_classes=args.output_channel, weight=weight, ignore_index=args.ignore_index,
w0=args.w0, sigma=args.sigma)
# elif args.criterion == 'ahdf':
# criterion = AveragedHausdorffLoss(n_classes=args.output_channel)
# criterion for BC learning
if args.criterion.startswith('gdl'):
args.criterion_bc = criterion
else:
args.criterion_bc = WeightedKLDivLoss(weight=weight)
# Loss Max-Pooling
if args.mpl:
criterion = MaxPoolLoss(criterion)
# optimizer
if args.opt == 'Adam':
if args.model == 'deeplab_resnet':
optimizer = optim.Adam([{'params': get_1x_lr_params_NOscale(model), 'lr': args.lr},
{'params': get_10x_lr_params(model), 'lr': 10 * args.lr}],
lr=args.lr, weight_decay=args.w_decay)
else:
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.w_decay)
elif args.opt == 'sgd':
if args.model == 'deeplab_resnet':
optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.lr},
{'params': get_10x_lr_params(model), 'lr': 10 * args.lr}],
lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
else:
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.w_decay)
# learning schedule
if args.lr_scheduler == 'StepLR':
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
elif args.lr_scheduler == 'PolyLR':
exp_lr_scheduler = PolyLR(optimizer, max_iter=args.num_train_epochs, power=0.9)
# save args to log file
for arg in vars(args):
print("{} : {}".format(arg, getattr(args, arg)))
# plot samples used for train, val and test respectively
print("Dataset:")
for mode in ['train', 'val', 'test']:
config_file = osp.join('../configs/{}'.format(args.config), mode+'.txt')
print(mode)
with open(config_file, 'r') as reader:
for line in reader.readlines():
print(line.strip('\n'))
since = time.time()
if not args.only_test:
train_model(model, criterion, optimizer, exp_lr_scheduler, args)
# model reference
model_reference(args, sample_stack_rows=50)
time_elapsed = time.time() - since
print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60)) | Python |
3D | kkhuang1990/PlaqueDetection | datasets/pix2pix.py | .py | 3,483 | 81 | import matplotlib as mpl
mpl.use('Agg')
import os
import os.path as osp
from os import listdir
import numpy as np
import random
from skimage import io
from multiprocessing import Pool
from utils import dcm2hu, hu2gray, rgb2gray, rgb2mask, centra_crop, mask2gray
def create_image_mask_pair_cycleGAN(data_dir, des_dir, mode, prob=0.1, patch_size=256):
    """ Build paired image (domain A) / mask (domain B) samples for training cycleGAN.

    Args:
        data_dir: str, root directory holding the per-sample artery slices
        des_dir: str, destination root; pairs go under <des_dir>/A/<mode> and <des_dir>/B/<mode>
        mode: str, data split: train/val/test (also selects ./config/<mode>.txt)
        prob: float, keep probability for slices whose risk label is 0
        patch_size: int, side length of the central crop applied to each slice
    """
    with open(osp.join('./config', mode + '.txt'), 'r') as reader:
        sample_names = [line.strip('\n') for line in reader.readlines()]

    dir_a = osp.join(des_dir, 'A', mode)   # input images
    dir_b = osp.join(des_dir, 'B', mode)   # annotation masks
    for folder in (dir_a, dir_b):
        if not osp.exists(folder):
            os.makedirs(folder)

    pair_count = 0
    for name in sample_names:
        sample_root = osp.join(data_dir, name)
        for artery_name in sorted(listdir(sample_root)):
            artery_root = osp.join(sample_root, artery_name)
            img_root = osp.join(sample_root, artery_name, 'image')
            msk_root = osp.join(sample_root, artery_name, 'mask')
            # slice/annotation tiffs, skipping hidden files
            img_names = sorted(f for f in listdir(img_root)
                               if f.endswith('.tiff') and not f.startswith('.'))
            msk_names = sorted(f for f in listdir(msk_root)
                               if f.endswith('.tiff') and not f.startswith('.'))
            # per-slice risk labels aligned with the sorted file lists
            risks = np.loadtxt(osp.join(artery_root, "risk_labels.txt"), dtype=np.uint8)
            for img_name, msk_name, risk in zip(img_names, msk_names, risks):
                # draw once per slice; keep every risky slice, down-sample risk-0 slices
                coin = random.uniform(0, 1)
                if risk != 0 or coin < prob:
                    pair_count += 1
                    img = centra_crop(hu2gray(io.imread(osp.join(img_root, img_name))),
                                      patch_size=patch_size)
                    msk = centra_crop(mask2gray(io.imread(osp.join(msk_root, msk_name))),
                                      patch_size=patch_size)
                    assert img.shape == msk.shape, "slice size and label size must match with each other"
                    io.imsave(osp.join(dir_a, "{:05d}.tiff".format(pair_count)), img)
                    io.imsave(osp.join(dir_b, "{:05d}.tiff".format(pair_count)), msk)
    print("{} : {} samples".format(mode, pair_count))
def create_image_mask_pair_cycleGAN_multi_preocess(method, data_dir, des_dir, prob=0.1, patch_size=256, num_workers=24):
    """ Fan pair creation out over the train/val/test splits with a process pool.

    Args:
        method: function, the per-split worker (e.g. create_image_mask_pair_cycleGAN)
        data_dir: string, from where to read data
        des_dir: string, to where to save data
        prob: float, forwarded keep probability for risk-0 slices
        patch_size: int, forwarded central-crop size
        num_workers: int, how many processes in parallel
    """
    # one task per split, each receiving the same source/destination roots
    task_args = [(data_dir, des_dir, split, prob, patch_size)
                 for split in ('train', 'val', 'test')]
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    pool.starmap(method, task_args)
    pool.close()
3D | kkhuang1990/PlaqueDetection | datasets/multiview.py | .py | 11,942 | 240 | # _*_ coding: utf-8 _*_
""" Functions for creating dataset of multi-view slices """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from skimage import io
import pydicom as dicom
import os
import os.path as osp
from os import listdir
import numpy as np
from utils import dcm2hu
from multiprocessing import Pool
from skimage import transform
from operation import get_slice_info
from utils import rgb2gray
def create_multiview_dataset(data_dir, des_dir):
    """ create multi-view dataset with abscissa, ordinate and applicate (x, y and z) respectively
    Main steps are listed as below:
    (1) read slice information from sliceinfo.txt, which includes effective range, risk, positive remodeling,
        napkin ring sign, significant stenosis and et al
    (2) extract angle_0 and angle_90 CPR image for both input and annotation and resize into proper size
    (3) extract patches along each CPR image and resave them into corresponding directories
    :param data_dir: str, from where to read artery slices (phase directory; masks live in '<data_dir>conf')
    :param des_dir: str, to where to write multi-view slices into
    """
    print("processing {}".format(data_dir.split('/')[-3:]))
    phase = data_dir.split('/')[-1]  # NOTE(review): computed but unused below
    # load masks: annotation tiffs 'I0*.tiff' in '<data_dir>conf', sorted by numeric slice index
    mask_dir = data_dir + 'conf'
    mask_files = [file for file in listdir(mask_dir) if file.startswith('I0') and file.endswith('.tiff')]
    mask_files = sorted(mask_files, key=lambda x: int(x.split('.')[0][2:]))
    # load images: raw 'I0*' slices (dcm or tiff) directly under data_dir, same ordering
    img_dir = data_dir
    img_files = [file for file in listdir(img_dir) if file.startswith('I0')]
    img_files = sorted(img_files, key=lambda x: int(x.split('.')[0][2:]))
    if len(mask_files) > 0 and len(img_files) > 0:
        slice_info_file = osp.join(mask_dir, 'sliceinfo.txt')
        # start/end delimit the effective slice range; the remaining arrays are per-slice labels
        start, end, risks, sig_stenosises, pos_remodelings, napkin_rings = get_slice_info(slice_info_file)
        # make sure at least one sample is effective
        if start < end:
            # read dcm information from I01.dcm and angle_0.tiff file
            dcm = dicom.read_file(osp.join(data_dir, 'I01.dcm'))
            s_shape = (dcm.Rows, dcm.Columns)
            s_thick = dcm.SliceThickness
            print("slice thickness: {}".format(s_thick))
            pix_space = [float(ele) for ele in dcm.PixelSpacing]
            # physical extent of one slice (pixel spacing * pixel count per axis)
            s_spcae = [space * size for space, size in zip(pix_space, s_shape)]
            print("slice shape: {}, pixel space: {}".format(s_shape, pix_space))
            # how many axial slices span one slice's physical width
            num_slices = s_spcae[0] / s_thick
            CPR_angle0_file = osp.join(data_dir + 'conf', 'angle_0.tiff')
            CPR_angle0 = io.imread(CPR_angle0_file).astype(np.uint8)
            num_rows = len(CPR_angle0)
            print("# of rows: {}, # of slices: {}".format(num_rows, len(img_files)))
            rows_per_slice = num_rows / len(img_files)
            num_rows_patch = int(num_rows * num_slices / len(img_files))
            print("{} rows are necessary to extract a cube".format(num_rows_patch))
            # round the patch height to the nearest multiple of 32
            num_rows_patch = int(round(num_rows_patch / 32)) * 32
            # resave slices along the applicate axis
            image = np.stack([dcm2hu(dicom.read_file(osp.join(img_dir, file))) if file.endswith('.dcm')
                              else io.imread(osp.join(img_dir, file))
                              for file in img_files])
            mask = np.stack([rgb2gray(io.imread(osp.join(mask_dir, file))) for file in mask_files])
            info_range = []
            non_calcified = np.zeros_like(risks)
            calcified = np.zeros_like(risks)
            for axis_name in ['applicate', 'abscissa', 'ordinate']:
                for data_name in ['image', 'mask']:
                    des_path = osp.join(des_dir, axis_name, data_name)
                    if not osp.exists(des_path):
                        os.makedirs(des_path)
                    data = image if data_name == 'image' else mask
                    if axis_name != 'applicate':
                        # take the central row/column plane as the side-view CPR
                        if axis_name == 'abscissa':
                            cpr = data[:, s_shape[0]//2, :]
                        elif axis_name == 'ordinate':
                            cpr= data[:, :, s_shape[0]//2]
                        if data_name == 'image':
                            cpr_rescale = transform.resize(cpr, (num_rows, s_shape[0]), mode='reflect',
                                                           preserve_range=True)
                            cpr_rescale = cpr_rescale.astype(np.int16)
                            io.imsave(osp.join(des_dir, axis_name, 'cpr_image.tiff'), cpr_rescale)
                        else:
                            # order=0 (nearest-neighbour) so mask label values stay intact
                            cpr_rescale = transform.resize(cpr, (num_rows, s_shape[0]), mode='reflect',
                                                           preserve_range=True, order=0)
                            cpr_rescale = cpr_rescale.astype(np.uint8)
                            io.imsave(osp.join(des_dir, axis_name, 'cpr_mask.tiff'), cpr_rescale)
                    for s_inx in range(start, end):
                        # CPR row index corresponding to slice s_inx (+2 offset per original code)
                        c_inx = 2 + int(rows_per_slice * s_inx)
                        lower, upper = c_inx - num_rows_patch // 2, c_inx + num_rows_patch // 2
                        # only save samples with complete multiple view slices
                        if lower >= 0 and upper <= num_rows:
                            if axis_name == "applicate":
                                io.imsave(osp.join(des_path, "{:03d}.tiff".format(s_inx + 1)), data[s_inx])
                                # save slice info along applicate axis
                                if data_name == 'mask':
                                    info_range.append(s_inx - start)
                                    # gray values 76/151 flag plaque pixels (per variable naming) — confirm
                                    if np.sum(data[s_inx] == 76) != 0:
                                        non_calcified[s_inx - start] = 1
                                    if np.sum(data[s_inx] == 151) != 0:
                                        calcified[s_inx - start] = 1
                            else:
                                batch = cpr_rescale[lower:upper]
                                io.imsave(osp.join(des_path, "{:03d}.tiff".format(s_inx + 1)), batch)
            # save annotation information
            np.savetxt(osp.join(des_dir, "risk_labels.txt"), risks[info_range], fmt='%d')
            np.savetxt(osp.join(des_dir, "significant_stenosis_labels.txt"), sig_stenosises[info_range], fmt='%d')
            np.savetxt(osp.join(des_dir, "positive_remodeling_labels.txt"), pos_remodelings[info_range], fmt='%d')
            np.savetxt(osp.join(des_dir, "napkin_ring_labels.txt"), napkin_rings[info_range], fmt='%d')
            np.savetxt(osp.join(des_dir, "non_calcified_plaque_labels.txt"), non_calcified[info_range], fmt='%d')
            np.savetxt(osp.join(des_dir, "calcified_plaque_labels.txt"), calcified[info_range], fmt='%d')
def remove_redundant_slice_applicate(data_dir):
    """ remove redundant slices along applicate axis
    so that the applicate and other axes can have the same number of slices

    Deletes any applicate image/mask tiff that has no counterpart under the
    abscissa axis, then asserts the remaining count matches the label file.
    :param data_dir: str, sample directory containing 'applicate'/'abscissa' axis folders
    """
    # print("processing {}".format(data_dir.split('/')[-3:]))
    # per-slice labels define the expected number of slices after cleanup
    noncals = np.genfromtxt(osp.join(data_dir, "non_calcified_plaque_labels.txt")).astype(np.uint8)
    # for axis_name in ['applicate', 'abscissa']:
    for data_name in ['image', 'mask']:
        des_path = osp.join(data_dir, 'applicate', data_name)
        if not osp.exists(des_path):
            print("Be careful not to remove necessary files/folders")
            break
        # the abscissa folder is the reference: applicate files missing there are redundant
        src_path = des_path.replace('applicate', 'abscissa')
        if not osp.exists(src_path):
            break
        slice_files = [file for file in listdir(des_path) if file.endswith('.tiff') and not file.startswith('.')]
        slice_files = sorted(slice_files, key=lambda x: int(x.split('.')[0]))
        for file in slice_files:
            src_file_path = osp.join(src_path, file)
            des_file_path = osp.join(des_path, file)
            if not osp.exists(src_file_path):
                # destructive: removes the applicate file with no abscissa counterpart
                print("there still exists redundant file, please remove it!!!")
                os.remove(des_file_path)
        # re-list after deletion and check consistency with the label records
        slice_files = [file for file in listdir(des_path) if file.endswith('.tiff') and not file.startswith('.')]
        assert len(slice_files) == len(noncals), "number of slices should be the same as non-calcification records"
def remove_redundant_slice_applicate_multi_preocess(method, data_dir, num_workers=1):
    """ Apply `method` to every phase directory found under data_dir via a process pool.

    WARNING!!! please do not use multiple process to read data
    Args:
        method: function, worker applied to each phase path
        data_dir: string, root holding the per-sample directories
        num_workers: int, how many processes in parallel
    """
    phase_paths = []
    for sample in listdir(data_dir):
        print("Processing {}".format(sample))
        sample_root = osp.join(data_dir, sample)
        # only patient folders (prefix 'S' or 'IRB') are considered
        if not (osp.isdir(sample_root) and sample.startswith(('S', 'IRB'))):
            continue
        for phase in sorted(listdir(sample_root)):
            # skip hidden entries, exported tiffs and the 'conf' annotation folder
            if phase.startswith('.') or phase.endswith('.tiff') or phase.endswith('conf'):
                continue
            phase_paths.append(osp.join(sample_root, phase))
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    pool.map(method, phase_paths)
    pool.close()
def create_multiview_dataset_multi_preocess(method, data_dir, des_dir, num_workers=1):
    """ Dispatch multi-view dataset creation over (phase_path, des_phase_path) pairs.

    WARNING!!! please do not use multiple process to read data
    Args:
        method: function, use which method to resave the data
        data_dir: string, from where to read data
        des_dir: string, to where to save data
        num_workers: int, how many processes in parallel
    """
    # NOTE: the sample list is hard-coded to a single patient instead of scanning data_dir
    samples = ['S2189281c5_S1ff973cbc08376_20181120']
    task_args = []
    for sample in samples:
        print("Processing {}".format(sample))
        sample_root = osp.join(data_dir, sample)
        if not (osp.isdir(sample_root) and sample.startswith(('S', 'IRB'))):
            continue
        # series folders are sorted by their numeric suffix (e.g. S1, S2, ...)
        series_dirs = sorted((s for s in listdir(sample_root) if not s.startswith('.')),
                             key=lambda s: int(s[1:]))
        for ser in series_dirs:
            series_root = osp.join(sample_root, ser)
            for phase in sorted(listdir(series_root)):
                # skip hidden entries, exported tiffs and the 'conf' annotation folder
                if phase.startswith('.') or phase.endswith('.tiff') or phase.endswith('conf'):
                    continue
                task_args.append((osp.join(series_root, phase),
                                  osp.join(des_dir, sample, phase)))
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    pool.starmap(method, task_args)
    pool.close()
if __name__ == "__main__":
    # cal_slice_size()
    # CPR_extraction()
    # data_dir = "/data/ugui0/antonio-t/CPR_all"
    # data_dir = "/data/ugui0/antonio-t/01"
    # data_dir = "/data/ugui0/antonio-t/CPR_20181003"
    # current input batch of CPR data to convert
    data_dir = "/data/ugui0/antonio-t/CPR_20190108"
    # data_dir = "/data/ugui0/antonio-t/CPR_20180601/20180601"
    # data_dir = "/data/ugui0/antonio-t/CPR_20180713"
    des_dir = "/data/ugui0/antonio-t/CPR_20190108_misannotation_tmp"
    # des_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # num_workers=1: multi-process reads are explicitly warned against by the wrapper
    create_multiview_dataset_multi_preocess(create_multiview_dataset, data_dir, des_dir, num_workers=1)
    # read_window_info_multi_preocess(read_window_info, data_dir, num_workers=24)
| Python |
3D | kkhuang1990/PlaqueDetection | datasets/normal.py | .py | 4,083 | 95 | import os
import os.path as osp
from os import listdir
import numpy as np
import pydicom as dicom
from skimage import io
from multiprocessing import Pool
from utils import dcm2hu, hu2gray, rgb2gray, rgb2mask, centra_crop, mask2gray
def resave_multi_preocess(method, data_dir, des_dir, num_workers=24):
    """ resave data into desired format for faster read
    Collects (phase_path, des_phase_path) pairs from the first series of every
    patient folder under data_dir and hands them to `method` via a process pool.
    Args:
        method: function, use which method to resave the data
        data_dir: string, from where to read data
        des_dir: string, to where to save data
        num_workers: int, how many processes in parallel
    """
    args = []
    for sample in listdir(data_dir):
        print("Processing {}".format(sample))
        sample_path = osp.join(data_dir, sample)
        # BUG FIX: the original wrote any([sample.startswith(prefix)] for prefix in [...]),
        # which tests the truthiness of one-element lists and is therefore always True —
        # every directory (not just 'IRB'/'S' patient folders) fell through to series
        # parsing and could crash on int(x[1:]) / series[0].
        if osp.isdir(sample_path) and any(sample.startswith(prefix) for prefix in ('IRB', 'S')):
            series = [s for s in listdir(sample_path) if not s.startswith('.')]
            if not series:
                # robustness: a patient folder without any series is simply skipped
                continue
            series = sorted(series, key=lambda x: int(x[1:]))
            # only the first (lowest-numbered) series is resaved
            series_path = osp.join(sample_path, series[0])
            exclusions = ['.tiff', 'conf']
            phases = [phase for phase in sorted(listdir(series_path))
                      if not any([phase.endswith(ex) for ex in exclusions]) and not phase.startswith('.')]
            for phase in phases:
                phase_path = osp.join(series_path, phase)
                des_phase_path = osp.join(des_dir, sample, phase)
                args.append((phase_path, des_phase_path))
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    pool.starmap(method, args)
    pool.close()
#########################################################################################################
# resave dcm to tiff without data augmentation
def dcm2tiff_per_artery_wo_augment(data_dir, des_dir):
    """ resave dcm file into tiff image for each artery
    Args:
        data_dir: string, from where to read data (phase directory; masks live in '<data_dir>conf')
        des_dir: string, to where to write data into ('image' and 'mask' subfolders are created)
    """
    print("processing {}".format(data_dir.split('/')[-3:]))
    phase = data_dir.split('/')[-1]  # NOTE(review): computed but unused below
    # load masks: annotation tiffs 'I0*.tiff', sorted by numeric slice index
    mask_dir = data_dir + 'conf'
    mask_files = [file for file in listdir(mask_dir) if file.startswith('I0') and file.endswith('.tiff')]
    mask_files = sorted(mask_files, key=lambda x: int(x.split('.')[0][2:]))
    # load images: raw 'I0*' slices (dcm or tiff), same ordering
    img_dir = data_dir
    img_files = [file for file in listdir(img_dir) if file.startswith('I0')]
    img_files = sorted(img_files, key=lambda x: int(x.split('.')[0][2:]))
    if len(mask_files) > 0 and len(img_files) > 0:
        slice_info_file = osp.join(mask_dir, 'sliceinfo.txt')
        # NOTE(review): get_slice_info is not among this module's visible imports
        # (presumably 'from operation import get_slice_info') — confirm, otherwise NameError.
        start, end, risks, sig_stenosises, pos_remodelings, napkin_rings = get_slice_info(slice_info_file)
        # keep only the effective slice range [start, end)
        mask_files, img_files = mask_files[start:end], img_files[start:end]
        img = np.stack([dcm2hu(dicom.read_file(osp.join(img_dir, file))) if file.endswith('.dcm')
                        else io.imread(osp.join(img_dir, file))
                        for file in img_files])
        mask = np.stack([rgb2mask(io.imread(osp.join(mask_dir, file))) for file in mask_files])
        des_image_path = osp.join(des_dir, 'image')
        des_mask_path = osp.join(des_dir, 'mask')
        if not osp.exists(des_image_path):
            os.makedirs(des_image_path)
        if not osp.exists(des_mask_path):
            os.makedirs(des_mask_path)
        # file names are 1-based slice indices continuing from 'start'
        for slice, label in zip(img, mask):
            io.imsave(osp.join(des_image_path, "{:03d}.tiff".format(start + 1)), slice)
            io.imsave(osp.join(des_mask_path, "{:03d}.tiff".format(start + 1)), label)
            start += 1
        # save annotation information
        np.savetxt(osp.join(des_dir, "risk_labels.txt"), risks, fmt='%d')
        np.savetxt(osp.join(des_dir, "significant_stenosis_labels.txt"), sig_stenosises, fmt='%d')
        np.savetxt(osp.join(des_dir, "positive_remodeling_labels.txt"), pos_remodelings, fmt='%d')
        np.savetxt(osp.join(des_dir, "napkin_ring_labels.txt"), napkin_rings, fmt='%d')
3D | kkhuang1990/PlaqueDetection | datasets/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | datasets/multiview_45degree.py | .py | 12,280 | 243 | # _*_ coding: utf-8 _*_
""" Functions for creating dataset of multi-view slices """
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from skimage import io
import pydicom as dicom
import os
import os.path as osp
from os import listdir
import numpy as np
from utils import dcm2hu
from multiprocessing import Pool
from skimage import transform
from operation import get_slice_info
from utils import rgb2gray
def create_multiview_dataset(data_dir, des_dir):
    """ create multi-view dataset with abscissa, ordinate and applicate (x, y and z) respectively
    Main steps are listed as below:
    (1) read slice information from sliceinfo.txt, which includes effective range, risk, positive remodeling,
        napkin ring sign, significant stenosis and et al
    (2) extract angle_0 and angle_90 CPR image for both input and annotation and resize into proper size
    (3) extract patches along each CPR image and resave them into corresponding directories
    45-degree variant: views are taken along the diagonal (135 degree) and
    antidiagonal (45 degree) planes instead of abscissa/ordinate.
    :param data_dir: str, from where to read artery slices (phase directory; masks live in '<data_dir>conf')
    :param des_dir: str, to where to write multi-view slices into
    """
    print("processing {}".format(data_dir.split('/')[-3:]))
    phase = data_dir.split('/')[-1]  # NOTE(review): computed but unused below
    # load masks: annotation tiffs 'I0*.tiff', sorted by numeric slice index
    mask_dir = data_dir + 'conf'
    mask_files = [file for file in listdir(mask_dir) if file.startswith('I0') and file.endswith('.tiff')]
    mask_files = sorted(mask_files, key=lambda x: int(x.split('.')[0][2:]))
    # load images: raw 'I0*' slices (dcm or tiff), same ordering
    img_dir = data_dir
    img_files = [file for file in listdir(img_dir) if file.startswith('I0')]
    img_files = sorted(img_files, key=lambda x: int(x.split('.')[0][2:]))
    if len(mask_files) > 0 and len(img_files) > 0:
        slice_info_file = osp.join(mask_dir, 'sliceinfo.txt')
        start, end, risks, sig_stenosises, pos_remodelings, napkin_rings = get_slice_info(slice_info_file)
        # make sure at least one sample is effective
        if start < end:
            # read dcm information from I01.dcm and angle_0.tiff file
            dcm = dicom.read_file(osp.join(data_dir, 'I01.dcm'))
            s_shape = (dcm.Rows, dcm.Columns)
            s_thick = dcm.SliceThickness
            # print("slice thickness: {}".format(s_thick))
            pix_space = [float(ele) for ele in dcm.PixelSpacing]
            # physical extent of one slice (pixel spacing * pixel count per axis)
            s_spcae = [space * size for space, size in zip(pix_space, s_shape)]
            # print("slice shape: {}, pixel space: {}".format(s_shape, pix_space))
            num_slices = s_spcae[0] / s_thick
            CPR_angle0_file = osp.join(data_dir + 'conf', 'angle_0.tiff')
            CPR_angle0 = io.imread(CPR_angle0_file).astype(np.uint8)
            num_rows = len(CPR_angle0)
            # print("# of rows: {}, # of slices: {}".format(num_rows, len(img_files)))
            rows_per_slice = num_rows / len(img_files)
            num_rows_patch = int(num_rows * num_slices / len(img_files))
            # print("{} rows are necessary to extract a cube".format(num_rows_patch))
            # round the patch height to the nearest multiple of 32
            num_rows_patch = int(round(num_rows_patch / 32)) * 32
            # resave slices along the applicate axis
            image = np.stack([dcm2hu(dicom.read_file(osp.join(img_dir, file))) if file.endswith('.dcm')
                              else io.imread(osp.join(img_dir, file))
                              for file in img_files])
            mask = np.stack([rgb2gray(io.imread(osp.join(mask_dir, file))) for file in mask_files])
            info_range = []
            non_calcified = np.zeros_like(risks)
            calcified = np.zeros_like(risks)
            for axis_name in ['diagonal', 'antidiagonal']: # diagonal -- 135 degree, antidiagonal -- 45 degree
                for data_name in ['image', 'mask']:
                    des_path = osp.join(des_dir, axis_name, data_name)
                    if not osp.exists(des_path):
                        os.makedirs(des_path)
                    data = image if data_name == 'image' else mask
                    # NOTE(review): axis_name is never 'applicate' in this variant, so this
                    # condition is always True and the applicate branch further down is dead code
                    if axis_name != 'applicate':
                        if axis_name == 'diagonal': # 135 degree
                            cpr = np.stack([np.diagonal(slice) for slice in data])
                            # print("cpr with size: {}".format(cpr.shape))
                        elif axis_name == 'antidiagonal':
                            cpr = np.stack([np.diagonal(np.rot90(slice)) for slice in data])
                            # print("cpr with size: {}".format(cpr.shape))
                        if data_name == 'image':
                            cpr_rescale = transform.resize(cpr, (num_rows, s_shape[0]), mode='reflect',
                                                           preserve_range=True)
                            cpr_rescale = cpr_rescale.astype(np.int16)
                            io.imsave(osp.join(des_dir, axis_name, 'cpr_image.tiff'), cpr_rescale)
                        else:
                            # order=0 (nearest-neighbour) so mask label values stay intact
                            cpr_rescale = transform.resize(cpr, (num_rows, s_shape[0]), mode='reflect',
                                                           preserve_range=True, order=0)
                            cpr_rescale = cpr_rescale.astype(np.uint8)
                            io.imsave(osp.join(des_dir, axis_name, 'cpr_mask.tiff'), cpr_rescale)
                    for s_inx in range(start, end):
                        # CPR row index corresponding to slice s_inx (+2 offset per original code)
                        c_inx = 2 + int(rows_per_slice * s_inx)
                        lower, upper = c_inx - num_rows_patch // 2, c_inx + num_rows_patch // 2
                        # only save samples with complete multiple view slices
                        if lower >= 0 and upper <= num_rows:
                            if axis_name == "applicate":
                                io.imsave(osp.join(des_path, "{:03d}.tiff".format(s_inx + 1)), data[s_inx])
                                # save slice info along applicate axis
                                if data_name == 'mask':
                                    info_range.append(s_inx - start)
                                    if np.sum(data[s_inx] == 76) != 0:
                                        non_calcified[s_inx - start] = 1
                                    if np.sum(data[s_inx] == 151) != 0:
                                        calcified[s_inx - start] = 1
                            else:
                                batch = cpr_rescale[lower:upper]
                                io.imsave(osp.join(des_path, "{:03d}.tiff".format(s_inx + 1)), batch)
            # save annotation information
            # np.savetxt(osp.join(des_dir, "risk_labels.txt"), risks[info_range], fmt='%d')
            # np.savetxt(osp.join(des_dir, "significant_stenosis_labels.txt"), sig_stenosises[info_range], fmt='%d')
            # np.savetxt(osp.join(des_dir, "positive_remodeling_labels.txt"), pos_remodelings[info_range], fmt='%d')
            # np.savetxt(osp.join(des_dir, "napkin_ring_labels.txt"), napkin_rings[info_range], fmt='%d')
            # np.savetxt(osp.join(des_dir, "non_calcified_plaque_labels.txt"), non_calcified[info_range], fmt='%d')
            # np.savetxt(osp.join(des_dir, "calcified_plaque_labels.txt"), calcified[info_range], fmt='%d')
def remove_redundant_slice_applicate(data_dir):
    """ remove redundant slices along applicate axis
    so that the applicate and other axes can have the same number of slices

    Deletes any applicate image/mask tiff that has no counterpart under the
    abscissa axis, then asserts the remaining count matches the label file.
    :param data_dir: str, sample directory containing 'applicate'/'abscissa' axis folders
    """
    # print("processing {}".format(data_dir.split('/')[-3:]))
    # per-slice labels define the expected number of slices after cleanup
    noncals = np.genfromtxt(osp.join(data_dir, "non_calcified_plaque_labels.txt")).astype(np.uint8)
    # for axis_name in ['applicate', 'abscissa']:
    for data_name in ['image', 'mask']:
        des_path = osp.join(data_dir, 'applicate', data_name)
        if not osp.exists(des_path):
            print("Be careful not to remove necessary files/folders")
            break
        # the abscissa folder is the reference: applicate files missing there are redundant
        src_path = des_path.replace('applicate', 'abscissa')
        if not osp.exists(src_path):
            break
        slice_files = [file for file in listdir(des_path) if file.endswith('.tiff') and not file.startswith('.')]
        slice_files = sorted(slice_files, key=lambda x: int(x.split('.')[0]))
        for file in slice_files:
            src_file_path = osp.join(src_path, file)
            des_file_path = osp.join(des_path, file)
            if not osp.exists(src_file_path):
                # destructive: removes the applicate file with no abscissa counterpart
                print("there still exists redundant file, please remove it!!!")
                os.remove(des_file_path)
        # re-list after deletion and check consistency with the label records
        slice_files = [file for file in listdir(des_path) if file.endswith('.tiff') and not file.startswith('.')]
        assert len(slice_files) == len(noncals), "number of slices should be the same as non-calcification records"
def remove_redundant_slice_applicate_multi_preocess(method, data_dir, num_workers=1):
    """ Apply `method` to every phase directory under `data_dir` in parallel.

    Collects all phase directories of every sample folder (folders named
    'S*' or 'IRB*'), then dispatches `method` over them with a process pool.

    Args:
        method: function, callable applied to each phase directory path
            (must be picklable for multiprocessing)
        data_dir: string, root directory holding the sample folders
        num_workers: int, how many processes in parallel
    """
    args = []
    for sample in listdir(data_dir):
        print("Processing {}".format(sample))
        sample_path = osp.join(data_dir, sample)
        # sample folders follow the 'S*' / 'IRB*' naming convention
        if osp.isdir(sample_path) and (sample.startswith('S') or sample.startswith('IRB')):
            exclusions = ['.tiff', 'conf']
            phases = [phase for phase in sorted(listdir(sample_path))
                      if not any([phase.endswith(ex) for ex in exclusions]) and not phase.startswith('.')]
            for phase in phases:
                args.append(osp.join(sample_path, phase))
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    pool.map(method, args)
    pool.close()
    pool.join()  # wait for all workers to finish before returning
def create_multiview_dataset_multi_preocess(method, data_dir, des_dir, num_workers=1):
    """ Resave data into the multi-view format in parallel.

    Collects (source phase dir, destination phase dir) pairs for every series
    of every 'IRB*' sample and dispatches `method` over them with a pool.

    Args:
        method: function, callable taking (phase_path, des_phase_path); must be
            picklable for multiprocessing
        data_dir: string, from where to read data
        des_dir: string, to where to save data
        num_workers: int, how many processes in parallel
    """
    args = []
    for sample in listdir(data_dir):
        sample_path = osp.join(data_dir, sample)
        if osp.isdir(sample_path) and sample.startswith('IRB'):
            print("Processing {}".format(sample))
            # series folders are named like 'S<number>'; sort numerically
            series = [s for s in listdir(sample_path) if not s.startswith('.')]
            series = sorted(series, key=lambda x: int(x[1:]))
            for ser in series:
                series_path = osp.join(sample_path, ser)
                exclusions = ['.tiff', 'conf']
                phases = [phase for phase in sorted(listdir(series_path))
                          if not any([phase.endswith(ex) for ex in exclusions]) and not phase.startswith('.')]
                for phase in phases:
                    phase_path = osp.join(series_path, phase)
                    # NOTE: destination intentionally flattens the series level
                    des_phase_path = osp.join(des_dir, sample, phase)
                    args.append((phase_path, des_phase_path))
    pool = Pool(processes=num_workers)
    print("{} CPUs are used".format(num_workers))
    pool.starmap(method, args)
    pool.close()
    pool.join()  # wait for all workers to finish before returning
if __name__ == "__main__":
    # Entry point: build the multi-view dataset from the full CPR export.
    # The commented-out lines below are alternative pipeline steps and
    # source/destination roots kept for reference.
    # cal_slice_size()
    # CPR_extraction()
    # data_dir = "/data/ugui0/antonio-t/CPR_all"
    # data_dir = "/data/ugui0/antonio-t/01"
    # data_dir = "/data/ugui0/antonio-t/CPR_20181003"
    data_dir = "/data/ugui0/antonio-t/CPR_all"
    # data_dir = "/data/ugui0/antonio-t/CPR_20180601/20180601"
    # data_dir = "/data/ugui0/antonio-t/CPR_20180713"
    des_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # des_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # NOTE(review): create_multiview_dataset is defined elsewhere in this file
    # (not visible in this chunk); it is applied to each phase directory.
    create_multiview_dataset_multi_preocess(create_multiview_dataset, data_dir, des_dir, num_workers=1)
    # read_window_info_multi_preocess(read_window_info, data_dir, num_workers=24)
| Python |
3D | kkhuang1990/PlaqueDetection | hybrid/dataloader_debug.py | .py | 14,978 | 342 | # _*_ coding: utf-8 _*_
""" Dataloader used for debug
Here we want to check whether each time the same order of slices can be ensured by
setting the random seed as fixed
"""
import matplotlib as mpl
mpl.use('Agg')
import os
import os.path as osp
from os import listdir
import random
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import time
from skimage import io
from skimage import transform
from .transforms import ToTensor, RandomCrop, GaussianCrop, HU2Gray, CentralCrop, Gray2Binary, Gray2Mask
from .transforms import RandomRotation, RandomFlip, RandomCentralCrop, Rescale
from .transforms import Gray2InnerOuterBound
from torchvision import transforms
from vision import sample_stack
class CPRPlaqueTrainDataset(Dataset):
    """ Debug variant of the train/validation dataset.

    Instead of loading voxel data, __getitem__ returns only an abbreviated
    path string "<sample>/<artery>/<start-index>", so the order in which
    sub-volumes are served can be logged and compared across runs.
    """
    def __init__(self, data_dir, mode = 'train', config='config', interval=15, down_sample=1):
        """ Enumerate every sub-volume start position of the given split. """
        super(CPRPlaqueTrainDataset, self).__init__()
        self.data_dir = data_dir
        self.mode = mode
        self.config = config
        self.interval = interval
        self.down_sample = down_sample
        # number of raw slices spanned by one sub-volume
        self.slice_range = self.interval * self.down_sample
        self.phases = self.get_phases()

    def __len__(self):
        return len(self.phases)

    def get_phases(self):
        """ Build the list of (image_path, mask_path, start_index) tuples. """
        split_file = osp.join('../configs/{}'.format(self.config), self.mode + '.txt')
        with open(split_file, 'r') as reader:
            sample_names = [line.strip('\n') for line in reader.readlines()]
        phases = []
        for sample_name in sample_names:
            sample_path = osp.join(self.data_dir, sample_name)
            for artery in sorted(listdir(sample_path)):
                image_path = osp.join(sample_path, artery, 'ordinate', 'image')
                mask_path = osp.join(sample_path, artery, 'ordinate', 'mask')
                # slice files are named by their integer index, e.g. '017.tiff'
                tiffs = sorted(f for f in listdir(image_path)
                               if f.endswith('.tiff') and not f.startswith('.'))
                first, last = int(tiffs[0].split('.')[0]), int(tiffs[-1].split('.')[0])
                # every start index whose sub-volume still fits inside [first, last]
                for start in range(first, last + 1 - self.slice_range + self.down_sample):
                    phases.append((image_path, mask_path, start))
        print("{} : {} samples".format(self.mode, len(phases)))
        return phases

    def __getitem__(self, inx):
        """ Return "<sample>/<artery>/<start>" for the indexed sub-volume. """
        image_path, mask_path, start = self.phases[inx]
        parts = image_path.split('/')
        # .../<sample>/<artery>/ordinate/image  ->  "<sample>/<artery>/<start>"
        return '/'.join([parts[-4], parts[-3], str(start)])
class CPRPlaqueTestDataset(Dataset):
    """ dataloader for test dataset
    the whole artery is extracted with given stride along applicate axis, then divided into mini-batches for test
    """
    def __init__(self, data_dir, transform = None, mode = 'train', multi_view = False, interval=32, down_sample=1,
                 config='config'):
        """ read images from img_dir and save them into a list

        Args:
            data_dir: string, root directory of the per-sample image/mask folders
            transform: callable applied to each (image, mask) sub-volume pair
            mode: string, which split file under ../configs/<config>/ to read
            multi_view: bool, stack applicate/abscissa/ordinate views as channels
            interval: int, number of slices per sub-volume
            down_sample: int, take every `down_sample`-th slice
            config: string, sub-folder of ../configs holding the split files
        """
        super(CPRPlaqueTestDataset, self).__init__()
        self.interval = interval
        self.mode = mode
        self.data_dir = data_dir
        self.transform = transform
        self.down_sample = down_sample
        self.multi_view = multi_view # whether to use multi-view inputs or not
        # number of raw slices covered by one sub-volume
        self.stride = self.interval * self.down_sample
        self.config = config
        self.phases = self.get_phases()
    def get_phases(self): # number of arteries
        """ Collect one (image_path, mask_path) pair per artery of the split. """
        phases = []
        with open(osp.join('../configs/{}'.format(self.config), self.mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            sample_path = osp.join(self.data_dir, sample)
            for artery in sorted(listdir(sample_path)):
                # artery_path = osp.join(sample_path, artery)
                image_path = osp.join(sample_path, artery, 'ordinate', 'image')
                mask_path = osp.join(sample_path, artery, 'ordinate', 'mask')
                phases.append((image_path, mask_path))
        print("{} : {} samples".format(self.mode, len(phases)))
        return phases
    def __len__(self):
        return len(self.phases)
    def __getitem__(self, inx):
        """ Load one whole artery as a stack of overlapping sub-volumes.

        Returns:
            (sample_img, sample_mask, sample_name, start): all sub-volumes of
            the artery as a float tensor, the central mask slice of each
            sub-volume as a long tensor, the "<sample>/<artery>" name, and the
            first slice index.
        """
        sample = self.phases[inx]
        image_path, mask_path = sample
        sample_name = '/'.join(image_path.split('/')[-4:-2])
        # extract slice files; names carry the slice index, e.g. '001.tiff'
        slice_files = sorted(
            [file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
        start_file, end_file = slice_files[0], slice_files[-1]
        start, end = int(start_file.split('.')[0]), int(end_file.split('.')[0])
        n_sample = len(range(start, end + 1 - self.stride + self.down_sample)) # num of samples
        for s_inx in range(start, end + 1 - self.stride + self.down_sample):
            # whether multi-view data or not
            if self.multi_view:
                axis_names = ['applicate', 'abscissa', 'ordinate']
            else:
                axis_names = ['applicate']
            # read multi-view slices and concatenate them together
            for a_inx, axis_name in enumerate(axis_names):
                image_path_axis = image_path.replace('ordinate', axis_name)
                mask_path_axis = mask_path.replace('ordinate', axis_name)
                slice_files_axis = [osp.join(image_path_axis, "{:03d}.tiff".format(i))
                                    for i in range(s_inx, s_inx + self.stride, self.down_sample)]
                label_files_axis = [osp.join(mask_path_axis, "{:03d}.tiff".format(i))
                                    for i in range(s_inx, s_inx + self.stride, self.down_sample)]
                image_axis = np.stack([io.imread(slice_file) for slice_file in slice_files_axis])
                mask_axis = np.stack([io.imread(label_file) for label_file in label_files_axis])
                if axis_name == 'applicate':
                    # the applicate view defines the reference size and supplies the mask
                    new_d, new_h, new_w = image_axis.shape
                    image = np.zeros((*image_axis.shape, len(axis_names)), dtype=np.int16)
                    image[:, :, :, a_inx] = image_axis
                    mask = mask_axis
                else:
                    # if slice size doesn't match with each other, resize them into the same as applicate slice
                    for slice_inx in range(new_d):
                        slice_axis = image_axis[slice_inx]
                        if slice_axis.shape != (new_h, new_w):
                            slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                          preserve_range=True).astype(np.int16)
                        image[slice_inx, :, :, a_inx] = slice_axis
            image, mask = self.transform((image, mask))
            mask = mask[self.interval // 2] # only remain the central slice
            if s_inx == start: # the first mini-batch
                # allocate output tensors lazily, once transformed shapes are known
                # NOTE(review): in the list branch `image.size()` would raise
                # AttributeError on a plain list — looks dead/untested; confirm.
                if isinstance(image, list): # for Hyper DenseNet (this part can be omitted)
                    sample_img = [torch.zeros([n_sample, *list(image.size())]).float() for _ in range(len(image))]
                else:
                    sample_img = torch.zeros([n_sample, *list(image.size())]).float()
                sample_mask = torch.zeros([n_sample, *list(mask.size())]).long()
            if isinstance(image, list):
                for i in range(len(image)):
                    sample_img[i][s_inx-start] = image[i]
            else:
                sample_img[s_inx - start] = image
            sample_mask[s_inx - start] = mask
        return (sample_img, sample_mask, sample_name, start)
def read_train_data(data_dir, metric_prev_epoch = None, phases_prev_epoch = None, transform = None, mode = 'train',
                    is_hard_mining = False, percentile = 85, multi_view = False, interval=32, down_sample=1,
                    batch_size= 32, num_workers= 12, shuffle=True, config='config'):
    """ Build one DataLoader per split for the debug dataset.

    NOTE: the debug CPRPlaqueTrainDataset in this module only accepts
    (data_dir, mode, config, interval, down_sample); the remaining parameters
    are kept for interface compatibility with the non-debug dataloader and are
    unused here. The original code forwarded all eleven positional arguments,
    which raised TypeError on every call.
    """
    dataloaders = {}
    phases = ['train', 'val', 'test'] if mode=='train' else ['test']
    # guard: transform defaults to None and is unused by the debug dataset
    if transform is not None:
        transform['val'] = transform['test']
    for phase in phases:
        cprplaque = CPRPlaqueTrainDataset(data_dir, mode=phase, config=config,
                                          interval=interval, down_sample=down_sample)
        dataloaders[phase] = DataLoader(dataset=cprplaque, shuffle=shuffle,
                                        num_workers=num_workers, batch_size= batch_size)
    return dataloaders
def read_plot_data(data_dir, transform, plot_data, multi_view=False, interval=16, down_sample=1,
                   num_workers= 16, shuffle=False, config='config'):
    """ Build a single batch-size-1 DataLoader over whole arteries for plotting.

    Returns a dict mapping `plot_data` (the split name) to its DataLoader.
    """
    # plotting reuses the deterministic test-time transform for 'val'
    transform['val'] = transform['test']
    dataset = CPRPlaqueTestDataset(data_dir, transform[plot_data], plot_data, multi_view, interval, down_sample, config)
    return {plot_data: DataLoader(dataset=dataset, shuffle=shuffle,
                                  num_workers=num_workers, batch_size=1)}
def debug_dataloader(mode='train', config='config', shuffle=True, num_workers=16, batch_size=4):
    """ Record the order in which the dataloader serves samples.

    Iterates the debug dataset (which yields only abbreviated path strings) and
    writes all served paths to ./data_samples/<random>.txt, so two runs can be
    diffed to check whether a fixed seed really reproduces the slice order.
    """
    since = time.time()
    # def worker_init_fn(worker_id):
    #     np.random.seed(np.random.get_state()[1][0] + worker_id)
    # Please refer to https://qiita.com/chat-flip/items/4c0b71a7c0f5f6ae437f
    torch.manual_seed(42) # for shuffle=True
    data_dir = "/data/ugui0/antonio-t/CPR_multiview_interp2_huang"
    cprplaque = CPRPlaqueTrainDataset(data_dir, mode=mode, config=config)
    # workers are seeded by their worker id to make per-worker RNG deterministic
    dataloader = DataLoader(dataset=cprplaque, shuffle=shuffle,
                            num_workers=num_workers, batch_size=batch_size,
                            worker_init_fn=lambda x: random.seed(x))
    paths_abbrev = []
    paths_dir = "./data_samples"
    if not osp.exists(paths_dir):
        os.makedirs(paths_dir)
    for i, sample in enumerate(dataloader):
        # each batch is a sequence of path strings; flatten into one list
        path_abbrev = list(sample)
        paths_abbrev += path_abbrev
    # random file name so successive runs do not overwrite each other
    with open(osp.join(paths_dir, "{}.txt".format(random.randint(1, 1000))), 'w') as writer:
        for line in paths_abbrev:
            writer.write("%s\n" % line)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
def show_train_dataloader():
    """ Visualize data samples to verify the correctness of the dataloader.

    Builds the 'test' plot dataloader (see the commented-out read_train_data
    call for the train/val variant), dumps the first image/mask volume of each
    batch under ./data_samples/<i>/ and accumulates per-class pixel counts.

    Fixes over the original: the loop iterated hard-coded ['train', 'val']
    although read_plot_data returns only {'test': ...} (KeyError), and unpacked
    a 2-tuple although CPRPlaqueTestDataset yields a 4-tuple (ValueError).
    """
    since = time.time()
    # data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    data_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # data_dir = "/Users/AlbertHuang/CT_Anomaly_Detection/Plaque_CPR/20180213"
    trans_params = {
        'central_crop' : 192,
        'random_crop' : (96, 96),
        'rescale' : (96, 96),
        'output_channel' : 2
    }
    composed = {'train': transforms.Compose([HU2Gray(),
                                             RandomCentralCrop(),
                                             RandomRotation(),
                                             RandomFlip(),
                                             Rescale(trans_params['rescale']),
                                             Gray2InnerOuterBound() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                             ToTensor()]),
                'test': transforms.Compose([HU2Gray(),
                                            CentralCrop(160),
                                            Rescale(trans_params['rescale']),
                                            Gray2InnerOuterBound() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                            ToTensor()])}
    # dataloaders = read_train_data(data_dir, None, None, composed, 'train', False, 85, True, interval=32,
    #                               down_sample=1, batch_size=8, num_workers=8, shuffle=True)
    dataloaders = read_plot_data(data_dir, composed, 'test', False, interval=15, down_sample=1, num_workers=8, shuffle=False)
    num_pixel = np.zeros(5, dtype=np.uint32)
    for inx in range(1):
        # iterate only the splits the dataloader dict actually contains
        datasizes = {phase: 0 for phase in dataloaders}
        for phase in dataloaders:
            for i, sample in enumerate(dataloaders[phase]):
                # CPRPlaqueTestDataset yields (image, mask, sample_name, start)
                image, mask = sample[0], sample[1]
                # NOTE(review): the plot loader keeps a leading batch dim of 1;
                # show_plot_dataloader squeezes it first — consider doing the same.
                print("image size: {}".format(image.size()))
                print("mask size: {}".format(mask.size()))
                if image.size(1) == 1:
                    image_np = image.squeeze(1).numpy()
                else:
                    image_np = image[:, 0, ::].numpy()
                mask_np = mask.numpy()
                img_dir = "./data_samples/{}".format(i)
                if not osp.exists(img_dir):
                    os.makedirs(img_dir)
                image_name = "./data_samples/{}/image".format(i)
                mask_name = "./data_samples/{}/mask".format(i)
                sample_stack(image_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=image_name)
                sample_stack(mask_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=mask_name)
                datasizes[phase] += image.size(0)
                # per-class pixel histogram (l_inx avoids shadowing the batch index i)
                for l_inx, label in enumerate(mask.numpy()):
                    for j in range(trans_params['output_channel']):
                        num_pixel[j] += np.sum(label == j)
    # class_freq = num_pixel / num_pixel.sum()
    # class_weight = np.median(class_freq) / class_freq
    # print("median frequency balancing: {}".format(class_weight))
    # np.save("./class_weight_mfb.npy", class_weight)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
if __name__ == "__main__":
    # NOTE(review): ResUNet18 is neither defined nor imported in this module,
    # so this smoke test raises NameError at runtime; it looks pasted in from a
    # model file. Presumably this entry point should call debug_dataloader()
    # instead — confirm before running.
    in_channels = 1
    out_channels = 2
    unet = ResUNet18(in_channels, out_channels, p=0.0)
    print(unet)
    x = torch.FloatTensor(6, 1, 31, 96, 96) # the smallest patch size is 16 * 16
    y = unet(x)
show_plot_dataloader() | Python |
3D | kkhuang1990/PlaqueDetection | hybrid/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | hybrid/dataloader.py | .py | 20,977 | 452 | # _*_ coding: utf-8 _*_
""" functions used to load images and masks """
import matplotlib as mpl
mpl.use('Agg')
import os
import os.path as osp
from os import listdir
import random
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import time
from skimage import io
from skimage import transform
from .transforms import ToTensor, RandomCrop, GaussianCrop, HU2Gray, CentralCrop, Gray2Binary, Gray2Mask
from .transforms import RandomRotation, RandomFlip, RandomCentralCrop, Rescale
from .transforms import Gray2InnerOuterBound
from torchvision import transforms
from vision import sample_stack
# Seed torch's global RNG at import time so DataLoader shuffling is reproducible.
torch.manual_seed(42) # for shuffle=True
class CPRPlaqueTrainDataset(Dataset):
    """ dataloader of train and validation dataset.
    Patches are randomly extracted within the central part of given volume
    """
    def __init__(self, data_dir, metric_prev_epoch = None, phases_prev_epoch = None, transform = None, mode = 'train',
                 is_hard_mining = False, percentile = 85, multi_view = False, interval=32, down_sample=1, config='config'):
        """ read images from img_dir and save them into a list
        Args:
            data_dir: string, from where to read image
            metric_prev_epoch: numpy ndarray, per-sample metric obtained from the previous epoch
            phases_prev_epoch: list, phases of the previous epoch
            transform: transform, what transforms to operate on input images
            mode: string, which split file under ../configs/<config>/ to read
            is_hard_mining: bool, whether to use hard mining or not
            percentile: int, keep samples whose metric is <= this percentile when hard mining
            multi_view: bool, whether to use multi_view input or not
            interval: int, how many slices in one batch volume
            down_sample: int, down sampling rate (every how many slices)
            config: string, sub-folder of ../configs holding the split files
        """
        super(CPRPlaqueTrainDataset, self).__init__()
        self.interval = interval
        self.mode = mode
        self.data_dir = data_dir
        self.transform = transform
        self.down_sample = down_sample
        self.is_hard_mining = is_hard_mining
        self.percentile = percentile
        self.multi_view = multi_view # whether to use multi-view inputs or not
        # number of raw slices spanned by one sub-volume
        self.slice_range = self.interval * self.down_sample
        self.config = config
        # initialize phases for different modes: only training applies hard mining
        if self.mode == 'train':
            self.phases = self.update_phases(metric_prev_epoch, phases_prev_epoch)
        else:
            self.phases = self.get_phases()
    def update_phases(self, metric_prev_epoch, phases_prev_epoch):
        """ update the phases by mining the bad samples
        :return: phases: refined phases after mining the bad samples
        """
        if phases_prev_epoch is None:
            # first epoch: enumerate everything from disk
            phases = self.get_phases()
        else:
            if self.is_hard_mining:
                # keep only the samples whose metric is at or below the percentile
                # threshold (i.e. the "hard" ones)
                thres = np.percentile(metric_prev_epoch, self.percentile)
                phases = [phase for phase, metric in zip(phases_prev_epoch, metric_prev_epoch) if metric <= thres]
            else:
                phases = phases_prev_epoch
        return phases
    def __len__(self):
        return len(self.phases)
    def get_phases(self):
        """ Enumerate (image_path, mask_path, start_index) for every sub-volume
        of every artery listed in the split file. """
        phases = []
        with open(osp.join('../configs/{}'.format(self.config), self.mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            sample_path = osp.join(self.data_dir, sample)
            for artery in sorted(listdir(sample_path)):
                # artery_path = osp.join(sample_path, artery)
                image_path = osp.join(sample_path, artery, 'ordinate', 'image')
                mask_path = osp.join(sample_path, artery, 'ordinate', 'mask')
                # extract slice files; names carry the slice index, e.g. '001.tiff'
                slice_files = sorted(
                    [file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
                start_file, end_file = slice_files[0], slice_files[-1]
                start, end = int(start_file.split('.')[0]), int(end_file.split('.')[0])
                # every start index whose sub-volume still fits inside [start, end]
                for s_inx in range(start, end + 1 - self.slice_range + self.down_sample):
                    phases.append((image_path, mask_path, s_inx))
        print("{} : {} samples".format(self.mode, len(phases)))
        return phases
    def __getitem__(self, inx):
        """ Load one sub-volume (optionally multi-view) and its central mask slice.

        Returns:
            (sample_img, sample_central_mask): the transformed sub-volume and
            the mask slice at the volume's centre.
        """
        sample = self.phases[inx]
        image_path, mask_path, rand_inx = sample
        if self.multi_view:
            axis_names = ['applicate', 'abscissa', 'ordinate']
        else:
            axis_names = ['applicate']
        for a_inx, axis_name in enumerate(axis_names):
            # per-axis folders share the same layout; swap the axis name in the path
            image_path_axis = image_path.replace('ordinate', axis_name)
            mask_path_axis = mask_path.replace('ordinate', axis_name)
            slice_files_axis = [osp.join(image_path_axis, "{:03d}.tiff".format(i))
                                for i in range(rand_inx, rand_inx + self.slice_range, self.down_sample)]
            label_files_axis = [osp.join(mask_path_axis, "{:03d}.tiff".format(i))
                                for i in range(rand_inx, rand_inx + self.slice_range, self.down_sample)]
            image_axis = np.stack([io.imread(slice_file) for slice_file in slice_files_axis])
            mask_axis = np.stack([io.imread(label_file) for label_file in label_files_axis])
            if axis_name == 'applicate':
                # the applicate view defines the reference size and supplies the mask
                new_d, new_h, new_w = image_axis.shape
                image = np.zeros((*image_axis.shape, len(axis_names)), dtype=np.int16)
                image[:, :, :, a_inx] = image_axis
                mask = mask_axis
            else:
                # if slice size doesn't match with each other, resize them into the same as applicate slice
                for s_inx in range(new_d):
                    slice_axis = image_axis[s_inx]
                    if slice_axis.shape != (new_h, new_w):
                        slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                      preserve_range=True).astype(np.int16)
                    image[s_inx, :, :, a_inx] = slice_axis
        # transform 3D image and mask
        # here we have to define volume transform for image and 2D transform for mask
        sample_img, sample_mask = self.transform((image, mask))
        sample_central_mask = sample_mask[self.interval//2]
        return (sample_img, sample_central_mask)
class CPRPlaqueTestDataset(Dataset):
    """ dataloader for test dataset
    the whole artery is extracted with given stride along applicate axis, then divided into mini-batches for test
    """
    def __init__(self, data_dir, transform = None, mode = 'train', multi_view = False, interval=32, down_sample=1,
                 config='config'):
        """ read images from img_dir and save them into a list

        Args:
            data_dir: string, root directory of the per-sample image/mask folders
            transform: callable applied to each (image, mask) sub-volume pair
            mode: string, which split file under ../configs/<config>/ to read
            multi_view: bool, stack applicate/abscissa/ordinate views as channels
            interval: int, number of slices per sub-volume
            down_sample: int, take every `down_sample`-th slice
            config: string, sub-folder of ../configs holding the split files
        """
        super(CPRPlaqueTestDataset, self).__init__()
        self.interval = interval
        self.mode = mode
        self.data_dir = data_dir
        self.transform = transform
        self.down_sample = down_sample
        self.multi_view = multi_view # whether to use multi-view inputs or not
        # number of raw slices covered by one sub-volume
        self.stride = self.interval * self.down_sample
        self.config = config
        self.phases = self.get_phases()
    def get_phases(self): # number of arteries
        """ Collect one (image_path, mask_path) pair per artery of the split. """
        phases = []
        with open(osp.join('../configs/{}'.format(self.config), self.mode + '.txt'), 'r') as reader:
            samples = [line.strip('\n') for line in reader.readlines()]
        for sample in samples:
            sample_path = osp.join(self.data_dir, sample)
            for artery in sorted(listdir(sample_path)):
                # artery_path = osp.join(sample_path, artery)
                image_path = osp.join(sample_path, artery, 'ordinate', 'image')
                mask_path = osp.join(sample_path, artery, 'ordinate', 'mask')
                phases.append((image_path, mask_path))
        print("{} : {} samples".format(self.mode, len(phases)))
        return phases
    def __len__(self):
        return len(self.phases)
    def __getitem__(self, inx):
        """ Load one whole artery as a stack of overlapping sub-volumes.

        Returns:
            (sample_img, sample_mask, sample_name, start): all sub-volumes of
            the artery as a float tensor, the central mask slice of each
            sub-volume as a long tensor, the "<sample>/<artery>" name, and the
            first slice index.
        """
        sample = self.phases[inx]
        image_path, mask_path = sample
        sample_name = '/'.join(image_path.split('/')[-4:-2])
        # extract slice files; names carry the slice index, e.g. '001.tiff'
        slice_files = sorted(
            [file for file in listdir(image_path) if file.endswith('.tiff') and not file.startswith('.')])
        start_file, end_file = slice_files[0], slice_files[-1]
        start, end = int(start_file.split('.')[0]), int(end_file.split('.')[0])
        n_sample = len(range(start, end + 1 - self.stride + self.down_sample)) # num of samples
        for s_inx in range(start, end + 1 - self.stride + self.down_sample):
            # whether multi-view data or not
            if self.multi_view:
                axis_names = ['applicate', 'abscissa', 'ordinate']
            else:
                axis_names = ['applicate']
            # read multi-view slices and concatenate them together
            for a_inx, axis_name in enumerate(axis_names):
                image_path_axis = image_path.replace('ordinate', axis_name)
                mask_path_axis = mask_path.replace('ordinate', axis_name)
                slice_files_axis = [osp.join(image_path_axis, "{:03d}.tiff".format(i))
                                    for i in range(s_inx, s_inx + self.stride, self.down_sample)]
                label_files_axis = [osp.join(mask_path_axis, "{:03d}.tiff".format(i))
                                    for i in range(s_inx, s_inx + self.stride, self.down_sample)]
                image_axis = np.stack([io.imread(slice_file) for slice_file in slice_files_axis])
                mask_axis = np.stack([io.imread(label_file) for label_file in label_files_axis])
                if axis_name == 'applicate':
                    # the applicate view defines the reference size and supplies the mask
                    new_d, new_h, new_w = image_axis.shape
                    image = np.zeros((*image_axis.shape, len(axis_names)), dtype=np.int16)
                    image[:, :, :, a_inx] = image_axis
                    mask = mask_axis
                else:
                    # if slice size doesn't match with each other, resize them into the same as applicate slice
                    for slice_inx in range(new_d):
                        slice_axis = image_axis[slice_inx]
                        if slice_axis.shape != (new_h, new_w):
                            slice_axis = transform.resize(slice_axis, (new_h, new_w), mode='reflect',
                                                          preserve_range=True).astype(np.int16)
                        image[slice_inx, :, :, a_inx] = slice_axis
            image, mask = self.transform((image, mask))
            mask = mask[self.interval // 2] # only remain the central slice
            if s_inx == start: # the first mini-batch
                # allocate output tensors lazily, once transformed shapes are known
                # NOTE(review): in the list branch `image.size()` would raise
                # AttributeError on a plain list — looks dead/untested; confirm.
                if isinstance(image, list): # for Hyper DenseNet (this part can be omitted)
                    sample_img = [torch.zeros([n_sample, *list(image.size())]).float() for _ in range(len(image))]
                else:
                    sample_img = torch.zeros([n_sample, *list(image.size())]).float()
                sample_mask = torch.zeros([n_sample, *list(mask.size())]).long()
            if isinstance(image, list):
                for i in range(len(image)):
                    sample_img[i][s_inx-start] = image[i]
            else:
                sample_img[s_inx - start] = image
            sample_mask[s_inx - start] = mask
        return (sample_img, sample_mask, sample_name, start)
def read_train_data(data_dir, metric_prev_epoch = None, phases_prev_epoch = None, transform = None, mode = 'train',
                    is_hard_mining = False, percentile = 85, multi_view = False, interval=32, down_sample=1,
                    batch_size= 32, num_workers= 12, shuffle=True, config='config'):
    """ Build one DataLoader per split.

    In 'train' mode, loaders for 'train', 'val' and 'test' are created;
    otherwise only 'test'. The 'val' split reuses the test-time transform.
    """
    if mode == 'train':
        split_names = ['train', 'val', 'test']
    else:
        split_names = ['test']
    # validation uses the deterministic test-time transform
    transform['val'] = transform['test']
    dataloaders = {}
    for split in split_names:
        dataset = CPRPlaqueTrainDataset(data_dir, metric_prev_epoch, phases_prev_epoch, transform[split],
                                        split, is_hard_mining, percentile, multi_view, interval, down_sample, config)
        # each worker seeds python's RNG with its worker id
        dataloaders[split] = DataLoader(dataset=dataset, shuffle=shuffle,
                                        num_workers=num_workers, batch_size=batch_size,
                                        worker_init_fn=lambda x: random.seed(x))
    return dataloaders
def read_plot_data(data_dir, transform, plot_data, multi_view=False, interval=16, down_sample=1,
                   num_workers= 16, shuffle=False, config='config'):
    """ Build a single batch-size-1 DataLoader over whole arteries for plotting.

    Returns a dict mapping `plot_data` (the split name) to its DataLoader.
    """
    # plotting reuses the deterministic test-time transform for 'val'
    transform['val'] = transform['test']
    dataset = CPRPlaqueTestDataset(data_dir, transform[plot_data], plot_data, multi_view, interval, down_sample, config)
    loader = DataLoader(dataset=dataset, shuffle=shuffle,
                        num_workers=num_workers, batch_size=1,
                        worker_init_fn=lambda x: random.seed(x))
    return {plot_data: loader}
def show_plot_dataloader():
    """ Iterate the plot ('test') dataloader and print each sample's tensor
    sizes, to verify the correctness of the dataloader. The image/mask dump
    calls and the class-frequency bookkeeping are kept commented out below.
    """
    since = time.time()
    data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    # data_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # data_dir = "/Users/AlbertHuang/CT_Anomaly_Detection/Plaque_CPR/20180213"
    trans_params = {
        'central_crop' : 192,
        'random_crop' : (96, 96),
        'rescale' : (96, 96),
        'output_channel' : 2
    }
    # train pipeline augments; test pipeline is deterministic
    composed = {'train': transforms.Compose([HU2Gray(),
                                             RandomCentralCrop(),
                                             RandomRotation(),
                                             RandomFlip(),
                                             Rescale(trans_params['rescale']),
                                             Gray2InnerOuterBound() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                             # AddNoise(),
                                             # RandomTranslate(),
                                             ToTensor()]),
                'test': transforms.Compose([HU2Gray(),
                                            CentralCrop(160),
                                            Rescale(trans_params['rescale']),
                                            Gray2InnerOuterBound() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                            ToTensor()])}
    # dataloaders = read_train_data(data_dir, None, None, composed, 'train', False, 85, True, interval=32,
    #                               down_sample=1, batch_size=8, num_workers=8, shuffle=True)
    dataloaders = read_plot_data(data_dir, composed, 'test', False, interval=15, down_sample=1, num_workers=8, shuffle=False)
    num_pixel = np.zeros(5, dtype=np.uint32)
    for inx in range(1):
        # datasizes = {'train':0, 'val':0}
        datasizes = {'test': 0}
        # for phase in ['train', 'val']:
        for phase in ['test']:
            # des_path = osp.join(des_dir, phase, str(inx))
            # if not osp.exists(des_path):
            #     os.makedirs(des_path)
            for i, sample in enumerate(dataloaders[phase]):
                # CPRPlaqueTestDataset yields (image, mask, sample_name, start)
                image, mask, sample_name, start = sample
                # drop the DataLoader's batch dimension (batch_size is 1)
                image, mask = image.squeeze(0), mask.squeeze(0)
                print("image size: {}".format(image.size()))
                print("mask size: {}".format(mask.size()))
                if image.size(1) == 1:
                    image_np = image.squeeze(1).numpy()
                else:
                    # multi-channel input: visualize the first channel only
                    image_np = image[:, 0, ::].numpy()
                mask_np = mask.numpy()
                img_dir = "./data_samples/{}".format(i)
                if not osp.exists(img_dir):
                    os.makedirs(img_dir)
                image_name = "./data_samples/{}/image".format(i)
                mask_name = "./data_samples/{}/mask".format(i)
                # sample_stack(image_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=image_name)
                # sample_stack(mask_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=mask_name)
                # # image, mask, _, _ = sample
                # datasizes[phase] += image.size(0)
                # for i, label in enumerate(mask.numpy()):
                #     for j in range(trans_params['output_channel']):
                #         num_pixel[j] += np.sum(label == j)
    # print("Train: {}, Val: {}".format(datasizes['train'], datasizes['val']))
    # class_freq = num_pixel / num_pixel.sum()
    # class_weight = np.median(class_freq) / class_freq
    # print("median frequency balancing: {}".format(class_weight))
    # np.save("./class_weight_mfb.npy", class_weight)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
def show_train_dataloader():
    """ Visualize data samples to verify the correctness of the dataloader.

    Builds the 'test' plot dataloader (see the commented-out read_train_data
    call for the train/val variant), dumps the first image/mask volume of each
    batch under ./data_samples/<i>/ and accumulates per-class pixel counts.

    Fixes over the original: the loop iterated hard-coded ['train', 'val']
    although read_plot_data returns only {'test': ...} (KeyError), and unpacked
    a 2-tuple although CPRPlaqueTestDataset yields a 4-tuple (ValueError).
    """
    since = time.time()
    # data_dir = "/home/mil/huang/Dataset/CPR_multiview"
    data_dir = "/data/ugui0/antonio-t/CPR_multiview"
    # data_dir = "/Users/AlbertHuang/CT_Anomaly_Detection/Plaque_CPR/20180213"
    trans_params = {
        'central_crop' : 192,
        'random_crop' : (96, 96),
        'rescale' : (96, 96),
        'output_channel' : 2
    }
    composed = {'train': transforms.Compose([HU2Gray(),
                                             RandomCentralCrop(),
                                             RandomRotation(),
                                             RandomFlip(),
                                             Rescale(trans_params['rescale']),
                                             Gray2InnerOuterBound() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                             ToTensor()]),
                'test': transforms.Compose([HU2Gray(),
                                            CentralCrop(160),
                                            Rescale(trans_params['rescale']),
                                            Gray2InnerOuterBound() if trans_params['output_channel'] == 2 else Gray2Mask(),
                                            ToTensor()])}
    # dataloaders = read_train_data(data_dir, None, None, composed, 'train', False, 85, True, interval=32,
    #                               down_sample=1, batch_size=8, num_workers=8, shuffle=True)
    dataloaders = read_plot_data(data_dir, composed, 'test', False, interval=15, down_sample=1, num_workers=8, shuffle=False)
    num_pixel = np.zeros(5, dtype=np.uint32)
    for inx in range(1):
        # iterate only the splits the dataloader dict actually contains
        datasizes = {phase: 0 for phase in dataloaders}
        for phase in dataloaders:
            for i, sample in enumerate(dataloaders[phase]):
                # CPRPlaqueTestDataset yields (image, mask, sample_name, start)
                image, mask = sample[0], sample[1]
                # NOTE(review): the plot loader keeps a leading batch dim of 1;
                # show_plot_dataloader squeezes it first — consider doing the same.
                print("image size: {}".format(image.size()))
                print("mask size: {}".format(mask.size()))
                if image.size(1) == 1:
                    image_np = image.squeeze(1).numpy()
                else:
                    image_np = image[:, 0, ::].numpy()
                mask_np = mask.numpy()
                img_dir = "./data_samples/{}".format(i)
                if not osp.exists(img_dir):
                    os.makedirs(img_dir)
                image_name = "./data_samples/{}/image".format(i)
                mask_name = "./data_samples/{}/mask".format(i)
                sample_stack(image_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=image_name)
                sample_stack(mask_np[0], rows=10, cols=10, start_with=0, show_every=2, scale=4, fig_name=mask_name)
                datasizes[phase] += image.size(0)
                # per-class pixel histogram (l_inx avoids shadowing the batch index i)
                for l_inx, label in enumerate(mask.numpy()):
                    for j in range(trans_params['output_channel']):
                        num_pixel[j] += np.sum(label == j)
    # class_freq = num_pixel / num_pixel.sum()
    # class_weight = np.median(class_freq) / class_freq
    # print("median frequency balancing: {}".format(class_weight))
    # np.save("./class_weight_mfb.npy", class_weight)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600, (time_elapsed % 3600) // 60, time_elapsed % 60))
if __name__ == "__main__":
show_plot_dataloader() | Python |
3D | kkhuang1990/PlaqueDetection | hybrid/transforms.py | .py | 14,554 | 440 | # _*_ coding: utf-8 _*_
""" transforms for 3D volume """
import torch
from skimage import transform
import numpy as np
import random
import warnings
import cv2
from scipy import ndimage
from sklearn.preprocessing import label_binarize
from utils import hu2lut, gray2mask, central_crop, hu2lut, hu2gray
from utils import gray2bound, gray2mask, rgb2mask, gray2triplewithbound
from utils import gray2innerbound, gray2outerbound, gray2innerouterbound
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
class Gray2TripleWithBound(object):
    """ Convert a grayscale label volume into triple segmentation plus boundary,
    slice by slice; the image half of the sample is passed through unchanged. """
    def __init__(self, n_classes=4, width=1):
        # n_classes: number of classes after conversion; width: boundary width
        self.n_classes = n_classes
        self.width = width
    def __call__(self, sample):
        image, gray = sample
        converted = np.zeros_like(gray, dtype=np.uint8)
        for slice_inx in range(len(gray)):
            converted[slice_inx] = gray2triplewithbound(gray[slice_inx], self.n_classes, self.width)
        return image, converted
class Gray2InnerBound(object):
    """ Extract the inner boundary from each grayscale label slice
    (delegates to utils.gray2innerbound). """

    def __init__(self, width=1):
        self.width = width  # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        bounds = np.zeros_like(gray)
        for idx in range(gray.shape[0]):
            bounds[idx] = gray2innerbound(gray[idx], self.width)
        return image, bounds
class Gray2OuterBound(object):
    """ Convert a grayscale label volume to the OUTER boundary.

    NOTE: the docstring previously said "inner bound" (copy-paste from
    Gray2InnerBound) while the code delegates to gray2outerbound; the
    documentation and the local variable name are corrected here.
    """

    def __init__(self, width=1):
        self.width = width  # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        outer_bound = np.zeros_like(gray)
        for l_inx, label in enumerate(gray):
            outer_bound[l_inx] = gray2outerbound(label, self.width)
        return image, outer_bound
class Gray2InnerOuterBound(object):
    """ Convert a grayscale label volume to inner and outer boundaries,
    where the two boundaries are treated as different classes. """

    def __init__(self, width=2):
        self.width = width  # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        bounds = np.zeros_like(gray)
        for idx in range(gray.shape[0]):
            bounds[idx] = gray2innerouterbound(gray[idx], self.width)
        return image, bounds
class Gray2Bound(object):
    """ Convert a grayscale label volume to boundary labels, slice by slice
    (delegates to utils.gray2bound). """

    def __init__(self, n_classes=3, width=2):
        self.n_classes = n_classes  # number of segmentation classes
        self.width = width          # boundary width in pixels

    def __call__(self, sample):
        image, gray = sample
        bounds = np.zeros_like(gray)
        for idx in range(gray.shape[0]):
            bounds[idx] = gray2bound(gray[idx], self.n_classes, self.width)
        return image, bounds
class RandomRotation(object):
    """ With probability `prob`, rotate every slice of the sample by a random
    multiple of `angle` degrees (nearest-neighbour for the mask). """

    def __init__(self, angle=90, prob=0.8):
        self.angle = angle  # rotation granularity in degrees
        self.prob = prob    # probability of applying the rotation

    def __call__(self, sample):
        image, mask = sample
        # draw the angle before the coin flip to keep the RNG call order
        # identical to the previous implementation
        rand_angle = random.randrange(0, 360, self.angle)
        if random.uniform(0, 1) <= self.prob:
            for idx in range(len(image)):
                image[idx] = transform.rotate(image[idx], rand_angle, mode='reflect',
                                              preserve_range=True)
                mask[idx] = transform.rotate(mask[idx], rand_angle, mode='reflect',
                                             preserve_range=True, order=0)
        return (image, mask)
class RandomFlip(object):
""" random horizontal or vertical flip """
def __init__(self, prob=0.8):
self.prob = prob
def __call__(self, sample):
image, mask = sample
x = random.uniform(0, 1)
if x < self.prob:
phase = random.randint(0, 1)
for s_inx, (slice, label) in enumerate(zip(image, mask)):
image[s_inx] = np.flip(slice, phase)
mask[s_inx] = np.flip(label, phase)
return (image, mask)
class Rescale(object):
    """Rescale the image in a sample to a given size with range preserved.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, a square (output_size, output_size)
            output is produced.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2, "output_size should be a 2-dimensional tuple"
            self.output_size = output_size

    def __call__(self, sample):
        image, mask = sample
        # __init__ always normalizes output_size to a tuple, so the previous
        # isinstance(self.output_size, int) aspect-ratio branch here was dead
        # code and has been removed.
        new_h, new_w = int(self.output_size[0]), int(self.output_size[1])
        if image.ndim == 3:
            new_image = np.zeros((len(image), new_h, new_w), dtype=image.dtype)
        elif image.ndim == 4:
            new_image = np.zeros((len(image), new_h, new_w, image.shape[3]), dtype=image.dtype)
        else:
            # previously any other rank fell through to an UnboundLocalError
            raise ValueError("image must be 3D or 4D, got ndim={}".format(image.ndim))
        new_mask = np.zeros((len(mask), new_h, new_w), dtype=mask.dtype)
        for i, (slice_, label) in enumerate(zip(image, mask)):
            # order=0 keeps mask labels discrete; preserve_range avoids rescaling values
            new_image[i] = transform.resize(slice_, (new_h, new_w), mode='reflect', preserve_range=True)
            new_mask[i] = transform.resize(label, (new_h, new_w), mode='reflect', preserve_range=True, order=0)
        return new_image, new_mask
class RandomCrop(object):
    """ Randomly crop the volume to `output_size` in the spatial (H, W) plane.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, mask = sample
        h, w = image.shape[1:3]
        new_h, new_w = self.output_size
        # np.random.randint's upper bound is exclusive: the old code raised
        # ValueError when h == new_h (low == high) and could never choose the
        # last valid offset. +1 makes the bound inclusive of h - new_h.
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[:, top:top + new_h, left:left + new_w]
        mask = mask[:, top:top + new_h, left:left + new_w]
        return image, mask
class RGB2Mask(object):
    """ Convert a 3D RGB annotation volume to a label mask.

    red    (255, 0, 0)     : low-density plaque        --> 4
    orange (255, 128, 0)   : calcification             --> 3
    white  (255, 255, 255) : border of the artery      --> 2
    blue   (0, 0, 255)     : inside of the artery      --> 1
    black  (0, 0, 0)       : background                --> 0
    """

    # (color, label) pairs; colors are disjoint so application order is irrelevant
    COLOR_MAP = (
        ([255, 0, 0], 4),
        ([255, 128, 0], 3),
        ([255, 255, 255], 2),
        ([0, 0, 255], 1),
    )

    def __call__(self, sample):
        image, rgb = sample
        mask = np.zeros(rgb.shape[:3], dtype=np.uint8)
        for color, label in self.COLOR_MAP:
            mask[np.all(rgb == color, axis=3)] = label
        return image, mask
class Gray2Mask(object):
    """ Convert a grayscale annotation image to a label mask.

    red    76  : low-density plaque   --> 4
    orange 151 : calcification        --> 3
    white  255 : border of the artery --> 2
    blue   29  : inside of the artery --> 1
    black  0   : background           --> 0
    """

    GRAY2LABEL = {76: 4, 151: 3, 255: 2, 29: 1}

    def __call__(self, sample):
        image, gray = sample
        mask = np.zeros_like(gray, dtype=np.uint8)
        for value, label in self.GRAY2LABEL.items():
            mask[gray == value] = label
        return image, mask
class Gray2Binary(object):
    """ Binarize a grayscale label volume: any non-zero value becomes 1. """

    def __call__(self, sample):
        image, gray = sample
        return image, (gray != 0).astype(np.uint8)
class HU2Gray(object):
    """ Linearly map HU values onto the grayscale range [0, 255]. """

    def __init__(self, hu_max=1440.0, hu_min=-1024.0):
        self.hu_max = hu_max
        self.hu_min = hu_min
        # precomputed gain so __call__ is a single affine transform
        self.scale = 255.0 / (self.hu_max - self.hu_min)

    def __call__(self, sample):
        """ Convert an HU-valued image to gray scale; the mask passes through. """
        image, mask = sample
        return (image - self.hu_min) * self.scale, mask
class HU2LUT(object):
    """ Apply a window/level lookup transform to HU data.

    Values below the window map to 0, values above map to 255, and in-window
    values are rescaled linearly onto [0, 255]; the result is uint8.
    """
    def __init__(self, window, level):
        self.window = window  # window width in HU
        self.level = level    # window centre in HU
    def __call__(self, sample):
        data, mask = sample
        # piecewise: condition 1 -> 0 (below window), condition 2 -> 255
        # (above window), default branch -> linear rescale within the window
        lut = np.piecewise(data, [data <= (self.level - 0.5 - (self.window - 1) / 2),
                                  data > (self.level - 0.5 + (self.window - 1) / 2)],
                           [0, 255, lambda data: ((data - (self.level - 0.5)) / (self.window - 1) + 0.5) * (255 - 0)])
        return lut.astype(np.uint8), mask
class ToTensor(object):
    """ Convert an (image, mask) ndarray pair to torch tensors.

    The image is moved to channel-first layout [C, D, H, W] (a singleton
    channel is added for 3D input) and optionally normalized by 255.
    """

    def __init__(self, norm=True):
        self.norm = norm  # divide the image by 255 when True

    def __call__(self, sample):
        image, mask = sample
        # numpy volume is D x H x W (x C); torch wants C x D x H x W
        if image.ndim == 4:
            image = image.transpose((3, 0, 1, 2))
        elif image.ndim == 3:
            image = image[np.newaxis]
        tensor_img = torch.from_numpy(image).float()
        if self.norm:
            tensor_img = tensor_img / 255.0
        tensor_mask = torch.from_numpy(mask).long()
        return tensor_img, tensor_mask
class HU2GrayMultiStreamToTensor(object):
    """ Build one grayscale tensor stream per HU window (via hu2lut) and
    convert everything to torch tensors. """

    def __init__(self, w_widths = [500.0, 100.0], w_centers = [250.0, 50.0], norm=True):
        self.w_widths = w_widths    # window widths, one per stream
        self.w_centers = w_centers  # window centres, one per stream
        self.norm = norm            # divide each stream by 255 when True

    def __call__(self, sample):
        image, mask = sample
        # channel-first layout; add a singleton channel for 3D input
        if image.ndim == 4:
            image = image.transpose((3, 0, 1, 2))
        elif image.ndim == 3:
            image = image[np.newaxis]
        sample_img = []
        for width, center in zip(self.w_widths, self.w_centers):
            stream = torch.from_numpy(hu2lut(image, width, center)).float()
            if self.norm:
                stream = stream / 255.0
            sample_img.append(stream)
        return sample_img, torch.from_numpy(mask).long()
class Identical(object):
    """ No-op transform: return the sample untouched (placeholder in pipelines). """

    def __call__(self, sample):
        return sample
class CentralCrop(object):
    """ Crop the central region of every slice to the requested (H, W) size. """

    def __init__(self, size):
        assert isinstance(size, (int, tuple))
        # normalize an int into a square (H, W) tuple
        self.size = (int(size), int(size)) if isinstance(size, int) else size

    def __call__(self, sample):
        """ Centre-crop image and mask volumes to self.size. """
        image, mask = sample
        h, w = image.shape[1:3]
        margin_h, margin_w = h - self.size[0], w - self.size[1]
        assert margin_h % 2 == 0 and margin_w % 2 == 0, \
            "new image size must match with the input image size"
        top, left = margin_h // 2, margin_w // 2
        bottom, right = top + self.size[0], left + self.size[1]
        return image[:, top:bottom, left:right], mask[:, top:bottom, left:right]
class GaussianCrop(object):
""" crop patches with central pixel position (x, y) obeying Guassian distribution """
def __init__(self, size, sigma=0.1):
assert isinstance(size, (int, tuple))
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.sigma = sigma
def __call__(self, sample):
""" centre crop the given image
Args:
sample : (image, mask)
"""
image, mask = sample
h, w = image.shape[1:3]
p_h, p_w = self.size
c_h = int(random.normalvariate(0.5, self.sigma) * h)
if c_h < p_h // 2:
c_h = p_h // 2
elif c_h > h - p_h // 2:
c_h = h - p_h // 2
c_w = int(random.normalvariate(0.5, self.sigma) * w)
if c_w < p_w // 2 :
c_w = p_w // 2
elif c_w > w - p_w // 2:
c_w = w - p_w // 2
new_image = image[:, c_h - p_h // 2:c_h + p_h // 2, c_w - p_w // 2:c_w + p_w // 2]
new_mask = mask[:, c_h - p_h // 2:c_h + p_h // 2, c_w - p_w // 2:c_w + p_w // 2]
return new_image, new_mask
class RandomCentralCrop(object):
""" randomly central crop with given size options """
def __init__(self, lower_size=160, upper_size=256, step=4):
assert lower_size%2 ==0 and upper_size%2==0, "both lower and upper size should be even number"
self.lower_size = lower_size
self.upper_size = upper_size
self.step = step
def __call__(self, sample):
x = random.randrange(self.lower_size, self.upper_size, self.step)
return CentralCrop(x)(sample)
class AddNoise(object):
    """ With probability `prob`, add i.i.d. Gaussian noise to the image. """

    def __init__(self, loc=0.0, scale=1.0, prob=0.5):
        self.loc = loc      # mean of the noise
        self.scale = scale  # std of the noise
        self.prob = prob    # probability of applying the noise

    def __call__(self, sample):
        image, mask = sample
        x = random.uniform(0, 1)
        if x <= self.prob:
            noise = np.random.normal(self.loc, self.scale, image.shape)
            # out-of-place add: the previous `image += noise` raised a
            # numpy casting error for integer-typed images and mutated the
            # caller's array in place
            image = image + noise
        return image, mask
class RandomTranslate(object):
    """ With probability `prob`, translate all slices by a random shift of up
    to a quarter of the image size in each direction. """

    def __init__(self, prob=0.5):
        self.prob = prob  # probability of applying the translation

    def __call__(self, sample):
        image, mask = sample
        [H, W] = image.shape[1:3]
        x = random.uniform(0, 1)
        if x <= self.prob:
            right = random.randint(int(-W / 4), int(W / 4))
            # BUG FIX: the vertical shift was drawn from [H/4, H/4] (a single
            # value, always shifting down) because the minus sign was missing
            # on the lower bound.
            down = random.randint(int(-H / 4), int(H / 4))
            # affine matrix for a pure translation of (right, down) pixels
            M = np.float32([[1, 0, right], [0, 1, down]])
            for i, (slice_, label) in enumerate(zip(image, mask)):
                image[i] = cv2.warpAffine(slice_, M, (W, H))
                mask[i] = cv2.warpAffine(label, M, (W, H))
        return image, mask
3D | kkhuang1990/PlaqueDetection | hybrid/models/__init__.py | .py | 0 | 0 | null | Python |
3D | kkhuang1990/PlaqueDetection | hybrid/models/hybrid_res_unet.py | .py | 8,457 | 210 | # coding = utf-8
""" Hybrid Res-UNet architecture with regularization of number of predicted boundary pixels
the contract path is 3D while the expansion path is 2D.
For input, slices before and after current slice are concatenated as a volume.
For output, annotation of current slice is compared with the prediction (single slice)
"""
import torch
from torch import nn
from .utils import _initialize_weights_2d, _initialize_weights_3d
import torch.nn.functional as F
# 3D convolution
def conv_333(in_channels, out_channels, stride=1, padding=1):
    """ 3x3x3 3D convolution with bias.

    `padding` is per-axis configurable: callers pass (0, 1, 1) to pad only
    the in-plane (X, Y) directions, which keeps the central slice of the
    input volume central when striding along Z.
    """
    return nn.Conv3d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding, bias=True)
class ResBlock3D(nn.Module):
    """ residual block

    Pre-activation 3D residual block: (BN -> ReLU -> conv) twice plus an
    identity shortcut. When stride != 1 or the channel count changes, the
    shortcut is projected with a strided conv + BN so the shapes match.
    """
    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm3d(in_channels)
        # strided blocks pad only in-plane (0, 1, 1): the slice count shrinks
        # along Z and the central slice of the input volume remains central
        padding = 1 if stride == 1 else (0, 1, 1)
        self.conv1 = conv_333(in_channels, out_channels, stride=stride, padding=padding)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.conv2 = conv_333(out_channels, out_channels, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout3d(p=p)
        if stride != 1 or in_channels != out_channels:
            # projection shortcut so the residual addition is shape-compatible
            self.downsample = nn.Sequential(
                nn.Conv3d(in_channels, out_channels,
                          kernel_size=3, stride=stride, bias=False, padding=padding),
                nn.BatchNorm3d(out_channels)
            )
    def forward(self, x):
        residual = x
        # print("input residual size: {}".format(residual.size()))
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.dp(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        # print("output residual size: {}".format(residual.size()))
        # print("output size: {}".format(out.size()))
        out += residual
        return out
# 2D convolution
def conv_33(in_channels, out_channels, stride=1):
    """ 3x3 2D convolution without bias (BatchNorm follows, making bias redundant). """
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class ResBlock2D(nn.Module):
    """ 2D residual block

    Pre-activation layout (BN -> ReLU -> conv, twice) with dropout after
    each conv; a 1x1 conv + BN projection aligns the shortcut when the
    stride or channel count changes.
    """
    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = conv_33(in_channels, out_channels, stride=stride)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv_33(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout2d(p=p)
        if stride != 1 or in_channels != out_channels:
            # projection shortcut so the residual addition is shape-compatible
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
    def forward(self, x):
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.dp(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.dp(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        out += residual
        return out
class UpConv(nn.Module):
    """ Upsample a 2D feature map and fuse it with the central slice of a 3D skip.

    The skip connection is a 5D volume [B, C, D, H, W]; its central slice
    along D is concatenated channel-wise with the transposed-conv output.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose2d(in_channels, out_channels,
                                            kernel_size=2, stride=2, padding=0)

    def forward(self, skip, x):
        """ skip: 3D volume; x: 2D map. Returns cat(central skip slice, up(x)). """
        upsampled = self.transconv(x)
        center = skip.size(2) // 2
        return torch.cat([skip[:, :, center], upsampled], 1)
class ResUNet(nn.Module):
    """ Res UNet class

    Hybrid residual U-Net: a 3D contracting path over the input volume and a
    2D expanding path. The bottleneck fuses the surviving slices with an
    (n_slices//scale, 1, 1) convolution so the decoder operates on a single
    (central) slice, which is compared against the 2D annotation.

    Args:
        in_channels: channels of the input volume
        out_channels: number of output classes
        n_slices: slice count of the input volume (must reduce to >= 1 after
            the encoder's unpadded Z-striding; see conv_n11)
        input_size: in-plane H = W of the input (stored for reference)
        down_blocks / up_blocks / bottleneck: channel widths per level
        p: dropout probability inside the residual blocks
    """
    def __init__(self, in_channels=1, out_channels=5, n_slices=31, input_size=96, down_blocks=[32, 64, 128, 256],
                 up_blocks = [256, 128, 64, 32], bottleneck = 512, p=0.5):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.n_slices = n_slices
        self.input_size = input_size
        self.conv1 = nn.Conv3d(in_channels, self.down_blocks[0], 3, padding=1)
        # contract path: first level keeps resolution, later levels stride by 2
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks):
            output_channel = self.down_blocks[b_inx]
            if b_inx == 0:
                input_channel = self.down_blocks[0]
                self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=1, p=p))
            else:
                input_channel = self.down_blocks[b_inx-1]
                self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=2, p=p))
        # bottleneck block
        # make sure there is only single one slice in current layer
        self.bottleneck = ResBlock3D(self.down_blocks[-1], bottleneck, stride=2, p=p)
        scale = 2 ** len(down_blocks)
        # fuse the remaining slices into one with a (D, 1, 1) kernel
        self.conv_n11 = nn.Conv3d(bottleneck, bottleneck, kernel_size=(n_slices//scale, 1, 1))
        # expansive path (2D). Each ResBlock2D input is 2*output_channel wide
        # because UpConv concatenates the upsampled map with the skip slice.
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            self.BlocksUp.append(ResBlock2D(input_channel, output_channel, stride=1, p=p))
        # final convolution layer: 1x1 conv to class logits
        self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights_3d(self)
        _initialize_weights_2d(self)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv1(x)
        # print(out.size())
        skip_connections = []
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            # print(out.size())
        out = self.bottleneck(out)
        # if out.size(2) > 1:
        out = self.conv_n11(out) # fuse several slices in the bottleneck layer
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            if b_inx == 0:
                # drop the (now singleton) slice dimension: 5D -> 4D
                out = self.TransUpBlocks[b_inx](skip, out[:, :, 0])
            else:
                out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
        output = self.fl(out)
        return output
def ResUNet28(in_channels, out_channels, n_slices=63, input_size=96, p=0.0):
    """ Five-level hybrid Res-UNet (1024-channel bottleneck), for 63-slice input. """
    return ResUNet(
        in_channels=in_channels,
        out_channels=out_channels,
        n_slices=n_slices,
        input_size=input_size,
        down_blocks=[32, 64, 128, 256, 512],
        up_blocks=[512, 256, 128, 64, 32],
        bottleneck=1024,
        p=p,
    )
def ResUNet23(in_channels, out_channels, n_slices=31, input_size=96, p=0.0):
    """ Four-level hybrid Res-UNet (512-channel bottleneck), for 31-slice input. """
    return ResUNet(
        in_channels=in_channels,
        out_channels=out_channels,
        n_slices=n_slices,
        input_size=input_size,
        down_blocks=[32, 64, 128, 256],
        up_blocks=[256, 128, 64, 32],
        bottleneck=512,
        p=p,
    )
def ResUNet18(in_channels, out_channels, n_slices=15, input_size=96, p=0.0):
    """ Three-level hybrid Res-UNet (256-channel bottleneck), for 15-slice input. """
    return ResUNet(
        in_channels=in_channels,
        out_channels=out_channels,
        n_slices=n_slices,
        input_size=input_size,
        down_blocks=[32, 64, 128],
        up_blocks=[128, 64, 32],
        bottleneck=256,
        p=p,
    )
if __name__ == "__main__":
in_channels = 1
out_channels = 3
n_slices = 15
input_size = 96
unet = ResUNet18(in_channels, out_channels, n_slices=n_slices, input_size=input_size)
print(unet)
x = torch.FloatTensor(6, in_channels, n_slices, input_size, input_size) # the smallest patch size is 12 * 12
y = unet(x) | Python |
3D | kkhuang1990/PlaqueDetection | hybrid/models/utils.py | .py | 1,072 | 32 | # _*_ coding: utf-8 _*_
from torch import nn
import math
import torch
torch.manual_seed(42) # make random weight fixed for every running
def count_parameters(model):
    """ Return the number of trainable (requires_grad) parameters of `model`. """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def _initialize_weights_3d(model):
    """ Initialize 3D layers: conv weights ~ N(0, 0.01), BatchNorm3d to identity. """
    for module in model.modules():
        if isinstance(module, (nn.Conv3d, nn.ConvTranspose3d)):
            module.weight.data.normal_(0, 0.01)
        elif isinstance(module, nn.BatchNorm3d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
def _initialize_weights_2d(model):
    """ Initialize 2D layers: conv weights ~ N(0, sqrt(2/fan_out)) (He/Kaiming
    style), zero conv biases, BatchNorm2d to identity. """
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
            module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
| Python |
3D | kkhuang1990/PlaqueDetection | hybrid/models/hybrid_res_unet_reg.py | .py | 9,157 | 224 | # coding = utf-8
""" Hybrid Res-UNet architecture with regularization of number of predicted boundary pixels
the contract path is 3D while the expansion path is 2D.
For input, slices before and after current slice are concatenated as a volume.
For output, annotation of current slice is compared with the prediction (single slice)
"""
import torch
from torch import nn
from .utils import _initialize_weights_2d, _initialize_weights_3d
import torch.nn.functional as F
# 3D convolution
def conv_333(in_channels, out_channels, stride=1, padding=1):
    """ 3x3x3 3D convolution with bias; padding is per-axis configurable. """
    # here only the X and Y directions are padded and no padding along Z direction
    # in this way, we can make sure the central slice of the input volume will remain central
    return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride,
                     padding=padding, bias=True)
class ResBlock3D(nn.Module):
    """ residual block

    Pre-activation 3D residual block (BN -> ReLU -> conv, twice) with a
    conv + BN projection shortcut when stride or channel count changes.
    """
    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm3d(in_channels)
        # strided blocks pad only in-plane so the central slice stays central
        padding = 1 if stride == 1 else (0, 1, 1)
        self.conv1 = conv_333(in_channels, out_channels, stride=stride, padding=padding)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.conv2 = conv_333(out_channels, out_channels, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout3d(p=p)
        if stride != 1 or in_channels != out_channels:
            self.downsample = nn.Sequential(
                nn.Conv3d(in_channels, out_channels,
                          kernel_size=3, stride=stride, bias=False, padding=padding),
                nn.BatchNorm3d(out_channels)
            )
    def forward(self, x):
        residual = x
        # print("input residual size: {}".format(residual.size()))
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.dp(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        # print("output residual size: {}".format(residual.size()))
        # print("output size: {}".format(out.size()))
        out += residual
        return out
# 2D convolution
def conv_33(in_channels, out_channels, stride=1):
    """ 3x3 2D convolution without bias (BN follows, so bias is redundant). """
    # since BN is used, bias is not necessary
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
                     padding=1, bias=False)
class ResBlock2D(nn.Module):
    """ 2D residual block

    Pre-activation layout with dropout after each conv; 1x1 conv + BN
    projection shortcut when stride or channel count changes.
    """
    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = conv_33(in_channels, out_channels, stride=stride)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv_33(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout2d(p=p)
        if stride != 1 or in_channels != out_channels:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
    def forward(self, x):
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.dp(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.dp(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        out += residual
        return out
class UpConv(nn.Module):
    """ up convolution: transposed-conv upsampling of a 2D map, fused with the
    central slice (along D) of a 3D skip volume by channel concatenation. """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2,
                                            stride=2, padding=0)
    def forward(self, skip, x):
        """ skip is 3D volume and x is 2D slice, central slice of skip is concatenated with x """
        central_inx = skip.size(2) // 2
        skip_slice = skip[:, :, central_inx]
        out = self.transconv(x)
        out = torch.cat([skip_slice, out], 1)
        return out
class ResUNet(nn.Module):
    """ Res UNet class

    Same hybrid (3D encoder / 2D decoder) Res-UNet as hybrid_res_unet.py,
    plus a scalar regression head: flattened bottleneck features and the
    flattened softmax class maps are concatenated and passed through a
    linear layer (used to regularize the number of predicted boundary
    pixels). forward() returns (segmentation logits, regression scalar).
    """
    def __init__(self, in_channels=1, out_channels=5, n_slices=31, input_size=96, down_blocks=[32, 64, 128, 256],
                 up_blocks = [256, 128, 64, 32], bottleneck = 512, p=0.5):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.n_slices = n_slices
        self.input_size = input_size
        self.conv1 = nn.Conv3d(in_channels, self.down_blocks[0], 3, padding=1)
        # contract path: first level keeps resolution, later levels stride by 2
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks):
            output_channel = self.down_blocks[b_inx]
            if b_inx == 0:
                input_channel = self.down_blocks[0]
                self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=1, p=p))
            else:
                input_channel = self.down_blocks[b_inx-1]
                self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=2, p=p))
        # bottleneck block
        # make sure there is only single one slice in current layer
        self.bottleneck = ResBlock3D(self.down_blocks[-1], bottleneck, stride=2, p=p)
        scale = 2 ** len(down_blocks)
        # fuse the remaining slices into one with a (D, 1, 1) kernel
        self.conv_n11 = nn.Conv3d(bottleneck, bottleneck, kernel_size=(n_slices//scale, 1, 1))
        # expansive path (2D)
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            self.BlocksUp.append(ResBlock2D(input_channel, output_channel, stride=1, p=p))
        # final convolution layer: 1x1 conv to class logits
        self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)
        # regression head input size: flattened bottleneck + flattened softmax maps
        self.dim_reg = ((self.input_size // scale) ** 2) * bottleneck + (self.input_size ** 2) * out_channels
        self.conv_reg = nn.Linear(self.dim_reg, 1)
        # initialize weights
        _initialize_weights_3d(self)
        _initialize_weights_2d(self)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv1(x)
        # print(out.size())
        skip_connections = []
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            # print(out.size())
        out = self.bottleneck(out)
        out = self.conv_n11(out) # fuse several slices in the bottleneck layer
        assert out.size(2) == 1, "there should be only 1 slice in the output of bottleneck layer, but got {}"\
            .format(out.size(2))
        # first half of the regression input: flattened bottleneck features
        reg1 = out.view(out.size(0), -1)
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            if b_inx == 0:
                # drop the (now singleton) slice dimension: 5D -> 4D
                out = self.TransUpBlocks[b_inx](skip, out[:, :, 0])
            else:
                out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
            # print(out.size())
        output = self.fl(out)
        # second half of the regression input: flattened class probabilities
        reg2 = F.softmax(output, dim=1)
        reg2 = reg2.view(reg2.size(0), -1) # with size [B, K]
        reg = torch.cat([reg1, reg2], 1)
        assert reg.size(1) == self.dim_reg, "tensor size should be the same as dim_reg"
        # reg = torch.log(torch.exp(self.conv_reg(reg)) + 1)
        reg = torch.squeeze(self.conv_reg(reg), 1)
        return output, reg
def ResUNet28(in_channels, out_channels, n_slices=63, input_size=96, p=0.0):
    """ Five-level regularized hybrid Res-UNet (1024-channel bottleneck). """
    return ResUNet(in_channels=in_channels, out_channels=out_channels, n_slices=n_slices, input_size=input_size,
                   down_blocks=[32, 64, 128, 256, 512], up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024, p=p)
def ResUNet23(in_channels, out_channels, n_slices=31, input_size=96, p=0.0):
    """ Four-level regularized hybrid Res-UNet (512-channel bottleneck). """
    return ResUNet(in_channels=in_channels, out_channels=out_channels, n_slices=n_slices, input_size=input_size,
                   down_blocks=[32, 64, 128, 256], up_blocks = [256, 128, 64, 32], bottleneck = 512, p=p)
def ResUNet18(in_channels, out_channels, n_slices=15, input_size=96, p=0.0):
    """ Three-level regularized hybrid Res-UNet (256-channel bottleneck). """
    return ResUNet(in_channels=in_channels, out_channels=out_channels, n_slices=n_slices, input_size=input_size,
                   down_blocks=[32, 64, 128], up_blocks = [128, 64, 32], bottleneck = 256, p=p)
if __name__ == "__main__":
in_channels = 1
out_channels = 3
n_slices = 15
input_size = 96
unet = ResUNet18(in_channels, out_channels, n_slices=n_slices, input_size=input_size)
print(unet)
x = torch.FloatTensor(6, in_channels, n_slices, input_size, input_size) # the smallest patch size is 12 * 12
y = unet(x) | Python |
3D | kkhuang1990/PlaqueDetection | hybrid/models/hybrid_res_unet_bp.py | .py | 7,862 | 209 | # coding = utf-8
""" define the Hybrid Res-UNet structure in which the contract path is 3D while the expansion
path is 2D.
For input, slices before and after current slice are concatenated as a volume.
For output, annotation of current slice is compared with the prediction (single slice)
"""
import torch
from torch import nn
from .utils import _initialize_weights_2d, _initialize_weights_3d
# 3D convolution
def conv_333(in_channels, out_channels, stride=1, padding=1):
    """ 3x3x3 3D convolution with bias; padding is per-axis configurable. """
    # here only the X and Y directions are padded and no padding along Z direction
    # in this way, we can make sure the central slice of the input volume will remain central
    return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride,
                     padding=padding, bias=True)
class ResBlock3D(nn.Module):
    """ residual block

    Pre-activation 3D residual block (BN -> ReLU -> conv, twice) with a
    conv + BN projection shortcut when stride or channel count changes.
    """
    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm3d(in_channels)
        # strided blocks pad only in-plane so the central slice stays central
        padding = 1 if stride == 1 else (0, 1, 1)
        self.conv1 = conv_333(in_channels, out_channels, stride=stride, padding=padding)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.conv2 = conv_333(out_channels, out_channels, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout3d(p=p)
        if stride != 1 or in_channels != out_channels:
            self.downsample = nn.Sequential(
                nn.Conv3d(in_channels, out_channels,
                          kernel_size=3, stride=stride, bias=False, padding=padding),
                nn.BatchNorm3d(out_channels)
            )
    def forward(self, x):
        residual = x
        # print("input residual size: {}".format(residual.size()))
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.dp(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        # print("output residual size: {}".format(residual.size()))
        # print("output size: {}".format(out.size()))
        out += residual
        return out
# 2D convolution
def conv_33(in_channels, out_channels, stride=1):
    """ 3x3 2D convolution without bias (BN follows, so bias is redundant). """
    # since BN is used, bias is not necessary
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
                     padding=1, bias=False)
class ResBlock2D(nn.Module):
    """ 2D residual block

    Pre-activation layout with dropout after each conv; 1x1 conv + BN
    projection shortcut when stride or channel count changes.
    """
    def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):
        super().__init__()
        self.downsample = downsample
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = conv_33(in_channels, out_channels, stride=stride)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv_33(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.dp = nn.Dropout2d(p=p)
        if stride != 1 or in_channels != out_channels:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
    def forward(self, x):
        residual = x
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.dp(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.dp(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        out += residual
        return out
class UpConv(nn.Module):
    """ up convolution: transposed-conv upsampling of a 2D map, fused with the
    central slice (along D) of a 3D skip volume by channel concatenation. """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.transconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2,
                                            stride=2, padding=0)
    def forward(self, skip, x):
        """ skip is 3D volume and x is 2D slice, central slice of skip is concatenated with x """
        central_inx = skip.size(2) // 2
        skip_slice = skip[:, :, central_inx]
        out = self.transconv(x)
        out = torch.cat([skip_slice, out], 1)
        return out
class ResUNet(nn.Module):
    """ UNet class

    Hybrid (3D encoder / 2D decoder) Res-UNet variant without the bottleneck
    slice-fusion convolution: the decoder simply takes the central slice of
    the bottleneck output. Returns segmentation logits for the central slice.
    """
    def __init__(self, in_channels=1, out_channels=5, down_blocks=[32, 64, 128, 256],
                 up_blocks = [256, 128, 64, 32], bottleneck = 512, p=0.5):
        super().__init__()
        self.down_blocks = down_blocks
        self.up_blocks = up_blocks
        self.conv1 = nn.Conv3d(in_channels, self.down_blocks[0], 3, padding=1)
        # contract path: first level keeps resolution, later levels stride by 2
        self.BlocksDown = nn.ModuleList([])
        for b_inx, down_block in enumerate(self.down_blocks):
            output_channel = self.down_blocks[b_inx]
            if b_inx == 0:
                input_channel = self.down_blocks[0]
                self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=1, p=p))
            else:
                input_channel = self.down_blocks[b_inx-1]
                self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=2, p=p))
        # bottleneck block
        # make sure there is only single one slice in current layer
        self.bottleneck = ResBlock3D(self.down_blocks[-1], bottleneck, stride=2, p=p)
        # expansive path (2D)
        self.BlocksUp = nn.ModuleList([])
        self.TransUpBlocks = nn.ModuleList([])
        for b_inx, up_block in enumerate(self.up_blocks):
            input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]
            output_channel = self.up_blocks[b_inx]
            self.TransUpBlocks.append(UpConv(input_channel, output_channel))
            self.BlocksUp.append(ResBlock2D(input_channel, output_channel, stride=1, p=p))
        # final convolution layer: 1x1 conv to class logits
        self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)
        # initialize weights
        _initialize_weights_3d(self)
        _initialize_weights_2d(self)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv1(x)
        # print(out.size())
        skip_connections = []
        for down_block in self.BlocksDown:
            out = down_block(out)
            skip_connections.append(out)
            # print(out.size())
        out = self.bottleneck(out)
        # print(out.size())
        for b_inx in range(len(self.up_blocks)):
            skip = skip_connections.pop()
            if b_inx == 0:
                # take the central slice of the bottleneck volume: 5D -> 4D
                n_slices = out.size(2)
                out = self.TransUpBlocks[b_inx](skip, out[:, :, n_slices//2])
            else:
                out = self.TransUpBlocks[b_inx](skip, out)
            out = self.BlocksUp[b_inx](out)
            # print(out.size())
        output = self.fl(out)
        # print(output.size())
        return output
def ResUNet28(in_channels, out_channels, p=0.0):
    """Largest variant: five encoder stages, 1024-channel bottleneck."""
    return ResUNet(
        in_channels=in_channels,
        out_channels=out_channels,
        down_blocks=[32, 64, 128, 256, 512],
        up_blocks=[512, 256, 128, 64, 32],
        bottleneck=1024,
        p=p,
    )
def ResUNet23(in_channels, out_channels, p=0.0):
    """Mid-size variant: four encoder stages, 512-channel bottleneck."""
    return ResUNet(
        in_channels=in_channels,
        out_channels=out_channels,
        down_blocks=[32, 64, 128, 256],
        up_blocks=[256, 128, 64, 32],
        bottleneck=512,
        p=p,
    )
def ResUNet18(in_channels, out_channels, p=0.0):
    """Smallest variant: three encoder stages, 256-channel bottleneck."""
    return ResUNet(
        in_channels=in_channels,
        out_channels=out_channels,
        down_blocks=[32, 64, 128],
        up_blocks=[128, 64, 32],
        bottleneck=256,
        p=p,
    )
if __name__ == "__main__":
    # Smoke test: build the smallest variant and run one forward pass.
    in_channels = 1
    out_channels = 2
    unet = ResUNet18(in_channels, out_channels, p=0.0)
    print(unet)
    # NOTE(review): torch.FloatTensor(sizes) allocates uninitialized memory;
    # values are arbitrary, which is fine for a shape-only smoke test.
    x = torch.FloatTensor(6, 1, 15, 96, 96) # the smallest patch size is 16 * 16
y = unet(x) | Python |
3D | yuanqidu/LeftNet | main_md17.py | .py | 8,602 | 213 | from md17_dataset import MD17
from model import LEFTNet
import sys, os
import argparse
import os
import torch
from torch.optim import Adam,AdamW
from torch_geometric.data import DataLoader
from torch.autograd import grad
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR,ReduceLROnPlateau,CosineAnnealingLR
from tqdm import tqdm
def run(device, train_dataset, valid_dataset, test_dataset, model, loss_func, eval_steps=50, eval_start=0,
        epochs=800, batch_size=4, vt_batch_size=32, lr=0.0005, lr_decay_factor=0.5, lr_decay_step_size=50, weight_decay=0,
        energy_and_force=True, p=100, save_dir='models/', log_dir=''):
    """Full training loop for MD17.

    Trains `model`, validates every epoch, evaluates the test set every
    `eval_steps` epochs (once `epoch > eval_start`) and additionally whenever
    validation improves. Saves the best-validation checkpoint to `save_dir`
    and writes scalars to TensorBoard under `log_dir` (both optional).
    `p` weights the force loss relative to the energy loss.
    """
    model = model.to(device)
    # NOTE(review): the generator variable `p` shadows the force-weight
    # parameter `p` inside this one expression (harmless but confusing).
    num_params = sum(p.numel() for p in model.parameters())
    print('num_parameters:', num_params)
    optimizer = AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    scheduler = StepLR(optimizer, step_size=lr_decay_step_size, gamma=lr_decay_factor)
    train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
    valid_loader = DataLoader(valid_dataset, vt_batch_size, shuffle=False)
    test_loader = DataLoader(test_dataset, vt_batch_size, shuffle=False)
    best_valid = float('inf')
    test_valid = float('inf')
    start_epoch = 1
    if save_dir != '':
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
    if log_dir != '':
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        writer = SummaryWriter(log_dir=log_dir)
    for epoch in range(start_epoch, epochs + 1):
        print("=====Epoch {}".format(epoch), flush=True)
        test_mae = float('inf')
        train_mae = train(model, optimizer, train_loader, energy_and_force, p, loss_func, device)
        valid_mae = val(model, valid_loader, energy_and_force, p, device)
        if epoch > eval_start and epoch % eval_steps == 0:
            print('Testing')
            test_mae = val(model, test_loader, energy_and_force, p, device)
        if log_dir != '':
            writer.add_scalar('train_mae', train_mae, epoch)
            writer.add_scalar('valid_mae', valid_mae, epoch)
            writer.add_scalar('test_mae', test_mae, epoch)
        if valid_mae < best_valid:
            # Run the test set when validation improves (unless just done above).
            # NOTE(review): if validation improves while epoch <= eval_start,
            # no test evaluation happens and `test_valid` records inf.
            if epoch > eval_start and epoch % eval_steps != 0:
                print('Testing')
                test_mae = val(model, test_loader, energy_and_force, p, device)
            best_valid = valid_mae
            test_valid = test_mae
            if save_dir != '':
                print('Saving checkpoint...')
                checkpoint = {'epoch': epoch, 'model_state_dict': model.state_dict(),
                              'optimizer_state_dict': optimizer.state_dict(),
                              'scheduler_state_dict': scheduler.state_dict(), 'best_valid_mae': best_valid,
                              'num_params': num_params}
                torch.save(checkpoint, os.path.join(save_dir, 'valid_checkpoint.pt'))
        print({'Train': train_mae, 'Validation': valid_mae, 'Test': test_mae, 'Best valid': best_valid})
        scheduler.step()
    print(f'Best validation MAE so far: {best_valid}')
    print(f'Test MAE when got best validation result: {test_valid}')
    if log_dir != '':
        writer.close()
def train(model, optimizer, train_loader, energy_and_force, p, loss_func, device):
    """Run one training epoch; returns the mean loss over batches.

    Relies on the module-level FORCE_MEAN_TOTAL / ENERGY_MEAN_TOTAL globals
    (computed from the training set below) to un-normalize model outputs.
    """
    model.train()
    loss_accum = 0
    for step, batch_data in enumerate(tqdm(train_loader, disable=True)):
        optimizer.zero_grad()
        batch_data = batch_data.to(device)
        out,forces = model(batch_data)
        # NOTE(review): this is the total atom count of the whole batch, which
        # equals atoms-per-molecule only for batch_size=1 (the script default);
        # larger batch sizes would mis-scale the energy term.
        NUM_ATOM = batch_data.force.size()[0]
        # Un-normalize: energy scaled by the force-RMS statistic plus the
        # per-atom mean energy times atom count.
        out = out * FORCE_MEAN_TOTAL + ENERGY_MEAN_TOTAL * NUM_ATOM
        forces = forces * FORCE_MEAN_TOTAL
        if energy_and_force:
            # Predicted force = -dE/dpos plus the model's direct force head,
            # the latter down-weighted by 1000.
            force = -grad(outputs=out, inputs=batch_data.posc, grad_outputs=torch.ones_like(out), create_graph=True,
                          retain_graph=True)[0] + forces/1000
            e_loss = loss_func(out, batch_data.y.unsqueeze(1))
            f_loss = loss_func(force, batch_data.force)
            loss = e_loss + p * f_loss
        else:
            loss = loss_func(out, batch_data.y.unsqueeze(1))
        loss.backward()
        optimizer.step()
        loss_accum += loss.detach().cpu().item()
    return loss_accum / (step + 1)
def val(model, data_loader, energy_and_force, p, device):
    """Evaluate on `data_loader`; returns energy_MAE + p * force_MAE (or the
    plain energy MAE when `energy_and_force` is False).

    NOTE(review): uses the module-level NUM_ATOM global, which is the atom
    count of the *last* sample seen in the statistics loop below — valid for
    MD17 (every conformation of one molecule has the same atom count), but
    inconsistent with train(), which recomputes it per batch.
    """
    model.eval()
    preds = torch.Tensor([]).to(device)
    targets = torch.Tensor([]).to(device)
    if energy_and_force:
        preds_force = torch.Tensor([]).to(device)
        targets_force = torch.Tensor([]).to(device)
    for step, batch_data in enumerate(tqdm(data_loader, disable=True)):
        batch_data = batch_data.to(device)
        out, forces = model(batch_data)
        # Un-normalize exactly as in train().
        out = out * FORCE_MEAN_TOTAL + ENERGY_MEAN_TOTAL * NUM_ATOM
        forces = forces * FORCE_MEAN_TOTAL
        if energy_and_force:
            force = -grad(outputs=out, inputs=batch_data.posc, grad_outputs=torch.ones_like(out), create_graph=True,
                          retain_graph=True)[0] + forces/1000
            # Drop atoms whose autograd force came out NaN, keeping the
            # targets aligned with the surviving predictions.
            if torch.sum(torch.isnan(force)) != 0:
                mask = torch.isnan(force)
                force = force[~mask].reshape((-1, 3))
                batch_data.force = batch_data.force[~mask].reshape((-1, 3))
            preds_force = torch.cat([preds_force, force.detach_()], dim=0)
            targets_force = torch.cat([targets_force, batch_data.force], dim=0)
        preds = torch.cat([preds, out.detach_()], dim=0)
        targets = torch.cat([targets, batch_data.y.unsqueeze(1)], dim=0)
    if energy_and_force:
        energy_mae = torch.mean(torch.abs(preds - targets)).cpu().item()
        force_mae = torch.mean(torch.abs(preds_force - targets_force)).cpu().item()
        print({'Energy MAE': energy_mae, 'Force MAE': force_mae})
        return energy_mae + p * force_mae
    return torch.mean(torch.abs(preds - targets)).cpu().item()
# ---------------- CLI ----------------
parser = argparse.ArgumentParser(description='MD17')
parser.add_argument('--device', type=int, default=9)
parser.add_argument('--name', type=str, default='ethanol') #aspirin, benzene2017, ethanol, malonaldehyde, naphthalene, salicylic, toluene, uracil
parser.add_argument('--cutoff', type=float, default=8)
parser.add_argument('--num_layers', type=int, default=4)
parser.add_argument('--hidden_channels', type=int, default=200)
parser.add_argument('--num_radial', type=int, default=32)
parser.add_argument('--eval_steps', type=int, default=50)
parser.add_argument('--eval_start', type=int, default=500)
parser.add_argument('--epochs', type=int, default=1100)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--vt_batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.0004)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('--lr_decay_step_size', type=int, default=180)
parser.add_argument('--p', type=int, default=1000)
parser.add_argument('--save_dir', type=str, default='')
args = parser.parse_args()
print(args)
# ---------------- Data ----------------
dataset = MD17(name=args.name, root = 'dataset/')
split_idx = dataset.get_idx_split(len(dataset.data.y), train_size=1000, valid_size=1000, seed=42)
y_mean = None
y_std = None
train_dataset, valid_dataset, test_dataset = dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']]
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
print('device',device)
# Label normalization is disabled (identity); scaling is instead done via the
# dataset statistics computed below.
y_mean = 0
y_std = 1
force_mean = 0
# Training-set statistics used by train()/val() to un-normalize outputs:
#   ENERGY_MEAN_TOTAL - mean per-atom energy over training samples
#   FORCE_MEAN_TOTAL  - mean per-sample force RMS (acts as the energy scale)
# NOTE(review): NUM_ATOM is left holding the atom count of the last training
# sample; val() reads this global.
ENERGY_MEAN_TOTAL = 0
FORCE_MEAN_TOTAL = 0
NUM_ATOM = None
for data in train_dataset:
    energy = data.y
    force = data.force
    NUM_ATOM = force.size()[0]
    energy_mean = energy / NUM_ATOM
    ENERGY_MEAN_TOTAL += energy_mean
    force_rms = torch.sqrt(torch.mean(force.square()))
    FORCE_MEAN_TOTAL += force_rms
ENERGY_MEAN_TOTAL /= len(train_dataset)
FORCE_MEAN_TOTAL /= len(train_dataset)
ENERGY_MEAN_TOTAL = ENERGY_MEAN_TOTAL.to(device)
FORCE_MEAN_TOTAL = FORCE_MEAN_TOTAL.to(device)
# ---------------- Model & training ----------------
model = LEFTNet(pos_require_grad=True, cutoff=args.cutoff, num_layers=args.num_layers,
                hidden_channels=args.hidden_channels, num_radial=args.num_radial,y_mean=y_mean, y_std=y_std)
loss_func = torch.nn.L1Loss()
run(device, train_dataset, valid_dataset, test_dataset, model, loss_func,
    eval_steps=args.eval_steps, eval_start=args.eval_start,
    epochs=args.epochs, batch_size=args.batch_size, vt_batch_size=args.vt_batch_size,
    lr=args.lr, lr_decay_factor=args.lr_decay_factor, lr_decay_step_size=args.lr_decay_step_size,
    p=args.p, save_dir=args.save_dir)
3D | yuanqidu/LeftNet | model.py | .py | 16,857 | 482 | import math
from math import pi
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import Embedding
from torch_geometric.nn import radius_graph
from torch_geometric.nn.conv import MessagePassing
from torch_scatter import scatter
def nan_to_num(vec, num=0.0):
    """Return `vec` with NaN entries replaced by `num`.

    The original implementation mutated `vec` in place via masked assignment;
    this version is side-effect free (callers in this file only ever use the
    return value), avoiding surprising mutation of the argument.
    """
    return torch.where(torch.isnan(vec), torch.full_like(vec, num), vec)
def _normalize(vec, dim=-1):
    """L2-normalize `vec` along `dim`; entries whose norm is zero produce
    NaN in the division and are mapped back to 0."""
    norm = torch.norm(vec, dim=dim, keepdim=True)
    return nan_to_num(vec / norm)
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
## radial basis function to embed distances
class rbf_emb(nn.Module):
    """Exponential-Gaussian radial basis expansion of interatomic distances,
    modulated by a cosine soft cutoff at `soft_cutoff_upper`.

    forward maps (num_edges,) distances to (num_edges, num_rbf) features.
    """
    def __init__(self, num_rbf, soft_cutoff_upper, rbf_trainable=False):
        super().__init__()
        self.soft_cutoff_upper = soft_cutoff_upper
        self.soft_cutoff_lower = 0
        self.num_rbf = num_rbf
        # NOTE(review): the flag is stored but means/betas are always
        # registered as (non-trainable) buffers, so `rbf_trainable=True`
        # currently has no effect.
        self.rbf_trainable = rbf_trainable
        means, betas = self._initial_params()
        self.register_buffer("means", means)
        self.register_buffer("betas", betas)
    def _initial_params(self):
        # Centers are spaced linearly in exp(-d) space between exp(-upper)
        # and exp(-lower); a single shared beta sets the inverse width.
        start_value = torch.exp(torch.scalar_tensor(-self.soft_cutoff_upper))
        end_value = torch.exp(torch.scalar_tensor(-self.soft_cutoff_lower))
        means = torch.linspace(start_value, end_value, self.num_rbf)
        betas = torch.tensor([(2 / self.num_rbf * (end_value - start_value))**-2] *
                             self.num_rbf)
        return means, betas
    def reset_parameters(self):
        # Restore the analytic initialization into the registered buffers.
        means, betas = self._initial_params()
        self.means.data.copy_(means)
        self.betas.data.copy_(betas)
    def forward(self, dist):
        dist=dist.unsqueeze(-1)
        # Cosine envelope: smoothly decays to 0 at the cutoff, zeroed beyond.
        soft_cutoff = 0.5 * \
                      (torch.cos(dist * pi / self.soft_cutoff_upper) + 1.0)
        soft_cutoff = soft_cutoff * (dist < self.soft_cutoff_upper).float()
        return soft_cutoff*torch.exp(-self.betas * torch.square((torch.exp(-dist) - self.means)))
class NeighborEmb(MessagePassing):
    """Adds an aggregated, radially-weighted embedding of neighbouring atom
    types to each node's scalar features (sum aggregation)."""
    def __init__(self, hid_dim: int):
        super(NeighborEmb, self).__init__(aggr='add')
        # Atomic numbers up to 94 are supported.
        self.embedding = nn.Embedding(95, hid_dim)
        self.hid_dim = hid_dim
    def forward(self, z, s, edge_index, embs):
        neighbor_feats = self.propagate(edge_index, x=self.embedding(z), norm=embs)
        return s + neighbor_feats
    def message(self, x_j, norm):
        # Weight the sender's type embedding by the per-edge radial features.
        return norm.view(-1, self.hid_dim) * x_j
class S_vector(MessagePassing):
    """Builds per-node equivariant features by summing, over incoming edges,
    the edge direction weighted by transformed neighbour scalars and the
    radial embedding (sum aggregation)."""
    def __init__(self, hid_dim: int):
        super(S_vector, self).__init__(aggr='add')
        self.hid_dim = hid_dim
        self.lin1 = nn.Sequential(
            nn.Linear(hid_dim, hid_dim),
            nn.SiLU())
    def forward(self, s, v, edge_index, emb):
        # s: (num_nodes, hid_dim) scalars; v: edge direction vectors with a
        # trailing singleton channel dim (as passed from LEFTNet.forward);
        # emb: (num_edges, hid_dim) radial features.
        s = self.lin1(s)
        emb = emb.unsqueeze(1) * v
        v = self.propagate(edge_index, x=s, norm=emb)
        return v.view(-1, 3, self.hid_dim)
    def message(self, x_j, norm):
        # Messages are flattened to (num_edges, 3 * hid_dim) so the base class
        # can aggregate a 2D tensor; forward() reshapes back to (-1, 3, hid).
        x_j = x_j.unsqueeze(1)
        a = norm.view(-1, 3, self.hid_dim) * x_j
        return a.view(-1, 3 * self.hid_dim)
class EquiMessagePassing(MessagePassing):
    """Equivariant message-passing layer.

    Produces additive updates for node scalars (num_nodes, hidden) and node
    vectors (num_nodes, 3, hidden) from per-edge RBF features, invariant
    edge weights, and edge direction vectors.
    """
    def __init__(
        self,
        hidden_channels,
        num_radial,
    ):
        super(EquiMessagePassing, self).__init__(aggr="add", node_dim=0)
        self.hidden_channels = hidden_channels
        self.num_radial = num_radial
        # Projects the concatenated invariant edge features (A_i_j built in
        # LEFTNet.forward: 3*hidden + num_radial wide) to per-edge gates.
        self.inv_proj = nn.Sequential(
            nn.Linear(3 * self.hidden_channels + self.num_radial, self.hidden_channels * 3), nn.SiLU(inplace=True),
            nn.Linear(self.hidden_channels * 3, self.hidden_channels * 3), )
        # Sender-scalar projection to three channels (split in message()).
        self.x_proj = nn.Sequential(
            nn.Linear(hidden_channels, hidden_channels),
            nn.SiLU(),
            nn.Linear(hidden_channels, hidden_channels * 3),
        )
        self.rbf_proj = nn.Linear(num_radial, hidden_channels * 3)
        # Variance-preserving scale factors.
        self.inv_sqrt_3 = 1 / math.sqrt(3.0)
        self.inv_sqrt_h = 1 / math.sqrt(hidden_channels)
        self.reset_parameters()
    def reset_parameters(self):
        # Xavier weights / zero biases for the scalar and RBF projections.
        nn.init.xavier_uniform_(self.x_proj[0].weight)
        self.x_proj[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.x_proj[2].weight)
        self.x_proj[2].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.rbf_proj.weight)
        self.rbf_proj.bias.data.fill_(0)
    def forward(self, x, vec, edge_index, edge_rbf, weight, edge_vector):
        """Return (dx, dvec): additive updates for node scalars and vectors."""
        xh = self.x_proj(x)
        rbfh = self.rbf_proj(edge_rbf)
        weight = self.inv_proj(weight)
        rbfh = rbfh * weight
        # propagate_type: (xh: Tensor, vec: Tensor, rbfh_ij: Tensor, r_ij: Tensor)
        dx, dvec = self.propagate(
            edge_index,
            xh=xh,
            vec=vec,
            rbfh_ij=rbfh,
            r_ij=edge_vector,
            size=None,
        )
        return dx, dvec
    def message(self, xh_j, vec_j, rbfh_ij, r_ij):
        # Split gated sender features: one scalar channel plus two gates for
        # the vector update (sender vector and edge direction contributions).
        x, xh2, xh3 = torch.split(xh_j * rbfh_ij, self.hidden_channels, dim=-1)
        xh2 = xh2 * self.inv_sqrt_3
        vec = vec_j * xh2.unsqueeze(1) + xh3.unsqueeze(1) * r_ij.unsqueeze(2)
        vec = vec * self.inv_sqrt_h
        return x, vec
    def aggregate(
        self,
        features: Tuple[torch.Tensor, torch.Tensor],
        index: torch.Tensor,
        ptr: Optional[torch.Tensor],
        dim_size: Optional[int],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Sum both message streams over incoming edges per node.
        x, vec = features
        x = scatter(x, index, dim=self.node_dim, dim_size=dim_size)
        vec = scatter(vec, index, dim=self.node_dim, dim_size=dim_size)
        return x, vec
    def update(
        self, inputs: Tuple[torch.Tensor, torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        return inputs
class FTE(nn.Module):
    """Frame Transition Encoding block.

    Mixes node scalars with invariants derived from the equivariant features
    (vector norms and channel-wise dot products) and gates the vector
    channels, returning additive updates (dx, dvec).
    """
    def __init__(self, hidden_channels):
        super().__init__()
        self.hidden_channels = hidden_channels
        # Bias-free so the vector branch stays equivariant.
        self.equi_proj = nn.Linear(
            hidden_channels, hidden_channels * 2, bias=False
        )
        self.xequi_proj = nn.Sequential(
            nn.Linear(hidden_channels * 2, hidden_channels),
            nn.SiLU(),
            nn.Linear(hidden_channels, hidden_channels * 3),
        )
        # Variance-preserving scale factors.
        self.inv_sqrt_2 = 1 / math.sqrt(2.0)
        self.inv_sqrt_h = 1 / math.sqrt(hidden_channels)
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.xavier_uniform_(self.equi_proj.weight)
        nn.init.xavier_uniform_(self.xequi_proj[0].weight)
        self.xequi_proj[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.xequi_proj[2].weight)
        self.xequi_proj[2].bias.data.fill_(0)
    def forward(self, x, vec, node_frame):
        vec = self.equi_proj(vec)
        vec1,vec2 = torch.split(
            vec, self.hidden_channels, dim=-1
        )
        # NOTE(review): `scalrization` (projection of vec1 onto the node frame
        # with the second axis made sign-invariant) is computed but never used
        # afterwards — dead computation left in place.
        scalrization = torch.sum(vec1.unsqueeze(2) * node_frame.unsqueeze(-1), dim=1)
        scalrization[:, 1, :] = torch.abs(scalrization[:, 1, :].clone())
        scalar = torch.norm(vec1, dim=-2) # torch.sqrt(torch.sum(vec1 ** 2, dim=-2))
        # Invariant channel-wise dot product between the two vector halves.
        vec_dot = (vec1 * vec2).sum(dim=1)
        vec_dot = vec_dot * self.inv_sqrt_h
        x_vec_h = self.xequi_proj(
            torch.cat(
                [x, scalar], dim=-1
            )
        )
        xvec1, xvec2, xvec3 = torch.split(
            x_vec_h, self.hidden_channels, dim=-1
        )
        dx = xvec1 + xvec2 + vec_dot
        dx = dx * self.inv_sqrt_2
        # Gate the second vector half by a learned invariant.
        dvec = xvec3.unsqueeze(1) * vec2
        return dx, dvec
class aggregate_pos(MessagePassing):
    """Aggregates neighbour positions per node (mean by default); used to
    build the node-wise local frame in LEFTNet."""
    def __init__(self, aggr='mean'):
        super(aggregate_pos, self).__init__(aggr=aggr)
    def forward(self, vector, edge_index):
        return self.propagate(edge_index, x=vector)
class EquiOutput(nn.Module):
    """Maps per-node (scalar, vector) features to a single 3-vector output
    (used as the per-atom force head) via gated equivariant blocks."""
    def __init__(self, hidden_channels):
        super().__init__()
        self.hidden_channels = hidden_channels
        # A single block reducing the channel dimension to 1.
        self.output_network = nn.ModuleList([
            GatedEquivariantBlock(hidden_channels, 1),
        ])
        self.reset_parameters()
    def reset_parameters(self):
        for block in self.output_network:
            block.reset_parameters()
    def forward(self, x, vec):
        for block in self.output_network:
            x, vec = block(x, vec)
        # Drop the singleton channel dim -> (num_nodes, 3).
        return vec.squeeze()
# Borrowed from TorchMD-Net
class GatedEquivariantBlock(nn.Module):
    """Gated Equivariant Block as defined in Schütt et al. (2021):
    Equivariant message passing for the prediction of tensorial properties and molecular spectra
    """
    def __init__(
        self,
        hidden_channels,
        out_channels,
    ):
        super(GatedEquivariantBlock, self).__init__()
        self.out_channels = out_channels
        # Channel-wise linear maps on the vector features; bias-free so
        # equivariance is preserved.
        self.vec1_proj = nn.Linear(hidden_channels, hidden_channels, bias=False)
        self.vec2_proj = nn.Linear(hidden_channels, out_channels, bias=False)
        # Joint scalar network over [x, |vec1_proj(v)|].
        self.update_net = nn.Sequential(
            nn.Linear(hidden_channels * 2, hidden_channels),
            nn.SiLU(),
            nn.Linear(hidden_channels, out_channels * 2),
        )
        self.act = nn.SiLU()
    def reset_parameters(self):
        nn.init.xavier_uniform_(self.vec1_proj.weight)
        nn.init.xavier_uniform_(self.vec2_proj.weight)
        nn.init.xavier_uniform_(self.update_net[0].weight)
        self.update_net[0].bias.data.fill_(0)
        nn.init.xavier_uniform_(self.update_net[2].weight)
        self.update_net[2].bias.data.fill_(0)
    def forward(self, x, v):
        # Invariant summary of the vector features (norm over the 3-axis).
        vec_norm = torch.norm(self.vec1_proj(v), dim=-2)
        gated_vec = self.vec2_proj(v)
        scalars, gates = torch.split(
            self.update_net(torch.cat([x, vec_norm], dim=-1)),
            self.out_channels,
            dim=-1,
        )
        # New scalars pass through SiLU; new vectors are gated channel-wise.
        return self.act(scalars), gates.unsqueeze(1) * gated_vec
class LEFTNet(torch.nn.Module):
    r"""
    LEFTNet
    Args:
        pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. (default: :obj:`False`)
        cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`)
        num_layers (int, optional): Number of building blocks. (default: :obj:`4`)
        hidden_channels (int, optional): Hidden embedding size. (default: :obj:`128`)
        num_radial (int, optional): Number of radial basis functions. (default: :obj:`32`)
        y_mean (float, optional): Mean value of the labels of training data. (default: :obj:`0`)
        y_std (float, optional): Standard deviation of the labels of training data. (default: :obj:`1`)
    """
    def __init__(
        self, pos_require_grad=False, cutoff=5.0, num_layers=4,
        hidden_channels=128, num_radial=32, y_mean=0, y_std=1, **kwargs):
        super(LEFTNet, self).__init__()
        self.y_std = y_std
        self.y_mean = y_mean
        self.num_layers = num_layers
        self.hidden_channels = hidden_channels
        self.cutoff = cutoff
        self.pos_require_grad = pos_require_grad
        # Atom-type embedding (atomic numbers up to 94).
        self.z_emb = Embedding(95, hidden_channels)
        # Radial basis + MLP producing per-edge invariant features.
        self.radial_emb = rbf_emb(num_radial, self.cutoff)
        self.radial_lin = nn.Sequential(
            nn.Linear(num_radial, hidden_channels),
            nn.SiLU(inplace=True),
            nn.Linear(hidden_channels, hidden_channels))
        self.neighbor_emb = NeighborEmb(hidden_channels)
        self.S_vector = S_vector(hidden_channels)
        # Maps the 3 frame-scalarized components per channel to one scalar.
        self.lin = nn.Sequential(
            nn.Linear(3, hidden_channels // 4),
            nn.SiLU(inplace=True),
            nn.Linear(hidden_channels // 4, 1))
        self.message_layers = nn.ModuleList()
        self.FTEs = nn.ModuleList()
        for _ in range(num_layers):
            self.message_layers.append(
                EquiMessagePassing(hidden_channels, num_radial).jittable()
            )
            self.FTEs.append(FTE(hidden_channels))
        self.last_layer = nn.Linear(hidden_channels, 1)
        if self.pos_require_grad:
            # Direct force head (used in addition to -dE/dpos by the trainer).
            self.out_forces = EquiOutput(hidden_channels)
        # for node-wise frame
        self.mean_neighbor_pos = aggregate_pos(aggr='mean')
        self.inv_sqrt_2 = 1 / math.sqrt(2.0)
        self.reset_parameters()
    def reset_parameters(self):
        self.radial_emb.reset_parameters()
        for layer in self.message_layers:
            layer.reset_parameters()
        for layer in self.FTEs:
            layer.reset_parameters()
        self.last_layer.reset_parameters()
        for layer in self.radial_lin:
            if hasattr(layer, 'reset_parameters'):
                layer.reset_parameters()
        for layer in self.lin:
            if hasattr(layer, 'reset_parameters'):
                layer.reset_parameters()
    def forward(self, batch_data):
        # Uses `posc` (positions centered per molecule by the dataset classes).
        # NOTE(review): the cross products below are taken on the positions
        # themselves, so the frames depend on the coordinate origin — the
        # per-molecule centering appears to be what makes this well-defined;
        # confirm against the LEFTNet paper.
        z, pos, batch = batch_data.z, batch_data.posc, batch_data.batch
        if self.pos_require_grad:
            pos.requires_grad_()
        # embed z
        z_emb = self.z_emb(z)
        # construct edges based on the cutoff value
        edge_index = radius_graph(pos, r=self.cutoff, batch=batch)
        i, j = edge_index
        # embed pair-wise distance
        dist = torch.norm(pos[i]-pos[j], dim=-1)
        # radial_emb shape: (num_edges, num_radial), radial_hidden shape: (num_edges, hidden_channels)
        radial_emb = self.radial_emb(dist)
        radial_hidden = self.radial_lin(radial_emb)
        soft_cutoff = 0.5 * (torch.cos(dist * pi / self.cutoff) + 1.0)
        radial_hidden = soft_cutoff.unsqueeze(-1) * radial_hidden
        # init invariant node features
        # shape: (num_nodes, hidden_channels)
        s = self.neighbor_emb(z, z_emb, edge_index, radial_hidden)
        # init equivariant node features
        # shape: (num_nodes, 3, hidden_channels)
        vec = torch.zeros(s.size(0), 3, s.size(1), device=s.device)
        # bulid edge-wise frame
        edge_diff = pos[i] - pos[j]
        edge_diff = _normalize(edge_diff)
        edge_cross = torch.cross(pos[i], pos[j])
        edge_cross = _normalize(edge_cross)
        edge_vertical = torch.cross(edge_diff, edge_cross)
        # edge_frame shape: (num_edges, 3, 3)
        edge_frame = torch.cat((edge_diff.unsqueeze(-1), edge_cross.unsqueeze(-1), edge_vertical.unsqueeze(-1)), dim=-1)
        # build node-wise frame
        mean_neighbor_pos = self.mean_neighbor_pos(pos, edge_index)
        node_diff = pos - mean_neighbor_pos
        node_diff = _normalize(node_diff)
        node_cross = torch.cross(pos, mean_neighbor_pos)
        node_cross = _normalize(node_cross)
        node_vertical = torch.cross(node_diff, node_cross)
        # node_frame shape: (num_nodes, 3, 3)
        node_frame = torch.cat((node_diff.unsqueeze(-1), node_cross.unsqueeze(-1), node_vertical.unsqueeze(-1)), dim=-1)
        # LSE: local 3D substructure encoding
        # S_i_j shape: (num_nodes, 3, hidden_channels)
        S_i_j = self.S_vector(s, edge_diff.unsqueeze(-1), edge_index, radial_hidden)
        # Scalarize the node features of both edge endpoints in the edge
        # frame; the second axis is made sign-invariant via abs().
        scalrization1 = torch.sum(S_i_j[i].unsqueeze(2) * edge_frame.unsqueeze(-1), dim=1)
        scalrization2 = torch.sum(S_i_j[j].unsqueeze(2) * edge_frame.unsqueeze(-1), dim=1)
        scalrization1[:, 1, :] = torch.abs(scalrization1[:, 1, :].clone())
        scalrization2[:, 1, :] = torch.abs(scalrization2[:, 1, :].clone())
        scalar3 = (self.lin(torch.permute(scalrization1, (0, 2, 1))) + torch.permute(scalrization1, (0, 2, 1))[:, :,
                                                                       0].unsqueeze(2)).squeeze(-1)
        scalar4 = (self.lin(torch.permute(scalrization2, (0, 2, 1))) + torch.permute(scalrization2, (0, 2, 1))[:, :,
                                                                       0].unsqueeze(2)).squeeze(-1)
        # Invariant per-edge features fed to every message-passing layer.
        A_i_j = torch.cat((scalar3, scalar4), dim=-1) * soft_cutoff.unsqueeze(-1)
        A_i_j = torch.cat((A_i_j, radial_hidden, radial_emb), dim=-1)
        # NOTE(review): this loop variable `i` shadows the edge-row index `i`
        # unpacked above; the edge index is no longer needed here, but the
        # reuse is fragile.
        for i in range(self.num_layers):
            # equivariant message passing
            ds, dvec = self.message_layers[i](
                s, vec, edge_index, radial_emb, A_i_j, edge_diff
            )
            s = s + ds
            vec = vec + dvec
            # FTE: frame transition encoding
            ds, dvec = self.FTEs[i](s, vec, node_frame)
            s = s + ds
            vec = vec + dvec
        if self.pos_require_grad:
            forces = self.out_forces(s, vec)
        # Per-node energy contribution, summed per molecule, then rescaled
        # with the training-label statistics.
        s = self.last_layer(s)
        s = scatter(s, batch, dim=0)
        s = s * self.y_std + self.y_mean
        if self.pos_require_grad:
            return s, forces
        return s
    @property
    def num_params(self):
        # Total number of learnable parameters.
        return sum(p.numel() for p in self.parameters())
| Python |
3D | yuanqidu/LeftNet | qm9_dataset.py | .py | 7,456 | 174 | import os
import os.path as osp
import numpy as np
from tqdm import tqdm
import torch
from sklearn.utils import shuffle
from rdkit import Chem
from torch_geometric.data import Data, DataLoader, InMemoryDataset, download_url, extract_zip
HAR2EV = 27.211386246  # Hartree -> eV
KCALMOL2EV = 0.04336414  # kcal/mol -> eV
# Per-column unit conversion applied to the 19 raw target columns of
# gdb9.sdf.csv (1.0 where the raw unit is kept as-is).
conversion = torch.tensor([
    1., 1., HAR2EV, HAR2EV, HAR2EV, 1., HAR2EV, HAR2EV, HAR2EV, HAR2EV, HAR2EV,
    1., KCALMOL2EV, KCALMOL2EV, KCALMOL2EV, KCALMOL2EV, 1., 1., 1.
])
# NOTE(review): `types` is defined but never used in this file.
types = {'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4}
class QM93D(InMemoryDataset):
    r"""
    A `Pytorch Geometric <https://pytorch-geometric.readthedocs.io/en/latest/index.html>`_ data interface for :obj:`QM9` dataset
    which is from `"Quantum chemistry structures and properties of 134 kilo molecules" <https://www.nature.com/articles/sdata201422>`_ paper.
    It consists of about 130,000 equilibrium molecules with 12 regression targets:
    :obj:`mu`, :obj:`alpha`, :obj:`homo`, :obj:`lumo`, :obj:`gap`, :obj:`r2`, :obj:`zpve`, :obj:`U0`, :obj:`U`, :obj:`H`, :obj:`G`, :obj:`Cv`.
    Each molecule includes complete spatial information for the single low energy conformation of the atoms in the molecule.
    .. note::
        Based on the code of `QM9 in Pytorch Geometric <https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/datasets/qm9.html#QM9>`_.
    Args:
        root (string): the dataset folder will be located at root/qm9.
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    Example:
    --------
    >>> dataset = QM93D()
    >>> target = 'mu'
    >>> dataset.data.y = dataset.data[target]
    >>> split_idx = dataset.get_idx_split(len(dataset.data.y), train_size=110000, valid_size=10000, seed=42)
    >>> train_dataset, valid_dataset, test_dataset = dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']]
    >>> train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    >>> data = next(iter(train_loader))
    >>> data
    Batch(Cv=[32], G=[32], H=[32], U=[32], U0=[32], alpha=[32], batch=[579], gap=[32], homo=[32], lumo=[32], mu=[32], pos=[579, 3], ptr=[33], r2=[32], y=[32], z=[579], zpve=[32])
    Where the attributes of the output data indicates:
    * :obj:`z`: The atom type.
    * :obj:`pos`: The 3D position for atoms.
    * :obj:`y`: The target property for the graph (molecule).
    * :obj:`batch`: The assignment vector which maps each node to its respective graph identifier and can help reconstruct single graphs
    """
    def __init__(self, root = 'dataset/', transform = None, pre_transform = None, pre_filter = None):
        self.raw_url = ('https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/'
                        'molnet_publish/qm9.zip')
        # Second download: list of uncharacterized molecules to exclude.
        self.raw_url2 = 'https://ndownloader.figshare.com/files/3195404'
        self.folder = osp.join(root, 'qm9')
        super(QM93D, self).__init__(self.folder, transform, pre_transform, pre_filter)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        return ['gdb9.sdf', 'gdb9.sdf.csv', 'uncharacterized.txt']
    @property
    def processed_file_names(self):
        return 'qm9_pyg.pt'
    def download(self):
        # Fetch the SDF/CSV archive, then the uncharacterized-molecule list
        # (downloaded under its figshare id and renamed).
        file_path = download_url(self.raw_url, self.raw_dir)
        extract_zip(file_path, self.raw_dir)
        os.unlink(file_path)
        file_path = download_url(self.raw_url2, self.raw_dir)
        os.rename(osp.join(self.raw_dir, '3195404'),
                  osp.join(self.raw_dir, 'uncharacterized.txt'))
    def process(self):
        # Parse targets (columns 1..19 of the CSV, skipping the header row).
        with open(self.raw_paths[1], 'r') as f:
            target = [[float(x) for x in line.split(',')[1:20]]
                      for line in f.read().split('\n')[1:-1]]
            y = torch.tensor(target, dtype=torch.float)
            # Reorder columns (last 3 raw columns moved to the front of the
            # per-property layout used below) and convert units.
            y = torch.cat([y[:, 3:], y[:, :3]], dim=-1)
            y = y * conversion.view(1, -1)
        # Indices (0-based) of molecules to skip entirely.
        with open(self.raw_paths[2], 'r') as f:
            skip = [int(x.split()[0]) - 1 for x in f.read().split('\n')[9:-2]]
        suppl = Chem.SDMolSupplier(self.raw_paths[0], removeHs=False,
                                   sanitize=False)
        data_list = []
        for i, mol in enumerate(tqdm(suppl)):
            if i in skip:
                continue
            conf = mol.GetConformer()
            pos = conf.GetPositions()
            pos = torch.tensor(pos, dtype=torch.float)
            # Centered coordinates (zero mean per molecule); LEFTNet reads
            # `posc`, not `pos`.
            posc = pos - pos.mean(dim=0)
            atomic_number = []
            for atom in mol.GetAtoms():
                atomic_number.append(atom.GetAtomicNum())
            z = torch.tensor(atomic_number, dtype=torch.long)
            data = Data(
                z=z,
                pos=pos,
                posc=posc,
                y=y[i].unsqueeze(0),
                mu=y[i][0], alpha=y[i][1], homo=y[i][2], lumo=y[i][3], gap=y[i][4], r2=y[i][5], zpve=y[i][6], U0=y[i][7], U=y[i][12], H=y[i][13], G=y[i][14], Cv=y[i][15]
            )
            data_list.append(data)
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        data, slices = self.collate(data_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])
    def get_idx_split(self, data_size, train_size, valid_size, seed):
        """Shuffle indices with `seed` and split into train/valid/test."""
        ids = shuffle(range(data_size), random_state=seed)
        train_idx, val_idx, test_idx = torch.tensor(ids[:train_size]), torch.tensor(ids[train_size:train_size + valid_size]), torch.tensor(ids[train_size + valid_size:])
        split_dict = {'train':train_idx, 'valid':val_idx, 'test':test_idx}
        return split_dict
if __name__ == '__main__':
    # Smoke test: build/process the dataset, pick one target, check splits
    # and a sample batch.
    dataset = QM93D(root='dataset/')
    print(dataset)
    print(dataset.data.z.shape)
    print(dataset.data.pos.shape)
    target = 'mu'
    dataset.data.y = dataset.data[target]
    print(dataset.data.y.shape)
    print(dataset.data.y)
    print(dataset.data.mu)
    split_idx = dataset.get_idx_split(len(dataset.data.y), train_size=110000, valid_size=10000, seed=42)
    print(split_idx)
    print(dataset[split_idx['train']])
    train_dataset, valid_dataset, test_dataset = dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']]
    train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)
    data = next(iter(train_loader))
    print(data)
| Python |
3D | yuanqidu/LeftNet | md17_dataset.py | .py | 5,674 | 127 | import os.path as osp
import numpy as np
from tqdm import tqdm
import torch
from sklearn.utils import shuffle
from torch_geometric.data import InMemoryDataset, download_url
from torch_geometric.data import Data, DataLoader
class MD17(InMemoryDataset):
r"""
A `Pytorch Geometric <https://pytorch-geometric.readthedocs.io/en/latest/index.html>`_ data interface for :obj:`MD17` dataset
which is from `"Machine learning of accurate energy-conserving molecular force fields" <https://advances.sciencemag.org/content/3/5/e1603015.short>`_ paper.
MD17 is a collection of eight molecular dynamics simulations for small organic molecules.
Args:
root (string): The dataset folder will be located at root/name.
name (string): The name of dataset. Available dataset names are as follows: :obj:`aspirin`, :obj:`benzene_old`, :obj:`ethanol`, :obj:`malonaldehyde`,
:obj:`naphthalene`, :obj:`salicylic`, :obj:`toluene`, :obj:`uracil`. (default: :obj:`benzene_old`)
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
pre_filter (callable, optional): A function that takes in an
:obj:`torch_geometric.data.Data` object and returns a boolean
value, indicating whether the data object should be included in the
final dataset. (default: :obj:`None`)
Example:
--------
>>> dataset = MD17(name='aspirin')
>>> split_idx = dataset.get_idx_split(len(dataset.data.y), train_size=1000, valid_size=10000, seed=42)
>>> train_dataset, valid_dataset, test_dataset = dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']]
>>> train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
>>> data = next(iter(train_loader))
>>> data
Batch(batch=[672], force=[672, 3], pos=[672, 3], ptr=[33], y=[32], z=[672])
Where the attributes of the output data indicates:
* :obj:`z`: The atom type.
* :obj:`pos`: The 3D position for atoms.
* :obj:`y`: The property (energy) for the graph (molecule).
* :obj:`force`: The 3D force for atoms.
* :obj:`batch`: The assignment vector which maps each node to its respective graph identifier and can help reconstructe single graphs
"""
def __init__(self, root = 'dataset/', name = 'benzene2017', transform = None, pre_transform = None, pre_filter = None):
self.name = name
self.folder = osp.join(root, self.name)
self.url = 'http://quantum-machine.org/gdml/data/npz/md17_' + self.name + '.npz'
super(MD17, self).__init__(self.folder, transform, pre_transform, pre_filter)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
return 'md17_' + self.name + '.npz'
@property
def processed_file_names(self):
return self.name + '_pyg.pt'
def download(self):
download_url(self.url, self.raw_dir)
def process(self):
data = np.load(osp.join(self.raw_dir, self.raw_file_names))
E = data['E']
F = data['F']
R = data['R']
z = data['z']
data_list = []
for i in tqdm(range(len(E))):
R_i = torch.tensor(R[i],dtype=torch.float32)
z_i = torch.tensor(z,dtype=torch.int64)
E_i = torch.tensor(E[i],dtype=torch.float32)
F_i = torch.tensor(F[i],dtype=torch.float32)
center_i = R_i.mean(dim=0)
posc_i = R_i - center_i
data = Data(pos=R_i,posc=posc_i, z=z_i, y=E_i, force=F_i)
data_list.append(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
print('Saving...')
torch.save((data, slices), self.processed_paths[0])
    def get_idx_split(self, data_size, train_size, valid_size, seed):
        """Randomly split ``range(data_size)`` into train/valid/test index tensors.

        The first ``train_size`` shuffled ids form the training split, the next
        ``valid_size`` the validation split, and the remainder the test split.
        ``seed`` makes the shuffle reproducible (sklearn's ``shuffle``).
        """
        ids = shuffle(range(data_size), random_state=seed)
        train_idx, val_idx, test_idx = torch.tensor(ids[:train_size]), torch.tensor(ids[train_size:train_size + valid_size]), torch.tensor(ids[train_size + valid_size:])
        split_dict = {'train':train_idx, 'valid':val_idx, 'test':test_idx}
        return split_dict
if __name__ == '__main__':
    # Smoke test: build the aspirin dataset, print tensor shapes, and exercise
    # splitting plus one DataLoader iteration.
    dataset = MD17(name='aspirin')
    print(dataset)
    print(dataset.data.z.shape)
    print(dataset.data.pos.shape)
    print(dataset.data.posc.shape)
    print(dataset.data.y.shape)
    print(dataset.data.force.shape)
    split_idx = dataset.get_idx_split(len(dataset.data.y), train_size=1000, valid_size=1000, seed=42)
    print(split_idx)
    print(dataset[split_idx['train']])
    train_dataset, valid_dataset, test_dataset = dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']]
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    data = next(iter(train_loader))
print(data) | Python |
3D | yuanqidu/LeftNet | main_qm9.py | .py | 7,283 | 176 | ### Based on the code in https://github.com/divelab/DIG/tree/dig-stable/dig/threedgraph
from qm9_dataset import QM93D
from model import LEFTNet
import argparse
import os
import torch
from torch.optim import Adam
from torch_geometric.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR, OneCycleLR, CosineAnnealingWarmRestarts
from tqdm import tqdm
import time
def run(device, train_dataset, valid_dataset, test_dataset, model, scheduler_name, loss_func, epochs=800, batch_size=32, vt_batch_size=32, lr=0.0005, lr_decay_factor=0.5, lr_decay_step_size=50, weight_decay=0,
        save_dir='models/', log_dir='', disable_tqdm=False):
    """Full training loop: train/validate/test each epoch and track the best model.

    Args:
        device: torch device the model and batches are moved to.
        train_dataset / valid_dataset / test_dataset: the three data splits.
        model: network to optimize (moved to ``device`` here).
        scheduler_name: 'steplr' (stepped once per epoch here) or 'onecyclelr'
            (stepped per batch inside train()).
        loss_func: training loss, called as ``loss_func(pred, target)``.
        epochs, batch_size, vt_batch_size, lr, lr_decay_factor,
        lr_decay_step_size, weight_decay: optimization hyper-parameters
            (``vt_batch_size`` is used for the validation/test loaders).
        save_dir: if non-empty, the best-validation checkpoint is written there.
        log_dir: if non-empty, TensorBoard scalars are written there.
        disable_tqdm: silence progress bars.

    Raises:
        ValueError: if ``scheduler_name`` is not a supported name.
    """
    model = model.to(device)
    num_params = sum(p.numel() for p in model.parameters())
    print('num_parameters:', num_params)
    train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
    valid_loader = DataLoader(valid_dataset, vt_batch_size, shuffle=False)
    test_loader = DataLoader(test_dataset, vt_batch_size, shuffle=False)
    optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    if scheduler_name == 'steplr':
        scheduler = StepLR(optimizer, step_size=lr_decay_step_size, gamma=lr_decay_factor)
    elif scheduler_name == 'onecyclelr':
        scheduler = OneCycleLR(optimizer, max_lr=lr, steps_per_epoch=len(train_loader), epochs=epochs)
    else:
        # Previously an unknown name left `scheduler` undefined and crashed much
        # later (NameError at checkpoint save); fail fast instead.
        raise ValueError(f"Unknown scheduler_name: {scheduler_name!r} (expected 'steplr' or 'onecyclelr')")
    best_valid = float('inf')
    test_valid = float('inf')
    if save_dir != '':
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
    if log_dir != '':
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        writer = SummaryWriter(log_dir=log_dir)
    start_epoch = 1
    for epoch in range(start_epoch, epochs + 1):
        print("=====Epoch {}".format(epoch), flush=True)
        t_start = time.perf_counter()
        train_mae = train(model, optimizer, scheduler, scheduler_name, train_loader, loss_func, device, disable_tqdm)
        valid_mae = val(model, valid_loader, device, disable_tqdm)
        test_mae = val(model, test_loader, device, disable_tqdm)
        if log_dir != '':
            writer.add_scalar('train_mae', train_mae, epoch)
            writer.add_scalar('valid_mae', valid_mae, epoch)
            writer.add_scalar('test_mae', test_mae, epoch)
        if valid_mae < best_valid:
            # New best validation score: remember its paired test score and checkpoint.
            best_valid = valid_mae
            test_valid = test_mae
            if save_dir != '':
                print('Saving checkpoint...')
                checkpoint = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'scheduler_state_dict': scheduler.state_dict(), 'best_valid_mae': best_valid, 'num_params': num_params}
                torch.save(checkpoint, os.path.join(save_dir, 'valid_checkpoint.pt'))
        t_end = time.perf_counter()
        print({'Train': train_mae, 'Validation': valid_mae, 'Test': test_mae, 'Best valid': best_valid, 'Test@ best valid': test_valid, 'Duration': t_end-t_start})
        if scheduler_name == 'steplr':
            # StepLR steps per epoch; OneCycleLR is stepped per batch in train().
            scheduler.step()
    print(f'Best validation MAE so far: {best_valid}')
    print(f'Test MAE when got best validation result: {test_valid}')
    if log_dir != '':
        writer.close()
def train(model, optimizer, scheduler, scheduler_name, train_loader, loss_func, device, disable_tqdm):
    """Run one training epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0
    progress = tqdm(train_loader, disable=disable_tqdm)
    for batch_idx, batch in enumerate(progress):
        optimizer.zero_grad()
        batch = batch.to(device)
        prediction = model(batch)
        # Targets are (N,) -> (N, 1) to match the model output.
        batch_loss = loss_func(prediction, batch.y.unsqueeze(1))
        batch_loss.backward()
        optimizer.step()
        # OneCycleLR schedules per optimizer step; StepLR is stepped per epoch by the caller.
        if scheduler_name == 'onecyclelr':
            scheduler.step()
        running_loss += batch_loss.detach().cpu().item()
    return running_loss / (batch_idx + 1)
def val(model, data_loader, device, disable_tqdm):
    """Evaluate `model` on `data_loader` and return the mean absolute error.

    Runs under ``torch.no_grad`` and collects per-batch outputs in lists with a
    single final concatenation (the previous version re-concatenated the full
    tensors every batch, which is quadratic in the number of batches).
    """
    model.eval()
    preds = []
    targets = []
    for step, batch_data in enumerate(tqdm(data_loader, disable=disable_tqdm)):
        batch_data = batch_data.to(device)
        with torch.no_grad():
            out = model(batch_data)
        preds.append(out.detach())
        targets.append(batch_data.y.unsqueeze(1))
    if not preds:
        # Degenerate empty-loader case: mean over nothing is NaN (as before).
        return float('nan')
    preds = torch.cat(preds, dim=0)
    targets = torch.cat(targets, dim=0)
    return torch.mean(torch.abs(preds - targets)).cpu().item()
parser = argparse.ArgumentParser(description='QM9')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--target', type=str, default='U0')
parser.add_argument('--train_size', type=int, default=110000)
parser.add_argument('--valid_size', type=int, default=10000)
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--cutoff', type=float, default=5.0)
parser.add_argument('--num_radial', type=int, default=32)
parser.add_argument('--hidden_channels', type=int, default=256)
parser.add_argument('--num_layers', type=int, default=4)
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--vt_batch_size', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.0005)
parser.add_argument('--lr_decay_factor', type=float, default=0.5)
parser.add_argument('--lr_decay_step_size', type=int, default=150)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--save_dir', type=str, default='')
parser.add_argument('--disable_tqdm', default=False, action='store_true')
parser.add_argument('--scheduler', type=str, default='steplr')
parser.add_argument('--norm_label', default=False, action='store_true')
args = parser.parse_args()
print(args)
print(args.save_dir)
dataset = QM93D(root='dataset/')
target = args.target
dataset.data.y = dataset.data[target]
split_idx = dataset.get_idx_split(len(dataset.data.y), train_size=args.train_size, valid_size=args.valid_size, seed=args.seed)
train_dataset, valid_dataset, test_dataset = dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']]
print('train, validaion, test:', len(train_dataset), len(valid_dataset), len(test_dataset))
if args.norm_label:
y_mean = torch.mean(train_dataset.data.y).item()
y_std = torch.std(train_dataset.data.y).item()
print('y_mean, y_std:', y_mean, y_std)
else:
y_mean = 0
y_std = 1
model = LEFTNet(pos_require_grad=False, cutoff=args.cutoff, num_layers=args.num_layers,
hidden_channels=args.hidden_channels, num_radial=args.num_radial, y_mean=y_mean, y_std=y_std)
loss_func = torch.nn.L1Loss()
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
print('device',device)
model.to(device)
run(device=device,
train_dataset=train_dataset, valid_dataset=valid_dataset, test_dataset=test_dataset,
model=model, scheduler_name=args.scheduler, loss_func=loss_func,
epochs=args.epochs, batch_size=args.batch_size, vt_batch_size=args.batch_size,
lr=args.lr, lr_decay_factor=args.lr_decay_factor, lr_decay_step_size=args.lr_decay_step_size,
weight_decay=args.weight_decay,
save_dir=args.save_dir, log_dir=args.save_dir, disable_tqdm=args.disable_tqdm) | Python |
3D | chenz53/MIM-Med3D | train.sh | .sh | 94 | 6 | #!/usr/bin/env bash
# Usage: train.sh <main_script.py> <config.yaml>
MAIN_FILE=$1
CONFIG_FILE=$2
# Quote expansions so paths containing spaces or glob characters survive intact.
python3 "$MAIN_FILE" fit --config "$CONFIG_FILE"
| Shell |
3D | chenz53/MIM-Med3D | setup.py | .py | 206 | 8 | from setuptools import setup, find_packages
# Minimal packaging metadata: installs every discovered package as `mim3d`.
setup(
    name="mim3d",
    version="1.0",
    description="Codes for Masked Image Modeling advances 3D Medical Image Modeling",
    packages=find_packages(),
) | Python |
3D | chenz53/MIM-Med3D | slurm_train.sh | .sh | 406 | 8 | # For example, using AWS g5.48xlarge instance for slurm training 2 days
# brats data pretraining using SimMIM on t1ce modality
# Requests one g5.48xlarge node (8 GPUs, 192 tasks) for up to 2 days and runs
# train.sh with the SimMIM ViT-base mask-ratio-0.75 t1ce config via --wrap.
sbatch --ntasks-per-node=192 \
    --partition=g5-on-demand \
    --time=2-00:00:00 \
    --gres=gpu:8 \
    --constraint="[g5.48xlarge]" \
--wrap="sh train.sh code/experiments/ssl/simmim_pretrain_main.py code/configs/ssl/brats/vitsimmim_base_m0.75_t1ce.yaml" | Shell |
3D | chenz53/MIM-Med3D | code/metrics/ravd_metric.py | .py | 5,524 | 135 | from typing import Union
import warnings
import torch
from monai.metrics import CumulativeIterationMetric
from monai.metrics.utils import do_metric_reduction, ignore_background
from monai.utils import MetricReduction
class RavdMetric(CumulativeIterationMetric):
    """
    Compute the relative absolute volume difference (RAVD) between the binary
    objects in two tensors. Supports both multi-class and multi-label tasks.
    Input `y_pred` is compared with ground truth `y`.
    `y_pred` is expected to contain binarized predictions and `y` should be in
    one-hot format. You can use suitable transforms in ``monai.transforms.post``
    first to achieve binarized values.
    The `include_background` parameter can be set to ``False`` to exclude the
    first category (channel index 0), which is by convention assumed to be
    background, from the metric.
    `y_pred` and `y` can be a list of channel-first Tensors (CHW[D]) or a
    batch-first Tensor (BCHW[D]).
    Args:
        include_background: whether to include channel 0 (background) in the
            computation. Defaults to ``True``.
        reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}
            Define the mode to reduce computation result. Defaults to ``"mean"``.
        get_not_nans: whether to return the `not_nans` count; if True, aggregate() returns (metric, not_nans).
            Here `not_nans` counts the number of not-NaNs for the metric, so its shape equals the metric's shape.
    """
    def __init__(
        self,
        include_background: bool = True,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__()
        self.include_background = include_background
        self.reduction = reduction
        self.get_not_nans = get_not_nans
    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor):  # type: ignore
        """
        Validate inputs and compute per-(batch, class) RAVD values.
        Args:
            y_pred: typical segmentation model output; one-hot format, batch
                first, e.g. shape [16, 3, 32, 32]; values should be binarized.
            y: ground truth; one-hot format, batch first; values binarized.
        Raises:
            ValueError: when `y` is not a binarized tensor.
            ValueError: when `y_pred` has less than three dimensions.
        """
        if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
            raise ValueError("y_pred and y must be PyTorch Tensor.")
        # A non-binary prediction is tolerated (warn only), but a non-binary
        # ground truth is a hard error.
        if not torch.all(y_pred.byte() == y_pred):
            warnings.warn("y_pred should be a binarized tensor.")
        if not torch.all(y.byte() == y):
            raise ValueError("y should be a binarized tensor.")
        dims = y_pred.ndimension()
        if dims < 3:
            raise ValueError("y_pred should have at least three dimensions.")
        # Per-batch, per-channel RAVD (shape [B, C]); buffered by the base class.
        return compute_meanravd(
            y_pred=y_pred,
            y=y,
            include_background=self.include_background,
        )
    def aggregate(self):  # type: ignore
        """
        Reduce the buffered per-iteration results according to ``self.reduction``.
        """
        data = self.get_buffer()
        if not isinstance(data, torch.Tensor):
            raise ValueError("the data to aggregate must be PyTorch Tensor.")
        # do metric reduction
        f, not_nans = do_metric_reduction(data, self.reduction)
        return (f, not_nans) if self.get_not_nans else f
def compute_meanravd(
    y_pred: torch.Tensor,
    y: torch.Tensor,
    include_background: bool = True,
) -> torch.Tensor:
    """Compute the relative absolute volume difference (RAVD) between binary objects.

    RAVD per batch item and channel is ``|V_gt - V_pred| / V_gt`` where the
    volumes are voxel sums over the spatial dimensions; entries with an empty
    ground truth (``V_gt == 0``) are returned as NaN so reductions can skip them.

    Args:
        y_pred: binarized one-hot prediction, batch first, e.g. [B, C, H, W(, D)].
        y: binarized one-hot ground truth with the same shape as ``y_pred``.
        include_background: if False, channel 0 (background) is dropped before
            computing the metric.

    Returns:
        RAVD values of shape [batch_size, n_classes] (NaN where ground truth is empty).

    Raises:
        ValueError: when `y_pred` and `y` have different shapes.
    """
    if not include_background:
        y_pred, y = ignore_background(
            y_pred=y_pred,
            y=y,
        )
    y = y.float()
    y_pred = y_pred.float()
    if y.shape != y_pred.shape:
        raise ValueError("y_pred and y should have same shapes.")
    # Sum only over spatial dimensions (keep batch and channel dims).
    spatial_dims = list(range(2, len(y_pred.shape)))
    y_o = torch.sum(y, dim=spatial_dims)
    y_pred_o = torch.sum(y_pred, dim=spatial_dims)
    return torch.where(
        y_o > 0,
        torch.abs(y_o - y_pred_o) / y_o,
        torch.tensor(float("nan"), device=y_o.device),
    )
| Python |
3D | chenz53/MIM-Med3D | code/metrics/__init__.py | .py | 36 | 2 | from .ravd_metric import RavdMetric
| Python |
3D | chenz53/MIM-Med3D | code/models/upernet_3d.py | .py | 6,509 | 211 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from itertools import chain
from typing import Sequence
def initialize_weights(*models):
    """Apply the project's standard weight init to all submodules of *models*.

    Conv3d weights get Kaiming-normal (ReLU gain); BatchNorm3d is set to
    weight=1.0 / bias=1e-4; Linear layers get near-zero Gaussian weights
    (std 1e-4) and zero bias. Other module types are left untouched.
    """
    for module in chain.from_iterable(net.modules() for net in models):
        if isinstance(module, nn.Conv3d):
            nn.init.kaiming_normal_(module.weight.data, nonlinearity="relu")
        elif isinstance(module, nn.BatchNorm3d):
            module.weight.data.fill_(1.0)
            module.bias.data.fill_(1e-4)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(0.0, 0.0001)
            module.bias.data.zero_()
class PSPModule3D(nn.Module):
    """3D Pyramid Scene Parsing (PSP) module.

    Pools the input to several coarse grid sizes (``bin_sizes``), projects each
    pooled map with a 1x1 conv, upsamples everything back to the input
    resolution, and fuses the concatenation with a 3x3 conv block back to
    ``in_channels`` channels.
    """
    # In the original implementation they use precise RoI pooling
    # instead of adaptive average pooling.
    def __init__(self, in_channels, bin_sizes=(1, 2, 4, 6)):
        # NOTE: the default was previously a mutable list; a tuple avoids the
        # shared-mutable-default pitfall and is otherwise equivalent.
        super(PSPModule3D, self).__init__()
        out_channels = in_channels // len(bin_sizes)
        self.stages = nn.ModuleList(
            [self._make_stages(in_channels, out_channels, b_s) for b_s in bin_sizes]
        )
        self.bottleneck = nn.Sequential(
            nn.Conv3d(
                in_channels + (out_channels * len(bin_sizes)),
                in_channels,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            nn.BatchNorm3d(in_channels),
            nn.ReLU(inplace=True),
            nn.Dropout3d(0.1),
        )
    def _make_stages(self, in_channels, out_channels, bin_sz):
        # One pyramid branch: pool to bin_sz^3, 1x1 conv, BN, ReLU.
        prior = nn.AdaptiveAvgPool3d(output_size=bin_sz)
        conv = nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=False)
        bn = nn.BatchNorm3d(out_channels)
        relu = nn.ReLU(inplace=True)
        return nn.Sequential(prior, conv, bn, relu)
    def forward(self, features):
        """Return a map with the same shape as ``features`` enriched with
        multi-scale pooled context."""
        d, h, w = features.size()[2], features.size()[3], features.size()[4]
        pyramids = [features]
        pyramids.extend(
            [
                F.interpolate(
                    stage(features),
                    size=(d, h, w),
                    mode="trilinear",
                    align_corners=True,
                )
                for stage in self.stages
            ]
        )
        output = self.bottleneck(torch.cat(pyramids, dim=1))
        return output
def up_and_add(x, y):
    """Trilinearly upsample ``x`` to ``y``'s spatial size and return ``x + y``."""
    target_size = (y.size(2), y.size(3), y.size(4))
    upsampled = F.interpolate(
        x, size=target_size, mode="trilinear", align_corners=True
    )
    return upsampled + y
class FPN_fuse3D(nn.Module):
    """3D FPN-style feature fusion head.

    1x1 convs project all but the finest feature map to ``fpn_out`` channels,
    a top-down pathway adds each upsampled coarser map into the next finer one,
    each fused map is smoothed by a 3x3 conv, and all pyramid levels are then
    upsampled to the finest resolution and fused by a final 3x3 conv block.
    """
    def __init__(self, feature_channels=[256, 512, 1024, 2048], fpn_out=256):
        super(FPN_fuse3D, self).__init__()
        self.conv1x1 = nn.ModuleList(
            [
                nn.Conv3d(ft_size, fpn_out, kernel_size=1)
                for ft_size in feature_channels[1:]
            ]
        )
        # BUG FIX: the previous `[conv] * n` repeated the SAME Conv3d object, so
        # every pyramid level shared one set of smoothing weights. Build an
        # independent conv per level instead (state_dict keys are unchanged).
        self.smooth_conv = nn.ModuleList(
            [
                nn.Conv3d(fpn_out, fpn_out, kernel_size=3, padding=1)
                for _ in range(len(feature_channels) - 1)
            ]
        )
        self.conv_fusion = nn.Sequential(
            nn.Conv3d(
                len(feature_channels) * fpn_out,
                fpn_out,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            nn.BatchNorm3d(fpn_out),
            nn.ReLU(inplace=True),
        )
    def forward(self, features):
        """Fuse a finest-first list of feature maps into one ``fpn_out``-channel map."""
        # Project all but the finest level to the common channel width.
        features[1:] = [
            conv1x1(feature) for feature, conv1x1 in zip(features[1:], self.conv1x1)
        ]
        # Top-down pathway: add each upsampled coarser map into the next finer map.
        P = [
            up_and_add(features[i], features[i - 1])
            for i in reversed(range(1, len(features)))
        ]
        P = [smooth_conv(x) for smooth_conv, x in zip(self.smooth_conv, P)]
        P = list(reversed(P))
        P.append(features[-1])  # P = [P1, P2, P3, P4]
        # Upsample every level to the finest resolution before fusing.
        D, H, W = P[0].size(2), P[0].size(3), P[0].size(4)
        P[1:] = [
            F.interpolate(feature, size=(D, H, W), mode="trilinear", align_corners=True)
            for feature in P[1:]
        ]
        x = self.conv_fusion(torch.cat((P), dim=1))
        return x
class UperNet3D(nn.Module):
    """3D UperNet decoder head: PSP pooling on the coarsest feature map,
    FPN-style multi-scale fusion, then a conv classifier whose logits are
    trilinearly upsampled to ``image_size``.
    """
    # Implementing only the object path
    def __init__(
        self,
        image_size: Sequence[int],
        num_classes: int,
        feature_channels: Sequence[int] = [64, 128, 256, 512],
        fpn_out: int = 64,
        freeze_bn: bool = False,
        **_
    ):
        super(UperNet3D, self).__init__()
        self.image_size = image_size
        # if backbone == "resnet34" or backbone == "resnet18":
        #     feature_channels = [64, 128, 256, 512]
        # else:
        #     feature_channels = [256, 512, 1024, 2048]
        # The finest input map is used unprojected by FPN_fuse3D, so its
        # channel count must already equal the FPN width.
        assert feature_channels[0] == fpn_out
        # self.backbone = ResNet(
        #     in_channels=in_channels,
        #     output_stride=output_stride,
        #     backbone=backbone,
        #     pretrained=pretrained,
        # )
        self.PPN = PSPModule3D(feature_channels[-1])
        self.FPN = FPN_fuse3D(feature_channels, fpn_out=fpn_out)
        self.head = nn.Conv3d(fpn_out, num_classes, kernel_size=3, padding=1)
        if freeze_bn:
            self.freeze_bn()
    def forward(self, features):
        """Fuse the finest-first list of multi-scale features; return full-size logits."""
        # features = self.backbone(x)
        # for feat in features:
        #     print(feat.size())
        # Enrich the coarsest map with pyramid pooling before FPN fusion.
        features[-1] = self.PPN(features[-1])
        x = self.head(self.FPN(features))
        x = F.interpolate(x, size=self.image_size, mode="trilinear", align_corners=True)
        return x
    # def get_backbone_params(self):
    #     return self.backbone.parameters()
    def get_decoder_params(self):
        # All trainable parameters of this head (no backbone here).
        return chain(
            self.PPN.parameters(), self.FPN.parameters(), self.head.parameters()
        )
    def freeze_bn(self):
        # Put every BatchNorm3d into eval mode (running statistics frozen).
        for module in self.modules():
            if isinstance(module, nn.BatchNorm3d):
                module.eval()
if __name__ == "__main__":
    # Shape smoke test: feed four dummy multi-scale feature maps through the head.
    # x = [
    #     torch.randn(2, 192, 24, 12, 12),
    #     torch.randn(2, 384, 24, 6, 6),
    #     torch.randn(2, 768, 32, 3, 3),
    #     torch.randn(2, 768, 32, 3, 3),
    # ]
    x = [
        torch.randn(2, 256, 24, 12, 12),
        torch.randn(2, 512, 24, 6, 6),
        torch.randn(2, 1024, 24, 3, 3),
        torch.randn(2, 1024, 24, 3, 3),
    ]
    model = UperNet3D(
        image_size=(96, 96, 96),
        num_classes=14,
        # feature_channels=[192, 384, 768, 768],
        feature_channels=[256, 512, 1024, 1024],
        # fpn_out=192,
        fpn_out=256,
        freeze_bn=False,
    )
    y = model(x)
    print(y.size())
| Python |
3D | chenz53/MIM-Med3D | code/models/vit_3d.py | .py | 16,828 | 496 | import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> act -> Dropout -> Linear -> Dropout."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        # Hidden/output widths default to the input width when not given.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # Expand, activate, drop ...
        hidden = self.drop(self.act(self.fc1(x)))
        # ... then project back to the output width.
        return self.drop(self.fc2(hidden))
class Attention3D(nn.Module):
    """Multi-head self-attention with optional learned relative position bias.

    NOTE(review): despite the 3D name, the relative-position machinery below
    indexes window_size[0]/[1] only, i.e. it is the 2D BEiT formulation —
    confirm before enabling ``window_size`` for 3D patch grids.
    """
    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
        window_size=None,
        attn_head_dim=None,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        # Scaled dot-product factor; defaults to 1/sqrt(head_dim).
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            # BEiT-style qkv bias: learnable q/v biases, fixed zero k bias.
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None
        if window_size:
            self.window_size = window_size
            # cls to token & token to cls & cls to cls
            self.num_relative_distance = (2 * window_size[0] - 1) * (
                2 * window_size[1] - 1
            ) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads)
            )  # 2*Wh-1 * 2*Ww-1, nH
            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = (
                coords_flatten[:, :, None] - coords_flatten[:, None, :]
            )  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(
                1, 2, 0
            ).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            relative_position_index = torch.zeros(
                size=(window_size[0] * window_size[1] + 1,) * 2,
                dtype=relative_coords.dtype,
            )
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            # The three extra table rows encode cls<->token and cls<->cls biases.
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1
            self.register_buffer("relative_position_index", relative_position_index)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x, rel_pos_bias=None):
        """Self-attention over x of shape (B, N, C); optionally adds a shared
        relative position bias (from RelativePositionBias) to the logits."""
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            # k bias stays zero and is not trained.
            qkv_bias = torch.cat(
                (
                    self.q_bias,
                    torch.zeros_like(self.v_bias, requires_grad=False),
                    self.v_bias,
                )
            )
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        # (B, N, 3*E) -> (3, B, heads, N, head_dim)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = (
            qkv[0],
            qkv[1],
            qkv[2],
        )  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        if self.relative_position_bias_table is not None:
            # Per-layer learned relative position bias.
            relative_position_bias = self.relative_position_bias_table[
                self.relative_position_index.view(-1)
            ].view(
                self.window_size[0] * self.window_size[1] + 1,
                self.window_size[0] * self.window_size[1] + 1,
                -1,
            )  # Wh*Ww,Wh*Ww,nH
            relative_position_bias = relative_position_bias.permute(
                2, 0, 1
            ).contiguous()  # nH, Wh*Ww, Wh*Ww
            attn = attn + relative_position_bias.unsqueeze(0)
        if rel_pos_bias is not None:
            attn = attn + rel_pos_bias
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block3D(nn.Module):
    """Transformer encoder block: pre-norm attention and MLP with residuals,
    optional DropPath and optional LayerScale (``init_values``)."""
    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        init_values=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        window_size=None,
        attn_head_dim=None,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention3D(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
            window_size=window_size,
            attn_head_dim=attn_head_dim,
        )
        # Stochastic depth on the residual branches; identity when drop_path == 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )
        if init_values is not None:
            # LayerScale (BEiT): learnable per-channel residual scaling.
            self.gamma_1 = nn.Parameter(
                init_values * torch.ones((dim)), requires_grad=True
            )
            self.gamma_2 = nn.Parameter(
                init_values * torch.ones((dim)), requires_grad=True
            )
        else:
            self.gamma_1, self.gamma_2 = None, None
    def forward(self, x, rel_pos_bias=None):
        # Pre-norm residual attention then MLP; gamma_* scale the residuals
        # when LayerScale is enabled.
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(
                self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)
            )
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
class PatchEmbed3D(nn.Module):
    """Split a 3D volume into non-overlapping patches and linearly embed each.

    The embedding is a Conv3d whose kernel and stride both equal the patch
    size, so each output location corresponds to exactly one patch.
    """

    def __init__(
        self, img_size=(96, 96, 96), patch_size=(16, 16, 16), in_chans=3, embed_dim=768
    ):
        super().__init__()
        # Number of patches along each spatial axis (D, H, W).
        grid = tuple(s // p for s, p in zip(img_size, patch_size))
        self.patch_shape = grid
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid[0] * grid[1] * grid[2]
        self.proj = nn.Conv3d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x, **kwargs):
        B, C, D, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert (
            D == self.img_size[0] and H == self.img_size[1] and W == self.img_size[2]
        ), f"Input image size ({D}*{H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}*{self.img_size[2]})."
        # (B, E, d, h, w) -> (B, d*h*w, E): one embedding vector per patch.
        return self.proj(x).flatten(2).transpose(1, 2)
class RelativePositionBias(nn.Module):
    """Learned relative-position bias table shared across all blocks (BEiT-style).

    NOTE(review): the index construction below uses window_size[0]/[1] only,
    i.e. the 2D BEiT formulation; confirm suitability for 3D patch grids
    before relying on it.
    """
    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        # Distinct relative offsets plus 3 special entries for
        # cls->token, token->cls and cls->cls.
        self.num_relative_distance = (2 * window_size[0] - 1) * (
            2 * window_size[1] - 1
        ) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = (
            coords_flatten[:, :, None] - coords_flatten[:, None, :]
        )  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0
        ).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(
            size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
        )
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # The three extra table rows encode cls<->token and cls<->cls biases.
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1
        self.register_buffer("relative_position_index", relative_position_index)
    def forward(self):
        # Look up the bias for every (query, key) token pair, heads first.
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1] + 1,
            self.window_size[0] * self.window_size[1] + 1,
            -1,
        )  # Wh*Ww,Wh*Ww,nH
        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
class VisionTransformer3D(nn.Module):
    """Vision Transformer for 3D volumes (BEiT-style) with support for absolute
    and relative position embeddings.

    With ``use_mean_pooling`` the final representation is the mean over patch
    tokens (normalized by ``fc_norm``); otherwise the normalized cls token is
    used. ``num_classes=0`` turns the head into Identity (feature extractor).
    """
    def __init__(
        self,
        img_size=(96, 96, 96),
        patch_size=(16, 16, 16),
        in_chans=1,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_values=None,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        use_shared_rel_pos_bias=False,
        use_mean_pooling=True,
        init_scale=0.001,
    ):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed3D(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)
        if use_shared_rel_pos_bias:
            # One relative-position bias table shared by all blocks.
            self.rel_pos_bias = RelativePositionBias(
                window_size=self.patch_embed.patch_shape, num_heads=num_heads
            )
        else:
            self.rel_pos_bias = None
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList(
            [
                Block3D(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    init_values=init_values,
                    window_size=self.patch_embed.patch_shape
                    if use_rel_pos_bias
                    else None,
                )
                for i in range(depth)
            ]
        )
        self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
        self.head = (
            nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        )
        if self.pos_embed is not None:
            self._trunc_normal_(self.pos_embed, std=0.02)
        self._trunc_normal_(self.cls_token, std=0.02)
        if num_classes > 0:
            self._trunc_normal_(self.head.weight, std=0.02)
        self.apply(self._init_weights)
        self.fix_init_weight()
        if num_classes > 0:
            # Down-scale the classifier so initial logits start near zero.
            self.head.weight.data.mul_(init_scale)
            self.head.bias.data.mul_(init_scale)
    def _trunc_normal_(self, tensor, mean=0.0, std=1.0):
        trunc_normal_(tensor, mean=mean, std=std)
    def fix_init_weight(self):
        # Rescale residual-branch output projections by depth (BEiT init).
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))
        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            self._trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, (nn.Conv2d, nn.Conv3d)):
            # BUG FIX: this 3D model embeds patches with Conv3d, but the check
            # previously matched nn.Conv2d only, leaving the patch projection at
            # PyTorch's default init instead of trunc-normal(std=0.02).
            self._trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def get_num_layers(self):
        return len(self.blocks)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by optimizer factories.
        return {"pos_embed", "cls_token"}
    def get_classifier(self):
        return self.head
    def reset_classifier(self, num_classes, global_pool=""):
        # Swap in a fresh classification head (Identity when num_classes == 0).
        self.num_classes = num_classes
        self.head = (
            nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        )
    def forward_features(self, x):
        """Embed patches, prepend cls token, run the transformer, and pool."""
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()
        cls_tokens = self.cls_token.expand(
            batch_size, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            x = blk(x, rel_pos_bias=rel_pos_bias)
        x = self.norm(x)
        if self.fc_norm is not None:
            # Mean-pool the patch tokens (excluding cls) and normalize.
            t = x[:, 1:, :]
            return self.fc_norm(t.mean(1))
        else:
            return x[:, 0]
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
if __name__ == "__main__":
    # Smoke test: ViT-Base-style 3D encoder on one 96^3 volume; with
    # num_classes=0 the head is Identity, so the output is the pooled embedding.
    model = VisionTransformer3D(
        img_size=(96, 96, 96),
        patch_size=(16, 16, 16),
        in_chans=1,
        num_classes=0,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_values=None,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        use_shared_rel_pos_bias=False,
        use_mean_pooling=True,
        init_scale=0.001,
    )
    x = torch.randn(1, 1, 96, 96, 96)
    y = model(x)
    print(y.size())
| Python |
3D | chenz53/MIM-Med3D | code/models/unetr.py | .py | 9,000 | 267 | from typing import Sequence, Tuple, Union
import torch.nn as nn
from monai.networks.blocks.dynunet_block import UnetOutBlock
from monai.networks.blocks.unetr_block import (
UnetrBasicBlock,
UnetrPrUpBlock,
UnetrUpBlock,
)
from monai.networks.nets.vit import ViT
from monai.utils import ensure_tuple_rep
from mmcv.runner import load_checkpoint
class UNETR(nn.Module):
    """
    UNETR based on: "Hatamizadeh et al.,
    UNETR: Transformers for 3D Medical Image Segmentation <https://arxiv.org/abs/2103.10504>"
    """
    def __init__(
        self,
        pretrained: Union[str, None],
        in_channels: int,
        out_channels: int,
        img_size: Union[Sequence[int], int],
        feature_size: int = 16,
        hidden_size: int = 768,
        mlp_dim: int = 3072,
        num_layers: int = 12,
        num_heads: int = 12,
        pos_embed: str = "conv",
        norm_name: Union[Tuple, str] = "instance",
        conv_block: bool = True,
        res_block: bool = True,
        dropout_rate: float = 0.0,
        spatial_dims: int = 3,
        revise_keys=[],
    ) -> None:
        """
        Args:
            pretrained: path to a checkpoint to load via mmcv, or None.
            in_channels: dimension of input channels.
            out_channels: dimension of output channels.
            img_size: dimension of input image.
            feature_size: dimension of network feature size.
            hidden_size: dimension of hidden layer.
            mlp_dim: dimension of feedforward layer.
            num_layers: number of ViT transformer blocks; must be divisible
                by 4 (skip connections tap the ViT at 1/4, 2/4, 3/4 depth).
            num_heads: number of attention heads.
            pos_embed: position embedding layer type.
            norm_name: feature normalization type and arguments.
            conv_block: bool argument to determine if convolutional block is used.
            res_block: bool argument to determine if residual block is used.
            dropout_rate: faction of the input units to drop.
            spatial_dims: number of spatial dims.
            revise_keys: (pattern, replacement) pairs applied to checkpoint
                state-dict keys by ``mmcv.runner.load_checkpoint``.
        Examples::
            # for single channel input 4-channel output with image size of (96,96,96), feature size of 32 and batch norm
            >>> net = UNETR(in_channels=1, out_channels=4, img_size=(96,96,96), feature_size=32, norm_name='batch')
            # for single channel input 4-channel output with image size of (96,96), feature size of 32 and batch norm
            >>> net = UNETR(in_channels=1, out_channels=4, img_size=96, feature_size=32, norm_name='batch', spatial_dims=2)
            # for 4-channel input 3-channel output with image size of (128,128,128), conv position embedding and instance norm
            >>> net = UNETR(in_channels=4, out_channels=3, img_size=(128,128,128), pos_embed='conv', norm_name='instance')
        """
        super().__init__()
        if not (0 <= dropout_rate <= 1):
            raise ValueError("dropout_rate should be between 0 and 1.")
        if hidden_size % num_heads != 0:
            raise ValueError("hidden_size should be divisible by num_heads.")
        self.pretrained = pretrained
        self.num_layers = num_layers
        # Fix: the original `assert self.num_layers // 4` was truthy for any
        # depth >= 4 and never enforced the divisibility the stage indexing
        # below relies on. Validate explicitly, matching the ValueError style
        # of the checks above.
        if self.num_layers % 4 != 0:
            raise ValueError("num_layers should be divisible by 4.")
        self.stage_layers = self.num_layers // 4
        img_size = ensure_tuple_rep(img_size, spatial_dims)
        self.patch_size = ensure_tuple_rep(16, spatial_dims)
        # Token-grid resolution of the ViT feature map.
        self.feat_size = tuple(
            img_d // p_d for img_d, p_d in zip(img_size, self.patch_size)
        )
        self.hidden_size = hidden_size
        self.classification = False
        self.vit = ViT(
            in_channels=in_channels,
            img_size=img_size,
            patch_size=self.patch_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_layers=self.num_layers,
            num_heads=num_heads,
            pos_embed=pos_embed,
            classification=self.classification,
            dropout_rate=dropout_rate,
            spatial_dims=spatial_dims,
        )
        # Encoder path: full-resolution conv branch plus progressively
        # upsampled projections of intermediate ViT feature maps.
        self.encoder1 = UnetrBasicBlock(
            spatial_dims=spatial_dims,
            in_channels=in_channels,
            out_channels=feature_size,
            kernel_size=3,
            stride=1,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.encoder2 = UnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 2,
            num_layer=2,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder3 = UnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 4,
            num_layer=1,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        self.encoder4 = UnetrPrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            num_layer=0,
            kernel_size=3,
            stride=1,
            upsample_kernel_size=2,
            norm_name=norm_name,
            conv_block=conv_block,
            res_block=res_block,
        )
        # Decoder path: U-Net style upsampling with skip connections.
        self.decoder5 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=hidden_size,
            out_channels=feature_size * 8,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder4 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 8,
            out_channels=feature_size * 4,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder3 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 4,
            out_channels=feature_size * 2,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.decoder2 = UnetrUpBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size * 2,
            out_channels=feature_size,
            kernel_size=3,
            upsample_kernel_size=2,
            norm_name=norm_name,
            res_block=res_block,
        )
        self.out = UnetOutBlock(
            spatial_dims=spatial_dims,
            in_channels=feature_size,
            out_channels=out_channels,
        )
        # Permutation that moves the hidden dim to the channel position when
        # reshaping token sequences into spatial feature maps.
        self.proj_axes = (0, spatial_dims + 1) + tuple(
            d + 1 for d in range(spatial_dims)
        )
        self.proj_view_shape = list(self.feat_size) + [self.hidden_size]
        self.init_weights(revise_keys=revise_keys)
    def proj_feat(self, x):
        """Reshape a [B, n_tokens, hidden] ViT output into a
        [B, hidden, *feat_size] spatial feature map."""
        new_view = [x.size(0)] + self.proj_view_shape
        x = x.view(new_view)
        x = x.permute(self.proj_axes).contiguous()
        return x
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            print("load checkpoints from {}".format(self.pretrained))
            load_checkpoint(
                self,
                filename=self.pretrained,
                # map_location="cpu",
                strict=False,
                revise_keys=revise_keys,
            )
        elif self.pretrained is None:
            pass
        else:
            raise TypeError("pretrained must be a str or None")
    def forward(self, x_in):
        """Segment `x_in`; returns logits with `out_channels` channels at the
        input spatial resolution."""
        x, hidden_states_out = self.vit(x_in)
        enc1 = self.encoder1(x_in)
        # Skip connections come from the ViT at 1/4, 2/4 and 3/4 of its depth.
        x2 = hidden_states_out[self.stage_layers * 1]
        enc2 = self.encoder2(self.proj_feat(x2))
        x3 = hidden_states_out[self.stage_layers * 2]
        enc3 = self.encoder3(self.proj_feat(x3))
        x4 = hidden_states_out[self.stage_layers * 3]
        enc4 = self.encoder4(self.proj_feat(x4))
        dec4 = self.proj_feat(x)
        dec3 = self.decoder5(dec4, enc4)
        dec2 = self.decoder4(dec3, enc3)
        dec1 = self.decoder3(dec2, enc2)
        out = self.decoder2(dec1, enc1)
        return self.out(out)
if __name__ == "__main__":
    import torch
    # Smoke test: ViT-Large-sized UNETR (24 layers -> skips at depths 6/12/18)
    # on a 96^3 single-channel volume with 14 output classes.
    x = torch.randn(1, 1, 96, 96, 96)
    model = UNETR(
        pretrained=None,
        in_channels=1,
        out_channels=14,
        img_size=(96, 96, 96),
        feature_size=16,
        hidden_size=1024,
        mlp_dim=4096,
        num_layers=24,
        num_heads=16,
        pos_embed="perceptron",
        norm_name="instance",
        conv_block=True,
        res_block=True,
        dropout_rate=0.0,
        spatial_dims=3,
        revise_keys=[],
    )
    y = model(x)
    print(y.shape)
| Python |
3D | chenz53/MIM-Med3D | code/models/vitautoenc.py | .py | 6,619 | 193 | from typing import Sequence, Union
import math
import torch
import torch.nn as nn
from monai.networks.blocks.patchembedding import PatchEmbeddingBlock
from monai.networks.blocks.transformerblock import TransformerBlock
from monai.networks.layers import Conv
from monai.utils import ensure_tuple_rep
from timm.models.layers import trunc_normal_
__all__ = ["ViTAutoEnc"]
class ViTAutoEnc(nn.Module):
    """
    Vision Transformer (ViT) autoencoder, based on: "Dosovitskiy et al.,
    An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>"

    In this variant the transformer trunk is bypassed in ``forward``: the
    module only applies the two transposed-conv decoder layers to an
    already-encoded feature map (see ``forward``).
    """
    def __init__(
        self,
        pretrained: Union[str, None],
        in_channels: int,
        img_size: Union[Sequence[int], int],
        patch_size: Union[Sequence[int], int],
        out_channels: int = 1,
        deconv_chns: int = 16,
        hidden_size: int = 768,
        mlp_dim: int = 3072,
        num_layers: int = 12,
        num_heads: int = 12,
        pos_embed: str = "conv",
        dropout_rate: float = 0.0,
        spatial_dims: int = 3,
        revise_keys=[("model.", "")],
    ) -> None:
        """
        Args:
            pretrained: path to a checkpoint to load via mmcv, or None.
            in_channels: dimension of input channels or the number of channels for input
            img_size: dimension of input image.
            patch_size: dimension of patch size.
            hidden_size: dimension of hidden layer.
            out_channels: number of output channels.
            deconv_chns: number of channels for the deconvolution layers.
            mlp_dim: dimension of feedforward layer.
            num_layers: number of transformer blocks.
            num_heads: number of attention heads.
            pos_embed: position embedding layer type.
            dropout_rate: faction of the input units to drop.
            spatial_dims: number of spatial dimensions.
            revise_keys: (pattern, replacement) pairs applied to checkpoint
                state-dict keys by ``mmcv.runner.load_checkpoint``.
        Examples::
            # for single channel input with image size of (96,96,96), conv position embedding and segmentation backbone
            >>> net = ViTAutoEnc(in_channels=1, patch_size=(16,16,16), img_size=(96,96,96), pos_embed='conv')
            # for 3-channel with image size of (128,128,128)
            >>> net = ViTAutoEnc(in_channels=3, patch_size=(16,16,16), img_size=(128,128,128), pos_embed='conv')
        """
        super().__init__()
        self.pretrained = pretrained
        self.patch_size = ensure_tuple_rep(patch_size, spatial_dims)
        self.spatial_dims = spatial_dims
        self.patch_embedding = PatchEmbeddingBlock(
            in_channels=in_channels,
            img_size=img_size,
            patch_size=patch_size,
            hidden_size=hidden_size,
            num_heads=num_heads,
            pos_embed=pos_embed,
            dropout_rate=dropout_rate,
            spatial_dims=self.spatial_dims,
        )
        self.blocks = nn.ModuleList(
            [
                TransformerBlock(hidden_size, mlp_dim, num_heads, dropout_rate)
                for i in range(num_layers)
            ]
        )
        self.norm = nn.LayerNorm(hidden_size)
        # Two stride-4 transposed convs give a combined 16x spatial upsampling.
        new_patch_size = [4] * self.spatial_dims
        conv_trans = Conv[Conv.CONVTRANS, self.spatial_dims]
        # self.conv3d_transpose* is to be compatible with existing 3d model weights.
        self.conv3d_transpose = conv_trans(
            hidden_size, deconv_chns, kernel_size=new_patch_size, stride=new_patch_size
        )
        self.conv3d_transpose_1 = conv_trans(
            in_channels=deconv_chns,
            out_channels=out_channels,
            kernel_size=new_patch_size,
            stride=new_patch_size,
        )
        self.init_weights(revise_keys=revise_keys)
    def forward(self, x):
        """
        Decode an encoder feature map back to image space.

        Args:
            x: feature tensor of shape ``[batch, hidden_size, d, h, w]`` —
                the channel dimension must equal ``hidden_size``, since it
                feeds ``self.conv3d_transpose`` directly. Note the patch
                embedding and transformer blocks are NOT applied here.
        """
        x = self.conv3d_transpose(x)
        x = self.conv3d_transpose_1(x)
        return x
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=0.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
            elif isinstance(m, nn.Conv3d):
                # Kaiming-style fan-out init for conv layers.
                fan_out = (
                    m.kernel_size[0]
                    * m.kernel_size[1]
                    * m.kernel_size[2]
                    * m.out_channels
                )
                fan_out //= m.groups
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            self.apply(_init_weights)
            # Fix: `load_checkpoint` was referenced but never imported in this
            # module (NameError). Import lazily so mmcv is only required when
            # a checkpoint is actually loaded.
            from mmcv.runner import load_checkpoint
            load_checkpoint(
                self,
                filename=self.pretrained,
                map_location=torch.device("cpu"),
                strict=False,
                revise_keys=revise_keys,
            )
        elif self.pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError("pretrained must be a str or None")
if __name__ == "__main__":
    # Smoke test. NOTE: the input here is an already-encoded feature map with
    # hidden_size (768) channels, not a raw image — forward only applies the
    # two transposed-conv decoder layers.
    x = torch.randn(1, 768, 24, 3, 3)
    model = ViTAutoEnc(
        pretrained=None,
        in_channels=1,
        img_size=(96, 96, 96),
        patch_size=(16, 16, 16),
        out_channels=1,
        deconv_chns=16,
        hidden_size=768,
        mlp_dim=3072,
        num_layers=12,
        num_heads=12,
        pos_embed="conv",
        dropout_rate=0.0,
        spatial_dims=3,
    )
    y = model(x)
    print(y.size())
| Python |
3D | chenz53/MIM-Med3D | code/models/simmim.py | .py | 12,139 | 373 | from typing import Union, Sequence
import torch
from torch import nn
import torch.nn.functional as F

from einops import rearrange, repeat
from monai.networks.layers import Conv
from monai.networks.nets import ViT
from mmcv.runner import load_checkpoint
from timm.models.layers import DropPath, trunc_normal_

from .swin_3d import SwinTransformer3D
class ViTSimMIM(nn.Module):
    """SimMIM-style masked-image-modeling pretraining wrapper around a MONAI
    ViT encoder: random patch tokens are replaced with a learned mask token,
    the full sequence is encoded, and a linear head predicts the raw pixel
    values of the masked patches.

    ``forward`` returns ``(pred_pixel_values, patches, batch_range,
    masked_indices)`` so the caller can compute the reconstruction loss on
    the masked patches.
    """
    def __init__(
        self,
        pretrained: Union[str, None],
        in_channels: int,
        img_size: Union[Sequence[int], int],
        patch_size: Union[Sequence[int], int],
        hidden_size: int = 768,
        mlp_dim: int = 3072,
        num_layers: int = 12,
        num_heads: int = 12,
        pos_embed: str = "perceptron",
        dropout_rate: float = 0.0,
        spatial_dims: int = 3,
        masking_ratio: float = 0.5,
        revise_keys=[("model.", "")],
        **kwargs,  # extra config keys are accepted and ignored
    ):
        super().__init__()
        self.pretrained = pretrained
        self.spatial_dims = spatial_dims
        assert (
            masking_ratio > 0 and masking_ratio < 1
        ), "masking ratio must be kept between 0 and 1"
        self.masking_ratio = masking_ratio
        self.encoder = ViT(
            in_channels=in_channels,
            img_size=img_size,
            patch_size=patch_size,
            hidden_size=hidden_size,
            mlp_dim=mlp_dim,
            num_layers=num_layers,
            num_heads=num_heads,
            pos_embed=pos_embed,
            dropout_rate=dropout_rate,
            spatial_dims=spatial_dims,
        )
        # patch embedding block
        # Reuse the encoder's patchifier and token projection directly.
        self.to_patch, self.patch_to_emb = self.encoder.patch_embedding.patch_embeddings
        n_patches = self.encoder.patch_embedding.n_patches
        patch_dim = self.encoder.patch_embedding.patch_dim
        # simple linear head
        self.mask_token = nn.Parameter(torch.randn(hidden_size))
        self.to_pixels = nn.Linear(hidden_size, patch_dim)
        self.init_weights(revise_keys=revise_keys)
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=0.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            self.apply(_init_weights)
            load_checkpoint(
                self,
                filename=self.pretrained,
                map_location=torch.device("cpu"),
                strict=False,
                revise_keys=revise_keys,
            )
        elif self.pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError("pretrained must be a path(str) or None")
    def forward(self, img):
        device = img.device
        # get patches
        patches = self.to_patch(img)
        batch, num_patches, *_ = patches.shape
        # patch to encoder tokens and add positions
        tokens = self.patch_to_emb(patches)
        tokens = tokens + self.encoder.patch_embedding.position_embeddings
        # for indexing purposes
        batch_range = torch.arange(batch, device=device)[:, None]
        # prepare mask tokens (position-embedded so they carry location info)
        mask_tokens = repeat(self.mask_token, "d -> b n d", b=batch, n=num_patches)
        mask_tokens = mask_tokens + self.encoder.patch_embedding.position_embeddings
        # calculate of patches needed to be masked, and get positions (indices) to be masked
        num_masked = int(self.masking_ratio * num_patches)
        masked_indices = (
            torch.rand(batch, num_patches, device=device)
            .topk(k=num_masked, dim=-1)
            .indices
        )
        masked_bool_mask = (
            torch.zeros((batch, num_patches), device=device)
            .scatter_(-1, masked_indices, 1)
            .bool()
        )
        # mask tokens: swap in the learned mask token at the masked positions
        tokens = torch.where(masked_bool_mask[..., None], mask_tokens, tokens)
        # attend with vision transformer (full sequence, SimMIM-style)
        for blk in self.encoder.blocks:
            tokens = blk(tokens)
        encoded = tokens
        # get the masked tokens
        encoded_mask_tokens = encoded[batch_range, masked_indices]
        # small linear projection for predicted pixel values
        pred_pixel_values = self.to_pixels(encoded_mask_tokens)
        # # get the masked patches for the final reconstruction loss
        # masked_patches = patches[batch_range, masked_indices]
        # # calculate reconstruction loss
        # recon_loss = F.l1_loss(pred_pixel_values, masked_patches) / num_masked
        return pred_pixel_values, patches, batch_range, masked_indices
class SwinSimMIM(nn.Module):
    """SimMIM-style masked-image-modeling pretraining wrapper around a 3D
    Swin Transformer: masked patch tokens are replaced with a learned mask
    token, encoded through the Swin stages, and two transposed convs decode
    the final feature map back to pixel space.

    NOTE(review): ``forward`` uses einops ``rearrange``, which must be in
    scope from the module-level einops import.
    """
    def __init__(
        self,
        pretrained: Union[None, str],
        patch_size: Sequence[int] = (4, 4, 4),
        in_chans: int = 1,
        embed_dim: int = 96,
        depths: Sequence[int] = [2, 2, 6, 2],
        num_heads: Sequence[int] = [3, 6, 12, 24],
        window_size: Sequence[int] = (2, 7, 7),
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        qk_scale: Union[None, bool] = None,
        drop_rate: float = 0.0,
        attn_drop_rate: float = 0.0,
        drop_path_rate: float = 0.2,
        norm_layer=nn.LayerNorm,
        patch_norm: bool = False,
        frozen_stages: int = -1,
        use_checkpoint: bool = False,
        masking_ratio: float = 0.5,
        revise_keys=[("model.", "")],
        **kwargs,  # extra config keys are accepted and ignored
    ):
        super().__init__()
        self.pretrained = pretrained
        self.patch_size = patch_size
        assert (
            masking_ratio > 0 and masking_ratio < 1
        ), "masking ratio must be kept between 0 and 1"
        self.masking_ratio = masking_ratio
        self.encoder = SwinTransformer3D(
            pretrained=pretrained,
            pretrained2d=False,
            patch_size=patch_size,
            in_chans=in_chans,
            num_classes=0,
            embed_dim=embed_dim,
            depths=depths,
            num_heads=num_heads,
            window_size=window_size,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            norm_layer=norm_layer,
            patch_norm=patch_norm,
            frozen_stages=frozen_stages,
            use_checkpoint=use_checkpoint,
        )
        # patch embedding block
        num_features = self.encoder.num_features
        num_layers = self.encoder.num_layers
        final_resolution = self.encoder.final_resolution
        self.num_features = num_features
        self.num_layers = num_layers
        self.final_resolution = self.encoder.final_resolution
        # masked tokens
        self.mask_token = nn.Parameter(torch.randn(embed_dim))
        # simple linear head
        # Decoder: two transposed convs undo the patch embedding plus the
        # 2x downsampling of each of the (num_layers - 1) merge stages.
        conv_trans = Conv[Conv.CONVTRANS, 3]
        self.conv3d_transpose = conv_trans(
            num_features,
            16,
            kernel_size=(
                self.patch_size[0],
                2 ** (num_layers - 1),
                2 ** (num_layers - 1),
            ),
            stride=(self.patch_size[0], 2 ** (num_layers - 1), 2 ** (num_layers - 1),),
        )
        self.conv3d_transpose_1 = conv_trans(
            in_channels=16,
            out_channels=in_chans,
            kernel_size=(1, self.patch_size[1], self.patch_size[2]),
            stride=(1, self.patch_size[1], self.patch_size[2]),
        )  # B C D H W
        self.init_weights(revise_keys=revise_keys)
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=0.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            self.apply(_init_weights)
            load_checkpoint(
                self,
                filename=self.pretrained,
                map_location=torch.device("cpu"),
                strict=False,
                revise_keys=revise_keys,
            )
        elif self.pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError("pretrained must be a path(str) or None")
    def forward(self, img):
        # B, C, D, H, W = img.shape
        device = img.device
        # get patches
        # NOTE(review): p2/p3 use patch_size[0], not patch_size[1]/[2] — this
        # assumes cubic patches; the decoder convs above use all three
        # entries. Confirm if anisotropic patch sizes are ever used.
        patches = rearrange(
            img,
            "b c (d p1) (h p2) (w p3) -> b (d h w) (p1 p2 p3 c)",
            p1=self.patch_size[0],
            p2=self.patch_size[0],
            p3=self.patch_size[0],
        )
        tokens = self.encoder.patch_embed(img)
        batch, num_patches, *_ = tokens.shape
        assert num_patches == patches.shape[1]
        # for indexing purposes
        batch_range = torch.arange(batch, device=device)[:, None]
        # calculate of patches needed to be masked, and get positions (indices) to be masked
        num_masked = int(self.masking_ratio * num_patches)
        masked_indices = (
            torch.rand(batch, num_patches, device=device)
            .topk(k=num_masked, dim=-1)
            .indices
        )
        masked_bool_mask = (
            torch.zeros((batch, num_patches), device=device)
            .scatter_(-1, masked_indices, 1)
            .bool()
        )
        # prepare mask tokens
        mask_tokens = repeat(self.mask_token, "d -> b n d", b=batch, n=num_patches)
        # mask_tokens = mask_tokens + self.encoder.patch_embedding.position_embeddings
        # mask tokens: swap in the learned mask token at the masked positions
        tokens = torch.where(masked_bool_mask[..., None], mask_tokens, tokens)
        if self.encoder.ape:
            tokens = tokens + self.encoder.absolute_pos_embed
        tokens = self.encoder.pos_drop(tokens)
        # attend with vision transformer
        for layer in self.encoder.layers:
            tokens = layer(tokens)
        tokens = self.encoder.norm(tokens)
        # small linear projection for predicted pixel values
        # Reshape tokens to a spatial feature map, decode to pixel space,
        # then re-patchify so the output aligns with `patches` for the loss.
        tokens = tokens.transpose(1, 2).view(
            -1, self.num_features, *self.final_resolution
        )
        tokens = self.conv3d_transpose(tokens)
        pred_pixel_values = self.conv3d_transpose_1(tokens)
        pred_pixel_values = rearrange(
            pred_pixel_values,
            "b c (d p1) (h p2) (w p3) -> b (d h w) (p1 p2 p3 c)",
            p1=self.patch_size[0],
            p2=self.patch_size[0],
            p3=self.patch_size[0],
        )
        return pred_pixel_values, patches, batch_range, masked_indices
if __name__ == "__main__":
    # Smoke test: Swin-Tiny-sized SimMIM model with 75% masking on a 96^3
    # volume. Keys not in SwinSimMIM's signature (pretrained2d, img_size,
    # num_classes, ape) are absorbed by its **kwargs and ignored.
    model = SwinSimMIM(
        pretrained=None,
        pretrained2d=True,
        img_size=(96, 96, 96),
        patch_size=(4, 4, 4),
        in_chans=1,
        num_classes=0,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=(7, 7, 7),
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=True,
        frozen_stages=-1,
        use_checkpoint=False,
        masking_ratio=0.75,
        revise_keys=[],
    )
    x = torch.randn(1, 1, 96, 96, 96)
    y = model(x)
    print(y[0].shape)
| Python |
3D | chenz53/MIM-Med3D | code/models/upernet_swin.py | .py | 4,444 | 132 | from typing import Sequence, Tuple, Union
import torch
from .swin_3d import SwinTransformer3D
from .upernet_3d import UperNet3D
from mmcv.runner import load_checkpoint
class UperNetSwin(torch.nn.Module):
    """
    UPerNet-style 3D segmentation model: a ``SwinTransformer3D`` backbone
    whose multi-scale stage outputs feed a ``UperNet3D`` decode head.
    """
    def __init__(
        self,
        pretrained: Union[str, None],
        backbone_dict: dict,
        decode_head_dict: dict,
        revise_keys=[],
    ) -> None:
        """
        Args:
            pretrained: path to a checkpoint loaded via mmcv, or None for
                random initialization.
            backbone_dict: keyword arguments forwarded to ``SwinTransformer3D``.
            decode_head_dict: keyword arguments forwarded to ``UperNet3D``.
            revise_keys: (pattern, replacement) pairs applied to checkpoint
                state-dict keys by ``mmcv.runner.load_checkpoint``.
        """
        super().__init__()
        self.pretrained = pretrained
        self.backbone = SwinTransformer3D(**backbone_dict)
        self.decode_head = UperNet3D(**decode_head_dict)
        self.init_weights(revise_keys=revise_keys)
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            print("load checkpoints from {}".format(self.pretrained))
            load_checkpoint(
                self,
                filename=self.pretrained,
                # map_location="cpu",
                strict=False,
                revise_keys=revise_keys,
            )
        elif self.pretrained is None:
            pass
        else:
            raise TypeError("pretrained must be a str or None")
    def forward(self, x_in):
        """Segment ``x_in``; input and output are channel-first with depth
        last ([B, C, H, W, D]); the backbone expects depth first."""
        # x_in [B, C, H, W, D]
        x_in = x_in.permute(0, 1, 4, 2, 3)  # b, c, d, h, w
        x, stage_outputs = self.backbone.forward_features(x_in)
        y = self.decode_head(stage_outputs)  # b, c, d, h, w
        return y.permute(0, 1, 3, 4, 2)  # b, c, h, w, d
if __name__ == "__main__":
    import torch

    # Smoke test: Swin-Tiny backbone + UPerNet decode head on a 96^3 volume.
    x = torch.randn(1, 1, 96, 96, 96)
    backbone_dict = {
        "pretrained": None,
        "pretrained2d": True,
        "img_size": (96, 96, 96),
        "patch_size": (4, 4, 4),
        "in_chans": 1,
        "num_classes": 0,
        "embed_dim": 96,
        "depths": [2, 2, 6, 2],
        "num_heads": [3, 6, 12, 24],
        "window_size": (7, 7, 7),
        "mlp_ratio": 4.0,
        "qkv_bias": True,
        "qk_scale": None,
        "drop_rate": 0.0,
        "attn_drop_rate": 0.0,
        "drop_path_rate": 0.1,
        # Fix: `nn` is never imported in this module; use torch.nn explicitly.
        "norm_layer": torch.nn.LayerNorm,
        "ape": False,
        "patch_norm": True,
        "frozen_stages": -1,
        "use_checkpoint": False,
    }
    decode_head_dict = {
        "image_size": (96, 96, 96),
        "num_classes": 14,
        "feature_channels": [192, 384, 768, 768],
        "fpn_out": 192,
        "freeze_bn": False,
    }
    # Fix: `pretrained` is a required positional parameter of UperNetSwin and
    # was previously omitted (TypeError at call time).
    model = UperNetSwin(
        pretrained=None,
        backbone_dict=backbone_dict,
        decode_head_dict=decode_head_dict,
    )
    y = model(x)
    print(y.shape)
| Python |
3D | chenz53/MIM-Med3D | code/models/upernet_van.py | .py | 3,367 | 91 | from typing import Sequence, Tuple, Union
import torch
from .van_3d import VAN3D
from .upernet_3d import UperNet3D
from mmcv.runner import load_checkpoint
class UperNetVAN(torch.nn.Module):
    """
    UPerNet-style 3D segmentation model: a ``VAN3D`` (Visual Attention
    Network) backbone whose multi-scale stage outputs feed a ``UperNet3D``
    decode head.
    """
    def __init__(
        self,
        pretrained: Union[str, None],
        backbone_dict: dict,
        decode_head_dict: dict,
        revise_keys=[],
    ) -> None:
        """
        Args:
            pretrained: path to a checkpoint loaded via mmcv, or None for
                random initialization.
            backbone_dict: keyword arguments forwarded to ``VAN3D``.
            decode_head_dict: keyword arguments forwarded to ``UperNet3D``.
            revise_keys: (pattern, replacement) pairs applied to checkpoint
                state-dict keys by ``mmcv.runner.load_checkpoint``.
        """
        super().__init__()
        self.pretrained = pretrained
        self.backbone = VAN3D(**backbone_dict)
        self.decode_head = UperNet3D(**decode_head_dict)
        self.init_weights(revise_keys=revise_keys)
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            print("load checkpoints from {}".format(self.pretrained))
            load_checkpoint(
                self,
                filename=self.pretrained,
                # map_location="cpu",
                strict=False,
                revise_keys=revise_keys,
            )
        elif self.pretrained is None:
            pass
        else:
            raise TypeError("pretrained must be a str or None")
    def forward(self, x_in):
        """Segment ``x_in``; input and output are channel-first with depth
        last ([B, C, H, W, D]); the backbone expects depth first."""
        # x_in [B, C, H, W, D]
        x_in = x_in.permute(0, 1, 4, 2, 3)  # b, c, d, h, w
        x, stage_outputs = self.backbone.forward_features(x_in)
        y = self.decode_head(stage_outputs)  # b, c, d, h, w
        return y.permute(0, 1, 3, 4, 2)  # b, c, h, w, d
| Python |
3D | chenz53/MIM-Med3D | code/models/__init__.py | .py | 324 | 11 | from .mae import MAE
from .simmim import ViTSimMIM
from .vit_3d import VisionTransformer3D
from .swin_3d import SwinTransformer3D
from .upernet_3d import UperNet3D
from .van_3d import VAN3D
from .vitautoenc import ViTAutoEnc
from .unetr import UNETR
from .upernet_swin import UperNetSwin
from .upernet_van import UperNetVAN
| Python |
3D | chenz53/MIM-Med3D | code/models/mae.py | .py | 7,582 | 212 | import math
import logging
from typing import Sequence, Union
import torch
import torch.nn as nn
from monai.networks.blocks.patchembedding import PatchEmbeddingBlock
from monai.networks.blocks.transformerblock import TransformerBlock
from monai.networks.nets import ViT
from einops import repeat
from mmcv.runner import load_checkpoint
from timm.models.layers import DropPath, trunc_normal_
__all__ = ["MAE"]
class MAE(nn.Module):
"""
Vision Transformer (ViT), based on: "Dosovitskiy et al.,
An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>"
Modified to also give same dimension outputs as the input size of the image
"""
def __init__(
self,
pretrained: str,
in_channels: int,
img_size: Union[Sequence[int], int],
patch_size: Union[Sequence[int], int],
hidden_size: int = 768,
mlp_dim: int = 3072,
num_layers: int = 12,
num_heads: int = 12,
pos_embed: str = "perceptron",
dropout_rate: float = 0.0,
spatial_dims: int = 3,
decoder_dim: int = 768,
decoder_depth: int = 1,
decoder_heads: int = 8,
masking_ratio: float = 0.75,
revise_keys=[("model.", "")],
**kwargs,
) -> None:
"""
Args:
in_channels: dimension of input channels or the number of channels for input
img_size: dimension of input image.
patch_size: dimension of patch size.
hidden_size: dimension of hidden layer.
mlp_dim: dimension of feedforward layer.
num_layers: number of transformer blocks.
num_heads: number of attention heads.
pos_embed: position embedding layer type.
dropout_rate: faction of the input units to drop.
spatial_dims: number of spatial dimensions.
decoder_dim: dimension of decoder hidden layer.
decoder_depth: number of decoder transformer layer.
decoder_heads: number of decoder heads.
masking_ratio: ratio of masking patches.
Examples::
# for single channel input with image size of (96,96,96), patch position embedding and segmentation backbone
# It will provide an output of same size as that of the input
>>> net = MAE(in_channels=1, patch_size=(16,16,16), img_size=(96,96,96), pos_embed='perceptron')
"""
super().__init__()
self.pretrained = pretrained
self.spatial_dims = spatial_dims
self.encoder = ViT(
in_channels=in_channels,
img_size=img_size,
patch_size=patch_size,
hidden_size=hidden_size,
mlp_dim=mlp_dim,
num_layers=num_layers,
num_heads=num_heads,
pos_embed=pos_embed,
dropout_rate=dropout_rate,
spatial_dims=spatial_dims,
)
# patch embedding block
patch_embedding = self.encoder.patch_embedding
self.to_patch, self.patch_to_emb = patch_embedding.patch_embeddings
n_patches = patch_embedding.n_patches
patch_dim = patch_embedding.patch_dim
# connect encoder and decoder if mismatch dimension
self.enc_to_dec = (
nn.Linear(hidden_size, decoder_dim)
if hidden_size != decoder_dim
else nn.Identity()
)
# build up decoder transformer blocks
self.decoder_blocks = nn.ModuleList(
[
TransformerBlock(
decoder_dim, decoder_dim * 4, decoder_heads, dropout_rate
)
for i in range(decoder_depth)
]
)
self.decoder_norm = nn.LayerNorm(decoder_dim)
self.masking_ratio = masking_ratio
assert (
masking_ratio > 0 and masking_ratio < 1
), "masking ratio must be kept between 0 and 1"
self.mask_token = nn.Parameter(torch.randn(decoder_dim))
self.decoder_pos_emb = nn.Embedding(n_patches, decoder_dim)
# embeddings to pixels
self.to_pixels = nn.Linear(decoder_dim, patch_dim)
self.init_weights(revise_keys=revise_keys)
def init_weights(self, pretrained=None, revise_keys=[]):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
self.apply(_init_weights)
logging.info(f"load model from: {self.pretrained}")
load_checkpoint(
self,
filename=self.pretrained,
map_location=torch.device("cpu"),
strict=False,
revise_keys=revise_keys,
)
elif self.pretrained is None:
self.apply(_init_weights)
else:
raise TypeError("pretrained must be a path(str) or None")
    def forward(self, x):
        """Run one masked-autoencoder pass: encode visible patches, decode
        mask tokens, predict pixels for the masked patches.

        Args:
            x: input tensor must have isotropic spatial dimensions,
                such as ``[batch_size, channels, sp_size, sp_size[, sp_size]]``.

        Returns:
            Tuple ``(pred_pixel_values, patches, batch_range, masked_indices)``:
            predicted pixels for the masked patches, all ground-truth patches,
            a column vector of batch indices, and the per-sample masked patch
            indices — everything the caller needs for the reconstruction loss.
        """
        device = x.device
        # get patches
        patches = self.to_patch(x)
        batch, n_patches, *_ = patches.shape
        # patch to encoder tokens and add positions
        tokens = self.patch_to_emb(patches)
        tokens = tokens + self.encoder.patch_embedding.position_embeddings
        # number of patches to mask; a random permutation per sample — the
        # first `num_masked` positions of each permutation are the masked ones
        num_masked = int(self.masking_ratio * n_patches)
        rand_indices = torch.rand(batch, n_patches, device=device).argsort(dim=-1)
        masked_indices, unmasked_indices = (
            rand_indices[:, :num_masked],
            rand_indices[:, num_masked:],
        )
        # gather only the unmasked tokens to be encoded
        batch_range = torch.arange(batch, device=device)[:, None]
        tokens = tokens[batch_range, unmasked_indices]
        # get the patches to be masked for the final reconstruction loss
        # masked_patches = patches[batch_range, masked_indices]
        for blk in self.encoder.blocks:
            tokens = blk(tokens)
        encoded_tokens = tokens
        # project encoder width to decoder width (identity when equal)
        decoder_tokens = self.enc_to_dec(encoded_tokens)
        decoder_tokens += self.decoder_pos_emb(unmasked_indices)
        # one shared learnable mask token per masked position, plus its
        # decoder-side positional embedding
        mask_tokens = repeat(self.mask_token, "d -> b n d", b=batch, n=num_masked)
        mask_tokens = mask_tokens + self.decoder_pos_emb(masked_indices)
        # concat the masked tokens to the decoder tokens and attend with decoder;
        # mask tokens come FIRST — the `[:, :num_masked]` slice below relies on it
        decoder_tokens = torch.cat((mask_tokens, decoder_tokens), dim=1)
        for blk in self.decoder_blocks:
            decoder_tokens = blk(decoder_tokens)
        decoded_tokens = self.decoder_norm(decoder_tokens)
        # splice out the mask tokens and project to pixel values
        mask_tokens = decoded_tokens[:, :num_masked]
        pred_pixel_values = self.to_pixels(mask_tokens)
        return pred_pixel_values, patches, batch_range, masked_indices
| Python |
3D | chenz53/MIM-Med3D | code/models/utils.py | .py | 5,676 | 149 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _quadruple
class Conv4d(nn.Module):
    """4D convolution assembled from a bank of ``nn.Conv3d`` layers.

    The 4D kernel is split along its first spatial axis into ``l_k`` 3D
    kernels; ``forward`` applies each 3D convolution to the matching input
    frames and accumulates the results into the proper output frames.

    Args:
        in_channels: Number of input channels.
        out_channels: Number of output channels.
        kernel_size: int or 4-tuple ``(l, d, h, w)``.
        stride: int or 4-tuple. Default: 1 per dim.
        padding: int or 4-tuple of zero padding. Default: 0 per dim.
        dilation: int or 4-tuple. Default: 1 per dim.
        groups: Only ``1`` is supported.
        bias: If True, add one learnable bias of shape ``(out_channels,)``
            to the final accumulated output. Default: False.
        padding_mode: Only ``"zeros"`` is supported.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size,
        stride=(1, 1, 1, 1),
        padding=(0, 0, 0, 0),
        dilation=(1, 1, 1, 1),
        groups: int = 1,
        bias: bool = False,
        padding_mode: str = "zeros",
    ):
        super(Conv4d, self).__init__()
        kernel_size = _quadruple(kernel_size)
        stride = _quadruple(stride)
        padding = _quadruple(padding)
        dilation = _quadruple(dilation)
        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")
        valid_padding_modes = {"zeros"}
        if padding_mode not in valid_padding_modes:
            raise ValueError(
                "padding_mode must be one of {}, but got padding_mode='{}'".format(
                    valid_padding_modes, padding_mode
                )
            )
        # Assertions for constructor arguments
        assert len(kernel_size) == 4, "4D kernel size expected!"
        assert len(stride) == 4, "4D Stride size expected!!"
        assert len(padding) == 4, "4D Padding size expected!!"
        assert len(dilation) == 4, "4D dilation size expected!"
        assert groups == 1, "Groups other than 1 not yet implemented!"
        # Store constructor arguments
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode
        # Construct weight and (optional) bias of the 4D convolution, then
        # initialize them while the monolithic 4D weight still exists.
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups, *kernel_size)
        )
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.bias = None
        self.reset_parameters()
        # Split the 4D kernel into one Conv3d per frame along the first
        # spatial axis; a ModuleList keeps them registered and trainable.
        self.conv3d_layers = torch.nn.ModuleList()
        for i in range(self.kernel_size[0]):
            conv3d_layer = nn.Conv3d(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=self.kernel_size[1::],
                padding=self.padding[1::],
                dilation=self.dilation[1::],
                stride=self.stride[1::],
                # FIX: previously left at the default bias=True, which added
                # l_k spurious learnable biases on top of Conv4d's own
                # optional `self.bias` (even when bias=False was requested).
                bias=False,
            )
            # Share storage with the initialized 4D weight slice.
            conv3d_layer.weight = nn.Parameter(self.weight[:, :, i, :, :])
            self.conv3d_layers.append(conv3d_layer)
        # The monolithic 4D weight is no longer needed; the per-frame slices
        # above carry its (already initialized) values.
        del self.weight

    def reset_parameters(self) -> None:
        """Kaiming-uniform init for the 4D weight, uniform init for bias.

        NOTE: only valid during ``__init__`` — ``self.weight`` is deleted
        afterwards.
        """
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input):
        """Convolve a 6D input ``(B, C, l, d, h, w)``.

        Returns:
            Tensor of shape ``(B, out_channels, l_o, d_o, h_o, w_o)``.
        """
        # Define shortcut names for dimensions of input and kernel
        (Batch, _, l_i, d_i, h_i, w_i) = tuple(input.shape)
        (l_k, d_k, h_k, w_k) = self.kernel_size
        (l_p, d_p, h_p, w_p) = self.padding
        (l_d, d_d, h_d, w_d) = self.dilation
        (l_s, d_s, h_s, w_s) = self.stride
        # Compute the size of the output tensor based on the zero padding
        l_o = (l_i + 2 * l_p - (l_k) - (l_k - 1) * (l_d - 1)) // l_s + 1
        d_o = (d_i + 2 * d_p - (d_k) - (d_k - 1) * (d_d - 1)) // d_s + 1
        h_o = (h_i + 2 * h_p - (h_k) - (h_k - 1) * (h_d - 1)) // h_s + 1
        w_o = (w_i + 2 * w_p - (w_k) - (w_k - 1) * (w_d - 1)) // w_s + 1
        # Pre-define output tensor
        out = torch.zeros(Batch, self.out_channels, l_o, d_o, h_o, w_o).to(input.device)
        # Convolve each kernel frame i with each matching input frame j
        for i in range(l_k):
            # Zero-offset of kernel frame i along the first spatial axis
            zero_offset = -l_p + (i * l_d)
            # Range of input frames j that kernel frame i can touch
            j_start = max(zero_offset % l_s, zero_offset)
            j_end = min(l_i, l_i + l_p - (l_k - i - 1) * l_d)
            for j in range(j_start, j_end, l_s):
                # Output frame this (kernel frame i, input frame j) pair feeds
                out_frame = (j - zero_offset) // l_s
                out[:, :, out_frame, :, :, :] += self.conv3d_layers[i](
                    input[:, :, j, :, :]
                )
        # Add the single Conv4d-level bias, if enabled
        if self.bias is not None:
            out = out + self.bias.view(1, -1, 1, 1, 1, 1)
        return out
if __name__ == "__main__":
    # Smoke test: push a 4-channel, single-frame 4D volume through a
    # patch-embedding-style Conv4d and print the resulting size.
    # NOTE: requires a CUDA device — there is no CPU fallback here.
    x = torch.randn(1, 4, 1, 144, 224, 224).cuda()
    embed = Conv4d(
        in_channels=4, out_channels=96, kernel_size=(1, 2, 4, 4), stride=(1, 2, 4, 4)
    ).cuda()
    outputs = embed(x)
    print(outputs.size())
| Python |
3D | chenz53/MIM-Med3D | code/models/van_3d.py | .py | 14,082 | 462 | from typing import Optional, Union, Sequence
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
# from timm.models.registry import register_model
# from timm.models.vision_transformer import _cfg
import math
from monai.utils import ensure_tuple_rep
class Mlp3D(nn.Module):
    """Convolutional MLP used inside VAN-3D blocks.

    Pipeline: 1x1x1 Conv3d expansion -> depthwise 3x3x3 conv (DWConv3D) ->
    activation -> dropout -> 1x1x1 Conv3d projection -> dropout.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: Union[int, None] = None,
        out_features: Union[int, None] = None,
        act_layer: str = "gelu",
        drop: float = 0.0,
    ):
        super().__init__()
        # Map the string name to an activation class; anything already a
        # class (or an unknown string) is passed through untouched.
        act_cls = {"relu": nn.ReLU, "gelu": nn.GELU}.get(act_layer, act_layer)
        if not out_features:
            out_features = in_features
        if not hidden_features:
            hidden_features = in_features
        self.fc1 = nn.Conv3d(in_features, hidden_features, 1)
        self.dwconv = DWConv3D(hidden_features)
        self.act = act_cls()
        self.fc2 = nn.Conv3d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Trunc-normal Linear, unit/zero LayerNorm, He-normal Conv3d init."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv3d):
            k0, k1, k2 = m.kernel_size
            fan_out = (k0 * k1 * k2 * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        x = self.drop(self.act(self.dwconv(self.fc1(x))))
        return self.drop(self.fc2(x))
class AttentionModule3D(nn.Module):
    """Large-kernel attention (LKA) for 3D features.

    A 5x5x5 depthwise conv, a dilated 7x7x7 depthwise conv, and a pointwise
    conv produce an attention map that multiplicatively gates the input.
    """

    def __init__(self, dim):
        super().__init__()
        self.conv0 = nn.Conv3d(dim, dim, 5, padding=2, groups=dim)
        self.conv_spatial = nn.Conv3d(
            dim, dim, 7, stride=1, padding=9, groups=dim, dilation=3
        )
        self.conv1 = nn.Conv3d(dim, dim, 1)

    def forward(self, x):
        # Build the gate from the input, then modulate a copy of the input.
        gate = self.conv1(self.conv_spatial(self.conv0(x)))
        return x.clone() * gate
class SpatialAttention3D(nn.Module):
    """VAN spatial-attention block.

    1x1x1 projection -> GELU -> large-kernel-attention gating -> 1x1x1
    projection, all wrapped in a residual connection.
    """

    def __init__(self, d_model: int):
        super().__init__()
        self.proj_1 = nn.Conv3d(d_model, d_model, 1)
        self.activation = nn.GELU()
        self.spatial_gating_unit = AttentionModule3D(d_model)
        self.proj_2 = nn.Conv3d(d_model, d_model, 1)

    def forward(self, x):
        residual = x.clone()
        out = self.proj_2(self.spatial_gating_unit(self.activation(self.proj_1(x))))
        return out + residual
class Block3D(nn.Module):
    """VAN-3D block: BatchNorm + spatial attention and BatchNorm + conv MLP,
    each residual branch scaled by a learnable per-channel factor and wrapped
    in stochastic depth (DropPath)."""

    def __init__(
        self,
        dim: int,
        mlp_ratio: float = 4.0,
        drop: float = 0.0,
        drop_path: float = 0.0,
        act_layer: str = "gelu",
    ):
        super().__init__()
        # Resolve the activation name; unknown values pass through unchanged.
        act_cls = {"gelu": nn.GELU, "relu": nn.ReLU}.get(act_layer, act_layer)
        self.norm1 = nn.BatchNorm3d(dim)
        self.attn = SpatialAttention3D(dim)
        self.drop_path = nn.Identity() if drop_path <= 0.0 else DropPath(drop_path)
        self.norm2 = nn.BatchNorm3d(dim)
        self.mlp = Mlp3D(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_cls,
            drop=drop,
        )
        # Per-channel residual scaling (LayerScale), initialized small.
        init_scale = 1e-2
        self.layer_scale_1 = nn.Parameter(
            init_scale * torch.ones((dim)), requires_grad=True
        )
        self.layer_scale_2 = nn.Parameter(
            init_scale * torch.ones((dim)), requires_grad=True
        )
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Trunc-normal Linear, unit/zero LayerNorm, He-normal Conv3d init."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv3d):
            k0, k1, k2 = m.kernel_size
            fan_out = (k0 * k1 * k2 * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        # Broadcast the (dim,) scales over the three spatial axes.
        scale_attn = self.layer_scale_1.view(-1, 1, 1, 1)
        scale_mlp = self.layer_scale_2.view(-1, 1, 1, 1)
        x = x + self.drop_path(scale_attn * self.attn(self.norm1(x)))
        x = x + self.drop_path(scale_mlp * self.mlp(self.norm2(x)))
        return x
class OverlapPatchEmbed3D(nn.Module):
    """Overlapping patch embedding for 3D volumes.

    A strided Conv3d with kernel larger than the stride yields overlapping
    patches; the result is normalized with BatchNorm3d. ``forward`` returns
    the embedded feature map together with its spatial extents (D, H, W).
    """

    def __init__(
        self,
        patch_size: Union[Sequence[int], int] = 7,
        stride: int = 4,
        in_chans: int = 3,
        embed_dim: int = 768,
    ):
        super().__init__()
        patch_size = ensure_tuple_rep(patch_size, 3)
        # "same"-style padding so spatial dims shrink only by the stride.
        pad = (patch_size[0] // 2, patch_size[1] // 2, patch_size[2] // 2)
        self.proj = nn.Conv3d(
            in_chans,
            embed_dim,
            kernel_size=patch_size,
            stride=stride,
            padding=pad,
        )
        self.norm = nn.BatchNorm3d(embed_dim)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Trunc-normal Linear, unit/zero LayerNorm, He-normal Conv3d init."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv3d):
            k0, k1, k2 = m.kernel_size
            fan_out = (k0 * k1 * k2 * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        x = self.proj(x)
        _, _, D, H, W = x.shape
        return self.norm(x), D, H, W
class VAN3D(nn.Module):
    """Visual Attention Network (VAN) backbone generalized to 3D volumes.

    The model is a pyramid of ``num_stages`` stages; stage ``i`` applies an
    ``OverlapPatchEmbed3D`` (stride 4 for the stem, 2 afterwards), then
    ``depths[i]`` VAN ``Block3D`` blocks, then a LayerNorm over flattened
    tokens. ``forward_features`` also returns every stage's feature map for
    dense prediction heads.

    Fixes vs. the previous revision:
      * ``forward`` fed the whole ``(pooled, stage_outputs)`` tuple returned
        by ``forward_features`` into the classification head (runtime error).
      * ``reset_classifier`` referenced a non-existent ``self.embed_dim``;
        the embedding widths are now stored as ``self.embed_dims``.
      * Mutable list defaults were replaced with tuples (shared-mutable
        default pitfall); any sequence is still accepted.
    """

    def __init__(
        self,
        in_chans: int = 3,
        num_classes: int = 1000,
        embed_dims: Sequence[int] = (64, 128, 256, 512),
        mlp_ratios: Sequence[float] = (4.0, 4.0, 4.0, 4.0),
        drop_rate: float = 0.0,
        drop_path_rate: float = 0.0,
        norm_layer: bool = True,
        depths: Sequence[int] = (3, 4, 6, 3),
        num_stages: int = 4,
        flag: bool = False,
    ):
        super().__init__()
        if norm_layer:
            # NOTE(review): when norm_layer is False the per-stage norm
            # construction below would call bool(...) and fail; True is
            # effectively required by the current code.
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        if not flag:
            self.num_classes = num_classes
        self.depths = depths
        self.num_stages = num_stages
        # Kept so reset_classifier can rebuild the head later.
        self.embed_dims = embed_dims

        # Stochastic depth decay rule: drop-path rate grows linearly per block.
        dpr = [r.item() for r in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(num_stages):
            patch_embed = OverlapPatchEmbed3D(
                patch_size=7 if i == 0 else 3,
                stride=4 if i == 0 else 2,
                in_chans=in_chans if i == 0 else embed_dims[i - 1],
                embed_dim=embed_dims[i],
            )
            block = nn.ModuleList(
                [
                    Block3D(
                        dim=embed_dims[i],
                        mlp_ratio=mlp_ratios[i],
                        drop=drop_rate,
                        drop_path=dpr[cur + j],
                    )
                    for j in range(depths[i])
                ]
            )
            norm = norm_layer(embed_dims[i])
            cur += depths[i]

            setattr(self, f"patch_embed{i + 1}", patch_embed)
            setattr(self, f"block{i + 1}", block)
            setattr(self, f"norm{i + 1}", norm)

        # Classification head (Identity when num_classes == 0).
        self.head = (
            nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
        )

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Trunc-normal Linear, unit/zero LayerNorm, He-normal Conv3d init."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv3d):
            fan_out = (
                m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
            )
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def freeze_patch_emb(self):
        """Freeze the stem patch embedding."""
        self.patch_embed1.requires_grad = False

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names to exclude from weight decay."""
        return {
            "pos_embed1",
            "pos_embed2",
            "pos_embed3",
            "pos_embed4",
            "cls_token",
        }  # has pos_embed may be better

    def get_classifier(self):
        """Return the classification head module."""
        return self.head

    def reset_classifier(self, num_classes, global_pool=""):
        """Replace the classification head for ``num_classes`` classes.

        FIX: previously used the undefined attribute ``self.embed_dim``;
        the final stage width ``self.embed_dims[-1]`` is used instead.
        """
        self.num_classes = num_classes
        self.head = (
            nn.Linear(self.embed_dims[-1], num_classes)
            if num_classes > 0
            else nn.Identity()
        )

    def forward_features(self, x):
        """Run all stages.

        Returns:
            Tuple ``(pooled, stage_outputs)``: the token-mean feature of the
            last stage, and one feature map per stage in (B, C, D, H, W)
            layout.
        """
        B = x.shape[0]
        stage_outputs = []
        for i in range(self.num_stages):
            patch_embed = getattr(self, f"patch_embed{i + 1}")
            block = getattr(self, f"block{i + 1}")
            norm = getattr(self, f"norm{i + 1}")
            x, D, H, W = patch_embed(x)
            for blk in block:
                x = blk(x)
            # Flatten to tokens for the stage-level LayerNorm.
            x = x.flatten(2).transpose(1, 2)
            x = norm(x)
            if i != self.num_stages - 1:
                # Back to (B, C, D, H, W) for the next stage's conv embedding.
                x = (
                    x.reshape(B, D, H, W, -1).permute(0, 4, 1, 2, 3).contiguous()
                )  # b, c, d, h, w
                stage_outputs.append(x)
            else:
                out = x.reshape(B, D, H, W, -1).permute(0, 4, 1, 2, 3).contiguous()
                stage_outputs.append(out)

        return x.mean(dim=1), stage_outputs

    def forward(self, x):
        """Classification forward pass.

        FIX: ``forward_features`` returns a tuple; previously the whole
        tuple was fed to the head, which would raise at runtime.
        """
        pooled, _ = self.forward_features(x)
        return self.head(pooled)
class DWConv3D(nn.Module):
    """Depthwise 3x3x3 convolution (``groups == channels``); padding 1 keeps
    the spatial shape unchanged."""

    def __init__(self, dim: int = 768):
        super().__init__()
        self.dwconv = nn.Conv3d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x):
        return self.dwconv(x)
def _conv_filter(state_dict, patch_size=16):
"""convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if "patch_embed.proj.weight" in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
# @register_model
# def van_tiny(pretrained=False, **kwargs):
# model = VAN(
# embed_dims=[32, 64, 160, 256],
# mlp_ratios=[8, 8, 4, 4],
# norm_layer=partial(nn.LayerNorm, eps=1e-6),
# depths=[3, 3, 5, 2],
# **kwargs,
# )
# model.default_cfg = _cfg()
# return model
# @register_model
# def van_small(pretrained=False, **kwargs):
# model = VAN(
# embed_dims=[64, 128, 320, 512],
# mlp_ratios=[8, 8, 4, 4],
# norm_layer=partial(nn.LayerNorm, eps=1e-6),
# depths=[2, 2, 4, 2],
# **kwargs,
# )
# model.default_cfg = _cfg()
# return model
# @register_model
# def van_base(pretrained=False, **kwargs):
# model = VAN(
# embed_dims=[64, 128, 320, 512],
# mlp_ratios=[8, 8, 4, 4],
# norm_layer=partial(nn.LayerNorm, eps=1e-6),
# depths=[3, 3, 12, 3],
# **kwargs,
# )
# model.default_cfg = _cfg()
# return model
# @register_model
# def van_large(pretrained=False, **kwargs):
# model = VAN(
# embed_dims=[64, 128, 320, 512],
# mlp_ratios=[8, 8, 4, 4],
# norm_layer=partial(nn.LayerNorm, eps=1e-6),
# depths=[3, 5, 27, 3],
# **kwargs,
# )
# model.default_cfg = _cfg()
# return model
if __name__ == "__main__":
    # Smoke test: run a single-channel volume through a VAN-3D configuration
    # (depths [3, 3, 12, 3] — VAN-base-like) and print each stage's
    # feature-map shape on CPU.
    x = torch.randn(1, 1, 128, 96, 96)
    model = VAN3D(
        # img_size=(128, 128, 128),
        in_chans=1,
        num_classes=0,
        embed_dims=[64, 128, 256, 512],
        mlp_ratios=[8.0, 8.0, 4.0, 4.0],
        drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=True,
        depths=[3, 3, 12, 3],
        num_stages=4,
        flag=False,
    )
    y, stage_outputs = model.forward_features(x)
    for output in stage_outputs:
        print(output.shape)
| Python |
3D | chenz53/MIM-Med3D | code/models/swin_3d.py | .py | 33,945 | 986 | import logging
from functools import reduce, lru_cache
from operator import mul
from einops import rearrange
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import DropPath, trunc_normal_
from mmcv.runner import load_checkpoint
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Fetch the project's root logger through mmcv's ``get_logger``.

    The logger is initialized on first use: a StreamHandler is always
    attached, and a FileHandler is added as well when ``log_file`` is given.
    The logger is named after the top-level package.

    Args:
        log_file (str | None): Optional path for an additional FileHandler.
        log_level (int): Level applied to the rank-0 process; mmcv silences
            other ranks to ERROR.

    Returns:
        logging.Logger: The root logger.
    """
    package_name = __name__.split(".")[0]
    return get_logger(package_name, log_file, log_level)
class Mlp(nn.Module):
    """Two-layer perceptron with an activation after the first layer and
    dropout after each layer."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        if not out_features:
            out_features = in_features
        if not hidden_features:
            hidden_features = in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a volume into non-overlapping 3D windows.

    Args:
        x: tensor of shape (B, D, H, W, C); each of D/H/W must be divisible
            by the corresponding window size.
        window_size (tuple[int]): (Wd, Wh, Ww).

    Returns:
        Tensor of shape (B * num_windows, Wd*Wh*Ww, C).
    """
    B, D, H, W, C = x.shape
    wd, wh, ww = window_size
    # Factor each spatial axis into (num_windows_along_axis, window_extent).
    tiles = x.view(B, D // wd, wd, H // wh, wh, W // ww, ww, C)
    # Bring the three window-extent axes together, then flatten them.
    tiles = tiles.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous()
    return tiles.view(-1, wd * wh * ww, C)
def window_reverse(windows, window_size, B, D, H, W):
    """Inverse of ``window_partition``: reassemble windows into a volume.

    Args:
        windows: tensor of shape (B*num_windows, Wd*Wh*Ww... , C) as produced
            by ``window_partition``.
        window_size (tuple[int]): (Wd, Wh, Ww).
        B, D, H, W (int): target batch size and spatial extents.

    Returns:
        Tensor of shape (B, D, H, W, C).
    """
    wd, wh, ww = window_size
    x = windows.view(B, D // wd, H // wh, W // ww, wd, wh, ww, -1)
    # Interleave (num_windows, window_extent) pairs back into full axes.
    return x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)
def get_window_size(x_size, window_size, shift_size=None):
    """Clamp window (and shift) sizes to the input extent, per dimension.

    Any dimension where the input is no larger than the window uses the full
    input extent as the window and disables shifting for that dimension.

    Returns:
        The effective window size tuple, or a (window, shift) tuple pair
        when ``shift_size`` is given.
    """
    eff_window = list(window_size)
    eff_shift = list(shift_size) if shift_size is not None else None
    for dim, extent in enumerate(x_size):
        if extent <= window_size[dim]:
            eff_window[dim] = extent
            if eff_shift is not None:
                eff_shift[dim] = 0
    if eff_shift is None:
        return tuple(eff_window)
    return tuple(eff_window), tuple(eff_shift)
class WindowAttention3D(nn.Module):
    """ Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The temporal length, height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: False
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wd, Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias: one learnable
        # bias per head for every possible (dz, dy, dx) offset inside a window
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(
                (2 * window_size[0] - 1)
                * (2 * window_size[1] - 1)
                * (2 * window_size[2] - 1),
                num_heads,
            )
        )  # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_d = torch.arange(self.window_size[0])
        coords_h = torch.arange(self.window_size[1])
        coords_w = torch.arange(self.window_size[2])
        coords = torch.stack(
            torch.meshgrid(coords_d, coords_h, coords_w, indexing="ij")
        )  # 3, Wd, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 3, Wd*Wh*Ww
        relative_coords = (
            coords_flatten[:, :, None] - coords_flatten[:, None, :]
        )  # 3, Wd*Wh*Ww, Wd*Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0
        ).contiguous()  # Wd*Wh*Ww, Wd*Wh*Ww, 3
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 2] += self.window_size[2] - 1

        # flatten the shifted 3D offset into a single row-major index into
        # the bias table
        relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (
            2 * self.window_size[2] - 1
        )
        relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1
        relative_position_index = relative_coords.sum(-1)  # Wd*Wh*Ww, Wd*Wh*Ww
        # buffer, not a parameter: moves with the module, excluded from grads
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """ Forward function.
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, N, N) or None
        Returns:
            Tensor of shape (num_windows*B, N, C).
        """
        B_, N, C = x.shape
        # project to q/k/v and split heads: each of q, k, v is
        # (B_, num_heads, N, C // num_heads)
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]  # B_, nH, N, C

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)

        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index[:N, :N].reshape(-1)
        ].reshape(
            N, N, -1
        )  # Wd*Wh*Ww,Wd*Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wd*Wh*Ww, Wd*Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)  # B_, nH, N, N

        if mask is not None:
            # broadcast the per-window mask over batch and heads
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
                1
            ).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SwinTransformerBlock3D(nn.Module):
    """ Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): (D, H, W) resolution of the incoming tokens.
        num_heads (int): Number of attention heads.
        window_size (tuple[int]): Window size.
        shift_size (tuple[int]): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        use_checkpoint (bool, optional): Trade compute for memory with
            activation checkpointing. Default: False
    """

    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=(2, 7, 7),
        shift_size=(0, 0, 0),
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.use_checkpoint = use_checkpoint

        # each shift must be smaller than its window extent
        assert (
            0 <= self.shift_size[0] < self.window_size[0]
        ), "shift_size must in 0-window_size"
        assert (
            0 <= self.shift_size[1] < self.window_size[1]
        ), "shift_size must in 0-window_size"
        assert (
            0 <= self.shift_size[2] < self.window_size[2]
        ), "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention3D(
            dim,
            window_size=self.window_size,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

    def forward_part1(self, x, mask_matrix):
        # (S)W-MSA half of the block: norm -> pad -> (shift) -> window
        # attention -> (unshift) -> unpad. Residual is added by the caller.
        D, H, W = self.input_resolution
        B, L, C = x.shape
        assert L == D * H * W, "input feature has wrong size"

        window_size, shift_size = get_window_size(
            (D, H, W), self.window_size, self.shift_size
        )

        x = self.norm1(x)
        x = x.view(B, D, H, W, C)

        # pad feature maps to multiples of window size
        pad_l = pad_t = pad_d0 = 0
        pad_d1 = (window_size[0] - D % window_size[0]) % window_size[0]
        pad_b = (window_size[1] - H % window_size[1]) % window_size[1]
        pad_r = (window_size[2] - W % window_size[2]) % window_size[2]
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))
        _, Dp, Hp, Wp, _ = x.shape
        # cyclic shift (only for SW-MSA blocks, where some shift is non-zero)
        if any(i > 0 for i in shift_size):
            shifted_x = torch.roll(
                x,
                shifts=(-shift_size[0], -shift_size[1], -shift_size[2]),
                dims=(1, 2, 3),
            )
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None
        # partition windows
        x_windows = window_partition(shifted_x, window_size)  # B*nW, Wd*Wh*Ww, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # B*nW, Wd*Wh*Ww, C
        # merge windows
        attn_windows = attn_windows.view(-1, *(window_size + (C,)))
        shifted_x = window_reverse(
            attn_windows, window_size, B, Dp, Hp, Wp
        )  # B D' H' W' C
        # reverse cyclic shift
        if any(i > 0 for i in shift_size):
            x = torch.roll(
                shifted_x,
                shifts=(shift_size[0], shift_size[1], shift_size[2]),
                dims=(1, 2, 3),
            )
        else:
            x = shifted_x

        # crop away the padding added above
        if pad_d1 > 0 or pad_r > 0 or pad_b > 0:
            x = x[:, :D, :H, :W, :].contiguous()

        x = x.view(B, D * H * W, C)
        return x

    def forward_part2(self, x):
        # MLP half of the block (residual is added by the caller)
        return self.drop_path(self.mlp(self.norm2(x)))

    def forward(self, x, mask_matrix):
        """ Forward function.
        Args:
            x: Input feature, tensor size (B, D, H, W, C).
            mask_matrix: Attention mask for cyclic shift.
        """

        shortcut = x
        # optionally checkpoint each half to save activation memory
        if self.use_checkpoint:
            x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
        else:
            x = self.forward_part1(x, mask_matrix)
        x = shortcut + self.drop_path(x)

        if self.use_checkpoint:
            x = x + checkpoint.checkpoint(self.forward_part2, x)
        else:
            x = x + self.forward_part2(x)

        return x
class PatchMerging(nn.Module):
    """Merge each 2x2 (H, W) neighborhood of tokens into one.

    Depth is left untouched; the four concatenated neighbors (4*C channels)
    are normalized and linearly reduced to 2*C.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): (D, H, W) token resolution.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """Merge tokens: (B, D*H*W, C) -> (B, D*ceil(H/2)*ceil(W/2), 2*C)."""
        D, H, W = self.input_resolution
        B, L, C = x.shape
        assert L == D * H * W, "input feature has wrong size"

        x = x.view(B, D, H, W, C)

        # zero-pad H and W up to even sizes when needed
        if H % 2 or W % 2:
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))

        corners = [
            x[:, :, 0::2, 0::2, :],  # B D H/2 W/2 C
            x[:, :, 1::2, 0::2, :],
            x[:, :, 0::2, 1::2, :],
            x[:, :, 1::2, 1::2, :],
        ]
        merged = torch.cat(corners, dim=-1).view(B, -1, 4 * C)
        return self.reduction(self.norm(merged))
# Memoized per (Dp, Hp, Wp, window, shift, device): each stage reuses its mask.
@lru_cache()
def compute_mask(D, H, W, window_size, shift_size, device):
    """Additive attention mask for shifted-window attention.

    Assigns a region id to each voxel of the (padded) volume according to
    the window/shift segmentation, then marks cross-region token pairs
    inside each window with -100 (softmax -> ~0) and same-region pairs 0.
    """
    region = torch.zeros((1, D, H, W, 1), device=device)  # 1 Dp Hp Wp 1

    def _spans(ws, ss):
        # the three segments per axis induced by the cyclic shift
        return (slice(-ws), slice(-ws, -ss), slice(-ss, None))

    cnt = 0
    for d in _spans(window_size[0], shift_size[0]):
        for h in _spans(window_size[1], shift_size[1]):
            for w in _spans(window_size[2], shift_size[2]):
                region[:, d, h, w, :] = cnt
                cnt += 1

    win = window_partition(region, window_size).squeeze(-1)  # nW, ws[0]*ws[1]*ws[2]
    diff = win.unsqueeze(1) - win.unsqueeze(2)
    return torch.where(
        diff == 0, torch.zeros_like(diff), torch.full_like(diff, -100.0)
    )
class BasicLayer(nn.Module):
    """One Swin-3D stage: ``depth`` alternating W-MSA / SW-MSA blocks plus an
    optional downsampling layer.

    Args:
        dim (int): Number of feature channels.
        input_resolution (tuple[int]): (D, H, W) token resolution.
        depth (int): Number of SwinTransformerBlock3D blocks in this stage.
        num_heads (int): Number of attention heads per block.
        window_size (tuple[int]): Local window size. Default: (1, 7, 7).
        mlp_ratio (float): MLP hidden dim ratio. Default: 4.
        qkv_bias (bool, optional): Add a learnable qkv bias. Default: False.
        qk_scale (float | None, optional): Override head_dim ** -0.5 scaling.
        drop (float, optional): Dropout rate. Default: 0.0.
        attn_drop (float, optional): Attention dropout rate. Default: 0.0.
        drop_path (float | list[float], optional): Stochastic depth rate(s).
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm.
        downsample (nn.Module | None, optional): Downsample layer class applied
            after the blocks (e.g. PatchMerging). Default: None.
        use_checkpoint (bool): Use activation checkpointing inside blocks.
    """

    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size=(1, 7, 7),
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.window_size = window_size
        self.shift_size = tuple(w // 2 for w in window_size)
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block drop-path rates: either one scalar for all, or a list.
        rates = drop_path if isinstance(drop_path, list) else [drop_path] * depth
        # Even-indexed blocks use plain windows, odd-indexed ones are shifted.
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock3D(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=self.shift_size if i % 2 else (0, 0, 0),
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=rates[i],
                    norm_layer=norm_layer,
                    use_checkpoint=use_checkpoint,
                )
                for i in range(depth)
            ]
        )

        self.downsample = downsample
        if downsample is not None:
            self.downsample = downsample(
                dim=dim, input_resolution=input_resolution, norm_layer=norm_layer
            )

    def forward(self, x):
        """Run the blocks (sharing a cached shifted-window attention mask)
        and the optional downsample. ``x``: (B, D*H*W, C) token sequence."""
        D, H, W = self.input_resolution
        window_size, shift_size = get_window_size(
            (D, H, W), self.window_size, self.shift_size
        )
        # Padded extents: round each dim up to a multiple of the window size.
        Dp = int(np.ceil(D / window_size[0])) * window_size[0]
        Hp = int(np.ceil(H / window_size[1])) * window_size[1]
        Wp = int(np.ceil(W / window_size[2])) * window_size[2]
        attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)
        for block in self.blocks:
            x = block(x, attn_mask)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
class PatchEmbed3D(nn.Module):
    """Video to Patch Embedding.

    Splits a (B, C, D, H, W) volume into non-overlapping 3-D patches and projects
    each patch to ``embed_dim`` channels with a strided Conv3d. Inputs whose
    spatial extents are not multiples of the patch size are zero-padded up.

    Args:
        img_size (tuple[int]): Nominal input volume size. Default: (96, 96, 96).
        patch_size (tuple[int]): Patch token size. Default: (2, 4, 4).
        in_chans (int): Number of input video channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization applied to the flattened
            tokens. Default: None.
    """

    def __init__(
        self,
        img_size=(96, 96, 96),
        patch_size=(2, 4, 4),
        in_chans=3,
        embed_dim=96,
        norm_layer=None,
    ):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        # Token grid per axis at the nominal input size.
        self.patches_resolution = [s // p for s, p in zip(img_size, patch_size)]
        res = self.patches_resolution
        self.num_patches = res[0] * res[1] * res[2]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Strided convolution == patchify + linear projection in one op.
        self.proj = nn.Conv3d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        )
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        """Embed a (B, C, D, H, W) volume into (B, N, embed_dim) tokens."""
        _, _, D, H, W = x.size()
        pd, ph, pw = self.patch_size
        # Zero-pad each spatial dim up to the next multiple of the patch size;
        # (-dim) % p is 0 when already divisible, so this is a no-op then.
        x = F.pad(x, (0, (-W) % pw, 0, (-H) % ph, 0, (-D) % pd))
        x = self.proj(x)  # (B, embed_dim, D', H', W')
        x = x.flatten(2).transpose(1, 2)  # (B, N, embed_dim)
        if self.norm is not None:
            x = self.norm(x)
        return x
# @BACKBONES.register_module()
class SwinTransformer3D(nn.Module):
    """ Swin Transformer backbone.
    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
    https://arxiv.org/pdf/2103.14030
    Args:
        patch_size (int | tuple(int)): Patch size. Default: (4,4,4).
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention head of each stage.
        window_size (int): Window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True.
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): Dropout rate.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer: Normalization layer. Default: nn.LayerNorm.
        patch_norm (bool): If True, add normalization after patch embedding. Default: False.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
        -1 means not freezing any parameters.
    """
    def __init__(
        self,
        pretrained=None,
        pretrained2d=True,
        img_size=(96, 96, 96),
        patch_size=(4, 4, 4),
        in_chans=3,
        num_classes=0,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=(2, 7, 7),
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.2,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=False,
        frozen_stages=-1,
        use_checkpoint=False,
    ):
        # NOTE(review): mutable default arguments (depths/num_heads lists) are
        # shared across instances; harmless here because they are only read.
        super().__init__()
        self.pretrained = pretrained
        self.pretrained2d = pretrained2d
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.num_classes = num_classes
        self.patch_norm = patch_norm
        self.frozen_stages = frozen_stages
        self.window_size = window_size
        self.img_size = img_size
        self.patch_size = patch_size
        self.ape = ape
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed3D(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, num_patches, embed_dim)
            )
            trunc_normal_(self.absolute_pos_embed, std=0.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule
        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            # Channels double per stage; H/W token resolution halves per stage
            # while the depth (first) axis stays fixed.
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                input_resolution=(
                    patches_resolution[0],
                    patches_resolution[1] // (2 ** i_layer),
                    patches_resolution[2] // (2 ** i_layer),
                ),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                # Per-block slice of the global stochastic-depth schedule.
                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging if i_layer < self.num_layers - 1 else None,
                use_checkpoint=use_checkpoint,
            )
            self.layers.append(layer)
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.final_resolution = [
            patches_resolution[0],
            patches_resolution[1] // (2 ** (self.num_layers - 1)),
            patches_resolution[2] // (2 ** (self.num_layers - 1)),
        ]
        # add a norm layer for each output
        self.norm = norm_layer(self.num_features)
        self.avgpool = nn.AdaptiveAvgPool3d(1)
        self.head = (
            nn.Linear(self.num_features, num_classes)
            if num_classes > 0
            else nn.Identity()
        )
        self._freeze_stages()
    def _freeze_stages(self):
        """Put frozen submodules in eval mode and disable their gradients.

        frozen_stages >= 0 freezes the patch embedding; >= 1 additionally
        freezes that many transformer stages.
        """
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False
        if self.frozen_stages >= 1:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def inflate_weights(self, logger):
        """Inflate the swin2d parameters to swin3d.
        The differences between swin3d and swin2d mainly lie in an extra
        axis. To utilize the pretrained parameters in 2d model,
        the weight of swin2d models should be inflated to fit in the shapes of
        the 3d counterpart.
        Args:
            logger (logging.Logger): The logger used to print
                debugging information.
        """
        checkpoint = torch.load(self.pretrained, map_location="cpu")
        state_dict = checkpoint["model"]
        # delete relative_position_index since we always re-init it
        relative_position_index_keys = [
            k for k in state_dict.keys() if "relative_position_index" in k
        ]
        for k in relative_position_index_keys:
            del state_dict[k]
        # delete attn_mask since we always re-init it
        attn_mask_keys = [k for k in state_dict.keys() if "attn_mask" in k]
        for k in attn_mask_keys:
            del state_dict[k]
        # Replicate the 2D patch-embedding kernel along the new depth axis and
        # rescale so the inflated conv produces the same magnitude of output.
        state_dict["patch_embed.proj.weight"] = (
            state_dict["patch_embed.proj.weight"]
            .unsqueeze(2)
            .repeat(1, 1, self.patch_size[0], 1, 1)
            / self.patch_size[0]
        )
        # bicubic interpolate relative_position_bias_table if not match
        relative_position_bias_table_keys = [
            k for k in state_dict.keys() if "relative_position_bias_table" in k
        ]
        for k in relative_position_bias_table_keys:
            relative_position_bias_table_pretrained = state_dict[k]
            relative_position_bias_table_current = self.state_dict()[k]
            L1, nH1 = relative_position_bias_table_pretrained.size()
            L2, nH2 = relative_position_bias_table_current.size()
            # Target table size over the H/W window only; the depth axis is
            # handled by the repeat below.
            L2 = (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
            wd = self.window_size[0]
            if nH1 != nH2:
                logger.warning(f"Error in loading {k}, passing")
            else:
                if L1 != L2:
                    # Resize the (2h-1)x(2w-1) bias grid with bicubic interpolation.
                    S1 = int(L1 ** 0.5)
                    relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
                        relative_position_bias_table_pretrained.permute(1, 0).view(
                            1, nH1, S1, S1
                        ),
                        size=(2 * self.window_size[1] - 1, 2 * self.window_size[2] - 1),
                        mode="bicubic",
                    )
                    relative_position_bias_table_pretrained = relative_position_bias_table_pretrained_resized.view(
                        nH2, L2
                    ).permute(
                        1, 0
                    )
            # Tile the 2D table across the (2*wd - 1) relative depth offsets.
            state_dict[k] = relative_position_bias_table_pretrained.repeat(
                2 * wd - 1, 1
            )
        msg = self.load_state_dict(state_dict, strict=False)
        logger.info(msg)
        logger.info(f"=> loaded successfully '{self.pretrained}'")
        del checkpoint
        torch.cuda.empty_cache()
    def init_weights(self, pretrained=None, revise_keys=[]):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
            revise_keys (list): Key-rewriting rules forwarded to load_checkpoint.
        """
        # NOTE(review): revise_keys=[] is a mutable default argument; it is only
        # read here, so no aliasing bug, but a None sentinel would be safer.
        def _init_weights(m):
            # Truncated-normal linear weights, zero biases, unit LayerNorm scale.
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=0.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
        if pretrained:
            self.pretrained = pretrained
        if isinstance(self.pretrained, str):
            self.apply(_init_weights)
            logger = get_root_logger()
            logger.info(f"load model from: {self.pretrained}")
            if self.pretrained2d:
                # Inflate 2D model into 3D model.
                self.inflate_weights(logger)
            else:
                # Directly load 3D model.
                load_checkpoint(
                    self,
                    self.pretrained,
                    map_location=torch.device("cpu"),
                    strict=False,
                    logger=logger,
                    revise_keys=revise_keys,
                )
        elif self.pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError("pretrained must be a str or None")
    def forward_features(self, x):
        """Forward function.

        Returns:
            tuple: (final token tensor of shape (B, L, C) after norm,
            list of per-stage feature maps reshaped to (B, C, D, H, W)).
        """
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        stage_outputs = []
        for i, layer in enumerate(self.layers):
            # print(layer.input_resolution)
            x = layer(x.contiguous())
            B, L, C = x.shape
            if i < self.num_layers - 1:
                # Non-final stages end in PatchMerging, so the tokens correspond
                # to half the stage's input H/W resolution.
                resolution = layer.input_resolution
                out = (
                    x.view(B, resolution[0], resolution[1] // 2, resolution[2] // 2, C)
                    .permute(0, 4, 1, 2, 3)
                    .contiguous()
                )
            else:
                out = (
                    x.view(B, *self.final_resolution, C)
                    .permute(0, 4, 1, 2, 3)
                    .contiguous()
                )
            # print(x.shape)
            stage_outputs.append(out)
        # x = rearrange(x, "n c d h w -> n d h w c")
        x = self.norm(x)
        # x = rearrange(x, "n d h w c -> n c d h w")
        # print(x.shape)
        return x, stage_outputs
    def forward(self, x):
        """Classification head: pooled backbone features -> logits (or identity)."""
        # NOTE(review): forward_features returns a 3-D (B, L, C) tensor, but
        # AdaptiveAvgPool3d expects a volumetric input -- confirm this path is
        # exercised (the __main__ demo below calls forward_features directly).
        x, stage_outputs = self.forward_features(x)
        x = self.avgpool(x)  # B, C, 1
        x = torch.flatten(x, 1)  # B, C
        x = self.head(x)
        return x
    def train(self, mode=True):
        """Convert the model into training mode while keep layers freezed."""
        super(SwinTransformer3D, self).train(mode)
        # Re-apply freezing: nn.Module.train() flips every child back to train mode.
        self._freeze_stages()
if __name__ == "__main__":
    # Smoke test: push a random single-channel 96^3 volume through the backbone
    # and print the shape of every intermediate stage output.
    sample = torch.randn(1, 1, 96, 96, 96)
    config = {
        "pretrained": None,
        "pretrained2d": True,
        "img_size": (96, 96, 96),
        "patch_size": (4, 4, 4),
        "in_chans": 1,
        "num_classes": 0,
        "embed_dim": 128,
        "depths": [2, 2, 18, 2],
        "num_heads": [4, 8, 16, 32],
        "window_size": (7, 7, 7),
        "mlp_ratio": 4.0,
        "qkv_bias": True,
        "qk_scale": None,
        "drop_rate": 0.0,
        "attn_drop_rate": 0.0,
        "drop_path_rate": 0.1,
        "norm_layer": nn.LayerNorm,
        "ape": False,
        "patch_norm": True,
        "frozen_stages": -1,
        "use_checkpoint": False,
    }
    backbone = SwinTransformer3D(**config)
    _, stage_outputs = backbone.forward_features(sample)
    for stage_out in stage_outputs:
        print(stage_out.size())
| Python |
3D | chenz53/MIM-Med3D | code/experiments/ssl/simmim_pretrain_main.py | .py | 2,731 | 79 | import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities.cli import LightningCLI
from models import ViTSimMIM
from torch.nn import L1Loss
from monai.inferers import SlidingWindowInferer
from utils.schedulers import LinearWarmupCosineAnnealingLR
import data
import optimizers
class SimMIMtrainer(pl.LightningModule):
    """Pretraining on 3D Imaging with Masked Auto Encoder (SimMIM variant).

    Wraps a ViTSimMIM backbone in a Lightning loop that minimizes an L1
    reconstruction loss on the masked patches only.
    """
    def __init__(
        self, model_name: str, model_dict: dict,
    ):
        # model_name is recorded for hyperparameter logging only; model_dict is
        # forwarded verbatim to the ViTSimMIM constructor.
        super().__init__()
        self.model_name = model_name
        self.model_dict = model_dict
        self.model = ViTSimMIM(**model_dict)
        self.recon_loss = L1Loss()
        self.recon_patches = []
        # self.save_hyperparameters()
    def training_step(self, batch, batch_idx):
        """One optimization step: L1 loss between predicted and true masked patches."""
        # --------------------------
        image = batch["image"]
        # The model returns the predicted pixel values for masked patches plus the
        # indexing tensors needed to select the matching ground-truth patches.
        pred_pixel_values, patches, batch_range, masked_indices = self.model(image)
        batch_size = pred_pixel_values.shape[0]
        loss = self.recon_loss(pred_pixel_values, patches[batch_range, masked_indices])
        self.log("train/l1_loss", loss, batch_size=batch_size, sync_dist=True)
        return {"loss": loss}
    def validation_step(self, batch, batch_idx):
        """Same computation as training_step, logged under the val/ namespace."""
        # --------------------------
        image = batch["image"]
        pred_pixel_values, patches, batch_range, masked_indices = self.model(image)
        batch_size = pred_pixel_values.shape[0]
        loss = self.recon_loss(pred_pixel_values, patches[batch_range, masked_indices])
        self.log("val/l1_loss", loss, batch_size=batch_size, sync_dist=True)
        return {"val_loss": loss, "val_number": batch_size}
    def validation_epoch_end(self, outputs):
        """Log the epoch's mean validation loss and the run hyperparameters."""
        val_loss, num_items = 0, 0
        for output in outputs:
            val_loss += output["val_loss"].sum().item()
            num_items += output["val_number"]
        # NOTE(review): num_items is accumulated but unused -- the mean below is a
        # per-batch average (divides by len(outputs)), not a per-sample average.
        mean_val_loss = torch.tensor(val_loss / len(outputs))
        self.log(
            "val/l1_loss_avg", mean_val_loss, sync_dist=True,
        )
        self.logger.log_hyperparams(
            params={
                "model": self.model_name,
                **self.model_dict,
                # "data": self.trainer.datamodule.json_path,
                # "ds_ratio": self.trainer.datamodule.downsample_ratio,
                "batch_size": self.trainer.datamodule.batch_size,
                "distribution": self.trainer.datamodule.dist,
                # "benchmark": self.trainer.benchmark,
                "max_epochs": self.trainer.max_epochs,
                "precision": self.trainer.precision,
            },
            metrics={"l1_loss": mean_val_loss},
        )
if __name__ == "__main__":
    # Entry point: LightningCLI parses the config/CLI args and runs the trainer.
    cli = LightningCLI(save_config_overwrite=True)
| Python |
3D | chenz53/MIM-Med3D | code/experiments/ssl/simclr_pretrain_main.py | .py | 3,605 | 104 | import data
import optimizers
from models import ViTAutoEnc
from losses import ContrastiveLoss
from torch.nn import L1Loss
import pytorch_lightning as pl
from pytorch_lightning.utilities.cli import LightningCLI
class SimCLRtrainer(pl.LightningModule):
    """SimCLR-style contrastive pretraining on two augmented views of a 3D volume.

    Args:
        batch_size: per-device batch size; the contrastive loss sees both views,
            hence it is constructed with ``batch_size * 2``.
        temperature: softmax temperature of the contrastive (NT-Xent) loss.
        model_name: backbone identifier, e.g. ``"vitautoenc_base_..."``.
        model_dict: keyword arguments forwarded verbatim to the backbone constructor.
    """

    def __init__(
        self, batch_size: int, temperature: float, model_name: str, model_dict: dict
    ):
        super().__init__()
        self.model_name = model_name
        self.model_dict = model_dict
        self.temperature = temperature
        # BUG FIX: the original compared model_name.split("_")[0] (which can never
        # contain "_") against "vitautoenc_base", so the branch never matched and
        # self.model was left unset. Match on the architecture prefix instead, and
        # fail loudly for unknown names rather than with a later AttributeError.
        if model_name.split("_")[0] == "vitautoenc":
            self.model = ViTAutoEnc(**model_dict)
        else:
            raise ValueError(f"unsupported model_name: {model_name!r}")
        # self.recon_loss = L1Loss()
        self.contrastive_loss = ContrastiveLoss(
            batch_size=batch_size * 2, temperature=temperature
        )
        # Common keyword arguments for self.log().
        self.log_kwargs = {
            "on_epoch": True,
            # "sync_dist": True,
            "on_step": True,
            "prog_bar": True,
            "logger": True,
        }
        # self.save_hyperparameters()

    def _contrastive_step(self, batch):
        """Run both augmented views through the model; return (loss, batch size)."""
        inputs, inputs_2 = batch["image"], batch["image_2"]
        outputs_v1, _hidden_v1 = self.model(inputs)
        outputs_v2, _hidden_v2 = self.model(inputs_2)
        # Flatten the (C, D, H, W) output volume of each sample to one vector.
        flat_out_v1 = outputs_v1.flatten(start_dim=1, end_dim=4)
        flat_out_v2 = outputs_v2.flatten(start_dim=1, end_dim=4)
        return self.contrastive_loss(flat_out_v1, flat_out_v2), inputs.shape[0]

    def training_step(self, batch, batch_idx):
        """One contrastive-learning optimization step."""
        cl_loss, batch_size = self._contrastive_step(batch)
        # BUG FIX: `log_kwargs` was referenced as a bare name (NameError at
        # runtime); it is an instance attribute.
        self.log("train_loss", cl_loss, batch_size=batch_size, **self.log_kwargs)
        return cl_loss

    def validation_step(self, batch, batch_idx):
        """Same computation as training_step, logged as the validation loss."""
        val_loss, batch_size = self._contrastive_step(batch)
        self.log("val_loss", val_loss, batch_size=batch_size, **self.log_kwargs)
        return {"val_loss": val_loss, "val_number": batch_size}

    def validation_epoch_end(self, outputs):
        """Log the per-sample mean contrastive loss and the run hyperparameters."""
        # BUG FIX: `torch` is not imported at module level in this file; import it
        # locally so the epoch-level reduction does not raise NameError.
        import torch

        if not outputs:  # nothing to aggregate (e.g. sanity check with 0 batches)
            return
        val_loss, num_items = 0, 0
        for output in outputs:
            val_loss += output["val_loss"].sum().item()
            num_items += output["val_number"]
        mean_val_loss = torch.tensor(val_loss / num_items)
        self.logger.log_hyperparams(
            params={
                "model": self.model_name,
                **self.model_dict,
                "temperature": self.temperature,
                "data": self.trainer.datamodule.json_path,
                "ds_ratio": self.trainer.datamodule.downsample_ratio,
                "batch_size": self.trainer.datamodule.batch_size,
                "distribution": self.trainer.datamodule.dist,
                # "benchmark": self.trainer.benchmark,
                "max_epochs": self.trainer.max_epochs,
                "precision": self.trainer.precision,
            },
            metrics={"contrastive_loss": mean_val_loss},
        )
if __name__ == "__main__":
    # Entry point: LightningCLI parses the config/CLI args and runs the trainer.
    cli = LightningCLI(save_config_overwrite=True)
| Python |
3D | chenz53/MIM-Med3D | code/experiments/ssl/mae_pretrain_main.py | .py | 2,712 | 79 | import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities.cli import LightningCLI
from models import MAE
from torch.nn import L1Loss
from monai.inferers import SlidingWindowInferer
from utils.schedulers import LinearWarmupCosineAnnealingLR
import data
import optimizers
class MAEtrainer(pl.LightningModule):
    """Pretraining on 3D Imaging with a Masked Auto Encoder.

    Minimizes an L1 reconstruction loss computed only on the masked patches.
    """

    def __init__(
        self, model_name: str, model_dict: dict,
    ):
        super().__init__()
        self.model_name = model_name
        self.model_dict = model_dict
        self.model = MAE(**model_dict)
        self.recon_loss = L1Loss()
        self.recon_patches = []
        # self.save_hyperparameters()

    def _masked_l1(self, batch):
        """Forward the MAE on batch["image"]; return (masked-patch L1 loss, batch size)."""
        pred_pixel_values, patches, batch_range, masked_indices = self.model(
            batch["image"]
        )
        loss = self.recon_loss(
            pred_pixel_values, patches[batch_range, masked_indices]
        )
        return loss, pred_pixel_values.shape[0]

    def training_step(self, batch, batch_idx):
        loss, batch_size = self._masked_l1(batch)
        self.log("train/l1_loss", loss, batch_size=batch_size, sync_dist=True)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        loss, batch_size = self._masked_l1(batch)
        self.log("val/l1_loss", loss, batch_size=batch_size, sync_dist=True)
        return {"val_loss": loss, "val_number": batch_size}

    def validation_epoch_end(self, outputs):
        """Log the per-batch average reconstruction loss plus run hyperparameters."""
        total = sum(o["val_loss"].sum().item() for o in outputs)
        mean_val_loss = torch.tensor(total / len(outputs))
        self.log(
            "val/l1_loss_avg", mean_val_loss, sync_dist=True,
        )
        self.logger.log_hyperparams(
            params={
                "model": self.model_name,
                **self.model_dict,
                "data": self.trainer.datamodule.json_path,
                "ds_ratio": self.trainer.datamodule.downsample_ratio,
                "batch_size": self.trainer.datamodule.batch_size,
                "distribution": self.trainer.datamodule.dist,
                # "benchmark": self.trainer.benchmark,
                "max_epochs": self.trainer.max_epochs,
                "precision": self.trainer.precision,
            },
            metrics={"l1_loss": mean_val_loss},
        )
if __name__ == "__main__":
    # Entry point: LightningCLI parses the config/CLI args and runs the trainer.
    cli = LightningCLI(save_config_overwrite=True)
| Python |
3D | chenz53/MIM-Med3D | code/experiments/sl/single_seg_main.py | .py | 7,103 | 207 | from typing import Union, Optional, Sequence
from monai.losses import DiceCELoss
from monai.inferers import sliding_window_inference
from monai.transforms import AsDiscrete
from monai.metrics import DiceMetric
from models import UNETR, UperNetSwin, UperNetVAN
from monai.networks.nets import SegResNet
from monai.data import decollate_batch
import numpy as np
import torch
import data
import optimizers
# import mlflow
import pytorch_lightning as pl
# from pytorch_lightning import Trainer
# from pytorch_lightning.loggers import MLFlowLogger
from pytorch_lightning.utilities.cli import LightningCLI
class SingleSegtrainer(pl.LightningModule):
    """Supervised 3D segmentation trainer for single-label (argmax/one-hot) tasks.

    Wraps one of several segmentation backbones behind a common Lightning loop:
    Dice+CE loss, sliding-window inference at validation/test time, and mean
    Dice tracking across epochs.
    """
    def __init__(self, num_classes: int, model_name: str, model_dict: dict):
        super().__init__()
        self.model_name = model_name
        self.model_dict = model_dict
        # Backbone selection by name; model_dict is forwarded verbatim.
        if model_name.split("_")[0] == "unetr":
            self.model = UNETR(**model_dict)
        elif model_name == "segresnet":
            self.model = SegResNet(**model_dict)
        elif model_name.startswith("upernet_swin"):
            self.model = UperNetSwin(**model_dict)
        elif model_name.startswith("upernet_van"):
            self.model = UperNetVAN(**model_dict)
        # NOTE(review): an unrecognized model_name leaves self.model unset and
        # surfaces later as an AttributeError.
        self.loss_function = DiceCELoss(to_onehot_y=True, softmax=True)
        # Post-processing: argmax the prediction, then one-hot both pred and label.
        self.post_pred = AsDiscrete(argmax=True, to_onehot=num_classes)
        self.post_label = AsDiscrete(to_onehot=num_classes)
        self.dice_metric = DiceMetric(
            include_background=True, reduction="mean", get_not_nans=False
        )
        self.best_val_dice = 0
        self.best_val_epoch = 0
        # self.dice_vals = []
        self.metric_values = []  # mean Dice per validation epoch
        self.epoch_loss_values = []  # mean training loss per epoch
    def forward(self, x):
        """Delegate to the wrapped segmentation backbone."""
        return self.model(x)
    def training_step(self, batch, batch_idx):
        """One optimization step on a cropped patch: Dice+CE loss."""
        images, labels = batch["image"], batch["label"]
        batch_size = images.shape[0]
        output = self.forward(images)
        loss = self.loss_function(output, labels)
        # logging
        self.log(
            "train/dice_loss_step",
            loss,
            batch_size=batch_size,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
        )
        return {"loss": loss}
    def training_epoch_end(self, outputs):
        """Log and record the mean training loss of the finished epoch."""
        avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
        self.log(
            "train/dice_loss_avg",
            avg_loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        self.epoch_loss_values.append(avg_loss.detach().cpu().numpy())
    def validation_step(self, batch, batch_idx):
        """Full-volume validation via sliding-window inference; accumulates Dice."""
        images, labels = batch["image"], batch["label"]
        batch_size = images.shape[0]
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        outputs = sliding_window_inference(
            images,
            roi_size,
            sw_batch_size,
            self.forward,  # the output image will be cropped to the original image size
        )
        loss = self.loss_function(outputs, labels)
        # compute dice score
        outputs = [self.post_pred(i) for i in decollate_batch(outputs)]
        labels = [self.post_label(i) for i in decollate_batch(labels)]
        self.dice_metric(y_pred=outputs, y=labels)
        # NOTE(review): aggregate() reflects every batch accumulated so far this
        # epoch (the buffer is only reset in validation_epoch_end).
        dice = self.dice_metric.aggregate().item()
        # self.dice_metric.reset()
        # compute mean dice score per validation epoch
        # self.dice_vals.append(dice)
        # logging
        self.log(
            "val/dice_loss_step",
            loss,
            batch_size=batch_size,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
        )
        return {"val_loss": loss, "val_number": len(outputs), "dice": dice}
    def validation_epoch_end(self, outputs):
        """Aggregate epoch metrics, log them, and track the best mean Dice."""
        val_loss, num_items = 0, 0
        dice_vals = []
        for output in outputs:
            val_loss += output["val_loss"].sum().item()
            num_items += output["val_number"]
            dice_vals.append(output["dice"])
        mean_val_dice = np.mean(dice_vals)
        # self.dice_vals = []
        self.dice_metric.reset()
        mean_val_loss = torch.tensor(val_loss / num_items)
        # logging
        self.log(
            "val/dice_loss_avg",
            mean_val_loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        self.log(
            "val/dice_score_avg",
            mean_val_dice,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        self.logger.log_hyperparams(
            params={
                "model": self.model_name,
                **self.model_dict,
                "data": self.trainer.datamodule.json_path,
                "ds_ratio": self.trainer.datamodule.downsample_ratio,
                "batch_size": self.trainer.datamodule.batch_size,
                "distribution": self.trainer.datamodule.dist,
                # "benchmark": self.trainer.benchmark,
                "max_epochs": self.trainer.max_epochs,
                "precision": self.trainer.precision,
            },
            metrics={"dice_loss": mean_val_loss, "dice_score": mean_val_dice},
        )
        if mean_val_dice > self.best_val_dice:
            self.best_val_dice = mean_val_dice
            self.best_val_epoch = self.current_epoch
        print(
            f"current epoch: {self.current_epoch} "
            f"current mean dice: {mean_val_dice:.4f}"
            f"\nbest mean dice: {self.best_val_dice:.4f} "
            f"at epoch: {self.best_val_epoch}"
        )
        self.metric_values.append(mean_val_dice)
    def test_step(self, batch, batch_idx):
        """Accumulate Dice over the test set; per-class values read in test_epoch_end."""
        images, labels = batch["image"], batch["label"]
        batch_size = images.shape[0]
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        outputs = sliding_window_inference(
            images,
            roi_size,
            sw_batch_size,
            self.forward,  # the output image will be cropped to the original image size
        )
        loss = self.loss_function(outputs, labels)
        # compute dice score
        outputs = [self.post_pred(i) for i in decollate_batch(outputs)]
        labels = [self.post_label(i) for i in decollate_batch(labels)]
        self.dice_metric(y_pred=outputs, y=labels)
        # dice = self.dice_metric.aggregate().item()
        # return {"dice": dice}
    def test_epoch_end(self, outputs):
        """Print per-class and overall mean Dice from the metric's raw buffer."""
        # dice_vals = []
        # for output in outputs:
        #     dice_vals.append(output["dice"])
        # mean_val_dice = np.mean(dice_vals)
        # mean_val_dice = self.dice_metric_test.aggregate().item()
        # self.dice_metric.reset()
        # print(f"avg dice score: {mean_val_dice} ")
        mean_val_dice = torch.nanmean(self.dice_metric.get_buffer(), dim=0)
        print(mean_val_dice)
        print(torch.mean(mean_val_dice))
if __name__ == "__main__":
    # Entry point: LightningCLI parses the config/CLI args and runs the trainer.
    cli = LightningCLI(save_config_overwrite=True)
| Python |
3D | chenz53/MIM-Med3D | code/experiments/sl/multi_seg_main.py | .py | 6,865 | 201 | from monai.losses import DiceCELoss
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from models import UNETR
from monai.networks.nets import SegResNet
from monai.data import decollate_batch
from monai.transforms import Compose, Activations, AsDiscrete, EnsureType
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities.cli import LightningCLI
import data
import optimizers
class MultiSegtrainer(pl.LightningModule):
    """Supervised 3D segmentation trainer for multi-label (sigmoid/threshold) tasks.

    Unlike SingleSegtrainer, labels are already multi-channel binary maps, so the
    loss uses sigmoid (not softmax) and predictions are thresholded at 0.5.
    """
    def __init__(
        self, model_name: str, model_dict: dict,
    ):
        super().__init__()
        self.model_name = model_name
        self.model_dict = model_dict
        # Backbone selection by name; model_dict is forwarded verbatim.
        if model_name.split("_")[0] == "unetr":
            self.model = UNETR(**model_dict)
        elif model_name == "segresnet":
            self.model = SegResNet(**model_dict)
        # NOTE(review): an unrecognized model_name leaves self.model unset and
        # surfaces later as an AttributeError.
        self.loss_function = DiceCELoss(to_onehot_y=False, sigmoid=True)
        # self.post_pred = AsDiscrete(argmax=True, to_onehot=num_classes)
        # self.post_label = AsDiscrete(to_onehot=num_classes)
        # Multi-label post-processing: per-channel sigmoid then 0.5 threshold.
        self.post_trans = Compose(
            [EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)]
        )
        self.dice_metric = DiceMetric(
            include_background=True, reduction="mean", get_not_nans=False
        )
        self.best_val_dice = 0
        self.best_val_epoch = 0
        self.dice_vals = []  # per-batch Dice values within the current val epoch
        # self.dice_vals_tc = []
        # self.dice_vals_wt = []
        # self.dice_vals_et = []
        self.metric_values = []  # mean Dice per validation epoch
        self.epoch_loss_values = []  # mean training loss per epoch
    def forward(self, x):
        """Delegate to the wrapped segmentation backbone."""
        return self.model(x)
    def training_step(self, batch, batch_idx):
        """One optimization step on a cropped patch: Dice+CE loss."""
        images, labels = batch["image"], batch["label"]
        batch_size = images.shape[0]
        output = self.forward(images)
        loss = self.loss_function(output, labels)
        # logging
        self.log(
            "train/dice_loss_step",
            loss,
            batch_size=batch_size,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
        )
        return {"loss": loss}
    def training_epoch_end(self, outputs):
        """Log and record the mean training loss of the finished epoch."""
        avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
        self.log(
            "train/dice_loss_avg",
            avg_loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        self.epoch_loss_values.append(avg_loss.detach().cpu().numpy())
    def validation_step(self, batch, batch_idx):
        """Full-volume validation via sliding-window inference; accumulates Dice."""
        images, labels = batch["image"], batch["label"]
        batch_size = images.shape[0]
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        outputs = sliding_window_inference(
            images,
            roi_size,
            sw_batch_size,
            self.forward,  # the output image will be cropped to the original image size
        )
        loss = self.loss_function(outputs, labels)
        # compute dice score
        outputs = [self.post_trans(i) for i in decollate_batch(outputs)]
        # labels = [self.post_label(i) for i in decollate_batch(labels)]
        self.dice_metric(y_pred=outputs, y=labels)
        # NOTE(review): aggregate() reflects every batch accumulated so far this
        # epoch (the buffer is only reset in validation_epoch_end).
        dice = self.dice_metric.aggregate().item()
        # compute mean dice score per validation epoch
        self.dice_vals.append(dice)
        # logging
        self.log(
            "val/dice_loss_step",
            loss,
            batch_size=batch_size,
            on_step=True,
            on_epoch=False,
            prog_bar=True,
            logger=True,
        )
        return {"val_loss": loss, "val_number": len(outputs)}
    def validation_epoch_end(self, outputs):
        """Aggregate epoch metrics, log them, and track the best mean Dice."""
        val_loss, num_items = 0, 0
        for output in outputs:
            val_loss += output["val_loss"].sum().item()
            num_items += output["val_number"]
        mean_val_dice = np.mean(self.dice_vals)
        self.dice_vals = []
        self.dice_metric.reset()
        mean_val_loss = torch.tensor(val_loss / num_items)
        # logging
        self.log(
            "val/dice_loss_avg",
            mean_val_loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        self.log(
            "val/dice_score_avg",
            mean_val_dice,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        self.logger.log_hyperparams(
            params={
                "model": self.model_name,
                **self.model_dict,
                "data": self.trainer.datamodule.json_path,
                "ds_ratio": self.trainer.datamodule.downsample_ratio,
                "batch_size": self.trainer.datamodule.batch_size,
                "distribution": self.trainer.datamodule.dist,
                # "benchmark": self.trainer.benchmark,
                "max_epochs": self.trainer.max_epochs,
                "precision": self.trainer.precision,
            },
            metrics={"dice_loss": mean_val_loss, "dice_score": mean_val_dice},
        )
        if mean_val_dice > self.best_val_dice:
            self.best_val_dice = mean_val_dice
            self.best_val_epoch = self.current_epoch
        print(
            f"current epoch: {self.current_epoch} "
            f"current mean dice: {mean_val_dice:.4f}"
            f"\nbest mean dice: {self.best_val_dice:.4f} "
            f"at epoch: {self.best_val_epoch}"
        )
        self.metric_values.append(mean_val_dice)
    def test_step(self, batch, batch_idx):
        """Accumulate Dice over the test set; per-class values read in test_epoch_end."""
        images, labels = batch["image"], batch["label"]
        batch_size = images.shape[0]
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        outputs = sliding_window_inference(
            images,
            roi_size,
            sw_batch_size,
            self.forward,  # the output image will be cropped to the original image size
        )
        loss = self.loss_function(outputs, labels)
        # compute dice score
        outputs = [self.post_trans(i) for i in decollate_batch(outputs)]
        # labels = [self.post_label(i) for i in decollate_batch(labels)]
        self.dice_metric(y_pred=outputs, y=labels)
        # dice = self.dice_metric.aggregate().item()
        # return {"dice": dice}
    def test_epoch_end(self, outputs):
        """Print per-class and overall mean Dice from the metric's raw buffer."""
        # dice_vals = []
        # for output in outputs:
        #     dice_vals.append(output["dice"])
        # mean_val_dice = np.mean(dice_vals)
        # mean_val_dice = self.dice_metric_test.aggregate().item()
        # self.dice_metric.reset()
        # print(f"avg dice score: {mean_val_dice} ")
        mean_val_dice = torch.nanmean(self.dice_metric.get_buffer(), dim=0)
        print(mean_val_dice)
        print(torch.mean(mean_val_dice))
if __name__ == "__main__":
    # Entry point: LightningCLI parses the config/CLI args and runs the trainer.
    cli = LightningCLI(save_config_overwrite=True)
| Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.