keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | jianlin-cheng/TransFun | preprocessing/generate_msa.py | .py | 10,510 | 205 | import os, subprocess
from typing import Any, Mapping, MutableMapping, Optional, Sequence, Union
from absl import logging
from tools import jackhmmer, parsers, residue_constants, msa_identifiers, hhblits
import numpy as np
import shutil
from absl import app
from absl import logging
import multiprocessing
from glob import glob
import sys
# Tool binaries and genetic-database locations used by the MSA pipeline.
jackhmmer_binary_path = shutil.which('jackhmmer')  # resolved from PATH
uniref90_database_path = "/data_bp/pycharm/Genetic_Databases/uniref90/uniref90.fasta"
mgnify_database_path = "/data_bp/pycharm/Genetic_Databases/mgnify/mgy_clusters_2018_12.fa"
small_bfd_database_path = "/data_bp/pycharm/Genetic_Databases/small_bfd/bfd-first_non_consensus_sequences.fasta"
# NOTE(review): the two paths below duplicate the small_bfd fasta path — an
# hhblits binary and a uniclust30 database are clearly not a .fasta file;
# looks like copy-paste placeholders. Confirm the intended locations.
hhblits_binary_path = "/data_bp/pycharm/Genetic_Databases/small_bfd/bfd-first_non_consensus_sequences.fasta"
uniclust30_database_path = "/data_bp/pycharm/Genetic_Databases/small_bfd/bfd-first_non_consensus_sequences.fasta"
# Feature dictionaries map feature names to numpy arrays.
FeatureDict = MutableMapping[str, np.ndarray]
def make_msa_features(msas: Sequence[parsers.Msa], combined_out_path: str) -> FeatureDict:
    """Constructs a feature dict of MSA features and writes the combined MSA.

    Args:
        msas: Parsed MSAs (e.g. uniref90, bfd, mgnify). Duplicate sequences
            across MSAs are kept only once (first occurrence wins).
        combined_out_path: Path where the deduplicated combined MSA is written
            as ">description\\nsequence" records.

    Returns:
        Mapping of MSA feature names to numpy arrays.

    Raises:
        ValueError: If no MSA is provided, or an MSA has no sequences.
    """
    if not msas:
        raise ValueError('At least one MSA must be provided.')
    int_msa = []
    deletion_matrix = []
    uniprot_accession_ids = []
    species_ids = []
    seen_sequences = []  # insertion-ordered unique sequences (for file output)
    seen_set = set()     # O(1) membership check (was O(n) list scan per sequence)
    name_identifiers = []
    for msa_index, msa in enumerate(msas):
        if not msa:
            raise ValueError(f'MSA {msa_index} must contain at least one sequence.')
        for sequence_index, sequence in enumerate(msa.sequences):
            if sequence in seen_set:
                continue
            seen_set.add(sequence)
            seen_sequences.append(sequence)
            name_identifiers.append(msa.descriptions[sequence_index])
            int_msa.append([residue_constants.HHBLITS_AA_TO_ID[res] for res in sequence])
            deletion_matrix.append(msa.deletion_matrix[sequence_index])
            identifiers = msa_identifiers.get_identifiers(msa.descriptions[sequence_index])
            uniprot_accession_ids.append(identifiers.uniprot_accession_id.encode('utf-8'))
            species_ids.append(identifiers.species_id.encode('utf-8'))
    num_res = len(msas[0].sequences[0])
    num_alignments = len(int_msa)
    features = {}
    features['deletion_matrix_int'] = np.array(deletion_matrix, dtype=np.int32)
    features['msa'] = np.array(int_msa, dtype=np.int32)
    features['num_alignments'] = np.array([num_alignments] * num_res, dtype=np.int32)
    features['msa_uniprot_accession_identifiers'] = np.array(uniprot_accession_ids, dtype=np.object_)
    features['msa_species_identifiers'] = np.array(species_ids, dtype=np.object_)
    # Persist the combined, deduplicated MSA for downstream hhfilter runs.
    with open(combined_out_path, 'w') as f:
        for sequence, description in zip(seen_sequences, name_identifiers):
            f.write(">%s\n" % description)
            f.write("%s\n" % sequence)
    # Bug fix: the function previously fell off the end and returned None
    # despite its FeatureDict return annotation.
    return features
def run_msa_tool(msa_runner, input_fasta_path: str, msa_out_path: str, msa_format: str, use_precomputed_msas: bool, base_path: str) -> Mapping[str, Any]:
    """Runs an MSA tool, checking if output already exists first.

    Args:
        msa_runner: Tool wrapper exposing ``query(path, base_path=...)`` that
            returns a list of result dicts keyed by format (e.g. 'sto', 'a3m').
        input_fasta_path: Query FASTA file handed to the runner.
        msa_out_path: File the raw MSA text is written to / read from.
        msa_format: Result key and on-disk format ('sto' or 'a3m').
        use_precomputed_msas: When True and ``msa_out_path`` exists, skip the
            search and load the cached file instead.
        base_path: Working directory forwarded to the runner.

    Returns:
        Mapping with a single entry ``{msa_format: msa_text}``.
    """
    if not use_precomputed_msas or not os.path.exists(msa_out_path):
        result = msa_runner.query(input_fasta_path, base_path=base_path)[0]
        with open(msa_out_path, 'w') as f:
            f.write(result[msa_format])
    else:
        # Bug fix: reading a cached MSA is informational, not an error —
        # was logged with logging.error.
        logging.info('Reading MSA from file %s', msa_out_path)
        with open(msa_out_path, 'r') as f:
            result = {msa_format: f.read()}
    return result
class DataPipeline:
    """Runs the alignment tools (jackhmmer / hhblits) and assembles the
    combined MSA features for a single query protein."""

    def __init__(self, jackhmmer_binary_path: str, hhblits_binary_path: str, uniref90_database_path: str, mgnify_database_path: str, small_bfd_database_path: Optional[str], uniclust30_database_path: str, bfd_database_path: Optional[str]):
        """Initializes the data_bp pipeline (one search runner per database)."""
        self.jackhmmer_uniref90_runner = jackhmmer.Jackhmmer(binary_path=jackhmmer_binary_path, database_path=uniref90_database_path)
        # self.jackhmmer_small_bfd_runner = jackhmmer.Jackhmmer(binary_path=jackhmmer_binary_path, database_path=small_bfd_database_path)
        self.hhblits_bfd_uniclust_runner = hhblits.HHBlits(binary_path=hhblits_binary_path, databases=[bfd_database_path, uniclust30_database_path])
        self.jackhmmer_mgnify_runner = jackhmmer.Jackhmmer(binary_path=jackhmmer_binary_path, database_path=mgnify_database_path)
        self.use_precomputed_msas = False
        self.mgnify_max_hits = 501
        self.uniref_max_hits = 10000

    def process(self, input_fasta_path: str, msa_output_dir: str, base_path: str, protein: str, combine: bool, make_diverse: bool) -> FeatureDict:
        """Runs alignment tools on the input sequence and creates features.

        Returns:
            The MSA feature dict when the combined MSA was (re)built, or None
            when `combined.a3m` already existed and assembly was skipped.
        """
        uniref90_msa = "None"
        bfd_msa = "None"
        mgnify_msa = "None"
        msa_features = None
        combined_out_path = os.path.join(msa_output_dir, 'combined.a3m')
        diverse_out_path = os.path.join(msa_output_dir, 'diverse_{}.a3m')
        with open(input_fasta_path) as f:
            input_fasta_str = f.read()
        input_seqs, input_descs = parsers.parse_fasta(input_fasta_str)
        if len(input_seqs) != 1:
            raise ValueError(f'More than one input sequence found in {input_fasta_path}.')
        if os.path.isfile(combined_out_path):
            # Fixed log level: this is a normal skip, not an error.
            logging.info("Combined already generated for {}".format(input_fasta_path))
        else:
            # --- UniRef90 (jackhmmer, Stockholm format) ---
            uniref90_out_path = os.path.join(msa_output_dir, 'uniref90_hits.sto')
            if not os.path.isfile(uniref90_out_path):
                logging.info("Generating msa for {} from {}".format(protein, "uniref90"))
                jackhmmer_uniref90_result = run_msa_tool(self.jackhmmer_uniref90_runner, input_fasta_path, uniref90_out_path, 'sto', self.use_precomputed_msas, base_path=base_path)
                uniref90_msa = parsers.parse_stockholm(jackhmmer_uniref90_result['sto'])
                uniref90_msa = uniref90_msa.truncate(max_seqs=self.uniref_max_hits)
            else:
                if combine and not os.path.isfile(combined_out_path):
                    logging.info("Loading msa for {} from {} @ {}".format(protein, "uniref90", uniref90_out_path))
                    with open(uniref90_out_path, 'r') as f:
                        jackhmmer_uniref90_result = {'sto': f.read()}
                    uniref90_msa = parsers.parse_stockholm(jackhmmer_uniref90_result['sto'])
                    uniref90_msa = uniref90_msa.truncate(max_seqs=self.uniref_max_hits)
            # --- MGnify (jackhmmer, Stockholm format) ---
            mgnify_out_path = os.path.join(msa_output_dir, 'mgnify_hits.sto')
            if not os.path.isfile(mgnify_out_path):
                logging.info("Generating msa for {} from {}".format(protein, "mgnify"))
                jackhmmer_mgnify_result = run_msa_tool(self.jackhmmer_mgnify_runner, input_fasta_path, mgnify_out_path, 'sto', self.use_precomputed_msas, base_path=base_path)
                mgnify_msa = parsers.parse_stockholm(jackhmmer_mgnify_result['sto'])
                mgnify_msa = mgnify_msa.truncate(max_seqs=self.mgnify_max_hits)
            else:
                if combine and not os.path.isfile(combined_out_path):
                    logging.info("Loading msa for {} from {} @ {}".format(protein, "mgnify", mgnify_out_path))
                    with open(mgnify_out_path, 'r') as f:
                        jackhmmer_mgnify_result = {'sto': f.read()}
                    mgnify_msa = parsers.parse_stockholm(jackhmmer_mgnify_result['sto'])
                    mgnify_msa = mgnify_msa.truncate(max_seqs=self.mgnify_max_hits)
            # --- BFD + Uniclust30 (hhblits, a3m format) ---
            bfd_out_path = os.path.join(msa_output_dir, 'bfd_uniclust_hits.a3m')
            if not os.path.isfile(bfd_out_path):
                logging.info("Generating msa for {} from {}".format(protein, "Bfd"))
                hhblits_bfd_uniclust_result = run_msa_tool(self.hhblits_bfd_uniclust_runner, input_fasta_path, bfd_out_path, 'a3m', self.use_precomputed_msas, base_path=base_path)
                bfd_msa = parsers.parse_a3m(hhblits_bfd_uniclust_result['a3m'])
                # jackhmmer_small_bfd_result = run_msa_tool(self.jackhmmer_small_bfd_runner, input_fasta_path, bfd_out_path, 'sto', self.use_precomputed_msas, base_path=base_path)
                # bfd_msa = parsers.parse_stockholm(jackhmmer_small_bfd_result['sto'])
            else:
                if combine and not os.path.isfile(combined_out_path):
                    logging.info("Loading msa for {} from {} @ {}".format(protein, "small_bfd", bfd_out_path))
                    with open(bfd_out_path, 'r') as f:
                        hhblits_small_bfd_result = {'a3m': f.read()}
                    # Bug fix: the cached hits file is a3m, so it must be parsed
                    # with parse_a3m (parse_stockholm was applied to a3m text).
                    bfd_msa = parsers.parse_a3m(hhblits_small_bfd_result['a3m'])
            msa_features = make_msa_features((uniref90_msa, bfd_msa, mgnify_msa), combined_out_path=combined_out_path)
        if make_diverse:
            # hhfilter the combined MSA down to several diversity levels,
            # skipping levels that were already produced (de-duplicated from
            # four copy-pasted call sites into one loop).
            for diff in (64, 128, 256, 512):
                out_path = diverse_out_path.format(diff)
                if not os.path.isfile(out_path):
                    subprocess.call(
                        'hhfilter -i {} -o {} -diff {}'.format(combined_out_path, out_path, diff), shell=True)
        # logging.info('Uniref90 MSA size: %d sequences.', len(uniref90_msa))
        # logging.info('BFD MSA size: %d sequences.', len(bfd_msa))
        # logging.info('MGnify MSA size: %d sequences.', len(mgnify_msa))
        return msa_features
def run_main(directory):
    """Build the MSA pipeline and process one protein directory."""
    # NOTE(review): `bfd_database_path` is never defined anywhere in this
    # module, so this call raises NameError at runtime — confirm where the
    # BFD database path is supposed to come from.
    pipeline = DataPipeline(jackhmmer_binary_path, hhblits_binary_path, uniref90_database_path, mgnify_database_path, small_bfd_database_path, uniclust30_database_path, bfd_database_path)
    # `base` is a module-level global assigned after this definition; it is
    # looked up at call time, so the ordering is fine.
    base_path = base+"{}".format(directory)
    logging.info("Generating for protein {}".format(directory))
    input_path = base_path+"/{}.fasta".format(directory)
    output_path = base_path+"/msas"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    pipeline.process(input_fasta_path=input_path, msa_output_dir=output_path, base_path=base_path, protein=directory, combine=True, make_diverse=True)
# Root directory of this shard of proteins; the shard id comes from argv[1].
base = "/storage/htc/bdm/Frimpong/TransFun/msa_files/two/{}/".format(sys.argv[1])
directories = [x for x in os.listdir(base)]
logging.info("Started")
# NOTE(review): no `if __name__ == '__main__':` guard — on spawn-based
# platforms multiprocessing would re-execute this module on import; this
# presumably relies on fork start method. Confirm.
pool = multiprocessing.Pool(4)
pool.map(run_main, directories)
pool.close()
| Python |
3D | jianlin-cheng/TransFun | plots/protein_structure.py | .py | 2,489 | 117 | # import networkx as nx
# import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from biopandas.pdb import PandasPdb
import random
import networkx as nx
import Constants
from Dataset.Dataset import load_dataset
import matplotlib.pyplot as plt
# Ad-hoc inspection: load a single preprocessed protein graph (P83847) and
# print it; exit() stops the script before the plotting code below runs.
kwargs = {
    'prot_ids': ['P83847', ],
    'session': 'selected'
}
dataset = load_dataset(root=Constants.ROOT, **kwargs)
protein = dataset[0]
print(protein)
node_coords = protein.pos          # (N, 3) residue coordinates
edges = protein.edge_index         # graph connectivity
exit()
# x, y, z = node_coords[:, 0], node_coords[:, 1], node_coords[:, 2]
# # print(x.shape)
# # print(y.shape)
# # print(z.shape)
#
# fig = plt.figure()
# ax = plt.axes(projection='3d')
#
# ax.scatter3D(x, y, z, 'gray')
# # ax.plot3D(x, y, z, 'gray')
#
# fig.tight_layout()
# plt.show()
# import networkx as nx
# import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from biopandas.pdb import PandasPdb
import networkx as nx
import numpy as np
import Constants
from Dataset.Dataset import load_dataset
import matplotlib.pyplot as plt
# Second ad-hoc inspection block: load protein A0A023FBW4 and print it.
# NOTE(review): the unconditional exit() below makes the plot_residues code
# after it unreachable when run as a script — confirm whether it should stay.
kwargs = {
    'prot_ids': ['A0A023FBW4', ],
    'session': 'selected'
}
dataset = load_dataset(root=Constants.ROOT, **kwargs)
protein = dataset[0]
print(protein)
exit()
def plot_residues(protein, add_edges=False, limit=25):
    """Scatter-plot the residue coordinates of `protein` in 3D, optionally
    drawing edges between a small random subset of residues.

    Args:
        protein: graph object with `pos` ((N, 3) tensor) and `edge_index`.
        add_edges: if True, sample 25 residues and draw edges among them.
        limit: nominal subset size. NOTE(review): immediately overwritten by
            len(node_coords) below, so the parameter has no effect — confirm
            intended behaviour.
    """
    node_coords = protein.pos.numpy()
    limit = len(node_coords)
    if add_edges:
        # A small fixed sample keeps the drawn edges legible.
        indicies = random.sample(range(0, limit), 25)
    else:
        # Sampling `limit` of `limit` items is a random permutation of all residues.
        indicies = random.sample(range(0, limit), limit)
    edges = protein.edge_index.numpy()
    some_edges = []
    edges = [i for i in zip(edges[0], edges[1])]
    for i, j in edges:
        # Keep only edges whose both endpoints were sampled; store each as
        # ([x1, x2], [y1, y2], [z1, z2]) ready for ax.plot3D.
        if i in indicies and j in indicies:
            some_edges.append(([node_coords[i][0], node_coords[j][0]],
                               [node_coords[i][1], node_coords[j][1]],
                               [node_coords[i][2], node_coords[j][2]]))
    node_coords = np.array([node_coords[i] for i in indicies])
    x, y, z = node_coords[:, 0], node_coords[:, 1], node_coords[:, 2]
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.scatter3D(x, y, z)
    if add_edges:
        for x in some_edges:
            ax.plot3D(x[0], x[1], x[2])  # , 'gray')
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
    # fig.tight_layout()
    plt.title("Some protein")
    plt.show()
plot_residues(protein) | Python |
3D | jianlin-cheng/TransFun | Dataset/Dataset.py | .py | 10,101 | 234 | import math
import os
import pickle
import subprocess
import torch
import os.path as osp
from torch_geometric.data import Dataset, download_url, HeteroData
import Constants
from Dataset.distanceTransform import myDistanceTransform
from Dataset.myKnn import myKNNGraph
from Dataset.myRadiusGraph import myRadiusGraph
from Dataset.utils import find_files, process_pdbpandas, get_knn, generate_Identity_Matrix
import torch_geometric.transforms as T
from torch_geometric.data import Data
from Dataset.AdjacencyTransform import AdjacencyFeatures
from preprocessing.utils import pickle_load, pickle_save, get_sequence_from_pdb, fasta_to_dictionary, collect_test, \
read_test_set, read_test, cafa_fasta_to_dictionary
import pandas as pd
import random
class PDBDataset(Dataset):
    """
    PyTorch Geometric dataset of protein graphs built from AlphaFold PDB
    models plus precomputed ESM embeddings.

    :param root: dataset root directory (expects alphafold/, esm/, processed/)
    :type root: str
    :param transform: transformation function for data_bp augmentation, defaults to None
    :type transform: function, optional

    Keyword options (via **kwargs): ``seq_id``, ``ont``, ``session``
    ('train' / 'validation' / 'test' / 'selected'), ``prot_ids``,
    ``test_file``, ``pdb_path``.
    """

    def __init__(self, root, transform=None, pre_transform=None, pre_filter=None, **kwargs):
        self.root = root
        self.seq_id = kwargs.get('seq_id', None)
        self.ont = kwargs.get('ont', None)            # ontology (MF/BP/CC)
        self.session = kwargs.get('session', None)
        self.prot_ids = kwargs.get('prot_ids', [])
        self.test_file = kwargs.get('test_file', None)
        self.pdb_pth = kwargs.get('pdb_path', self.root + "/alphafold/")
        self.raw_file_list = []
        self.processed_file_list = []
        if self.session == "selected":
            # Explicit list of protein ids supplied by the caller.
            self.data = self.prot_ids
            for i in self.data:
                self.raw_file_list.append('{}'.format(i))
                self.processed_file_list.append('{}.pt'.format(i))
        else:
            if self.session == "train":
                # Mapping of cluster -> member proteins; every member needs
                # both a raw AlphaFold model and a processed graph file.
                self.data = pickle_load(self.root + "/{}/{}/{}".format(self.seq_id, self.ont, self.session))
                for i in self.data:
                    for j in self.data[i]:
                        self.raw_file_list.append('AF-{}-F1-model_v2.pdb.gz'.format(j))
                        self.processed_file_list.append('{}.pt'.format(j))
            elif self.session == "validation":
                self.data = list(pickle_load(self.root + "{}/{}".format(self.seq_id, self.session)))
                for i in self.data:
                    self.raw_file_list.append('AF-{}-F1-model_v2.pdb.gz'.format(i))
                    self.processed_file_list.append('{}.pt'.format(i))
            elif self.session == "test":
                self.data = self.get_test(self.test_file)
                for i in self.data:
                    self.raw_file_list.append('AF-{}-F1-model_v2.pdb.gz'.format(i))
                    self.processed_file_list.append('{}.pt'.format(i))
        super().__init__(self.root, transform, pre_transform, pre_filter)

    @property
    def raw_dir(self) -> str:
        # Raw AlphaFold models live under the (configurable) pdb path.
        return self.pdb_pth

    @property
    def processed_dir(self) -> str:
        return self.root + "/processed/"

    @property
    def raw_file_names(self):
        return self.raw_file_list

    @property
    def processed_file_names(self):
        return self.processed_file_list

    def download(self):
        # Copy any missing raw AlphaFold models from the shared store.
        # NOTE(review): both branches are currently no-ops (copy/download
        # calls are commented out) — confirm whether this is intentional.
        rem_files = set(self.raw_file_list) - set(find_files(self.raw_dir, suffix="pdb.gz", type="Name"))
        for file in rem_files:
            src = "/data_bp/pycharm/TransFunData/data_bp/alphafold/AF-{}-F1-model_v2.pdb.gz"
            des = self.root + "/raw/{}".format(file)
            if os.path.isfile(src.format(file)):
                pass
                # subprocess.call('cp {} {}'.format(src.format("pdb", file), des), shell=True)
            else:
                pass
                # download

    def process(self):
        # Build graphs only for proteins that have no processed .pt file yet.
        rem_files = set(self.processed_file_list) - set(find_files(self.processed_dir, suffix="pt", type="Name"))
        print("{} unprocessed proteins out of {}".format(len(rem_files), len(self.processed_file_list)))
        chain_id = 'A'
        for file in rem_files:
            protein = file.split(".")[0]
            print("Processing protein {}".format(protein))
            raw_path = self.raw_dir + '{}.pdb.gz'.format(protein)
            # GO labels are stored empty at graph-build time.
            labels = {
                'molecular_function': [],
                'biological_process': [],
                'cellular_component': []
            }
            # Precomputed ESM embeddings, layer 33: per-residue matrix and
            # per-sequence mean vector.
            emb = torch.load(self.root + "/esm/{}.pt".format(protein))
            embedding_features_per_residue = emb['representations'][33]
            embedding_features_per_sequence = emb['mean_representations'][33].view(1, -1)
            if raw_path:
                node_coords, sequence_features, sequence_letters = process_pdbpandas(raw_path, chain_id)
            # else: node_coords, sequence_features, sequence_letters = generate_Identity_Matrix(
            #     embedding_features_per_residue.shape, self.fasta[protein])
            # Residue counts from structure and embedding must agree.
            assert embedding_features_per_residue.shape[0] == node_coords.shape[0]
            assert embedding_features_per_residue.shape[1] == embedding_features_per_sequence.shape[1]
            node_size = node_coords.shape[0]
            names = torch.arange(0, node_size, dtype=torch.int8)
            data = HeteroData()
            data['atoms'].pos = node_coords
            data['atoms'].molecular_function = torch.IntTensor(labels['molecular_function'])
            data['atoms'].biological_process = torch.IntTensor(labels['biological_process'])
            data['atoms'].cellular_component = torch.IntTensor(labels['cellular_component'])
            data['atoms'].sequence_features = sequence_features
            data['atoms'].embedding_features_per_residue = embedding_features_per_residue
            data['atoms'].names = names
            data['atoms'].sequence_letters = sequence_letters
            data['atoms'].embedding_features_per_sequence = embedding_features_per_sequence
            data['atoms'].protein = protein
            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            if self.pre_transform is not None:
                # Assemble the edge-construction pipeline from the
                # (group, name, cutoff, description) recipe tuples.
                _transforms = []
                for i in self.pre_transform:
                    if i[0] == "KNN":
                        kwargs = {'mode': i[1], 'sequence_length': node_size}
                        knn = get_knn(**kwargs)
                        _transforms.append(myKNNGraph(i[1], k=knn, force_undirected=True, ))
                    if i[0] == "DIST":
                        _transforms.append(myRadiusGraph(i[1], r=i[2], loop=False))
                _transforms.append(myDistanceTransform(edge_types=self.pre_transform, norm=True))
                _transforms.append(AdjacencyFeatures(edge_types=self.pre_transform))
                pre_transform = T.Compose(_transforms)
                data = pre_transform(data)
            torch.save(data, osp.join(self.root + "/processed/", f'{protein}.pt'))

    def len(self):
        # Number of clusters (train) or proteins (other sessions).
        return len(self.data)

    def get(self, idx):
        if self.session == "train":
            # Train: pick a random cluster representative each access.
            rep = random.sample(self.data[idx], 1)[0]
            return torch.load(osp.join(self.processed_dir, f'{rep}.pt'))
        elif self.session == "validation" or self.session == "selected" or self.session == "test":
            rep = self.data[idx]
            return torch.load(osp.join(self.processed_dir, f'{rep}.pt'))

    def get_test(self, test_file):
        """Return the list of test protein accessions for the configured
        ontology, excluding proteins with no available structure."""
        # all_test = set(self.all_test.keys())
        # all_test = set(self.all_test)
        # x = list(set(read_test_set("{}supplementary_data/cafa3/benchmark20171115/groundtruth/{}".format(self.root, test_file))))
        # onlystructs_filter = pickle_load("/home/fbqc9/PycharmProjects/TransFun/evaluation/available_structures")
        # onlystructs_filter = set([i[0].split(".")[0] for i in onlystructs_filter if i[1] == True])
        # x = [i for i in x if i in onlystructs_filter]
        data = pd.read_csv(Constants.ROOT + "timebased/test_data", sep="\t")
        data = data.loc[data['ONTOLOGY'] == self.ont]
        missing = set(pickle_load(Constants.ROOT + "timebased/missing_proteins"))
        data = list(set(data['ACC'].to_list()).difference(missing))
        # x = list(pickle_load(test_file)[self.ont])
        return data
def load_dataset(root=None, **kwargs):
    """
    Build a :class:`PDBDataset` rooted at `root`.

    :param root: path to the dataset root directory
    :type root: str
    :param kwargs: forwarded to :class:`PDBDataset` (e.g. ``session``,
        ``prot_ids``, ``seq_id``, ``ont``, ``test_file``, ``pdb_path``)
    :return: PyTorch Geometric dataset containing the protein graphs
    :rtype: torch_geometric.data.Dataset
    :raises ValueError: if `root` is not given
    """
    if root is None:  # idiom fix: identity test, not `== None`
        raise ValueError('Root path is empty, specify root directory')
    # Edge-construction recipes: (group, name, operation/cutoff, description).
    # Descriptions fixed — they were copy-pasted and all said "2angs"/"sqrt".
    pre_transform = [("KNN", "sqrt", "sqrt", "K nearest neighbour with sqrt for neighbours"),
                     ("KNN", "cbrt", "cbrt", "K nearest neighbour with cbrt for neighbours"),
                     ("DIST", "dist_3", 3, "Distance of 3angs"),
                     ("DIST", "dist_4", 4, "Distance of 4angs"),
                     ("DIST", "dist_6", 6, "Distance of 6angs"),
                     ("DIST", "dist_10", 10, "Distance of 10angs"),
                     ("DIST", "dist_12", 12, "Distance of 12angs")]
    # PDB URL has 1 attached to it
    dataset = PDBDataset(root, pre_transform=pre_transform, **kwargs)
    return dataset
# create raw and processed list.
def generate_dataset(_group="molecular_function"):
    """Create and cache the raw protein-id list for an ontology group.

    Args:
        _group: one of 'molecular_function', 'cellular_component',
            'biological_process'. Only molecular_function is implemented;
            the other groups are no-ops.
    """
    # Bug fix: `raw` was unbound for the unimplemented branches, so the
    # `if raw:` check below raised NameError for non-MF groups.
    raw = []
    if _group == "molecular_function":
        # load sequences as dictionary
        x = pickle_load('/data_bp/pycharm/TransFunData/data_bp/molecular_function/{}'.format(_group))
        raw = list(set([i for i in x.keys()]))
    elif _group == "cellular_component":
        pass  # TODO: not implemented
    elif _group == "biological_process":
        pass  # TODO: not implemented
    if raw:
        pickle_save(raw, '/data_bp/pycharm/TransFunData/data_bp/molecular_function/{}'
                    .format("molecular_function_raw_list"))
| Python |
3D | jianlin-cheng/TransFun | Dataset/myRadiusGraph.py | .py | 1,158 | 34 | from typing import Optional
import torch_geometric
from torch_geometric.transforms import RadiusGraph
class myRadiusGraph(RadiusGraph):
    r"""Named radius-graph transform: connects every pair of 'atoms' nodes
    whose positions lie within distance :obj:`r`, storing the result under
    the ('atoms', name, 'atoms') edge type of a HeteroData object.
    """

    def __init__(
        self,
        name: str,
        r: float,
        loop: bool = False,
        max_num_neighbors: int = 32,
        flow: str = 'source_to_target',
    ):
        super().__init__(r, loop, max_num_neighbors, flow)
        self.name = name  # relation name this transform writes to

    def __call__(self, data):
        edge_store = data['atoms', self.name, 'atoms']
        edge_store.edge_attr = None  # drop any stale attributes first
        batch = data.batch if 'batch' in data else None
        # Positional argument order matches torch_geometric.nn.radius_graph.
        edge_store.edge_index = torch_geometric.nn.radius_graph(
            data['atoms'].pos,
            self.r,
            batch,
            self.loop,
            self.max_num_neighbors,
            self.flow,
        )
        return data

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(r={self.r})'
| Python |
3D | jianlin-cheng/TransFun | Dataset/__init__.py | .py | 0 | 0 | null | Python |
3D | jianlin-cheng/TransFun | Dataset/AdjacencyTransform.py | .py | 1,915 | 49 | import torch
from torch_geometric.transforms import BaseTransform
class AdjacencyFeatures(BaseTransform):
    r"""Appends a binary "sequence-adjacent" feature to each edge type.

    For every edge type listed in ``edge_types``, each edge (i, j) gets a
    feature of 1.0 when the endpoints are neighbours in the residue sequence
    (|i - j| == 1) and 0.0 otherwise.

    Args:
        edge_types: iterable of edge-type tuples; element [1] is the relation
            name used to index the ('atoms', name, 'atoms') stores.
        cat (bool, optional): If set to :obj:`False`, existing edge
            attributes are replaced instead of concatenated. (default: True)

    Note: the previous docstring was copy-pasted from the Distance transform
    and documented ``norm``/``max_value`` parameters that do not exist here.
    """

    def __init__(self, edge_types, cat=True):
        self.cat = cat
        self.edge_types = edge_types

    def __call__(self, data):
        for edge_type in self.edge_types:
            store = data['atoms', edge_type[1], 'atoms']
            (row, col), pseudo = store.edge_index, store.get('edge_attr', None)
            # Same no-self-loop invariant as the per-edge asserts in the
            # original Python loop, checked in one shot.
            assert bool((row != col).all())
            # Vectorized |i - j| == 1 test replaces the O(E) Python loop.
            adjacent_edges = (row - col).abs().eq(1).to(torch.float32).view(-1, 1)
            if pseudo is not None and self.cat:
                pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo
                store.edge_attr = torch.cat([pseudo, adjacent_edges.type_as(pseudo)],
                                            dim=-1)
            else:
                store.edge_attr = adjacent_edges
        return data

    def __repr__(self) -> str:
        return f'{self.__class__.__name__} '
| Python |
3D | jianlin-cheng/TransFun | Dataset/distanceTransform.py | .py | 1,246 | 37 | import torch
from torch_geometric.transforms import Distance
class myDistanceTransform(Distance):
    r"""Per-edge-type Euclidean distance transform for HeteroData.

    For each configured edge type, computes the L2 distance between the two
    endpoint positions of every edge and concatenates it to (or replaces)
    that edge type's ``edge_attr``. With ``norm=True`` distances are scaled
    into [0, 1] by the per-graph maximum (or ``max_value`` when given).
    """

    def __init__(self, edge_types, norm=True, max_value=None, cat=True):
        super().__init__(norm, max_value, cat)
        self.edge_types = edge_types

    def __call__(self, data):
        positions = data['atoms'].pos
        for edge_type in self.edge_types:
            store = data['atoms', edge_type[1], 'atoms']
            row, col = store.edge_index
            existing = store.get('edge_attr', None)
            # Euclidean distance between the endpoints of every edge.
            dist = torch.norm(positions[col] - positions[row], p=2, dim=-1).view(-1, 1)
            if self.norm and dist.numel() > 0:
                divisor = dist.max() if self.max is None else self.max
                dist = dist / divisor
            if existing is not None and self.cat:
                existing = existing.view(-1, 1) if existing.dim() == 1 else existing
                store.edge_attr = torch.cat([existing, dist.type_as(existing)], dim=-1)
            else:
                store.edge_attr = dist
        return data

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}(norm={self.norm}, '
                f'max_value={self.max})')
| Python |
3D | jianlin-cheng/TransFun | Dataset/utils.py | .py | 4,610 | 143 | import math
import os
import re
import subprocess
from pathlib import Path
import pickle
import numpy as np
import torch
from biopandas.pdb import PandasPdb
import torch.nn.functional as F
from keras.utils import to_categorical
from keras_preprocessing.sequence import pad_sequences
import Constants
from Constants import residues, amino_acids
def get_input_data():
    """Write the demo raw/processed PDB file lists as pickles (test data)."""
    pdb_ids = ["1a0b", "1a0c", "1a0d", "1a0e", "1a0f", "1a0g", "1a0h", "1a0i", "1a0j", "1a0l"]
    raw = [pdb_id + ".pdb" for pdb_id in pdb_ids]
    processed = [pdb_id + ".pt" for pdb_id in pdb_ids]
    with open('../Dataset/raw.pickle', 'wb') as handle:
        pickle.dump(raw, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # NOTE: filename typo 'proceesed' kept for compatibility with readers.
    with open('../Dataset/proceesed.pickle', 'wb') as handle:
        pickle.dump(processed, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Filename regex patterns for the structure/file formats we recognise.
patterns = {
    'pdb': r'pdb[0-9]*$',
    'pdb.gz': r'pdb[0-9]*\.gz$',
    'mmcif': r'(mm)?cif$',
    'sdf': r'sdf[0-9]*$',
    'xyz': r'xyz[0-9]*$',
    'xyz-gdb': r'xyz[0-9]*$',
    'silent': r'out$',
    'sharded': r'@[0-9]+',
}
# Compiled once at import time; used by is_type() below.
_regexes = {k: re.compile(v) for k, v in patterns.items()}
def is_type(f, filetype):
    """Return a truthy regex match when `f` looks like the given file type.

    Known types use the precompiled patterns; unknown types fall back to a
    literal suffix match.
    """
    pattern = _regexes.get(filetype)
    if pattern is None:
        pattern = re.compile(filetype + r'$')
    return pattern.search(str(f))
def find_files(path, suffix, relative=None, type="Path"):
    """
    Find all files under `path` whose names end in `.suffix`.

    :param path: directory in which to search
    :type path: Union[str, Path]
    :param suffix: file suffix to match (used as the regex ``.*\\.suffix``)
    :type suffix: str
    :param relative: when truthy, results are reported relative to `path`
    :param type: "Path" -> sorted pathlib.Path objects; "Name" -> sorted basenames
    :return: sorted list of matches
    :rtype: list
    """
    if relative:
        find_cmd = r"cd {:}; find . -regex '.*\.{:}' | cut -d '/' -f 2- | sort" \
            .format(path, suffix)
    else:
        find_cmd = r"find {:} -regex '.*\.{:}' | sort".format(path, suffix)
    proc = subprocess.Popen(
        find_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        cwd=os.getcwd(), shell=True)
    stdout, _stderr = proc.communicate()
    names = stdout.decode().split()
    names.sort()
    if type == "Path":
        return sorted(Path(entry) for entry in names)
    elif type == "Name":
        return sorted(Path(entry).name for entry in names)
def process_pdbpandas(raw_path, chain_id):
    """Extract CA-atom features from a (gzipped) PDB file.

    Args:
        raw_path: path to a .pdb(.gz) file readable by biopandas.
        chain_id: the single chain expected in the file (asserted).

    Returns:
        Tuple (node_coords, sequence_features, sequence_string):
        node_coords is an (N, 3) float32 tensor of CA coordinates,
        sequence_features is a one-hot tensor of the padded sequence
        (pad_sequences to length 1024, then to_categorical), and
        sequence_string is the one-letter residue sequence.
    """
    pdb_to_pandas = PandasPdb().read_pdb(raw_path)
    pdb_df = pdb_to_pandas.df['ATOM']
    # Models handled here are single-chain; fail loudly otherwise.
    assert (len(set(pdb_df['chain_id'])) == 1) & (list(set(pdb_df['chain_id']))[0] == chain_id)
    pdb_df = pdb_df[(pdb_df['atom_name'] == 'CA') & (pdb_df['chain_id'] == chain_id)]
    pdb_df = pdb_df.drop_duplicates()
    _residues = pdb_df['residue_name'].to_list()
    # Map three-letter names to one-letter codes, skipping unknown residues.
    _residues = [amino_acids[i] for i in _residues if i != "UNK"]
    sequence_features = [[residues[residue] for residue in _residues]]
    # sequences + padding
    sequence_features = pad_sequences(sequence_features, maxlen=1024, truncating='post', padding='post')
    sequence_features = torch.tensor(to_categorical(sequence_features, num_classes=len(residues) + 1))
    # sequence_features = F.one_hot(sequence_features, num_classes=len(residues) + 1).to(dtype=torch.int64)
    node_coords = torch.tensor(pdb_df[['x_coord', 'y_coord', 'z_coord']].values, dtype=torch.float32)
    return node_coords, sequence_features, ''.join(_residues)
    # Bug fix: removed the unreachable `return residues` that followed the
    # return statement above (dead code).
def generate_Identity_Matrix(shape, sequence):
    """Sequence-only featurizer: zero coordinates plus one-hot features.

    Builds (shape[0], 3) zero coordinates and one-hot sequence features from
    `sequence[3]`, skipping residues listed in Constants.INVALID_ACIDS.
    """
    node_coords = torch.from_numpy(np.zeros(shape=(shape[0], 3)))
    _residues = sequence[3]
    # _residues = [amino_acids[i] for i in _residues if i != "UNK"]
    encoded = [[residues[residue] for residue in list(_residues) if residue not in Constants.INVALID_ACIDS]]
    # sequences + padding to fixed length 1024
    padded = pad_sequences(encoded, maxlen=1024, truncating='post', padding='post')
    sequence_features = torch.tensor(to_categorical(padded, num_classes=len(residues) + 1))
    return node_coords, sequence_features, str(_residues)
def get_cbrt(a):
    """Return the cube root of `a` (real branch for non-negative input)."""
    exponent = 1. / 3.
    return a ** exponent
def get_knn(**kwargs):
    """Pick an odd k for k-NN graph construction from the sequence length.

    mode="sqrt" -> smallest odd int >= int(sqrt(n)); mode="cbrt" -> same
    using the cube root; any other mode falls back to the full length.
    """
    mode = kwargs["mode"]
    seq_length = kwargs["sequence_length"]
    if mode == "sqrt":
        k = int(math.sqrt(seq_length))
    elif mode == "cbrt":
        k = int(get_cbrt(seq_length))
    else:
        return seq_length
    # Force k to be odd so the neighbourhood is symmetric.
    return k + 1 if k % 2 == 0 else k
3D | jianlin-cheng/TransFun | Dataset/myKnn.py | .py | 1,123 | 31 | import torch_geometric
from torch_geometric.transforms import KNNGraph
from torch_geometric.utils import to_undirected
class myKNNGraph(KNNGraph):
    r"""Named k-NN transform: builds a k-nearest-neighbour graph over the
    'atoms' node positions and stores it under ('atoms', name, 'atoms').
    """

    def __init__(self, name: str, k=6, loop=False, force_undirected=False,
                 flow='source_to_target'):
        super().__init__(k, loop, force_undirected, flow)
        self.name = name  # relation name of the produced edge type

    def __call__(self, data):
        store = data['atoms', self.name, 'atoms']
        store.edge_attr = None  # invalidate any stale edge attributes
        batch = data.batch if 'batch' in data else None
        edge_index = torch_geometric.nn.knn_graph(
            data['atoms'].pos,
            self.k,
            batch,
            loop=self.loop,
            flow=self.flow,
        )
        if self.force_undirected:
            # Symmetrize the directed k-NN edges.
            edge_index = to_undirected(edge_index, num_nodes=data.num_nodes)
        store.edge_index = edge_index
        return data

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(k={self.k})'
| Python |
3D | twni2016/Elastic-Boundary-Projection | nii2npy.py | .py | 2,073 | 54 | import os
import numpy as np
import nibabel as nib
import shutil
import argparse
parser = argparse.ArgumentParser(description='EBP')
parser.add_argument('--data_path', type=str, default=None, help='data path')
# Bug fix: the arguments were never parsed, so `args` was undefined below
# and the script crashed with NameError at startup.
args = parser.parse_args()
data_path = args.data_path

images_list = os.listdir(os.path.join(data_path, 'imagesTr'))
labels_list = os.listdir(os.path.join(data_path, 'labelsTr'))
# The dataset used here is expected to hold exactly 41 paired cases.
assert len(images_list) == len(labels_list) == 41
images_list.sort()
labels_list.sort()

if not os.path.exists(os.path.join(data_path, 'images')):
    os.makedirs(os.path.join(data_path, 'images'))
if not os.path.exists(os.path.join(data_path, 'labels')):
    os.makedirs(os.path.join(data_path, 'labels'))

# Convert each NIfTI volume to .npy; case ids are zero-padded to two digits.
for img_name in images_list:
    print(img_name)
    img = nib.load(os.path.join(data_path, 'imagesTr', img_name))
    img_array = img.get_fdata()
    img_id = img_name[7:9]
    if img_id[1] == '.':
        img_id = img_id[0]
    np.save(os.path.join(data_path, 'images', 'MD' + img_id.zfill(2) + '.npy'), img_array.astype(np.float32))

for lbl_name in labels_list:
    print(lbl_name)
    lbl = nib.load(os.path.join(data_path, 'labelsTr', lbl_name))
    # np.asanyarray(.dataobj) replaces the long-deprecated get_data(),
    # which was removed in nibabel >= 5; cast below preserves uint8 output.
    lbl_array = np.asanyarray(lbl.dataobj)
    lbl_id = lbl_name[7:9]
    if lbl_id[1] == '.':
        lbl_id = lbl_id[0]
    np.save(os.path.join(data_path, 'labels', 'MD' + lbl_id.zfill(2) + '.npy'), lbl_array.astype(np.uint8))

# half split the dataset: first 20 cases become test, the rest training.
image_list = os.listdir(os.path.join(data_path, 'images'))
image_list.sort()
for split_dir in ('test_images', 'test_labels', 'train_images', 'train_labels'):
    os.makedirs(os.path.join(data_path, split_dir))
for i, npy in enumerate(image_list):
    print(i, npy)
    split = 'test' if i < 20 else 'train'
    shutil.copy2(os.path.join(data_path, 'images', npy), os.path.join(data_path, split + '_images', npy))
    shutil.copy2(os.path.join(data_path, 'labels', npy), os.path.join(data_path, split + '_labels', npy))
| Python |
3D | twni2016/Elastic-Boundary-Projection | data_generation.py | .py | 13,911 | 344 | import os
import time
import numpy as np
from scipy.spatial import cKDTree
import argparse
# Constants for EBP data generation (distances in voxels unless noted).
PAD = 10
MAX_EUCLID = 10
# Manhattan-distance bound that covers a Euclidean ball of radius MAX_EUCLID
# (sqrt(3) accounts for 3D diagonals), plus a safety margin of 2.
MAX_MANHAT = int(np.ceil(np.sqrt(3) * MAX_EUCLID)) + 2
MAX_VALUE = MAX_MANHAT + 2
ITER_TH = 10
HEIGHT = 120
WIDTH = 120
STEP = 6 # for x,y axis
STEP_Z = 3 # due to small z_len
INIT_D = 5.0
# NOTE(review): CT_INF/CT_SUP look like a CT intensity (HU) clipping window —
# confirm against their use further down this module.
CT_INF = -125
CT_SUP = 275
Y_SCALE = 2
IN_SLICE = 5
def arg_parser():
    """Parse EBP command-line options and echo the chosen values to stdout."""
    cli = argparse.ArgumentParser(description='EBP')
    cli.add_argument('--data_path', type=str, default=None, help='data path')
    cli.add_argument('--organ_id', type=int, default=1, help='organ id')
    cli.add_argument('--slices', type=int, default=5, help='neighboring slices')
    cli.add_argument('--folds', type=int, default=1)
    parsed = cli.parse_args()
    print(parsed.data_path, parsed.organ_id, parsed.slices, parsed.folds)
    return parsed
class DATA_GENERATION():
    """Generate the EBP training dataset for one organ.

    For every case it computes a truncated signed distance map (relabel),
    iteratively deforms a spherical shell around grid pivots, and stores the
    sampled intensities / residuals / radii per pivot as .npy files.
    Reads the module-level `args` parsed in __main__.
    """
    def __init__(self, ):
        ''' prepare the dataset list (including training and testing cases) '''
        self.organ_id = args.organ_id
        self.slices = args.slices
        if self.slices % 2 == 0:
            raise ValueError('slices should be an odd number')
        self.images_path = os.path.join(args.data_path, 'images')
        self.labels_path = os.path.join(args.data_path, 'labels')
        self.dataset_name = 'dataset_organ' + str(self.organ_id)
        self.relabels_path = os.path.join(args.data_path, 'relabel', self.dataset_name)
        self.output_dataset_path = os.path.join(args.data_path, self.dataset_name)
        os.makedirs(self.relabels_path, exist_ok=True)
        os.makedirs(self.output_dataset_path, exist_ok=True)
        self.images_list = os.listdir(self.images_path)
        self.labels_list = os.listdir(self.labels_path)
        self.images_list.sort()
        self.labels_list.sort()
        if len(self.images_list) != len(self.labels_list):
            raise RuntimeError('len of images should be equal to that of labels')
        # Fixed unit-sphere sampling directions shared by all pivots.
        self.direction_x, self.direction_y, self.direction_z = self.set_sphere_projection()
        # Accumulated counts of pivot kinds p0..p3 over all cases.
        self.pivot_stat = np.zeros((4),dtype=np.int64)
        start_all = time.time()
        for case_num in self.images_list:
            self.pivot_stat += self.generate(case_num)
        print('summary of pivot stat:', self.pivot_stat)
        print('#' * 20, 'all time costs %ds'%(time.time() - start_all))
    def generate(self, case_num):
        '''generate dataset per case
        '''
        self.case_num = case_num
        print('#' * 20, 'processing', self.case_num)
        self.image = np.load(os.path.join(self.images_path, case_num))
        self.label = np.load(os.path.join(self.labels_path, case_num))
        self.label[self.label != self.organ_id] = 0 # 0/1 label
        self.label[self.label == self.organ_id] = 1
        (self.height, self.width, self.depth) = self.label.shape
        self.case_path = os.path.join(self.output_dataset_path, self.case_num[:-4])
        os.makedirs(self.case_path, exist_ok=True)
        self.bbox = self.get_bbox()
        print(self.case_num, 'bounding box', self.bbox)
        self.man_dist = self.Manhattan_Dist()
        self.relabel = self.Euclidean_Dist()
        # self.relabel = np.load(os.path.join(self.relabels_path, self.case_num)) if you have generated relabel
        self.pivot_num = 0
        self.pivot_list = []
        self.iterate()
        return self.pivot_stats()
    def get_bbox(self):
        '''get 3D bounding box with padding'''
        non_zero = np.nonzero(self.label)
        return [max(0, non_zero[0].min() - PAD), min(self.height, non_zero[0].max() + PAD),
                max(0, non_zero[1].min() - PAD), min(self.width, non_zero[1].max() + PAD),
                max(0, non_zero[2].min() - PAD), min(self.depth, non_zero[2].max() + PAD)]
    def Manhattan_Dist(self):
        '''Step 1: Compute Manhattan Distance.

        Runs MAX_MANHAT dilation sweeps along each axis, once seeded from the
        inside voxels and once from the outside, then merges both distance maps.
        '''
        start = time.time()
        (h, w, d) = self.label.shape
        # inside voxels
        dist1 = np.zeros(self.label.shape, dtype=np.int8)
        graph = - MAX_VALUE * self.label
        mask = MAX_VALUE * (np.ones(self.label.shape, dtype=np.int8) * (graph < 0))
        for K in range(MAX_MANHAT):
            # Max-propagate one step along each axis (forward and backward).
            tmp1 = graph.copy()
            tmp1[0:h-1,:,:] = np.maximum(tmp1[0:h-1,:,:], tmp1[1:h,:,:])
            tmp1[1:h,:,:] = np.maximum(tmp1[0:h-1,:,:], tmp1[1:h,:,:])
            tmp2 = graph.copy()
            tmp2[:,0:w-1,:] = np.maximum(tmp2[:,0:w-1,:], tmp2[:,1:w,:])
            tmp2[:,1:w,:] = np.maximum(tmp2[:,0:w-1,:], tmp2[:,1:w,:])
            tmp3 = graph.copy()
            tmp3[:,:,0:d-1] = np.maximum(tmp3[:,:,0:d-1], tmp3[:,:,1:d])
            tmp3[:,:,1:d] = np.maximum(tmp3[:,:,0:d-1], tmp3[:,:,1:d])
            graph = np.maximum.reduce([tmp1, tmp2, tmp3])
            graph = np.minimum(graph + 1, mask)
            dist1 = np.maximum(dist1, graph)
            mask = MAX_VALUE * (np.ones(self.label.shape, dtype=np.int8) * (graph < 0))
        # outside voxels
        dist2 = np.zeros(self.label.shape, dtype=np.int8)
        graph = - MAX_VALUE * (1 - self.label)
        mask = MAX_VALUE * (np.ones(self.label.shape, dtype=np.int8) * (graph < 0))
        for K in range(MAX_MANHAT):
            tmp1 = graph.copy()
            tmp1[0:h-1,:,:] = np.maximum(tmp1[0:h-1,:,:], tmp1[1:h,:,:])
            tmp1[1:h,:,:] = np.maximum(tmp1[0:h-1,:,:], tmp1[1:h,:,:])
            tmp2 = graph.copy()
            tmp2[:,0:w-1,:] = np.maximum(tmp2[:,0:w-1,:], tmp2[:,1:w,:])
            tmp2[:,1:w,:] = np.maximum(tmp2[:,0:w-1,:], tmp2[:,1:w,:])
            tmp3 = graph.copy()
            tmp3[:,:,0:d-1] = np.maximum(tmp3[:,:,0:d-1], tmp3[:,:,1:d])
            tmp3[:,:,1:d] = np.maximum(tmp3[:,:,0:d-1], tmp3[:,:,1:d])
            graph = np.maximum.reduce([tmp1, tmp2, tmp3])
            graph = np.minimum(graph + 1, mask)
            dist2 = np.maximum(dist2, graph)
            mask = MAX_VALUE * (np.ones(self.label.shape, dtype=np.int8) * (graph < 0))
        man_dist = np.maximum(dist1, dist2) # merge dist
        man_dist = man_dist.astype(np.int8)
        print(self.case_num, 'Step 1: Compute Manhattan Distance. time elapsed %.2fs'%(time.time()-start))
        return man_dist
    def Euclidean_Dist(self):
        '''Step 2: use selected Manhantan voxels to compute Euclidean dist.

        Produces a truncated signed distance map (positive inside the organ,
        negative outside), computed only on voxels pre-selected by the
        Manhattan pass, via nearest-surface cKDTree queries.
        '''
        start = time.time()
        # 6-neighborhood sum (np.roll wraps around, but the organ never touches
        # the volume border thanks to the bbox padding — presumably; verify).
        label_sum = self.label \
                    + np.roll(self.label, shift=1, axis=0) \
                    + np.roll(self.label, shift=-1, axis=0) \
                    + np.roll(self.label, shift=1, axis=1) \
                    + np.roll(self.label, shift=-1, axis=1) \
                    + np.roll(self.label, shift=1, axis=2) \
                    + np.roll(self.label, shift=-1, axis=2)
        inner_surface = np.where((label_sum > 0) & (label_sum < 7) & (self.label == 1))
        outer_surface = np.where((label_sum > 0) & (label_sum < 7) & (self.label == 0))
        outer_selected = np.where((self.man_dist > 0) & (self.label == 0))
        inner_selected = np.where((self.man_dist > 0) & (self.label == 1))
        relabel = np.zeros(self.label.shape, dtype=np.float32)
        relabel[self.label == 1] = +MAX_EUCLID
        relabel[self.label == 0] = -MAX_EUCLID
        osel = np.asarray(outer_selected)
        osel = osel.transpose()
        osur = np.asarray(outer_surface)
        osur = osur.transpose()
        tree = cKDTree(osur)
        mindist, minid = tree.query(osel)
        relabel[outer_selected] = -mindist
        isel = np.asarray(inner_selected)
        isel = isel.transpose()
        isur = np.asarray(inner_surface)
        isur = isur.transpose()
        tree = cKDTree(isur)
        mindist, minid = tree.query(isel)
        relabel[inner_selected] = mindist
        # Surface voxels get +/- 0.5 so the zero level sits between the shells.
        relabel[outer_surface] = -1 / 2
        relabel[inner_surface] = +1 / 2
        relabel[relabel > MAX_EUCLID] = +MAX_EUCLID
        relabel[relabel < -MAX_EUCLID] = -MAX_EUCLID
        print(self.case_num, "Step 2: use selected Manhantan voxels to compute Euclidean dist. time elapsed %.2fs"%(time.time()-start))
        np.save(os.path.join(self.relabels_path, self.case_num), relabel)
        return relabel
    def iterate(self):
        '''Iteration entrance: sweep a pivot grid over the bbox and process
        every pivot that is not deep outside the organ.'''
        start = time.time()
        for i in range(self.bbox[0], self.bbox[1], STEP):
            for j in range(self.bbox[2], self.bbox[3], STEP):
                for k in range(self.bbox[4], self.bbox[5], STEP_Z):
                    if self.relabel[i, j, k] > -MAX_EUCLID: # constraint
                        self.store(*self.get_surface(a=i, b=j, c=k))
                        self.pivot_num += 1
                        self.pivot_list.append([i, j, k])
        print(self.case_num, '***** total iteration time elapsed %.4fs *****'%(time.time()-start))
    def get_surface(self, a, b, c):
        '''Given pivot (a,b,c), to get its final shell iteratively.

        Each iteration samples the image on/near the current shell (X), reads
        the signed-distance residual on the shell (Y), and advances the radius
        map D by that residual.
        '''
        start_pivot = time.time()
        D = INIT_D * np.ones((ITER_TH, HEIGHT, WIDTH), dtype=np.float32)
        X = np.zeros((ITER_TH, self.slices + IN_SLICE, HEIGHT, WIDTH), dtype=np.float32)
        Y = np.zeros((ITER_TH, HEIGHT, WIDTH), dtype=np.float32)
        for iter in range(ITER_TH):
            start = time.time()
            # Neighboring slices: offsets -K..+K around the current radius.
            for i in range(self.slices):
                U = i - ((self.slices - 1) // 2)
                X[iter, i] = self.interp3(self.image,
                                          a + np.maximum(D[iter] + U, 0) * self.direction_x,
                                          b + np.maximum(D[iter] + U, 0) * self.direction_y,
                                          c + np.maximum(D[iter] + U, 0) * self.direction_z)
            # Interior slices: evenly spaced samples between pivot and shell.
            for i in range(0, IN_SLICE):
                U = (i + 1) * D[iter] / (IN_SLICE + 1)
                X[iter, self.slices + i] = self.interp3(self.image,
                                                        a + U * self.direction_x,
                                                        b + U * self.direction_y,
                                                        c + U * self.direction_z)
            Y[iter] = self.interp3(self.relabel,
                                   a + D[iter] * self.direction_x,
                                   b + D[iter] * self.direction_y,
                                   c + D[iter] * self.direction_z)
            np.clip(Y[iter], a_min=-Y_SCALE, a_max=Y_SCALE, out=Y[iter])
            norm_Y = np.linalg.norm(Y[iter])
            norm_D = np.linalg.norm(D[iter])
            print("Case %s Pivot point (%d,%d,%d), Iteration:%02d, norm_Y: %.2f, norm_D: %.2f, min/max D: %.2f/%.2f, time elapsed %.4fs" \
                  %(self.case_num, a, b, c, iter, norm_Y, norm_D, D[iter].min(), D[iter].max(), time.time()-start))
            if iter + 1 >= ITER_TH:
                break
            D[iter + 1] = D[iter] + Y[iter]
            np.clip(D[iter + 1], a_min=0, a_max=None, out=D[iter + 1]) # for outer pivots
        print('----------- Case%s Pivot point (%d,%d,%d), relabel is %.4f, total time elapsed %.4fs ----------' \
              %(self.case_num, a, b, c, self.relabel[a,b,c], time.time() - start_pivot))
        return X, Y, D, self.relabel[a,b,c], a, b, c
    def store(self, X, Y, D, key, a, b, c):
        '''prepare dataset. normalization and reshape for training stage.
        must save per pivot, otherwise > 100G MEM.
        (ITER_TH, slices + in_slice + 2, HEIGHT, WIDTH)
        '''
        # Normalize CT intensities into [0, 1] over the fixed HU window.
        np.clip(X, a_min=CT_INF, a_max=CT_SUP, out=X)
        X -= CT_INF
        X /= (CT_SUP - CT_INF)
        XYD_cat = np.concatenate((X, np.expand_dims(Y, 1), np.expand_dims(D, 1)), axis=1)
        XYD_cat = XYD_cat.astype(np.float32)
        # Pivot kind encodes the pivot's signed distance `key` relative to INIT_D.
        if key >= INIT_D:
            kind = 'p0' # disjoint inner pivots
        elif key >= 0:
            kind = 'p1' # joint inner pivots
        elif key > - INIT_D:
            kind = 'p2' # joint outer pivots
        else:
            kind = 'p3' # disjoint outer pivots
        np.save(os.path.join(self.case_path, 'XYD' + str(a).zfill(3) + str(b).zfill(3) + str(c).zfill(3) \
                             + kind + '.npy'), XYD_cat)
    def pivot_stats(self):
        # Count stored pivots per kind p0..p3 by the digit before '.npy'.
        npy_list = os.listdir(self.case_path)
        npy_list.sort()
        pivot_stat = np.zeros((4), dtype=np.int64)
        for i in range(len(npy_list)):
            pivot_stat[int(npy_list[i][-5])] += 1
        print('pivot_stats', pivot_stat)
        return pivot_stat
    def set_sphere_projection(self):
        '''initialize the (x,y,z) unit sphere coordinate'''
        direction_x = np.zeros((HEIGHT,WIDTH))
        direction_y = np.zeros((HEIGHT,WIDTH))
        direction_z = np.zeros((HEIGHT,WIDTH))
        # Polar angles chosen so cos(p) is uniform -> equal-area latitude bands.
        p = np.arccos((2 * np.arange(1, HEIGHT+1) / (HEIGHT+1)) -1)
        q = 2 * np.pi * np.arange(WIDTH) / WIDTH
        direction_x = np.outer(np.sin(p), np.cos(q)) # col vector * row vector
        direction_y = np.outer(np.sin(p), np.sin(q))
        direction_z += np.cos(p)[:, np.newaxis] # col vector, horizontal broadcast
        return direction_x, direction_y, direction_z
    def interp3(self, Data, a, b, c):
        '''3D interpolation (trilinear) of volume Data at fractional coords (a,b,c).'''
        floor_a = np.floor(a)
        floor_b = np.floor(b)
        floor_c = np.floor(c)
        # Clamp so that floor+1 indexing below stays in bounds.
        np.clip(floor_a, a_min=0, a_max=self.height-2, out=floor_a)
        np.clip(floor_b, a_min=0, a_max=self.width-2, out=floor_b)
        np.clip(floor_c, a_min=0, a_max=self.depth-2, out=floor_c)
        floor_a = floor_a.astype(np.uint16)
        floor_b = floor_b.astype(np.uint16)
        floor_c = floor_c.astype(np.uint16)
        da = a - floor_a
        db = b - floor_b
        dc = c - floor_c
        return (1-da) * ((1-db) * (Data[floor_a,floor_b,floor_c] * (1-dc) + \
                                   Data[floor_a,floor_b,floor_c+1] * (dc)) + \
                         (db) * (Data[floor_a,floor_b+1,floor_c] * (1-dc) + \
                                 Data[floor_a,floor_b+1,floor_c+1] * (dc))) + \
               (da) * ((1-db) * (Data[floor_a+1,floor_b,floor_c] * (1-dc) + \
                                 Data[floor_a+1,floor_b,floor_c+1] * (dc)) + \
                       (db) * (Data[floor_a+1,floor_b+1,floor_c] * (1-dc) + \
                               Data[floor_a+1,floor_b+1,floor_c+1] * (dc)))
def _write_case_list(file_name, dataset_path, cases_list):
    """Append the 'case/xxx.npy' relative path of every sample under each case dir."""
    with open(file_name, 'a+') as output:
        for case_num in cases_list:
            npy_list = os.listdir(os.path.join(dataset_path, case_num))
            npy_list.sort()
            for npy in npy_list:
                output.write(os.path.join(case_num, npy) + '\n')

def save_lists():
    """Split the generated cases half/half into valid/train sample-list files.

    Reads the module globals `data_generation` and `args` set in __main__.
    The original duplicated the write loop for each split and left the files
    to be closed implicitly; the shared helper uses a context manager instead.
    """
    dataset_path = data_generation.output_dataset_path
    total_cases_list = os.listdir(dataset_path)
    total_cases_list.sort()
    case_len = len(total_cases_list)
    lists_path = os.path.join(args.data_path, 'lists', data_generation.dataset_name)
    os.makedirs(lists_path, exist_ok=True)
    current_fold = 0 # for MSD spleen dataset, only one fold.
    print('FD %d/%d:'%(current_fold, args.folds))
    # File-name prefix shared by both split files, e.g. 'S5FD10'.
    prefix = 'S' + str(args.slices) + 'FD' + str(args.folds) + str(current_fold)
    valid_cases_list = total_cases_list[:(case_len // 2)]
    print('valid cases list:', valid_cases_list)
    _write_case_list(os.path.join(lists_path, prefix + 'valid.txt'), dataset_path, valid_cases_list)
    train_cases_list = total_cases_list[(case_len // 2):]
    print('train cases list:', train_cases_list)
    _write_case_list(os.path.join(lists_path, prefix + 'train.txt'), dataset_path, train_cases_list)
def mk_dir():
    """Create the logs/models/results directories for this dataset (idempotent)."""
    for subdir in ('logs', 'models', 'results'):
        os.makedirs(os.path.join(args.data_path, subdir, data_generation.dataset_name), exist_ok=True)
if __name__ == '__main__':
    # Entry point: parse CLI options, run the full per-case dataset generation
    # (the DATA_GENERATION constructor does all the work), then create the
    # auxiliary directories and the train/valid sample lists.
    args = arg_parser()
    data_generation = DATA_GENERATION()
    mk_dir()
    save_lists()
| Python |
3D | twni2016/Elastic-Boundary-Projection | test_util.py | .py | 5,929 | 145 | import os
import time
import numpy as np
from sklearn.neighbors.kde import KernelDensity
from scipy.spatial import Delaunay, distance
from scipy.ndimage.morphology import binary_fill_holes
import trimesh
import fast_functions as ff
# for KDE_tri(). may be tuned
KDE_bandwidth = 1.0
KDE_log_prob_th = -14
# for mesh3d(). may be tuned
ALPHA = 5
def KDE_tri(pred_point_cloud, bbox, text):
    ''' use KDE to filter outliers in predict point cloud.

    pred_point_cloud: 3 x n array of predicted surface points (see voxelize()
    in test_voxel.py). bbox: (h0, h1, w0, w1, d0, d1). text: open log file.
    Returns (points, tri): the surviving integer grid points (n x 3) and their
    Delaunay triangulation.
    '''
    (h0, h1, w0, w1, d0, d1) = bbox
    # All integer grid positions inside the bbox, as a 3 x m array.
    X,Y,Z = np.mgrid[h0:h1, w0:w1, d0:d1]
    positions = np.vstack([X.ravel(), Y.ravel(), Z.ravel()])
    # 1. KDE
    start = time.time()
    kde = KernelDensity(kernel='epanechnikov', bandwidth=KDE_bandwidth).fit(pred_point_cloud.T)
    score = kde.score_samples(positions.T)
    score = score.reshape(h1-h0, w1-w0, d1-d0)
    # Keep only grid positions whose log-density exceeds the threshold.
    filtered_pred_point_cloud = np.where(score > KDE_log_prob_th)
    points_list = [filtered_pred_point_cloud[0] + h0,
                   filtered_pred_point_cloud[1] + w0,
                   filtered_pred_point_cloud[2] + d0]
    print('KDE filter done', time.time() - start)
    print('filtered_pred_point_cloud (', filtered_pred_point_cloud[0].shape[0], '* 3 )')
    text.write('filtered_pred_point_cloud: ' + str(filtered_pred_point_cloud[0].shape[0]) + ' * 3 \n')
    text.flush()
    # 2. Delaunay triangulation
    start = time.time()
    points = np.asarray(points_list).T
    tri = Delaunay(points)
    print('Delaunay triangulation done', time.time() - start)
    return points, tri
def mesh3d(points, tri, alpha, label, text):
    '''Obtain alpha shape, voxelize and fill holes.

    points: n x 3 grid points; tri: their Delaunay triangulation; alpha:
    circumradius threshold for the alpha complex; label: ground-truth boolean
    volume (for DSC logging); text: open log file. Returns the hole-filled
    boolean prediction volume of the same shape as label.
    '''
    # 3. alpha shape: keep tetrahedra with a small, non-degenerate circumsphere.
    start = time.time()
    alpha_complex = np.asarray(list(
        filter(lambda simplex: 0 < circumsphere(points, simplex) < alpha, tri.simplices)))
    print(alpha, 'alpha shape done')
    # 4. voxelize the mesh. most time consuming and compute DSC
    tri_faces = [[tetra[[0,1,2]], tetra[[0,1,3]], tetra[[0,2,3]], tetra[[1,2,3]]]
                 for tetra in alpha_complex]
    tri_faces = np.asarray(tri_faces).reshape(-1, 3)
    mesh = trimesh.base.Trimesh(vertices = points, faces = tri_faces)
    voxel_mesh = mesh.voxelized(pitch = 1) # multi-CPU and huge CPU memory
    print('voxel_mesh.matrix_solid.shape', voxel_mesh.matrix_solid.shape)
    print('voxel_mesh.origin', voxel_mesh.origin)
    # np.bool was a deprecated alias for the builtin bool, removed in NumPy 1.24.
    pred = np.zeros(label.shape, dtype = bool)
    pred[voxel_mesh.origin[0]:voxel_mesh.origin[0]+voxel_mesh.shape[0],
         voxel_mesh.origin[1]:voxel_mesh.origin[1]+voxel_mesh.shape[1],
         voxel_mesh.origin[2]:voxel_mesh.origin[2]+voxel_mesh.shape[2]] = voxel_mesh.matrix_solid
    DSC, inter_sum, pred_sum, label_sum = DSC_computation(label, pred)
    print('DSC', DSC, inter_sum, pred_sum, label_sum)
    text.write('Initial DSC: ' + \
               str(DSC) + ' = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + str(label_sum) + ')\n')
    # 5. fill the holes
    filled_pred = binary_fill_holes(pred)
    DSC, inter_sum, pred_sum, label_sum = DSC_computation(label, filled_pred)
    print('After fill holes', DSC, inter_sum, pred_sum, label_sum)
    print('time cost', time.time() - start)
    text.write('After filling holes, DSC: ' + \
               str(DSC) + ' = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + str(label_sum) + ')\n')
    return filled_pred
def post_processing(pred_point_cloud, label, bbox, voxel_path, case_idx, text):
    ''' Entrance for test_util.py. Return final DSC.
    Given the predicted volume, find the largest component and trim it.'''
    text.write('#' * 10 + ' ' + case_idx + ' TESTING \n')
    # KDE outlier filtering + Delaunay, then alpha-shape voxelization.
    points, tri = KDE_tri(pred_point_cloud, bbox, text)
    pred = mesh3d(points, tri, ALPHA, label, text)
    # 6. find the largest component
    pred = largest_component(pred)
    DSC, inter_sum, pred_sum, label_sum = DSC_computation(label, pred)
    print('After keeping the largest component', DSC, inter_sum, pred_sum, label_sum)
    # NOTE(review): this message omits the parentheses used by the other DSC
    # log lines — presumably an oversight; kept as-is here.
    text.write('After keeping the largest component, DSC : ' + \
               str(DSC) + ' = 2 * ' + str(inter_sum) + ' / ' + str(pred_sum) + ' + ' + str(label_sum) + '\n')
    np.save(os.path.join(voxel_path, case_idx[:-4] + '_pred.npy'), pred.astype(np.uint8))
    # 7. delete one piece of boundary
    delete_boundary(pred) # it can be done multiple times.
    DSC, inter_sum, pred_sum, label_sum = DSC_computation(label, pred)
    print('Delete boundary', DSC, inter_sum, pred_sum, label_sum)
    text.write('Delete boundary, DSC: ' + \
               str(DSC) + ' = 2 * ' + str(inter_sum) + ' / (' + str(pred_sum) + ' + ' + str(label_sum) + ')\n')
    return DSC
def circumsphere(points, simplex):
    ''' return the circumradius of a tetrahedron.
    The beautiful formula: Area of (aa1, bb1, cc1) = 6 * V * R
    Proof: https://cms.math.ca/crux/v27/n4/page246-247.pdf

    Returns 0 for degenerate tetrahedra (zero volume, or the three opposite
    edge-length products violating the triangle inequality).
    '''
    # Homogeneous coordinate matrix [x y z 1]; |det| is six times the volume.
    verts = np.asarray([points[v] for v in simplex], dtype=float).reshape(4, 3)
    homo = np.hstack((verts, np.ones((4, 1))))
    six_volume = np.abs(np.linalg.det(homo))
    if six_volume == 0:
        return 0
    pairwise = distance.cdist(verts, verts)
    # Products of opposite edge lengths form the sides of an auxiliary triangle.
    sides = (pairwise[0, 1] * pairwise[2, 3],
             pairwise[0, 2] * pairwise[1, 3],
             pairwise[0, 3] * pairwise[1, 2])
    semi = sum(sides) / 2
    if semi < max(sides):
        return 0
    aux_area = np.sqrt(semi * (semi - sides[0]) * (semi - sides[1]) * (semi - sides[2]))
    return aux_area / six_volume
def DSC_computation(label, pred):
    """Dice similarity coefficient between two binary volumes.

    Returns (dsc, intersection, pred_sum, label_sum). The original raised
    ZeroDivisionError when both masks were empty; two empty masks agree
    perfectly, so that case now returns DSC = 1.0.
    """
    pred_sum = pred.sum()
    label_sum = label.sum()
    inter_sum = np.logical_and(pred, label).sum()
    if pred_sum + label_sum == 0:
        return 1.0, inter_sum, pred_sum, label_sum
    return 2 * float(inter_sum) / (pred_sum + label_sum), inter_sum, pred_sum, label_sum
def delete_boundary(voxel):
    """Peel one voxel-thick boundary layer off `voxel` in place.

    A foreground voxel is a boundary voxel when its 6-neighborhood sum
    (computed with wrap-around np.roll shifts) is below 7, i.e. at least one
    face neighbor is background. The original pre-allocated `voxel_sum` with
    np.zeros and immediately overwrote it — that dead store is removed.
    """
    voxel_sum = voxel \
                + np.roll(voxel, shift=1, axis=0) \
                + np.roll(voxel, shift=-1, axis=0) \
                + np.roll(voxel, shift=1, axis=1) \
                + np.roll(voxel, shift=-1, axis=1) \
                + np.roll(voxel, shift=1, axis=2) \
                + np.roll(voxel, shift=-1, axis=2)
    boundary_list = np.where((voxel_sum > 0) & (voxel_sum < 7) & (voxel == 1))
    voxel[boundary_list] = 0
def largest_component(voxel):
    ''' Call ff.

    Keeps only the largest connected component of the binary volume via the
    compiled SWIG helper (fast_functions.post_processing), which appears to
    operate in place on the uint8 copy — TODO confirm against the C source.
    Returns a new int32 array; the caller's array is not modified.
    '''
    voxel = voxel.astype(np.uint8)
    ff.post_processing(voxel, voxel, 1, False)
    return voxel.astype(np.int32)
| Python |
3D | twni2016/Elastic-Boundary-Projection | vnetg_data_load.py | .py | 2,552 | 58 | import torch
import torch.utils.data as data
from data_generation import *
# npy shape (ITER_TH, SLICES + IN_SLICE + 2, HEIGHT, WIDTH)
# total samples ITER_TH * len(npy_list)
class EBP(data.Dataset):
    """PyTorch dataset over the per-pivot .npy files produced by data_generation.

    Each stored file has shape (ITER_TH, slices + IN_SLICE + 2, HEIGHT, WIDTH);
    one dataset index addresses one (pivot, iteration) pair, so the dataset
    length is ITER_TH * number-of-files.
    """
    def __init__(self, train, data_path, folds, current_fold, organ_id, slices):
        # train selects the train.txt vs valid.txt sample list.
        self.train = train
        self.data_path = data_path
        self.folds = folds
        self.current_fold = current_fold
        self.slices = slices
        # Channel index where the interior slices end and Y begins.
        self.X_slices = self.slices + IN_SLICE
        self.dataset_name = 'dataset_organ' + str(organ_id)
        self.dataset_path = os.path.join(data_path, self.dataset_name)
        self.set_sphere_projection()
        if self.train:
            self.npy_list = open(os.path.join(data_path, 'lists', self.dataset_name, \
                'S' + str(slices) + 'FD' + str(folds) + str(current_fold) + 'train.txt'), 'r').read().splitlines()
        else:
            self.npy_list = open(os.path.join(data_path, 'lists', self.dataset_name, \
                'S' + str(slices) + 'FD' + str(folds) + str(current_fold) + 'valid.txt'), 'r').read().splitlines()
        self.npy_list_len = len(self.npy_list)
    def __getitem__(self, index):
        ''' return (slices + 3 + IN_SLICE + 3, H, W), (1, H, W), (p), (i) '''
        # Decompose the flat index into (pivot file, iteration within file).
        sample_pivot = index // ITER_TH
        sample_id = index % ITER_TH
        # mmap avoids reading the whole per-pivot file for one iteration.
        XYD_cat = np.load(os.path.join(self.dataset_path, \
            self.npy_list[sample_pivot]), mmap_mode='r')
        X = np.array(XYD_cat[sample_id, :self.slices])
        X_in = np.array(XYD_cat[sample_id, self.slices:self.X_slices])
        Y = np.array(XYD_cat[sample_id, self.X_slices])[np.newaxis, :]
        # Pivot kind p0..p3 is encoded as the digit before '.npy' in the name.
        pivot_kind = torch.tensor([int(self.npy_list[sample_pivot][-5])])
        iter_kind = torch.tensor([int(sample_id)])
        # Input = [neighbor slices, xyz directions, interior slices, xyz directions].
        return torch.cat((torch.from_numpy(X), self.direction, \
            torch.from_numpy(X_in), self.direction)), \
            torch.from_numpy(Y), pivot_kind, iter_kind
    def __len__(self):
        return ITER_TH * self.npy_list_len
    def set_sphere_projection(self):
        '''initialize the (x,y,z) unit sphere coordinate'''
        self.x = np.zeros((HEIGHT,WIDTH))
        self.y = np.zeros((HEIGHT,WIDTH))
        self.z = np.zeros((HEIGHT,WIDTH))
        # Equal-area latitudes (uniform cos) and uniform longitudes.
        self.p = np.arccos((2 * np.arange(1, HEIGHT+1) / (HEIGHT+1)) -1)
        self.q = 2 * np.pi * np.arange(WIDTH) / WIDTH
        self.x = np.outer(np.sin(self.p), np.cos(self.q)) # col vector * row vector
        self.y = np.outer(np.sin(self.p), np.sin(self.q))
        self.z += np.cos(self.p)[:, np.newaxis] # col vector, horizontal broadcast
        # Stacked (3, HEIGHT, WIDTH) direction tensor appended to every sample.
        self.direction_np = np.concatenate((self.x[np.newaxis, :], self.y[np.newaxis, :], self.z[np.newaxis, :]))
        self.direction_np = self.direction_np.astype(np.float32)
        self.direction = torch.from_numpy(self.direction_np)
| Python |
3D | twni2016/Elastic-Boundary-Projection | fast_functions.py | .py | 2,882 | 104 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated shim: locate and load the compiled _fast_functions extension
# that lives next to this file. Do not hand-edit the logic (see file header).
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_fast_functions', [dirname(__file__)])
        except ImportError:
            # Fall back to a normal import from sys.path.
            import _fast_functions
            return _fast_functions
        if fp is not None:
            try:
                _mod = imp.load_module('_fast_functions', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _fast_functions = swig_import_helper()
    del swig_import_helper
else:
    import _fast_functions
del version_info
# SWIG-generated attribute-access helpers shared by all proxy classes.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # Route attribute writes through the SWIG set-method table; with static=1
    # refuse to create new Python-side attributes on the proxy.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # Route attribute reads through the SWIG get-method table.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    # Pre-new-style-classes Python: fall back to a classic base class.
    class _object:
        pass
    _newclass = 0
# Thin wrappers over the compiled extension; SWIG rebinds the names directly
# to the C functions right after defining the Python-level stubs.
def post_processing(F, S, threshold, top2):
    return _fast_functions.post_processing(F, S, threshold, top2)
post_processing = _fast_functions.post_processing
def DSC_computation(A, G, P):
    return _fast_functions.DSC_computation(A, G, P)
DSC_computation = _fast_functions.DSC_computation
# This file is compatible with both classic and new-style classes.
| Python |
3D | twni2016/Elastic-Boundary-Projection | test_voxel.py | .py | 8,072 | 194 | from queue import *
from test_iter import *
from test_util import *
# for find_target_component(). may be tuned
DSC_TH = 0.60
TARGET_RATIO = 0.10
# for remove_by_D_value()
NONZERO_TH = 10000
MEAN_TH = 2.0
def set_sphere_projection():
    '''initialize the (x,y,z) unit sphere coordinate.

    Returns three (HEIGHT, WIDTH) arrays with the unit direction components.
    The original zero-initialized direction_x/direction_y and immediately
    overwrote them with np.outer — those dead stores are removed.
    '''
    # Equal-area latitudes (uniform cos spacing) and uniform longitudes.
    p = np.arccos((2 * np.arange(1, HEIGHT+1) / (HEIGHT+1)) -1)
    q = 2 * np.pi * np.arange(WIDTH) / WIDTH
    direction_x = np.outer(np.sin(p), np.cos(q)) # col vector * row vector
    direction_y = np.outer(np.sin(p), np.sin(q))
    direction_z = np.zeros((HEIGHT,WIDTH))
    direction_z += np.cos(p)[:, np.newaxis] # col vector, horizontal broadcast
    return direction_x, direction_y, direction_z
def bbox_info():
    '''get 3D bounding box with padding, compute lower bound of target volume, and border pivots.

    Reads the module globals `label`, `height`, `width`, `depth` and
    `pivot_list` set per-case in __main__. Returns (bbox, PROPER_SIZE lower
    bound on pivot count, set of pivot indices lying on the bbox border).
    '''
    non_zero = np.nonzero(label)
    bbox = [max(0, non_zero[0].min() - PAD), min(height, non_zero[0].max() + PAD),
            max(0, non_zero[1].min() - PAD), min(width, non_zero[1].max() + PAD),
            max(0, non_zero[2].min() - PAD), min(depth, non_zero[2].max() + PAD)]
    bbox_volume = 1
    bbox_border_pivots = set()
    for i in range(3):
        bbox_volume *= (bbox[2 * i + 1] - bbox[2 * i])
        # Pivots sitting exactly on the low border of axis i ...
        bbox_border_pivots |= set(np.where(pivot_list[:, i] == bbox[2 * i])[0])
        # ... and on the last reachable grid line before the high border
        # (the pivot grid advances by STEP in x/y and STEP_Z in z).
        if i < 2:
            oppo_border = ((bbox[2 * i + 1] - bbox[2 * i]) // STEP) * STEP + bbox[2 * i]
        else:
            oppo_border = ((bbox[2 * i + 1] - bbox[2 * i]) // STEP_Z) * STEP_Z + bbox[2 * i]
        bbox_border_pivots |= set(np.where(pivot_list[:, i] == oppo_border)[0])
    return bbox, (bbox_volume / (STEP * STEP * STEP_Z)) * TARGET_RATIO, bbox_border_pivots
def find_idx(pivot):
    """Return the row index of `pivot` in the global pivot_list, or -1 if absent."""
    matches = np.flatnonzero((pivot_list == np.array(pivot)).all(axis=1))
    return -1 if matches.size == 0 else matches[0]
def BFS_by_neighbor(binary_matrix, DSC_TH):
    '''BFS on pivot node graph (simple 6-neighbor Euclidean graph)
    where edge weights are given by binary matrix,
    to find the all valid components where weights are over DSC_TH.

    Returns a list of sets of pivot indices (one set per connected component).
    Reads the module globals pivot_list and PIVOT_POINTS set in __main__.
    '''
    # Offsets of the six axis-aligned neighbors on the pivot grid.
    neigh = np.array([[0,0,0,0,STEP,-STEP],
                      [0,0,STEP,-STEP,0,0],
                      [STEP_Z,-STEP_Z,0,0,0,0]]).T
    # np.bool was a deprecated alias for the builtin bool, removed in NumPy 1.24.
    marked = np.ones((PIVOT_POINTS), dtype=bool)
    # Candidate pivots: those with enough total agreement in binary_matrix.
    marked[binary_matrix.sum(axis=0) > DSC_TH] = False
    components = []
    head = np.argmax(marked == False)
    while True:
        q = Queue()
        q.put(head)
        connection = set()
        connection.add(head)
        marked[head] = True
        while (not q.empty()):
            p = q.get()
            for k in range(6):
                pair = pivot_list[p] + neigh[k]
                pair_id = find_idx(pair)
                # Expand only across edges whose pairwise agreement is high enough.
                if pair_id >= 0 and \
                   binary_matrix[p, pair_id] > DSC_TH and marked[pair_id] == False:
                    connection.add(pair_id)
                    q.put(pair_id)
                    marked[pair_id] = True
        components.append(connection)
        rest = np.where(marked == False)[0]
        if rest.shape[0] == 0:
            break
        head = rest[0]
    print('BFS by 6 neignbor to get components done')
    return components
def find_target_component(binary_matrix, DSC_TH):
    ''' return the component candidate, which satisfies:
    (1) size is over a lower bound (2) has the minimal border pivots.

    Returns a set of pivot indices. Raises RuntimeError when no component is
    large enough (the original returned a `(-1, None)` tuple here while the
    success path returns a set; callers use the result directly as a set, so
    that tuple only surfaced later as a confusing TypeError).
    '''
    components = BFS_by_neighbor(binary_matrix, DSC_TH)
    id_list = []
    border_num_list = []
    candidates = 0
    for id, comp in enumerate(components):
        # Size lower bound derived from the bbox volume (see bbox_info()).
        if len(comp) > PROPER_SIZE / 3:
            candidates += 1
            id_list.append(id)
            border_num_list.append(len(comp & bbox_border_pivots))
    if candidates == 0:
        raise RuntimeError('DSC TH is too large: no sufficiently large component found')
    # Among the large-enough components, prefer the one touching the bbox least.
    border_num_array = np.asarray(border_num_list)
    target_id = id_list[np.argmin(border_num_array)]
    return components[target_id]
def remove_by_D_value(pred_pivot, D):
    '''trim the component by D. A minor step.

    Keeps only pivots from `pred_pivot` whose radius map D has enough nonzero
    entries and a large enough mean; returns the surviving indices sorted.
    '''
    per_pivot_nonzero = np.count_nonzero(D, axis=(1,2))
    per_pivot_mean = np.mean(D, axis=(1,2))
    nonzero_ok = {idx for idx in range(PIVOT_POINTS) if per_pivot_nonzero[idx] >= NONZERO_TH}
    mean_ok = {idx for idx in range(PIVOT_POINTS) if per_pivot_mean[idx] >= MEAN_TH}
    survivors = sorted(nonzero_ok & mean_ok & set(pred_pivot))
    print('*'*10, 'total pivots:', PIVOT_POINTS, 'after filtering')
    print('non_zero', len(nonzero_ok), 'mean', len(mean_ok), 'pred_pivot_by_parts', len(pred_pivot))
    print('intersect', len(survivors))
    return survivors
def voxelize(pivot_id_list, D):
    ''' Voxelize the predicted point cloud. Call test_util.post_processing.

    Projects every surviving pivot's radius map D onto the fixed unit-sphere
    directions to get a 3 x n point cloud, saves it, and delegates the final
    voxelization/DSC computation. Reads the module globals direction_x/y/z,
    pivot_list, label, bbox, voxel_path, case_idx and final_DSC_text.
    '''
    pred_point_cloud = [[], [], []]
    for p in pivot_id_list:
        # Shell point = pivot coordinate + radius * unit direction, per axis.
        pred_point_cloud[0].append(pivot_list[p,0] + (D[p] * direction_x).reshape(-1))
        pred_point_cloud[1].append(pivot_list[p,1] + (D[p] * direction_y).reshape(-1))
        pred_point_cloud[2].append(pivot_list[p,2] + (D[p] * direction_z).reshape(-1))
    for i in range(3):
        pred_point_cloud[i] = np.asarray(pred_point_cloud[i]).reshape(-1)
    pred_point_cloud = np.asarray(pred_point_cloud) # 3 * n
    np.save(os.path.join(voxel_path, case_idx[:-4] + '_pred_point_cloud.npy'), pred_point_cloud)
    return post_processing(pred_point_cloud, label, bbox, voxel_path, case_idx, final_DSC_text)
def arg_parser():
    """Build and parse the test_voxel command line (flags and defaults unchanged)."""
    cli = argparse.ArgumentParser(description='EBP')
    option_table = [
        (('--data_path',), dict(type=str, default=None)),
        (('--organ_id',), dict(type=int, default=1)),
        (('--organ_name',), dict(type=str, default='sp')),  # MSD spleen
        (('--gpu_id',), dict(type=int, default=0)),
        (('--slices',), dict(type=int, default=5)),
        (('-e', '--epoch'), dict(type=int, default=1, help='number of epochs to train')),
        (('-t', '--timestamp'), dict(type=str, default=None, help='snapshot model')),
        (('--folds',), dict(type=int, default=1)),
        (('-f', '--current_fold'), dict(type=int, default=0)),
        (('--train_cases',), dict(type=int, default=21)),
    ]
    for flags, options in option_table:
        cli.add_argument(*flags, **options)
    return cli.parse_args()
if __name__ == '__main__':
    # Final EBP stage: per test case, load the per-pivot predictions, pick the
    # target connected component, trim it, voxelize it and report the DSC.
    args = arg_parser()
    direction_x, direction_y, direction_z = set_sphere_projection()
    model_name = 'vnetg_e' + str(args.epoch) + 'S' + str(args.slices) + 'FD' + str(args.folds) + str(args.current_fold) + '_' + args.timestamp
    results_path = os.path.join(args.data_path, 'results', args.organ_name, 'train' + str(args.train_cases) + '_' + model_name)
    test_cases_path = os.path.join(args.data_path, 'test_labels')
    test_cases_list = os.listdir(test_cases_path)
    test_cases_list.sort()
    voxel_path = os.path.join(results_path,
                              'components_NONZERO' + str(NONZERO_TH) + ',MEAN' + str(MEAN_TH))
    os.makedirs(voxel_path, exist_ok=True)
    final_DSC_text = open(os.path.join(voxel_path, 'results.txt'), 'a+')
    final_DSC_array = np.zeros((len(test_cases_list)))
    start_all = time.time()
    for id, case_idx in enumerate(test_cases_list):
        start = time.time()
        print('#' * 10, 'EBP final part:', 'organ id', args.organ_id, 'based on', results_path)
        print('#' * 10, case_idx, 'starts!')
        label = np.load(os.path.join(test_cases_path, case_idx))
        label[label != args.organ_id] = 0
        label[label == args.organ_id] = 1
        # np.bool was a deprecated alias for the builtin bool, removed in NumPy 1.24.
        label = label.astype(bool)
        (height, width, depth) = label.shape
        relabel = np.load(os.path.join(args.data_path,
                                       'relabel', 'dataset_organ' + str(args.organ_id), case_idx))
        pivot_list = np.load(os.path.join(results_path,
                                          case_idx[:-4] + '_pivot_list.npy'))
        selected_model_D = np.load(os.path.join(results_path,
                                                case_idx[:-4] + '_selected_model_D.npy'))
        binary_matrix = np.load(os.path.join(results_path,
                                             case_idx[:-4] + '_binary_matrix.npy'))
        PIVOT_POINTS = len(pivot_list)
        bbox, PROPER_SIZE, bbox_border_pivots = bbox_info()
        pred_pivot = find_target_component(binary_matrix, DSC_TH)
        intersect_list = remove_by_D_value(pred_pivot, selected_model_D)
        final_DSC_array[id] = voxelize(intersect_list, selected_model_D)
        print(case_idx, 'ends. time costs: %dmin\n'%((time.time() - start) / 60))
    np.save(os.path.join(voxel_path, 'FINAL_DSC_ARRAY.npy'), final_DSC_array)
    DSC_mean, DSC_std = final_DSC_array.mean(), final_DSC_array.std()
    print('\n\n', '!' * 10, 'FINAL DSC RESULT FOR EBP PROJECT')
    print('DSC_mean:', DSC_mean, 'DSC_std', DSC_std)
    # Fixed missing space before 'and' in the summary line of results.txt.
    final_DSC_text.write('DSC mean = ' + str(DSC_mean) + ' and std = ' + str(DSC_std) + '\n')
    print('TOTAL TIME COSTS %dmin' % ((time.time() - start_all) / 60))
| Python |
3D | twni2016/Elastic-Boundary-Projection | vnetg.py | .py | 9,361 | 231 | import os
import time
import numpy as np
import argparse
import torch
import torch.nn as nn
from vnetg_data_load import *
def arg_parser():
    """Collect the command-line options for the EBP training entry point."""
    parser = argparse.ArgumentParser(description='EBP')
    add = parser.add_argument
    add('--data_path', type=str, default=None, help='data path')
    add('--gpu_id', type=int, default=0)
    add('--slices', type=int, default=5, help='neighboring slices')
    add('--organ_id', type=int, default=1)
    add('-t', '--timestamp', type=str, default=None)
    add('--folds', type=int, default=1)
    add('-f', '--current_fold', type=int, default=0)
    add('-b', '--batch', type=int, default=32, help='input batch size for training')
    add('-e', '--epoch', type=int, default=1, help='number of epochs to train')
    add('--lr', type=float, default=0.001, help='learning rate')
    return parser.parse_args()
if __name__ == '__main__':
    args = arg_parser()
    # HyperParameter
    epoch = args.epoch
    batch_size = args.batch
    lr = args.lr
    # Pin the process to the requested GPU before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu_id)
    np.set_printoptions(precision=3, suppress=True)
    # build pytorch dataset
    # EBP comes from vnetg_data_load's star import; it yields
    # (image, target, pivot_kind, iter_kind) tuples consumed by train().
    training_set = EBP(train=True, data_path=args.data_path, folds=args.folds, current_fold=args.current_fold, organ_id=args.organ_id, slices=args.slices)
    testing_set = EBP(train=False, data_path=args.data_path, folds=args.folds, current_fold=args.current_fold, organ_id=args.organ_id, slices=args.slices)
    trainloader = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=True, num_workers=12)
    testloader = torch.utils.data.DataLoader(testing_set, batch_size=batch_size, shuffle=False, num_workers=12)
class DownTransition(nn.Module):
    """Down-sampling residual block.

    A strided grouped 3x3 convolution halves the spatial resolution; a stack
    of ELU -> dilated grouped Conv -> BatchNorm layers is then added back to
    the down-sampled features (residual connection) before a final ELU.
    """

    def __init__(self, inchan, outchan, layer, dilation_=1):
        super(DownTransition, self).__init__()
        self.dilation_ = dilation_
        self.outchan = outchan
        self.layer = layer
        self.down = nn.Conv2d(in_channels=inchan, out_channels=self.outchan,
                              kernel_size=3, padding=1, stride=2, groups=2)  # /2
        self.bn = nn.BatchNorm2d(num_features=self.outchan, affine=True)
        self.conv = self.make_layers()
        self.relu = nn.ELU(inplace=True)

    def make_layers(self):
        """Stack `layer` repetitions of ELU -> dilated grouped Conv2d -> BatchNorm."""
        stack = []
        for _ in range(self.layer):
            stack += [
                nn.ELU(inplace=True),
                nn.Conv2d(self.outchan, self.outchan, kernel_size=3,
                          padding=self.dilation_, stride=1,
                          dilation=self.dilation_, groups=2),
                nn.BatchNorm2d(num_features=self.outchan, affine=True),
            ]
        return nn.Sequential(*stack)

    def forward(self, x):
        """Return the half-resolution feature map with a residual connection."""
        skip = self.down(x)
        residual = self.conv(self.bn(skip))
        return self.relu(torch.add(skip, residual))
class UpTransition(nn.Module):
    """Up-sampling residual block.

    A 4x4 transposed convolution doubles the spatial resolution; a stack of
    ELU -> grouped Conv -> BatchNorm layers is added back to the up-sampled
    features before a final ELU. When `last` is True the block additionally
    maps to a single channel and clamps the output to [-Y_SCALE, Y_SCALE].
    """

    def __init__(self, inchan, outchan, layer, last=False):
        super(UpTransition, self).__init__()
        self.last = last
        self.outchan = outchan
        self.layer = layer
        self.up = nn.ConvTranspose2d(in_channels=inchan, out_channels=self.outchan,
                                     kernel_size=4, padding=1, stride=2)  # *2
        self.bn = nn.BatchNorm2d(num_features=self.outchan, affine=True)
        self.conv = self.make_layers()
        self.relu = nn.ELU(inplace=True)
        if self.last is True:
            # 1x1 convolution collapsing the features to one output channel.
            self.conv1 = nn.Conv2d(self.outchan, 1, kernel_size=1)

    def make_layers(self):
        """Stack `layer` repetitions of ELU -> grouped Conv2d -> BatchNorm."""
        stack = []
        for _ in range(self.layer):
            stack += [
                nn.ELU(inplace=True),
                nn.Conv2d(self.outchan, self.outchan, kernel_size=3,
                          padding=1, stride=1, groups=2),
                nn.BatchNorm2d(num_features=self.outchan, affine=True),
            ]
        return nn.Sequential(*stack)

    def forward(self, x):
        """Return the double-resolution feature map (single clamped channel
        when this is the last decoder block)."""
        skip = self.up(x)
        residual = self.conv(self.bn(skip))
        out = self.relu(torch.add(skip, residual))
        if self.last is True:
            out = self.conv1(out)  # NCHW, C=1
            # Y_SCALE is provided by vnetg_data_load's star import.
            out = torch.clamp(out, min=-Y_SCALE, max=Y_SCALE)
        return out
class Vnet(nn.Module):
    """2D V-Net-style encoder/decoder with additive skip connections.

    The encoder is a stack of DownTransition blocks and the decoder a
    mirrored stack of UpTransition blocks; each encoder output is added to
    the decoder input of the same resolution level. The outermost decoder
    block (i == 0) produces the final single-channel, clamped output.

    NOTE(review): the stem convolution hardcodes 16 input channels and the
    `slices` argument is unused here — confirm callers always feed
    16-channel inputs (slices + IN_SLICE + 6 with the defaults used in
    test_iter.py).
    """
    def __init__(self, slices, inchans, outchans, down_layers, up_layers, dilations):
        super(Vnet,self).__init__()
        # Stem: 7x7 grouped convolution keeping 16 channels at full resolution.
        self.layer0 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=7, stride=1, padding=3, groups=2, bias=False),
            nn.BatchNorm2d(16,affine=True),
            nn.ELU(inplace=True)
        )
        self.block_num = len(inchans)
        self.down = nn.ModuleList()
        self.up = nn.ModuleList()
        for i in range(self.block_num):
            self.down.append(DownTransition(inchan=inchans[i], outchan=outchans[i], layer=down_layers[i], dilation_=dilations[i]))
            if i==0 :
                # Outermost decoder block emits the final 1-channel output.
                self.up.append(UpTransition(inchan=outchans[i], outchan=inchans[i], layer=up_layers[i], last=True))
            else:
                self.up.append(UpTransition(inchan=outchans[i], outchan=inchans[i], layer=up_layers[i]))
        # Kaiming init for all convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self,x):
        """Encode with the down path, then decode adding same-level skips."""
        x = self.layer0(x)
        out_down = []
        out_down.append(self.down[0](x))
        for i in range(1,self.block_num):
            out_down.append(self.down[i](out_down[i-1]))
        # Start decoding from the deepest level.
        out_up = self.up[self.block_num-1](out_down[self.block_num-1])
        for i in reversed(range(self.block_num-1)):
            out_up = self.up[i](torch.add(out_up,out_down[i]))
        return out_up
class OHEM(nn.Module):
    """Online Hard Example Mining loss.

    Computes the per-sample mean MSE over flattened predictions and averages
    only the hardest `top_k` fraction of samples in the batch, so easy
    samples do not dilute the gradient signal.
    """

    def __init__(self, top_k=0.7):
        """
        Args:
            top_k (float): fraction of the batch (highest-loss samples) kept.
        """
        super(OHEM, self).__init__()
        # `reduce=False` is deprecated; `reduction='none'` is the supported
        # way to keep the element-wise losses the mining below needs.
        self.criterion = nn.MSELoss(reduction='none')
        self.top_k = top_k

    def forward(self, input, target):
        """Return the mean loss over the hardest `top_k * batch` samples."""
        batch = input.shape[0]
        loss = self.criterion(input.view(batch, -1), target.view(batch, -1))
        # One scalar per sample, then keep the largest int(top_k * batch).
        values, _ = torch.topk(torch.mean(loss, dim=1), int(self.top_k * batch))
        return torch.mean(values)
def train():
    """Run the full optimization loop over the globally-configured model.

    Relies on module-level globals defined in the __main__ blocks: `model`,
    `optimizer`, `train_criterion` (OHEM), `valid_criterion` (element-wise
    MSE), `trainloader`, `testloader`, `epoch`, `period`, `args`, plus
    `ITER_TH` from vnetg_data_load's star import. Per (pivot_kind,
    iter_kind) loss statistics are accumulated and printed every `period`
    iterations; after each epoch the learning rate is halved, a no-grad
    validation pass runs, and a model snapshot is saved to disk.
    """
    for e in range(epoch):
        model.train()
        # Rows index the 4 pivot kinds, columns the ITER_TH iteration kinds.
        total_loss = np.zeros((4, ITER_TH))
        period_loss = np.zeros((4, ITER_TH))
        total_pivot_kind = np.zeros((4, ITER_TH), dtype=np.int32)
        period_pivot_kind = np.zeros((4, ITER_TH), dtype=np.int32)
        start = time.time()
        for index, (image, target, pivot_kind, iter_kind) in enumerate(trainloader):
            batch = image.shape[0]
            image, target = image.cuda().float(), target.cuda().float()
            optimizer.zero_grad()
            output = model(image)
            # Element-wise loss reduced to one scalar per sample for the stats.
            loss = valid_criterion(output.view(batch, -1), target.view(batch, -1))
            loss = torch.mean(loss, dim=1)
            for p in range(batch):
                total_pivot_kind[pivot_kind[p].item(), iter_kind[p].item()] += 1
                period_pivot_kind[pivot_kind[p].item(), iter_kind[p].item()] += 1
                total_loss[pivot_kind[p].item(), iter_kind[p].item()] += loss[p].item()
                period_loss[pivot_kind[p].item(), iter_kind[p].item()] += loss[p].item()
            # Only the OHEM loss (hardest samples) drives the gradient step.
            OHEM_loss = train_criterion(output, target)
            OHEM_loss.backward()
            optimizer.step()
            if index % period == (period - 1):
                print ("CNN Train Epoch[%d/%d], Iter[%05d], Time elapsed %ds" %(e+1, epoch, index, time.time()-start))
                # NOTE(review): cells with zero count divide 0/0 and print nan — confirm acceptable.
                print ('avg loss:', period_loss / period_pivot_kind)
                period_loss = np.zeros((4, ITER_TH))
                period_pivot_kind = np.zeros((4, ITER_TH), dtype=np.int32)
        print ('#'*10, "CNN Train TOTAL Epoch[%d/%d], Time elapsed %ds" %(e+1, epoch, time.time()-start))
        print ('#'*10, 'avg loss:', total_loss / total_pivot_kind)
        # Halve the learning rate after every epoch.
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.5
        print('#'*10, 'lr decay')
        # Validation pass: same statistics, no gradients, no parameter updates.
        with torch.no_grad():
            model.eval()
            total_loss = np.zeros((4, ITER_TH))
            period_loss = np.zeros((4, ITER_TH))
            total_pivot_kind = np.zeros((4, ITER_TH), dtype=np.int32)
            period_pivot_kind = np.zeros((4, ITER_TH), dtype=np.int32)
            start = time.time()
            for index, (image, target, pivot_kind, iter_kind) in enumerate(testloader):
                batch = image.shape[0]
                image, target = image.cuda().float(), target.cuda().float()
                output = model(image)
                loss = valid_criterion(output.view(batch, -1), target.view(batch, -1))
                loss = torch.mean(loss, dim=1)
                for p in range(batch):
                    total_pivot_kind[pivot_kind[p].item(), iter_kind[p].item()] += 1
                    period_pivot_kind[pivot_kind[p].item(), iter_kind[p].item()] += 1
                    total_loss[pivot_kind[p].item(), iter_kind[p].item()] += loss[p].item()
                    period_loss[pivot_kind[p].item(), iter_kind[p].item()] += loss[p].item()
                if index % period == (period - 1):
                    print ("CNN Valid Epoch[%d/%d], Iter[%05d], Time elapsed %ds" %(e+1, epoch, index, time.time()-start))
                    print ('avg loss:', period_loss / period_pivot_kind)
                    period_loss = np.zeros((4, ITER_TH))
                    period_pivot_kind = np.zeros((4, ITER_TH), dtype=np.int32)
            print ('*'*10, "CNN Valid TOTAL Epoch[%d/%d], Time elapsed %ds" %(e+1, epoch, time.time()-start))
            print ('*'*10, 'avg loss:', total_loss / total_pivot_kind)
        # Snapshot after every epoch; the name encodes the run configuration
        # and must match what test_iter.py reconstructs when loading.
        torch.save(model.state_dict(), os.path.join(args.data_path, 'models', 'dataset_organ' + str(args.organ_id), \
            'vnetg_e' + str(e) + 'S' + str(args.slices) + 'FD' + str(args.folds) + str(args.current_fold) + '_' + args.timestamp + '.pkl'))
    print('#' * 10 , 'end of training stage!')
if __name__ == '__main__':
    # Three-level encoder/decoder; the stem expects 16-channel inputs.
    model = Vnet(slices=args.slices, inchans=[16,64,128], outchans=[64,128,256], down_layers=[3,3,3], up_layers=[3,3,3], dilations=[2,2,2])
    model = model.cuda()
    train_criterion = OHEM()
    # `reduce=False` is deprecated in PyTorch; `reduction='none'` keeps the
    # element-wise losses needed for the per-sample statistics in train().
    valid_criterion = nn.MSELoss(reduction='none')
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.0001)
    # Count trainable parameters for the startup banner.
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print('model parameters:', params, 'training batches', len(trainloader), 'valid batches', len(testloader))
    period = 20  # logging period, in iterations
    train()
| Python |
3D | twni2016/Elastic-Boundary-Projection | test_iter.py | .py | 15,860 | 360 | from vnetg import Vnet
from data_generation import *
ITER_TH = 11  # number of boundary-projection iterations per pivot; may be tuned
POINT_STEP = 3  # stride for the dense point grid sampled inside the bounding box
EPSILON = 0.00001  # numeric guard against division by zero in DSC computations
INTER_DSC_TH = 0.99  # convergence threshold on DSC between consecutive shells
def arg_parser():
    """Collect the command-line options for the EBP iterative testing stage."""
    parser = argparse.ArgumentParser(description='EBP')
    add = parser.add_argument
    add('--data_path', type=str, default=None)
    add('--organ_id', type=int, default=1)
    add('--organ_name', type=str, default='sp')  # MSD spleen
    add('--gpu_id', type=int, default=0)
    add('--slices', type=int, default=5)
    add('-e', '--epoch', type=int, default=1, help='number of epochs to train')
    add('-t', '--timestamp', type=str, default=None, help='snapshot model')
    add('--folds', type=int, default=1)
    add('-f', '--current_fold', type=int, default=0)
    add('--train_cases', type=int, default=21)
    return parser.parse_args()
if __name__ == '__main__':
    args = arg_parser()
    # Pin the process to the requested GPU before creating the CUDA context.
    os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu_id)
    # Architecture must match the training configuration in vnetg.py.
    model = Vnet(slices=args.slices, inchans=[16,64,128], outchans=[64,128,256], down_layers=[3,3,3], up_layers=[3,3,3], dilations=[2,2,2])
    model = model.cuda()
    dataset_name = 'dataset_organ' + str(args.organ_id)
    # Snapshot name must match the one written by train() in vnetg.py.
    model_name = 'vnetg_e' + str(args.epoch) + 'S' + str(args.slices) + 'FD' + str(args.folds) + str(args.current_fold) + '_' + args.timestamp
    model_path = os.path.join(args.data_path, 'models', dataset_name, model_name + '.pkl')
    model.load_state_dict(torch.load(model_path))
    model.eval()
class TEST():
    """Iterative testing stage of Elastic Boundary Projection (EBP).

    For every test case this runs the boundary iteration twice per pivot
    point — once against the ground-truth signed field ('relabel') and once
    with the trained model — measures shell-DSC statistics, selects one
    converged shell per pivot, builds a pivot-pair overlap matrix and saves
    everything under `results_path` for the later voxelization stage.

    NOTE(review): HEIGHT, WIDTH, INIT_D, STEP, STEP_Z, PAD, Y_SCALE, CT_INF,
    CT_SUP and IN_SLICE come from data_generation's star import — confirm
    their values there. `args`, `model` and `model_name` are module globals
    set in the __main__ block above.
    """
    def __init__(self):
        """Resolve paths, precompute sphere directions and run every case."""
        self.organ_id = args.organ_id
        self.slices = args.slices
        self.images_path = os.path.join(args.data_path, 'test_images')
        self.labels_path = os.path.join(args.data_path, 'test_labels')
        self.results_path = os.path.join(args.data_path, 'results', args.organ_name,
            'train' + str(args.train_cases) + '_' + model_name)
        os.makedirs(self.results_path, exist_ok=True)
        self.valid_cases_list = os.listdir(self.images_path)
        self.valid_cases_list.sort()
        # Unit-sphere direction fields shared by every pivot iteration.
        self.direction_x, self.direction_y, self.direction_z = self.set_sphere_projection()
        self.DSC_valid_lists = []
        # Per-pivot-kind DSC statistics at the 0.99 inter-iteration threshold.
        self.DSC99_P0, self.DSC99_P1, self.DSC99_P2, self.DSC99_P3 = [], [], [], []
        self.D_DSC_text = open(os.path.join(self.results_path, 'D_DSC.txt'), 'a+')
        start_all = time.time()
        print('Now starts test: ' + args.organ_name + ' id ' + str(args.organ_id) + ' has ' + str(len(self.valid_cases_list)) + ' cases ...')
        print('Using training model ' + model_name + ' from ' + str(args.train_cases) + ' cases')
        for case_idx in self.valid_cases_list:
            start = time.time()
            self.test_case(case_idx)
            print('Test %s end: time elapsed %ds'%(case_idx, time.time() - start))
        self.summary()
        print('total time costs: %ds'%(time.time() - start_all))
    def test_case(self, case_idx):
        '''Entrance: run both iterations for one case and save its results.'''
        self.case_idx = case_idx
        self.image = np.load(os.path.join(self.images_path, self.case_idx)).astype(np.float32)
        self.label = np.load(os.path.join(self.labels_path, self.case_idx)).astype(np.float32)
        # Binarize the label to the target organ.
        self.label[self.label != self.organ_id] = 0
        self.label[self.label == self.organ_id] = 1
        self.relabel = np.load(os.path.join(args.data_path, 'relabel', dataset_name, self.case_idx))
        (self.height, self.width, self.depth) = self.image.shape
        print('Test Stage: %s begin!'%(self.case_idx))
        print('shape', self.image.shape, self.label.shape, self.relabel.shape)
        self.bbox = self.get_bbox()
        print(self.case_idx, 'bounding box', self.bbox)
        self.relabel_D = []
        self.model_D = []
        self.pivot_list = []
        self.PIVOT_POINTS = 0
        # Pivot indices bucketed by relabel value at the pivot (see update_P_stat).
        self.P0, self.P1, self.P2, self.P3 = [], [], [], []
        self.iterate()
        # model_D: one D map per iteration per pivot.
        self.model_D = np.asarray(self.model_D)
        self.model_D = self.model_D.reshape(-1, ITER_TH, HEIGHT, WIDTH)
        self.model_D = self.model_D.astype(np.float32)
        # relabel_D: only the final ground-truth D map per pivot.
        self.relabel_D = np.asarray(self.relabel_D)
        self.relabel_D = self.relabel_D.astype(np.float32)
        self.relabel_D = self.relabel_D.reshape(-1, HEIGHT, WIDTH)
        self.pivot_list = np.asarray(self.pivot_list)
        self.selected_model_D = self.inter_DSC()
        self.binary_matrix = self.get_binary_matrix(self.selected_model_D)
        self.save_iteration_results()
    def summary(self):
        """Print and log the DSC statistics aggregated over all cases."""
        DSC_lists = np.asarray(self.DSC_valid_lists).mean(axis = 0)
        print('D_DSC Summary: for all pivots')
        print('all mean DSC', DSC_lists)
        print('P0 Inter_DSC 0.99:', np.mean(self.DSC99_P0))
        print('P1 Inter_DSC 0.99:', np.mean(self.DSC99_P1))
        print('P2 Inter_DSC 0.99:', np.mean(self.DSC99_P2))
        print('P3 Inter_DSC 0.99:', np.mean(self.DSC99_P3))
        self.D_DSC_text.write('D_DSC SUMMARY: \n')
        self.D_DSC_text.write('P0 Inter_DSC 0.99: ' + str(np.mean(self.DSC99_P0)) + '.\n')
        self.D_DSC_text.write('P1 Inter_DSC 0.99: ' + str(np.mean(self.DSC99_P1)) + '.\n')
        self.D_DSC_text.write('P2 Inter_DSC 0.99: ' + str(np.mean(self.DSC99_P2)) + '.\n')
        self.D_DSC_text.write('P3 Inter_DSC 0.99: ' + str(np.mean(self.DSC99_P3)) + '.\n')
    def set_sphere_projection(self):
        '''initialize the (x,y,z) unit sphere coordinate'''
        direction_x = np.zeros((HEIGHT,WIDTH))
        direction_y = np.zeros((HEIGHT,WIDTH))
        direction_z = np.zeros((HEIGHT,WIDTH))
        # p: polar angles with uniformly distributed cos(p); q: azimuth angles.
        p = np.arccos((2 * np.arange(1, HEIGHT+1) / (HEIGHT+1)) -1)
        q = 2 * np.pi * np.arange(WIDTH) / WIDTH
        direction_x = np.outer(np.sin(p), np.cos(q)) # col vector * row vector
        direction_y = np.outer(np.sin(p), np.sin(q))
        direction_z += np.cos(p)[:, np.newaxis] # col vector, horizontal broadcast
        # Torch copy (3, H, W) is fed to the model alongside the image slices.
        self.direction = np.concatenate((direction_x[np.newaxis, :], direction_y[np.newaxis, :], direction_z[np.newaxis, :]))
        self.direction = torch.from_numpy(self.direction.astype(np.float32))
        return direction_x, direction_y, direction_z
    def get_bbox(self):
        '''get 3D bounding box with padding'''
        non_zero = np.nonzero(self.label)
        return [max(0, non_zero[0].min() - PAD), min(self.height, non_zero[0].max() + PAD),
                max(0, non_zero[1].min() - PAD), min(self.width, non_zero[1].max() + PAD),
                max(0, non_zero[2].min() - PAD), min(self.depth, non_zero[2].max() + PAD)]
    def iterate(self):
        """Run both iterations from every pivot on a strided grid in the bbox."""
        for i in range(self.bbox[0], self.bbox[1], STEP):
            for j in range(self.bbox[2], self.bbox[3], STEP):
                for k in range(self.bbox[4], self.bbox[5], STEP_Z):
                    print('relabel[%d,%d,%d] = %.2f'%(i, j, k, self.relabel[i,j,k]))
                    self.get_surface_by_relabel(a=i, b=j, c=k)
                    self.get_surface_by_model(a=i, b=j, c=k)
                    self.pivot_list.append([i, j, k])
                    self.update_P_stat(a=i, b=j, c=k)
                    self.PIVOT_POINTS += 1
    def get_surface_by_relabel(self, a, b, c):
        '''ground-truth iteration'''
        D = INIT_D * np.ones((HEIGHT,WIDTH), dtype=np.float32)
        Y = np.zeros((HEIGHT,WIDTH), dtype=np.float32)
        iter = 0
        while True:
            start_iter = time.time()
            # Sample the signed field on the current shell and step D by it.
            Y = self.interp3(self.relabel, a + D * self.direction_x, b + D * self.direction_y, c + D * self.direction_z)
            np.clip(Y, a_min=-Y_SCALE, a_max=Y_SCALE, out=Y)
            D += Y
            np.clip(D, a_min=0, a_max=None, out=D) # avoid negative
            norm_Y = np.linalg.norm(Y)
            if self.PIVOT_POINTS % 50 == 0 or self.relabel[a,b,c] > -INIT_D:
                print('RELABEL', "Pivot point (%d,%d,%d), Iteration:%02d, normY is %.2f, min/mean/max/non0 D is %.2f/%.2f/%.2f/%d," \
                    "time elapsed %.4fs"% (a, b, c, iter, norm_Y, D.min(), D.mean(), D.max(), np.count_nonzero(D), time.time()-start_iter))
            iter += 1
            if iter >= ITER_TH: # we need the last D.
                self.relabel_D.append(D.copy())
                break
    def get_surface_by_model(self, a, b, c):
        '''model predicted iteration'''
        D = INIT_D * np.ones((HEIGHT, WIDTH), dtype=np.float32)
        # X: `slices` shell-neighborhood slices plus IN_SLICE interior slices.
        X = np.zeros((self.slices + IN_SLICE, HEIGHT, WIDTH), dtype=np.float32)
        iter = 0
        while True:
            start_iter = time.time()
            # Slices around the current shell at offsets U in [-(s-1)/2, (s-1)/2].
            for i in range(self.slices):
                U = i - ((self.slices - 1) // 2)
                X[i] = self.interp3(self.image,
                    a + np.maximum(D + U, 0) * self.direction_x,
                    b + np.maximum(D + U, 0) * self.direction_y,
                    c + np.maximum(D + U, 0) * self.direction_z)
            # Interior slices sampled at fractions of D between pivot and shell.
            for i in range(IN_SLICE):
                U = (i + 1) * D / (IN_SLICE + 1)
                X[self.slices + i] = self.interp3(self.image,
                    a + U * self.direction_x,
                    b + U * self.direction_y,
                    c + U * self.direction_z)
            np.clip(X, a_min=CT_INF, a_max=CT_SUP, out=X) # first interp, then normalize
            X += (-CT_INF)
            X /= (CT_SUP - CT_INF)
            # Model input: shell slices + directions + interior slices + directions.
            Y = model((torch.cat((torch.from_numpy(X[:self.slices]), self.direction,
                torch.from_numpy(X[self.slices:]), self.direction)) \
                .view(1,self.slices + IN_SLICE + 6,HEIGHT,WIDTH)).cuda().float())
            Y = Y.view(HEIGHT,WIDTH).data.cpu().numpy()
            self.model_D.append(D.copy()) # pay attention
            D += Y
            np.clip(D, a_min=0, a_max=None, out=D) # avoid negative
            norm_Y = np.linalg.norm(Y) # model's prediction of Y, not ground truth
            if self.PIVOT_POINTS % 50 == 0 or self.relabel[a,b,c] > -INIT_D:
                print('MODEL', "Pivot point (%d,%d,%d), Iteration:%02d, normY is %.2f, min/mean/max/non0 D is %.2f/%.2f/%.2f/%d," \
                    "time elapsed %.4fs"% (a, b, c, iter, norm_Y, D.min(), D.mean(), D.max(), np.count_nonzero(D), time.time()-start_iter))
            iter += 1
            if iter >= ITER_TH:
                break
    def interp3(self, Data, a, b, c):
        '''3D interpolation'''
        floor_a = np.floor(a)
        floor_b = np.floor(b)
        floor_c = np.floor(c)
        # Clamp so the +1 neighbors below stay inside the volume.
        np.clip(floor_a, a_min=0, a_max=self.height-2, out=floor_a)
        np.clip(floor_b, a_min=0, a_max=self.width-2, out=floor_b)
        np.clip(floor_c, a_min=0, a_max=self.depth-2, out=floor_c)
        floor_a = floor_a.astype(np.uint16)
        floor_b = floor_b.astype(np.uint16)
        floor_c = floor_c.astype(np.uint16)
        da = a - floor_a
        db = b - floor_b
        dc = c - floor_c
        # Trilinear blend of the 8 surrounding voxels.
        return (1-da) * ((1-db) * (Data[floor_a,floor_b,floor_c] * (1-dc) + \
                Data[floor_a,floor_b,floor_c+1] * (dc)) + \
            (db) * (Data[floor_a,floor_b+1,floor_c] * (1-dc) + \
                Data[floor_a,floor_b+1,floor_c+1] * (dc))) + \
            (da) * ((1-db) * (Data[floor_a+1,floor_b,floor_c] * (1-dc) + \
                Data[floor_a+1,floor_b,floor_c+1] * (dc)) + \
            (db) * (Data[floor_a+1,floor_b+1,floor_c] * (1-dc) + \
                Data[floor_a+1,floor_b+1,floor_c+1] * (dc)))
    def D_DSC(self, D1, D2):
        ''' DSC metric applied to D, a 3D shell '''
        # Cubing D makes each direction's contribution proportional to the
        # volume of the cone it subtends; EPSILON guards empty shells.
        return (2 * (np.minimum(D1, D2) ** 3).sum() + EPSILON) \
            / ((D1 ** 3 + D2 ** 3).sum() + EPSILON)
    def update_P_stat(self, a, b, c):
        """Bucket the current pivot by the relabel value at its location."""
        value = self.relabel[a,b,c]
        if value >= INIT_D:
            self.P0.append(self.PIVOT_POINTS)
        elif value > 0:
            self.P1.append(self.PIVOT_POINTS)
        elif value > - INIT_D:
            self.P2.append(self.PIVOT_POINTS)
        else:
            self.P3.append(self.PIVOT_POINTS)
    def inter_DSC(self):
        ''' STAT:
        (1) DSC between groud-truth D and i-th predicted D in each pivot
        (2) DSC between (i-1)-th predicted D and i-th predicted D in each pivot
        '''
        DSC_list = np.zeros((self.PIVOT_POINTS, ITER_TH))
        selected_model_D = np.zeros((self.PIVOT_POINTS, HEIGHT, WIDTH))
        INTER_D_DSC = np.zeros((6, self.PIVOT_POINTS))
        # first dimension means inter_DSC in [0.90, 0.93, 0.95, 0.97, 0.98, 0.99]
        for p in range(self.PIVOT_POINTS):
            for i in range(ITER_TH):
                DSC_list[p,i] = self.D_DSC(self.relabel_D[p], self.model_D[p,i])
                if i == 0:
                    inter_DSC = 0
                else:
                    inter_DSC = self.D_DSC(self.model_D[p,i-1], self.model_D[p,i])
                # Record the accuracy at the first iteration whose shell has
                # converged past each threshold (or at the last iteration).
                for idx, thres in enumerate([0.90, 0.93, 0.95, 0.97, 0.98, 0.99]):
                    if INTER_D_DSC[idx, p] == 0 and (i == ITER_TH - 1 or inter_DSC >= thres):
                        INTER_D_DSC[idx, p] = DSC_list[p,i]
                # Keep the first shell converged past INTER_DSC_TH as the result.
                if not selected_model_D[p].any() and (i == ITER_TH - 1 or inter_DSC >= INTER_DSC_TH):
                    selected_model_D[p] = self.model_D[p,i]
        print(self.case_idx, 'all pivots')
        print('mean_D_DSC', DSC_list.mean(axis=0))
        self.D_DSC_text.write('*' * 10 + ' ' + self.case_idx + '\n')
        for idx, thres in enumerate([0.90, 0.93, 0.95, 0.97, 0.98, 0.99]):
            print('Inter_D_DSC ' + str(thres), INTER_D_DSC[idx].mean())
            self.D_DSC_text.write('Inter_D_DSC ' + str(thres) + ' ' + str(INTER_D_DSC[idx].mean()) + '\n')
        self.DSC_valid_lists.append(DSC_list.mean(axis=0))
        # P0, P1, P2, P3. D DSC 0.99 stats:
        print('*' * 10, 'P0,P1,P2,P3 summary of Inter DSC 0.99')
        print('P0: Inter_D_DSC 0.99:', INTER_D_DSC[-1, self.P0].mean())
        print('P1: Inter_D_DSC 0.99:', INTER_D_DSC[-1, self.P1].mean())
        print('P2: Inter_D_DSC 0.99:', INTER_D_DSC[-1, self.P2].mean())
        print('P3: Inter_D_DSC 0.99:', INTER_D_DSC[-1, self.P3].mean())
        self.DSC99_P0.append(INTER_D_DSC[-1, self.P0].mean())
        self.DSC99_P1.append(INTER_D_DSC[-1, self.P1].mean())
        self.DSC99_P2.append(INTER_D_DSC[-1, self.P2].mean())
        self.DSC99_P3.append(INTER_D_DSC[-1, self.P3].mean())
        self.D_DSC_text.write('*' * 10 + ' P0,P1,P2,P3 summary of Inter DSC\n')
        self.D_DSC_text.write('P0: Inter_D_DSC 0.99: ' + str(INTER_D_DSC[-1, self.P0].mean()) + '\n')
        self.D_DSC_text.write('P1: Inter_D_DSC 0.99: ' + str(INTER_D_DSC[-1, self.P1].mean()) + '\n')
        self.D_DSC_text.write('P2: Inter_D_DSC 0.99: ' + str(INTER_D_DSC[-1, self.P2].mean()) + '\n')
        self.D_DSC_text.write('P3: Inter_D_DSC 0.99: ' + str(INTER_D_DSC[-1, self.P3].mean()) + '\n\n\n')
        self.D_DSC_text.flush()
        return selected_model_D
    def save_iteration_results(self):
        """Persist the per-case arrays consumed by the voxelization stage."""
        np.save(os.path.join(self.results_path,
            self.case_idx[:-4] + '_selected_model_D.npy'), self.selected_model_D.astype(np.float32))
        np.save(os.path.join(self.results_path,
            self.case_idx[:-4] + '_relabel_D.npy'), self.relabel_D)
        np.save(os.path.join(self.results_path,
            self.case_idx[:-4] + '_pivot_list.npy'), self.pivot_list)
        np.save(os.path.join(self.results_path,
            self.case_idx[:-4] + '_binary_matrix.npy'), self.binary_matrix)
        print('save iteration results successfully!')
    def set_DSC(self, s1, s2):
        """Dice coefficient between two point-index sets."""
        return 2 * len(s1 & s2) / (len(s1) + len(s2) + EPSILON) # DSC([], []) is 0
    def nearest_spherical_dir(self, vec):
        """Return the (h, w) sphere direction closest to `vec`, or (-1, -1)
        for the zero vector."""
        if not vec.any():
            return -1, -1
        inner_prod = self.direction_x * vec[0] + self.direction_y * vec[1] + self.direction_z * vec[2]
        max_dir = np.argmax(inner_prod.reshape(-1)) # no abs
        return max_dir // WIDTH, max_dir % WIDTH # h, w
    def get_binary_matrix(self, D):
        '''prepare a relation matrix between the shells of any pivot pair.
        We sample a dense set of points in ROI and then count how many points are within the shells
        to build a set of inner points for each pivot.
        Then the relation is defined as DSC (IoU) between the sets of any pivot pair.
        '''
        # sample points
        start = time.time()
        point_list = []
        for i in range(self.bbox[0], self.bbox[1], POINT_STEP):
            for j in range(self.bbox[2], self.bbox[3], POINT_STEP):
                for k in range(self.bbox[4], self.bbox[5], POINT_STEP):
                    point_list.append([i, j, k])
        point_list = np.asarray(point_list)
        # count inner points for each pivot
        in_shell_list = []
        start = time.time()
        for p in range(self.PIVOT_POINTS):
            in_pivot_set = set()
            rad = min(30, D[p].max()) # diameter 60 pixels. I think that's enough
            if rad == 0:
                in_shell_list.append(in_pivot_set)
                continue
            # A point is inside the shell when it is closer to the pivot than
            # the shell radius along its nearest sphere direction.
            near_pivot_point_list = np.where(np.linalg.norm(point_list - self.pivot_list[p], axis=1) < rad)[0]
            for id in near_pivot_point_list:
                vec = point_list[id] - self.pivot_list[p]
                h, w = self.nearest_spherical_dir(vec)
                dist = np.linalg.norm(vec)
                if h == -1 or D[p, h, w] > dist:
                    in_pivot_set.add(id)
            in_shell_list.append(in_pivot_set)
        print('in_shell_list done', time.time() - start)
        # compute set DSC for any two pivots
        start = time.time()
        assert self.PIVOT_POINTS == len(in_shell_list)
        binary_matrix = np.zeros((self.PIVOT_POINTS, self.PIVOT_POINTS), dtype=np.float32)
        for p in range(self.PIVOT_POINTS):
            for q in range(p + 1, self.PIVOT_POINTS):
                binary_matrix[p, q] = self.set_DSC(in_shell_list[p], in_shell_list[q])
        binary_matrix += np.swapaxes(binary_matrix, 0, 1) # copy upper tri to lower
        print('binary matrix done', time.time() - start)
        return binary_matrix
if __name__ == '__main__':
    # Instantiating TEST runs the whole test pass (see TEST.__init__).
    test = TEST()
| Python |
3D | twni2016/Elastic-Boundary-Projection | run.sh | .sh | 1,627 | 55 | # Elastic Boundary Projection for 3D Medical Image Segmentation, CVPR 2019
# Author: Tianwei Ni.
# turn on these switches to execute each module
# master switches: set a flag to 1 to run the corresponding stage below
ENABLE_DATA_DOWNLOAD=0
ENABLE_DATA_GENERATION=0
ENABLE_TRAINING=0
ENABLE_TESTING=0
# shared configuration used by every stage
DATA_PATH='/mnt/data0/tianwei/EBP_MD_Spleen/'
GPU_ID=0
CURRENT_FOLD=0
FOLDS=1
ORGAN_ID=1
SLICES=5
# data download: convert the NIfTI release into .npy volumes
if [ "$ENABLE_DATA_DOWNLOAD" = "1" ]; then
    python nii2npy.py --data_path $DATA_PATH
fi
# data generation
if [ "$ENABLE_DATA_GENERATION" = "1" ]; then
    GENERATION_TIMESTAMP=$(date +'%Y%m%d')
    GENERATION_LOG=${DATA_PATH}logs/GENERATION_FD${FOLDS}_${GENERATION_TIMESTAMP}.txt
    # 2>&1 | tee duplicates stdout+stderr into the log file
    python -u data_generation.py \
    --data_path $DATA_PATH --organ_id $ORGAN_ID --slices $SLICES --folds $FOLDS \
    2>&1 | tee $GENERATION_LOG
fi
# training stage
BATCH=32
EPOCH=5
LR=0.001
if [ "$ENABLE_TRAINING" = "1" ]; then
    TRAINING_TIMESTAMP=$(date +'%Y%m%d_%H%M%S')
    TRAINING_LOG=${DATA_PATH}logs/S${SLICES}FD${FOLDS}${CURRENT_FOLD}_vnetg_${TRAINING_TIMESTAMP}.txt
    python -u vnetg.py \
    --data_path $DATA_PATH --slices $SLICES --organ_id $ORGAN_ID --folds $FOLDS -f $CURRENT_FOLD \
    -b $BATCH -e $EPOCH --lr $LR --gpu_id $GPU_ID -t $TRAINING_TIMESTAMP \
    2>&1 | tee $TRAINING_LOG
else
    # placeholder so the testing stage still receives a -t argument
    TRAINING_TIMESTAMP=_
fi
# testing stage
if [ "$ENABLE_TESTING" = "1" ]; then
    python test_iter.py \
    --data_path $DATA_PATH --slices $SLICES --organ_id $ORGAN_ID --folds $FOLDS -f $CURRENT_FOLD \
    -e $EPOCH --gpu_id $GPU_ID -t $TRAINING_TIMESTAMP
    python test_voxel.py \
    --data_path $DATA_PATH --slices $SLICES --organ_id $ORGAN_ID --folds $FOLDS -f $CURRENT_FOLD \
    -e $EPOCH --gpu_id $GPU_ID -t $TRAINING_TIMESTAMP
fi
| Shell |
3D | potpov/New-Maxillo-Dataset-Segmentation | train.py | .py | 4,043 | 108 | import torch
import logging
from tqdm import tqdm
from torch import nn
import torchio as tio
import torch.distributed as dist
def train2D(model, train_loader, loss_fn, optimizer, epoch, writer, evaluator, phase='Train'):
    """Run one training epoch of the 2D segmentation model.

    Args:
        model: CUDA model mapping (BS, C, H, W) images to (BS, Classes, H, W) logits.
        train_loader: yields (images, labels, names, partition_weights, _) tuples.
        loss_fn: callable(outputs, labels, partition_weights) -> scalar loss.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (for progress bar and TensorBoard).
        writer: optional TensorBoard SummaryWriter (skipped when None).
        evaluator: project metric accumulator; reset at epoch start.
        phase: tag used for logging/metrics.

    Returns:
        (mean epoch loss, mean IoU) as computed by the evaluator.
    """
    model.train()
    evaluator.reset_eval()
    losses = []
    for i, (images, labels, names, partition_weights, _) in tqdm(enumerate(train_loader), total=len(train_loader),
                                                                 desc='train epoch {}'.format(str(epoch))):
        images = images.cuda()
        labels = labels.cuda()
        partition_weights = partition_weights.cuda()
        optimizer.zero_grad()
        outputs = model(images)  # BS, Classes, H, W
        loss = loss_fn(outputs, labels, partition_weights)
        losses.append(loss.item())
        loss.backward()
        optimizer.step()
        # final predictions: argmax over classes, or thresholded sigmoid for
        # the single-channel (binary) case.
        if outputs.shape[1] > 1:
            outputs = torch.argmax(torch.nn.Softmax(dim=1)(outputs), dim=1).cpu().numpy()
        else:
            outputs = nn.Sigmoid()(outputs)  # BS, 1, H, W
            outputs[outputs > .5] = 1
            outputs[outputs != 1] = 0
            # NOTE(review): squeeze() drops the batch dim too when BS == 1 — confirm intended.
            outputs = outputs.squeeze().cpu().detach().numpy()  # BS, H, W
        labels = labels.squeeze().cpu().numpy()  # BS, Z, H, W
        evaluator.compute_metrics(outputs, labels, images, names, phase)
    epoch_train_loss = sum(losses) / len(losses)
    epoch_iou, epoch_dice, epoch_haus = evaluator.mean_metric(phase=phase)
    if writer is not None:
        writer.add_scalar(f'Loss/{phase}', epoch_train_loss, epoch)
        writer.add_scalar(f'{phase}', epoch_iou, epoch)
    # logging.info(
    #     f'{phase} Epoch [{epoch}], '
    #     f'{phase} Mean Loss: {epoch_train_loss}, '
    #     f'{phase} Mean Metric (IoU): {epoch_iou}'
    #     f'{phase} Mean Metric (Dice): {epoch_dice}'
    #     f'{phase} Mean Metric (haus): {epoch_haus}'
    # )
    return epoch_train_loss, epoch_iou
def train(model, train_loader, loss_fn, optimizer, epoch, writer, evaluator, phase='Train'):
    """Run one training epoch of the 3D segmentation model on torchio batches.

    Args:
        model: CUDA model mapping (B, C, Z, H, W) volumes to logits of the
            same spatial shape.
        train_loader: yields torchio-style dicts with 'data' and 'label' keys.
        loss_fn: callable(outputs, labels, partition_weights) -> scalar loss.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (for progress bar and TensorBoard).
        writer: optional TensorBoard SummaryWriter (skipped when None).
        evaluator: project metric accumulator; reset at epoch start.
        phase: tag used for logging/metrics.

    Returns:
        (mean epoch loss, mean IoU) as computed by the evaluator.
    """
    model.train()
    evaluator.reset_eval()
    losses = []
    for i, d in tqdm(enumerate(train_loader), total=len(train_loader), desc=f'{phase} epoch {str(epoch)}'):
        images = d['data'][tio.DATA].float().cuda()
        labels = d['label'][tio.DATA].cuda()
        # Uniform weights: every sample contributes equally to the loss.
        partition_weights = torch.ones(images.shape[0]).cuda()
        optimizer.zero_grad()
        outputs = model(images)  # output -> B, C, Z, H, W
        assert outputs.ndim == labels.ndim, f"Gt and output dimensions are not the same before loss. {outputs.ndim} vs {labels.ndim}"
        loss = loss_fn(outputs, labels, partition_weights)
        losses.append(loss.item())
        loss.backward()
        optimizer.step()
        # final predictions
        # shape B, C, xyz -> softmax -> B, xyz
        # shape 1, C, xyz -> softmax -> 1, xyz
        # shape B, 1, xyz -> sigmoid + sqz -> B, xyz
        # shape B, 1, xyz -> sigmoid + sqz -> xyz
        if outputs.shape[1] > 1:
            outputs = torch.argmax(torch.nn.Softmax(dim=1)(outputs), dim=1).cpu().numpy()
        else:
            outputs = nn.Sigmoid()(outputs)  # BS, 1, Z, H, W
            outputs[outputs > .5] = 1
            outputs[outputs != 1] = 0
            # NOTE(review): squeeze() drops the batch dim too when BS == 1 (see shape table above).
            outputs = outputs.squeeze().cpu().detach().numpy()  # BS, Z, H, W
        labels = labels.squeeze().cpu().numpy()  # BS, Z, H, W
        evaluator.compute_metrics(outputs, labels, images, str(i), phase)
    epoch_train_loss = sum(losses) / len(losses)
    epoch_iou, epoch_dice, epoch_haus = evaluator.mean_metric(phase=phase)
    if writer is not None:
        writer.add_scalar(f'Loss/{phase}', epoch_train_loss, epoch)
        writer.add_scalar(f'{phase}', epoch_iou, epoch)
    # logging.info(
    #     f'{phase} Epoch [{epoch}], '
    #     f'{phase} Mean Loss: {epoch_train_loss}, '
    #     f'{phase} Mean Metric (IoU): {epoch_iou}'
    #     f'{phase} Mean Metric (Dice): {epoch_dice}'
    #     f'{phase} Mean Metric (haus): {epoch_haus}'
    # )
    return epoch_train_loss, epoch_iou
3D | potpov/New-Maxillo-Dataset-Segmentation | alpha_shape.py | .py | 5,430 | 153 | import json
from hull.voxelize.voxelize import voxelize
from scipy.spatial import Delaunay
import numpy as np
from collections import defaultdict
from scipy.ndimage import binary_fill_holes
import os
import pathlib
from glob import glob
def alpha_shape_3D(pos, alpha):
    """
    Compute the alpha shape (concave hull) of a set of 3D points.
    Parameters:
        pos - np.array of shape (n,3) points.
        alpha - alpha value.
    return
        outer surface vertex indices, edge indices, and triangle indices
    """
    tetra = Delaunay(pos)
    # Find radius of the circumsphere.
    # By definition, radius of the sphere fitting inside the tetrahedral needs
    # to be smaller than alpha value
    # http://mathworld.wolfram.com/Circumsphere.html
    # `Delaunay.vertices` was deprecated and removed in SciPy 1.11;
    # `simplices` is the long-standing equivalent attribute.
    tetrapos = np.take(pos, tetra.simplices, axis=0)
    normsq = np.sum(tetrapos ** 2, axis=2)[:, :, None]
    ones = np.ones((tetrapos.shape[0], tetrapos.shape[1], 1))
    a = np.linalg.det(np.concatenate((tetrapos, ones), axis=2))
    Dx = np.linalg.det(np.concatenate((normsq, tetrapos[:, :, [1, 2]], ones), axis=2))
    Dy = -np.linalg.det(np.concatenate((normsq, tetrapos[:, :, [0, 2]], ones), axis=2))
    Dz = np.linalg.det(np.concatenate((normsq, tetrapos[:, :, [0, 1]], ones), axis=2))
    c = np.linalg.det(np.concatenate((normsq, tetrapos), axis=2))
    r = np.sqrt(Dx ** 2 + Dy ** 2 + Dz ** 2 - 4 * a * c) / (2 * np.abs(a))
    # Find tetrahedrals: keep those whose circumsphere radius is below alpha
    tetras = tetra.simplices[r < alpha, :]
    # triangles: the four faces of each kept tetrahedron
    TriComb = np.array([(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)])
    Triangles = tetras[:, TriComb].reshape(-1, 3)
    Triangles = np.sort(Triangles, axis=1)
    # Remove triangles that occurs twice, because they are within shapes
    TrianglesDict = defaultdict(int)
    for tri in Triangles: TrianglesDict[tuple(tri)] += 1
    Triangles = np.array([tri for tri in TrianglesDict if TrianglesDict[tri] == 1])
    # edges: unique sorted vertex pairs of the surface triangles
    EdgeComb = np.array([(0, 1), (0, 2), (1, 2)])
    Edges = Triangles[:, EdgeComb].reshape(-1, 2)
    Edges = np.sort(Edges, axis=1)
    Edges = np.unique(Edges, axis=0)
    Vertices = np.unique(Edges)
    return Vertices, Edges, Triangles
def bilinear_interpolation(plane, x_func, y_func):
    """
    bilinear interpolation between four pixels of the image given a float set of coords
    Args:
        plane: array indexed as plane[:, y, x]; interpolation is per leading channel.
        x_func (float): x coordinate
        y_func (float): y coordinate
    Returns:
        (float) interpolated value according to https://en.wikipedia.org/wiki/Bilinear_interpolation
    """
    x_lo = int(np.floor(x_func))
    y_lo = int(np.floor(y_func))
    frac_x = x_func - x_lo
    frac_y = y_func - y_lo
    # Four neighboring pixels around (x_func, y_func).
    top_left = plane[:, y_lo, x_lo]
    bottom_left = plane[:, y_lo + 1, x_lo]
    top_right = plane[:, y_lo, x_lo + 1]
    bottom_right = plane[:, y_lo + 1, x_lo + 1]
    # Weighted blend; the four weights sum to one.
    return (top_left * (1 - frac_x) * (1 - frac_y)
            + bottom_left * (1 - frac_x) * frac_y
            + top_right * frac_x * (1 - frac_y)
            + bottom_right * frac_x * frac_y)
def concave_hull(coords, shape, alpha=5):
    """Voxelize the alpha-shape surface of `coords` into a volume of `shape`.

    Returns a (surface, filled) pair of int volumes: the rasterized shell and
    the same shell with its interior filled.
    """
    _, _, triangles = alpha_shape_3D(coords, alpha=alpha)
    # Gather the three corner coordinates of every surface triangle.
    tri_faces = np.stack([coords[tri] for tri in triangles])
    shell = np.zeros(shape)
    # Rasterize each triangle onto the voxel grid.
    for z, y, x in voxelize(np.stack(tri_faces)):
        shell[z, y, x] = 1
    return shell.astype(int), binary_fill_holes(shell).astype(int)
def convex_hull(gt):
    """Return a slightly eroded, hole-filled convex hull of a binary volume.

    Args:
        gt (np.ndarray): binary ground-truth volume.
    Returns:
        np.ndarray: convex hull of `gt`, holes filled, eroded by 2 iterations.
    """
    from hull.smoother import delaunay as mydelaunay
    from scipy.ndimage import binary_erosion
    # fix: the local result used to shadow this function's own name
    hull_vol = mydelaunay(gt)
    hull_vol = binary_fill_holes(hull_vol).astype(int)
    # shrink the hull a little so it hugs the object more tightly
    reduced = binary_erosion(hull_vol, iterations=2)
    return reduced
def read_from_file(patient):
    """Load a patient's annotation files and map the 2D control points into 3D volume space.

    Args:
        patient (str): path to a patient directory containing 'masks.json'
            (per-plane 2D control points) and 'planes.npy' (per-plane lookup
            grids giving, for each plane pixel, its 3D volume coordinate).
    Returns:
        np.ndarray of shape (N, 3) with the 3D coordinates of all control
        points, or None when the expected files are missing/unreadable.
    """
    try:
        with open(
                os.path.join(patient, 'masks.json')
        ) as f:
            mask_config = json.load(f)
        planes = np.load(os.path.join(patient, 'planes.npy'), allow_pickle=True)
    except Exception as e:
        # best effort: a patient with missing/corrupt files is skipped (returns None)
        print(f"WARNING: patient {patient} \nmiss folders. {e}")
        return
    # gt = convert_to_two_labels(gt)
    planes = planes[:, ::-1, ...]  # X Y Z to Z Y X
    ########
    # MOVING THE CONTROL POINTS TO THE VOLUME
    ########
    assert len(mask_config['masks']) == planes.shape[0], f'different number of masks and planes -> unable to compute alpha shape, use exported volume for {patient}'
    voxel_cp = []
    for i, cps in enumerate(mask_config['masks']):
        if cps is None:
            continue  # plane without annotations
        plane = planes[i]
        for cp in cps['cp']:
            # NOTE(review): control points appear to be annotated on a 4x
            # upscaled image, hence the division — confirm against the tool
            x = cp['x'] / 4
            y = cp['y'] / 4
            # fractional pixel -> interpolated 3D coordinate from the plane grid
            xyz = bilinear_interpolation(plane, x, y)
            voxel_cp.append(xyz)
    coords = np.stack(voxel_cp)
    return coords
if __name__ == '__main__':
    TMP_DIR = r'\dense_export_dir'  # where we save results before moving them to our dataset
    patients = ['list_of_patients_dirs_here!']
    for numpatient, patient in enumerate(patients):
        print(f"making patient: {patient}")
        # bug fix: 'gt' used to be loaded inside the try block below, so when
        # read_from_file/concave_hull failed before the load, the except
        # handler raised a NameError on 'gt' instead of falling back.
        gt = np.load(os.path.join(patient, 'data.npy'))
        try:
            coords = read_from_file(patient)
            _, result_filled = concave_hull(coords, gt.shape)
        except Exception:  # narrowed from a bare except
            print("for this patient planes and coords dont match. using the tool exported volume.")
            result_filled = gt
        # save result
        log_dir = pathlib.Path(os.path.join(TMP_DIR, patient))
        log_dir.mkdir(parents=True, exist_ok=True)
        np.save(os.path.join(TMP_DIR, patient, 'gt_alpha.npy'), result_filled)
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | eval.py | .py | 6,275 | 142 | from statistics import mean
import torch
import pathlib
import numpy as np
from skimage import metrics
import os
import pandas as pd
import zipfile
class Eval:
    """Accumulate segmentation metrics (IoU, Dice, Hausdorff) across patients.

    Metrics are appended per patient by compute_metrics() and summarized by
    mean_metric(); in the 'Final' phase an Excel report is also written and
    the dumped prediction volumes are zipped.
    """

    def __init__(self, loader_config, project_dir, skip_dump=False):
        # per-patient accumulators, cleared by reset_eval()
        self.iou_list = []
        self.dice_list = []
        self.config = loader_config
        self.project_dir = project_dir
        self.eps = 1e-06  # avoids division by zero in IoU/Dice
        self.classes = loader_config['labels']  # label-name -> label-value mapping
        self.hausdord_splits = 6  # number of linspace boundaries for the verbose Hausdorff strips
        self.hausdord_verbose = []  # per-patient rows of per-strip Hausdorff distances
        self.hausdorf_list = []
        self.test_ids = []
        self.skip_dump = skip_dump  # when True, do not save gt/pred/input volumes to disk

    def reset_eval(self):
        """Clear every accumulator (called at the start/end of a phase)."""
        self.iou_list.clear()
        self.dice_list.clear()
        self.hausdord_verbose = []
        self.hausdorf_list.clear()
        self.test_ids.clear()

    def mean_metric(self, phase):
        """Summarize the accumulated metrics, then reset the accumulators.

        Returns:
            (mean IoU, mean Dice, max Hausdorff); zeros when nothing accumulated.
        Raises:
            Exception: when `phase` is not Train/Validation/Test/Final.
        """
        if phase not in ["Train", "Validation", "Test", "Final"]:
            raise Exception(f"this phase is not valid {phase}")
        iou = 0 if len(self.iou_list) == 0 else mean(self.iou_list)
        dice = 0 if len(self.dice_list) == 0 else mean(self.dice_list)
        # Hausdorff is reported as the worst case over patients, not the mean
        haus = 0 if len(self.hausdorf_list) == 0 else max(self.hausdorf_list)
        if phase == "Final":
            # write a per-patient Excel report with split-wise Hausdorff columns
            excl_dest = os.path.join(self.project_dir, 'logs', 'results.xlsx')
            # column order matches hausdorf(): left strips, "L entire", right strips, "R entire"
            cols = [f"s{n}" for n in range(self.hausdord_splits - 1)] + ["L entire"] + [f"s{n}" for n in range(self.hausdord_splits - 1)] + ["R entire"]
            df = pd.DataFrame(np.stack(self.hausdord_verbose), columns=cols)
            df.replace([np.inf, 0], -1, inplace=True)  # mark empty/degenerate distances
            df = df.loc[:, df.max() > -1]  # removing column with empty hausdorf
            df.insert(0, 'PATIENT', self.test_ids, True)
            df['haus tot'] = np.round(self.hausdorf_list, 2)
            df['IoU'] = np.round(self.iou_list, 2)
            df['dice'] = np.round(self.dice_list, 2)
            df.to_excel(excl_dest, index=False)
            self.save_zip()  # zip volumes with predictions
        self.reset_eval()
        return iou, dice, haus

    def compute_metrics(self, pred, gt, images, names, phase):
        """Append IoU/Dice/Hausdorff for every volume in the batch.

        pred/gt/images: arrays of shape (Z, H, W) or (BS, Z, H, W);
        names: patient id or list of ids; phase: current phase name.
        """
        if phase not in ["Train", "Validation", "Test", "Final"]:
            raise Exception(f"this phase is not valid {phase}")
        # promote single volumes to a batch of one
        pred = pred[None, ...] if pred.ndim == 3 else pred
        gt = gt[None, ...] if gt.ndim == 3 else gt
        images = images[None, ...] if images.ndim == 3 else images
        assert pred.ndim == gt.ndim, f"Gt and output dimensions are not the same before eval. {pred.ndim} vs {gt.ndim}"
        excluded = ['BACKGROUND', 'UNLABELED']
        labels = [v for k, v in self.classes.items() if k not in excluded]  # exclude background from here
        names = names if isinstance(names, list) else [names]
        self.test_ids += names
        for batch_id in range(pred.shape[0]):
            self.iou_list.append(self.iou(pred[batch_id], gt[batch_id], labels))
            self.dice_list.append(self.dice_coefficient(pred[batch_id], gt[batch_id], labels))
            self.hausdorf_list.append(self.hausdorf(pred[batch_id], gt[batch_id], phase))
            if phase == 'Final' and not self.skip_dump:
                self.dump(gt[batch_id], pred[batch_id], images[batch_id], names[batch_id])

    def hausdorf(self, pred, gt, phase, pixel_spacing=0.3):
        """Hausdorff distance between pred and gt, scaled by `pixel_spacing`.

        In the 'Final' phase the volume is additionally split into left/right
        halves (along the last axis) and vertical strips (along axis 1); the
        per-strip distances are stored in self.hausdord_verbose for the report.
        """
        if phase == "Final":
            left = []
            right = []
            width = gt.shape[1]
            splits = np.linspace(0, width, self.hausdord_splits).astype(int)
            half = gt.shape[2] // 2  # left/right boundary along the W axis
            for i in range(len(splits) - 1):
                left.append(metrics.hausdorff_distance(
                    gt[:, splits[i]:splits[i + 1], :half],
                    pred[:, splits[i]:splits[i + 1], :half]
                ) * pixel_spacing)
                right.append(metrics.hausdorff_distance(
                    gt[:, splits[i]:splits[i + 1], half:],
                    pred[:, splits[i]:splits[i + 1], half:]
                ) * pixel_spacing)
            # whole-half distances appended last ("L entire" / "R entire" columns)
            right.append(metrics.hausdorff_distance(gt[..., half:], pred[..., half:]) * pixel_spacing)
            left.append(metrics.hausdorff_distance(gt[..., :half], pred[..., :half]) * pixel_spacing)
            self.hausdord_verbose.append(np.round(np.concatenate((left, right)).astype(float), 2))
        return metrics.hausdorff_distance(gt, pred) * pixel_spacing

    def iou(self, pred, gt, labels):
        """
        Mean per-class intersection-over-union.
        :param pred: SHAPE MUST BE (Z, H W) or (BS, Z, H, W)
        :param gt: SHAPE MUST BE (Z, H W) or (BS, Z, H, W)
        :return: IoU averaged over `labels`
        """
        c_score = []
        for c in labels:
            gt_class_idx = np.argwhere(gt.flatten() == c)
            intersection = np.sum(pred.flatten()[gt_class_idx] == c)
            union = np.argwhere(gt.flatten() == c).size + np.argwhere(pred.flatten() == c).size - intersection
            c_score.append((intersection + self.eps) / (union + self.eps))
        return sum(c_score) / len(labels)

    def dice_coefficient(self, pred, gt, labels):
        """Mean per-class Dice coefficient: 2*|A∧B| / (|A| + |B|)."""
        c_score = []
        for c in labels:
            gt_class_idx = np.argwhere(gt.flatten() == c)
            intersection = np.sum(pred.flatten()[gt_class_idx] == c)
            dice_union = np.argwhere(gt.flatten() == c).size + np.argwhere(pred.flatten() == c).size
            c_score.append((2 * intersection + self.eps) / (dice_union + self.eps))
        return sum(c_score) / len(labels)

    def dump(self, gt_volume, prediction, images, patient_name):
        """Save gt/pred/input volumes for one patient under <project>/numpy/<patient>."""
        save_dir = os.path.join(self.project_dir, 'numpy', f'{patient_name}')
        pathlib.Path(save_dir).mkdir(parents=True, exist_ok=True)
        np.save(os.path.join(save_dir, 'gt.npy'), gt_volume)
        np.save(os.path.join(save_dir, 'pred.npy'), prediction)
        np.save(os.path.join(save_dir, 'input.npy'), images)

    def save_zip(self):
        """Compress the whole <project>/numpy dump directory into <project>/numpy.zip."""
        zipf = zipfile.ZipFile(os.path.join(self.project_dir, 'numpy.zip'), 'w', zipfile.ZIP_DEFLATED)
        for root, dirs, files in os.walk(os.path.join(self.project_dir, 'numpy')):
            for file in files:
                zipf.write(os.path.join(root, file),
                           os.path.relpath(os.path.join(root, file),
                                           os.path.join(os.path.join(self.project_dir))))
        zipf.close()
3D | potpov/New-Maxillo-Dataset-Segmentation | losses.py | .py | 5,505 | 133 | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
class JaccardLoss(torch.nn.Module):
    """Soft Jaccard (IoU) loss.

    `weight`, `size_average` and `min_pixels` are stored for API compatibility
    but not used in the computation. With per_volume=True the loss is computed
    independently for each element of the batch; otherwise the whole batch is
    treated as one flat volume.
    """

    def __init__(self, weight=None, size_average=True, per_volume=False, apply_sigmoid=False,
                 min_pixels=5):
        super().__init__()
        self.size_average = size_average
        self.weight = weight
        self.per_volume = per_volume
        self.apply_sigmoid = apply_sigmoid
        self.min_pixels = min_pixels

    def forward(self, pred, target):
        """Optionally squash logits, then compute the Jaccard loss."""
        prob = torch.sigmoid(pred) if self.apply_sigmoid else pred
        return self.jaccard(prob, target)

    def jaccard(self, outputs, targets):
        """Return 1 - soft IoU per row (one row per volume, or a single row)."""
        eps = 1e-3
        n_rows = outputs.size()[0] if self.per_volume else 1
        flat_out = outputs.contiguous().view(n_rows, -1)
        flat_tgt = targets.contiguous().view(n_rows, -1).float()
        inter = (flat_out * flat_tgt).sum(dim=1)
        union = (flat_out + flat_tgt).sum(dim=1) - inter
        return 1 - (inter + eps) / (union + eps)
class DiceLoss(nn.Module):
    """Multi-class soft Dice loss over 3D volumes (softmax over the class axis)."""

    def __init__(self, classes, device, partition_weights):
        super().__init__()
        self.eps = 1e-06  # numerical stability in the Dice denominator
        self.classes = classes  # label-name -> index mapping from the loader config
        self.device = device
        self.weights = partition_weights  # one weight per batch element (see assert below)
        self.weights = self.weights.to(self.device)

    def forward(self, pred, gt):
        """Return 1 - weighted Dice score, keeping only non-'UNLABELED' classes.

        pred: (B, C, Z, H, W) logits; gt: integer label volume, squeezable to (B, Z, H, W).
        """
        included = [v for k, v in self.classes.items() if k not in ['UNLABELED']]
        gt_onehot = torch.nn.functional.one_hot(gt.squeeze().long(), num_classes=len(self.classes))
        if gt.shape[0] == 1:  # we need to add a further axis after the previous squeeze()
            gt_onehot = gt_onehot.unsqueeze(0)
        # move the one-hot class axis next to the batch axis: (B, C, Z, H, W)
        gt_onehot = torch.movedim(gt_onehot, -1, 1)
        input_soft = F.softmax(pred, dim=1)
        dims = (2, 3, 4)  # reduce over the spatial axes only
        intersection = torch.sum(input_soft * gt_onehot, dims)
        cardinality = torch.sum(input_soft + gt_onehot, dims)
        dice_score = 2. * intersection / (cardinality + self.eps)
        assert self.weights.shape[0] == dice_score.shape[0], "weights should have size of batch size"
        # NOTE(review): self.weights has shape (B,) while dice_score is (B, C);
        # this elementwise product only broadcasts when B == 1 or B == C —
        # confirm the intended shapes (a weights.unsqueeze(1) may be missing).
        dice_score = self.weights * dice_score
        return 1. - dice_score[:, included]
def one_hot_encode(volume, shape, device):
    """One-hot encode an integer label volume.

    Args:
        volume (torch.Tensor): integer labels, B*Z*H*W elements in total.
        shape (tuple): (B, C, Z, H, W) — C is the number of classes.
        device: destination device for the encoded tensor.
    Returns:
        torch.Tensor of shape (B, Z, H, W, C) with a 1 at each voxel's class.
    """
    B, C, Z, H, W = shape
    n_voxels = B * Z * H * W
    class_idx = volume.reshape(-1).unsqueeze(dim=1)  # (n_voxels, 1) scatter indices
    encoded = torch.zeros(size=(n_voxels, C), dtype=torch.float).to(device)
    encoded.scatter_(1, class_idx, 1)  # set a single 1 per row at the class index
    return torch.squeeze(encoded).reshape(B, Z, H, W, C)
class LossFn:
    """Dispatcher that evaluates one or more named loss functions on a
    prediction/ground-truth pair and returns the sum of their means.
    """

    def __init__(self, loss_config, loader_config, weights):
        # loss_config['name'] may be a single loss name or a list of names
        if not isinstance(loss_config['name'], list):
            self.name = [loss_config['name']]
        else:
            self.name = loss_config['name']
        self.loader_config = loader_config
        self.classes = loader_config['labels']  # label-name -> value mapping
        self.weights = weights  # per-class weight tensor
        # NOTE(review): factory_loss reads self.device, which is never assigned
        # here — presumably set on the instance externally before the first
        # call; verify against the training script.

    def factory_loss(self, pred, gt, name, partition_weights):
        """Instantiate the loss selected by `name`, evaluate it and return
        the partition-weighted mean.

        Raises:
            Exception: when `name` matches no known loss.
        """
        if name == 'CrossEntropyLoss':
            pred = torch.nn.Sigmoid()(pred)  # sigmoid here which is already built-in in other losses
            loss_fn = nn.CrossEntropyLoss(weight=self.weights).to(self.device)
        elif name == 'BCEWithLogitsLoss':
            if pred.shape[1] == 1:
                # binary case: drop the channel axis, weight the positive class
                pred = pred.squeeze()
                gt = gt.float()
                loss_fn = nn.BCEWithLogitsLoss(pos_weight=1/self.weights[0]).to(self.device)
            else:
                # one hot encoding for cross entropy with digits. Bx1xHxW -> BxCxHxW
                B, C, Z, H, W = pred.shape
                gt_flat = gt.reshape(-1).unsqueeze(dim=1)  # 1xB*Z*H*W
                gt_onehot = torch.zeros(size=(B * Z * H * W, C), dtype=torch.float).to(self.device)  # 1xB*Z*H*W destination tensor
                gt_onehot.scatter_(1, gt_flat, 1)  # writing the conversion in the destination tensor
                gt = torch.squeeze(gt_onehot).reshape(B, Z, H, W, C)  # reshaping to the original shape
                pred = pred.permute(0, 2, 3, 4, 1)  # for BCE we want classes in the last axis
                loss_fn = nn.BCEWithLogitsLoss(pos_weight=self.weights).to(self.device)
        elif name == 'Jaccard':
            assert pred.shape[1] == 1, 'this loss works with a binary prediction'
            loss_fn = JaccardLoss(weight=self.weights, apply_sigmoid=True, per_volume=True)
        elif name == 'DiceLoss':
            # pred = torch.argmax(torch.nn.Softmax(dim=1)(pred), dim=1)
            # pred = pred.data.cpu().numpy()
            # gt = gt.cpu().numpy()
            loss_fn = DiceLoss(self.classes, self.device, partition_weights)
        else:
            raise Exception("specified loss function cant be found.")
        loss = loss_fn(pred, gt)
        loss = loss * partition_weights
        return loss.mean()

    def __call__(self, pred, gt, partition_weights):
        """
        Evaluate every configured loss and return the sum.
        SHAPE MUST BE Bx1xHxW
        :param pred: network output (logits)
        :param gt: ground-truth labels
        :return: scalar tensor (sum of the individual loss means)
        :raises ValueError: when any individual loss evaluates to NaN
        """
        assert pred.device == gt.device
        assert gt.device != 'cpu'
        cur_loss = []
        for name in self.name:
            loss = self.factory_loss(pred, gt, name, partition_weights)
            if torch.isnan(loss):
                raise ValueError('Loss is nan during training...')
            cur_loss.append(loss)
        return torch.sum(torch.stack(cur_loss))
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | augmentations.py | .py | 12,560 | 347 | import importlib
from torchvision import transforms
import numpy as np
import torch
from scipy.ndimage import rotate, map_coordinates, gaussian_filter
import torchvision.transforms.functional as TF
import cv2
from torch.nn.functional import interpolate
class ToPilImage:
    """Convert an (image, mask) pair to PIL images via torchvision."""

    def __init__(self):
        pass

    def __call__(self, data):
        img, msk = data[0], data[1]
        return [TF.to_pil_image(img), TF.to_pil_image(msk)]
class RandomHorizontalFlip:
    """Flip image and mask along axis 1 with probability `execution_probability`."""

    def __init__(self, execution_probability=0.5):
        self.execution_probability = execution_probability

    def __call__(self, data):
        # draw once; skip the flip when the draw misses the probability window
        if np.random.uniform(0, 1) >= self.execution_probability:
            return data
        img, msk = data
        return [np.flip(img, axis=1), np.flip(msk, axis=1)]
class RandomVerticalFlip:
    """Flip image and mask along axis 0 with probability `execution_probability`."""

    def __init__(self, execution_probability=0.5):
        self.execution_probability = execution_probability

    def __call__(self, data):
        # draw once; skip the flip when the draw misses the probability window
        if np.random.uniform(0, 1) >= self.execution_probability:
            return data
        img, msk = data
        return [np.flip(img, axis=0), np.flip(msk, axis=0)]
class ToTensor:
    """Wrap a numpy volume in a torch tensor, adding a channel axis to 2D inputs."""

    def __init__(self):
        pass

    def __call__(self, volume):
        tensor = torch.from_numpy(volume)
        # 2D slices get a leading channel dimension; 3D volumes pass through
        return torch.unsqueeze(tensor, dim=0) if tensor.ndim == 2 else tensor
class RandomContrast:
    """
    Randomly increase image contrast with CLAHE
    (https://www.sciencedirect.com/science/article/pii/B9780123361561500616).
    The mask is NOT affected.

    Args:
        alpha (tuple): (min, max) range for the random CLAHE clip limit.
        execution_probability (float): chance of applying the transform.
    Returns:
        [image, mask] with the image contrast-enhanced and clipped to [0, 1].
    """

    def __init__(self, alpha=(0.8, 2), execution_probability=0.1, **kwargs):
        assert len(alpha) == 2
        self.alpha = alpha
        self.execution_probability = execution_probability

    def __call__(self, data):
        if np.random.uniform(0, 1) >= self.execution_probability:
            return data
        image, mask = data
        # assert image.shape == mask.shape
        assert image.ndim == 3
        # CLAHE expects a [0, 1] image; rescale first when out of range
        if image.max() > 1 or image.min() < 0:
            image, _ = Normalize()([image, mask])
        clip = np.random.uniform(self.alpha[0], self.alpha[1])
        clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(8, 8))
        stretched = (image * 255).astype(np.uint8)  # CLAHE works on uint8 [0-255]
        for ch in range(image.shape[0]):
            stretched[ch] = clahe.apply(stretched[ch])
        stretched = stretched.astype(np.float32) / 255  # back to [0-1]
        return [np.clip(stretched, 0, 1), mask]
class RandomRotate:
    """
    Rotate image and mask by a random angle drawn from
    (-angle_spectrum, angle_spectrum), around the (0, 2) axes pair.
    The mask is always rotated with order 0 (nearest) to keep labels intact.
    """

    def __init__(self, angle_spectrum=8, mode='nearest', order=0, execution_probability=0.5, **kwargs):
        self.angle_spectrum = angle_spectrum
        self.mode = mode
        self.order = order
        self.execution_probability = execution_probability

    def __call__(self, data):
        if np.random.uniform(0, 1) >= self.execution_probability:
            return data
        theta = np.random.randint(-self.angle_spectrum, self.angle_spectrum)
        image, mask = data
        # assert image.shape == mask.shape
        assert image.ndim == 3
        rotated_img = rotate(image, theta, axes=(0, 2), reshape=False, order=self.order, mode=self.mode)
        rotated_msk = rotate(mask, theta, axes=(0, 2), reshape=False, order=0, mode=self.mode)
        return [rotated_img, rotated_msk]
# it's relatively slow, i.e. ~1s per patch of size 64x200x200, so use multiple workers in the DataLoader
# remember to use spline_order=0 when transforming the labels
class ElasticDeformation:
    """
    Elastic deformation of 3D patches, applied slice-wise on a per-voxel mesh.
    Assumes ZYX axis order (or CZYX if the data is 4D).
    Based on: https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62
    """

    def __init__(self, spline_order=2, alpha=1, sigma=5, execution_probability=0.1,
                 **kwargs):
        """
        :param spline_order: the order of spline interpolation (use 0 for labeled images)
        :param alpha: scaling factor for deformations (random -> between 0 and alfa)
        :param sigma: smoothing factor for Gaussian filter (random -> between 0 and sigma)
        :param execution_probability: probability of executing this transform
        """
        self.spline_order = spline_order
        # deformation strength/smoothness are randomized once, at construction
        self.alpha = np.random.uniform() * alpha
        self.sigma = np.random.uniform() * sigma
        self.execution_probability = execution_probability

    def deformate(self, volume, spline_order=0):
        """Warp each (Y, X) slice of `volume` with a smoothed random displacement field."""
        y_dim, x_dim = volume[0].shape
        grid_y, grid_x = np.meshgrid(np.arange(y_dim), np.arange(x_dim), indexing='ij')
        for ch in range(volume.shape[0]):
            # independent smooth displacement fields for y and x
            dy, dx = [
                gaussian_filter(
                    np.random.randn(*volume[0].shape),
                    self.sigma, mode="reflect"
                ) * self.alpha for _ in range(2)
            ]
            warped_coords = grid_y + dy, grid_x + dx
            volume[ch] = map_coordinates(volume[ch], warped_coords, order=spline_order, mode='reflect')
        return volume

    def __call__(self, data):
        if np.random.uniform(0, 1) >= self.execution_probability:
            return data
        image, mask = data
        # assert image.shape == mask.shape
        assert image.ndim == 3
        warped_img = self.deformate(image.copy(), self.spline_order)
        warped_msk = self.deformate(mask.copy(), 0)  # order 0 keeps labels intact
        return [warped_img, warped_msk]
class Normalize:
    """
    Min-max scale the image into the fixed range [0, 1].
    DOES NOT AFFECT MASKS!
    """

    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        """Return [scaled_image, mask].

        Bug fix: a constant image used to produce 0/0 -> a volume of NaNs
        (plus a runtime warning); it now maps to all zeros instead.
        """
        image, mask = data
        value_range = image.max() - image.min()
        if value_range == 0:
            return [np.zeros_like(image, dtype=np.float64), mask]
        norm_0_1 = (image - image.min()) / value_range
        return [np.clip(norm_0_1, 0, 1), mask]
class Rescale:
    """Resize a 3D volume by a scale factor OR to an explicit size using torch interpolation."""

    def __init__(self, scale_factor=None, size=None, interp_fn='trilinear', **kwargs):
        assert (scale_factor is not None) ^ (size is not None), "please specify a size OR ratio factor"
        self.scale_factor = scale_factor
        self.size = size
        self.interp_fn = interp_fn
        # two lambdas so the nearest path never passes align_corners (it would warn/error)
        if self.interp_fn == 'nearest':
            self.scale_fn = lambda img: interpolate(img, size=self.size, scale_factor=self.scale_factor, mode=self.interp_fn, recompute_scale_factor=False)
        else:
            self.scale_fn = lambda img: interpolate(img, size=self.size, scale_factor=self.scale_factor, mode=self.interp_fn, align_corners=False, recompute_scale_factor=False)

    def __call__(self, data):
        # identity scale: nothing to do
        if self.scale_factor == 1:
            return data
        was_tensor = torch.is_tensor(data)
        vol = data if was_tensor else ToTensor()(data)
        assert vol.ndim == 3
        # interpolate wants (B, C, Z, H, W): add and strip the extra axes around the call
        rescaled = self.scale_fn(vol.unsqueeze(0).unsqueeze(0)).squeeze()
        return rescaled if was_tensor else rescaled.numpy()
class Resize:
    """Crop image and mask so every spatial dimension becomes divisible by
    `divisor` (e.g. to fit a network with `divisor`-fold downsampling).
    """

    def __init__(self, labels, divisor, **kwargs):
        self.labels = labels    # label config, kept for API compatibility
        self.divisor = divisor  # required divisibility of each spatial dim

    def closestDistanceForDivision(self, number):
        """Return, element-wise, the smallest multiple of self.divisor >= number.

        Bug fix: `np.int` was removed in NumPy 1.24 — use the builtin `int`.
        """
        q = np.ceil(number / self.divisor).astype(int)
        # upper multiple only; a variant also considering the lower multiple
        # existed as commented-out code (see git history)
        n1 = self.divisor * q
        return n1

    def reshape(self, volume, new_shape, pad_val=0):
        """Center-pad and/or center-crop `volume` (Z, H, W) to `new_shape`.

        Works for both numpy arrays and torch tensors; `pad_val` fills the
        padded border.
        """
        target_Z, target_H, target_W = new_shape
        Z, H, W = volume.shape
        # if dest shape is bigger than current shape needs to pad
        H_pad = max(target_H - H, 0) // 2
        W_pad = max(target_W - W, 0) // 2
        Z_pad = max(target_Z - Z, 0) // 2
        # if dest shape is lower than current shape needs to crop
        H_crop = max(H - target_H, 0) // 2
        W_crop = max(W - target_W, 0) // 2
        Z_crop = max(Z - target_Z, 0) // 2
        if isinstance(volume, np.ndarray):
            result = np.full((target_Z, target_H, target_W), fill_value=pad_val, dtype=volume.dtype)
        else:
            result = torch.full((target_Z, target_H, target_W), fill_value=pad_val, dtype=volume.dtype)
        result[Z_pad:Z + Z_pad, H_pad:H + H_pad, W_pad:W + W_pad] = volume[Z_crop:target_Z + Z_crop, H_crop:target_H + H_crop, W_crop:target_W + W_crop]
        return result

    def __call__(self, data):
        image, mask = data
        orig_shape = np.asarray(image.shape)
        bounds = self.closestDistanceForDivision(orig_shape)
        # bug fix: np.int -> int (np.int was removed in NumPy 1.24)
        low_bound = np.floor(bounds / 2).astype(int)
        high_bound = (orig_shape - np.ceil(bounds / 2)).astype(int)
        # NOTE(review): `bounds` >= orig_shape by construction, so these slices
        # shrink towards (or past) the volume center — confirm the intended
        # crop semantics with the callers; behavior preserved as-is.
        return [
            image[low_bound[0]:high_bound[0], low_bound[1]:high_bound[1], low_bound[2]:high_bound[2]],
            mask[low_bound[0]:high_bound[0], low_bound[1]:high_bound[1], low_bound[2]:high_bound[2]]
        ]
class Relabel:
    """
    Map arbitrary label values onto consecutive integers, e.g.
    [10, 10, 0, 6, 6] -> [2, 2, 0, 1, 1]. Handy for building one-hot encodings
    from instance-segmentation volumes, which is harder with gaps in the labels.
    """

    def __init__(self, **kwargs):
        pass

    def __call__(self, m):
        # np.unique's inverse index IS the consecutive relabeling
        _, inverse = np.unique(m, return_inverse=True)
        return inverse.reshape(m.shape)
class CenterPad:
    """Symmetrically pad a (Z, H, W) volume up to a fixed target shape."""

    def __init__(self, final_shape):
        self.size = final_shape

    def __call__(self, image, pad_val=None):
        # default border value: the volume minimum (computed on the raw input)
        if pad_val is None:
            pad_val = image.min()
        was_tensor = torch.is_tensor(image)
        vol = image if was_tensor else ToTensor()(image)
        margins = []
        # torch.nn.functional.pad wants (x_lo, x_hi, y_lo, y_hi, z_lo, z_hi)
        for axis, target in ((-1, self.size[2]), (-2, self.size[1]), (-3, self.size[0])):
            gap = target - vol.shape[axis]
            margins += [int(np.floor(gap / 2.)), int(np.ceil(gap / 2.))]
        padded = torch.nn.functional.pad(vol, margins, value=pad_val)
        return padded if was_tensor else padded.numpy()
class CenterCrop:
    """Crop the centered region of `target_shape` from a volume (and optionally its gt)."""

    def __init__(self, target_shape):
        self.target_shape = target_shape

    def __call__(self, image, gt=None):
        # symmetric crop bounds for the last three axes (Z, H, W)
        bounds = []
        for axis, target in ((-3, self.target_shape[0]), (-2, self.target_shape[1]), (-1, self.target_shape[2])):
            extra = image.shape[axis] - target
            lo = int(np.floor(extra / 2.))
            hi = image.shape[axis] - int(np.ceil(extra / 2.))
            bounds.append((lo, hi))
        (z0, z1), (y0, y1), (x0, x1) = bounds
        crop_img = image[..., z0:z1, y0:y1, x0:x1]
        if gt is None:
            return crop_img
        assert image.shape == gt.shape
        return crop_img, gt[..., z0:z1, y0:y1, x0:x1]
3D | potpov/New-Maxillo-Dataset-Segmentation | model.py | .py | 2,730 | 73 | import torch
import torch.nn as nn
class Competitor(nn.Module):
    """3D encoder-decoder segmentation network (competitor baseline).

    Three 2x-downsampling encoder stages with additive residual connections,
    mirrored by three transposed-conv decoder stages with skip concatenations.
    """

    def __init__(self, n_classes):
        super(Competitor, self).__init__()
        # --- encoder ---
        self.ec0 = self.conv3Dblock(1, 32)
        self.ec1 = self.conv3Dblock(32, 32)
        self.ec2 = self.conv3Dblock(32, 64, kernel_size=3, stride=2)  # third dimension to even val
        self.ec3 = self.conv3Dblock(64, 64)
        self.ec4 = self.conv3Dblock(64, 64)
        self.ec5 = self.conv3Dblock(64, 128, kernel_size=3, stride=2)
        self.ec6 = self.conv3Dblock(128, 128)
        self.ec7 = self.conv3Dblock(128, 128)
        self.ec8 = self.conv3Dblock(128, 256, kernel_size=3, stride=2)
        self.ec9 = self.conv3Dblock(256, 256)
        self.ec10 = self.conv3Dblock(256, 256)
        # --- decoder (each upsample is followed by a skip concatenation) ---
        self.dc9 = nn.ConvTranspose3d(256, 128, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=1, output_padding=1)  # we have a concat here
        self.dc8 = self.conv3Dblock(256, 128)
        self.dc7 = self.conv3Dblock(128, 128)
        self.dc6 = nn.ConvTranspose3d(128, 64, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=1, output_padding=1)  # we have a concat here
        self.dc5 = self.conv3Dblock(128, 64)
        self.dc4 = self.conv3Dblock(64, 64)
        self.dc3 = nn.ConvTranspose3d(64, 32, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=1, output_padding=1)  # we have a concat here
        self.dc2 = self.conv3Dblock(64, 32)
        self.dc1 = self.conv3Dblock(32, 32)
        self.final = nn.Conv3d(32, n_classes, kernel_size=(1, 1, 1), stride=(1, 1, 1))

    def conv3Dblock(self, in_channels, out_channels, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)):
        """Conv3d -> BatchNorm3d -> ReLU building block."""
        layers = [
            nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding),
            nn.BatchNorm3d(out_channels),
            nn.ReLU(),
        ]
        return nn.Sequential(*layers)

    def forward(self, x):
        x = x[:, 0:1]  # keep one channel
        # encoder: each stage keeps a skip feature and an additive residual
        skip0 = self.ec1(self.ec0(x))
        res = self.ec2(skip0 + x)
        skip1 = self.ec4(self.ec3(res))
        res = self.ec5(skip1 + res)
        skip2 = self.ec7(self.ec6(res))
        res = self.ec8(skip2 + res)
        out = self.ec10(self.ec9(res))
        # decoder: upsample, concatenate the matching skip, refine
        res = self.dc9(out + res)
        out = self.dc7(self.dc8(torch.cat((res, skip2), dim=1)))
        res = self.dc6(out + res)
        out = self.dc4(self.dc5(torch.cat((res, skip1), dim=1)))
        res = self.dc3(out + res)
        out = self.dc1(self.dc2(torch.cat((res, skip0), dim=1)))
        return self.final(out + res)
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | dataset.py | .py | 5,843 | 131 | import numpy as np
import os
from tqdm import tqdm
import logging
import torchio as tio
import utils
import random
class Loader3D():
    """Loads train/val/test (and optional synthetic) splits from .npy files and
    wraps each patient into a torchio Subject, ready for patch-based sampling.
    """

    def __init__(self, config, do_train=True, additional_dataset=False, is_competitor=False, skip_primary=False):
        """
        Args:
            config (dict): loader config; needs 'file_path' and 'labels',
                optionally 'augmentations_file' (YAML list of tio transforms).
            do_train (bool): when False, only the test split is loaded.
            additional_dataset (bool): also load the 'syntetic' (sparse) split.
            is_competitor (bool): competitor baseline trains/validates on the
                sparse annotations instead of the dense ones.
            skip_primary (bool): skip the primary train split (e.g. pre-training
                on the synthetic data only).
        """
        self.config = config
        self.subjects = {
            'train': [],
            'syntetic': [],
            'test': [],
            'val': []
        }
        # map each split name to the annotation flavour (subdirectory) to read
        splits = {}
        if do_train:
            if not skip_primary:
                splits['train'] = 'sparse' if is_competitor else 'dense'
            if additional_dataset:
                splits['syntetic'] = 'sparse'
            splits['val'] = 'sparse' if is_competitor else 'dense'
        splits['test'] = 'dense'  # always!
        for split, dataset_type in splits.items():
            logging.info(f"loading {split} dataset from {os.path.join(config['file_path'], dataset_type, split)}")
            tmp_gt = np.load(os.path.join(config['file_path'], dataset_type, split, 'gt.npy'), allow_pickle=True)
            tmp_data = np.load(os.path.join(config['file_path'], dataset_type, split, 'data.npy'), allow_pickle=True)
            for p in tqdm(range(tmp_gt.shape[0])):
                assert np.max(tmp_data[p]) <= 1  # data should be normalized by default
                assert np.unique(tmp_gt[p]).size <= len(self.config['labels'])
                # training splits get a tio.LabelMap (so spatial transforms follow
                # the image); eval splits keep the raw array for metric computation
                label = tio.LabelMap(tensor=tmp_gt[p].astype(np.uint8)[None]) if split in ['train', 'syntetic'] else tmp_gt[p].astype(np.uint8)
                # NOTE(review): np.float was removed in NumPy 1.24 — this line
                # needs np.float -> float (or np.float64) on modern NumPy.
                self.subjects[split].append(tio.Subject(
                    data=tio.ScalarImage(tensor=tmp_data[p][None].astype(np.float)),
                    label=label,
                ))
        # (previous, duplicated loading implementation removed — see git history)
        self.do_train = do_train
        self.additional_dataset = additional_dataset
        aug_filepath = config.get("augmentations_file", None)
        auglist = [] if aug_filepath is None else utils.load_config_yaml(aug_filepath)
        augment = AugFactory(auglist)
        augment.log()  # write what we are using to logfile
        self.transforms = augment.get_transform()

    def get_aggregator(self):
        """Return a GridAggregator matching get_sampler()'s patch layout."""
        sampler = self.get_sampler()
        return tio.inference.GridAggregator(sampler)

    def get_sampler(self):
        """Return the non-overlapping 32^3 grid sampler used everywhere in this loader."""
        return tio.GridSampler(patch_size=(32, 32, 32), patch_overlap=0)

    def split_dataset(self, rank=0, world_size=1):
        """Build the per-process datasets.

        Args:
            rank, world_size: distributed-training shard selection; the shuffled
                training list is strided as training_set[rank::world_size].
        Returns:
            (train SubjectsDataset or None, list of test GridSamplers,
             list of val GridSamplers).
        """
        training_set = self.subjects['train'] + self.subjects['syntetic']
        random.shuffle(training_set)
        train = tio.SubjectsDataset(training_set[rank::world_size], transform=self.transforms) if self.do_train else None
        test = [tio.GridSampler(subject, patch_size=(32, 32, 32), patch_overlap=0) for subject in self.subjects['test']]
        val = [tio.GridSampler(subject, patch_size=(32, 32, 32), patch_overlap=0) for subject in self.subjects['val']]
        return train, test, val
class AugFactory:
    """Build a tio.Compose transform from a (possibly nested) augmentation config."""

    def __init__(self, aug_list):
        self.aug_list = aug_list
        self.transforms = self.factory(self.aug_list, [])

    def log(self):
        """
        save the list of aug for this experiment to the default log file
        :return:
        """
        logging.info('going to use the following augmentations:: %s', self.aug_list)

    def factory(self, auglist, transforms):
        """Recursively convert config entries into torchio transform instances.

        A 'OneOf' entry nests another augmentation list; every other key is
        looked up as a torchio transform name with its mapping as kwargs.
        Raises Exception (chained to the real cause) for invalid entries.
        """
        for aug in auglist:
            if aug == 'OneOf':
                transforms.append(tio.OneOf(self.factory(auglist[aug], [])))
            else:
                try:
                    # robustness fix: a YAML entry with no parameters parses as
                    # None; treat that as "no kwargs" instead of crashing
                    params = auglist[aug] or {}
                    kwargs = {param: value for param, value in params.items()}
                    transforms.append(getattr(tio, aug)(**kwargs))
                except Exception as e:
                    # bug fix: the bare except used to swallow the real error;
                    # chain it so the cause shows up in the traceback
                    raise Exception(f"this transform is not valid: {aug}") from e
        return transforms

    def get_transform(self):
        """
        return the transform object
        :return:
        """
        return tio.Compose(self.transforms)
3D | potpov/New-Maxillo-Dataset-Segmentation | test.py | .py | 4,470 | 99 | import torch
from torch import nn
from tqdm import tqdm
from torch.nn.functional import interpolate
from augmentations import CenterCrop
import numpy as np
import torchio as tio
import logging
from utils import resample
import cc3d
def test(model, test_loader, epoch, writer, evaluator, phase):
    """Run patch-based inference over every test subject and accumulate metrics.

    Args:
        model: segmentation network; called on CUDA patches.
        test_loader: iterable of (grid sampler, patch loader) pairs, one per patient.
        epoch (int): current epoch index, used for progress/tensorboard labels.
        writer: tensorboard SummaryWriter or None.
        evaluator: Eval instance accumulating IoU/Dice/Hausdorff.
        phase (str): Train/Validation/Test/Final — controls post-processing and logging.
    Returns:
        (mean IoU, mean Dice, max Hausdorff) over the whole loader.
    """
    model.eval()
    with torch.no_grad():
        evaluator.reset_eval()
        for i, (subject, loader) in tqdm(enumerate(test_loader), total=len(test_loader), desc='val epoch {}'.format(str(epoch))):
            # average overlapping patch predictions back into one whole volume
            aggr = tio.inference.GridAggregator(subject, overlap_mode='average')
            for subvolume in loader:
                # batchsize with torchio affects the number of grids we extract from a patient.
                # when we aggragate the patient the volume is just one.
                images = subvolume['data'][tio.DATA].float().cuda()  # BS, 3, Z, H, W
                output = model(images)  # BS, Classes, Z, H, W
                aggr.add_batch(output, subvolume[tio.LOCATION])
            output = aggr.get_output_tensor()  # C, Z, H, W
            labels = subject[0]['label']  # original labels from storage
            # bring prediction and input back to the original label resolution
            output = interpolate(output.unsqueeze(0), size=tuple(labels.shape), mode='trilinear', align_corners=False)
            origi_vol = interpolate(subject.subject['data'][tio.DATA].unsqueeze(0), size=tuple(labels.shape), mode='trilinear', align_corners=False)
            origi_vol = origi_vol.squeeze().cpu().detach().numpy()
            output = output.squeeze().cpu().detach().numpy()
            assert output.shape == labels.shape, f"{output.shape} != {labels.shape}"
            # final predictions
            if output.ndim > 3:
                # NOTE(review): `output` is a numpy array here but is passed to
                # torch.nn.Softmax — confirm this multiclass branch is actually
                # exercised / works as intended.
                output = torch.argmax(torch.nn.Softmax(dim=0)(output), dim=0).numpy()
            else:
                # binary case: sigmoid + 0.5 threshold
                output = nn.Sigmoid()(torch.from_numpy(output))  # BS, 1, Z, H, W
                output = torch.where(output > 0.5, 1, 0)
                output = output.squeeze().cpu().detach().numpy()  # BS, Z, H, W
            # post-processing for noise
            if phase in ['Test', 'Final']:
                # keep only the two largest components after the biggest one
                # (assumed to be the background component) — everything else is
                # zeroed. NOTE(review): comparing against argsort indices works
                # because cc3d labels are consecutive integers; confirm.
                output = cc3d.connected_components(output)
                two_biggest = np.argsort([np.sum(output == l) for l in np.unique(output)])[-3:-1]
                output = np.where(np.logical_and(output != two_biggest[0], output != two_biggest[1]), 0, 1)
            evaluator.compute_metrics(output, labels, origi_vol, str(i), phase)
        # (tensorboard image-dump code for the binary case removed — see git history)
        epoch_iou, epoch_dice, epoch_haus = evaluator.mean_metric(phase=phase)
        if writer is not None and phase != "Final":
            writer.add_scalar(f'{phase}/IoU', epoch_iou, epoch)
            writer.add_scalar(f'{phase}/Dice', epoch_dice, epoch)
            writer.add_scalar(f'{phase}/Hauss', epoch_haus, epoch)
        if phase in ['Test', 'Final']:
            logging.info(
                f'{phase} Epoch [{epoch}], '
                f'{phase} Mean Metric (IoU): {epoch_iou}'
                f'{phase} Mean Metric (Dice): {epoch_dice}'
                f'{phase} Mean Metric (haus): {epoch_haus}'
            )
        return epoch_iou, epoch_dice, epoch_haus
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | utils.py | .py | 7,777 | 196 | from torch.utils.data import DistributedSampler
from scipy.ndimage import binary_fill_holes
import pathlib
import torchio as tio
import logging
import os
import numpy as np
import yaml
import sys
import torch
from tqdm import tqdm
import SimpleITK as sitk
import json
from scipy.linalg import norm
def set_logger(log_path=None):
    """Configure the root logger at INFO level.

    Logs to the file at *log_path* when given, otherwise to stdout.
    Handlers are attached only once, so repeated calls are harmless.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if root.handlers:
        return
    if log_path:
        handler = logging.FileHandler(os.path.join(log_path))
        fmt = '%(asctime)s:%(levelname)s: %(message)s'
    else:
        handler = logging.StreamHandler(sys.stdout)
        fmt = '%(message)s'
    handler.setFormatter(logging.Formatter(fmt))
    root.addHandler(handler)
def load_config_yaml(config_file):
    """Parse the YAML config at *config_file* and return its contents (a dict).

    Fix: the original passed `open(...)` directly to `yaml.load`, leaving the
    file handle to be closed by the garbage collector; the context manager
    closes it deterministically.
    """
    with open(config_file, 'r') as f:
        return yaml.load(f, yaml.FullLoader)
def resample(ctvol, is_label, original_spacing=.3, out_spacing=.4):
    """Resample a 3D volume from *original_spacing* to *out_spacing* (isotropic).

    Label volumes use nearest-neighbour interpolation so class ids stay intact;
    image volumes use B-spline interpolation.
    """
    src_spacing = (original_spacing,) * 3
    dst_spacing = (out_spacing,) * 3
    itk_vol = sitk.GetImageFromArray(ctvol)
    itk_vol.SetSpacing(src_spacing)
    # output shape preserves the physical extent of the input
    dst_shape = [
        int(np.round(size * (src / dst)))
        for size, src, dst in zip(itk_vol.GetSize(), src_spacing, dst_spacing)
    ]
    resampler = sitk.ResampleImageFilter()
    resampler.SetOutputSpacing(dst_spacing)
    resampler.SetSize(dst_shape)
    resampler.SetOutputDirection(itk_vol.GetDirection())
    resampler.SetOutputOrigin(itk_vol.GetOrigin())
    resampler.SetTransform(sitk.Transform())
    resampler.SetDefaultPixelValue(itk_vol.GetPixelIDValue())
    resampler.SetInterpolator(sitk.sitkNearestNeighbor if is_label else sitk.sitkBSpline)
    return sitk.GetArrayFromImage(resampler.Execute(itk_vol))
def create_dataset(splits_todo, is_competitor, saving_dir):
    """Build per-split .npy datasets (32^3 training patches, whole test volumes).

    For each split name in *splits_todo*, loads the volumes listed in the
    splits.json file, resamples them, normalizes intensities to [0, 1], and:
    - train-like splits: cuts overlapping 32x32x32 patches, keeping only
      patches whose ground truth is non-empty;
    - "test"/"val": stores the whole (resampled) image with the ORIGINAL
      (non-resampled) ground truth — patching happens at runtime.
    Results are saved as object arrays `<saving_dir>/<split>/{data,gt}.npy`.

    :param splits_todo: list of split names to process (keys of splits.json)
    :param is_competitor: if True use the SPARSE data root and syntetic labels
    :param saving_dir: output root; one sub-folder per split is created
    """
    split_filepath = "path/to/splits.json"
    with open(split_filepath) as f:
        folder_splits = json.load(f)
    # for split in ['train', 'syntetic', 'val']:
    if is_competitor:
        base = '/path/to/data/SPARSE'
    else:
        base = "/path/to/data/DENSE"
    for split in splits_todo:
        dirs = [os.path.join(base, p) for p in
                folder_splits[split]]
        dataset = {'data': [], 'gt': []}
        for i, dir in tqdm(enumerate(dirs), total=len(dirs), desc=f"processing {split}"):
            # competitor pipeline learns from the generated "syntetic" labels
            gt_dir = os.path.join(dir, 'syntetic.npy') if is_competitor else os.path.join(dir, 'gt_alpha.npy')
            data_dir = os.path.join(dir, 'data.npy')
            image = np.load(data_dir)
            gt_orig = np.load(gt_dir)
            # rescale
            image = resample(image, is_label=False)
            gt = resample(gt_orig, is_label=True)
            # DICOM_MAX = 3095 if is_competitor else 2100
            DICOM_MAX = 2100
            DICOM_MIN = 0
            # with DICOM_MIN == 0 this reduces to image / 2100
            image = np.clip(image, DICOM_MIN, DICOM_MAX)
            image = (image.astype(float) + DICOM_MIN) / (DICOM_MAX + DICOM_MIN)  # [0-1] with shifting
            if split not in ["test", "val"]:
                s = tio.Subject(
                    data=tio.ScalarImage(tensor=image[None]),
                    label=tio.LabelMap(tensor=gt[None]),
                )
                grid_sampler = tio.inference.GridSampler(
                    s,
                    patch_size=(32, 32, 32),
                    patch_overlap=(10, 10, 10),
                )
                patch_loader = torch.utils.data.DataLoader(grid_sampler, batch_size=1)
                # NOTE: `image` and `gt` are deliberately rebound to the
                # current patch inside this loop; the full volumes are no
                # longer needed on this branch.
                for a in patch_loader:
                    image = a['data'][tio.DATA].squeeze().numpy()
                    gt = a['label'][tio.DATA].squeeze().numpy()
                    # discard patches with no foreground voxels
                    if np.sum(gt) != 0:
                        dataset['data'].append(image)
                        dataset['gt'].append(gt)
            else:  # do not cut volumes for testing - we do this at runtime
                dataset['data'].append(image)
                dataset['gt'].append(gt_orig)
        log_dir = pathlib.Path(os.path.join(saving_dir, split))
        log_dir.mkdir(parents=True, exist_ok=True)
        # store as dtype=object arrays since test volumes have different shapes
        for partition in ['data', 'gt']:
            a = np.empty(len(dataset[partition]), dtype=object)
            for i in range(len(dataset[partition])):
                a[i] = dataset[partition][i]
            np.save(os.path.join(saving_dir, split, f'{partition}.npy'), a)
        print(f"split {split} completed. created {len(dataset['data'])} subvolumes")
def create_syntetic():
    """Expand sparse point annotations into tube-shaped volumetric labels.

    For every folder under *data_dir*, reads the sparse ground truth, splits
    the annotated voxels into left/right halves along the last axis, orders
    each half, and draws a cylinder of radius 1.6 between consecutive points;
    the result is hole-filled and saved as 'syntetic.npy'.

    NOTE(review): voxels exactly on the mid-plane (last coord == shape//2)
    match neither strict inequality and are dropped — confirm intended.
    """
    data_dir = "path/toyour/SPARSE/npy_files"
    for folder in os.listdir(data_dir):
        print(f"processing {folder}")
        gt = np.load(os.path.join(data_dir, folder, "gt_sparse.npy"))
        example = np.zeros_like(gt)
        points = np.argwhere(gt == 1)
        splits = [
            points[points[:, -1] < gt.shape[-1] // 2],
            points[points[:, -1] > gt.shape[-1] // 2]
        ]
        for jj in range(2):
            points = splits[jj]
            # sort primarily by y, then z, then x so consecutive points trace the arch
            points = points[np.lexsort((points[:, 2], points[:, 0], points[:, 1]))]
            # NOTE(review): `- 2` skips the segment between the last two
            # points (`- 1` would cover it) — confirm intended.
            for i in range(points.shape[0] - 2):
                # axis and radius
                p0 = np.array(points[i])
                p1 = np.array(points[i + 1])
                R = 1.6
                # vector in direction of axis
                v = p1 - p0
                # find magnitude of vector
                mag = norm(v)
                # unit vector in direction of axis
                v = v / mag
                # make some vector not in the same direction as v
                not_v = np.array([1, 0, 0])
                if (v == not_v).all():
                    not_v = np.array([0, 1, 0])
                # make vector perpendicular to v
                n1 = np.cross(v, not_v)
                # normalize n1
                n1 /= norm(n1)
                # make unit vector perpendicular to v and n1
                n2 = np.cross(v, n1)
                # surface ranges over t from 0 to length of axis and 0 to 2*pi
                t = np.linspace(0, mag, 100)
                theta = np.linspace(0, 2 * np.pi, 100)
                # use meshgrid to make 2d arrays
                t, theta = np.meshgrid(t, theta)
                # generate coordinates for surface
                Z, Y, X = [p0[i] + v[i] * t + R * np.sin(theta) * n1[i] + R * np.cos(theta) * n2[i] for i in [0, 1, 2]]
                # the +4 shifts the tube along Z — presumably compensating an
                # annotation offset; TODO confirm against the data
                example[(Z+4).astype(int), Y.astype(int), X.astype(int)] = 1
        # fill the cylinder shells into solid tubes
        example = binary_fill_holes(example).astype(int)
        np.save(os.path.join(data_dir, folder, 'syntetic.npy'), example)
if __name__ == '__main__':
    # Dataset preparation entry point: edit the hard-coded paths in
    # create_syntetic() / create_dataset() before running.
    # generate cicle expanded dataset - set your paths!
    create_syntetic()
    print("syntetic dataset has been created!")
    # generate training and syntetic datasets (32x32x32) and the test set (resampling to 0.3 voxel space)
    create_dataset(['train', 'syntetic', 'val', 'test'], is_competitor=True, saving_dir="saving_dir/sparse")
    create_dataset(['train', 'val', 'test'], is_competitor=False, saving_dir="saving_dir/dense")
    print("subvolumes for training have been created!")
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | main.py | .py | 7,912 | 193 | import argparse
import os
import pathlib
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import utils
from eval import Eval as Evaluator
from losses import LossFn
from test import test
import sys
import numpy as np
from os import path
import socket
import random
from torch.backends import cudnn
import torch
import logging
from train import train
from torch import nn
from dataset import Loader3D
from model import Competitor
def save_weights(epoch, model, optim, score, path):
    """Serialize a training checkpoint (epoch, model/optimizer state, metric) to *path*."""
    torch.save(
        {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optim.state_dict(),
            'metric': score,
        },
        path,
    )
def main(experiment_name, args):
    """Train and/or evaluate the Competitor model.

    Relies on module-level globals set in the __main__ guard: `config`
    (parsed YAML) and `project_dir` (experiment output folder).
    Builds model/optimizer/scheduler from config, optionally resumes from a
    checkpoint, trains with periodic validation and testing, then runs a
    final test pass.

    :param experiment_name: used as the TensorBoard run name
    :param args: parsed CLI namespace (skip_dump, additional_dataset,
        competitor, skip_primary, ...)
    """
    assert torch.cuda.is_available()
    logging.info(f"This model will run on {torch.cuda.get_device_name(torch.cuda.current_device())}")
    ## DETERMINISTIC SET-UP
    seed = config.get('seed', 47)
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # END OF DETERMINISTIC SET-UP
    loader_config = config.get('data-loader', None)
    train_config = config.get('trainer', None)
    model = Competitor(n_classes=1)
    logging.info('using data parallel')
    model = nn.DataParallel(model).cuda()
    train_params = model.parameters()
    optim_config = config.get('optimizer')
    optim_name = optim_config.get('name', None)
    # Adam is the default when no optimizer name is configured
    if not optim_name or optim_name == 'Adam':
        optimizer = torch.optim.Adam(params=train_params, lr=optim_config['learning_rate'])
    elif optim_name == 'SGD':
        optimizer = torch.optim.SGD(params=train_params, lr=optim_config['learning_rate'])
    else:
        raise Exception("optimizer not recognized")
    sched_config = config.get('lr_scheduler')
    scheduler_name = sched_config.get('name', None)
    if scheduler_name == 'MultiStepLR':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=sched_config['milestones'],
            gamma=sched_config.get('factor', 0.1),
        )
    elif scheduler_name == 'Plateau':
        # 'max' mode: the monitored quantity (validation IoU) should increase
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', verbose=True, patience=7)
    else:
        scheduler = None
    evaluator = Evaluator(loader_config, project_dir, skip_dump=args.skip_dump)
    loss = LossFn(config.get('loss'), loader_config, weights=None)  # TODO: fix this, weights are disabled now
    start_epoch = 0
    if train_config['checkpoint_path'] is not None:
        try:
            checkpoint = torch.load(train_config['checkpoint_path'])
            model.load_state_dict(checkpoint['state_dict'])
            start_epoch = checkpoint['epoch'] + 1
            optimizer.load_state_dict(checkpoint['optimizer'])
            logging.info(f"Checkpoint loaded successfully at epoch {start_epoch}, score:{checkpoint.get('metric', 'unavailable')})")
        except OSError as e:
            # missing checkpoint file is not fatal: start from scratch
            logging.info("No checkpoint exists from '{}'. Skipping...".format(train_config['checkpoint_path']))
    # DATA LOADING
    data_utils = Loader3D(loader_config, train_config.get("do_train", True), args.additional_dataset, args.competitor, args.skip_primary)
    train_d, test_d, val_d = data_utils.split_dataset()
    # test/val loaders are kept per-patient: list of (patient, DataLoader)
    test_loader = [(test_p, data.DataLoader(test_p, loader_config['batch_size'], num_workers=loader_config['num_workers'])) for test_p in test_d]
    val_loader = [(val_p, data.DataLoader(val_p, loader_config['batch_size'], num_workers=loader_config['num_workers'])) for val_p in val_d]
    if train_config['do_train']:
        train_loader = data.DataLoader(train_d, loader_config['batch_size'], num_workers=loader_config['num_workers'])
        # creating training writer (purge on)
        writer = SummaryWriter(log_dir=os.path.join(config['tb_dir'], experiment_name), purge_step=start_epoch)
        best_val = 0
        best_test = 0
        for epoch in range(start_epoch, train_config['epochs']):
            train(model, train_loader, loss, optimizer, epoch, writer, evaluator, phase="Train")
            # validation/testing run on the unwrapped module (no DataParallel)
            val_model = model.module
            val_iou, val_dice, val_haus = test(val_model, val_loader, epoch, writer, evaluator, phase="Validation")
            if val_iou < 1e-05 and epoch > 15:
                logging.info('WARNING: drop in performances detected.')
            if scheduler is not None:
                if optim_name == 'SGD' and scheduler_name == 'Plateau':
                    scheduler.step(val_iou)
                else:
                    scheduler.step(epoch)
            # always keep the latest weights, plus a separate best-IoU snapshot
            save_weights(epoch, model, optimizer, val_iou, os.path.join(project_dir, 'checkpoints', 'last.pth'))
            if val_iou > best_val:
                best_val = val_iou
                save_weights(epoch, model, optimizer, best_val, os.path.join(project_dir, 'best.pth'))
            # run the test set every 5 epochs (skipping epoch 0)
            if epoch % 5 == 0 and epoch != 0:
                test_iou, _, _ = test(val_model, test_loader, epoch, writer, evaluator, phase="Test")
                best_test = best_test if best_test > test_iou else test_iou
        logging.info('BEST TEST METRIC IS {}'.format(best_test))
    val_model = model.module
    final_iou, final_dice, _ = test(val_model, test_loader, epoch="Final", writer=None, evaluator=evaluator, phase="Final")
if __name__ == '__main__':
    # Experiment bootstrap: parse CLI flags, load the YAML config, set up the
    # experiment folder / logging, then hand off to main().
    RESULTS_DIR = r'/localpath/results'
    BASE_YAML_PATH = os.path.join('configs', 'config.yaml')
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--base_config', default="config.yaml", help='path to the yaml config file')
    arg_parser.add_argument('--verbose', action='store_true', help="if true sdout is not redirected, default: false")
    arg_parser.add_argument('--skip_dump', action='store_true', help="dump test data, default: false")
    arg_parser.add_argument('--test', action='store_true', help="set up test params, default: false")
    arg_parser.add_argument('--competitor', action='store_true', help='competitor trains on sparse, default: false')
    arg_parser.add_argument('--additional_dataset', action='store_true', help='add also the syntetic dataset, default: false')
    arg_parser.add_argument('--reload', action='store_true', help='reload experiment?, default: false')
    arg_parser.add_argument('--skip_primary', action='store_true', help='do not load primary train data, default: false')
    args = arg_parser.parse_args()
    yaml_path = args.base_config
    if path.exists(yaml_path):
        print(f"loading config file in {yaml_path}")
        config = utils.load_config_yaml(yaml_path)
        experiment_name = config.get('title')
        project_dir = os.path.join(RESULTS_DIR, experiment_name)
    else:
        # no config found: fall back to the base config in debug mode
        config = utils.load_config_yaml(BASE_YAML_PATH)  # load base config (remote or local)
        experiment_name = config.get('title', 'test')
        print('this experiment is on debug. no folders are going to be created.')
        project_dir = os.path.join(RESULTS_DIR, 'test')
    log_dir = pathlib.Path(os.path.join(project_dir, 'logs'))
    log_dir.mkdir(parents=True, exist_ok=True)
    if not args.verbose:
        # redirect streams to project dir
        # NOTE(review): this handle is intentionally left open for the whole
        # process lifetime so stdout/stderr stay valid.
        sys.stdout = open(os.path.join(log_dir, 'std.log'), 'a+')
        sys.stderr = sys.stdout
        utils.set_logger(os.path.join(log_dir, 'logging.log'))
    else:
        # not create folder here, just log to console
        utils.set_logger()
    if args.test:
        # evaluation-only run: disable training and resume from last.pth
        config['trainer']['do_train'] = False
        config['data-loader']['num_workers'] = 0
        config['trainer']['checkpoint_path'] = os.path.join(project_dir, 'checkpoints', 'last.pth')
    if args.reload:
        logging.info("RELOAD! setting checkpoint path to last.pth")
        config['trainer']['checkpoint_path'] = os.path.join(project_dir, 'checkpoints', 'last.pth')
    main(experiment_name, args)
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/__init__.py | .py | 0 | 0 | null | Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/smoother.py | .py | 2,171 | 62 | import numpy as np
from scipy import spatial as sp_spatial
from hull.voxelize.voxelize import voxelize
from visualize_results import MultiView
from matplotlib import pyplot as plt
from scipy.ndimage import binary_fill_holes
from scipy.ndimage.morphology import binary_erosion
def delaunay(volume):
    """Smooth a sparse binary volume by local convex-hull meshing + voxelization.

    Slides a 22^3 window (stride 18) over the bounding box of the foreground
    voxels; in each window, builds the convex hull of the local points and
    rasterizes its triangles back into a new binary volume.

    NOTE(review): despite the name, this uses scipy's ConvexHull, not a
    Delaunay triangulation.

    :param volume: binary ndarray (z, y, x)
    :return: binary ndarray of the same shape with the smoothed surface
    """
    coords = np.argwhere(volume == 1)
    min_z, min_y, min_x = coords[:, 0].min(), coords[:, 1].min(), coords[:, 2].min()
    max_z, max_y, max_x = coords[:, 0].max(), coords[:, 1].max(), coords[:, 2].max()
    kernel_size = 22
    stride = 18
    th = 9000
    smooth_vol = np.zeros_like(volume)
    z_start = min_z
    while z_start < max_z:
        y_start = min_y
        while y_start < max_y:
            x_start = min_x
            while x_start < max_x:
                # foreground points strictly inside the current window
                v = coords[
                    (coords[:, 1] > y_start) & (coords[:, 1] < y_start + kernel_size) &
                    (coords[:, 0] > z_start) & (coords[:, 0] < z_start + kernel_size) &
                    (coords[:, 2] > x_start) & (coords[:, 2] < x_start + kernel_size)
                ]
                # meshing is executed if we have at least 3 points
                if v.size < 9:
                    # if v.size > 0:
                    #     smooth_vol[v[:, 0], v[:, 1], v[:, 2]] = 1
                    x_start += stride
                    continue
                # degenerate (coplanar along an axis) point sets would crash ConvexHull
                if v[:, 0].max() == v[:, 0].min() or v[:, 1].max() == v[:, 1].min() or v[:, 2].max() == v[:, 2].min():
                    x_start += stride
                    continue
                hull = sp_spatial.ConvexHull(v, incremental=True).simplices
                # mlab.triangular_mesh(v[:, 2], v[:, 1], v[:, 0], hull, color=(0, 1, 0))
                # filtering biggest tringles
                # tri = [v for v in v[hull] if abs(np.linalg.det(v))/2 < th]
                # tri = np.stack(tri)
                tri = v[hull]
                # voxellization
                if tri.size > 0:
                    for z, y, x in voxelize(tri):
                        smooth_vol[z, y, x] = 1
                x_start += stride
            y_start += stride
        z_start += stride
    return smooth_vol
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/__init__.py | .py | 51 | 3 | __author__ = 'Peter Hofmann'
__version__ = '0.0.5'
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/voxelize.py | .py | 7,000 | 185 | import argparse
import sys
import math
import numpy as np
from tqdm import tqdm
from .common.progressbar import print_progress_bar
from .voxelintersect.triangle import Triangle, t_c_intersection, INSIDE, vertexes_to_c_triangle, triangle_lib
from .mesh import get_scale_and_shift, scale_and_shift_triangle
class BoundaryBox(object):
    """Axis-aligned integer bounding box, grown incrementally from triangles.

    @type minimum: list[int]
    @type maximum: list[int]
    """
    minimum = None
    maximum = None

    def _ensure_initialized(self):
        # Allocate the bound lists on first use. The original only did this in
        # from_vertexes(); calling from_triangle() first on a fresh instance
        # crashed with `TypeError: 'NoneType' object does not support item
        # assignment`.
        if self.minimum is None:
            self.minimum = [0, 0, 0]
            self.maximum = [0, 0, 0]

    def get_center(self):
        """Return the integer center of the box; requires prior initialization."""
        assert self.minimum, "BoundaryBox not initialized"
        return [
            int((self.maximum[0] + self.minimum[0]) / 2.0),
            int((self.maximum[1] + self.minimum[1]) / 2.0),
            int((self.maximum[2] + self.minimum[2]) / 2.0)
        ]

    def from_triangle(self, triangle):
        """Reset the box to the (floor/ceil) bounds of a single triangle.

        @type triangle: Triangle
        """
        self._ensure_initialized()
        for axis in range(3):
            self.minimum[axis] = math.floor(triangle.min(axis))
            self.maximum[axis] = math.ceil(triangle.max(axis))

    def from_vertexes(self, vertex_1, vertex_2, vertex_3):
        """Grow the box to include three vertexes (floor/ceil to integers).

        @type vertex_1: (float, float, float)
        @type vertex_2: (float, float, float)
        @type vertex_3: (float, float, float)
        """
        if self.minimum is None:
            self._ensure_initialized()
            for axis in range(3):
                self.minimum[axis] = math.floor(min(vertex_1[axis], vertex_2[axis], vertex_3[axis]))
                self.maximum[axis] = math.ceil(max(vertex_1[axis], vertex_2[axis], vertex_3[axis]))
        else:
            for axis in range(3):
                self.minimum[axis] = math.floor(min(vertex_1[axis], vertex_2[axis], vertex_3[axis], self.minimum[axis]))
                self.maximum[axis] = math.ceil(max(vertex_1[axis], vertex_2[axis], vertex_3[axis], self.maximum[axis]))
n_range = {-1, 0, 1}
def get_intersecting_voxels_depth_first(vertex_1, vertex_2, vertex_3):
    """
    Flood-fill the set of unit voxels intersected by one triangle.

    Starting from the voxel containing vertex_1 (and its 26 neighbours),
    repeatedly pops candidate voxels, translates the triangle so the voxel
    center sits at the origin, and runs the triangle/unit-cube intersection
    test (native C library when loaded, pure-python fallback otherwise).
    Intersecting voxels seed their face-adjacent neighbours, guided by the
    signs of the translated vertex coordinates.

    @type vertex_1: numpy.ndarray
    @type vertex_2: numpy.ndarray
    @type vertex_3: numpy.ndarray
    @rtype: list[(int, int, int)]
    """
    c_lib = triangle_lib
    result_positions = []
    tmp_triangle = None
    searched = set()
    stack = set()
    # seed: the voxel containing vertex_1 plus its full 3x3x3 neighbourhood
    seed = (int(vertex_1[0]), int(vertex_1[1]), int(vertex_1[2]))
    for x in n_range:
        for y in n_range:
            for z in n_range:
                neighbour = (seed[0] + x, seed[1] + y, seed[2] + z)
                if neighbour not in searched:
                    stack.add(neighbour)
    # scratch arrays reused across iterations to avoid reallocation
    tmp = np.array([0.0, 0.0, 0.0])
    tmp_vertex_1 = np.array([0.0, 0.0, 0.0])
    tmp_vertex_2 = np.array([0.0, 0.0, 0.0])
    tmp_vertex_3 = np.array([0.0, 0.0, 0.0])
    if not c_lib:
        tmp_triangle = Triangle()
        tmp_triangle.set(tmp_vertex_1, tmp_vertex_2, tmp_vertex_3)
    while len(stack) > 0:
        position = stack.pop()
        searched.add(position)
        # voxel center of the candidate position
        tmp[0] = 0.5 + position[0]
        tmp[1] = 0.5 + position[1]
        tmp[2] = 0.5 + position[2]
        # move raster to origin, test assumed triangle in relation to origin
        np.subtract(vertex_1, tmp, tmp_vertex_1)
        np.subtract(vertex_2, tmp, tmp_vertex_2)
        np.subtract(vertex_3, tmp, tmp_vertex_3)
        try:
            if c_lib:
                is_inside = c_lib.t_c_intersection(
                    vertexes_to_c_triangle(tmp_vertex_1, tmp_vertex_2, tmp_vertex_3)) == INSIDE
            else:
                is_inside = t_c_intersection(tmp_triangle) == INSIDE
        except Exception:
            # native call failed: permanently fall back to the python test
            c_lib = None
            tmp_triangle = Triangle()
            tmp_triangle.set(tmp_vertex_1, tmp_vertex_2, tmp_vertex_3)
            is_inside = t_c_intersection(tmp_triangle) == INSIDE
        if is_inside:
            result_positions.append(position)
            # expand only toward sides where the (translated) triangle still
            # extends, using the signs of two vertex coordinates per axis
            neighbours = set()
            if tmp_vertex_2[0] < 0:
                neighbours.add((position[0] - 1, position[1], position[2]))
                if tmp_vertex_3[0] > 0:
                    neighbours.add((position[0] + 1, position[1], position[2]))
            else:
                neighbours.add((position[0] + 1, position[1], position[2]))
                if tmp_vertex_3[0] < 0:
                    neighbours.add((position[0] - 1, position[1], position[2]))
            if tmp_vertex_2[1] < 0:
                neighbours.add((position[0], position[1] - 1, position[2]))
                if tmp_vertex_3[1] > 0:
                    neighbours.add((position[0], position[1] + 1, position[2]))
            else:
                neighbours.add((position[0], position[1] + 1, position[2]))
                if tmp_vertex_3[1] < 0:
                    neighbours.add((position[0], position[1] - 1, position[2]))
            if tmp_vertex_2[2] < 0:
                neighbours.add((position[0], position[1], position[2] - 1))
                if tmp_vertex_3[2] > 0:
                    neighbours.add((position[0], position[1], position[2] + 1))
            else:
                neighbours.add((position[0], position[1], position[2] + 1))
                if tmp_vertex_3[2] < 0:
                    neighbours.add((position[0], position[1], position[2] - 1))
            for neighbour in neighbours:
                if neighbour not in searched:
                    stack.add(neighbour)
    del searched, stack
    return result_positions
def voxelize(list_of_triangles):
    """Yield the integer voxel coordinates intersected by any triangle of the mesh."""
    bbox = BoundaryBox()
    hit_voxels = set()
    for tri_a, tri_b, tri_c in list_of_triangles:
        bbox.from_vertexes(tri_a, tri_b, tri_c)
        hit_voxels.update(get_intersecting_voxels_depth_first(tri_a, tri_b, tri_c))
    # computed for parity with the original code (only used by the
    # commented-out centered variant of the yield below)
    center = bbox.get_center()
    while hit_voxels:
        voxel_a, voxel_b, voxel_c = hit_voxels.pop()
        yield voxel_a, voxel_b, voxel_c
        # yield voxel_a - center[0], voxel_b - center[1], voxel_c - center[2]
if __name__ == '__main__':
    # parse cli args
    parser = argparse.ArgumentParser(description='stl/obj file to voxels converter')
    parser.add_argument('input')
    parser.add_argument('resolution', type=int)
    args = parser.parse_args()
    # NOTE(review): voxelize() takes a single iterable of triangles, but it is
    # called here with (path, resolution), so this entry point raises
    # TypeError as written. A mesh-loading + get_scale_and_shift /
    # scale_and_shift_triangle step (see mesh.py) appears to be missing —
    # confirm against the upstream project before use.
    for pos_x, pos_y, pos_z in voxelize(args.input, args.resolution):
        sys.stdout.write("{}\t{}\t{}\n".format(pos_x, pos_y, pos_z))
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/mesh.py | .py | 1,333 | 45 | import numpy as np
# functions are loosly based
def get_scale_and_shift(mesh, resolution):
    """Compute a uniform scale and per-axis shift that map *mesh* into a
    ``resolution``-sized voxel grid, plus the triangle count.

    @type mesh: list[((float, float, float), (float, float, float), (float, float, float))]
    @type resolution: int
    @rtype: (float, list[float], int)
    """
    triangle_count = 0
    mins = list(mesh[0][0])
    maxs = list(mesh[0][0])
    for triangle in mesh:
        triangle_count += 1
        for point in triangle:
            # Fix: compare every coordinate of every vertex. The original used
            # `enumerate(triangle)` and tested `point[index]`, pairing the
            # vertex's position in the triangle with the axis index, so it only
            # examined the "diagonal" components (v0.x, v1.y, v2.z) and could
            # return a wrong bounding box / scale.
            for axis in range(3):
                if point[axis] < mins[axis]:
                    mins[axis] = point[axis]
                if point[axis] > maxs[axis]:
                    maxs[axis] = point[axis]
    # shift moves the minimum corner to the origin; scale fits the longest
    # extent into resolution-1 voxels
    shift = [-minimum for minimum in mins]
    scale = float(resolution - 1) / (max(maxs[0] - mins[0], maxs[1] - mins[1], maxs[2] - mins[2]))
    return scale, shift, triangle_count
def scale_and_shift_triangle(triangle, scale, shift):
    """Return the triangle's points shifted by *shift*, then scaled by *scale*.

    @type triangle: ((float, float, float), (float, float, float), (float, float, float))
    @type scale: float
    @type shift: list[float]
    @rtype: list[numpy.ndarray]
    """
    # dtype=float keeps the original behavior for integer inputs
    return [
        np.array([(point[axis] + shift[axis]) * scale for axis in range(3)], dtype=float)
        for point in triangle
    ]
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/voxelintersect/__init__.py | .py | 29 | 2 | __author__ = 'Peter Hofmann'
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/voxelintersect/triangle.py | .py | 14,805 | 450 | import sys
import os
import numpy as np
from ctypes import cdll, Structure, c_float
class Point3(Structure):
    """C-layout 3D point (three single-precision floats), mirroring the
    struct consumed by the native triangle/cube intersection library."""
    _fields_ = [
        ("x", c_float),
        ("y", c_float),
        ("z", c_float)
    ]
class Triangle3(Structure):
    """C-layout triangle made of three Point3 vertexes, passed by value to
    the native ``t_c_intersection`` function."""
    _fields_ = [
        ("v1", Point3),
        ("v2", Point3),
        ("v3", Point3)
    ]
# Optional native acceleration: try to load the compiled triangle/cube
# intersection library next to this file. When loading fails (or the
# platform/bitness doesn't match), triangle_lib stays None and callers fall
# back to the pure-python implementation below.
triangle_lib = None
script_dir = os.path.dirname(os.path.realpath(__file__))
try:
    # 64-bit Linux build
    if sys.platform.startswith('linux') and sys.maxsize == 9223372036854775807:
        file_path_library = os.path.join(script_dir, 'triangleCube_linux64.so')
        if os.path.exists(file_path_library):
            triangle_lib = cdll.LoadLibrary(file_path_library)
    # NOTE(review): this branch only matches 32-bit Windows interpreters and
    # loads a '.so'-named file — confirm that is the intended artifact.
    elif sys.platform.startswith("win") and sys.maxsize == 2147483647:
        file_path_library = os.path.join(script_dir, 'triangleCube_win32.so')
        if os.path.exists(file_path_library):
            triangle_lib = cdll.LoadLibrary(file_path_library)
except OSError:
    triangle_lib = None
"""
Code conversion into python from:
'https://github.com/erich666/GraphicsGems/blob/master/gemsiii/triangleCube.c'
"""
INSIDE = 0
OUTSIDE = 1
EPS = 1e-5
# EPS = 0.0
# print(EPS)
def cross_product(a, b):
    """Return the cross product a x b of two 3-vectors as a tuple."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return (ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx)
def sign3(point):
    """Encode the EPS-tolerant sign of each coordinate as a 6-bit code.

    Two bits per axis: one set when the value is below +EPS ("negative-ish"),
    one when it is above -EPS ("positive-ish"); near-zero values set both.
    """
    sign_code = 0
    for value, below_bit, above_bit in ((point[0], 4, 32),
                                        (point[1], 2, 16),
                                        (point[2], 1, 8)):
        if value < EPS:
            sign_code |= below_bit
        if value > -EPS:
            sign_code |= above_bit
    return sign_code
def lerp(alpha, a, b):
    """Linearly interpolate from *a* (alpha=0) toward *b* (alpha=1)."""
    delta = b - a
    return a + alpha * delta
class Triangle(object):
    """Mutable triangle holding three vertexes, with per-axis min/max helpers.

    @type v1: numpy.ndarray
    @type v2: numpy.ndarray
    @type v3: numpy.ndarray
    """

    def __init__(self):
        # placeholders until set() is called
        self.v1 = 0
        self.v2 = 0
        self.v3 = 0

    def set(self, vertex_1, vertex_2, vertex_3):
        """Assign the three vertexes.

        @type vertex_1: numpy.ndarray
        @type vertex_2: numpy.ndarray
        @type vertex_3: numpy.ndarray
        """
        self.v1 = vertex_1
        self.v2 = vertex_2
        self.v3 = vertex_3

    def min(self, index):
        """Smallest coordinate along axis *index* over the three vertexes."""
        return min(self.v1[index], self.v2[index], self.v3[index])

    def max(self, index):
        """Largest coordinate along axis *index* over the three vertexes."""
        return max(self.v1[index], self.v2[index], self.v3[index])
def vertexes_to_c_triangle(vertex_1, vertex_2, vertex_3):
    """Pack three python vertexes into a ctypes Triangle3 for the C library."""
    points = [Point3(vertex[0], vertex[1], vertex[2])
              for vertex in (vertex_1, vertex_2, vertex_3)]
    return Triangle3(*points)
def face_plane(point):
    """Bitmask of the six unit-cube face planes that *point* lies outside of.

    The cube is centered on the origin with faces at +/-0.5.
    @type point: numpy.ndarray | (float, float, float)
    """
    face_plane_code = 0
    for axis, high_bit, low_bit in ((0, 0x01, 0x02),
                                    (1, 0x04, 0x08),
                                    (2, 0x10, 0x20)):
        if point[axis] >= .5:
            face_plane_code |= high_bit
        if point[axis] < -.5:
            face_plane_code |= low_bit
    return face_plane_code
def bevel_2d(point):
    """Bitmask of the twelve edge ("bevel") planes that *point* is outside of.

    Four planes per axis pair (xy, xz, yz); bits advance in the same order as
    the original cascade of comparisons.
    """
    edge_plane_code = 0
    base_bit = 0x001
    for u, v in ((point[0], point[1]),
                 (point[0], point[2]),
                 (point[1], point[2])):
        if u + v >= 1.0:
            edge_plane_code |= base_bit
        if u - v >= 1.0:
            edge_plane_code |= base_bit << 1
        if -u + v > 1.0:
            edge_plane_code |= base_bit << 2
        if -u - v > 1.0:
            edge_plane_code |= base_bit << 3
        base_bit <<= 4
    return edge_plane_code
def bevel_3d(point):
    """Bitmask of the eight corner planes that *point* is outside of.

    One plane per sign combination of (x, y, z); the +x planes use >= and
    the -x planes use strict >, matching the original comparisons.
    """
    corner_plane_code = 0
    bit = 0x01
    for sx in (1.0, -1.0):
        for sy in (1.0, -1.0):
            for sz in (1.0, -1.0):
                total = sx * point[0] + sy * point[1] + sz * point[2]
                outside = (total >= 1.5) if sx > 0 else (total > 1.5)
                if outside:
                    corner_plane_code |= bit
                bit <<= 1
    return corner_plane_code
def check_point(point_a, point_b, alpha, mask):
    """Interpolate the point *alpha* of the way from point_a to point_b and
    test it against the cube face planes selected by *mask*.

    Returns 0 (INSIDE) when the interpolated point lies on the cube with
    respect to the masked faces.
    """
    candidate = tuple(lerp(alpha, point_a[axis], point_b[axis]) for axis in range(3))
    return face_plane(candidate) & mask
def check_line(point_a, point_b, outcode_diff):
    """Intersect the segment point_a -> point_b with the cube face planes
    flagged in *outcode_diff* and test whether any intersection point lies on
    a cube face (considering only the five remaining faces per plane).

    Zero bits in *outcode_diff* mean the segment is fully outside that face.
    Returns INSIDE as soon as one intersection lands on the cube, else OUTSIDE.
    """
    # (face bit, plane coordinate, axis, mask of the other five faces)
    face_tests = (
        (0x01, 0.5, 0, 0x3e),
        (0x02, -0.5, 0, 0x3d),
        (0x04, 0.5, 1, 0x3b),
        (0x08, -0.5, 1, 0x37),
        (0x10, 0.5, 2, 0x2f),
        (0x20, -0.5, 2, 0x1f),
    )
    for bit, plane, axis, mask in face_tests:
        if (bit & outcode_diff) == 0:
            continue
        alpha = (plane - point_a[axis]) / (point_b[axis] - point_a[axis])
        if check_point(point_a, point_b, alpha, mask) == INSIDE:
            return INSIDE
    return OUTSIDE
def point_triangle_intersection(p, t):
    """
    Test if 3D point is inside 3D triangle

    Returns INSIDE (0) when *p* lies on triangle *t* (within EPS tolerance),
    OUTSIDE (1) otherwise.

    @type p: list[float]
    @type t: Triangle
    """
    # /* First, a quick bounding-box test: */
    # /* If P is outside triangle bbox, there cannot be an intersection. */
    # add/sub EPS as buffer to avoid an floating point issue
    if p[0] > t.max(0) + EPS:
        return OUTSIDE
    if p[1] > t.max(1) + EPS:
        return OUTSIDE
    if p[2] > t.max(2) + EPS:
        return OUTSIDE
    if p[0] < t.min(0) - EPS:
        return OUTSIDE
    if p[1] < t.min(1) - EPS:
        return OUTSIDE
    if p[2] < t.min(2) - EPS:
        return OUTSIDE
    # /* For each triangle side, make a vector out of it by subtracting vertexes; */
    # /* make another vector from one vertex to point P. */
    # /* The crossproduct of these two vectors is orthogonal to both and the */
    # /* signs of its X,Y,Z components indicate whether P was to the inside or */
    # /* to the outside of this triangle side. */
    vect12 = np.subtract(t.v1, t.v2)
    vect1h = np.subtract(t.v1, p)
    cross12_1p = cross_product(vect12, vect1h)
    sign12 = sign3(cross12_1p)  # /* Extract X,Y,Z signs as 0..7 or 0...63 integer */
    vect23 = np.subtract(t.v2, t.v3)
    vect2h = np.subtract(t.v2, p)
    cross23_2p = cross_product(vect23, vect2h)
    sign23 = sign3(cross23_2p)
    vect31 = np.subtract(t.v3, t.v1)
    vect3h = np.subtract(t.v3, p)
    cross31_3p = cross_product(vect31, vect3h)
    sign31 = sign3(cross31_3p)
    # /* If all three cross product vectors agree in their component signs, */
    # /* then the point must be inside all three. */
    # /* P cannot be OUTSIDE all three sides simultaneously. */
    if (sign12 & sign23 & sign31) == 0:
        return OUTSIDE
    return INSIDE
def t_c_intersection(triangle):
    """
    Compare a triangle with the unit cube centered on the origin.

    Port of the Graphics Gems III algorithm: trivial accept/reject of
    the three vertices against the face, edge-bevel and corner-bevel
    planes, then triangle-edge-vs-cube tests, then cube-diagonal-vs-
    triangle-interior tests.

    @type triangle: Triangle
    @return: INSIDE (0) if the triangle intersects the cube, else OUTSIDE (1)
    """
    # Compare all three vertices with the six face planes; any vertex
    # inside the cube settles it immediately.
    v1_test = face_plane(triangle.v1)
    v2_test = face_plane(triangle.v2)
    v3_test = face_plane(triangle.v3)
    if INSIDE in (v1_test, v2_test, v3_test):
        return INSIDE
    # All vertices beyond one common face plane -> trivial rejection.
    if (v1_test & v2_test & v3_test) != INSIDE:
        return OUTSIDE
    # Same trivial rejection against the 12 edge-bevel planes...
    v1_test |= bevel_2d(triangle.v1) << 8
    v2_test |= bevel_2d(triangle.v2) << 8
    v3_test |= bevel_2d(triangle.v3) << 8
    if (v1_test & v2_test & v3_test) != INSIDE:
        return OUTSIDE
    # ...and against the 8 corner-bevel planes.
    v1_test |= bevel_3d(triangle.v1) << 24
    v2_test |= bevel_3d(triangle.v2) << 24
    v3_test |= bevel_3d(triangle.v3) << 24
    if (v1_test & v2_test & v3_test) != INSIDE:
        return OUTSIDE
    # For each vertex pair not trivially rejected, test whether the
    # triangle edge pierces the cube.  The OR of the two outcodes is
    # passed down so check_line() only examines cube faces the edge
    # actually spans.
    edges = (
        (triangle.v1, triangle.v2, v1_test, v2_test),
        (triangle.v1, triangle.v3, v1_test, v3_test),
        (triangle.v2, triangle.v3, v2_test, v3_test),
    )
    for point_a, point_b, test_a, test_b in edges:
        if (test_a & test_b) == 0:
            if check_line(point_a, point_b, test_a | test_b) == INSIDE:
                return INSIDE
    # No edge touched the cube; look for a cube diagonal crossing the
    # triangle interior.  The triangle plane is A*x + B*y + C*z = d
    # with (A, B, C) the normal from the cross product of two sides.
    vect12 = np.subtract(triangle.v1, triangle.v2)
    vect13 = np.subtract(triangle.v1, triangle.v3)
    norm = cross_product(vect12, vect13)
    d = norm[0] * triangle.v1[0] + norm[1] * triangle.v1[1] + norm[2] * triangle.v1[2]
    # Each main diagonal has direction (1, sy, sz) with sy, sz = +-1;
    # solving the plane equation along it gives the hit point
    # (d / denom) * (1, sy, sz).  A hit with |x| <= 0.5 lies inside the
    # cube and is then tested against the triangle interior.  Diagonals
    # parallel to the plane (denom ~ 0) are skipped to avoid a division
    # by zero; if one is parallel, another one intersects the plane.
    for sign_y, sign_z in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
        denom = norm[0] + sign_y * norm[1] + sign_z * norm[2]
        if abs(denom) <= EPS:
            continue
        distance = d / denom
        if abs(distance) > 0.5:
            continue
        hit = [distance, sign_y * distance, sign_z * distance]
        if point_triangle_intersection(hit, triangle) == INSIDE:
            return INSIDE
    # No edge touched the cube; no cube diagonal touched the triangle.
    return OUTSIDE
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/voxelintersect/triangleCube.c | .c | 12,166 | 325 | /* Original file:
* https://github.com/erich666/GraphicsGems/blob/master/gemsiii/triangleCube.c
* Some optimisations for use in voxelisation have been made
*/
#include <math.h>
/* this version of SIGN3 shows some numerical instability, and is improved
 * by using the uncommented macro that follows, and a different test with it */
/* BUG FIX: the conditional operator binds looser than '|' in C, so the
 * original "cond ? 4 : 0 | cond2 ? 32 : 0 | ..." parsed as nested
 * conditionals and returned a single value instead of OR-ing one bit per
 * component sign.  Each ternary is now parenthesized so the macro really
 * packs the sign bits (6 bits here, 3 in the OLD_TEST variant) into one
 * integer, as the "(sign12 & sign23 & sign31)" agreement test in
 * point_triangle_intersection() expects. */
#ifdef OLD_TEST
#define SIGN3( A ) \
       ((((A).x < 0) ? 4 : 0) | (((A).y < 0) ? 2 : 0) | (((A).z < 0) ? 1 : 0))
#else
#define EPS 1e-5
#define SIGN3( A ) \
       ((((A).x < EPS) ? 4 : 0) | (((A).x > -EPS) ? 32 : 0) | \
        (((A).y < EPS) ? 2 : 0) | (((A).y > -EPS) ? 16 : 0) | \
        (((A).z < EPS) ? 1 : 0) | (((A).z > -EPS) ? 8 : 0))
#endif
/* C = A x B (vector cross product) */
#define CROSS( A, B, C ) { \
  (C).x =  (A).y * (B).z - (A).z * (B).y; \
  (C).y = -(A).x * (B).z + (A).z * (B).x; \
  (C).z =  (A).x * (B).y - (A).y * (B).x; \
}
/* C = A - B (componentwise) */
#define SUB( A, B, C ) { \
  (C).x = (A).x - (B).x; \
  (C).y = (A).y - (B).y; \
  (C).z = (A).z - (B).z; \
}
/* Linear interpolation: fraction A of the way from B toward C */
#define LERP( A, B, C) ((B)+(A)*((C)-(B)))
/* Minimum / maximum of three values */
#define MIN3(a,b,c) ((((a)<(b))&&((a)<(c))) ? (a) : (((b)<(c)) ? (b) : (c)))
#define MAX3(a,b,c) ((((a)>(b))&&((a)>(c))) ? (a) : (((b)>(c)) ? (b) : (c)))
/* Return codes shared by every test in this file. */
#define INSIDE 0
#define OUTSIDE 1
/* A point (or vector) in 3-space. */
typedef struct {
  float x;
  float y;
  float z;
} Point3;
/* A triangle given by its three corner points. */
typedef struct{
  Point3 v1; /* Vertex1 */
  Point3 v2; /* Vertex2 */
  Point3 v3; /* Vertex3 */
} Triangle3;
/*___________________________________________________________________________*/
/* Which of the six face-plane(s) is point P outside of? */
/* Returns a 6-bit outcode: 0x01/0x02 = beyond the +x/-x face,
 * 0x04/0x08 = +y/-y, 0x10/0x20 = +z/-z.  0 (INSIDE) means P lies
 * inside the unit cube centered on the origin. */
long face_plane(Point3 p)
{
    long outcode;
    outcode = 0;
    if (p.x >= .5) outcode |= 0x01; // > .5
    if (p.x < -.5) outcode |= 0x02;
    if (p.y >= .5) outcode |= 0x04; // > .5
    if (p.y < -.5) outcode |= 0x08;
    if (p.z >= .5) outcode |= 0x10; // > .5
    if (p.z < -.5) outcode |= 0x20;
    return(outcode);
}
/*. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . */
/* Which of the twelve edge plane(s) is point P outside of? */
/* The edge planes (+-x +-y = 1, +-x +-z = 1, +-y +-z = 1) bevel the
 * twelve cube edges; returns a 12-bit outcode, 0 = inside all. */
long bevel_2d(Point3 p)
{
    long outcode;
    outcode = 0;
    if ( p.x + p.y >= 1.0) outcode |= 0x001; // > 1.0
    if ( p.x - p.y >= 1.0) outcode |= 0x002; // > 1.0
    if (-p.x + p.y > 1.0) outcode |= 0x004;
    if (-p.x - p.y > 1.0) outcode |= 0x008;
    if ( p.x + p.z >= 1.0) outcode |= 0x010; // > 1.0
    if ( p.x - p.z >= 1.0) outcode |= 0x020; // > 1.0
    if (-p.x + p.z > 1.0) outcode |= 0x040;
    if (-p.x - p.z > 1.0) outcode |= 0x080;
    if ( p.y + p.z >= 1.0) outcode |= 0x100; // > 1.0
    if ( p.y - p.z >= 1.0) outcode |= 0x200; // > 1.0
    if (-p.y + p.z > 1.0) outcode |= 0x400;
    if (-p.y - p.z > 1.0) outcode |= 0x800;
    return(outcode);
}
/*. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . */
/* Which of the eight corner plane(s) is point P outside of? */
/* The corner planes (+-x +-y +-z = 1.5) bevel the eight cube corners;
 * returns an 8-bit outcode, 0 = inside all. */
long bevel_3d(Point3 p)
{
    long outcode;
    outcode = 0;
    if (( p.x + p.y + p.z) >= 1.5) outcode |= 0x01; // > 1.5
    if (( p.x + p.y - p.z) >= 1.5) outcode |= 0x02; // > 1.5
    if (( p.x - p.y + p.z) >= 1.5) outcode |= 0x04; // > 1.5
    if (( p.x - p.y - p.z) >= 1.5) outcode |= 0x08; // > 1.5
    if ((-p.x + p.y + p.z) > 1.5) outcode |= 0x10;
    if ((-p.x + p.y - p.z) > 1.5) outcode |= 0x20;
    if ((-p.x - p.y + p.z) > 1.5) outcode |= 0x40;
    if ((-p.x - p.y - p.z) > 1.5) outcode |= 0x80;
    return(outcode);
}
/*. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . */
/* Test the point "alpha" of the way from P1 to P2 */
/* See if it is on a face of the cube              */
/* Consider only faces in "mask"                   */
/* Returns 0 (INSIDE) when the interpolated point is not outside any
 * of the masked face planes. */
long check_point(Point3 p1, Point3 p2, float alpha, long mask)
{
    Point3 plane_point;
    /* LERP(a, b, c) = b + a*(c - b): interpolate from the 2nd toward
     * the 3rd argument. */
    plane_point.x = LERP(alpha, p1.x, p2.x);
    plane_point.y = LERP(alpha, p1.y, p2.y);
    plane_point.z = LERP(alpha, p1.z, p2.z);
    return(face_plane(plane_point) & mask);
}
/*. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . */
/* Compute intersection of P1 --> P2 line segment with face planes */
/* Then test intersection point to see if it is on cube face       */
/* Consider only face planes in "outcode_diff"                     */
/* Note: Zero bits in "outcode_diff" means face line is outside of */
long check_line(Point3 p1, Point3 p2, long outcode_diff)
{
    /* For every face plane the segment spans (its bit set in
     * outcode_diff), compute the parametric hit on that plane and ask
     * check_point() whether the hit lies within the cube face; each
     * mask excludes the bit of the face plane being intersected. */
    if ((0x01 & outcode_diff) != 0)
        if (check_point(p1,p2,( 0.5f-p1.x)/(p2.x-p1.x),0x3e) == INSIDE) return(INSIDE);
    if ((0x02 & outcode_diff) != 0)
        if (check_point(p1,p2,(-0.5f-p1.x)/(p2.x-p1.x),0x3d) == INSIDE) return(INSIDE);
    if ((0x04 & outcode_diff) != 0)
        if (check_point(p1,p2,( 0.5f-p1.y)/(p2.y-p1.y),0x3b) == INSIDE) return(INSIDE);
    if ((0x08 & outcode_diff) != 0)
        if (check_point(p1,p2,(-0.5f-p1.y)/(p2.y-p1.y),0x37) == INSIDE) return(INSIDE);
    if ((0x10 & outcode_diff) != 0)
        if (check_point(p1,p2,( 0.5f-p1.z)/(p2.z-p1.z),0x2f) == INSIDE) return(INSIDE);
    if ((0x20 & outcode_diff) != 0)
        if (check_point(p1,p2,(-0.5f-p1.z)/(p2.z-p1.z),0x1f) == INSIDE) return(INSIDE);
    return(OUTSIDE);
}
/*. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . */
/* Test if 3D point is inside 3D triangle */
/* Returns INSIDE (0) or OUTSIDE (1).     */
long point_triangle_intersection(Point3 p, Triangle3 t)
{
    long sign12,sign23,sign31;
    Point3 vect12,vect23,vect31,vect1h,vect2h,vect3h;
    Point3 cross12_1p,cross23_2p,cross31_3p;
    /* First, a quick bounding-box test: */
    /* If P is outside triangle bbox, there cannot be an intersection. */
    if (p.x > MAX3(t.v1.x, t.v2.x, t.v3.x) + EPS) return(OUTSIDE);
    if (p.y > MAX3(t.v1.y, t.v2.y, t.v3.y) + EPS) return(OUTSIDE);
    if (p.z > MAX3(t.v1.z, t.v2.z, t.v3.z) + EPS) return(OUTSIDE);
    if (p.x < MIN3(t.v1.x, t.v2.x, t.v3.x) - EPS) return(OUTSIDE);
    if (p.y < MIN3(t.v1.y, t.v2.y, t.v3.y) - EPS) return(OUTSIDE);
    if (p.z < MIN3(t.v1.z, t.v2.z, t.v3.z) - EPS) return(OUTSIDE);
    /* For each triangle side, make a vector out of it by subtracting vertexes; */
    /* make another vector from one vertex to point P. */
    /* The crossproduct of these two vectors is orthogonal to both and the */
    /* signs of its X,Y,Z components indicate whether P was to the inside or */
    /* to the outside of this triangle side. */
    SUB(t.v1, t.v2, vect12)
    SUB(t.v1, p, vect1h);
    CROSS(vect12, vect1h, cross12_1p)
    sign12 = SIGN3(cross12_1p); /* Extract X,Y,Z signs as 0..7 or 0...63 integer */
    SUB(t.v2, t.v3, vect23)
    SUB(t.v2, p, vect2h);
    CROSS(vect23, vect2h, cross23_2p)
    sign23 = SIGN3(cross23_2p);
    SUB(t.v3, t.v1, vect31)
    SUB(t.v3, p, vect3h);
    CROSS(vect31, vect3h, cross31_3p)
    sign31 = SIGN3(cross31_3p);
    /* If all three crossproduct vectors agree in their component signs, */
    /* then the point must be inside all three. */
    /* P cannot be OUTSIDE all three sides simultaneously. */
    /* this is the old test; with the revised SIGN3() macro, the test
     * needs to be revised. */
#ifdef OLD_TEST
    if ((sign12 == sign23) && (sign23 == sign31))
        return(INSIDE);
    else
        return(OUTSIDE);
#else
    /* inside iff the three sign masks share at least one bit */
    return ((sign12 & sign23 & sign31) == 0) ? OUTSIDE : INSIDE;
#endif
}
/*. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . */
/**********************************************/
/* This is the main algorithm procedure.      */
/* Triangle t is compared with a unit cube,   */
/* centered on the origin.                    */
/* It returns INSIDE (0) or OUTSIDE(1) if t   */
/* intersects or does not intersect the cube. */
/**********************************************/
long t_c_intersection(Triangle3 t)
{
    long v1_test,v2_test,v3_test;
    float d,denom;
    Point3 vect12,vect13,norm;
    Point3 hitpp,hitpn,hitnp,hitnn;
    /* First compare all three vertexes with all six face-planes */
    /* If any vertex is inside the cube, return immediately!     */
    if ((v1_test = face_plane(t.v1)) == INSIDE) return(INSIDE);
    if ((v2_test = face_plane(t.v2)) == INSIDE) return(INSIDE);
    if ((v3_test = face_plane(t.v3)) == INSIDE) return(INSIDE);
    /* If all three vertexes were outside of one or more face-planes, */
    /* return immediately with a trivial rejection!                   */
    if ((v1_test & v2_test & v3_test) != 0) return(OUTSIDE);
    /* Now do the same trivial rejection test for the 12 edge planes */
    v1_test |= bevel_2d(t.v1) << 8;
    v2_test |= bevel_2d(t.v2) << 8;
    v3_test |= bevel_2d(t.v3) << 8;
    if ((v1_test & v2_test & v3_test) != 0) return(OUTSIDE);
    /* Now do the same trivial rejection test for the 8 corner planes */
    v1_test |= bevel_3d(t.v1) << 24;
    v2_test |= bevel_3d(t.v2) << 24;
    v3_test |= bevel_3d(t.v3) << 24;
    if ((v1_test & v2_test & v3_test) != 0) return(OUTSIDE);
    /* If vertex 1 and 2, as a pair, cannot be trivially rejected */
    /* by the above tests, then see if the v1-->v2 triangle edge  */
    /* intersects the cube. Do the same for v1-->v3 and v2-->v3.  */
    /* Pass to the intersection algorithm the "OR" of the outcode */
    /* bits, so that only those cube faces which are spanned by   */
    /* each triangle edge need be tested.                         */
    if ((v1_test & v2_test) == 0)
        if (check_line(t.v1,t.v2,v1_test|v2_test) == INSIDE) return(INSIDE);
    if ((v1_test & v3_test) == 0)
        if (check_line(t.v1,t.v3,v1_test|v3_test) == INSIDE) return(INSIDE);
    if ((v2_test & v3_test) == 0)
        if (check_line(t.v2,t.v3,v2_test|v3_test) == INSIDE) return(INSIDE);
    /* By now, we know that the triangle is not off to any side,     */
    /* and that its sides do not penetrate the cube.  We must now    */
    /* test for the cube intersecting the interior of the triangle.  */
    /* We do this by looking for intersections between the cube      */
    /* diagonals and the triangle...first finding the intersection   */
    /* of the four diagonals with the plane of the triangle, and     */
    /* then if that intersection is inside the cube, pursuing        */
    /* whether the intersection point is inside the triangle itself. */
    /* To find plane of the triangle, first perform crossproduct on  */
    /* two triangle side vectors to compute the normal vector.       */
    SUB(t.v1,t.v2,vect12);
    SUB(t.v1,t.v3,vect13);
    CROSS(vect12,vect13,norm)
    /* The normal vector "norm" X,Y,Z components are the coefficients */
    /* of the triangles AX + BY + CZ + D = 0 plane equation.  If we   */
    /* solve the plane equation for X=Y=Z (a diagonal), we get        */
    /* -D/(A+B+C) as a metric of the distance from cube center to the */
    /* diagonal/plane intersection.  If this is between -0.5 and 0.5, */
    /* the intersection is inside the cube.  If so, we continue by    */
    /* doing a point/triangle intersection.                           */
    /* Do this for all four diagonals.                                */
    d = norm.x * t.v1.x + norm.y * t.v1.y + norm.z * t.v1.z;
    /* if one of the diagonals is parallel to the plane, the other will intersect the plane */
    if(fabs(denom=(norm.x + norm.y + norm.z))>EPS)
    /* skip parallel diagonals to the plane; division by 0 can occur */
    {
        /* diagonal along (+1, +1, +1) */
        hitpp.x = hitpp.y = hitpp.z = d / denom;
        if (fabs(hitpp.x) <= 0.5)
            if (point_triangle_intersection(hitpp,t) == INSIDE) return(INSIDE);
    }
    if(fabs(denom=(norm.x + norm.y - norm.z))>EPS)
    {
        /* diagonal along (+1, +1, -1) */
        hitpn.z = -(hitpn.x = hitpn.y = d / denom);
        if (fabs(hitpn.x) <= 0.5)
            if (point_triangle_intersection(hitpn,t) == INSIDE) return(INSIDE);
    }
    if(fabs(denom=(norm.x - norm.y + norm.z))>EPS)
    {
        /* diagonal along (+1, -1, +1) */
        hitnp.y = -(hitnp.x = hitnp.z = d / denom);
        if (fabs(hitnp.x) <= 0.5)
            if (point_triangle_intersection(hitnp,t) == INSIDE) return(INSIDE);
    }
    if(fabs(denom=(norm.x - norm.y - norm.z))>EPS)
    {
        /* diagonal along (+1, -1, -1) */
        hitnn.y = hitnn.z = -(hitnn.x = d / denom);
        if (fabs(hitnn.x) <= 0.5)
            if (point_triangle_intersection(hitnn,t) == INSIDE) return(INSIDE);
    }
    /* No edge touched the cube; no cube diagonal touched the triangle. */
    /* We're done...there was no intersection.                          */
    return(OUTSIDE);
}
| C |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/voxelintersect/LICENSE.md | .md | 706 | 5 | LICENSE
This code repository predates the concept of Open Source, and predates most licenses along such lines. As such, the official license truly is:
EULA: The Graphics Gems code is copyright-protected. In other words, you cannot claim the text of the code as your own and resell it. Using the code is permitted in any program, product, or library, non-commercial or commercial. Giving credit is not required, though is a nice gesture. The code comes as-is, and if there are any flaws or problems with any Gems code, nobody involved with Gems - authors, editors, publishers, or webmasters - are to be held responsible. Basically, don't be a jerk, and remember that anything free comes with no guarantee. | Markdown |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/common/progressbar.py | .py | 1,102 | 25 | # Print iterations progress
# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
import sys
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=20, fill='='):
    """
    Render a one-line terminal progress bar on stderr.

    Call repeatedly inside a loop; the leading carriage return rewrites
    the same terminal line, and a newline is emitted once the final
    iteration is reached.

    @params:
        iteration - Required : current iteration (Int)
        total     - Required : total iterations (Int)
        prefix    - Optional : prefix string (Str)
        suffix    - Optional : suffix string (Str)
        decimals  - Optional : positive number of decimals in percent complete (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
    """
    percent = '{:.{}f}'.format(100 * (iteration / float(total)), decimals)
    done_chars = int(length * iteration // total)
    bar = fill * done_chars + '-' * (length - done_chars)
    sys.stderr.write('\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix))
    if iteration == total:
        # Keep the finished bar visible and move to a fresh line.
        sys.stderr.write('\n')
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/common/__init__.py | .py | 29 | 2 | __author__ = 'Peter Hofmann'
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/meshlib/mtlreader.py | .py | 7,545 | 222 | import os
import sys
class Texture(object):
    """
    One texture-map statement of a .mtl material (e.g. the argument
    part of 'map_Ka').

    @type file_path: str
    @type origin: (float, float, float)        # -o option
    @type stretch: (float, float, float)       # -s option
    @type turbulence: (float, float, float)    # -t option
    """
    def __init__(self):
        self.file_path = ""
        self.origin = (0.0, 0.0, 0.0)
        self.stretch = (1.0, 1.0, 1.0)
        self.turbulence = (0.0, 0.0, 0.0)

    @staticmethod
    def _pop_option(options, count):
        """
        Pop one option keyword plus `count` arguments off the front of
        the option string.

        BUG FIX: the previous `key, u, v, w, options = options.split(' ', 4)`
        raised ValueError whenever the option was the last thing in the
        string (e.g. 'map_Ka -o 1 2 3 file.jpg'), because the split then
        yields one element too few to unpack.  Here the remainder simply
        becomes '' in that case.

        @type options: str
        @type count: int
        @rtype: (list[str], str)
        @return: the option's arguments and the remaining option string
        """
        parts = options.split(' ', count + 1)
        arguments = parts[1:count + 1]
        remainder = parts[count + 1] if len(parts) > count + 1 else ''
        return arguments, remainder

    def read(self, map_statement):
        """
        Parse the argument part of a map_* statement: optional
        '-o/-s/-t u v w' style options followed by the texture file path.

        @type map_statement: str
        """
        tmp = map_statement.rsplit(' ', 1)
        if len(tmp) == 1:
            # no options, just the file path
            self.file_path = tmp[0]
            return
        options, self.file_path = tmp
        while options.startswith('-'):
            if options.startswith('-o'):
                (u, v, w), options = self._pop_option(options, 3)
                self.origin = (float(u), float(v), float(w))
            elif options.startswith('-s'):
                (u, v, w), options = self._pop_option(options, 3)
                self.stretch = (float(u), float(v), float(w))
            elif options.startswith('-t'):
                (u, v, w), options = self._pop_option(options, 3)
                self.turbulence = (float(u), float(v), float(w))
            else:
                # unknown option: skip the keyword and its single value
                _, options = self._pop_option(options, 1)

    def to_stream(self, stream):
        """
        Write the statement's argument part (currently only the file
        path) to a text stream.
        """
        # stream.write("-o {} ".format(self.origin))
        # stream.write("-s {} ".format(self.stretch))
        # stream.write("-t {} ".format(self.turbulence))
        stream.write("{}\n".format(self.file_path))
class Material(object):
    """
    Color/illumination settings of one .mtl material; only the subset
    needed here is parsed.

    @type d: float          # dissolve (opacity); 1.0 = fully opaque
    @type map_Ka: Texture   # ambient texture map, None if absent
    @type map_Kd: Texture   # diffuse texture map, None if absent
    """
    def __init__(self):
        self.d = 1.0
        self.map_Ka = None
        self.map_Kd = None

    @staticmethod
    def _opacity_value(data_string):
        """
        Extract the float value of a 'd'/'Tr' statement; a leading
        option (e.g. '-halo 0.5') means the value is the last token.
        """
        if data_string.startswith('-'):
            return float(data_string.rsplit(' ', 1)[1])
        return float(data_string)

    def read(self, statement_lines):
        """
        Parse the statements belonging to one 'newmtl' entry.

        @type statement_lines: list[str]
        """
        for statement in statement_lines:
            keyword, payload = statement.split(' ', 1)
            if keyword == 'map_Ka':
                self.map_Ka = Texture()
                self.map_Ka.read(payload)
            elif keyword == 'map_Kd':
                self.map_Kd = Texture()
                self.map_Kd.read(payload)
            elif keyword == 'd':
                self.d = self._opacity_value(payload)
            elif keyword == 'Tr':
                # 'Tr' is transparency, the complement of dissolve.
                self.d = 1 - self._opacity_value(payload)

    def to_stream(self, stream):
        """
        Write the material's statements (without 'newmtl') to a stream.
        """
        stream.write(" d {} \n".format(self.d))
        if self.map_Ka:
            stream.write(" map_Ka ".format(self.d))
            self.map_Ka.to_stream(stream)
        if self.map_Kd:
            stream.write(" map_Kd ".format(self.d))
            self.map_Kd.to_stream(stream)
class MtlReader(object):
    """
    A minimalistic .mtl reader that only cares about texture maps, e.g.:

        newmtl Inner_Wall
        map_Ka bMOinnerwall.jpg
        map_Kd bMOinnerwall.jpg

    @type _materials: dict[str, Material]
    """
    def __init__(self):
        self._materials = {}
        # directory searched for texture image files
        self._texture_directory = "."

    def read_stream(self, input_stream, texture_directory="."):
        """
        Parse a .mtl text stream into Material objects, one per
        'newmtl' statement; comments and blank lines are skipped.

        @type input_stream: file-like object (text)
        @type texture_directory: str
        """
        self._materials = {}
        self._texture_directory = texture_directory
        statement_lines = None
        material_name = ""
        for line in input_stream:
            line = line.strip()
            if line.startswith('#') or len(line) == 0:
                continue
            if line.startswith("newmtl"):
                # a new material starts: flush the statements collected
                # for the previous one
                if statement_lines:
                    self._materials[material_name] = Material()
                    self._materials[material_name].read(statement_lines)
                    del statement_lines
                statement_lines = []
                material_name = line.split(' ', 1)[1]
            else:
                statement_lines.append(line)
        # read last material
        if statement_lines:
            self._materials[material_name] = Material()
            self._materials[material_name].read(statement_lines)

    def search_file(self, file_name_prefix):
        """
        Look in the texture directory for exactly one file whose name
        starts with the given prefix (anything after 'EXPORT' in names
        is ignored on both sides).

        @type file_name_prefix: str
        @rtype: str | None
        @return: the file name, or None if zero or several candidates match
        """
        if "EXPORT" in file_name_prefix:
            file_name_prefix = file_name_prefix.split("EXPORT")[0]
        results = []
        for item in os.listdir(self._texture_directory):
            entry = item
            if "EXPORT" in entry:
                entry = item.split("EXPORT")[0]
            if entry.startswith(file_name_prefix) and os.path.isfile(os.path.join(self._texture_directory, item)):
                results.append(item)
        if len(results) == 1:
            return results[0]
        return None
        # elif len(results) == 0:
        #     raise RuntimeError("Could not reconstruct texture file: {}".format(file_name_prefix))
        # else:
        #     raise RuntimeError("Could not reconstruct texture file, multiple hits found: {}".format(file_name_prefix))

    def reconstruct_mtl(self, input_stream, texture_directory="."):
        """
        Rebuild materials from an .obj stream that lost its .mtl file:
        for every 'usemtl' statement, try to locate a texture file whose
        name matches the material name.

        @type input_stream: file-like object (text)
        @type texture_directory: str
        @rtype: list[int]
        @return: [number of materials reconstructed, number failed]
        """
        success_failure = [0, 0]
        self._materials = {}
        self._texture_directory = texture_directory
        for line in input_stream:
            line = line.strip()
            if line.startswith('#') or len(line) == 0:
                continue
            if not line.startswith("usemtl"):
                continue
            material_name = line.split(' ', 1)[1]
            self._materials[material_name] = Material()
            file_name = self.search_file(material_name)
            if file_name is None:
                success_failure[1] += 1
                continue
            success_failure[0] += 1
            statement_lines = ["map_Ka {}".format(file_name)]
            self._materials[material_name].read(statement_lines)
        return success_failure

    def read(self, file_path, texture_directory=None):
        """
        Read materials from a .mtl file, or reconstruct them from an
        .obj file (see reconstruct_mtl).

        @type file_path: str
        @type texture_directory: str
        @rtype: list[int] | None
        @return: [successes, failures] for .obj input, otherwise None
        """
        assert os.path.exists(file_path), "Bad file path: {}".format(file_path)
        if texture_directory is None:
            texture_directory = os.path.dirname(file_path)
        success_failure = None
        with open(file_path) as input_stream:
            if file_path.lower().endswith('.mtl'):
                self.read_stream(input_stream, texture_directory)
            elif file_path.lower().endswith('.obj'):
                success_failure = self.reconstruct_mtl(input_stream, texture_directory)
        return success_failure

    def validate_textures(self):
        """
        Check that the map_Ka texture file of every material exists.

        NOTE(review): assumes every material has a map_Ka set — a
        material without one raises AttributeError here; confirm callers
        guarantee this.

        @rtype: bool
        """
        for material_name, material in self._materials.items():
            file_path = os.path.join(self._texture_directory, material.map_Ka.file_path)
            if not os.path.exists(file_path) or not os.path.isfile(file_path):
                return False
        return True

    def to_stream(self, stream):
        """
        Write all materials in .mtl syntax to a text stream.
        """
        for material_name, material in self._materials.items():
            stream.write("newmtl {} \n".format(material_name))
            material.to_stream(stream)

    def to_stdout(self):
        """
        Write all materials in .mtl syntax to stdout.
        """
        self.to_stream(sys.stdout)
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/meshlib/stlreader.py | .py | 6,739 | 208 | import numpy as np
import os
from struct import unpack
from .defaultreader import DefaultReader
class StlReader(DefaultReader):
    """
    Reader for STL mesh files, both ASCII and binary.

    @type _facets: dict[str, list[tuple[tuple[float]]]]  # object name -> list of vertex triples
    @type _norms: dict[str, list[tuple[float]]]          # object name -> list of facet normals
    """
    def __init__(self):
        self._facets = {}
        self._norms = {}

    @staticmethod
    def read_binary(file_path):
        """
        Created on Thu Nov 19 06:37:35 2013

        @author: Sukhbinder Singh

        Reads a Binary file and
        Returns Header,Points,Normals,Vertex1,Vertex2,Vertex3

        Source: http://sukhbinder.wordpress.com/2013/11/28/binary-stl-file-reader-in-python-powered-by-numpy/

        @type file_path: str
        @rtype: (bytes, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray)
        """
        fp = open(file_path, 'rb')
        # binary STL layout: 80-byte header, facet count (int32), then
        # one 50-byte record per facet (normal + 3 vertices + attribute)
        header = fp.read(80)
        nn = fp.read(4)
        number_of_facets = unpack('i', nn)[0]
        record_dtype = np.dtype([
            ('normals', np.float32, (3,)),
            ('Vertex1', np.float32, (3,)),
            ('Vertex2', np.float32, (3,)),
            ('Vertex3', np.float32, (3,)),
            ('atttr', '<i2', (1,))
        ])
        data = np.fromfile(fp, dtype=record_dtype, count=number_of_facets)
        fp.close()
        normals = data['normals']
        vertex_1 = data['Vertex1']
        vertex_2 = data['Vertex2']
        vertex_3 = data['Vertex3']
        # p = np.append(vertex_1, vertex_2, axis=0)
        # p = np.append(p, vertex_3, axis=0)  # list(v1)
        # points = np.array(list(set(tuple(p1) for p1 in p)))
        return header, normals, vertex_1, vertex_2, vertex_3

    @staticmethod
    def parse_askii_verticle(input_stream):
        """
        Parse one vertex line such as 'vertex 0.0 0.0 0.0'.

        @param input_stream: text stream positioned at a vertex line
        @rtype: (float, float, float)
        """
        _, verticle_x, verticle_y, verticle_z = input_stream.readline().strip().split(' ')
        return float(verticle_x), float(verticle_y), float(verticle_z),

    @staticmethod
    def parse_askii_triangle(input_stream):
        """
        Parse an 'outer loop' ... 'endloop' section containing three
        'vertex x y z' lines.

        @param input_stream: text stream
        @rtype: ((float, float, float), (float, float, float), (float, float, float))
        """
        assert input_stream.readline().strip().startswith("outer loop")
        triangle = (
            StlReader.parse_askii_verticle(input_stream),
            StlReader.parse_askii_verticle(input_stream),
            StlReader.parse_askii_verticle(input_stream))
        assert input_stream.readline().strip().startswith("endloop")
        return triangle

    @staticmethod
    def parse_askii_list_of_facets(input_stream):
        """
        Lazily parse 'facet normal ...' sections until 'endsolid'.

        @param input_stream: text stream
        @rtype: collections.Iterable[((float, float, float), ((float, float, float), (float, float, float), (float, float, float)))]
        """
        line = input_stream.readline().strip()
        while not line.startswith("endsolid"):
            # line: 'facet normal nx ny nz'
            _, _, normal_x, normal_y, normal_z = line.split(' ')
            triangle = StlReader.parse_askii_triangle(input_stream)
            assert input_stream.readline().strip().startswith("endfacet")
            # NOTE: normal components are yielded as strings here,
            # unconverted — confirm whether callers expect floats.
            yield (normal_x, normal_y, normal_z), triangle
            line = input_stream.readline().strip()

    @staticmethod
    def parse_askii_solids(input_stream):
        """
        Lazily parse 'solid <name>' ... 'endsolid' sections.

        @param input_stream: text stream; closed after the last solid
        @rtype: collections.Iterable[(str, collections.Iterable)]
        """
        line = input_stream.readline()
        while line:
            line = line.strip()
            assert line.startswith("solid"), line
            _, name = line.split(' ', 1)
            # print(line)
            yield name, StlReader.parse_askii_list_of_facets(input_stream)
            line = input_stream.readline()
        input_stream.close()

    @staticmethod
    def read_askii_stl(file_path):
        """
        Open an ASCII STL file and return a lazy solid iterator.

        @type file_path: str
        @rtype: collections.Iterable[(str, collections.Iterable)]
        """
        assert os.path.exists(file_path), "Bad path: {}".format(file_path)
        return StlReader.parse_askii_solids(open(file_path, 'r'))

    @staticmethod
    def _is_ascii_stl(file_path):
        """
        Heuristic format check: ASCII STL files start with 'solid'.

        @type file_path: str
        @rtype: bool
        """
        with open(file_path, 'rb') as input_data:
            line = input_data.readline()
            if line.startswith(b'solid'):
                return True
            else:
                return False

    def read(self, file_path):
        """
        Read an STL file (ASCII or binary) into _facets and _norms.
        Binary files expose a single object under the key "obj".

        @type file_path: str
        @rtype: None
        """
        del self._facets
        del self._norms
        self._facets = {}
        self._norms = {}
        if StlReader._is_ascii_stl(file_path):
            for name, facets in StlReader.read_askii_stl(file_path):
                assert name not in self._facets, "Objects in file are not unique"
                self._facets[name] = []
                self._norms[name] = []
                for normal, (v1, v2, v3) in facets:
                    self._facets[name].append((v1, v2, v3))
                    self._norms[name].append(normal)
        else:
            head, n, v1, v2, v3 = StlReader.read_binary(file_path)
            self._facets["obj"] = []
            self._norms["obj"] = []
            for norm, vertex_1, vertex_2, vertex_3 in zip(n, v1, v2, v3):
                # yield (tuple(i), tuple(j), tuple(k))
                self._facets["obj"].append((vertex_1, vertex_2, vertex_3))
                self._norms["obj"].append(norm)

    def get_names(self):
        """
        Names of the objects read from the file.

        @rtype: collections.Iterable[str]
        """
        return self._facets.keys()

    def get_facets(self, name=None):
        """
        Yield the facets of one object, or of all objects when no name
        is given.

        @type name: str | None
        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        if name:
            assert name in self._facets, "Unknown object: {}".format(name)
            for facet in self._facets[name]:
                yield facet
        else:
            assert name is None, "Unknown object: {}".format(name)
            for name in self._facets:
                for facet in self._facets[name]:
                    yield facet

    def has_triangular_facets(self):
        """
        Whether all facets are triangles (STL stores triangles only).

        @rtype: bool
        """
        # todo: is this always the case?
        return True
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/meshlib/__init__.py | .py | 51 | 3 | __author__ = 'Peter Hofmann'
__version__ = "0.0.4"
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/meshlib/defaultreader.py | .py | 705 | 41 | class DefaultReader(object):
"""
Mesh Reader Prototype
"""
def read_archive(self, file_path):
"""
@type file_path: str
@rtype: None
"""
pass
def read(self, file_path):
"""
@type file_path: str
@rtype: None
"""
pass
def get_names(self):
"""
@rtype: collections.Iterable[str]
"""
pass
def get_facets(self, name=None):
"""
@rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
"""
pass
def has_triangular_facets(self):
"""
@rtype: bool
"""
pass
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/meshlib/objreader.py | .py | 15,405 | 428 | import tempfile
import zipfile
import shutil
import sys
import os
from .defaultreader import DefaultReader
class MeshGroup(object):
"""
# group name
g [group name]
# Vertices
v 0.123 0.234 0.345 1.0
# Texture coordinates
vt 0.500 1 [0]
# Vertex normals
vn 0.707 0.000 0.707
# Parameter space vertices
vp 0.310000 3.210000 2.100000
# Polygonal face element
f 6/4/1 3/5/3 7/6/5
# usemtl Material__3
@type _material_library_file_path: str
@type _tmp_material: str
@type _vertex_indices: list[list[int, int, int]]
@type _texture_indices: list[list[int, int, int]]
@type _normal_indices: list[list[int, int, int]]
@type _use_material: dict[str, list[int]]
"""
def __init__(self, material_library_file_path=None):
"""
@type material_library_file_path: str
"""
self._material_library_file_path = material_library_file_path
self._tmp_material = None
self._vertex_indices = []
self._texture_indices = []
self._normal_indices = []
self._use_material = {None: []}
def __iter__(self):
"""
access to block config from the class
@rtype:
"""
for face_element in self._vertex_indices:
yield face_element
def items_vertex(self):
for element in self._vertex_indices:
yield element
def items_texture(self):
for element in self._texture_indices:
yield element
def items_normal(self):
for element in self._normal_indices:
yield element
def polygons_to_triangles(self, vertex_indice, texture_indice, normal_indice):
for u in range(len(vertex_indice)-2):
for v in range(u+1, len(vertex_indice)-1):
for w in range(v+1, len(vertex_indice)):
# print(u, v, w)
self._vertex_indices.append([vertex_indice[u], vertex_indice[v], vertex_indice[w]])
if len(texture_indice):
self._texture_indices.append([texture_indice[u], texture_indice[v], texture_indice[w]])
if len(normal_indice):
self._normal_indices.append([normal_indice[u], normal_indice[v], normal_indice[w]])
def parse_f(self, line):
vertex_indice = []
texture_indice = []
normal_indice = []
for entry in line.split(' '):
values = entry.split('/')
vertex_indice.append(int(values[0]))
if len(values) > 1:
try:
texture_indice.append(int(values[1]))
except ValueError:
pass
if len(values) > 2:
try:
normal_indice.append(int(values[2]))
except ValueError:
pass
if len(vertex_indice) > 3:
self.polygons_to_triangles(vertex_indice, texture_indice, normal_indice)
return
self._vertex_indices.append(vertex_indice)
if len(texture_indice):
self._texture_indices.append(texture_indice)
if len(normal_indice):
self._normal_indices.append(normal_indice)
self._use_material[self._tmp_material].append(len(self._vertex_indices))
def parse_usemtl(self, line):
    """
    Parse a ``usemtl`` record: make the named material the active one for
    subsequently parsed faces.

    @type line: str
    """
    self._tmp_material = line
    self._use_material[line] = []
def has_triangular_facets(self):
    """Return True when the first stored face has exactly three corners.

    Only the first face is inspected; an empty group reports False.
    """
    if not self._vertex_indices:
        return False
    return len(self._vertex_indices[0]) == 3
class MeshObject(object):
    """
    One ``o``-level OBJ object: raw vertex data plus its named face groups.

    # o object name
    # mtllib Scaffold.mtl
    # g group name
    @type _groups: dict[str, MeshGroup]
    @type _tmp_material_library_file_path: str
    @type _vertices: list[list[float]]
    @type _texture_coordinates: list[list[float]]
    @type _vertex_normals: list[tuple]
    @type _parameter_space_vertices: list[list[int]]
    """

    def __init__(self, material_library_file_path=None):
        """
        @type material_library_file_path: str
        """
        self._groups = {}
        self._tmp_material_library_file_path = material_library_file_path
        self._tmp_material = None
        self._vertices = []
        self._texture_coordinates = []
        self._vertex_normals = []
        self._parameter_space_vertices = []

    def parse_g(self, line):
        """
        Register a new, uniquely named face group and return it.

        @type line: str
        @rtype: MeshGroup
        """
        assert line not in self._groups, "Groups are not unique: {}".format(line)
        group = MeshGroup(self._tmp_material_library_file_path)
        self._groups[line] = group
        return group

    def parse_v(self, line):
        """
        Parse a ``v`` record: geometric vertex coordinates.

        @type line: str
        """
        self._vertices.append([float(token.strip()) for token in line.split(' ')])

    def parse_vt(self, line):
        """
        Parse a ``vt`` record: texture coordinates.

        @type line: str
        """
        self._texture_coordinates.append([float(token) for token in line.split(' ')])

    def parse_vn(self, line):
        """
        Parse a ``vn`` record: a vertex normal (stored as a tuple).

        @type line: str
        """
        self._vertex_normals.append(tuple(float(token) for token in line.split(' ')))

    def parse_vp(self, line):
        """
        Parse a ``vp`` record: parameter-space vertex indices.

        @type line: str
        """
        self._parameter_space_vertices.append([int(token) for token in line.split(' ')])

    def parse_mtllib(self, line):
        """
        Remember the material library path declared for this object.

        @type line: str
        """
        self._tmp_material_library_file_path = line

    def get_facets(self):
        """
        Yield each triangular facet as a triple of vertex coordinate lists
        (OBJ indices are 1-based, hence the -1).

        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        for group in self._groups.values():
            for idx in group.items_vertex():
                yield (
                    self._vertices[idx[0] - 1],
                    self._vertices[idx[1] - 1],
                    self._vertices[idx[2] - 1])

    def get_texture_facets(self):
        """
        Yield each facet's triple of texture-coordinate entries.

        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        for group in self._groups.values():
            for idx in group.items_texture():
                yield (
                    self._texture_coordinates[idx[0] - 1],
                    self._texture_coordinates[idx[1] - 1],
                    self._texture_coordinates[idx[2] - 1])

    def get_normals(self):
        """
        Yield each facet's triple of vertex normals.

        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        for group in self._groups.values():
            for idx in group.items_normal():
                yield (
                    self._vertex_normals[idx[0] - 1],
                    self._vertex_normals[idx[1] - 1],
                    self._vertex_normals[idx[2] - 1])

    def has_triangular_facets(self):
        """Return True when every group reports triangular faces (vacuously
        True for an object with no groups)."""
        return all(group.has_triangular_facets() for group in self._groups.values())
class ObjReader(DefaultReader):
    """
    Reader for Wavefront OBJ files, including zip archives bundling an .obj
    with source/texture folders.

    @type _tmp_dir: str
    @type _objects: dict[str, MeshObject]
    @type _tmp_material_library_file_path: str
    """

    def __init__(self):
        self._tmp_dir = None  # scratch directory created by read_archive()
        self._objects = {}  # object name -> MeshObject
        self._directory_textures = None
        self._tmp_material_library_file_path = None

    def __exit__(self, type, value, traceback):
        # Remove the scratch directory on context exit.
        # Fixed: previously assigned 'self.tmp_dir = None' (missing
        # underscore), which created a new attribute and left _tmp_dir
        # pointing at the already-deleted path.
        if self._tmp_dir and os.path.exists(self._tmp_dir):
            shutil.rmtree(self._tmp_dir)
        self._tmp_dir = None

    def __del__(self):
        # Same cleanup (and the same attribute-name fix) as __exit__.
        if self._tmp_dir and os.path.exists(self._tmp_dir):
            shutil.rmtree(self._tmp_dir)
        self._tmp_dir = None

    def read(self, file_path):
        """
        Parse an OBJ file into MeshObject/MeshGroup structures.

        Records appearing before an explicit 'o'/'g' line are collected into
        a default object/group named after the file.

        @type file_path: str
        """
        assert os.path.exists(file_path), "Bad file path: '{}'".format(file_path)
        self._objects = {}
        self._tmp_material_library_file_path = None
        current_object = None
        current_group = None
        with open(file_path) as input_stream:
            for line in input_stream:
                line = line.rstrip()
                # skip comments and blank lines
                if line.startswith('#') or not len(line):
                    continue
                line_split = line.split(' ', 1)
                if len(line_split) == 1:
                    sys.stderr.write("[ObjReader] WARNING: Bad line: {}\n".format(line_split[0]))
                    continue
                key, data = line_split
                data = data.strip()
                key = key.lower()
                if key == 'mtllib':
                    # a mtllib before the first 'o' applies to the whole file
                    if current_object:
                        current_object.parse_mtllib(data)
                    else:
                        self.parse_mtllib(data)
                    continue
                # create implicit object/group for headerless files
                if not current_object and key != 'o':
                    name = os.path.splitext(os.path.basename(file_path))[0]
                    current_object = self.parse_o(name)
                if not current_group and (key == 'f' or key == 'usemtl'):
                    name = os.path.splitext(os.path.basename(file_path))[0]
                    current_group = current_object.parse_g(name)
                if key == 'o':
                    current_object = self.parse_o(data)
                    continue
                if key == 'g':
                    current_group = current_object.parse_g(data)
                    continue
                if key == 'v':
                    current_object.parse_v(data)
                    continue
                if key == 'vt':
                    current_object.parse_vt(data)
                    continue
                if key == 'vn':
                    current_object.parse_vn(data)
                    continue
                if key == 'vp':
                    current_object.parse_vp(data)
                    continue
                if key == 'f':
                    current_group.parse_f(data)
                    continue
                if key == 'usemtl':
                    current_group.parse_usemtl(data)
                    continue
                else:
                    sys.stderr.write("[ObjReader] WARNING: Unknown key: {}\n".format(key))
                    continue

    @staticmethod
    def _get_obj_file_path(directory):
        # Return the first entry whose name ends with 'obj' (note: this also
        # matches names like 'x.qobj'; kept for backward compatibility).
        list_of_dir = os.listdir(directory)
        for item in list_of_dir:
            if item.endswith('obj'):
                return os.path.join(directory, item)
        return None

    def read_archive(self, file_path):
        """
        Extract a zip archive into a scratch directory, locate the .obj file
        (and texture folder, if any) inside it, and parse it.

        @param file_path: path to a zip archive
        @return:
        """
        # deal with temporary directory
        self._tmp_dir = tempfile.mkdtemp(prefix="{}_".format("ObjReader"))
        # deal with input directory
        assert zipfile.is_zipfile(file_path), "Not a zip archive: '{}'".format(file_path)
        directory_input = tempfile.mkdtemp(dir=self._tmp_dir)
        with zipfile.ZipFile(file_path, "r") as read_handler:
            read_handler.extractall(directory_input)
        # descend through single-directory wrappers.
        # Fixed: the loop condition only checked the entry count, so a single
        # *file* entry caused an infinite loop; the isdir test now gates the
        # loop itself.
        directory_items = os.listdir(directory_input)
        while len(directory_items) == 1 and os.path.isdir(os.path.join(directory_input, directory_items[0])):
            directory_input = os.path.join(directory_input, directory_items[0])
            directory_items = os.listdir(directory_input)
        directory_source_candidate = os.path.join(directory_input, 'source')
        if os.path.exists(directory_source_candidate):
            # Fixed: previously the listing came from directory_input while the
            # file name was joined onto .../source, so the zip was looked up in
            # one directory and opened from another.
            list_of_dir = os.listdir(directory_source_candidate)
            if len(list_of_dir) and list_of_dir[0].endswith(".zip"):
                # source contains another zip file, most with likely copies of textures
                file_path = os.path.join(directory_source_candidate, list_of_dir[0])
                assert zipfile.is_zipfile(file_path), "Not a zip archive: '{}'".format(file_path)
                directory_input = tempfile.mkdtemp(dir=self._tmp_dir)
                with zipfile.ZipFile(file_path, "r") as read_handler:
                    read_handler.extractall(directory_input)
                if os.path.exists(os.path.join(directory_input, 'lod0')):
                    directory_input = os.path.join(directory_input, 'lod0')
        directory_source = directory_input
        directory_textures = directory_input
        if os.path.exists(os.path.join(directory_input, 'source')):
            directory_source = os.path.join(directory_input, 'source')
        if os.path.exists(os.path.join(directory_input, 'textures')):
            directory_textures = os.path.join(directory_input, 'textures')
        self._directory_textures = directory_textures
        file_path_obj = ObjReader._get_obj_file_path(directory_source)
        assert isinstance(file_path_obj, str), "obj file not found."
        self.read(file_path_obj)

    def parse_o(self, line):
        """
        Register a new, uniquely named object and return it.

        @type line: str
        @rtype: MeshObject
        """
        assert line not in self._objects, "Objects are not unique: {}".format(line)
        self._objects[line] = MeshObject(self._tmp_material_library_file_path)
        return self._objects[line]

    def parse_mtllib(self, line):
        # file-level material library (seen before any 'o' record)
        self._tmp_material_library_file_path = line

    def get_facets(self, name=None):
        """
        Yield facets of the named object, or of every object when name is None.

        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        if name:
            assert name in self._objects, "Unknown object: {}".format(name)
            for element in self._objects[name].get_facets():
                yield element
        else:
            assert name is None, "Unknown object: {}".format(name)
            for name, mesh_object in self._objects.items():
                for element in mesh_object.get_facets():
                    yield element

    def get_texture_facets(self, name=None):
        """
        Yield texture facets of the named object, or of every object.

        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        if name:
            assert name in self._objects, "Unknown object: {}".format(name)
            for element in self._objects[name].get_texture_facets():
                yield element
        else:
            assert name is None, "Unknown object: {}".format(name)
            for name, mesh_object in self._objects.items():
                for element in mesh_object.get_texture_facets():
                    yield element

    def get_normals(self, name=None):
        """
        Yield normals of the named object, or of every object.

        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        if name:
            assert name in self._objects, "Unknown object: {}".format(name)
            for element in self._objects[name].get_normals():
                yield element
        else:
            assert name is None, "Unknown object: {}".format(name)
            for name, mesh_object in self._objects.items():
                for element in mesh_object.get_normals():
                    yield element

    def get_names(self):
        """
        @rtype: collections.Iterable[str]
        """
        # Fixed: previously computed repr(self._objects.keys()) and implicitly
        # returned None; now actually returns the object names.
        return list(self._objects.keys())

    def has_triangular_facets(self):
        """
        @rtype: bool
        """
        list_of_lookups = [mesh_object.has_triangular_facets() for name, mesh_object in self._objects.items()]
        # print(list_of_lookups)
        return all(list_of_lookups)
| Python |
3D | potpov/New-Maxillo-Dataset-Segmentation | hull/voxelize/meshlib/meshreader.py | .py | 1,938 | 64 | import os
from .defaultreader import DefaultReader
from .stlreader import StlReader
from .objreader import ObjReader
class MeshReader(DefaultReader):
    """
    Facade reader that dispatches to a concrete reader (StlReader/ObjReader)
    based on the input file's extension.

    @type _type_reader: dict[str, any]
    @type _reader: DefaultReader
    """
    # extension -> concrete reader class
    _type_reader = {
        ".stl": StlReader,
        ".obj": ObjReader,
        ".zip": ObjReader,
    }

    def __init__(self):
        # placeholder until read()/read_archive() instantiates a real reader
        self._reader = DefaultReader()

    def read_archive(self, file_path):
        """
        Read a zip archive containing a mesh file.

        @type file_path: str
        @rtype: None
        """
        # todo: test for stl or obj archive
        assert os.path.exists(file_path), "Bad file path: {}".format(file_path)
        assert os.path.isfile(file_path), "Not a file: {}".format(file_path)
        file_extension = os.path.splitext(os.path.basename(file_path))[1].lower()
        assert file_extension == ".zip", "Bad file type: {}".format(file_extension)
        del self._reader
        self._reader = self._type_reader[file_extension]()
        self._reader.read_archive(file_path)

    def read(self, file_path):
        """
        Read a mesh file, picking the reader that matches its extension.

        @type file_path: str
        @rtype: None
        """
        assert os.path.exists(file_path), "Bad file path: {}".format(file_path)
        assert os.path.isfile(file_path), "Not a file: {}".format(file_path)
        file_extension = os.path.splitext(os.path.basename(file_path))[1].lower()
        assert file_extension in self._type_reader, "Unknown file type: {}".format(file_extension)
        del self._reader
        self._reader = self._type_reader[file_extension]()
        self._reader.read(file_path)

    def get_facets(self, name=None):
        """
        @rtype: collections.Iterable[((float, float, float), (float, float, float), (float, float, float))]
        """
        # Fixed: the 'name' filter was accepted but silently dropped; forward
        # it when given (ObjReader.get_facets supports it). Calls without a
        # name behave exactly as before.
        if name is None:
            return self._reader.get_facets()
        return self._reader.get_facets(name)

    def has_triangular_facets(self):
        """
        @rtype: bool
        """
        return self._reader.has_triangular_facets()
| Python |
3D | jstnmchl/xraySimulator | getDefaultParams.m | .m | 844 | 30 | function [ dp ] = getDefaultParams()
%GETDEFAULTPARAMS Returns the default simulation parameter struct DP used by
% xraySimulator: scene geometry (source/detector placement, detector size
% and resolution) plus output/visualization flags. These defaults are
% independent of the phantom being imaged.
%TODO Generates default parameters for x-ray simulation that are
%independent of the phantom being imaged (e.g. detector size, source location,
%etc.). Also creates template to be modified by user for custom parameters.
%TODO If time, convert params from structure to object to prevent
%mismatched data types, misnamed vars, etc. If done, also add check of
%object type to checkInputs
%Geometric/Simulation Parameters
dp.phantomToSourceDistance = 100; %cm
dp.phantomToDetectorDistance = 20; %cm
dp.detectorWidthDist= 40; %cm
dp.detectorWidthPixels= 32;
dp.detectorHeightDist= 40; %cm
dp.detectorHeightPixels = 32;
%Other
dp.showOutput = true; %render the 3D scene plot after simulation
dp.phantomTransparency = 0.5; %FaceAlpha used when plotting phantom meshes
dp.writeImageToDisk = true; %save the simulated image via imwrite
end
| MATLAB |
3D | jstnmchl/xraySimulator | xraySimulator.m | .m | 11,533 | 398 | function [ image ] = xraySimulator(stlFilename, attenCoeffs, outputImgFilename, params, varargin )
%XRAYSIMULATOR Simulates x-ray images of one or more objects (STL files)
% created by an x-ray point source and a rectangular x-ray detector. The
% resulting simulation is visualized in a 3D plot and the simulated x-ray
% image written as a bitmap file.
%
% For detailed description and usage, see xraySimulator_README.md
tic
%Fall back to default geometry when params is omitted or empty.
if (nargin<4 || isempty(params) )
params = getDefaultParams();
end
%Optional 5th argument: Nx7 per-object transform rows
%[Rx Ry Rz Tx Ty Tz S] (rotations in deg, translations in cm, scale unitless).
if nargin > 4
rotTransScale = varargin{1};
else
rotTransScale =[];
end
if nargin > 5
error('Too many input variables')
end
addLibsToPath();
checkInputs();
phantom = generateVirtualPhantom(stlFilename, attenCoeffs, rotTransScale);
scene = generateSimulationScene(params);
image = generateSimulatedImage(phantom, scene);
if params.showOutput
plotScene(scene,phantom,image,params)
end
if params.writeImageToDisk
writeImgToDisk(image,outputImgFilename);
end
runTime = toc;
disp(['Total run time: ' num2str(runTime) ' seconds'])
%Nested function: shares (and may rewrite) stlFilename and rotTransScale
%with the parent function's workspace.
function [] = checkInputs()
%%% Check validity of inputs
%Ensure stlFilename is string array (if only one filename, convert
%char to string).
if not(isstring(stlFilename))
if ischar(stlFilename) && (size(stlFilename,1) == 1)
stlFilename = string(stlFilename);
else
error('Invalid variable type of stlFilename. Variable should be string array or 1D character array');
end
end
%TODO: does stlfilename end in .stl?
%TODO: does imgFilename end in .bmp?
%TODO: Same number of stls as attenuation coeffs?
%TODO: check distances in params are non-negative
%TODO: check pixels in params (width/height) >0
%TODO: Check all variables in params defined, no other params defined (to
%protect against misspelled variable names)
%If present, check rotTransScale for correct size, non-negative
%scale. If scale = 0, change to 1 and display warning.
if ~isempty(rotTransScale)
for i=1:size(rotTransScale,1)
if rotTransScale(i,7) < 0
error(['Scale value must be non-negative. Scale of ' char(stlFilename(i)) ' was ' num2str(rotTransScale(i,7)) ])
elseif rotTransScale(i,7) == 0
rotTransScale(i,7) = 1;
warning(['Scale value for ' char(stlFilename(i)) ' was 0 (implies non-existent object). Automatically corrected to 1.'])
end
end
end
end
end
function [] = addLibsToPath()
%Add the bundled third-party libraries to the MATLAB path. The working
%directory is switched to this script's folder temporarily so the relative
%'3rdParty' folder is found regardless of the caller's CD.
thisFolder = fileparts(mfilename('fullpath'));
previousFolder = cd(thisFolder);
addpath(genpath('3rdParty'))
cd(previousFolder);
end
function [p] = generateVirtualPhantom(stlFilename, attenCoeffs, rotTransScale)
%Load each STL into a struct (vertices, faces, name) and attach its x-ray
%attenuation coefficient. When rotTransScale is non-empty, its i-th row is
%applied to the i-th object's vertices.
numStls = size(stlFilename,1);
p = cell(numStls,1); %phantom: one struct per STL
for idx = 1:numStls
    entry = struct();
    [entry.vertices, entry.faces, ~, ~] = stlRead(stlFilename(idx));
    entry.name = stlFilename(idx);
    entry.attenCoeff = attenCoeffs(idx);
    if ~isempty(rotTransScale)
        entry.vertices = transformVertices(entry.vertices, rotTransScale(idx,:));
    end
    p{idx} = entry;
end
end
function v = transformVertices(v, rts)
%Apply rotation (deg), translation (cm) and uniform scale to vertex array V.
%rts = [Rx Ry Rz Tx Ty Tz S]. Rotation and scaling are performed about the
%object centroid (mean of vertices); translation is applied last.
centroid = mean(v);
v = bsxfun(@minus, v, centroid);
%rotate: ZYX (3-2-1) Euler sequence, equivalent to the former
%angle2dcm(...,'ZYX') call
dcm = SpinCalc('EA321toDCM', [rts(3) rts(2) rts(1)]);
v = v * dcm;
%uniform scale about the centroid
v = v .* rts(7);
%restore centroid position, then translate
v = bsxfun(@plus, v, centroid);
v = bsxfun(@plus, v, rts(4:6));
end
function [scene] = generateSimulationScene(params)
%Build the scene geometry (source position, detector pixel centres, and
%source-to-pixel rays) in the phantom's coordinate frame.
%Conventions: origin = STL origin; the source sits on the negative x-axis;
%the detector is the plane x = phantomToDetectorDistance, centred on the
%x-axis.
src = [-params.phantomToSourceDistance 0 0];
scene.sourceCoords = src;
scene.pixelCoords = generateDetectorPixelCoords(params);
scene.lineSegs = generateSourceToDetectorLineSegs(src, scene.pixelCoords);
end
function [pixels] = generateDetectorPixelCoords(params)
%Generate a 3xNxM array holding the centre coordinates of the detector
%pixels, where N = detectorHeightPixels and M = detectorWidthPixels. This
%matches the [~, height, width] = size(...) convention used downstream in
%generateSourceToDetectorLineSegs/generateSimulatedImage.
%
%Fixed: the original allocated pixels as 3 x widthPixels x heightPixels but
%assigned height-by-width meshgrid slices (and a height-by-width repmat)
%into it, which raised a dimension-mismatch error for any non-square
%detector. Allocation now matches the slice orientation.
pixelWidth = params.detectorWidthDist/params.detectorWidthPixels;
pixelHeight = params.detectorHeightDist/params.detectorHeightPixels;
xVal = params.phantomToDetectorDistance;
%pixel centres span the detector symmetrically about the x-axis
yVals = -1*(0.5*params.detectorWidthDist-0.5*pixelWidth):pixelWidth:(0.5*params.detectorWidthDist-0.5*pixelWidth);
zVals = -1*(0.5*params.detectorHeightDist-0.5*pixelHeight):pixelHeight:(0.5*params.detectorHeightDist-0.5*pixelHeight);
[Y,Z] = meshgrid(yVals, zVals); %both heightPixels x widthPixels
pixels = zeros(3, params.detectorHeightPixels, params.detectorWidthPixels);
pixels(1,:,:) = xVal; %detector plane is at constant x (scalar expansion)
pixels(2,:,:) = Y;
pixels(3,:,:) = Z;
end
function [lineSegs] = generateSourceToDetectorLineSegs(source, pixels)
%Returns a 3xNxM array of vectors running from the source to each of the
%NxM detector pixel centres. Implemented as a single broadcast subtraction
%instead of the original per-pixel double loop (identical result).
lineSegs = bsxfun(@minus, pixels, reshape(source, [3 1 1]));
end
function [img] = generateSimulatedImage(phantom, scene)
%Project every phantom object onto the detector and convert the summed
%attenuation into a display image (dense material = bright).
checkForCollisions(phantom, scene);
numObjects = size(phantom,1);
height = size(scene.lineSegs,2);
width = size(scene.lineSegs,3);
attenuation = zeros([height width numObjects]);
for objIdx = 1:numObjects
    attenuation(:,:,objIdx) = findXrayAttenuation(phantom{objIdx}, scene);
end
%Beer-Lambert: relative intensity I/I_0 = exp(-sum of per-object attenuations)
rIntensity = exp(-sum(attenuation,3));
%Invert so dense objects (e.g. bone) appear bright
img = 1 - rIntensity;
%Correct for direction conventions: 180-degree rotation
%(equivalent to flipud(fliplr(img)) for a 2D image)
img = rot90(img, 2);
end
function [] = checkForCollisions(phantom,scene)
%Verify the x-ray source does not lie inside any phantom object.
%(Collisions between objects are deliberately not checked; overlapping
%objects are treated as summative attenuation.)
%TODO Check for collisions between objects and detector
numObjects = size(phantom,1);
numColliding = 0;
collidingNames = '';
for idx = 1:numObjects
    if in_polyhedron(phantom{idx}.faces, phantom{idx}.vertices, scene.sourceCoords)
        numColliding = numColliding + 1;
        collidingNames = [collidingNames ' ' char(phantom{idx}.name)];
    end
end
if numColliding > 0
    error([num2str(numColliding) ' STL files collide with the x-ray source. Colliding files are:' collidingNames ])
end
end
function [atten] = findXrayAttenuation(object, scene)
%Returns NxM array of attenuation (i.e. x in I=I_0*e^-x )for a single
%closed surface where NxM is the number of pixels (width x height) on the detector
%The bounding box lets estLineIntegral reject most rays with a cheap
%pre-test before doing the full per-triangle intersection.
boundingBox = generateBoundingBox(object);
%boundingBox=[];
[~, height, width] = size(scene.pixelCoords);
atten=zeros(height,width);
%Create progress bar for finding attenuation (can be lengthy)
numProjections = numel(atten);
progress=0;
textprogressbar(['Projecting X-Rays for ' char(object.name) ': ']);
%If progress bar interrupted, previous bar will not be closed and result in
%error on subsequent run. Calling textprogressbar a second time fixes this
%problem. (textprogressbar keeps persistent state between calls.)
try
textprogressbar(0);
catch
textprogressbar('');
end
%One ray per detector pixel: integrate path length through the object.
for h=1:height
for w=1:width
atten(h,w) = estLineIntegral(scene.sourceCoords, scene.lineSegs(:,h,w), object, boundingBox);
progress = progress+1;
textprogressbar(progress/numProjections *100);
end
end
textprogressbar(' Done.');
end
function [box] = generateBoundingBox(obj)
%Axis-aligned bounding box of OBJ's vertices, returned in the Kx3x3
%triangle-corner layout expected by TriangleRayIntersection:
%box(:,:,i) holds the i-th corner of each boundary triangle.
verts = obj.vertices;
lo = min(verts, [], 1);
hi = max(verts, [], 1);
boxRange = [lo' hi']; %rows: x/y/z extents
[X, Y, Z] = meshgrid(boxRange(1,:), boxRange(2,:), boxRange(3,:));
boxVertices = [reshape(X,[],1,1) reshape(Y,[],1,1) reshape(Z,[],1,1)];
boxFaces = boundary(boxVertices(:,1), boxVertices(:,2), boxVertices(:,3));
box(:,:,1) = boxVertices(boxFaces(:,1),:);
box(:,:,2) = boxVertices(boxFaces(:,2),:);
box(:,:,3) = boxVertices(boxFaces(:,3),:);
end
function [integral] = estLineIntegral(src,lineSeg, object, bBox)
%Returns estimate of line integral from source to centre of one
%detector pixel through object by finding path length through object,
%multiplying by object's x-ray attenuation.
%Check intersection with bounding box first for speed
[intersect_box, ~, ~, ~,~] = TriangleRayIntersection(src, lineSeg, bBox(:,:,1), bBox(:,:,2), bBox(:,:,3),'lineType','segment');
if sum(intersect_box) == 0
integral = 0;
return
end
%Full test against every triangle of the (closed) surface.
vert1 = object.vertices(object.faces(:,1),:);
vert2 = object.vertices(object.faces(:,2),:);
vert3 = object.vertices(object.faces(:,3),:);
[intersect, T, ~, ~,xcoor] = TriangleRayIntersection(src, lineSeg, vert1, vert2, vert3,'lineType','segment');
numIntersections = sum(intersect);
pathLength = 0;
if numIntersections == 0
integral = 0;
return
elseif mod(numIntersections,2)
%A closed surface must be entered and exited in pairs; an odd count
%means a degenerate hit (e.g. ray grazing a triangle edge).
%TODO Handle edge case (pun intended) where ray intersects with edge of
%outermost triangle, leading to only one intersection (i.e. entrance
%and exit are same point). Should return integral = 0
error(['Odd number of intersections. X-ray appears to enter object but not exit or vice versa. Object: ' char(object.name)] )
end
intersectCoords = xcoor(intersect,:);
%Sort intersections by proximity to source (T is the parametric distance
%along the ray returned by TriangleRayIntersection).
intersectT = T(intersect);
intersectCoords = [intersectCoords intersectT];
intersectCoords = sortrows(intersectCoords,4);
intersectCoords = intersectCoords(:,1:3);
%Assuming odd intersections are x-ray entering object & even leaving, sum
%path length through object
for i=2:2:numIntersections
pathLength = pathLength + norm(intersectCoords(i,:) - intersectCoords(i-1,:));
end
integral = pathLength * object.attenCoeff;
end
function [] = plotScene(scene,phantom, img, params)
%Render the simulation geometry: the source (red star), the detector plane
%textured with the simulated image, and each phantom mesh as a translucent
%patch.
figure()
%Plot source
plot3(scene.sourceCoords(1),scene.sourceCoords(2),scene.sourceCoords(3),'r*');
grid on; hold on; axis equal;
xlabel('X'); ylabel('Y'); zlabel('Z');
%Plot detector/image
W = params.detectorWidthDist/2;
H = params.detectorHeightDist/2;
X = repmat(params.phantomToDetectorDistance,2,2);
Y = [W -W; W -W];
Z = [H H; -H -H];
surface(X,Y,Z,repmat(img,1,1,3), 'FaceColor','texturemap','EdgeColor','none');
numObjects = size(phantom,1);
C = {'b','r','y',[.5 .6 .7],[.8 .2 .6], 'k','g'}; % Cell array of colors.
for i=1:numObjects
%Fixed: mod(i,numel(C)) is 0 whenever i is a multiple of numel(C), and
%MATLAB cell indexing is 1-based, so C{0} raised an error for the 7th,
%14th, ... object. Wrap into 1..numel(C) instead.
cInd = mod(i-1,numel(C))+1; %Cycle through colors
obj.vertices = phantom{i}.vertices;
obj.faces = phantom{i}.faces;
patch(obj,'FaceColor', C{cInd}, ...
'EdgeColor', 'none', ...
'FaceLighting', 'gouraud', ...
'AmbientStrength', 0.15, ...
'FaceAlpha', params.phantomTransparency);
end
end
function [] = writeImgToDisk(img,outputImgFilename)
%Write the simulated image to disk; imwrite infers the format from the
%extension of outputImgFilename.
%TODO handle scenario where filename already exists
imwrite(img, outputImgFilename);
end | MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/textprogressbar/textprogressbar.m | .m | 2,043 | 61 | function textprogressbar(c)
% This function creates a text progress bar. It should be called with a
% STRING argument to initialize and terminate. Otherwise the number correspoding
% to progress in % should be supplied.
% INPUTS: C Either: Text string to initialize or terminate
% Percentage number to show progress
% OUTPUTS: N/A
% Example: Please refer to demo_textprogressbar.m
% Author: Paul Proteus (e-mail: proteus.paul (at) yahoo (dot) com)
% Version: 1.0
% Changes tracker: 29.06.2010 - First version
% Inspired by: http://blogs.mathworks.com/loren/2007/08/01/monitoring-progress-of-a-calculation/
%% Initialization
%State encoding via the persistent variable:
% [] = not initialized; -1 = initialized but nothing printed yet;
% otherwise a '\b' sequence that erases the previously printed bar.
persistent strCR; % Carriage return pesistent variable
% Vizualization parameters
strPercentageLength = 10; % Length of percentage string (must be >5)
strDotsMaximum = 10; % The total number of dots in a progress bar
%% Main
if isempty(strCR) && ~ischar(c),
% Progress bar must be initialized with a string
error('The text progress must be initialized with a string');
elseif isempty(strCR) && ischar(c),
% Progress bar - initialization
fprintf('%s',c);
strCR = -1;
elseif ~isempty(strCR) && ischar(c),
% Progress bar - termination
strCR = [];
fprintf([c '\n']);
elseif isnumeric(c)
% Progress bar - normal progress
c = floor(c);
%'%%' survives the later fprintf as a single '%' character
percentageOut = [num2str(c) '%%'];
percentageOut = [percentageOut repmat(' ',1,strPercentageLength-length(percentageOut)-1)];
nDots = floor(c/100*strDotsMaximum);
dotOut = ['[' repmat('.',1,nDots) repmat(' ',1,strDotsMaximum-nDots) ']'];
strOut = [percentageOut dotOut];
% Print it on the screen
if strCR == -1,
% Don't do carriage return during first run
fprintf(strOut);
else
% Do it during all the other runs
fprintf([strCR strOut]);
end
% Update carriage return
%('\b' is expanded to a backspace by fprintf; the -1 accounts for the
%escaped '%%' printing as one character)
strCR = repmat('\b',1,length(strOut)-1);
else
% Any other unexpected input
error('Unsupported argument type');
end
| MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/textprogressbar/demo_textprogressbar.m | .m | 321 | 16 | %demo_textprogressbar
%This a demo for textprogressbar script
%Each bar is opened with an init string, fed percentages, and closed with a
%termination string (textprogressbar keeps persistent state between calls).
textprogressbar('calculating outputs: ');
for i=1:100,
textprogressbar(i);
pause(0.1);
end
textprogressbar('done');
%Second bar: progress values need not be integers or reach 100.
textprogressbar('saving data: ');
for i=1:0.5:80,
textprogressbar(i);
pause(0.05);
end
textprogressbar('terminated'); | MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/TriangleRayIntersection/PointInsideVolume.m | .m | 1,366 | 37 | function inside = PointInsideVolume(point, faces, vertices)
%% Point within the volume test
% TriangleRayIntersection is a low level function which can be used to
% solve higher level problems. For example a test to see if point is inside
% or outside of a volume defined by a continous surface:
%% check input
%Accept either (point, triangulation) or (point, faces, vertices).
if nargin==2 && isa(faces, 'triangulation')
[faces, vertices] = freeBoundary(faces);
end
switch size(point,2)
case 2 % 2D case: delegate to inpolygon
xv = vertices(faces', 1); % define polygon
yv = vertices(faces', 2);
inside = inpolygon(point(:,1), point(:,2), xv, yv);
case 3
%NOTE(review): 'eps' and 'dir' shadow MATLAB built-ins here; harmless
%locally but worth renaming.
eps = 1e-5;
vert1 = vertices(faces(:,1),:);
vert2 = vertices(faces(:,2),:);
vert3 = vertices(faces(:,3),:);
inside = false(size(point,1),1);
%Ray-parity test: cast a random ray from each point; an odd number of
%surface crossings means the point is inside. Retry with a new random
%direction whenever a hit lands too close to a triangle edge (small
%barycentric coordinate), since such grazing hits make the parity count
%unreliable.
for iPoint = 1:size(point,1)
certain = 0;
while ~certain
dir = rand(1,3)-0.5; % pick random direction
[intersect, ~, u, v] = TriangleRayIntersection(point(iPoint,:), ...
dir, vert1, vert2, vert3, 'border', 'inclusive');
nIntersect = sum(intersect); % number of intersections
inside(iPoint) = mod(nIntersect,2)>0; % inside if odd number of intersections
% make sure ray stays away fron surface triangle edges
bary = [u, v, 1-u-v];
bary = bary(intersect,:);
certain = all( min(abs(bary),[], 2)>eps );
end
end
end | MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/stlTools/stlPlot.m | .m | 640 | 24 | function stlPlot(v, f, name)
%STLPLOT is an easy way to plot an STL object
%V is the Nx3 array of vertices
%F is the Mx3 array of faces
%NAME is the name of the object, that will be displayed as a title
figure;
%patch() accepts a struct with 'vertices' and 'faces' fields
object.vertices = v;
object.faces = f;
patch(object,'FaceColor', [0.8 0.8 1.0], ...
'EdgeColor', 'none', ...
'FaceLighting', 'gouraud', ...
'AmbientStrength', 0.15);
% Add a camera light, and tone down the specular highlighting
camlight('headlight');
material('dull');
% Fix the axes scaling, and set a nice view angle
axis('image');
view([-135 35]);
grid on;
title(name);
| MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/stlTools/stlRead.m | .m | 401 | 13 | function [v, f, n, name] = stlRead(fileName)
%STLREAD reads any STL file not depending on its format
%V are the vertices
%F are the faces
%N are the normals
%NAME is the name of the STL object (NOT the name of the STL file)
%Dispatch on the format detected by stlGetFormat ('ascii' or 'binary').
%NOTE(review): if stlGetFormat returns anything else, the outputs are left
%unassigned and MATLAB errors at the caller.
format = stlGetFormat(fileName);
if strcmp(format,'ascii')
[v,f,n,name] = stlReadAscii(fileName);
elseif strcmp(format,'binary')
[v,f,n,name] = stlReadBinary(fileName);
end | MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/stlTools/stlDemo.m | .m | 1,340 | 40 | %% STLDEMO shows how to use the functions included in the toolbox STLTOOLS
%(Requires the sample files 'sphere300faces.stl' and 'femur_binary.stl' on
%the MATLAB path.)
%% EXAMPLE 1.- How to cut a sphere and close the base to get a semisphere
% load an ascii STL sample file (STLGETFORMAT and STLREADASCII)
[vertices,faces,normals,name] = stlRead('sphere300faces.stl');
stlPlot(vertices,faces,name);
% the sphere is centered in the origin
% now we get a list of vertices to be deleted if (x,y,z<0)
minZ = 0;
[rows, ~] = find(vertices(:,3) < minZ);
list = vertices(rows,:);
% if we delete the list of vertices with z<0, we get an opened semisphere
% (as the base is not closed)
[newv,newf] = stlDelVerts(vertices,faces,list);
stlPlot(newv,newf,name);
% the next step is to identify a new list with the faces that are opened
% (that means all the sides that belong only to a unique triangle)
list = stlGetVerts(newv,newf,'opened');
% finally we generate all the new faces that are needed just to close the
% base of the semisphere
[vsemi,fsemi] = stlAddVerts(newv,newf,list);
stlPlot(vsemi,fsemi,'closed semisphere');
%% EXAMPLE 2.- How to get a section of a femur
[vertices,faces,normals,name] = stlRead('femur_binary.stl');
stlPlot(vertices,faces,name);
minX = 1.2;
[rows, ~] = find(vertices(:,1) < minX);
list = vertices(rows,:);
[newv,newf] = stlDelVerts(vertices,faces,list);
stlPlot(newv,newf,'section of the femur');
| MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/stlTools/stlReadBinary.m | .m | 2,257 | 60 | function [v, f, n, name] = stlReadBinary(fileName)
%STLREADBINARY reads a STL file written in BINARY format
%V are the vertices
%F are the faces
%N are the normals
%NAME is the name of the STL object (NOT the name of the STL file)
%=======================
% STL binary file format
%=======================
% Binary STL files have an 84 byte header followed by 50-byte records, each
% describing a single facet of the mesh. Technically each facet could be
% any 2D shape, but that would screw up the 50-byte-per-facet structure, so
% in practice only triangular facets are used. The present code ONLY works
% for meshes composed of triangular facets.
%
% HEADER:
% 80 bytes: Header text
% 4 bytes: (int) The number of facets in the STL mesh
%
% DATA:
% 4 bytes: (float) normal x
% 4 bytes: (float) normal y
% 4 bytes: (float) normal z
% 4 bytes: (float) vertex1 x
% 4 bytes: (float) vertex1 y
% 4 bytes: (float) vertex1 z
% 4 bytes: (float) vertex2 x
% 4 bytes: (float) vertex2 y
% 4 bytes: (float) vertex2 z
% 4 bytes: (float) vertex3 x
% 4 bytes: (float) vertex3 y
% 4 bytes: (float) vertex3 z
% 2 bytes: Padding to make the data for each facet 50-bytes in length
% ...and repeat for next facet...
fid = fopen(fileName);
header = fread(fid,80,'int8'); % reading header's 80 bytes
%use the header text as the object name, trailing blanks removed
name = deblank(native2unicode(header,'ascii')');
if isempty(name)
name = 'Unnamed Object'; % no object name in binary files!
end
nfaces = fread(fid,1,'int32'); % reading the number of facets in the stl file (next 4 byters)
nvert = 3*nfaces; % number of vertices
% reserve memory for vectors (increase the processing speed)
n = zeros(nfaces,3);
v = zeros(nvert,3);
f = zeros(nfaces,3);
for i = 1 : nfaces % read the data for each facet
tmp = fread(fid,3*4,'float'); % read coordinates
n(i,:) = tmp(1:3); % x,y,z components of the facet's normal vector
v(3*i-2,:) = tmp(4:6); % x,y,z coordinates of vertex 1
v(3*i-1,:) = tmp(7:9); % x,y,z coordinates of vertex 2
v(3*i,:) = tmp(10:12); % x,y,z coordinates of vertex 3
f(i,:) = [3*i-2 3*i-1 3*i]; % face
fread(fid,1,'int16'); % Move to the start of the next facet (2 bytes of padding)
end
fclose(fid);
% slim the file (delete duplicated vertices)
[v,f] = stlSlimVerts(v,f); | MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/stlTools/stlAddVerts.m | .m | 644 | 15 | function [vnew, fnew] = stlAddVerts(v, f, list)
%STLADDVERTS adds new vertices (and consequently, new faces) to a STL object
%V is the Nx3 array of vertices
%F is the Mx3 array of faces
%LIST is the list of vertices to be added to the object
%VNEW is the new array of vertices
%FNEW is the new array of faces
% triangulation just with the slice
%NOTE(review): delaunay() uses only the x,y columns, i.e. it assumes the
%added vertex ring projects cleanly onto the XY plane (as for the
%semisphere base in stlDemo) — confirm before using on arbitrary slices.
faces = delaunay(list(:,1),list(:,2)); % calculate new faces
% update object
nvert = length(v); % number of original vertices
v = [v; list]; % update vertices with the ones in the list
f = [f; faces+nvert]; % update faces with the new ones
[vnew,fnew] = stlSlimVerts(v,f); % clear repeated vertices | MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/stlTools/stlWrite.m | .m | 10,034 | 252 | function stlWrite(filename, varargin)
%STLWRITE Write STL file from patch or surface data.
%
% STLWRITE(FILE, FV) writes a stereolithography (STL) file to FILE for a
% triangulated patch defined by FV (a structure with fields 'vertices'
% and 'faces').
%
% STLWRITE(FILE, FACES, VERTICES) takes faces and vertices separately,
% rather than in an FV struct
%
% STLWRITE(FILE, X, Y, Z) creates an STL file from surface data in X, Y,
% and Z. STLWRITE triangulates this gridded data into a triangulated
% surface using triangulation options specified below. X, Y and Z can be
% two-dimensional arrays with the same size. If X and Y are vectors with
% length equal to SIZE(Z,2) and SIZE(Z,1), respectively, they are passed
% through MESHGRID to create gridded data. If X or Y are scalar values,
% they are used to specify the X and Y spacing between grid points.
%
% STLWRITE(...,'PropertyName',VALUE,'PropertyName',VALUE,...) writes an
% STL file using the following property values:
%
% MODE - File is written using 'binary' (default) or 'ascii'.
%
% TITLE - Header text (max 80 chars) written to the STL file.
%
% TRIANGULATION - When used with gridded data, TRIANGULATION is either:
% 'delaunay' - (default) Delaunay triangulation of X, Y
% 'f' - Forward slash division of grid quads
% 'b' - Back slash division of quadrilaterals
% 'x' - Cross division of quadrilaterals
% Note that 'f', 'b', or 't' triangulations now use an
% inbuilt version of FEX entry 28327, "mesh2tri".
%
% FACECOLOR - Single colour (1-by-3) or one-colour-per-face (N-by-3)
% vector of RGB colours, for face/vertex input. RGB range
% is 5 bits (0:31), stored in VisCAM/SolidView format
% (http://en.wikipedia.org/wiki/STL_(file_format)#Color_in_binary_STL)
%
% Example 1:
% % Write binary STL from face/vertex data
% tmpvol = false(20,20,20); % Empty voxel volume
% tmpvol(8:12,8:12,5:15) = 1; % Turn some voxels on
% fv = isosurface(~tmpvol, 0.5); % Make patch w. faces "out"
% stlwrite('test.stl',fv) % Save to binary .stl
%
% Example 2:
% % Write ascii STL from gridded data
% [X,Y] = deal(1:40); % Create grid reference
% Z = peaks(40); % Create grid height
% stlwrite('test.stl',X,Y,Z,'mode','ascii')
%
% Example 3:
% % Write binary STL with coloured faces
% cVals = fv.vertices(fv.faces(:,1),3); % Colour by Z height.
% cLims = [min(cVals) max(cVals)]; % Transform height values
% nCols = 255; cMap = jet(nCols); % onto an 8-bit colour map
% fColsDbl = interp1(linspace(cLims(1),cLims(2),nCols),cMap,cVals);
% fCols8bit = fColsDbl*255; % Pass cols in 8bit (0-255) RGB triplets
% stlwrite('testCol.stl',fv,'FaceColor',fCols8bit)
% Original idea adapted from surf2stl by Bill McDonald. Huge speed
% improvements implemented by Oliver Woodford. Non-Delaunay triangulation
% of quadrilateral surface courtesy of Kevin Moerman. FaceColor
% implementation by Grant Lohsen.
%
% Author: Sven Holcombe, 11-24-11
% Check valid filename path
path = fileparts(filename);
if ~isempty(path) && ~exist(path,'dir')
    error('Directory "%s" does not exist.',path);
end
% Get faces, vertices, and user-defined options for writing
[faces, vertices, options] = parseInputs(varargin{:});
asciiMode = strcmp( options.mode ,'ascii');
% Create the facets: 3x3xN single array, facets(:,j,k) = xyz of vertex j of facet k
facets = single(vertices');
facets = reshape(facets(:,faces'), 3, 3, []);
% Compute their normals (vectorized cross product of the two edge vectors)
V1 = squeeze(facets(:,2,:) - facets(:,1,:));
V2 = squeeze(facets(:,3,:) - facets(:,1,:));
normals = V1([2 3 1],:) .* V2([3 1 2],:) - V2([2 3 1],:) .* V1([3 1 2],:);
clear V1 V2
normals = bsxfun(@times, normals, 1 ./ sqrt(sum(normals .* normals, 1))); % normalize to unit length
facets = cat(2, reshape(normals, 3, 1, []), facets); % prepend normal -> 3x4xN
clear normals
% Open the file for writing
% NOTE(review): MATLAB's fopen opens in binary mode by default ('t' would be
% needed for text translation), so both 'w' and 'wb+' write raw bytes here.
permissions = {'w','wb+'};
fid = fopen(filename, permissions{asciiMode+1});
if (fid == -1)
    error('stlwrite:cannotWriteFile', 'Unable to write to %s', filename);
end
% Write the file contents
if asciiMode
    % Write HEADER
    fprintf(fid,'solid %s\r\n',options.title);
    % Write DATA: fprintf recycles the 12-value facet format over the 3x4xN array
    fprintf(fid,[...
        'facet normal %.7E %.7E %.7E\r\n' ...
        'outer loop\r\n' ...
        'vertex %.7E %.7E %.7E\r\n' ...
        'vertex %.7E %.7E %.7E\r\n' ...
        'vertex %.7E %.7E %.7E\r\n' ...
        'endloop\r\n' ...
        'endfacet\r\n'], facets);
    % Write FOOTER
    fprintf(fid,'endsolid %s\r\n',options.title);
else % BINARY
    % Write HEADER
    fprintf(fid, '%-80s', options.title); % Title (padded to the 80-byte header)
    fwrite(fid, size(facets, 3), 'uint32'); % Number of facets
    % Write DATA
    % Add one uint16(0) to the end of each facet using a typecasting trick:
    % each 48-byte facet (12 singles) is reinterpreted as 24 uint16 values,
    % then a 25th uint16 per column forms the 2-byte attribute field
    facets = reshape(typecast(facets(:), 'uint16'), 12*2, []);
    % Set the last bit to 0 (default) or supplied RGB
    facets(end+1,:) = options.facecolor;
    fwrite(fid, facets, 'uint16');
end
% Close the file
fclose(fid);
%fprintf('Wrote %d facets\n',size(facets, 2));
%% Input handling subfunctions
function [faces, vertices, options] = parseInputs(varargin)
% Resolve the three supported calling conventions (FV struct, X/Y/Z grids,
% or separate FACES/VERTICES arrays) into faces, vertices and parsed options.
% Determine input type
if isstruct(varargin{1}) % stlwrite('file', FVstruct, ...)
    if ~all(isfield(varargin{1},{'vertices','faces'}))
        error( 'Variable p must be a faces/vertices structure' );
    end
    faces = varargin{1}.faces;
    vertices = varargin{1}.vertices;
    options = parseOptions(varargin{2:end});
elseif isnumeric(varargin{1})
    firstNumInput = cellfun(@isnumeric,varargin);
    firstNumInput(find(~firstNumInput,1):end) = 0; % Only consider numerical input PRIOR to the first non-numeric
    numericInputCnt = nnz(firstNumInput);
    options = parseOptions(varargin{numericInputCnt+1:end});
    switch numericInputCnt
        case 3 % stlwrite('file', X, Y, Z, ...)
            % Extract the matrix Z
            Z = varargin{3};
            % Convert scalar XY to vectors (scalar means grid spacing)
            ZsizeXY = fliplr(size(Z));
            for i = 1:2
                if isscalar(varargin{i})
                    varargin{i} = (0:ZsizeXY(i)-1) * varargin{i};
                end
            end
            % Extract X and Y
            if isequal(size(Z), size(varargin{1}), size(varargin{2}))
                % X,Y,Z were all provided as matrices
                [X,Y] = varargin{1:2};
            elseif numel(varargin{1})==ZsizeXY(1) && numel(varargin{2})==ZsizeXY(2)
                % Convert vector XY to meshgrid
                [X,Y] = meshgrid(varargin{1}, varargin{2});
            else
                error('stlwrite:badinput', 'Unable to resolve X and Y variables');
            end
            % Convert to faces/vertices
            if strcmp(options.triangulation,'delaunay')
                faces = delaunay(X,Y);
                vertices = [X(:) Y(:) Z(:)];
            else
                if ~exist('mesh2tri','file')
                    error('stlwrite:missing', '"mesh2tri" is required to convert X,Y,Z matrices to STL. It can be downloaded from:\n%s\n',...
                        'http://www.mathworks.com/matlabcentral/fileexchange/28327')
                end
                [faces, vertices] = mesh2tri(X, Y, Z, options.triangulation);
            end
        case 2 % stlwrite('file', FACES, VERTICES, ...)
            faces = varargin{1};
            vertices = varargin{2};
        otherwise
            error('stlwrite:badinput', 'Unable to resolve input types.');
    end
end
if ~isempty(options.facecolor) % Handle colour preparation
    facecolor = uint16(options.facecolor);
    %Set the Valid Color bit (bit 15)
    c0 = bitshift(ones(size(faces,1),1,'uint16'),15);
    %Red color (10:15), Blue color (5:9), Green color (0:4)
    % NOTE(review): the masks keep 6 and 11 bits although each field is only
    % 5 bits wide; component values above 31 would bleed into neighbouring
    % fields -- confirm inputs are clamped to 0:31 by callers.
    c0 = bitor(bitshift(bitand(2^6-1, facecolor(:,1)),10),c0);
    c0 = bitor(bitshift(bitand(2^11-1, facecolor(:,2)),5),c0);
    c0 = bitor(bitand(2^6-1, facecolor(:,3)),c0);
    options.facecolor = c0;
else
    options.facecolor = 0;
end
function options = parseOptions(varargin)
% Parse the name/value option pairs for stlWrite.
% Defaults: binary mode, timestamped title, Delaunay triangulation and no
% face colour. 'facecolour' is accepted as an alias of 'facecolor'.
parser = inputParser;
parser.addParamValue('mode', 'binary', @ischar)
parser.addParamValue('title', sprintf('Created by stlwrite.m %s',datestr(now)), @ischar);
parser.addParamValue('triangulation', 'delaunay', @ischar);
parser.addParamValue('facecolor',[], @isnumeric)
parser.addParamValue('facecolour',[], @isnumeric)
parser.parse(varargin{:});
options = parser.Results;
% Let the British spelling win whenever it was actually supplied
if ~isempty(options.facecolour)
    options.facecolor = options.facecolour;
end
function [F,V]=mesh2tri(X,Y,Z,tri_type)
% function [F,V]=mesh2tri(X,Y,Z,tri_type)
%
% Convert gridded surface data (X,Y,Z matrices) into a triangulation F,V.
% Each grid quad is split according to TRI_TYPE: 'f' (forward slash),
% 'b' (back slash) or 'x' (cross: both diagonals, one extra centre vertex
% per quad).
%
% Available from http://www.mathworks.com/matlabcentral/fileexchange/28327
% Included here for convenience. Many thanks to Kevin Mattheus Moerman
% kevinmoerman@hotmail.com
% 15/07/2010
%------------------------------------------------------------------------
[J,I]=meshgrid(1:1:size(X,2)-1,1:1:size(X,1)-1); % top-left corner of every grid quad
switch tri_type
    case 'f'%Forward slash
        TRI_I=[I(:),I(:)+1,I(:)+1; I(:),I(:),I(:)+1];
        TRI_J=[J(:),J(:)+1,J(:); J(:),J(:)+1,J(:)+1];
        F = sub2ind(size(X),TRI_I,TRI_J);
    case 'b'%Back slash
        TRI_I=[I(:),I(:)+1,I(:); I(:)+1,I(:)+1,I(:)];
        TRI_J=[J(:)+1,J(:),J(:); J(:)+1,J(:),J(:)+1];
        F = sub2ind(size(X),TRI_I,TRI_J);
    case 'x'%Cross
        TRI_I=[I(:)+1,I(:); I(:)+1,I(:)+1; I(:),I(:)+1; I(:),I(:)];
        TRI_J=[J(:),J(:); J(:)+1,J(:); J(:)+1,J(:)+1; J(:),J(:)+1];
        IND=((numel(X)+1):numel(X)+prod(size(X)-1))'; % linear indices of the new centre vertices
        F = sub2ind(size(X),TRI_I,TRI_J);
        F(:,3)=repmat(IND,[4,1]); % third vertex of every triangle is the quad centre
        Fe_I=[I(:),I(:)+1,I(:)+1,I(:)]; Fe_J=[J(:),J(:),J(:)+1,J(:)+1];
        Fe = sub2ind(size(X),Fe_I,Fe_J);
        Xe=mean(X(Fe),2); Ye=mean(Y(Fe),2); Ze=mean(Z(Fe),2); % coordinates of the quad centres
        X=[X(:);Xe(:)]; Y=[Y(:);Ye(:)]; Z=[Z(:);Ze(:)];
end
V=[X(:),Y(:),Z(:)];
function format = stlGetFormat(fileName)
%STLGETFORMAT identifies the format of the STL file and returns 'binary' or
%'ascii'
%
%FILENAME is the path of the STL file to inspect
fid = fopen(fileName);
if fid == -1 % fail early with a clear message instead of on the first fseek
    error('stlGetFormat:cannotOpenFile','Unable to open file %s',fileName);
end
% Check the file size first, since binary files MUST have a size of 84+(50*n)
fseek(fid,0,1); % Go to the end of the file
fidSIZE = ftell(fid); % Check the size of the file
if rem(fidSIZE-84,50) > 0
    format = 'ascii';
else
    % Files with a size of 84+(50*n), might be either ascii or binary...
    % Read first 80 characters of the file.
    % For an ASCII file, the data should begin immediately (give or take a few
    % blank lines or spaces) and the first word must be 'solid'.
    % For a binary file, the first 80 characters contains the header.
    % It is bad practice to begin the header of a binary file with the word
    % 'solid', so it can be used to identify whether the file is ASCII or
    % binary.
    fseek(fid,0,-1); % go to the beginning of the file
    header = strtrim(char(fread(fid,80,'uchar')')); % trim leading and trailing spaces
    isSolid = strcmp(header(1:min(5,length(header))),'solid'); % take first 5 char
    % Double check by reading the last 80 characters of the file.
    % For an ASCII file, the data should end (give or take a few
    % blank lines or spaces) with 'endsolid <object_name>'.
    % If the last 80 characters contain the word 'endsolid' then this
    % confirms that the file is indeed ASCII.
    fseek(fid,-80,1); % go to the end of the file minus 80 characters
    tail = char(fread(fid,80,'uchar')');
    % strfind replaces the removed findstr; reduce to a scalar logical so the
    % short-circuit && below is well defined even when there is no match
    isEndSolid = ~isempty(strfind(tail,'endsolid'));
    if isSolid && isEndSolid
        format = 'ascii';
    else
        format = 'binary';
    end
end
fclose(fid);
function [vnew, fnew] = stlDelVerts(v, f, list)
%STLDELVERT removes a list of vertices from STL files
%V is the Nx3 array of vertices
%F is the Mx3 array of faces
%LIST are the vertices (rows) to delete, where length(LIST) < N
%VNEW is the new array of vertices
%FNEW is the new array of faces
% find (on the global set) the position (rows) of the vertices to be deleted
[~,vdel] = ismember(list,v,'rows');
% delete vertices and get new tags
% NOTE: size(v,1) is used instead of length(v): length returns max(size),
% which would wrongly report 3 for a vertex array with fewer than 3 rows.
vnew = v;
tags = 1:size(v,1);
vnew(vdel,:) = [];
tags(vdel) = [];
% delete every face that references a deleted vertex
fnew = f;
[fdel,~] = find(ismember(f,vdel)); % find the position (rows) of the faces to delete
fnew(fdel,:) = [];
% rename faces, as some of the vertices have been deleted
flist = reshape(fnew,numel(fnew),1);
[~,ind] = ismember(flist,tags);
fnew = reshape(ind,numel(flist)/3,3);
function [vnew, fnew]= stlSlimVerts(v, f)
% STLSLIMVERTS removes duplicate vertices in surface meshes.
%
% USAGE: [vnew, fnew] = stlSlimVerts(v, f)
%
% v : vertex coordinates for all triangles [3*n x 3]
% f : vertex index lists defining each triangular face [n x 3]
%
% Duplicate rows of v are merged into a single entry and f is renumbered
% accordingly, which typically shrinks v by about a factor of 6.
%
% Based on "patchslim" by Francis Esmonde-White, May 2010. See:
% http://www.esmonde-white.com/home/diversions/matlab-program-for-loading-stl-files
if ~exist('v','var')
    error('The vertex list (v) must be specified.');
end
if ~exist('f','var')
    error('The vertex connectivity of the triangle faces (f) must be specified.');
end
% unique(...,'rows') keeps one copy of each vertex; its last output maps
% every original row onto its surviving row, so it renumbers the faces too
[vnew, ~, rowMap] = unique(v, 'rows');
fnew = rowMap(f);
function [v, f, n, name] = stlReadAscii(fileName)
%STLREADASCII reads a STL file written in ASCII format
%V are the vertices
%F are the faces
%N are the normals
%NAME is the name of the STL object (NOT the name of the STL file)
%======================
% STL ascii file format
%======================
% ASCII STL files have the following structure. Technically each facet
% could be any 2D shape, but in practice only triangular facets tend to be
% used. The present code ONLY works for meshes composed of triangular
% facets.
%
% solid object_name
% facet normal x y z
% outer loop
% vertex x y z
% vertex x y z
% vertex x y z
% endloop
% endfacet
%
% <Repeat for all facets...>
%
% endsolid object_name
fid = fopen(fileName);
cellcontent = textscan(fid,'%s','delimiter','\n'); % read all the file and put content in cells
content = cellcontent{:}(logical(~strcmp(cellcontent{:},''))); % remove all blank lines
fclose(fid);
% read the STL name (text after 'solid ' on the first line)
line1 = char(content(1));
if (size(line1,2) >= 7)
    name = line1(7:end);
else
    name = 'Unnamed Object';
end
% read the vector normals
normals = char(content(logical(strncmp(content,'facet normal',12))));
n = str2num(normals(:,13:end));
% read the vertex coordinates (vertices)
vertices = char(content(logical(strncmp(content,'vertex',6))));
v = str2num(vertices(:,7:end));
nvert = size(vertices,1); % number of vertices
nfaces = sum(strcmp(content,'endfacet')); % number of faces
if (nvert == 3*nfaces)
    f = reshape(1:nvert,[3 nfaces])'; % create faces
else
    % Previously 'f' was simply left undefined here, which made the call
    % below die with a confusing "undefined variable" error. Fail with a
    % clear diagnostic instead.
    error('stlReadAscii:badFile', ...
        'File %s is malformed: %d vertices found for %d facets (expected %d).', ...
        fileName, nvert, nfaces, 3*nfaces);
end
% slim the file (delete duplicated vertices)
[v,f] = stlSlimVerts(v,f);
function list = stlGetVerts(v, f, mode)
%GETVERTS returns the vertices that are 'opened' or 'closed' depending on
%the 'mode'. An 'open' vertex is one that defines an open side. An
%open side is one that takes part in only one triangle.
%V is the Nx3 array of vertices
%F is the Mx3 array of faces
%MODE can be 'opened' or 'closed' depending of the kind of vertices to list
%LIST is the list of 'opened' or 'closed' vertices
% Build all edges (each triangle contributes 3), sorted so that the two
% orientations of the same edge compare equal
sides = sort([[f(:,1) f(:,2)]; ...
              [f(:,2) f(:,3)]; ...
              [f(:,3) f(:,1)]],2);
[C,ia,ic] = unique(sides,'rows'); % ic maps every edge onto its unique row in C
ind_all = sort(ic); % open and closed sides (duplicates become adjacent)
ind_rep = find(diff(ind_all) == 0); % positions of edges that occur more than once
ind_cls = ind_all(ind_rep); % closed sides (edges shared by two triangles)
sides_cls = C(ind_cls,:);
% NOTE(review): this assumes every edge belongs to at most two triangles;
% a non-manifold edge (3+ triangles) would not be classified correctly.
ind_rep = [ind_rep; ind_rep+1]; % both copies of each repeated edge
ind_opn = ind_all;
ind_opn(ind_rep) = []; % open sides (edges used by exactly one triangle)
sides_opn = C(ind_opn,:);
switch mode,
    case'opened',
        list = v(unique(sides_opn(:)),:);
    case 'closed',
        list = v(unique(sides_cls(:)),:);
    otherwise,
        error('getVerts:InvalidMode','The ''mode'' valid values are ''opened'' or ''closed''');
end
%% Tutorial and tests of IN_POLYHEDRON function
% *By Jarek Tuszynski* (jaroslaw.w.tuszynski@leidos.com)
%
% IN_POLYHEDRON tests if points are inside a 3D triangulated surface
% (faces/vertices) or volume (tetrahedrals/vertices). There are NO
% assumptions about orientation of the face normals.
%
% IN = INPOLYHEDRON(X,POINTS) tests if the query points (POINTS) are inside
% the surface/polyhedron defined by X. X can be a structure with fields
% 'vertices' and 'faces' or an object of MATLAB triangulation class. In
% case of triangulation class object we will only use the outside
% boundary. POINTS is an N-by-3 set of XYZ coordinates. IN is an N-by-1
% logical vector which will be TRUE for each query point inside the surface.
%
% INPOLYHEDRON(FACES,VERTICES,POINTS) takes faces/vertices separately,
% rather than in an FV structure.
%
%% Algorithm
% For each point do:
%
% # shoot a random ray out of the query point in a random direction
% # for each face solve:
% $\left[\begin{array}{ccc} -d_{x} & v1_{x}-v0_{x} & v2_{x}-v0_{x} \\ -d_{y} & v1_{y}-v0_{y} & v2_{y}-v0_{y} \\ -d_{z} & v1_{z}-v0_{z} & v2_{z}-v0_{z} \end{array}\right]\*\left[\begin{array}{c} t \\ u \\ v \end{array} \right]=\left[\begin{array}{c} o_{x}-v0_{x} \\ o_{y}-v0_{y} \\ o_{z}-v0_{z} \end{array}\right]$
% for $\left[\begin{array}{c} t \\ u \\ v \end{array} \right]$.
% _d_ is the ray direction. Variables _u_ , _v_ are barycentric coordinates
% and _t/|d|_ is the distance from the intersection point to the ray origin.
% Ray/triangle intersect if all _t_, _u_, _v_ and _w_=1-u-v are positive.
% # count ray / surface intersections
% # even number means inside and odd mean outside
% # in rare case the ray hits one of the surface faces right on the
% edge repeat the process with a new ray
%
%% References
% Based on
% * "Fast, minimum storage ray-triangle intersection". Tomas Möller and
% Ben Trumbore. Journal of Graphics Tools, 2(1):21--28, 1997.
% http://www.graphics.cornell.edu/pubs/1997/MT97.pdf (Ray/triangle
% intersection)
% * http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/raytri.c
% (Ray/triangle intersection)
% * Robert Sedgewick "Algorithms" (point in polygon algorithm)
%
%% Licence
% *The function is distributed under BSD License*
format compact; % viewing preference
clear variables; close all;
rng('shuffle'); % different random rays / points on every run
type('license.txt')
%% Test if random points are inside or outside of the volume
% defined by MATLAB test object "tetmesh"
load tetmesh;
TR = triangulation(tet,X);
[S.faces, S.vertices] = freeBoundary(TR); % keep only the outer boundary surface
n = 2000; % number of points
points = 80*rand(n,3) - repmat([40 40 0], n, 1); % random query points around the mesh
tic
in1 = in_polyhedron(S, points);
fprintf('Number of points inside is %i, outside is %i. Calculation time: %f sec\n', ...
  nnz(in1), nnz(in1==0), toc);
%% Plot results
figure, hold on, view(3) % Display the result
set(gcf, 'Position', get(gcf, 'Position').*[0 0 1.5 1.5])
patch(S,'FaceColor','g','FaceAlpha',0.2)
plot3(points( in1,1),points( in1,2),points( in1,3),'bo','MarkerFaceColor','b')
plot3(points(~in1,1),points(~in1,2),points(~in1,3),'r.'), axis image
legend({'volume', 'points inside', 'points outside'}, 'Location', 'southoutside')
%% Compare the results to the output of similar inpolyhedron function
% by Sven Holcombe (http://www.mathworks.com/matlabcentral/fileexchange/37856)
% inpolyhedron function is usually faster but requires knowledge about the
% face normals.
if exist('inpolyhedron.m', 'file')
  tic
  in2 = inpolyhedron(S, points);
  % NOTE(review): this fprintf reports nnz(in1) counts again although it
  % times the in2 call above; presumably it should report in2 -- verify.
  fprintf('Number of points inside is %i, outside is %i. Calculation time: %f sec\n', ...
    nnz(in1), nnz(in1==0), toc);
  fprintf('Number of differences is %i\n', sum(in1~=in2));
end
%% Flip 50% of face normals and repeat
% in_polyhedron makes no assumption about normal orientation, so the
% results must be unchanged
msk = rand(size(S.faces,1),1) > 0.5;
S.faces(msk,:) = fliplr(S.faces(msk,:));
in3 = in_polyhedron(S, points);
fprintf('Number of differences for in_polyhedron is %i\n', sum(in1~=in3));
if exist('inpolyhedron.m', 'file')
  in4 = inpolyhedron(S, points);
  fprintf('Number of differences for inpolyhedron is %i\n', sum(in1~=in4));
end
function inside = in_polyhedron(varargin)
%% Point within the volume test
%IN_POLYHEDRON Tests if points are inside a 3D triangulated surface
% (faces/vertices) or volume (tetrahedrals/vertices). There is NO
% assumption about orientation of the face normals.
%
% IN = IN_POLYHEDRON(X,POINTS) tests if the query points (POINTS) are inside
% the surface/polyhedron defined by X. X can be a structure with fields
% 'vertices' and 'faces' or an object of MATLAB triangulation class. In
% case of triangulation class object we will only use the outside
% boundary. POINTS is an N-by-3 set of XYZ coordinates. IN is an N-by-1
% logical vector which will be TRUE for each query point inside the surface.
%
% IN_POLYHEDRON(FACES,VERTICES,POINTS) takes faces/vertices separately,
% rather than in an FV structure.
%
% Algorithm:
% For each point do:
% 1) shoot a random ray out of the query point in a random direction
% 2) for each face solve
% |t|
% M * |u| = (o-v0)
% |v|
% for [t; u; v] where M = [-d, v1-v0, v2-v0]. "d" is the ray direction.
% u, v, w (=1-u-v) are barycentric coordinates and t is the distance from
% the ray origin in units of |d|.
% Ray/triangle intersect if all t, u, v and w are positive.
% 3) count ray / surface intersections
% 4) even number means inside and odd mean outside
% 5) in rare case the ray hits one of the surface faces right on the
% edge repeat the process with a new ray
%
% Based on:
% * Ray/triangle intersection
% * "Fast, minimum storage ray-triangle intersection". Tomas Möller and
% Ben Trumbore. Journal of Graphics Tools, 2(1):21--28, 1997.
% http://www.graphics.cornell.edu/pubs/1997/MT97.pdf
% * http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/raytri.c
% * point in polygon algorithm: Robert Sedgewick "Algorithms"
%
% Author:
% Jarek Tuszynski (jaroslaw.w.tuszynski@leidos.com)
%
% License: BSD license (http://en.wikipedia.org/wiki/BSD_licenses)
%% Get POINTS, FACES and VERTICES inputs
if isa(varargin{1}, 'triangulation') % in_polyhedron(triangulation_class, POINTS)
  [faces, vertices] = freeBoundary(varargin{1}); % only the outside boundary is used
  points = varargin{2};
elseif isstruct(varargin{1}) % in_polyhedron(FVstruct, POINTS)
  ok = isfield(varargin{1}, 'vertices') && isfield(varargin{1}, 'faces');
  assert(ok, 'Structure FV must have "faces" and "vertices" fields' );
  faces = varargin{1}.faces;
  vertices = varargin{1}.vertices;
  points = varargin{2};
else % in_polyhedron(FACES, VERTICES, POINTS)
  faces = varargin{1};
  vertices = varargin{2};
  points = varargin{3};
end
clear varargin
%% Transpose inputs if needed
% arrays supplied as 3xN (and not ambiguous 3x3) are flipped to Nx3
if (size(points ,1)==3 && size(points ,2)~=3), points = points'; end
if (size(vertices,1)==3 && size(vertices,2)~=3), vertices = vertices'; end
if (size(faces ,1)==3 && size(faces ,2)~=3), faces = faces'; end
assert(size(points ,2)==3, '"Points" array must be in Nx3 format');
assert(size(vertices,2)==3, '"Vertices" array must be in Nx3 format');
assert(size(faces ,2)==3, '"Faces" array must be in Nx3 format');
%% Convert the polyhedron into array of faces, stored as a point and 2 vectors
vert0 = vertices(faces(:,1),:);
edge1 = vertices(faces(:,2),:)-vert0; % find vectors for two edges sharing vert0
edge2 = vertices(faces(:,3),:)-vert0;
N = size(vert0,1); % number of faces
clear vertices faces % those are no longer needed
%% In case of 3D data use the following algorithm:
% 1) shoot a random ray out of the point in a random direction
% 2) solve for each face
%      |t|
%  M * |u| = (o-v0)
%      |v|
% for [t; u; v] where M = [-d, v1-v0, v2-v0]. u,v are barycentric coordinates
% and t - the distance from the ray origin in |d| units
% ray/triangle intersect if t>=0, u>=0, v>=0 and u+v<=1
% 3) count ray / surface intersections
% 4) even number means inside and odd mean outside
% 5) in rare case the ray hits one of the surface faces right on the
% edge repeat the process with a new ray
% distance (in barycentric units (0-1)) from the edge where we might get
% roundoff errors.
eps = 1e-10; % NOTE(review): shadows MATLAB's builtin eps inside this function
inside = nan+zeros(size(points,1),1); % nan indicates that there was no successful test yet
while any(isnan(inside))
  dir = repmat(rand(1,3)-0.5,N,1); % pick random direction for the ray (same for all faces)
  pvec = cross_prod(dir, edge2);
  det = sum(edge1.*pvec,2); % determinant of the matrix M = dot(edge1,pvec)
  angleOK = (abs(det)>eps); % if determinant is near zero then ray lies in the plane of the triangle
  %% For each point which we have not classified yet...
  for iPoint = 1:size(points,1)
    if ~isnan(inside(iPoint))
      continue
    end
    tvec = -bsxfun(@minus, vert0, points(iPoint,:)); % vector from vert0 to ray origin
    u = sum(tvec.*pvec,2)./det; % 1st barycentric coordinate
    % limit some calculations only to line/triangle pairs where it makes
    % a difference. It is tempting to try to push this concept of
    % limiting the number of calculations to only the necessary to "u"
    % and "t" but that produces slower code
    ok = (angleOK & u>-eps & u<=1.0+eps); % mask out the faces that AFAIK do not intersect
    u = u(ok,:); % trim so it is the size of v and t
    % if all line/plane intersections are outside the triangle than no intersections
    if ~any(ok)
      % NOTE(review): u was trimmed to u(ok,:) above, so it is empty here
      % and the "far from the edges" guard below is always satisfied;
      % presumably it was meant to test the untrimmed u -- verify.
      if ~any(u>eps & u<=1.0-eps) % if far away from the edges...
        inside (iPoint) = false;
      end
      continue
    end
    qvec = cross_prod(tvec(ok,:), edge1(ok,:)); % prepare to test V parameter
    v = sum(dir (ok,:).*qvec,2)./det(ok,:); % 2nd barycentric coordinate
    t = sum(edge2(ok,:).*qvec,2)./det(ok,:); % distance from point to triangle
    % test if line/plane intersection is within the triangle
    bary = [u, v, 1-u-v, t]; % barycentric coordinates plus ray distance
    intersect = all(bary>-eps,2); % intersection on the correct side of the origin
    baryi = bary(intersect,:); % barycentric coordinates for the intersections only
    if all( min(abs(baryi),[], 2)>eps ) % ray did not hit the edge of the triangle
      nIntersect = sum(intersect); % number of intersections
      inside(iPoint) = mod(nIntersect,2)>0; % inside if odd number of intersections
    end
    if any(abs(max(bary,[], 2)-1)<eps & abs(t)<eps) % test for point being one of the grid points
      inside(iPoint) = true;
    end
  end
end
inside = (inside~=0); % convert to boolean
%% ========================================================================
function c=cross_prod(a,b)
% Row-wise cross product of two Nx3 matrices (stripped-down MATLAB cross).
% Each output row is cross(a(i,:), b(i,:)).
cx = a(:,2).*b(:,3) - a(:,3).*b(:,2);
cy = a(:,3).*b(:,1) - a(:,1).*b(:,3);
cz = a(:,1).*b(:,2) - a(:,2).*b(:,1);
c = [cx, cy, cz];
| MATLAB |
3D | jstnmchl/xraySimulator | 3rdParty/SpinCalc/SpinCalc.m | .m | 20,640 | 409 | function OUTPUT=SpinCalc(CONVERSION,INPUT,tol,ichk)
%Function for the conversion of one rotation input type to desired output.
%Supported conversion input/output types are as follows:
% 1: Q Rotation Quaternions
% 2: EV Euler Vector and rotation angle (degrees)
% 3: DCM Orthogonal DCM Rotation Matrix
% 4: EA### Euler angles (12 possible sets) (degrees)
%
%Author: John Fuller
%National Institute of Aerospace
%Hampton, VA 23666
%John.Fuller@nianet.org
%
%Version 1.3
%June 30th, 2009
%
%Version 1.3 updates
% SpinCalc now detects when input data is too close to Euler singularity, if user is choosing
% Euler angle output. Prohibits output if middle angle is within 0.1 degree of singularity value.
%
%Review fixes applied to this copy:
% 1) EV input: unit-vector check now uses abs(), so undersized vectors are caught.
% 2) EA output: misplaced parenthesis in the Type 2 (0/180 deg) singularity check
% meant the 360-degree test compared indices, not angles, against 0.1.
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
% OUTPUT=SpinCalc(CONVERSION,INPUT,tol,ichk)
%Inputs:
%CONVERSION - Single string value that dictates the type of desired
% conversion. The conversion strings are listed below.
%
% 'DCMtoEA###' 'DCMtoEV' 'DCMtoQ' **for cases that involve
% 'EA###toDCM' 'EA###toEV' 'EA###toQ' euler angles, ### should be
% 'EVtoDCM' 'EVtoEA###' 'EVtoQ' replaced with the proper
% 'QtoDCM' 'QtoEA###' 'QtoEV' order desired. EA321 would
% 'EA###toEA###' be Z(yaw)-Y(pitch)-X(roll).
%
%INPUT - matrix or vector that corresponds to the first entry in the
% CONVERSION string, formatted as follows:
%
% DCM - 3x3xN multidimensional matrix which pre-multiplies a coordinate
% frame column vector to calculate its coordinates in the desired
% new frame.
%
% EA### - [psi,theta,phi] (Nx3) row vector list dictating to the first angle
% rotation (psi), the second (theta), and third (phi) (DEGREES)
%
% EV - [m1,m2,m3,MU] (Nx4) row vector list dictating the components of euler
% rotation vector (original coordinate frame) and the Euler
% rotation angle about that vector (MU) (DEGREES)
%
% Q - [q1,q2,q3,q4] (Nx4) row vector list defining quaternion of
% rotation. q4 = cos(MU/2) where MU is Euler rotation angle
%
%tol - tolerance value
%ichk - 0 disables warning flags
% 1 enables warning flags (near singularities)
%**NOTE: N corresponds to multiple orientations
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%Output:
%OUTPUT - matrix or vector corresponding to the second entry in the
% CONVERSION input string, formatted as shown above.
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%Pre-processer to determine type of conversion from CONVERSION string input
%Types are numbered as follows:
%Q=1 EV=2 DCM=3 EA=4
i_type=strfind(lower(CONVERSION),'to');
length=size(CONVERSION,2);
if length>12 || length<4, %no CONVERSION string can be shorter than 4 or longer than 12 chars
    error('Error: Invalid entry for CONVERSION input string');
end
o_type=length-i_type;
if i_type<5,
    i_type=i_type-1;
else
    i_type=i_type-2;
end
if o_type<5,
    o_type=o_type-1;
else
    o_type=o_type-2;
end
TYPES=cell(1,4);
TYPES{1,1}='Q'; TYPES{1,2}='EV'; TYPES{1,3}='DCM'; TYPES{1,4}='EA';
INPUT_TYPE=TYPES{1,i_type};
OUTPUT_TYPE=TYPES{1,o_type};
clear TYPES
%Confirm input as compared to program interpretation
if i_type~=4 && o_type~=4, %if input/output are NOT Euler angles
    CC=[INPUT_TYPE,'to',OUTPUT_TYPE];
    if strcmpi(CONVERSION,CC)==0;
        error('Error: Invalid entry for CONVERSION input string');
    end
else
    if i_type==4, %if input type is Euler angles, determine the order of rotations
        EULER_order_in=str2double(CONVERSION(1,3:5));
        rot_1_in=floor(EULER_order_in/100); %first rotation
        rot_2_in=floor((EULER_order_in-rot_1_in*100)/10); %second rotation
        rot_3_in=(EULER_order_in-rot_1_in*100-rot_2_in*10); %third rotation
        if rot_1_in<1 || rot_2_in<1 || rot_3_in<1 || rot_1_in>3 || rot_2_in>3 || rot_3_in>3,
            error('Error: Invalid input Euler angle order type (conversion string).'); %check that all orders are between 1 and 3
        elseif rot_1_in==rot_2_in || rot_2_in==rot_3_in,
            error('Error: Invalid input Euler angle order type (conversion string).'); %check that no 2 consecutive orders are equal (invalid)
        end
        %check input dimensions to be 1x3x1
        if size(INPUT,2)~=3 || size(INPUT,3)~=1
            error('Error: Input euler angle data vector is not Nx3')
        end
        %identify singularities
        input_size = size(INPUT);
        N = input_size(1);
        % Identify singularities (second Euler angle out of range)
        EA2 = INPUT(:,2); % (Nx1) 2nd Euler angle(s)
        ZEROS = zeros(N,1); % (Nx1)
        ONES = ones(N,1); % (Nx1)
        if rot_1_in==rot_3_in % Type 2 rotation (1st and 3rd rotations about same axis)
            if any(EA2>180*ONES) || any(EA2<ZEROS)
                error('Second input Euler angle(s) outside 0 to 180 degree range')
            elseif any(EA2>=178*ONES) || any(EA2<=2*ONES)
                if ichk==1
                    errordlg('Warning: Second input Euler angle(s) near a singularity (0 or 180 degrees).')
                end
            end
        else % Type 1 rotation (rotations about three distinct axes)
            if any(abs(EA2)>=90*ONES)
                error('Second input Euler angle(s) outside -90 to 90 degree range')
            elseif any(abs(EA2)>88*ONES)
                if ichk==1
                    errordlg('Warning: Second input Euler angle(s) near a singularity (-90 or 90 degrees).')
                end
            end
        end
    end
    if o_type==4, %if output type is Euler angles, determine order of rotations
        EULER_order_out=str2double(CONVERSION(1,length-2:length));
        rot_1_out=floor(EULER_order_out/100); %first rotation
        rot_2_out=floor((EULER_order_out-rot_1_out*100)/10); %second rotation
        rot_3_out=(EULER_order_out-rot_1_out*100-rot_2_out*10); %third rotation
        if rot_1_out<1 || rot_2_out<1 || rot_3_out<1 || rot_1_out>3 || rot_2_out>3 || rot_3_out>3,
            error('Error: Invalid output Euler angle order type (conversion string).'); %check that all orders are between 1 and 3
        elseif rot_1_out==rot_2_out || rot_2_out==rot_3_out,
            error('Error: Invalid output Euler angle order type (conversion string).'); %check that no 2 consecutive orders are equal
        end
    end
    if i_type==4 && o_type~=4, %if input are euler angles but not output
        CC=['EA',num2str(EULER_order_in),'to',OUTPUT_TYPE]; %construct program conversion string for checking against user input
    elseif o_type==4 && i_type~=4, %if output are euler angles but not input
        CC=[INPUT_TYPE,'to','EA',num2str(EULER_order_out)]; %construct program conversion string for checking against user input
    elseif i_type==4 && o_type==4, %if both input and output are euler angles
        CC=['EA',num2str(EULER_order_in),'to','EA',num2str(EULER_order_out)]; %construct program conversion string
    end
    if strcmpi(CONVERSION,CC)==0; %check program conversion string against user input to confirm the conversion command
        error('Error: Invalid entry for CONVERSION input string');
    end
end
clear i_type o_type CC
%From the input, determine the quaternions that uniquely describe the
%rotation prescribed by that input. The output will be calculated in the
%second portion of the code from these quaternions.
switch INPUT_TYPE
    case 'DCM'
        if size(INPUT,1)~=3 || size(INPUT,2)~=3 %check DCM dimensions
            error('Error: DCM matrix is not 3x3xN');
        end
        N=size(INPUT,3); %number of orientations
        %Check if matrix is indeed orthogonal
        perturbed=NaN(3,3,N);
        DCM_flag=0;
        for ii=1:N,
            perturbed(:,:,ii)=abs(INPUT(:,:,ii)*INPUT(:,:,ii)'-eye(3)); %perturbed array shows difference between DCM*DCM' and I
            if abs(det(INPUT(:,:,ii))-1)>tol, %if determinant is off by one more than tol, user is warned.
                if ichk==1,
                    DCM_flag=1;
                end
            end
            if abs(det(INPUT(:,:,ii))+1)<0.05, %if determinant is near -1, DCM is improper
                error('Error: Input DCM(s) improper');
            end
            if DCM_flag==1,
                errordlg('Warning: Input DCM matrix determinant(s) off from 1 by more than tolerance.')
            end
        end
        DCM_flag=0;
        if ichk==1,
            for kk=1:N,
                for ii=1:3,
                    for jj=1:3,
                        if perturbed(ii,jj,kk)>tol, %if any difference is larger than tol, user is warned.
                            DCM_flag=1;
                        end
                    end
                end
            end
            if DCM_flag==1,
                fprintf('Warning: Input DCM(s) matrix not orthogonal to precision tolerance.')
            end
        end
        clear perturbed DCM_flag
        Q=NaN(4,N);
        for ii=1:N,
            denom=NaN(4,1);
            denom(1)=0.5*sqrt(1+INPUT(1,1,ii)-INPUT(2,2,ii)-INPUT(3,3,ii));
            denom(2)=0.5*sqrt(1-INPUT(1,1,ii)+INPUT(2,2,ii)-INPUT(3,3,ii));
            denom(3)=0.5*sqrt(1-INPUT(1,1,ii)-INPUT(2,2,ii)+INPUT(3,3,ii));
            denom(4)=0.5*sqrt(1+INPUT(1,1,ii)+INPUT(2,2,ii)+INPUT(3,3,ii));
            %determine which Q equations maximize denominator for numerical stability
            switch find(denom==max(denom),1,'first') %determines max value of qtests to put in denominator
                case 1
                    Q(1,ii)=denom(1);
                    Q(2,ii)=(INPUT(1,2,ii)+INPUT(2,1,ii))/(4*Q(1,ii));
                    Q(3,ii)=(INPUT(1,3,ii)+INPUT(3,1,ii))/(4*Q(1,ii));
                    Q(4,ii)=(INPUT(2,3,ii)-INPUT(3,2,ii))/(4*Q(1,ii));
                case 2
                    Q(2,ii)=denom(2);
                    Q(1,ii)=(INPUT(1,2,ii)+INPUT(2,1,ii))/(4*Q(2,ii));
                    Q(3,ii)=(INPUT(2,3,ii)+INPUT(3,2,ii))/(4*Q(2,ii));
                    Q(4,ii)=(INPUT(3,1,ii)-INPUT(1,3,ii))/(4*Q(2,ii));
                case 3
                    Q(3,ii)=denom(3);
                    Q(1,ii)=(INPUT(1,3,ii)+INPUT(3,1,ii))/(4*Q(3,ii));
                    Q(2,ii)=(INPUT(2,3,ii)+INPUT(3,2,ii))/(4*Q(3,ii));
                    Q(4,ii)=(INPUT(1,2,ii)-INPUT(2,1,ii))/(4*Q(3,ii));
                case 4
                    Q(4,ii)=denom(4);
                    Q(1,ii)=(INPUT(2,3,ii)-INPUT(3,2,ii))/(4*Q(4,ii));
                    Q(2,ii)=(INPUT(3,1,ii)-INPUT(1,3,ii))/(4*Q(4,ii));
                    Q(3,ii)=(INPUT(1,2,ii)-INPUT(2,1,ii))/(4*Q(4,ii));
            end
        end
        Q=Q';
        clear denom
    case 'EV' %Euler Vector Input Type
        if size(INPUT,2)~=4 || size(INPUT,3)~=1 %check dimensions
            error('Error: Input euler vector and rotation data matrix is not Nx4')
        end
        N=size(INPUT,1);
        MU=INPUT(:,4)*pi/180; %assign mu name for clarity
        %FIX: abs() added so vectors shorter than unit length are also rejected
        if abs(sqrt(INPUT(:,1).^2+INPUT(:,2).^2+INPUT(:,3).^2)-ones(N,1))>tol*ones(N,1), %check that input m's constitute unit vector
            error('Input euler vector(s) components do not constitute a unit vector')
        end
        if MU<zeros(N,1) || MU>2*pi*ones(N,1), %check if rotation about euler vector is between 0 and 360
            error('Input euler rotation angle(s) not between 0 and 360 degrees')
        end
        Q=[INPUT(:,1).*sin(MU/2),INPUT(:,2).*sin(MU/2),INPUT(:,3).*sin(MU/2),cos(MU/2)]; %quaternion
        clear m1 m2 m3 MU
    case 'EA'
        psi=INPUT(:,1)*pi/180; theta=INPUT(:,2)*pi/180; phi=INPUT(:,3)*pi/180;
        N=size(INPUT,1); %number of orientations
        %Pre-calculate cosines and sines of the half-angles for conversion.
        c1=cos(psi./2); c2=cos(theta./2); c3=cos(phi./2);
        s1=sin(psi./2); s2=sin(theta./2); s3=sin(phi./2);
        c13=cos((psi+phi)./2); s13=sin((psi+phi)./2);
        c1_3=cos((psi-phi)./2); s1_3=sin((psi-phi)./2);
        c3_1=cos((phi-psi)./2); s3_1=sin((phi-psi)./2);
        if EULER_order_in==121,
            Q=[c2.*s13,s2.*c1_3,s2.*s1_3,c2.*c13];
        elseif EULER_order_in==232,
            Q=[s2.*s1_3,c2.*s13,s2.*c1_3,c2.*c13];
        elseif EULER_order_in==313;
            Q=[s2.*c1_3,s2.*s1_3,c2.*s13,c2.*c13];
        elseif EULER_order_in==131,
            Q=[c2.*s13,s2.*s3_1,s2.*c3_1,c2.*c13];
        elseif EULER_order_in==212,
            Q=[s2.*c3_1,c2.*s13,s2.*s3_1,c2.*c13];
        elseif EULER_order_in==323,
            Q=[s2.*s3_1,s2.*c3_1,c2.*s13,c2.*c13];
        elseif EULER_order_in==123,
            Q=[s1.*c2.*c3+c1.*s2.*s3,c1.*s2.*c3-s1.*c2.*s3,c1.*c2.*s3+s1.*s2.*c3,c1.*c2.*c3-s1.*s2.*s3];
        elseif EULER_order_in==231,
            Q=[c1.*c2.*s3+s1.*s2.*c3,s1.*c2.*c3+c1.*s2.*s3,c1.*s2.*c3-s1.*c2.*s3,c1.*c2.*c3-s1.*s2.*s3];
        elseif EULER_order_in==312,
            Q=[c1.*s2.*c3-s1.*c2.*s3,c1.*c2.*s3+s1.*s2.*c3,s1.*c2.*c3+c1.*s2.*s3,c1.*c2.*c3-s1.*s2.*s3];
        elseif EULER_order_in==132,
            Q=[s1.*c2.*c3-c1.*s2.*s3,c1.*c2.*s3-s1.*s2.*c3,c1.*s2.*c3+s1.*c2.*s3,c1.*c2.*c3+s1.*s2.*s3];
        elseif EULER_order_in==213,
            Q=[c1.*s2.*c3+s1.*c2.*s3,s1.*c2.*c3-c1.*s2.*s3,c1.*c2.*s3-s1.*s2.*c3,c1.*c2.*c3+s1.*s2.*s3];
        elseif EULER_order_in==321,
            Q=[c1.*c2.*s3-s1.*s2.*c3,c1.*s2.*c3+s1.*c2.*s3,s1.*c2.*c3-c1.*s2.*s3,c1.*c2.*c3+s1.*s2.*s3];
        else
            error('Error: Invalid input Euler angle order type (conversion string)');
        end
        clear c1 s1 c2 s2 c3 s3 c13 s13 c1_3 s1_3 c3_1 s3_1 psi theta phi
    case 'Q'
        if size(INPUT,2)~=4 || size(INPUT,3)~=1
            error('Error: Input quaternion matrix is not Nx4');
        end
        N=size(INPUT,1); %number of orientations
        if ichk==1,
            if abs(sqrt(INPUT(:,1).^2+INPUT(:,2).^2+INPUT(:,3).^2+INPUT(:,4).^2)-ones(N,1))>tol*ones(N,1)
                errordlg('Warning: Input quaternion norm(s) deviate(s) from unity by more than tolerance')
            end
        end
        Q=INPUT;
end
clear INPUT INPUT_TYPE EULER_order_in
%Normalize quaternions in case of deviation from unity. User has already
%been warned of deviation.
Qnorms=sqrt(sum(Q.*Q,2));
Q=[Q(:,1)./Qnorms,Q(:,2)./Qnorms,Q(:,3)./Qnorms,Q(:,4)./Qnorms];
switch OUTPUT_TYPE
    case 'DCM'
        Q=reshape(Q',1,4,N);
        OUTPUT=[Q(1,1,:).^2-Q(1,2,:).^2-Q(1,3,:).^2+Q(1,4,:).^2,2*(Q(1,1,:).*Q(1,2,:)+Q(1,3,:).*Q(1,4,:)),2*(Q(1,1,:).*Q(1,3,:)-Q(1,2,:).*Q(1,4,:));
                2*(Q(1,1,:).*Q(1,2,:)-Q(1,3,:).*Q(1,4,:)),-Q(1,1,:).^2+Q(1,2,:).^2-Q(1,3,:).^2+Q(1,4,:).^2,2*(Q(1,2,:).*Q(1,3,:)+Q(1,1,:).*Q(1,4,:));
                2*(Q(1,1,:).*Q(1,3,:)+Q(1,2,:).*Q(1,4,:)),2*(Q(1,2,:).*Q(1,3,:)-Q(1,1,:).*Q(1,4,:)),-Q(1,1,:).^2-Q(1,2,:).^2+Q(1,3,:).^2+Q(1,4,:).^2];
    case 'EV'
        MU=2*atan2(sqrt(sum(Q(:,1:3).*Q(:,1:3),2)),Q(:,4));
        if sin(MU/2)~=zeros(N,1), %vectorized if: true only when every rotation is non-zero
            OUTPUT=[Q(:,1)./sin(MU/2),Q(:,2)./sin(MU/2),Q(:,3)./sin(MU/2),MU*180/pi];
        else
            OUTPUT=NaN(N,4);
            for ii=1:N,
                if sin(MU(ii,1)/2)~=0,
                    OUTPUT(ii,1:4)=[Q(ii,1)/sin(MU(ii,1)/2),Q(ii,2)/sin(MU(ii,1)/2),Q(ii,3)/sin(MU(ii,1)/2),MU(ii,1)*180/pi];
                else
                    OUTPUT(ii,1:4)=[1,0,0,MU(ii,1)*180/pi]; %arbitrary axis for a zero rotation
                end
            end
        end
    case 'Q'
        OUTPUT=Q;
    case 'EA'
        if EULER_order_out==121,
            psi=atan2((Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)),(Q(:,2).*Q(:,4)-Q(:,1).*Q(:,3)));
            theta=acos(Q(:,4).^2+Q(:,1).^2-Q(:,2).^2-Q(:,3).^2);
            phi=atan2((Q(:,1).*Q(:,2)-Q(:,3).*Q(:,4)),(Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)));
            Euler_type=2;
        elseif EULER_order_out==232;
            psi=atan2((Q(:,1).*Q(:,4)+Q(:,2).*Q(:,3)),(Q(:,3).*Q(:,4)-Q(:,1).*Q(:,2)));
            theta=acos(Q(:,4).^2-Q(:,1).^2+Q(:,2).^2-Q(:,3).^2);
            phi=atan2((Q(:,2).*Q(:,3)-Q(:,1).*Q(:,4)),(Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)));
            Euler_type=2;
        elseif EULER_order_out==313;
            psi=atan2((Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)),(Q(:,1).*Q(:,4)-Q(:,2).*Q(:,3)));
            theta=acos(Q(:,4).^2-Q(:,1).^2-Q(:,2).^2+Q(:,3).^2);
            phi=atan2((Q(:,1).*Q(:,3)-Q(:,2).*Q(:,4)),(Q(:,1).*Q(:,4)+Q(:,2).*Q(:,3)));
            Euler_type=2;
        elseif EULER_order_out==131;
            psi=atan2((Q(:,1).*Q(:,3)-Q(:,2).*Q(:,4)),(Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)));
            theta=acos(Q(:,4).^2+Q(:,1).^2-Q(:,2).^2-Q(:,3).^2);
            phi=atan2((Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)),(Q(:,3).*Q(:,4)-Q(:,1).*Q(:,2)));
            Euler_type=2;
        elseif EULER_order_out==212;
            psi=atan2((Q(:,1).*Q(:,2)-Q(:,3).*Q(:,4)),(Q(:,1).*Q(:,4)+Q(:,2).*Q(:,3)));
            theta=acos(Q(:,4).^2-Q(:,1).^2+Q(:,2).^2-Q(:,3).^2);
            phi=atan2((Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)),(Q(:,1).*Q(:,4)-Q(:,2).*Q(:,3)));
            Euler_type=2;
        elseif EULER_order_out==323;
            psi=atan2((Q(:,2).*Q(:,3)-Q(:,1).*Q(:,4)),(Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)));
            theta=acos(Q(:,4).^2-Q(:,1).^2-Q(:,2).^2+Q(:,3).^2);
            phi=atan2((Q(:,1).*Q(:,4)+Q(:,2).*Q(:,3)),(Q(:,2).*Q(:,4)-Q(:,1).*Q(:,3)));
            Euler_type=2;
        elseif EULER_order_out==123;
            psi=atan2(2.*(Q(:,1).*Q(:,4)-Q(:,2).*Q(:,3)),(Q(:,4).^2-Q(:,1).^2-Q(:,2).^2+Q(:,3).^2));
            theta=asin(2.*(Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)));
            phi=atan2(2.*(Q(:,3).*Q(:,4)-Q(:,1).*Q(:,2)),(Q(:,4).^2+Q(:,1).^2-Q(:,2).^2-Q(:,3).^2));
            Euler_type=1;
        elseif EULER_order_out==231;
            psi=atan2(2.*(Q(:,2).*Q(:,4)-Q(:,1).*Q(:,3)),(Q(:,4).^2+Q(:,1).^2-Q(:,2).^2-Q(:,3).^2));
            theta=asin(2.*(Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)));
            phi=atan2(2.*(Q(:,1).*Q(:,4)-Q(:,3).*Q(:,2)),(Q(:,4).^2-Q(:,1).^2+Q(:,2).^2-Q(:,3).^2));
            Euler_type=1;
        elseif EULER_order_out==312;
            psi=atan2(2.*(Q(:,3).*Q(:,4)-Q(:,1).*Q(:,2)),(Q(:,4).^2-Q(:,1).^2+Q(:,2).^2-Q(:,3).^2));
            theta=asin(2.*(Q(:,1).*Q(:,4)+Q(:,2).*Q(:,3)));
            phi=atan2(2.*(Q(:,2).*Q(:,4)-Q(:,3).*Q(:,1)),(Q(:,4).^2-Q(:,1).^2-Q(:,2).^2+Q(:,3).^2));
            Euler_type=1;
        elseif EULER_order_out==132;
            psi=atan2(2.*(Q(:,1).*Q(:,4)+Q(:,2).*Q(:,3)),(Q(:,4).^2-Q(:,1).^2+Q(:,2).^2-Q(:,3).^2));
            theta=asin(2.*(Q(:,3).*Q(:,4)-Q(:,1).*Q(:,2)));
            phi=atan2(2.*(Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)),(Q(:,4).^2+Q(:,1).^2-Q(:,2).^2-Q(:,3).^2));
            Euler_type=1;
        elseif EULER_order_out==213;
            psi=atan2(2.*(Q(:,1).*Q(:,3)+Q(:,2).*Q(:,4)),(Q(:,4).^2-Q(:,1).^2-Q(:,2).^2+Q(:,3).^2));
            theta=asin(2.*(Q(:,1).*Q(:,4)-Q(:,2).*Q(:,3)));
            phi=atan2(2.*(Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)),(Q(:,4).^2-Q(:,1).^2+Q(:,2).^2-Q(:,3).^2));
            Euler_type=1;
        elseif EULER_order_out==321;
            psi=atan2(2.*(Q(:,1).*Q(:,2)+Q(:,3).*Q(:,4)),(Q(:,4).^2+Q(:,1).^2-Q(:,2).^2-Q(:,3).^2));
            theta=asin(2.*(Q(:,2).*Q(:,4)-Q(:,1).*Q(:,3)));
            phi=atan2(2.*(Q(:,1).*Q(:,4)+Q(:,3).*Q(:,2)),(Q(:,4).^2-Q(:,1).^2-Q(:,2).^2+Q(:,3).^2));
            Euler_type=1;
        else
            error('Error: Invalid output Euler angle order type (conversion string).');
        end
        if(isreal([psi,theta,phi]))==0,
            error('Error: Unreal Euler output. Input resides too close to singularity. Please choose different output type.')
        end
        OUTPUT=mod([psi,theta,phi]*180/pi,360); %deg
        if Euler_type==1,
            sing_chk=find(abs(theta)*180/pi>89.9);
            sing_chk=sort(sing_chk(sing_chk>0));
            if size(sing_chk,1)>=1,
                error('Error: Input rotation #%s resides too close to Type 1 Euler singularity.\nType 1 Euler singularity occurs when second angle is -90 or 90 degrees.\nPlease choose different output type.',num2str(sing_chk(1,1)));
            end
        elseif Euler_type==2,
            %FIX: third find() previously closed too early, comparing indices (not angles) to 0.1
            sing_chk=[find(abs(theta*180/pi)<0.1);find(abs(theta*180/pi-180)<0.1);find(abs(theta*180/pi-360)<0.1)];
            sing_chk=sort(sing_chk(sing_chk>0));
            if size(sing_chk,1)>=1,
                error('Error: Input rotation #%s resides too close to Type 2 Euler singularity.\nType 2 Euler singularity occurs when second angle is 0 or 180 degrees.\nPlease choose different output type.',num2str(sing_chk(1,1)));
            end
        end
end
| MATLAB |
3D | aafkegros/MicroscopyNodes | mkdocs_macros.py | .py | 984 | 29 | from pathlib import Path
def define_env(env):
    """Register custom mkdocs-macros for the documentation build."""

    @env.macro
    def youtube(video_id, width=360, height=200):
        """Return a click-to-consent, lazy-loading YouTube embed for *video_id*."""
        return f'''
<div class="yt-lazy" data-id="{video_id}" style="width:{width}px; height:{height}px;">
<div class="yt-thumbnail" style="background-image: url('https://img.youtube.com/vi/{video_id}/hqdefault.jpg');">
<div class="yt-play-button"></div>
<div class="yt-overlay-text">
Click to load video from YouTube.<br />
By clicking, you agree to YouTube’s privacy policy.
</div>
</div>
</div>
'''

    @env.macro
    def svg(name, css_style='icon'):
        """Inline docs/html_blender_icons/<name>.svg wrapped in a span with class *css_style*."""
        # Path is resolved relative to the mkdocs project root directory.
        svg_path = Path(env.project_dir) / "docs" / "html_blender_icons" / f"{name}.svg"
        try:
            markup = svg_path.read_text(encoding="utf-8")
        except Exception as e:
            # Best-effort: a missing/unreadable icon becomes an HTML comment, not a build failure.
            return f"<!-- ERROR loading SVG: {e} -->"
        return f'<span class="{css_style}">{markup}</span>'
| Python |
3D | aafkegros/MicroscopyNodes | build.py | .py | 5,369 | 176 | import glob
import os
import subprocess
import sys
from dataclasses import dataclass
from typing import List, Union
# import bpy
import tomlkit
# Manifest whose "wheels"/"platforms" entries are rewritten for each build.
toml_path = "microscopynodes/blender_manifest.toml"
# Directory where downloaded wheels are cached before being bundled.
whl_path = "./microscopynodes/wheels"
# NOTE(review): machine-specific Blender binary path — presumably only valid on the maintainer's machine.
blender_path ="/Users/oanegros/Documents/blenderBuilds/stable/blender-4.5.3-macos-arm64+lts.67807e1800cc/Blender/Blender.app/Contents/MacOS/Blender"
# permanent_whls = ["./microscopynodes/wheels/asciitree-0.3.4.dev1-py3-none-any.whl"]
@dataclass
class Platform:
    """Build target: pairs a pip wheel platform tag with Blender's manifest platform name."""
    pypi_suffix: str  # tag passed to `pip download --platform=...`
    metadata: str  # platform string written into blender_manifest.toml
    # tags for blender metadata
    # platforms = ["windows-x64", "macos-arm64", "linux-x64", "windows-arm64", "macos-x64"]
# The concrete platform targets the extension is built for.
windows_x64 = Platform(pypi_suffix="win_amd64", metadata="windows-x64")
linux_x64 = Platform(pypi_suffix="manylinux2014_x86_64", metadata="linux-x64")
macos_arm = Platform(pypi_suffix="macosx_12_0_arm64", metadata="macos-arm64")
macos_intel = Platform(pypi_suffix="macosx_10_16_x86_64", metadata="macos-x64")
# Pinned runtime dependencies bundled as wheels into the extension.
required_packages = [
    # scikit-image + scipy is really big, but i cannot remove the fast marching cubes algorithm, or the fast find_objects
    # "scipy==1.15.2",
    "dask==2025.5.1",
    "importlib-metadata==8.7.0", # this seemed to no longer be standard included since Blender 4.3? People had bugs with this but it's confusing
    "tifffile==2025.6.11",
    "imagecodecs==2025.3.30", # allows LZW compressed tif loading
    "zarr==3.0.8",
    "fsspec==2025.5.1",
    'cmap==0.6.0',
    's3fs==2025.5.1',
    'pyyaml==6.0.2', # needed for preference yaml loading and writing
    'zmesh==1.8.0', # for fast mesh generation from labeled data
]
# this is deprecated - for non buildable wheels, will remove in the future
nodeps_packages = [
]
build_platforms = [
    windows_x64,
    linux_x64,
    macos_arm,
    macos_intel,
]
def run_python(args: str):
    """Run the current Python interpreter with the given space-separated argument string."""
    interpreter = os.path.realpath(sys.executable)
    subprocess.run([interpreter, *args.split(" ")])
def remove_whls():
    """Delete every wheel currently cached in the add-on wheel directory."""
    stale_wheels = glob.glob(os.path.join(whl_path, "*.whl"))
    for stale in stale_wheels:
        os.remove(stale)
def download_whls(
    platforms: Union[Platform, List[Platform]],
    required_packages: List[str] = required_packages,
    python_version="3.11",
    clean: bool = True,
):
    """Download binary wheels of *required_packages* for each platform into the wheel cache.

    A single Platform is accepted and treated as a one-element list. When
    *clean* is true, previously cached wheels are removed first.
    """
    if isinstance(platforms, Platform):
        platforms = [platforms]
    if clean:
        remove_whls()
    for platform in platforms:
        pip_args = (
            f"-m pip download {' '.join(required_packages)}"
            f" --dest ./microscopynodes/wheels --only-binary=:all:"
            f" --python-version={python_version} --platform={platform.pypi_suffix}"
        )
        print(required_packages, nodeps_packages, pip_args)
        run_python(pip_args)
def update_toml_whls(platforms):
    """Sync the blender_manifest.toml "wheels" and "platforms" entries with the wheel cache."""
    # Define the path for wheel files
    wheels_dir = "microscopynodes/wheels"
    wheel_files = glob.glob(f"{wheels_dir}/*.whl")
    wheel_files.sort()
    # Packages to remove
    # NOTE(review): presumably excluded because Blender already bundles numpy — confirm.
    packages_to_remove = {
        "numpy"
    }
    # Filter out unwanted wheel files
    to_remove = []
    to_keep = []
    for whl in wheel_files:
        if any(pkg in whl for pkg in packages_to_remove):
            to_remove.append(whl)
        else:
            to_keep.append(whl)
    # Remove the unwanted wheel files from the filesystem
    for whl in to_remove:
        # if whl not in permanent_whls:
        os.remove(whl)
    # Load the TOML file
    with open(toml_path, "r") as file:
        manifest = tomlkit.parse(file.read())
    # Update the wheels list with the remaining wheel files
    manifest["wheels"] = [f"./wheels/{os.path.basename(whl)}" for whl in to_keep]
    # Simplify platform handling
    if not isinstance(platforms, list):
        platforms = [platforms]
    manifest["platforms"] = [p.metadata for p in platforms]
    # Write the updated TOML file
    # The replace chain re-formats the dumped TOML: one list entry per line
    # (tab-indented, trailing comma) and forward slashes in wheel paths.
    with open(toml_path, "w") as file:
        file.write(
            tomlkit.dumps(manifest)
            .replace('["', '[\n\t"')
            .replace("\\\\", "/")
            .replace('", "', '",\n\t"')
            .replace('"]', '",\n]')
        )
def clean_files(suffix: str = ".blend1") -> None:
    """Remove every file under microscopynodes/ (recursively) whose name ends in *suffix*."""
    doomed = glob.glob(f"microscopynodes/**/*{suffix}", recursive=True)
    for doomed_file in doomed:
        os.remove(doomed_file)
def build_extension(split: bool = True) -> None:
    """Invoke Blender's `extension build` command, optionally split per platform.

    Stray backup (.blend1) and session (.MNSession) files are purged first so
    they do not end up inside the extension zip.
    """
    for leftover_suffix in (".blend1", ".MNSession"):
        clean_files(suffix=leftover_suffix)
    base_cmd = f"{blender_path} --command extension build"
    if split:
        full_cmd = base_cmd + " --split-platforms --source-dir microscopynodes --output-dir ."
    else:
        full_cmd = base_cmd + " --source-dir microscopynodes --output-dir ."
    subprocess.run(full_cmd.split(" "))
def build(platform) -> None:
    """Full pipeline for one target: fetch wheels, sync the manifest, build the zip(s).

    *platform* may be a single Platform or a list of them (main() passes the
    whole build_platforms list to produce one multi-platform build).
    """
    download_whls(platform)
    update_toml_whls(platform)
    build_extension()
def main():
    """Entry point: build the extension once, covering all target platforms."""
    # for platform in build_platforms:
    #     build(platform)
    build(build_platforms)
if __name__ == "__main__":
    main()
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/parse_inputs.py | .py | 5,670 | 143 | import bpy
import numpy as np
from pathlib import Path
from .ui import preferences
from .handle_blender_structs import *
from .file_to_array import load_array, selected_array_option
from .ui.preferences import addon_preferences
def get_cache_dir():
    """Resolve (and create) the directory used to cache converted image data.

    The base directory comes from the add-on cache preference; the selected
    input file stem and array option are appended so each dataset/resolution
    combination gets its own cache folder.

    Returns:
        Path: an existing directory for the current file/array selection.

    Raises:
        ValueError: if the preference holds an unknown cache option.
    """
    import tempfile  # local import: fixes NameError — tempfile was never imported at module level

    cache_option = addon_preferences().cache_option
    if cache_option == 'TEMPORARY':
        path = tempfile.gettempdir()
    elif cache_option == 'PATH':
        path = addon_preferences().cache_path
    elif cache_option == 'WITH_PROJECT':
        path = bpy.path.abspath('//')
    else:
        # Previously an unknown option fell through to an UnboundLocalError.
        raise ValueError(f"Unknown cache option: {cache_option}")
    path = Path(path) / Path(bpy.context.scene.MiN_input_file).stem
    path = path / str(bpy.context.scene.MiN_selected_array_option)
    path.mkdir(parents=True, exist_ok=True)
    return path
def parse_initial():
    """Collect and validate everything needed to start a load.

    Returns:
        (ch_dicts, (axes_order, pixel_size, size_px), cache_dir) — the shared
        parameters consumed by both the threaded and the blocking load paths.
    """
    # all parameters initialized here are shared between threaded and blocking load functions
    check_input()
    axes_order = bpy.context.scene.MiN_axes_order
    # per-axis physical pixel size as [x, y, z]
    pixel_size = np.array([bpy.context.scene.MiN_xy_size,bpy.context.scene.MiN_xy_size,bpy.context.scene.MiN_z_size])
    if not bpy.context.scene.MiN_pixel_sizes_are_rescaled:
        pixel_size *= selected_array_option().scale()
    cache_dir = get_cache_dir()
    ch_dicts = parse_channellist(bpy.context.scene.MiN_channelList)
    # pixel extent per x/y/z axis; axes absent from axes_order are clamped to 1 below
    size_px = np.array([selected_array_option().shape()[axes_order.find(dim)] if dim in axes_order else 0 for dim in 'xyz'])
    size_px = tuple([max(ax, 1) for ax in size_px])
    # first load (nothing to reload): force both data and settings updates
    if bpy.context.scene.MiN_reload is None:
        bpy.context.scene.MiN_update_data = True
        bpy.context.scene.MiN_update_settings = True
    return ch_dicts, (axes_order, pixel_size, size_px), cache_dir
def parse_channellist(channellist):
    """Build one dict of data/metadata/user settings per UI channel.

    Bug fix: iterate over the ``channellist`` argument instead of reaching
    back into ``bpy.context.scene.MiN_channelList`` (callers pass that same
    collection, so behavior is unchanged, but the parameter is now honored).

    Returns:
        list[dict]: per-channel dicts seeded with UI settings, min_keys
        aliases, a reload identifier, and empty data/metadata containers.
    """
    # surface resolution preference is identical for every channel — read it once
    surf_resolution = int(addon_preferences(bpy.context).surf_resolution)
    ch_dicts = []
    for channel in channellist:
        ch = {k: getattr(channel, k) for k in channel.keys()}  # copy UI settings - uses getattr to get enum names
        for key in min_keys:  # rename ui-keys to enum for which objects to load
            if key.name.lower() in ch:
                ch[key] = ch[key.name.lower()]
        ch['identifier'] = f"ch_id{channel['ix']}"  # reload-identity
        ch['data'] = None
        ch['collections'] = {}
        ch['metadata'] = {}
        ch['local_files'] = {}
        ch['surf_resolution'] = surf_resolution
        ch_dicts.append(ch)
    return ch_dicts
def parse_unit(string):
    """Map a unit name (e.g. "NANOMETER") to its size in meters; None if unrecognized."""
    meters_per_unit = {
        "ANGSTROM": 1e-10,
        "NANOMETER": 1e-9,
        "MICROMETER": 1e-6,
        "MILLIMETER": 1e-3,
        "METER": 1,
    }
    return meters_per_unit.get(string)
def parse_scale(size_px, pixel_size, objs):
    """Compute the per-axis world scale (Blender units per pixel) for the import.

    Fix: removed leftover debug prints (including a stray ``print('hey')``).

    Args:
        size_px: (x, y, z) pixel extent of the array.
        pixel_size: per-axis physical pixel size.
        objs: dict of existing objects from a previous load (min_keys -> obj or None).

    Returns:
        (scale, scale_factor): scale is a per-axis vector (or None when no
        preference rule matched); scale_factor rescales relative to a reload.
    """
    scale = None
    scale_factor = 1
    world_scale = addon_preferences(bpy.context).import_scale
    # xy-isotropic scaling with z stretched by the pixel-size anisotropy
    isotropic = np.array([1, 1, pixel_size[-1] / pixel_size[0]])
    if world_scale == "DEFAULT" or bpy.context.scene.MiN_unit == 'AU':  # cm / px
        scale = isotropic * 0.01
    if world_scale == "MOLECULAR_NODES" and bpy.context.scene.MiN_unit != 'AU':  # cm / nm
        physical_size = parse_unit(bpy.context.scene.MiN_unit) * pixel_size
        scale = physical_size / 1e-7
    if "_SCALE" in world_scale and bpy.context.scene.MiN_unit != 'AU':  # m / unit
        physical_size = parse_unit(bpy.context.scene.MiN_unit) * pixel_size
        scale = physical_size / parse_unit(world_scale.removesuffix("_SCALE"))
    if objs[min_keys.AXES] is not None:
        # NOTE(review): get_previous_scale returns None when the node lookup
        # fails, which would make this unpack raise TypeError — confirm that
        # an AXES object always carries the expected node group.
        old_size_px, old_scale = get_previous_scale(objs[min_keys.AXES], size_px)
        if bpy.context.scene.MiN_update_data and not bpy.context.scene.MiN_update_settings:
            # keep the previous world footprint: rescale by the pixel-count ratio
            scale = (np.array(old_size_px) / np.array(size_px)) * old_scale
            scale_factor = (np.array(size_px) / np.array(old_size_px)) * (scale / old_scale)
    return scale, scale_factor
def get_previous_scale(axes_obj, size_px):
    """Read the size_px/scale vectors stored in the axes object's geometry-node group.

    Returns:
        (size_px, scale) vectors from the named nodes, or None when the
        modifier or the expected nodes are missing.

    NOTE(review): parse_scale unpacks this result directly, so a None return
    raises TypeError there — confirm whether that path can actually occur.
    """
    try:
        mod = get_min_gn(axes_obj)
        nodes = mod.node_group.nodes
        old_size_px = nodes['[Microscopy Nodes size_px]'].vector
        old_scale = nodes['[Microscopy Nodes scale]'].vector
        return old_size_px, old_scale
    except (KeyError, AttributeError) as e:
        # AttributeError fix: get_min_gn() may return None (no modifier yet),
        # which previously escaped the KeyError-only handler.
        print(e)
        return None
def parse_loc(scale, size_px, container):
    """Return the world-space location for the container object.

    Keeps the existing location on reloads that don't update settings;
    otherwise anchors the data per the import-location preference.
    """
    if not bpy.context.scene.MiN_update_settings:
        try:
            return container.location
        except Exception as e:
            pass  # no existing container to preserve - fall through to preference
    anchor_factors = {
        "XY_CENTER": [-0.5, -0.5, 0],
        "XYZ_CENTER": [-0.5, -0.5, -0.5],
        "ZERO": [0, 0, 0],
    }
    prefloc = addon_preferences(bpy.context).import_loc
    factors = anchor_factors.get(prefloc)
    if factors is None:
        return None
    return factors * np.array(size_px) * scale
def check_input():
    """Validate scene load settings; raise ValueError on invalid input."""
    scn = bpy.context.scene
    if scn.MiN_xy_size <= 0 or scn.MiN_z_size <= 0:
        raise ValueError("cannot do zero-size pixels")
    # TODO change this to a callback function on change channel name instead
    ch_names = [ch["name"] for ch in scn.MiN_channelList]
    if len(ch_names) != len(set(ch_names)):
        raise ValueError("No duplicate channel names allowed")
    return
def parse_reload(container_obj):
    """Map every min_keys entry to its existing child object under *container_obj*.

    Children are matched by the key name appearing in their Microscopy Nodes
    geometry-node modifier name; unmatched keys map to None.
    """
    objs = {}
    for key in min_keys:
        objs[key] = None
        if container_obj is None:
            continue
        key_tag = key.name.lower()
        for child in container_obj.children:
            child_gn = get_min_gn(child)
            if child_gn is not None and key_tag in child_gn.name:
                objs[key] = child
    return objs
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/__init__.py | .py | 2,828 | 84 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Parts of this are based on or taken from Brady Johnston's MolecularNodes
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
from . import file_to_array
from . import ui
from . import min_nodes
from .min_nodes.shader_nodes import MIN_add_shader_node_menu, MIN_context_shader_node_menu
from .ui.preferences import addon_preferences
# Every Blender class the add-on registers/unregisters, gathered from the submodules.
all_classes = (
    ui.CLASSES +
    file_to_array.CLASSES +
    min_nodes.CLASSES
)
# print(all_classes)
def _test_register():
    # Dev helper: when a previous (partial) registration is still live, e.g.
    # on re-running the script inside Blender, unregister first and retry.
    try:
        register()
    except Exception:
        unregister()
        register()
        pass
def register():
    """Register all add-on classes, scene properties and shader-node menu entries."""
    for op in all_classes:
        try:
            bpy.utils.register_class(op)
        except Exception as e:
            # best-effort: a class may already be registered from a previous load
            print(op, e)
            pass
    # collection properties holding per-file array options and per-channel UI state
    bpy.types.Scene.MiN_array_options = bpy.props.CollectionProperty(type=file_to_array.ArrayOption)
    bpy.types.Scene.MiN_channelList = bpy.props.CollectionProperty(type=ui.channel_list.ChannelDescriptor)
    bpy.types.NODE_MT_add.append(MIN_add_shader_node_menu)
    bpy.types.NODE_MT_context_menu.append(MIN_context_shader_node_menu)
    # seed default channel preferences on first run; probing channels[0].name
    # raises when no channels exist yet
    try:
        addon_preferences(bpy.context).channels[0].name
    except:
        try:
            addon_preferences(bpy.context).n_default_channels = 6
        except AttributeError:
            pass
    return
def unregister():
    """Unregister all add-on classes and remove the shader-node menu entries."""
    for op in all_classes:
        try:
            bpy.utils.unregister_class(op)
        except Exception as e:
            # best-effort: a class may already be gone (e.g. after a failed register)
            print(op, e)
            pass
    bpy.types.NODE_MT_add.remove(MIN_add_shader_node_menu)
    bpy.types.NODE_MT_context_menu.remove(MIN_context_shader_node_menu)
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load.py | .py | 5,406 | 144 | import bpy
from pathlib import Path
import numpy as np
from .handle_blender_structs import *
from .handle_blender_structs import dependent_props
from .load_components import *
from .parse_inputs import *
from .file_to_array import load_array, arr_shape
from mathutils import Matrix
def load_threaded(params):
    """Background half of the load: read the array and export volume caches.

    Runs off the main thread, so it must not touch Blender data beyond scene
    reads; any exception is stashed in the first channel dict for the main
    thread to re-raise. Returns *params* (with ch_dicts filled in) either way.
    """
    try:
        scn = bpy.context.scene
        if not scn.MiN_update_data:
            return params
        ch_dicts, (axes_order, pixel_size, size_px), cache_dir = params
        log('Loading file')
        load_array(ch_dicts) # unpacks into ch_dicts
        axes_order = axes_order.replace('c', "") # channels are separated
        for ch in ch_dicts:
            if ch[min_keys.VOLUME] or ch[min_keys.SURFACE]:
                ch["local_files"][min_keys.VOLUME] = VolumeIO().export_ch(ch, cache_dir, scn.MiN_remake, axes_order)
        # build the progress message by prepending each pending step (chained lowercasing keeps the sentence readable)
        progress = 'Loading objects to Blender'
        if any([ch['surface'] for ch in ch_dicts]):
            progress = 'Meshing surfaces, ' + progress.lower()
        if any([ch['labelmask'] for ch in ch_dicts]):
            progress = 'Making labelmasks, ' + progress.lower()
        log(progress)
    except Exception as e: # hacky way to track exceptions across threaded process
        params[0][0]['EXCEPTION'] = e
    return params
def load_blocking(params):
    # loads from the modal/threaded implementation
    # Main-thread half of the load: everything that must touch bpy data/operators.
    # `params` is the tuple returned by load_threaded.
    ch_dicts, (axes_order, pixel_size, size_px), cache_dir = params
    prev_active_obj = bpy.context.active_object  # restored at the end, best-effort
    scn = bpy.context.scene
    # reads env variables
    base_coll, cache_coll = min_base_colls(Path(scn.MiN_input_file).stem[:50], scn.MiN_reload)
    if scn.MiN_overwrite_background_color:
        set_background_color()
    if scn.MiN_overwrite_render_settings:
        set_render_settings()
    # --- Prepare container ---
    # On reload, MiN_reload points at the previously created container empty.
    container = scn.MiN_reload
    objs = parse_reload(container)
    if container is None:
        bpy.ops.object.empty_add(type="PLAIN_AXES")
        container = bpy.context.view_layer.objects.active
        container.name = Path(scn.MiN_input_file).stem[:50]
    # -- export labelmask --
    # label mask exporting is hard to move outside of blocking functions, as it uses the Blender abc export
    for ch in ch_dicts:
        if ch[min_keys.LABELMASK] and scn.MiN_update_data:
            ch["local_files"][min_keys.LABELMASK] = LabelmaskIO().export_ch(ch, cache_dir, scn.MiN_remake, axes_order)
    # -- axes, slice cube and scales --
    scale, scale_factor = parse_scale(size_px, pixel_size, objs)
    loc = parse_loc(scale, size_px, container)
    axes_obj = load_axes(size_px, pixel_size, scale, scale_factor, axes_obj=objs[min_keys.AXES], container=container)
    slice_cube = load_slice_cube(size_px, scale, scale_factor, container, slicecube=objs[min_keys.SLICECUBE])
    # One ChannelObject per component type (volume/surface/labelmask); data is
    # imported into the cache collection, objects live in the base collection.
    for min_type in [min_keys.VOLUME, min_keys.SURFACE, min_keys.LABELMASK]:
        if not any([ch[min_type] for ch in ch_dicts]) and objs[min_type] is None:
            continue
        data_io = DataIOFactory(min_type)
        ch_obj = ChannelObjectFactory(min_type, objs[min_type])
        for ch in ch_dicts:
            if ch[min_type] and scn.MiN_update_data:
                collection_activate(*cache_coll)
                ch['collections'][min_type], ch['metadata'][min_type] = data_io.import_data(ch, scale)
                collection_activate(*base_coll)
                ch_obj.update_ch_data(ch)
            if scn.MiN_update_settings:
                ch_obj.update_ch_settings(ch)
        ch_obj.set_parent_and_slicer(container, slice_cube, ch)
        container.location = loc
    # -- wrap up --
    collection_deactivate_by_name('cache')
    if scn.frame_current < scn.MiN_load_start_frame or scn.frame_current > scn.MiN_load_end_frame:
        scn.frame_set(scn.MiN_load_start_frame)
    try:
        # Restore the user's previous selection; may fail if the object was removed.
        if prev_active_obj is not None:
            prev_active_obj.select_set(True)
            bpy.context.view_layer.objects.active = prev_active_obj
    except:
        pass
    # after first load this should not be used again, to prevent overwriting user values
    scn.MiN_reload = container
    scn.MiN_overwrite_render_settings = False
    scn.MiN_enable_ui = True
    log('')
    return
def set_background_color():
    """Choose a world background based on the loaded channels.

    All emissive -> black, none emissive -> white, mixed -> grey.
    (With no channels at all, both conditions hold and white wins, as before.)
    """
    color = (0.2, 0.2, 0.2, 1)
    enabled = [ch.emission for ch in bpy.context.scene.MiN_channelList if (ch.surface or ch.volume) or ch.labelmask]
    if all(enabled):
        color = (0, 0, 0, 1)
    if not any(enabled):
        color = (1, 1, 1, 1)
    try:
        bpy.context.scene.world.node_tree.nodes["Background"].inputs[0].default_value = color
    except:
        # Best-effort: the world may have no node tree / Background node.
        pass
def set_render_settings():
    """Apply Microscopy Nodes' default render settings for responsive volume rendering."""
    scn = bpy.context.scene
    scn.render.engine = 'CYCLES'
    scn.view_settings.view_transform = 'Standard'
    # Eevee volumetrics
    scn.eevee.volumetric_tile_size = '1'
    scn.eevee.volumetric_end = 300
    scn.eevee.taa_samples = 64
    # Cycles sampling
    scn.cycles.preview_samples = 8
    scn.cycles.samples = 64
    scn.cycles.transparent_max_bounces = 40  # less slicing artefacts
    scn.cycles.volume_bounces = 32
    scn.cycles.volume_max_steps = 16  # less time to render
    scn.cycles.use_denoising = False  # denoising would also remove data-noise
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/dependent_props.py | .py | 2,461 | 74 | import bpy
from bpy.props import (StringProperty, FloatProperty,
PointerProperty, IntProperty,
BoolProperty, EnumProperty
)
# update functions are defined locally
from ..file_to_array import change_path, change_channel_ax, change_array_option, get_array_options, selected_array_option
from ..ui.channel_list import set_channels
import functools
from operator import attrgetter
import tempfile
from pathlib import Path
# Scene-level properties whose update callbacks depend on local functions
# (change_path / change_channel_ax / change_array_option / set_channels).
bpy.types.Scene.MiN_input_file = StringProperty(
    name="",
    description="image path, either to tif file, zarr root folder or zarr URL",
    update= change_path,
    options = {'TEXTEDIT_UPDATE'},
    default="",
    maxlen=1024,
    )
bpy.types.Scene.MiN_axes_order = StringProperty(
    name="",
    description="axes order (out of tzcyx)",
    default="",
    update=change_channel_ax,
    maxlen=6)
bpy.types.Scene.MiN_selected_array_option = EnumProperty(
    name="",
    description="Select the imported array or transform",
    items= get_array_options,
    update= change_array_option
)
# Number of channels; changing it rebuilds the channel UI list.
bpy.types.Scene.MiN_channel_nr = IntProperty(
    name = "",
    default = 0,
    update = set_channels,
)
def poll_empty(self, object):
    """Poll for MiN_reload: only EMPTY objects that parent a Microscopy Nodes child."""
    from ..load_components.load_generic import get_min_gn
    if object.type != 'EMPTY':
        return False
    return any(get_min_gn(child) is not None for child in object.children)
# Pointer to a previously loaded Microscopy Nodes container (filtered by poll_empty).
bpy.types.Scene.MiN_reload = PointerProperty(
    name = "",
    description = "Reload data of Microscopy Nodes object.\nCan be used to replace deleted (temp) files, change resolution, or channel settings.\nUsage: Point to previously loaded microscopy data.",
    type=bpy.types.Object,
    poll=poll_empty,
)
def switch_pixel_size(self, context):
    """Toggle the UI pixel-size fields between native and rescaled values."""
    scn = bpy.context.scene
    factors = selected_array_option().scale()
    if scn.MiN_pixel_sizes_are_rescaled:
        scn.MiN_xy_size *= factors[0]
        scn.MiN_z_size *= factors[2]
    else:
        scn.MiN_xy_size /= factors[0]
        scn.MiN_z_size /= factors[2]
# Whether the UI currently shows pixel sizes multiplied by the selected scale.
bpy.types.Scene.MiN_pixel_sizes_are_rescaled = BoolProperty(
    name= "Show rescaled pixel size.",
    default = False,
    update = switch_pixel_size
)
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/progress_handling.py | .py | 93 | 6 | import bpy
def log(string):
    """Publish *string* to the scene's progress field shown in the UI."""
    bpy.context.scene.MiN_progress_str = string
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/__init__.py | .py | 147 | 5 | from .collection_handling import *
from .node_handling import *
from .progress_handling import *
from .props import *
from .array_handling import * | Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/node_handling.py | .py | 5,525 | 160 | import bpy
from .. import min_nodes
import re
def get_nodes_last_output(group):
    """Fast lookup for unmodified trees: (last node, output node, used output socket).

    Tries the 'Group Output' node first, then 'Material Output'; tries input
    socket 0 first, then socket 1.
    """
    nodes = group.nodes
    try:
        out_node = nodes['Group Output']
    except:
        out_node = nodes['Material Output']
    try:
        final = out_node.inputs[0].links[0].from_node
        socket = out_node.inputs[0]
    except:
        final = out_node.inputs[1].links[0].from_node
        socket = out_node.inputs[1]
    return final, out_node, socket
def get_safe_nodes_last_output(group, make=False):
    # safer function for getting last node for user changable trees
    # still does not handle multiple output nodes
    # Returns (last node, output node, used output socket); (None, None, None)
    # when no output node exists and make is False; (None, output, None) when
    # the output node exists but nothing is linked to it.
    try:
        return get_nodes_last_output(group)
    except:
        pass
    xval = 0
    output = None
    # Track the right-most GROUP_OUTPUT so a newly made one is placed after it.
    for node in reversed(group.nodes):
        if node.type == "GROUP_OUTPUT":
            output = node
            xval = max(xval, node.location[0])
    if output is None and make == False:
        return None, None, None
    if output is None and make == True:
        output = group.nodes.new('NodeGroupOutput')
        output.location = (xval + 200, 0)
    if len(output.inputs[0].links) == 0:
        return None, output, None
    try:
        last = output.inputs[0].links[0].from_node
        out_input = output.inputs[0]
    except:
        # fall back to the second input socket (e.g. shader outputs)
        last = output.inputs[1].links[0].from_node
        out_input = output.inputs[1]
    return last, output, out_input
def get_safe_node_input(group, make=False):
    """Return the left-most GROUP_INPUT node of *group*.

    If none exists and *make* is True, create one and return it.
    Bug fix: the newly created node was previously assigned to a dead local
    and the function returned None instead of the created node.
    """
    innode = None
    xval = 100
    for node in reversed(group.nodes):
        if node.type == "GROUP_INPUT":
            innode = node
            xval = min(xval, node.location[0])
    if innode is None and make:
        innode = group.nodes.new('NodeGroupInput')
        innode.location = (xval - 300, 0)
    return innode
def insert_last_node(group, node, move = True, safe=False):
    """Splice *node* between the tree's last node and its output node.

    With safe=True a missing output node is created first.
    (`move` is part of the signature but currently unused.)
    """
    if safe:
        last, output, out_input = get_safe_nodes_last_output(group, make=True)
    else:
        last, output, out_input = get_nodes_last_output(group)
    loc = output.location
    output.location = [loc[0] + 300, loc[1]]
    node.location = [loc[0] - 300, loc[1]]
    if last is not None:
        group.links.new(last.outputs[0], node.inputs[0])
    group.links.new(node.outputs[0], output.inputs[0])
def realize_instances(obj):
    """Append a Realize Instances node at the end of the object's GeometryNodes tree."""
    tree = obj.modifiers['GeometryNodes'].node_group
    realize_node = tree.nodes.new('GeometryNodeRealizeInstances')
    insert_last_node(tree, realize_node)
def append(node_name, link=False):
    """Append (or link) the node group *node_name* from the bundled asset file.

    Bug fix: `warnings` and `os` were used without being imported in this
    module; they are imported locally here.
    """
    import os
    import warnings
    node = bpy.data.node_groups.get(node_name)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if not node or link:
            # NOTE(review): `mn_data_file` is not defined or imported anywhere in
            # this module — confirm where the asset .blend path should come from.
            bpy.ops.wm.append(
                directory = os.path.join(mn_data_file, 'NodeTree'),
                filename = node_name,
                link = link
            )
    return bpy.data.node_groups[node_name]
def get_min_gn(obj):
    """Return the object's 'Microscopy Nodes' modifier, or None when absent."""
    return next(
        (mod for mod in obj.modifiers if 'Microscopy Nodes' in mod.name),
        None,
    )
def get_readable_enum(enum_name, enum):
    """Human-readable name of the *enum* value of scene enum property *enum_name*."""
    prop = bpy.context.scene.bl_rna.properties[enum_name]
    return prop.enum_items[enum].name
# Maps socket-kind tags (embedded in a socket's default_attribute_name) to the
# suffix appended to the channel name in the socket's UI label.
MIN_SOCKET_TYPES = {
    'SWITCH' : "",
    'VOXEL_SIZE' : "Voxel Size",
    'THRESHOLD' : "Threshold"
}
# Specific for channel objects, so should move this code there at some point
def new_socket(node_group, ch, type, min_type, internal_append="", ix=None):
    """Create an input socket on *node_group* for channel *ch*.

    The channel identifier, component type and *internal_append* are encoded in
    the socket's default_attribute_name so get_socket can find it later.
    """
    interface = node_group.interface
    interface.new_socket(name="socket name not set", in_out="INPUT", socket_type=type)
    socket = interface.items_tree[-1]
    socket.default_attribute_name = f"[{ch['identifier']}_{min_type}_{internal_append}]"
    set_name_socket(socket, ch['name'])
    if ix is not None:
        interface.move(socket, ix)
    return socket
def set_name_socket(socket, ch_name):
    """Label *socket* with the channel name plus the suffix for its socket kind."""
    for tag, suffix in MIN_SOCKET_TYPES.items():
        if tag in socket.default_attribute_name:
            socket.name = " ".join([ch_name, suffix])
def get_socket(node_group, ch, min_type, return_ix=False, internal_append=""):
    """Find the interface socket tagged for channel *ch* and kind *min_type*.

    Returns the socket (or (socket, index) when return_ix), or None / (None, None).
    """
    pattern = f"{ch['identifier']}_{min_type}_{internal_append}+"
    for ix, socket in enumerate(node_group.interface.items_tree):
        if re.search(string=socket.default_attribute_name, pattern=pattern) is not None:
            found = node_group.interface.items_tree[ix]
            return (found, ix) if return_ix else found
    return (None, None) if return_ix else None
def insert_slicing(group, slice_obj):
    # Wires a Slice Cube node group between the material's last shader node and
    # its output, driven by the object texture coordinates of *slice_obj*.
    nodes = group.nodes
    links = group.links
    lastnode, outnode, output_input = get_nodes_last_output(group)
    texcoord = nodes.new('ShaderNodeTexCoord')
    texcoord.object = slice_obj
    texcoord.width = 200
    texcoord.location = (outnode.location[0], outnode.location[1]+100)
    slicecube = nodes.new('ShaderNodeGroup')
    slicecube.node_tree = min_nodes.slice_cube_node_group()
    slicecube.name = "Slice Cube"
    slicecube.width = 250
    slicecube.location = (outnode.location[0]+ 270, outnode.location[1])
    links.new(texcoord.outputs.get('Object'),slicecube.inputs.get('Slicing Object'))
    slicecube.inputs[0].show_expanded = True
    if len(lastnode.inputs) > 0:
        lastnode.inputs[0].show_expanded = True
    # Reroute: last node -> slice cube -> original output socket.
    links.new(lastnode.outputs[0], slicecube.inputs.get("Shader"))
    links.new(slicecube.outputs.get("Shader"), output_input)
    # Shift the output node right to make room for the inserted nodes.
    outnode.location = (outnode.location[0]+550, outnode.location[1])
    return
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/array_handling.py | .py | 823 | 23 | import numpy as np
import dask.array as da
def take_index(imgdata, indices, dim, axes_order):
    """Take *indices* along axis *dim*; return *imgdata* unchanged if the axis is absent."""
    if dim not in axes_order:
        return imgdata
    return da.take(imgdata, indices=indices, axis=axes_order.find(dim))
def len_axis(dim, axes_order, shape):
    """Length of axis *dim* in *shape*; 1 when the axis is not in *axes_order*."""
    return shape[axes_order.find(dim)] if dim in axes_order else 1
def expand_to_xyz(arr, axes_order):
    """Return a copy of *arr* reordered to (x, y, z), padding missing axes to length 1.

    Should only be called after computing the dask array, with no t/c axes left
    in *axes_order*; handles 1D and 2D input.
    """
    order = axes_order
    for dim in 'xyz':
        if dim not in order:
            arr = np.expand_dims(arr, axis=0)
            order = dim + order
    source_axes = [order.find(d) for d in 'xyz']
    return np.moveaxis(arr, source_axes, [0, 1, 2]).copy()
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/props.py | .py | 4,026 | 134 | import bpy
from bpy.props import (StringProperty, FloatProperty,
PointerProperty, IntProperty,
BoolProperty, EnumProperty
)
import platform
from enum import Enum
import tempfile
class min_keys(Enum):
    # Component kinds a channel can be loaded as; used as keys in channel dicts
    # and to index per-kind objects/collections.
    NONE = 0
    AXES = 1
    VOLUME = 2
    SURFACE = 3
    LABELMASK = 4
    SLICECUBE = 5
# # -- props --
## Names start with MiN for Microscopy Nodes (MN = Molecular Nodes)
bpy.types.Scene.MiN_remake = bpy.props.BoolProperty(
    name = "MiN_remake",
    description = "Force remaking vdb files",
    default = False
    )
bpy.types.Scene.MiN_load_start_frame = bpy.props.IntProperty(
    name = "",
    description = "First timeframe to be loaded",
    default = 0,
    min=0,
    soft_max=10000,
    )
bpy.types.Scene.MiN_load_end_frame = bpy.props.IntProperty(
    name = "",
    description = "Last timeframe to be loaded.",
    default = 100,
    soft_max= 10000,
    min=0,
    )
bpy.types.Scene.MiN_overwrite_background_color = bpy.props.BoolProperty(
    name = "Overwrite background color",
    description = "Sets background to white if any non-emissive channels are loaded - sets to black if only emissive channels are loaded",
    default = True
    )
bpy.types.Scene.MiN_overwrite_render_settings = bpy.props.BoolProperty(
    name = "Overwrite render settings",
    description = "Sets render settings to Microscopy Nodes defaults, to ensure relatively responsive large volume rendering.",
    default = True
    )
bpy.types.Scene.MiN_xy_size = FloatProperty(
    name="",
    description="xy physical pixel size in micrometer (only 2 digits may show up, but it is accurate to 6 digits)",
    default=1.0)
bpy.types.Scene.MiN_z_size = FloatProperty(
    name="",
    description="z physical pixel size in micrometer (only 2 digits may show up, but it is accurate to 6 digits)",
    default=1.0)
bpy.types.Scene.MiN_unit = EnumProperty(
    name = '',
    items=[
        ("ANGSTROM", "Å","Ångström, 0.1 nanometer" ,"", 0),
        ("NANOMETER", "nm","Nanometer" ,"", 1),
        ("MICROMETER", "µm","Micrometer" ,"", 2),
        ("MILLIMETER", "mm","Millimeter" ,"", 3),
        ("METER", "m","Meter" ,"", 4),
        ("AU", "a.u.","Arbitrary units, used to calculate an isotropic pixel size in Z." ,"", 5),
    ],
    description= "Unit of pixel sizes",
    default="AU",
    )
bpy.types.Scene.MiN_ch_names = StringProperty( # | separated list of channel names from file
    name = "",
    )
# necessary to make uilist work
bpy.types.Scene.MiN_ch_index = IntProperty(
    name = "",
    )
bpy.types.Scene.MiN_enable_ui = BoolProperty(
    name = "",
    default = False,
    )
bpy.types.Scene.MiN_load_finished = BoolProperty(
    name = "",
    default = False,
    )
bpy.types.Scene.MiN_update_data = BoolProperty(
    name = "",
    description = "Reload the data from local files if they exist, or make new local files",
    default = True,
    )
bpy.types.Scene.MiN_update_settings = BoolProperty(
    name = "",
    description = "Update microscopy nodes channel settings, reapplies import transforms, so will move your data.",
    default = True,
    )
bpy.types.Scene.MiN_chunk = BoolProperty(
    name = "Chunking",
    description = 'Loads volumes in chunks of axis < 2048 px if checked.\nUnchunked large volumes WILL crash MacOS-ARM Blender outside of Cycles.\nChunked volumes can cause Cycles rendering artefacts.\nChunking may be slightly more RAM/network-efficient.',
    default = False,
    )
# Free-text progress message shown in the UI while loading (set via log()).
bpy.types.Scene.MiN_progress_str = bpy.props.StringProperty(
    name = "",
    description = "current process in load",
    default="",
    )
bpy.types.Scene.MiN_yaml_preferences = StringProperty(
    description = 'File path to a .yaml file that overrides the Microscopy Nodes preferences - use for bpy usage',
    default = "",
    subtype = 'FILE_PATH',
    )
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/handle_blender_structs/collection_handling.py | .py | 2,361 | 63 | import bpy
def get_collection(name, supercollections=[], duplicate=False, under_active_coll=False):
    # duplicate is not duplicated-name-safe, so intention is to have types of names with/without duplication (programmer's choice)
    # Walk both the data collection tree and the view-layer layer-collection tree
    # in parallel, since activation/exclusion lives on the layer collection.
    coll = bpy.context.scene.collection
    lcoll = bpy.context.view_layer.layer_collection
    for ix, scoll in enumerate(supercollections):
        coll = coll.children.get(scoll)
        lcoll = lcoll.children[scoll]
    if under_active_coll:
        coll = bpy.context.collection
        lcoll = bpy.context.view_layer.active_layer_collection
    newcoll = coll.children.get(name)
    if duplicate or newcoll is None:
        # Blender may rename on collision (e.g. 'name.001'); re-read the name.
        newcoll = bpy.data.collections.new(name)
        coll.children.link(newcoll)
        name = newcoll.name
    lcoll = lcoll.children[name]
    return newcoll, lcoll
def collection_by_name(name, supercollections=[], duplicate=False):
    """Get-or-create the named collection, activate it, return (coll, layer_coll)."""
    pair = get_collection(name, supercollections, duplicate, under_active_coll=False)
    collection_activate(*pair)
    return pair
def get_current_collection():
    """Return the active (collection, layer collection) pair of the current context."""
    ctx = bpy.context
    return ctx.collection, ctx.view_layer.active_layer_collection
def make_subcollection(name, duplicate=False):
    """Create/get *name* under the active collection, activate it, return the pair."""
    pair = get_collection(name, supercollections=[], duplicate=duplicate, under_active_coll=True)
    collection_activate(*pair)
    return pair
def collection_deactivate_by_name(name, supercollections=[]):
    """Exclude and hide the named collection in viewport and render."""
    coll, lcoll = get_collection(name, supercollections, False)
    lcoll.exclude = True
    lcoll.hide_viewport = True
    coll.hide_render = True
    return coll, lcoll
def collection_activate(coll, lcoll):
    """Make *lcoll* the active layer collection and ensure it is visible and renderable."""
    bpy.context.view_layer.active_layer_collection = lcoll
    lcoll.exclude = False
    lcoll.hide_viewport = False
    coll.hide_render = False
def clear_collection(coll):
    """Remove all objects in *coll*, then the collection itself; no-op for None.

    Fixes: side-effect list comprehension replaced by a loop, and the objects
    are snapshotted with list() so we don't mutate the bpy collection while
    iterating it.
    """
    if coll is None:
        return
    for obj in list(coll.objects):
        bpy.data.objects.remove(obj)
    bpy.data.collections.remove(coll)
def min_base_colls(fname, min_reload):
    # make or get collections
    # Layout: 'Microscopy Nodes' (base) + 'cache'/<fname> for imported data.
    # A fresh cache sub-collection is made unless this is a reload.
    base_coll = collection_by_name('Microscopy Nodes', supercollections=[])
    collection_activate(*base_coll)
    collection_by_name('cache',supercollections=[])
    cache_coll = collection_by_name(fname, supercollections=['cache'], duplicate=(min_reload is None))
    # Leave the base collection active for subsequent object creation.
    collection_activate(*base_coll)
    return base_coll, cache_coll
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/file_to_array/tif.py | .py | 1,795 | 47 | from .arrayloading import ArrayLoader
from .arrayoptions import add_array_option
import tifffile
class TifLoader(ArrayLoader):
    """Array loader for (ImageJ-)tiff files."""
    suffixes = ['.tif', '.TIF', '.tiff', '.TIFF']

    def set_file_globals(self, input_file):
        """Read axes order and physical unit from the tif metadata."""
        with tifffile.TiffFile(input_file) as ifstif:
            # 's' (samples) is treated as channels, 'q' (unknown) as z
            self._set_axes_order(ifstif.series[0].axes.lower().replace('s', 'c').replace('q','z'))
            try: # try for non imagej tif: imagej_metadata is None -> dict(None) raises TypeError
                if 'unit' in dict(ifstif.imagej_metadata):
                    self._set_unit(dict(ifstif.imagej_metadata)['unit'])
            except TypeError as e:
                self._set_unit(None)
        return

    def fill_array_options(self, input_file):
        # tif files expose a single native array
        xy_size = self._xy_size(input_file)
        z_size = self._z_size(input_file)
        shape = tifffile.TiffFile(input_file).series[0].shape
        add_array_option(xy_size=xy_size, z_size=z_size, shape=shape)
        return

    def load_array(self, input_file, array_option):
        """Return the image data as a lazy zarr store.

        Bug fix: removed unreachable memmap fallback that sat after the return.
        """
        return tifffile.imread(input_file, aszarr=True)

    def _xy_size(self, input_file):
        # XResolution is stored as a (denominator, numerator)-style rational.
        try:
            return tifffile.TiffFile(input_file).pages[0].tags['XResolution'].value[1]/tifffile.TiffFile(input_file).pages[0].tags['XResolution'].value[0]
        except Exception as e:
            print(e)
            return 1.0

    def _z_size(self, input_file):
        # ImageJ stores z spacing under 'spacing'; default to 1.0 when absent.
        try:
            return dict(tifffile.TiffFile(input_file).imagej_metadata)['spacing']
        except Exception as e:
            # print(e)
            return 1.0
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/file_to_array/__init__.py | .py | 2,784 | 72 | from .tif import TifLoader
from .zarr import ZarrLoader
from .arrayoptions import ArrayOption, get_array_options, selected_array_option
from ..handle_blender_structs.progress_handling import log
import bpy
# Property-group classes to register, and file-format loaders tried in order.
CLASSES = [ArrayOption]
LOADERS = [TifLoader, ZarrLoader]
def get_loader():
    """Instantiate loaders in order and return the first that accepts the path, else None."""
    return next(
        (candidate for candidate in (cls() for cls in LOADERS) if candidate.check_path()),
        None,
    )
def change_path(self, context):
    # Update callback for MiN_input_file: reset all file-derived scene state,
    # then hand the new path to the matching loader.
    bpy.context.scene.MiN_channel_nr = 0
    bpy.context.scene.MiN_enable_ui = False
    context.scene.property_unset("MiN_xy_size")
    context.scene.property_unset("MiN_z_size")
    context.scene.property_unset("MiN_axes_order")
    context.scene.property_unset("MiN_load_start_frame")
    context.scene.property_unset("MiN_load_end_frame")
    context.scene.property_unset("MiN_selected_array_option")
    context.scene.property_unset("MiN_ch_names")
    context.scene.MiN_array_options.clear()
    log("")
    context.scene.property_unset("MiN_reload")
    if get_loader() is not None:
        try:
            get_loader().change_path(context)
        except Exception as e:
            # Parsing failed; show the error in the UI and keep it disabled.
            print(e)
            log(f"Error loading file: {e}")
            return
        bpy.context.scene.MiN_enable_ui = True
    return
def change_array_option(self, context):
    # Update callback for MiN_selected_array_option: sync channel count, frame
    # range and pixel sizes to the newly selected array/scale.
    if context.scene.MiN_channel_nr != selected_array_option().len_axis('c'):
        context.scene.MiN_channel_nr = selected_array_option().len_axis('c')
    if bpy.context.scene.MiN_load_end_frame > selected_array_option().len_axis('t')-1:
        bpy.context.scene.MiN_load_end_frame = selected_array_option().len_axis('t')-1
    level = selected_array_option()
    context.scene.MiN_xy_size = level['xy_size']
    context.scene.MiN_z_size = level['z_size']
    if bpy.context.scene.MiN_pixel_sizes_are_rescaled:
        # UI shows rescaled sizes, so multiply by the option's scale factors.
        bpy.context.scene.MiN_xy_size *= level.scale()[0]
        bpy.context.scene.MiN_z_size *= level.scale()[2]
    # if level.ch_names != "":
    #     for ix, ch in enumerate(context.scene.MiN_channelList):
    #         ch['name'] = channels[ix]
    #     channels = level.ch_names.split("|")
    #     if context.scene.MiN_channel_nr != len(channels) or context.scene.MiN_axes_order != level['axes_order']: # this updates n channels and resets names
    #         context.scene.MiN_axes_order = level['axes_order']
    #         context.scene.MiN_channel_nr = len(channels)
    #     for ix, ch in enumerate(context.scene.MiN_channelList):
    #         if channels[ix] != "":
    #             ch['name'] = channels[ix]
    return
def load_array(ch_dicts):
    """Unpack the selected array into the channel dicts via the active loader."""
    loader = get_loader()
    loader.unpack_array(ch_dicts)
def change_channel_ax(self, context):
    """Update callback: rebuild array options when the axes-order string changes."""
    input_file = context.scene.MiN_input_file
    get_loader().reset_options(input_file)
def arr_shape():
    """Return the shape of the currently selected array option.

    Bug fix: the shape was previously computed but never returned.
    """
    return selected_array_option().shape()
3D | aafkegros/MicroscopyNodes | microscopynodes/file_to_array/arrayloading.py | .py | 8,615 | 199 | import bpy
from pathlib import Path
import numpy as np
import dask.array as da
from .arrayoptions import copy_array_option, selected_array_option
from ..handle_blender_structs import len_axis
MAX_BLENDER_SIZE_GIB = 4
class ArrayLoader():
    """Base class for file-format loaders that turn an on-disk array into channel data.

    Subclasses declare the suffixes they handle and implement set_file_globals,
    fill_array_options and load_array.
    """
    # the suffixes of the file path this loader accepts; overridden by subclasses
    # (bug fix: was misspelled `suffix` while check_path reads `self.suffixes`)
    suffixes = [None]

    # callback functions
    def check_path(self):
        """True when the current input file's suffix matches this loader."""
        return Path(bpy.context.scene.MiN_input_file).suffix in self.suffixes

    def change_path(self, context):
        # only called if check_path == True
        self.set_file_globals(bpy.context.scene.MiN_input_file)
        self.reset_options(bpy.context.scene.MiN_input_file)
        return

    def reset_options(self, context):
        """Rebuild the scene's array options from the current input file.

        NOTE(review): the argument is unused (callers pass the file path or a
        context); the body always reads bpy.context.scene.MiN_input_file.
        """
        bpy.context.scene.MiN_array_options.clear()
        self.fill_array_options(bpy.context.scene.MiN_input_file)
        self._add_generated_scales() # workaround for 4 GiB limit
        self._set_ui()
        self._write_ch_names_to_channels()
        bpy.context.scene.MiN_selected_array_option = str(len(bpy.context.scene.MiN_array_options) - 1)
        return

    # -- abstract methods to implement in subclass --
    def set_file_globals(self, input_file):
        # can set the following:
        self._set_axes_order("") # OBLIGATORY - takes a string of tcxyz in any order or subset of these
        self._set_unit("") # OPTIONAL - tries to parse unit string, defaults are "ANGSTROM", "NANOMETER", "MICROMETER", "MILLIMETER", "METER"
        self._set_ch_names([]) # OPTIONAL - sets a list of channel names from file
        return

    def fill_array_options(self, input_file):
        # uses add_array_option() to fill out a list of all native arrays
        return

    def load_array(self, input_file, array_option):
        # gets the array data from the selected option
        return

    # -- default methods --
    def _set_unit(self, unit_str):
        try:
            bpy.context.scene.MiN_unit = parse_unit(unit_str)
        except Exception as e:
            # print(f'did not parse unit ({unit_str})', e)
            bpy.context.scene.MiN_unit = "AU"

    def _set_axes_order(self, axes_order):
        try:
            bpy.context.scene.MiN_axes_order = axes_order
        except:
            print('did not parse axis order')
            bpy.context.scene.property_unset("MiN_axes_order")

    def _set_ch_names(self, lst):
        try:
            bpy.context.scene.MiN_ch_names = "|".join([str(name) for name in lst])
        except:
            bpy.context.scene.property_unset('MiN_ch_names')

    def _write_ch_names_to_channels(self):
        # setting of channel list can only be done once n channels is known - which is inferred from array
        if bpy.context.scene.MiN_ch_names == "":
            return
        ch_names = bpy.context.scene.MiN_ch_names.split("|")
        for ix, ch in enumerate(bpy.context.scene.MiN_channelList):
            ch['name'] = ch_names[ix % len(ch_names)]
        return

    def shape(self):
        # bug fix: previously called the undefined name `selected_array_scale`
        return selected_array_option().shape()

    def _add_generated_scales(self):
        # this is a workaround for blender/blender#136263 - scales over 4 GiB give issues in Eevee and Viewport
        # takes the last named scale and generates smaller scales - these will be downscaled by Microscopy Nodes
        last_option = bpy.context.scene.MiN_array_options[-1] # guaranteed to be last in zarr/tif
        if last_option.size_gibibytes(exclude_dims='ct') > MAX_BLENDER_SIZE_GIB and last_option.len_axis('c') > 1:
            # a single channel+timepoint is still too big: shrink xy (and z if needed)
            scale_option = copy_array_option(copy_from = last_option)
            while scale_option.size_gibibytes(exclude_dims='ct') > MAX_BLENDER_SIZE_GIB:
                scale_option.resize((2,2,1))
                if scale_option.size_gibibytes(exclude_dims='ct') > MAX_BLENDER_SIZE_GIB:
                    scale_option.resize((1,1,2))
        if last_option.size_gibibytes(exclude_dims='t') > MAX_BLENDER_SIZE_GIB:
            # all channels of one timepoint too big: add a scale that fits
            scale_option = copy_array_option(copy_from = last_option)
            while scale_option.size_gibibytes(exclude_dims='t') > MAX_BLENDER_SIZE_GIB:
                scale_option.resize((2,2,1))
                if scale_option.size_gibibytes(exclude_dims='t') > MAX_BLENDER_SIZE_GIB:
                    scale_option.resize((1,1,2))
        return

    def _set_ui(self):
        """Fill icon/label/tooltip for every array option based on its size."""
        for option in bpy.context.scene.MiN_array_options:
            option.ui_text = f"{option.shape()}, up to {option.human_size()}"
            if len(option['path']) > 0:
                option.ui_text = f"{option['path']}: {option.ui_text}"
            if option.is_rescaled:
                if option.scale()[2] == 1:
                    option.ui_text = f"{option.ui_text}, downscaled in XY"
                else:
                    option.ui_text = f"{option.ui_text}, downscaled in XYZ"
            if option.is_rescaled:
                option.description = "Downscaled volume. "
            else:
                option.description = "Native volume. "
            if option.size_gibibytes(exclude_dims='t') < MAX_BLENDER_SIZE_GIB:
                option.icon = 'VOLUME_DATA'
                option.description += "Full array can easily fit into Blender"
            elif option.size_gibibytes(exclude_dims='ct') < MAX_BLENDER_SIZE_GIB:
                option.icon = 'EVENT_ONEKEY'
                option.description += "One volume channel possible at this scale in Eevee and Viewport (limited to 4 GiB per timepoint)"
            else:
                option.icon = 'WARNING_LARGE'
                option.description += "This scale will only work in Cycles render mode - may cause freezing in other modes."
        return

    def unpack_array(self, ch_dicts):
        # this makes the array a dictionary of single channels, as Mi Nodes has a relatively single-channel data model
        # dask array makes sure lazy actions actually get performed lazily
        axes_order = bpy.context.scene.MiN_axes_order
        chunks = ['auto' if dim in 'xyz' else 1 for dim in axes_order] # time and channels are always loadable as separate chunks as they go to separate vdbs
        imgdata = da.from_zarr(self.load_array(bpy.context.scene.MiN_input_file, selected_array_option()), chunks=chunks)
        # imgdata = da.from_array(self.load_array(bpy.context.scene.MiN_input_file, selected_array_option()), chunks=chunks)
        if len(axes_order) != len(imgdata.shape):
            raise ValueError("axes_order length does not match data shape: " + str(imgdata.shape))
        if selected_array_option().is_rescaled:
            imgdata = map_resize(imgdata)
            # imgdata = imgdata.compute_chunk_sizes()
        ix = 0
        for ix, ch in enumerate(ch_dicts):
            if ch['data'] is None:
                ch['data'] = da.take(imgdata, indices=ix, axis=axes_order.find('c')) if 'c' in axes_order else imgdata
                if np.issubdtype(ch['data'].dtype,np.floating):
                    ch['max_val'] = np.max(ch['data'])
            if ix >= selected_array_option().len_axis('c'):
                break
        return
def parse_unit(unit_str):
    """Map a free-form unit string to a MiN_unit enum identifier; 'AU' when unknown."""
    aliases = {
        "ANGSTROM": ['A', 'Å', '\\u00C5', 'ANGSTROM', 'ÅNGSTROM', 'ÅNGSTRÖM', 'Ångstrom', 'angstrom', 'ångström', 'ångstrom'],
        "NANOMETER": ['nm', 'nanometer', 'NM', 'NANOMETER'],
        "MICROMETER": ['\\u00B5m', 'micron', 'micrometer', 'microns', 'um', 'µm', 'MICROMETER'],
        "MILLIMETER": ['mm', 'millimeter', 'MM', 'MILLIMETER'],
        "METER": ['m', 'meter', 'M', 'METER'],
    }
    for unit, spellings in aliases.items():
        if unit_str in spellings:
            return unit
    return "AU"
def map_resize(dask_arr):
    # Lazily downscale *dask_arr* to the shape of the selected (rescaled) array
    # option by nearest-neighbour index sampling per chunk.
    target_shape = np.array(selected_array_option().shape())
    scale = np.array(dask_arr.shape) / target_shape
    # Compute the target chunks per block
    target_chunks = tuple(
        tuple(int(np.floor(c / s)) for c, s in zip(block_chunk, scale))
        for block_chunk in dask_arr.chunks
    )
    # Flatten target_chunks (map_blocks expects tuple of ints per dim)
    # NOTE(review): max() per dim assumes roughly uniform chunk sizes — confirm
    # behaviour for ragged edge chunks.
    target_chunks_flat = tuple(max(c) for c in target_chunks)
    def resize_block(block):
        # Pick evenly spaced source indices per axis (nearest-neighbour sampling).
        in_shape = np.array(block.shape)
        out_shape = np.floor(in_shape / scale).astype(int)
        coords = [np.floor(np.arange(o) * s).astype(int)
                  for o, s in zip(out_shape, in_shape / out_shape)]
        grid = np.ix_(*coords)
        return block[grid]
    return dask_arr.map_blocks(resize_block,
                               dtype=dask_arr.dtype,
                               chunks=target_chunks_flat)
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/file_to_array/arrayoptions.py | .py | 4,222 | 130 | import bpy
import numpy as np
class ArrayOption(bpy.types.PropertyGroup):
    """One loadable array (native scale or generated downscale) of the input file."""
    identifier: bpy.props.IntProperty()
    # NOTE: rescaled xy_size and z_size by MiN rescaling is done elsewhere.
    xy_size: bpy.props.FloatProperty()
    z_size: bpy.props.FloatProperty()
    # for generated scales
    # (bug fix: was declared FloatProperty but only ever assigned/read as a boolean)
    is_rescaled: bpy.props.BoolProperty()
    # UI
    icon: bpy.props.StringProperty()
    ui_text: bpy.props.StringProperty()
    description: bpy.props.StringProperty()
    # zarr
    path: bpy.props.StringProperty() # internal path
    store: bpy.props.StringProperty() # zarr store
    # |-separated lists
    shape_str : bpy.props.StringProperty() # has getters and setters to map to tuple
    scale_str : bpy.props.StringProperty() # has getters and setters to map to tuple

    def len_axis(self, dim='c'):
        """Length of axis *dim* in this option's shape; 1 when the axis is absent."""
        if dim not in bpy.context.scene.MiN_axes_order:
            return 1
        return self.shape()[bpy.context.scene.MiN_axes_order.find(dim)]

    def shape(self):
        """Shape as a list of ints (decoded from the |-separated string)."""
        return [int(dim) for dim in self.shape_str.split("|")]

    def set_shape(self, shape):
        self.shape_str = "|".join([str(dim) for dim in shape])

    def scale(self):
        """Downscale factors (x, y, z) as a list of ints."""
        return [int(dim) for dim in self.scale_str.split("|")]

    def set_scale(self, scale):
        self.scale_str = "|".join([str(dim) for dim in scale])

    def resize(self, scaling_vector):
        # resizes the description of the scale only - resizing of array is called in unpack_array
        self.is_rescaled = True
        newshape = []
        for dim, axislen in zip(bpy.context.scene.MiN_axes_order, self.shape()):
            if dim in 'xyz':
                newshape.append(int(axislen // scaling_vector['xyz'.find(dim)]))
            else:
                newshape.append(axislen)
        self.set_scale(np.array(self.scale()) * scaling_vector)
        self.set_shape(newshape)

    def size_bytes(self, exclude_dims=''):
        """Estimated in-Blender size in bytes, optionally ignoring axes (e.g. 'ct')."""
        estimated_max_size = 1
        for dim in self.shape():
            estimated_max_size *= dim
        for dim in exclude_dims:
            estimated_max_size /= self.len_axis(dim)
        return estimated_max_size *4 # vdb's are 32 bit floats == 4 byte per voxel

    def size_gibibytes(self, exclude_dims=''):
        return self.size_bytes(exclude_dims) / 2**30

    def human_size(self, exclude_dims=''):
        return _human_size(self.size_bytes(exclude_dims))
def _human_size(bytes, units=[' bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']):
return f"{bytes:.2f} {units[0]}" if bytes < 1024 else _human_size(bytes / 1024, units[1:])
def selected_array_option():
    # Returns the ArrayOption currently selected in the UI enum, or None when
    # no options have been loaded yet.
    try:
        return bpy.context.scene.MiN_array_options[int(bpy.context.scene.MiN_selected_array_option)]
    except IndexError:
        # Empty options collection: the enum falls back to '0', which indexes nothing.
        return None
def get_array_options(scene, context):
    """EnumProperty items callback: one entry per array option, placeholder when empty."""
    options = context.scene.MiN_array_options
    if len(options) == 0:
        return [('0', '', '', '', 0)]
    return [
        (str(ix), opt.ui_text, opt.description, opt.icon, ix)
        for ix, opt in enumerate(options)
    ]
def copy_array_option(copy_from):
    """Append a new array option and populate it from `copy_from`.

    `copy_from` may be a mapping (as produced by the zarr parser) or an
    object exposing the same fields as attributes.
    """
    option = add_array_option()
    if 'shape' in copy_from:  # shape is a multi-value field stored as a string
        option.set_shape(copy_from['shape'])
        copy_from.pop('shape')
    for key in copy_from.keys():
        try:
            setattr(option, key, getattr(copy_from, key))
        except AttributeError:
            # mapping-style source: fall back to item access
            setattr(option, key, copy_from[key])
    option.identifier = len(bpy.context.scene.MiN_array_options) - 1
    return option
def add_array_option(xy_size=1.0, z_size=1.0, shape=(1,1,1), copy_from=None, path="", store=""):
    """Append a fresh array option to the scene collection.

    Every field is assigned explicitly so all keys exist on the new
    property group, even when only defaults are used.
    """
    option = bpy.context.scene.MiN_array_options.add()
    option.identifier = len(bpy.context.scene.MiN_array_options) - 1
    option.xy_size = xy_size
    option.z_size = z_size
    option.set_shape(shape)
    option.path = path
    option.store = store
    option.is_rescaled = False
    for text_field in ('icon', 'ui_text', 'description'):
        setattr(option, text_field, "")
    option.scale_str = "1|1|1"
    return option
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/file_to_array/zarr.py | .py | 4,641 | 124 | from .arrayloading import ArrayLoader
from ..handle_blender_structs.progress_handling import log
import numpy as np
import zarr
import json
import os
import bpy
from pathlib import Path
from urllib.parse import urljoin
from .arrayoptions import copy_array_option
import s3fs
# Store kwargs per OME-Zarr spec version (v0.4 uses "/" key separators,
# v0.1 uses "."); presumably passed to a zarr store constructor - confirm.
OME_ZARR_V_0_4_KWARGS = dict(dimension_separator="/", normalize_keys=False)
OME_ZARR_V_0_1_KWARGS = dict(dimension_separator=".")
class ZarrLoader(ArrayLoader):
    """Loads OME-Zarr arrays from local paths, file:/http URLs, or s3:// stores."""
    suffixes = ['.zarr']

    def check_path(self):
        # .zarr can also not be the suffix, so match anywhere in the path
        super().check_path()
        return self.suffixes[0] in str(bpy.context.scene.MiN_input_file)

    def set_file_globals(self, input_file):
        """Apply axes order, unit and channel names from the OME metadata."""
        try:
            file_globals, _ = self.parse_zattrs(input_file)
        except KeyError as e:
            print(f"key error: {e}")
            log(f"Could not parse .zattrs")
            # BUGFIX: without this return, the unbound `file_globals` below
            # raised NameError after the (already logged) parse failure.
            return
        self._set_axes_order(file_globals['axes_order'])
        if 'unit' in file_globals:
            self._set_unit(file_globals['unit'])
        self._set_ch_names(file_globals['ch_names'])
        return

    def fill_array_options(self, input_file):
        """Register one array option per multiscale dataset in the store."""
        try:
            _, file_array_options = self.parse_zattrs(input_file)
        except KeyError as e:
            print(f"key error: {e}")  # the log line points users here
            log(f"Could not parse .zattrs, see print log for detail")
            # BUGFIX: without this return, the unbound `file_array_options`
            # below raised NameError after the parse failure.
            return
        for file_option in file_array_options:
            copy_array_option(file_option)

    def load_array(self, input_file, array_option):
        """Open the zarr array selected by the given array option."""
        return self.open_zarr(array_option.store)[array_option.path]

    def open_zarr(self, uri):
        """Open a zarr group from a file: URI, s3:// URI, local path or URL."""
        if uri.startswith("file:"):
            # Primarily this is to deal with spaces in Windows paths (encoded as %20).
            # BUGFIX: unquote_to_bytes was used without ever being imported.
            from urllib.parse import unquote_to_bytes
            uri = os.fsdecode(unquote_to_bytes(uri))
        uri = str(uri)
        if uri.startswith("s3://"):
            # anonymous access; public OME-Zarr buckets don't need credentials
            store = s3fs.S3Map(root=uri, s3=s3fs.S3FileSystem(anon=True), check=False)
        else:
            store = uri
        return zarr.open_group(store, mode='r')

    def parse_zattrs(self, uri):
        """Parse the OME multiscales metadata of the store at `uri`.

        Returns (file_globals, array_options): global settings (axes order,
        unit, channel names) plus one dict per multiscale dataset.
        Raises KeyError when mandatory OME keys are absent.
        """
        group = self.open_zarr(uri)
        try:
            multiscale_spec = group.attrs['multiscales'][0]
        except (KeyError, IndexError):
            # OME-Zarr >= 0.5 nests the multiscales under an "ome" key
            multiscale_spec = group.attrs['ome']['multiscales'][0]
        file_globals = {}
        array_options = []
        file_globals['ch_names'] = [c.get('label') for c in group.attrs.get('omero', {}).get('channels', [])]
        file_globals['axes_order'] = _get_axes_order_from_spec(multiscale_spec)
        axes_order = file_globals['axes_order']
        datasets = multiscale_spec["datasets"]
        try:
            file_globals['unit'] = next(iter([axis['unit'] for axis in multiscale_spec["axes"] if axis['type'] == 'space']), None)
        except KeyError:
            pass  # spec version without axes/unit info: leave unit unset
        for scale in datasets:  # OME-Zarr spec requires datasets ordered from high to low resolution
            array_options.append({})
            level = array_options[-1]
            level['store'] = uri
            level['path'] = scale['path']
            if "coordinateTransformations" in scale:
                scaletransform = [transform for transform in scale['coordinateTransformations'] if transform['type'] == 'scale'][0]
                level['xy_size'] = scaletransform['scale'][axes_order.find('x')]
                if 'z' in axes_order:
                    level['z_size'] = scaletransform['scale'][axes_order.find('z')]
            zarray = zarr.open_array(store=group.store, path=scale["path"])
            level['shape'] = zarray.shape
            if np.issubdtype(zarray.dtype, np.floating):
                log("Floating point arrays cannot be loaded lazily, will use a lot of RAM")
        return file_globals, array_options
def _get_axes_order_from_spec(validated_ome_spec):
if "axes" in validated_ome_spec:
ome_axes = validated_ome_spec["axes"]
if "name" in ome_axes[0]:
# v0.4: spec["axes"] requires name, recommends type and unit; like:
# [
# {'name': 'c', 'type': 'channel'},
# {'name': 'y', 'type': 'space', 'unit': 'nanometer'},
# {'name': 'x', 'type': 'space', 'unit': 'nanometer'}
# ]
axes_order = "".join([d["name"] for d in ome_axes])
else:
# v0.3: ['t', 'c', 'y', 'x']
axes_order = "".join(ome_axes)
else:
# v0.1 and v0.2 did not allow variable axes
axes_order = "tczyx"
return axes_order
def append_uri(uri, append):
    """Join `append` onto `uri`.

    Existing local paths are joined with pathlib (returns a Path); anything
    else is treated as a remote URI and joined with urljoin (returns a str).
    """
    if Path(uri).exists():
        return Path(uri) / append
    if uri[-1] != '/':
        # urljoin replaces the last segment unless the base ends in "/"
        uri = uri + "/"
    return urljoin(uri, append)
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/ui/__init__.py | .py | 176 | 8 | from . import ops
from . import channel_list
from . import panel
from . import preferences
# Aggregated bpy classes of the ui subpackage (presumably consumed by the
# add-on's register()/unregister() - confirm in the package root).
CLASSES = ops.CLASSES + channel_list.CLASSES + panel.CLASSES + preferences.CLASSES
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/ui/ops.py | .py | 4,144 | 124 | import bpy
from .. import load
from .. import parse_inputs
from .. import handle_blender_structs
from .channel_list import *
from bpy.types import (Panel,
Operator,
AddonPreferences,
PropertyGroup,
)
from bpy.types import UIList
import threading
class TifLoadOperator(bpy.types.Operator):
    """ Load a microscopy image. Resaves your data into vdb (volume) and abc (mask) formats into Cache Folder"""
    bl_idname ="microscopynodes.load"
    bl_label = "Load"
    _timer = None   # window-manager timer driving the modal() polls
    value = 0
    thread = None   # background thread running load.load_threaded
    params = None   # parsed load parameters, shared with the thread

    def modal(self, context, event):
        # Poll the loader thread on each timer tick; finish one tick after
        # the thread ends so the UI gets a final redraw in between.
        if event.type == 'TIMER':
            [region.tag_redraw() for region in context.area.regions]
            if self.thread is None:
                if 'EXCEPTION' in self.params[0][0]: # hacky
                    # re-raise an exception captured on the loader thread
                    raise(self.params[0][0]['EXCEPTION'])
                    return {"CANCELLED"}  # NOTE(review): unreachable after raise
                context.window_manager.event_timer_remove(self._timer)
                # main-thread part of the load (scene objects, materials)
                load.load_blocking(self.params)
                return {'FINISHED'}
            if not self.thread.is_alive():
                self.thread = None # update UI for one timer-round
                return {"RUNNING_MODAL"}
        if event.type in {'RIGHTMOUSE', 'ESC'}: # Cancel
            # Revert all changes that have been made
            return {'CANCELLED'}
        return {"RUNNING_MODAL"}

    def execute(self, context):
        """Parse inputs, start the loader thread, and enter modal polling."""
        wm = context.window_manager
        self._timer = wm.event_timer_add(0.1, window=context.window)
        self.params = parse_inputs.parse_initial()
        self.thread = threading.Thread(name='loading thread', target=load.load_threaded, args=(self.params,))
        wm.modal_handler_add(self)
        self.thread.start()
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        # Blender calls this when the modal operator is cancelled externally.
        wm = context.window_manager
        wm.event_timer_remove(self._timer)
        return
class TifLoadBackgroundOperator(bpy.types.Operator):
    """ Load a microscopy image. Resaves your data into vdb (volume) and abc (mask) formats into Cache Folder"""
    bl_idname ="microscopynodes.load_background"
    bl_label = "Load"

    def execute(self, context):
        # Synchronous variant of microscopynodes.load: runs both the
        # threaded stage and the blocking stage on this thread.
        load_params = parse_inputs.parse_initial()
        load.load_threaded(load_params)
        load.load_blocking(load_params)
        return {'FINISHED'}
class ArrayOptionSelectOperator(bpy.types.Operator):
    """Select Zarr dataset"""
    bl_idname = "microscopynodes.arrayselection"
    bl_label = "Load array option"
    ix: bpy.props.IntProperty()  # index into scene.MiN_array_options

    def execute(self, context):
        # NOTE(review): the panel and file_to_array code read
        # `MiN_selected_array_option`; `MiN_selected_zarr_level` looks like a
        # stale property name - confirm which scene property is registered.
        bpy.context.scene.MiN_selected_zarr_level = self.ix
        return {'FINISHED'}
class ArrayOptionMenu(bpy.types.Menu):
    """Dropdown listing every parsed zarr dataset as a selectable entry."""
    bl_label = "Zarr datasets"
    bl_idname = "SCENE_MT_ArrayOptionMenu"

    def draw(self, context):
        # One operator button per array option; clicking sets the selection.
        for ix, array_option in enumerate(bpy.context.scene.MiN_array_options):
            entry = self.layout.operator(
                ArrayOptionSelectOperator.bl_idname,
                text=array_option.ui_text,
                icon=array_option.icon,
            )
            entry.ix = ix
class SelectPathOperator(Operator):
    """Select file or directory"""
    bl_idname = "microscopynodes.select_path"
    bl_label = "Select path"
    bl_options = {'REGISTER'}
    # These are magic keywords for Blender
    # (fileselect_add fills exactly one of them depending on what was picked)
    filepath: bpy.props.StringProperty(
        name="filepath",
        description=".tif path",
        default = ""
    )
    directory: bpy.props.StringProperty(
        name="directory",
        description=".zarr path",
        default= ""
    )

    def execute(self, context):
        # Forward whichever selection the file browser produced to the scene.
        if self.filepath != "":
            bpy.context.scene.MiN_input_file = self.filepath
        elif self.directory != "":
            bpy.context.scene.MiN_input_file = self.directory
        return {'FINISHED'}

    def invoke(self, context, event):
        # Clear stale values, then open the file browser (which calls execute).
        self.filepath = ""
        self.directory = ""
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
CLASSES = [TifLoadOperator, TifLoadBackgroundOperator, ArrayOptionSelectOperator, ArrayOptionMenu, SelectPathOperator]
3D | aafkegros/MicroscopyNodes | microscopynodes/ui/preferences.py | .py | 7,450 | 178 | import bpy
from .. import __package__
from bpy.props import StringProperty, BoolProperty, EnumProperty
from pathlib import Path
import tempfile
import yaml
class MicroscopyNodesPreferences(bpy.types.AddonPreferences):
    """Add-on preferences: default channel settings, cache location, and
    import-transform defaults for Microscopy Nodes."""
    # NOTE(review): this file lives in the `ui` package, so `.ui.channel_list`
    # resolves to `ui.ui.channel_list` - expected `.channel_list`; confirm.
    from .ui.channel_list import ChannelDescriptor
    bl_idname = __package__

    def set_channels(self, context):
        # Grow/shrink the stored default-channel list to n_default_channels.
        # NOTE(review): the `-1` in both loop conditions makes the add-loop
        # create one extra channel that the remove-loop then deletes again;
        # the net count is correct but the off-by-one looks unintended - confirm.
        while len(addon_preferences(bpy.context).channels)-1 < addon_preferences(bpy.context).n_default_channels:
            ch = len(addon_preferences(bpy.context).channels)
            channel = addon_preferences(bpy.context).channels.add()
            # This instantiates the keys!
            channel.ix = ch
            channel.volume = True
            channel.emission = True
            channel.surface = False
            channel.labelmask = False
            channel.materials = True
            channel.surf_resolution = 'ACTUAL'
            channel.threshold=-1
            channel.cmap='SINGLE_COLOR'
            channel.name = f"Channel {ch}"
            channel.single_color = INIT_COLORS[ch % len(INIT_COLORS)]
        while len(addon_preferences(bpy.context).channels)-1 >= addon_preferences(bpy.context).n_default_channels:
            addon_preferences(bpy.context).channels.remove(len(addon_preferences(bpy.context).channels)-1)

    # Shown instead of import_scale when the file has no metric pixel unit.
    import_scale_no_unit_spoof : EnumProperty(
        name = 'Microscopy scale -> Blender scale (needs metric pixel unit)',
        items=[
            ("DEFAULT", "px -> cm","Scales to 0.01 blender-m/pixel in XY, rescales Z to isotropic pixel size" ,"", 0),
        ],
        description= "Defines the scale transform from input space to Blender meters, pixel space is rescaled to isotropic in Z from relative pixel size.",
        default='DEFAULT',
    )
    import_scale : EnumProperty(
        name = "Microscopy scale -> Blender scale",
        items=[
            ("DEFAULT", "px -> cm","Scales to 0.01 blender-m/pixel in XY, rescales Z to isotropic pixel size" ,"", 0),
            ("NANOMETER_SCALE", "nm -> m", "Scales to 1 nm/blender-meter" ,"", 1),
            ("MICROMETER_SCALE", "µm -> m", "Scales to 1 µm/blender-meter" ,"", 2),
            ("MILLIMETER_SCALE", "mm -> m", "Scales to 1 mm/blender-meter " ,"", 3),
            ("METER_SCALE", "m -> m", "Scales to 1 m/blender-meter " ,"", 4),
            ("MOLECULAR_NODES", "nm -> cm (Molecular Nodes)", "Scales to 1 nm/blender-centimeter " ,"", 5),
        ],
        description= "Defines the scale transform from input space to Blender meters, pixel space is rescaled to isotropic in Z from relative pixel size.",
        default='DEFAULT',
    )
    # Resizes the `channels` collection via set_channels on every change.
    n_default_channels : bpy.props.IntProperty(
        name = 'Defined default channels',
        min= 1,
        max=20,
        default =6,
        update=set_channels
    )
    cache_path: StringProperty(
        description = 'Only used if cache option is PATH',
        options = {'TEXTEDIT_UPDATE'},
        default = str(Path('~', '.microscopynodes').expanduser()),
        subtype = 'DIR_PATH',
    )
    cache_option: bpy.props.EnumProperty(
        name = "Data storage",
        items=[
            ("TEMPORARY", "Temporary","See the current temp path in Addon Preferences" ,"", 0),
            ("PATH", "Path", "","", 1),
            ("WITH_PROJECT", "With Project","", "", 2),
        ],
        description= "Data is resaved into vdb files (large 32bit volume files) for volumes and isosurfaces, and smaller abc mesh files for labelmasks. Microscopy Nodes does not clean out the files.",
        default='TEMPORARY',
    )
    # Per-channel defaults applied to new files (see set_channels).
    channels : bpy.props.CollectionProperty(type=ChannelDescriptor)
    import_loc : EnumProperty(
        name = 'Import location',
        items=[
            ("XY_CENTER", "XY Center","Center volume in XY" ,"", 0),
            ("XYZ_CENTER", "XYZ Center","Center volume in XYZ" ,"", 1),
            ("ZERO", "Origin"," Volume origin at world origin" ,"", 2),
        ],
        description= "Defines the coordinate translation after import from input space to Blender meters",
        default='XY_CENTER',
    )
    surf_resolution : bpy.props.EnumProperty(
        name = "Meshing density of surfaces and masks",
        items=[
            ("0", "Actual","Takes the actual grid size, most accurate, but heavy on RAM." ,"EVENT_A", 0),
            ("1", "Fine", "Close to actual grid meshing, but more flexible" ,"EVENT_F", 1),
            ("2", "Medium", "Medium density mesh","EVENT_M", 2),
            ("3", "Coarse","Coarse mesh minimizes the RAM usage of surface encoding.", "EVENT_C", 3),
        ],
        description= "Coarser will be less RAM intensive",
        default='0',
    )
    invert_color : bpy.props.BoolProperty(
        name="Invert Color",
        description = "Invert color lookup tables on load",
        default = False
    )

    def draw(self, context):
        """Preferences UI; when a YAML override is active, only show that."""
        layout = self.layout
        row = layout.row()
        if context.scene.MiN_yaml_preferences != "":
            # YAML override active: everything below is bypassed, so stop here.
            row.label(text=f"Preferences are overriden from {context.scene.MiN_yaml_preferences}", icon="ERROR")
            row= layout.row()
            row.prop(bpy.context.scene, 'MiN_yaml_preferences', text="")
            row = layout.row()
            row.operator("microscopynodes.reset_yaml")
            return
        row.prop(self, 'cache_path', text='Data storage "Path" default:')
        row = layout.row()
        row.label(text='Data storage "Temporary" default:')
        row.label(text=tempfile.gettempdir())
        col = layout.column(align=True)
        col.label(text="Default channel settings to set for new files.")
        col.prop(self, "n_default_channels")
        col.template_list("SCENE_UL_Channels", "", self, "channels", bpy.context.scene, "MiN_ch_index", rows=6,sort_lock=True)
        col = layout.column()
        col.prop(self, "surf_resolution")
        col.prop(self, "invert_color")
        row = layout.row()
        row.prop(bpy.context.scene, 'MiN_remake',
                 text = 'Overwrite files (debug, does not persist between sessions)', icon_value=0, emboss=True)
class ResetPreferenceYamlOperator(bpy.types.Operator):
    """ Unsets the preference yaml path """
    bl_idname ="microscopynodes.reset_yaml"
    bl_label = "Use Blender Preferences"

    def execute(self, context):
        # Clearing the path makes addon_preferences() fall back to the
        # regular Blender add-on preferences.
        context.scene.MiN_yaml_preferences = ""
        return {'FINISHED'}
class DictWithElements:
    """Expose a dict's keys as attributes (dct.key).

    Mirrors the attribute-access API of AddonPreferences, so a parsed YAML
    mapping can stand in for the Blender preferences object.
    """

    def __init__(self, dictionary):
        # Rebinding __dict__ makes every key an instance attribute at once.
        self.__dict__ = dictionary
def addon_preferences(context: bpy.types.Context | None = None):
    """Return the live preferences object.

    Priority: YAML override set on the scene (wrapped in DictWithElements),
    then the add-on's Blender preferences, then None.
    Falls back gracefully when the YAML path is unset, unreadable, or
    malformed — the previous version only caught KeyError, so a missing
    file (OSError) or invalid YAML crashed instead of falling back.
    """
    if context is None:
        context = bpy.context
    try:
        if hasattr(context, 'scene') and context.scene.MiN_yaml_preferences != "":
            with open(context.scene.MiN_yaml_preferences) as stream:
                return DictWithElements(yaml.safe_load(stream))
    except (KeyError, OSError, yaml.YAMLError) as e:
        # Unregistered scene property, missing/unreadable file, or bad YAML:
        # fall through to the regular Blender preferences.
        print(e)
    try:
        return context.preferences.addons[__package__].preferences
    except KeyError:
        print('CANNOT FIND PREFERENCES')
        return None
# Default single-channel colors, cycled through when seeding new channels
# (white, blue, red, yellow, purple, green; 8-bit values scaled to 0-1).
INIT_COLORS = [
    (1.0, 1.0, 1.0),
    (0/255, 157/255, 224/255),
    (224/255, 0/255, 37/255),
    (224/255, 214/255, 0/255),
    (117/255, 0/255, 224/255),
    (0/255, 224/255, 87/255),
]
CLASSES = [MicroscopyNodesPreferences, ResetPreferenceYamlOperator]
3D | aafkegros/MicroscopyNodes | microscopynodes/ui/channel_list.py | .py | 4,369 | 95 | import bpy
from bpy.types import UIList
import os
# from ..min_nodes.shader_nodes import draw_category_menus
def update_ix(self, context):
    """Property-update callback: make the edited channel the active list row."""
    context.scene.MiN_ch_index = self.ix
class ChannelDescriptor(bpy.types.PropertyGroup):
    """Per-channel load settings shown in the channel list UI."""
    # Initialization of these classes is done in set_channels - these defaults are not used by mic nodes itself
    ix : bpy.props.IntProperty() # channel in the image array
    # Editing any user-facing property selects its row, except under tests
    # (MIN_TEST env var), where no update callback is attached.
    update_func = update_ix
    if os.environ.get('MIN_TEST', False):
        update_func = None
    name : bpy.props.StringProperty(description="Channel name (editable)", update = update_func )
    volume : bpy.props.BoolProperty(description="Load data as volume", default=True, update=update_func )
    emission : bpy.props.BoolProperty(description="Volume data emits light on load\n(off is recommended for EM)", default=True, update=update_func )
    surface : bpy.props.BoolProperty(description="Load isosurface object.\nAlso useful for binary masks", default=False, update=update_func )
    labelmask : bpy.props.BoolProperty(description="Do not use on regular images.\nLoads separate values in the mask as separate mesh objects", default=False, update=update_func )
    # -- internal --
    threshold : bpy.props.FloatProperty(default=-1)  # -1 presumably means "auto" - confirm
    cmap : bpy.props.EnumProperty(
        name = "Default Colormaps",
        items=[
            ("SINGLE_COLOR", "Single Color","Settable single color, will generate map from black to color" ,"MESH_PLANE", 0),
            ("VIRIDIS", "Viridis", "bids:viridis","IPO_LINEAR", 1),
            ("PLASMA", "Plasma","bids:plasma", "IPO_LINEAR", 2),
            ("COOLWARM", "Coolwarm","matplotlib:coolwarm", "LINCURVE", 3),
            ("ICEFIRE", "IceFire","seaborn:icefire", "LINCURVE", 4),
            ("TAB10", "Tab10","seaborn:tab10", "OUTLINER_DATA_POINTCLOUD", 5),
            ("BRIGHT", "Bright","tol:bright", "OUTLINER_DATA_POINTCLOUD", 6),
        ],
        description= "Colormap for this channel",
        default='SINGLE_COLOR',
        update = update_func
    )
    # Only shown/used when cmap == 'SINGLE_COLOR'.
    single_color : bpy.props.FloatVectorProperty(subtype="COLOR", min=0, max=1, update= update_func)
class SCENE_UL_Channels(UIList):
    """List widget drawing one row per ChannelDescriptor."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        self.use_filter_show =False #filtering is currently unsupported
        channel = item
        row1 = layout.row( align=True)
        split = row1.split(factor=0.9, align=True) # splitting to reduce the size of the color picker
        row = split.row(align=True)
        row.prop(channel, "name", text="", emboss=True)
        # each toggle swaps between a filled icon (on) and a data icon (off)
        volumecheckbox = "OUTLINER_OB_VOLUME" if channel.volume else "VOLUME_DATA"
        row.prop(channel, "volume", text="", emboss=True, icon=volumecheckbox)
        surfcheckbox = "OUTLINER_OB_SURFACE" if channel.surface else "SURFACE_DATA"
        row.prop(channel, "surface", text="", emboss=True, icon=surfcheckbox)
        maskcheckbox = "OUTLINER_OB_POINTCLOUD" if channel.labelmask else "POINTCLOUD_DATA"
        row.prop(channel, "labelmask", text="", emboss=True, icon=maskcheckbox)
        row.separator()
        emitcheckbox = "OUTLINER_OB_LIGHT" if channel.emission else "LIGHT_DATA"
        row.prop(channel, "emission", text="", emboss=False, icon=emitcheckbox)
        row.prop(channel, "cmap", text="", emboss=False, icon_only=True)
        # narrow right-hand column: color picker or colormap name
        row = split.column(align=True)
        if channel.cmap == 'SINGLE_COLOR':
            row.prop(channel, "single_color", text="")
        else:
            row.label(text=channel.cmap.lower().capitalize())

    def invoke(self, context, event):
        pass
def set_channels(self, context):
    """Rebuild the scene channel list to MiN_channel_nr entries.

    Each channel is seeded from the user's default channels (cycled when
    there are more channels than defaults); channels beyond the defaults
    get a generic name. The previous bare `except:` is narrowed so real
    errors are no longer swallowed silently.
    """
    from .preferences import addon_preferences
    bpy.context.scene.MiN_channelList.clear()
    defaults = addon_preferences(bpy.context).channels
    for ch in range(bpy.context.scene.MiN_channel_nr):
        channel = bpy.context.scene.MiN_channelList.add()
        default = defaults[ch % len(defaults)]
        for key in default.keys():
            try:
                setattr(channel, key, default[key])
            except (KeyError, TypeError):
                # value only exists as a class-level default, not as a
                # stored ID property: read it as an attribute instead
                setattr(channel, key, getattr(default, key))
        if ch >= len(defaults):
            channel.name = f"Channel {ch}"
        channel.ix = ch
CLASSES = [ChannelDescriptor, SCENE_UL_Channels]
3D | aafkegros/MicroscopyNodes | microscopynodes/ui/panel.py | .py | 5,926 | 147 | import bpy
from ..handle_blender_structs.dependent_props import *
from ..file_to_array import selected_array_option
from .preferences import addon_preferences
class TIFLoadPanel(bpy.types.Panel):
    """Main Microscopy Nodes panel in Scene properties."""
    bl_idname = "SCENE_PT_zstackpanel"
    bl_label = "Microscopy Nodes"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "scene"

    def draw(self, context):
        layout = self.layout
        scn = bpy.context.scene
        # --- input path + dataset selector ---
        col = layout.column(align=True)
        col.label(text=".tif or .zarr:")
        row = col.row(align=True)
        row.prop(bpy.context.scene, 'MiN_input_file', text= '')
        row.operator("microscopynodes.select_path", text="", icon='FILEBROWSER')
        if bpy.context.scene.MiN_selected_array_option != "" and len(bpy.context.scene.MiN_array_options) != 0:
            row =col.row(align=True)
            row.prop(bpy.context.scene, 'MiN_selected_array_option')
            row.enabled = True
            # NOTE(review): this check is dead code inside the outer `!= 0`
            # condition - confirm whether it was meant to sit outside it.
            if len(bpy.context.scene.MiN_array_options) == 0:
                row.enabled = False
            if selected_array_option().is_rescaled:
                row.prop(bpy.context.scene, 'MiN_pixel_sizes_are_rescaled', icon="FIXED_SIZE", icon_only=True)
        # --- pixel sizes and axes, as label column + value column ---
        split = layout.split()
        # First column
        col1 = split.column(align=True)
        col1.alignment='RIGHT'
        if selected_array_option() is None or not selected_array_option().is_rescaled or not bpy.context.scene.MiN_pixel_sizes_are_rescaled:
            col1.label(text="xy pixel size:")
            col1.label(text="z pixel size:")
        else:
            # rescaled dataset: mention the source path when one exists
            if selected_array_option().path != "":
                col1.label(text=f"{selected_array_option().path} xy pixel size:")
                col1.label(text=f"{selected_array_option().path} z pixel size:")
            else:
                col1.label(text=f"xy pixel size (after rescaling):")
                col1.label(text=f"z pixel size (after rescaling):")
        col1.label(text="axes:")
        col2 = split.column(align=True)
        rowxy = col2.row(align=True)
        rowxy.prop(scn, "MiN_xy_size", emboss=True)
        rowxy.prop(scn, "MiN_unit", emboss=False)
        rowz = col2.row(align=True)
        rowz.prop(scn, "MiN_z_size", emboss=True)
        rowz.prop(scn, "MiN_unit", emboss=False)
        col2.prop(scn, "MiN_axes_order", emboss=True)
        if 't' in scn.MiN_axes_order:
            # time axis present: offer a frame range to load
            col1.label(text='time:')
            rowt = col2.row(align=True)
            rowt.prop(scn,'MiN_load_start_frame')
            rowt.prop(scn,'MiN_load_end_frame')
        if not bpy.context.scene.MiN_enable_ui:
            col1.enabled=False
            col2.enabled=False
        # --- channel list + (re)load controls ---
        col = layout.column(align=False)
        col.template_list("SCENE_UL_Channels", "", bpy.context.scene, "MiN_channelList", bpy.context.scene, "MiN_ch_index", rows=max(len(bpy.context.scene.MiN_channelList),1),sort_lock=True)
        if not bpy.context.scene.MiN_enable_ui:
            col.enabled=False
        col.separator()
        row = col.row(align=True)
        row.label(text="", icon='FILE_REFRESH')
        row.prop(bpy.context.scene, 'MiN_reload', icon="OUTLINER_OB_EMPTY")
        if bpy.context.scene.MiN_reload is not None:
            row.prop(bpy.context.scene, 'MiN_update_data', icon="FILE")
            row.prop(bpy.context.scene, 'MiN_update_settings', icon="MATERIAL_DATA")
        col.separator()
        if bpy.context.scene.MiN_reload is None:
            col.operator("microscopynodes.load", text="Load")
        else:
            col.operator("microscopynodes.load", text="Reload")
        if not bpy.context.scene.MiN_enable_ui:
            col.enabled=False
        col.prop(context.scene, 'MiN_progress_str', emboss=False)
        # --- storage / import settings box ---
        box = layout.box()
        row = box.row(align=True)
        if context.scene.MiN_yaml_preferences != "":
            # YAML override active: settings below come from the file, stop here.
            row.label(text=f"Preferences are overriden from {context.scene.MiN_yaml_preferences}", icon="ERROR")
            row= box.row()
            row.prop(bpy.context.scene, 'MiN_yaml_preferences', text="")
            row = box.row()
            row.operator("microscopynodes.reset_yaml")
            return
        row.label(text="Data Storage:", icon="FILE_FOLDER")
        row.prop(addon_preferences(context), 'cache_option', text="", icon="NONE", emboss=True)
        if addon_preferences().cache_option == 'PATH':
            row = box.row()
            row.prop(addon_preferences(context), 'cache_path', text="")
        if addon_preferences().cache_option == 'WITH_PROJECT' and bpy.path.abspath('//') == '':
            # WITH_PROJECT needs a saved .blend to resolve the project folder
            row = box.row()
            row.label(text = "Don't forget to save your blend file :)")
        row = box.row(align=True)
        row.prop(bpy.context.scene, 'MiN_overwrite_background_color',
                 text = '', icon="WORLD",icon_only=True,emboss=True)
        row.prop(bpy.context.scene, 'MiN_overwrite_render_settings',
                 text = '', icon="SCENE",icon_only=True,emboss=True)
        row.separator()
        row.label(text="", icon='CON_SIZELIKE')
        if bpy.context.scene.MiN_unit == "AU":
            # no metric unit: only the pixel-based scale option applies
            row.prop(addon_preferences(bpy.context), 'import_scale_no_unit_spoof', emboss=True,text="")
        else:
            row.prop(addon_preferences(bpy.context), 'import_scale', emboss=True,text="")
        row.label(text="", icon='ORIENTATION_PARENT')
        row.prop(addon_preferences(bpy.context), 'import_loc', emboss=True,text="")
# Classes this module contributes to the add-on registration list.
CLASSES = [TIFLoadPanel]
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/load_labelmask.py | .py | 9,969 | 249 | import numpy as np
import bpy
import bmesh
from pathlib import Path
import json
import os
from ..handle_blender_structs import *
from .load_generic import *
from .. import min_nodes
class LabelmaskIO(DataIO):
    """Exports labelmask channels to Alembic (.abc) meshes and re-imports
    them as per-object meshes tagged with object-id/channel attributes."""
    min_type = min_keys.LABELMASK

    def dissolve(self, obj, obj_id):
        """Planar-decimate `obj` in place (merges the coplanar faces that
        marching cubes produces). `obj_id` is unused; kept for interface
        stability."""
        m = obj.data
        bm = bmesh.new()
        bm.from_mesh(m)
        # ~5 degree angle limit collapses axis-aligned staircase faces
        bmesh.ops.dissolve_limit(bm, angle_limit=0.0872665, verts=bm.verts, edges=bm.edges)
        bm.to_mesh(m)
        bm.free()
        m.update()
        return

    def export_ch(self, ch, cache_dir, remake, axes_order):
        """Mesh every labelled object of channel `ch` per timepoint and
        export one .abc file per frame into `cache_dir`.

        Returns [{'abcfiles': [...]}] listing the files for loaded frames.
        """
        import zmesh
        axes_order = axes_order.replace('c', '')  # only gets a single channel
        abcfiles = []
        mask = ch['data']
        parentcoll = get_current_collection()
        tmp_collection, _ = collection_by_name('tmp')
        mask_objects = {}
        # mesh in voxel space; scaling to world units happens at import time
        mesher = zmesh.Mesher((1, 1, 1))
        for timestep in range(0, bpy.context.scene.MiN_load_end_frame + 1):
            bpy.ops.object.select_all(action='DESELECT')
            if timestep >= len_axis('t', axes_order, mask.shape):
                break
            fname = str(Path(cache_dir) / f"mask_ch{ch['ix']}_res_{ch['surf_resolution']}_t_{timestep:04}.abc")
            if timestep < bpy.context.scene.MiN_load_start_frame:
                if not Path(fname).exists():  # make dummy file for sequencing
                    open(fname, 'a').close()
                continue
            abcfiles.append(fname)
            if Path(fname).exists() and os.path.getsize(fname) > 0:
                if remake:
                    Path(fname).unlink()
                else:
                    continue  # reuse the cached frame
            timeframe_arr = take_index(mask, timestep, 't', axes_order).compute()
            timeframe_arr = expand_to_xyz(timeframe_arr, axes_order.replace('t', ''))
            mesher.mesh(timeframe_arr, close=True)
            for obj_id in mesher.ids():
                zmeshed = mesher.get(obj_id,
                                     normals=False,
                                     reduction_factor=ch['surf_resolution'] * 30,
                                     max_error=ch['surf_resolution'] * 3,
                                     voxel_centered=False,
                                     )
                mesher.erase(obj_id)
                obj_id_val = obj_id + 1
                if obj_id_val in mask_objects:
                    obj = mask_objects[obj_id_val]
                else:
                    # name encodes channel and object id for import_data
                    objname = f"ch{ch['ix']}_obj{obj_id_val}_"
                    bpy.ops.mesh.primitive_cube_add()
                    obj = bpy.context.view_layer.objects.active
                    obj.name = objname
                    obj.data.name = objname
                    mask_objects[obj_id_val] = obj
                mesh = obj.data
                mesh.clear_geometry()
                mesh.from_pydata(zmeshed.vertices, [], zmeshed.faces)
                bpy.ops.object.mode_set(mode='OBJECT')
                self.dissolve(obj, obj_id)
                # obj.select_set(True) #TODO see if this works
            mesher.clear()
            for obj in tmp_collection.all_objects:
                obj.select_set(True)
            bpy.ops.wm.alembic_export(filepath=fname,
                                      visible_objects_only=False,
                                      selected=True,
                                      vcolors=False,
                                      flatten=False,
                                      orcos=True,
                                      export_custom_properties=False,
                                      start=0,
                                      end=1,
                                      evaluation_mode="RENDER",
                                      )
            for obj in tmp_collection.all_objects:
                obj.data.clear_geometry()
        for obj in mask_objects.values():
            obj.select_set(True)
        bpy.ops.object.delete(use_global=False)
        bpy.data.collections.remove(tmp_collection)
        collection_activate(*parentcoll)
        # Remove stale frames left over from earlier exports of this channel.
        # BUGFIX: the glob pattern was missing its f-prefix, so the literal
        # "{ch['ix']}" never matched and stale files were never cleaned; also
        # guard against an empty abcfiles list (IndexError).
        if abcfiles:
            for stale in Path(abcfiles[0]).parent.glob(f"*_ch{ch['ix']}_*.abc"):
                # handles remapping of time series
                if stale.name not in [Path(f).name for f in abcfiles]:
                    stale.unlink()
        return [{'abcfiles': abcfiles}]

    def import_data(self, ch, scale):
        """Import the cached .abc mask meshes and attach per-object geometry
        node trees that store the object id and channel as attributes.

        Returns (collection, metadata) where metadata carries the highest
        object id (used to size the remap colormap).
        """
        mask_coll, mask_lcoll = make_subcollection(f"{ch['name']}_{self.min_type.name.lower()}", duplicate=True)
        maskfiles = ch['local_files'][self.min_type][0]
        bpy.ops.wm.alembic_import(filepath=maskfiles['abcfiles'][0], is_sequence=(len(maskfiles['abcfiles']) > 1))
        oids = []
        for obj in mask_coll.all_objects:  # names follow "ch{c}_obj{oid}_" (see export_ch)
            oid = int(obj.name.split('_')[1].removeprefix('obj'))
            # parse channel from the name instead of shadowing the `ch` dict
            ch_ix = int(obj.name.split('_')[0].removeprefix('ch'))
            obj.modifiers.new(type='NODES', name=f'object id + channel {oid}')
            obj.modifiers[-1].node_group = self.gn_oid_tree(oid, ch_ix)
            obj.scale = scale
            oids.append(oid)
        # default=0 guards against an import that produced no objects
        return mask_coll, {'max': max(oids, default=0)}

    def gn_oid_tree(self, oid, ch):
        """Build (or fetch) the geometry-node tree that stores `oid` and `ch`
        as FLOAT_COLOR corner attributes ("object id" / "channel")."""
        tree_name = f"object id {oid}, {ch}"
        node_group = bpy.data.node_groups.get(tree_name)
        if node_group:
            return node_group
        # BUGFIX: the tree used to be created as "object id {oid}" while the
        # lookup above used "object id {oid}, {ch}", so the cache never hit
        # and duplicate trees piled up per channel.
        node_group = bpy.data.node_groups.new(type='GeometryNodeTree', name=tree_name)
        links = node_group.links
        nodes = node_group.nodes
        interface = node_group.interface
        interface.new_socket("Geometry", in_out="INPUT", socket_type='NodeSocketGeometry')
        group_input = node_group.nodes.new("NodeGroupInput")
        group_input.location = (-400, 0)
        oidnode = nodes.new('FunctionNodeInputInt')
        oidnode.integer = oid
        oidnode.label = 'object id'
        oidnode.location = (-100, -200)
        chnode = nodes.new('FunctionNodeInputInt')
        chnode.integer = ch
        chnode.label = 'channel'
        chnode.location = (-100, -400)
        store = node_group.nodes.new("GeometryNodeStoreNamedAttribute")
        store.data_type = 'FLOAT_COLOR'
        store.domain = 'CORNER'
        store.location = (150, 0)
        store.inputs.get("Name").default_value = "object id"
        links.new(group_input.outputs.get('Geometry'), store.inputs[0])
        links.new(oidnode.outputs[0], store.inputs.get("Value"))
        store2 = node_group.nodes.new("GeometryNodeStoreNamedAttribute")
        store2.data_type = 'FLOAT_COLOR'
        store2.domain = 'CORNER'
        store2.location = (350, 0)
        store2.inputs.get("Name").default_value = "channel"
        links.new(store.outputs[0], store2.inputs[0])
        links.new(chnode.outputs[0], store2.inputs.get("Value"))
        interface.new_socket("Geometry", in_out="OUTPUT", socket_type='NodeSocketGeometry')
        group_output = node_group.nodes.new("NodeGroupOutput")
        group_output.location = (500, 0)
        links.new(store2.outputs[0], group_output.inputs[0])
        return node_group
class LabelmaskObject(ChannelObject):
    """Scene-side handling of labelmask channels: builds/updates the material
    that colors meshes by their stored "object id" attribute."""
    min_type = min_keys.LABELMASK

    def add_material(self, ch):
        # do not check whether it exists, so a new load will force making a new mat
        mat = super().add_material(ch)
        mat.blend_method = "BLEND"
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        links = mat.node_tree.links
        if nodes.get("Principled BSDF") is None:
            # swap the default volume shader for a surface shader
            try:
                nodes.remove(nodes.get("Principled Volume"))
            except Exception as e:
                print(e)
                pass
            princ = nodes.new("ShaderNodeBsdfPrincipled")
            if nodes.get("Material Output") is None:
                outnode = nodes.new(type='ShaderNodeOutputMaterial')
                outnode.name = 'Material Output'
            links.new(princ.outputs[0], nodes.get('Material Output').inputs[0])
        princ = nodes.get("Principled BSDF")
        princ.name = f"[{ch['identifier']}] principled"
        # read the per-corner "object id" attribute written by the GN trees
        idnode = nodes.new("ShaderNodeVertexColor")
        idnode.layer_name = 'object id'
        idnode.location = (-800, 300)
        # remap raw ids into [0, 1] so the color ramp can look them up
        remap = nodes.new('ShaderNodeGroup')
        remap.node_tree = min_nodes.shader_nodes.remap_oid_node()
        remap.name = '[remap_oid]'
        remap.location = (-600, 300)
        remap.show_options = False
        remap.inputs.get('# Objects').default_value = ch['metadata'][self.min_type]['max']
        links.new(idnode.outputs.get('Color'), remap.inputs.get('Value'))
        color_lut = nodes.new(type="ShaderNodeValToRGB")
        color_lut.location = (-350, 300)
        color_lut.width = 300
        color_lut.name = "[color_lut]"
        color_lut.outputs[1].hide = True
        links.new(remap.outputs[0], color_lut.inputs[0])
        links.new(color_lut.outputs[0], princ.inputs.get("Base Color"))
        # NOTE(review): input 27 is assumed to be the Principled BSDF emission
        # color socket in the targeted Blender version - confirm, the index
        # shifts between Blender releases.
        links.new(color_lut.outputs[0], princ.inputs[27])
        return mat

    def update_material(self, mat, ch):
        """Re-apply colormap and emission settings to an existing material."""
        try:
            nodes = mat.node_tree.nodes
            min_nodes.shader_nodes.set_color_ramp_from_ch(ch, nodes.get('[color_lut]'))
            nodes.get('[remap_oid]').inputs.get('Revolving Colormap').default_value = (nodes.get('[color_lut]').color_ramp.interpolation == 'CONSTANT')
            nodes.get('[remap_oid]').inputs.get('# Colors').default_value =max(len(nodes.get('[color_lut]').color_ramp.elements), 5)
            princ = mat.node_tree.nodes.get(f"[{ch['identifier']}] principled")
            # NOTE(review): input 28 is assumed to be emission strength; only
            # toggles between 0 and 0.5 to avoid clobbering user edits - confirm.
            if ch['emission'] and princ.inputs[28].default_value == 0.0:
                princ.inputs[28].default_value = 0.5
            elif not ch['emission'] and princ.inputs[28].default_value == 0.5:
                princ.inputs[28].default_value = 0
        except Exception as e:
            print(e)
            pass
        return
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/load_volume.py | .py | 18,486 | 427 | import bpy
from mathutils import Color
from pathlib import Path
import numpy as np
import math
import itertools
from .load_generic import *
from ..handle_blender_structs import *
from .. import min_nodes
# Number of intensity-histogram bins (covers the full 16-bit value range).
NR_HIST_BINS = 2**16
def get_leading_trailing_zero_float(arr):
    """Return fractional positions bounding the nonzero support of `arr`.

    Gives (start, stop) as fractions of len(arr), padded one bin outward
    and clamped to the [0, 1] range.
    """
    n = len(arr)
    first_nonzero = np.argmax(arr > 0)
    last_nonzero_from_end = np.argmax(arr[::-1] > 0)
    lo = max(first_nonzero - 1, 0) / n
    hi = min(n - (last_nonzero_from_end - 1), n) / n
    return lo, hi
class VolumeIO(DataIO):
    """Exports channel data to chunked VDB caches on disk and imports them back as Blender volume objects."""
    # which min_keys entry this IO class handles
    min_type = min_keys.VOLUME

    def export_ch(self, ch, cache_dir, remake, axes_order):
        """Write ch['data'] out as one or more VDB chunk caches under cache_dir.

        Returns a list of dicts, one per xyz chunk, holding the chunk's cache
        directory, per-timepoint vdb and histogram file names, and its xyz
        start position in pixels.
        """
        file_meta = []
        xyz_shape = [len_axis(dim, axes_order, ch['data'].shape) for dim in 'xyz']
        maxlen = np.inf
        if bpy.context.scene.MiN_chunk:
            # chunked export caps every spatial axis at 2048 px per vdb
            maxlen = 2048
        slices = [self.split_axis_to_chunks(dimshape, ch['ix'], maxlen) for dimshape in xyz_shape]
        for block in itertools.product(*slices):
            chunk = ch['data']
            for dim, sl in zip('xyz', block):
                chunk = take_index(chunk, indices = np.arange(sl.start, sl.stop), dim=dim, axes_order=axes_order)
            directory, time_vdbs, time_hists = self.make_vdbs(chunk, block, axes_order, remake, cache_dir, ch)
            file_meta.append({"directory" : directory, "vdbfiles": time_vdbs, 'histfiles' : time_hists, 'pos':(block[0].start, block[1].start, block[2].start)})
        return file_meta

    def split_axis_to_chunks(self, length, ch_ix, maxlen):
        """Split an axis of `length` px into slices of at most ~maxlen px each.

        Boundaries get a channel-index-dependent shift so chunk seams of
        different channels do not coincide.
        """
        # chunks to max 2048 length, with ch_ix dependent offsets
        offset = 0
        if length > maxlen:
            offset = (300 * ch_ix) % 2048
        n_splits = int((length // (maxlen+1))+ 1)
        splits = [length/n_splits * split for split in range(n_splits + 1)]
        splits[-1] = math.ceil(splits[-1])
        splits = [math.floor(split) + offset for split in splits]
        if offset > 0:
            # offsetting moved the first boundary off zero; re-anchor the start
            splits.insert(0, 0)
        # drop boundaries that the offset pushed past the end of the axis
        while splits[-2] > length:
            del splits[-1]
        splits[-1] = length
        slices = [slice(start, end) for start, end in zip(splits[:-1], splits[1:])]
        return slices

    def make_vdbs(self, imgdata, block, axes_order, remake, cache_dir, ch):
        """Write one vdb + histogram (.npy) file per loaded timepoint of this xyz chunk.

        Existing cache files are reused unless `remake` is set. Returns the
        chunk's cache directory and the lists of vdb / histogram file names.
        """
        # non-lazy functions are allowed on only single time-frames
        x_ix, y_ix, z_ix = [sl.start for sl in block]
        # imgdata = imgdata.compute()
        time_vdbs = []
        time_hists = []
        identifier3d = f"x{x_ix}y{y_ix}z{z_ix}"
        dirpath = Path(cache_dir)/f"{identifier3d}"
        dirpath.mkdir(exist_ok=True,parents=True)
        for t in range(0, bpy.context.scene.MiN_load_end_frame+1):
            if t >= len_axis('t', axes_order, imgdata.shape):
                break
            identifier5d = f"{identifier3d}c{ch['ix']}t{t:04}"
            frame = take_index(imgdata, t, 't', axes_order)
            frame_axes_order = axes_order.replace('t',"")
            vdbfname = dirpath / f"{identifier5d}.vdb"
            histfname = dirpath / f"{identifier5d}_hist.npy"
            if t < bpy.context.scene.MiN_load_start_frame and not vdbfname.exists():
                # Makes dummy vdb files to keep sequence reading of Blender correct if the loaded frames are offset
                # existence of histogram file is then used to see if this is a dummy
                open(vdbfname, 'a').close()
                continue
            time_vdbs.append({"name":str(vdbfname.name)})
            time_hists.append({"name":str(histfname.name)})
            if( not vdbfname.exists() or not histfname.exists()) or remake :
                if vdbfname.exists():
                    vdbfname.unlink()
                if histfname.exists():
                    histfname.unlink()
                log(f"loading chunk {identifier5d}")
                arr = frame.compute()
                arr = expand_to_xyz(arr, frame_axes_order)
                try:
                    arr = arr.astype(np.float32) / min(np.iinfo(imgdata.dtype).max, np.iinfo(np.int32).max) # scale between 0 and 1, capped to allow uint32 to at least not break
                except ValueError as e:
                    # np.iinfo raises ValueError for float dtypes; normalize by the data's own max instead
                    arr = arr.astype(np.float32) / ch['max_val'].compute()
                # hists could be done better with bincount, but this doesnt work with floats and seems harder to maintain
                histogram = np.histogram(arr, bins=NR_HIST_BINS, range=(0.,1.)) [0]
                # discard the lowest (zero/background) bin so it doesn't dominate
                histogram[0] = 0
                np.save(histfname, histogram, allow_pickle=False)
                log(f"write vdb {identifier5d}")
                self.make_vdb(vdbfname, arr, f"c{ch['ix']}")
        return str(dirpath), time_vdbs, time_hists

    def make_vdb(self, vdbfname, arr, gridname):
        """Write `arr` to `vdbfname` as a single float grid named "data".

        `gridname` is currently unused — the grid is always called "data".
        """
        try:
            import openvdb as vdb
        except:
            # Blender >= 4.x bundles pyopenvdb but hides it until exposed
            bpy.utils.expose_bundled_modules()
            import openvdb as vdb
            pass
        grid = vdb.FloatGrid()
        grid.name = f"data"
        grid.copyFromArray(arr)
        # For future OME-Zarr transforms - something like this:
        # grid.transform = vdb.createLinearTransform(np.array([[ 2. , 0. , 0. , 8.5],[ 0. , 2. , 0. , 8.5],[ 0. , 0. , 2. , 10.5],[ 0. , 0. , 0. , 1. ]]).T)
        vdb.write(str(vdbfname), grids=[grid])
        return

    def import_data(self, ch, scale):
        """Import all cached vdb chunks of a channel into a new collection.

        Returns the collection and a metadata dict with the value range, the
        cropped summed histogram, threshold estimates, and a pointer to the
        last imported volume's data block.
        """
        vol_collection, vol_lcoll = make_subcollection(f"{ch['name']} {'volume'}", duplicate=True)
        metadata = {}
        collection_activate(vol_collection, vol_lcoll)
        histtotal = np.zeros(NR_HIST_BINS)
        for chunk in ch['local_files'][self.min_type]:
            bpy.ops.object.volume_import(filepath=chunk['vdbfiles'][0]['name'],directory=chunk['directory'], files=chunk['vdbfiles'], align='WORLD', location=(0, 0, 0))
            vol = bpy.context.active_object
            pos = chunk['pos']
            # NOTE(review): strpos is unused
            strpos = f"{pos[0]}{pos[1]}{pos[2]}"
            vol.scale = scale
            # shift sequence playback so the vdb series starts at the configured start frame
            vol.data.frame_offset = -1 + bpy.context.scene.MiN_load_start_frame
            vol.data.frame_start = bpy.context.scene.MiN_load_start_frame
            vol.data.frame_duration = bpy.context.scene.MiN_load_end_frame - bpy.context.scene.MiN_load_start_frame + 1
            vol.data.render.clipping =0
            # vol.data.display.density = 1e-5
            # vol.data.display.interpolation_method = 'CLOSEST'
            vol.location = tuple((np.array(chunk['pos']) * scale))
            for hist in chunk['histfiles']:
                histtotal += np.load(Path(chunk['directory'])/hist['name'], allow_pickle=False)
        # defaults
        metadata['range'] = (0, 1)
        metadata['histogram'] = np.zeros(NR_HIST_BINS)
        # NOTE(review): assumes at least one chunk was imported, otherwise `vol` is unbound
        metadata['datapointer'] = vol.data
        if np.sum(histtotal)> 0:
            metadata['range'] = get_leading_trailing_zero_float(histtotal)
            metadata['histogram'] = histtotal[int(metadata['range'][0] * NR_HIST_BINS): int(metadata['range'][1] * NR_HIST_BINS)]
            threshold = threshold_isodata(hist=metadata['histogram'] )
            metadata['threshold'] = threshold/len(metadata['histogram'] )
            cs = np.cumsum(metadata['histogram'])
            # upper threshold at the 90th percentile of cumulative counts, only when above the isodata threshold
            percentile = np.searchsorted(cs, np.percentile(cs, 90))
            if percentile > threshold:
                metadata['threshold_upper'] = percentile / len(metadata['histogram'] )
        elif ch['threshold'] != -1: # THIS IS TO BE DEPRECATED - LABEL SUPPORT FOR ZARR
            metadata['threshold'] = ch['threshold']
        else:
            # this is for 0,1 range int32 data
            metadata['range'] = (0, 1e-9)
            metadata['threshold'] = 0.3
            metadata['threshold_upper'] = 1
        return vol_collection, metadata
class VolumeObject(ChannelObject):
    """Holder object and shader management for volumetric channels."""
    min_type = min_keys.VOLUME

    def draw_histogram(self, nodes, loc, width, hist):
        """Draw `hist` as a non-interactive Float Curve node at `loc`.

        The histogram is normalized to max 1 and rebinned to at most 150 bars;
        each bar becomes a pair of curve points with vector handles so the
        curve renders as steps.
        """
        histnode =nodes.new(type="ShaderNodeFloatCurve")
        histnode.location = loc
        histmap = histnode.mapping
        histnode.width = width
        histnode.label = 'Histogram (non-interactive)'
        histnode.name = '[Histogram]'
        # the node is display-only: hide all sockets
        histnode.inputs.get('Factor').hide = True
        histnode.inputs.get('Value').hide = True
        histnode.outputs.get('Value').hide = True
        histnorm = hist / np.max(hist)
        if len(histnorm) > 150:
            histnorm = binned_statistic_sum(np.arange(len(histnorm)), histnorm, bins=150)
            histnorm /= np.max(histnorm)
        for ix, val in enumerate(histnorm):
            if ix == 0:
                # reuse the curve's default point as the first bar edge
                histmap.curves[0].points[-1].location = ix/len(histnorm), val
                histmap.curves[0].points.new((ix + 0.9)/len(histnorm), val)
            if ix==len(histnorm)-1:
                histmap.curves[0].points[-1].location = ix/len(histnorm), val
            else:
                histmap.curves[0].points.new(ix/len(histnorm), val)
                histmap.curves[0].points.new((ix + 0.9)/len(histnorm), val)
            histmap.curves[0].points[ix].handle_type = 'VECTOR'
        return histnode

    def update_material(self, mat, ch):
        """Sync the volume shader with the channel settings.

        Redraws the histogram, recolors the LUT, and rebuilds the shader tail
        as either emission or absorption+scatter depending on ch['emission'].
        """
        nodes = mat.node_tree.nodes
        links = mat.node_tree.links
        node_names = [node.name for node in nodes]
        if self.min_type in ch['metadata']:
            if '[Histogram]' in node_names and ch['metadata'][self.min_type] is not None:
                # redraw the histogram in place, then drop the stale node
                histnode= nodes["[Histogram]"]
                self.draw_histogram(nodes, histnode.location,histnode.width, ch['metadata'][self.min_type]['histogram'])
                nodes.remove(histnode)
        try:
            ch_load = nodes[f"[channel_load_{ch['identifier']}]"]
            shader_in_color = nodes['[shader_in_color]']
            shader_in_alpha = nodes['[shader_in_alpha]']
            shader_out = nodes['[shader_out]']
            lut = nodes['[color_lut]']
        except KeyError as e:
            print(e, " skipping update of shader")
            return
        min_nodes.shader_nodes.set_color_ramp_from_ch(ch, lut)
        if '[shaderframe]' not in node_names:
            shaderframe = nodes.new('NodeFrame')
            shaderframe.name = '[shaderframe]'
            shaderframe.use_custom_color = True
            shaderframe.color = (0.2,0.2,0.2)
            shader_in_color.parent = shaderframe
            shader_in_alpha.parent = shaderframe
            shader_out.parent = shaderframe
        else:
            shaderframe = nodes['[shaderframe]']
        ch_load.label = ch['name']
        # removes of other type, if any of current type exist, don't update
        setting, remove = 'absorb', 'emit'
        if ch['emission']:
            setting, remove = 'emit', 'absorb'
        for node in nodes:
            if remove in node.name:
                nodes.remove(node)
            elif setting in node.name:
                return
        if ch['emission']:
            emit = nodes.new(type='ShaderNodeEmission')
            emit.name = '[emit]'
            emit.location = (250,0)
            links.new(shader_in_color.outputs[0], emit.inputs.get('Color'))
            links.new(shader_in_alpha.outputs[0], emit.inputs[1])
            links.new(emit.outputs[0], shader_out.inputs[0])
            emit.parent=shaderframe
        else:
            # absorption + scatter, summed with an Add Shader
            adsorb = nodes.new(type='ShaderNodeVolumeAbsorption')
            adsorb.name = 'absorb [absorb]'
            adsorb.location = (50,-100)
            links.new(shader_in_color.outputs[0], adsorb.inputs.get('Color'))
            links.new(shader_in_alpha.outputs[0], adsorb.inputs.get('Density'))
            scatter = nodes.new(type='ShaderNodeVolumeScatter')
            scatter.name = 'scatter absorb'
            scatter.location = (250,-200)
            links.new(shader_in_color.outputs[0], scatter.inputs.get('Color'))
            links.new(shader_in_alpha.outputs[0], scatter.inputs.get('Density'))
            scatter.parent=shaderframe
            add = nodes.new(type='ShaderNodeAddShader')
            add.name = 'add [absorb]'
            add.location = (450, -100)
            links.new(adsorb.outputs[0], add.inputs[0])
            links.new(scatter.outputs[0], add.inputs[1])
            links.new(add.outputs[0], shader_out.inputs[0])
            add.parent=shaderframe
        try:
            # expose the most useful sliders in the sidebar/outliner
            for node in nodes:
                if (len(node.inputs) > 0 and not node.hide) and node.type != 'VALTORGB':
                    node.inputs[0].show_expanded = True
                if node.inputs.get('Strength') is not None:
                    node.inputs.get('Strength').show_expanded= True
                if node.inputs.get('Density') is not None:
                    node.inputs.get('Density').show_expanded= True
            shader_in_alpha.inputs[0].show_expanded=True
            nodes['[volume_alpha]'].inputs[0].show_expanded = True
        except:
            print('could not set outliner options expanded in shader')
        return

    def add_material(self, ch):
        """Build the full volume shader node tree for a new channel material.

        Pipeline: attribute (grid) -> normalize to data range -> alpha ramp
        (thresholds) -> color LUT / alpha group -> reroutes consumed by
        update_material's emission/absorption tail -> material output.
        """
        mat = super().add_material(ch)
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        links = mat.node_tree.links
        if nodes.get("Principled BSDF") is not None:
            nodes.remove(nodes.get("Principled BSDF"))
        if nodes.get("Principled Volume") is not None:
            nodes.remove(nodes.get("Principled Volume"))
        node_attr = nodes.new(type='ShaderNodeAttribute')
        node_attr.location = (-1600, 0)
        node_attr.name = f"[channel_load_{ch['identifier']}]"
        try:
            ch['metadata'][self.min_type]['datapointer'].grids.load()
            node_attr.attribute_name = ch['metadata'][self.min_type]['datapointer'].grids[0].name
        except Exception:
            # fall back to the default grid name written by make_vdb
            node_attr.attribute_name = f"data"
        node_attr.label = ch['name']
        node_attr.hide =True
        normnode = nodes.new(type="ShaderNodeMapRange")
        normnode.location = (-1400, 0)
        normnode.label = "Normalize data"
        # map [range_min, range_max] onto [0, 1]
        normnode.inputs[1].default_value = ch['metadata'][self.min_type]['range'][0]
        normnode.inputs[2].default_value = ch['metadata'][self.min_type]['range'][1]
        links.new(node_attr.outputs.get("Fac"), normnode.inputs[0])
        normnode.hide = True
        ramp_node = nodes.new(type="ShaderNodeValToRGB")
        ramp_node.location = (-1200, 0)
        ramp_node.width = 1000
        # lower/upper threshold expressed as ramp element positions
        ramp_node.color_ramp.elements[0].position = ch['metadata'][self.min_type]['threshold']
        ramp_node.color_ramp.elements[0].color = (1,1,1,0)
        ramp_node.color_ramp.elements[1].color = (1,1,1,1)
        ramp_node.color_ramp.elements[1].position = 1
        ramp_node.name = '[alpha_ramp]'
        ramp_node.label = "Pixel Intensities"
        if 'threshold_upper' in ch['metadata'][self.min_type]:
            ramp_node.color_ramp.elements[1].position = ch['metadata'][self.min_type]['threshold_upper']
        ramp_node.outputs[0].hide = True
        links.new(normnode.outputs.get('Result'), ramp_node.inputs.get("Fac"))
        self.draw_histogram(nodes, (-1200, 300), 1000, ch['metadata'][self.min_type]['histogram'])
        alphanode = nodes.new('ShaderNodeGroup')
        alphanode.node_tree = min_nodes.shader_nodes.volume_alpha_node()
        alphanode.name = '[volume_alpha]'
        alphanode.location = (-300, -120)
        alphanode.show_options = False
        links.new(ramp_node.outputs.get('Alpha'), alphanode.inputs.get("Value"))
        alphanode.width = 300
        color_lut = nodes.new(type="ShaderNodeValToRGB")
        color_lut.location = (-300, 120)
        color_lut.width = 300
        color_lut.name = "[color_lut]"
        color_lut.outputs[1].hide = True
        links.new(ramp_node.outputs[1], color_lut.inputs[0])
        # reroutes decouple the fixed front half from the swappable shader tail
        shader_in_color = nodes.new('NodeReroute')
        shader_in_color.name = f"[shader_in_color]"
        shader_in_color.location = (100, 0)
        links.new(color_lut.outputs[0], shader_in_color.inputs[0])
        shader_in_alpha = nodes.new('NodeReroute')
        shader_in_alpha.name = f"[shader_in_alpha]"
        shader_in_alpha.location = (100, -50)
        links.new(alphanode.outputs[0], shader_in_alpha.inputs[0])
        shader_out = nodes.new('NodeReroute')
        shader_out.location = (600, 0)
        shader_out.name = f"[shader_out]"
        if nodes.get("Material Output") is None:
            outnode = nodes.new(type='ShaderNodeOutputMaterial')
            outnode.name = 'Material Output'
        links.new(shader_out.outputs[0], nodes.get("Material Output").inputs.get('Volume'))
        nodes.get("Material Output").location = (700,00)
        return mat
# Simplified rewrite of skimage.filters.threshold_isodata from
# https://github.com/scikit-image/scikit-image/blob/v0.25.2/skimage/filters/thresholding.py
# avoids packaging all of skimage for just this function
def threshold_isodata(image=None, nbins=256, return_all=False, hist=None):
    """Ridler-Calvard (isodata) threshold of an image or precomputed histogram.

    Accepts either raw `image` data (binned into `nbins`) or a `hist` given
    as counts (bin centers default to 0..len-1) or a (counts, centers) tuple.
    Returns the first valid threshold, or all of them when `return_all`.
    """
    if hist is None:
        hist, edges = np.histogram(image.ravel(), bins=nbins)
        bin_centers = (edges[:-1] + edges[1:]) / 2
    elif isinstance(hist, tuple):
        hist, bin_centers = hist
    else:
        bin_centers = np.arange(len(hist))
    if len(bin_centers) == 1:
        # degenerate single-bin histogram: only one possible threshold
        return bin_centers if return_all else bin_centers[0]
    weights = hist.astype(float)
    cum_lo = np.cumsum(weights)
    cum_hi = cum_lo[-1] - cum_lo
    cum_moment = np.cumsum(weights * bin_centers)
    # class means below / above each candidate threshold
    mean_lo = cum_moment[:-1] / cum_lo[:-1]
    mean_hi = (cum_moment[-1] - cum_moment[:-1]) / cum_hi[:-1]
    midpoints = (mean_lo + mean_hi) / 2.0
    width = bin_centers[1] - bin_centers[0]
    # a threshold is valid where it sits within one bin of the class-mean midpoint
    offsets = midpoints - bin_centers[:-1]
    candidates = bin_centers[:-1][(offsets >= 0) & (offsets < width)]
    if candidates.size == 0:
        candidates = np.array([bin_centers[np.argmin(np.abs(offsets))]])
    return candidates if return_all else candidates[0]
# simplified version of https://github.com/scipy/scipy/blob/v1.16.2/scipy/stats/_binned_statistic.py
def binned_statistic_sum(x, values, bins):
    """Sum `values` into `bins` equal-width bins spanning [x.min(), x.max()]."""
    positions = np.asarray(x)
    weights = np.asarray(values)
    edges = np.linspace(positions.min(), positions.max(), bins + 1)
    # map each x to its bin; the rightmost sample lands past the last edge and is clipped back in
    bin_ix = np.clip(np.searchsorted(edges, positions, side='right') - 1, 0, edges.size - 2)
    totals = np.zeros(edges.size - 1, dtype=weights.dtype)
    np.add.at(totals, bin_ix, weights)
    return totals
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/__init__.py | .py | 169 | 6 | from .load_generic import *
from .load_axes import *
from .load_labelmask import *
from .load_volume import *
from .load_surfaces import *
from .load_slice_cube import * | Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/load_axes.py | .py | 9,508 | 240 | import bpy
import numpy as np
from pathlib import Path
from ..handle_blender_structs import *
from .. import min_nodes
def load_axes(size_px, pixel_size, scale, scale_factor, axes_obj=None, container=None):
    """Create an axes object for the dataset, or refresh an existing one.

    When `axes_obj` is given, its node-group inputs are updated and its
    location rescaled by `scale_factor`; otherwise a new axes object is
    built centered on the dataset and parented to `container`.
    """
    if axes_obj is None:
        # place the new object at the xy-center of the dataset, at z = 0
        center = tuple(np.array([0.5, 0.5, 0]) * size_px * scale)
        new_obj = init_axes(size_px, pixel_size, scale, center, container)
        new_obj.parent = container
        return new_obj
    update_axes(get_min_gn(axes_obj), size_px, pixel_size, scale)
    old_loc = np.array(axes_obj.location)
    axes_obj.location = old_loc + old_loc * (scale_factor - 1)
    return axes_obj
def update_axes(mod, size_px, pixel_size, scale):
    """Push fresh shape/scale values into an existing axes node group."""
    tree = mod.node_group
    tick_socket = tree.interface.items_tree[1]
    if 'per tick' in tick_socket.name:
        # refresh the socket label in case the scene unit setting changed
        tick_socket.name = f"{get_readable_enum('MiN_unit', bpy.context.scene.MiN_unit)} per tick"
    vectors = {"size_px": size_px, "pixel_size": pixel_size, "scale": scale}
    for key, vec in vectors.items():
        tree.nodes[f"[Microscopy Nodes {key}]"].vector = vec
    return
def init_axes(size_px, pixel_size, scale, location, container):
    """Build the axes/scale-bar object with its geometry-nodes tree.

    Creates a cube at `location`, drives it with a node group fed by the
    dataset shape (px), pixel size (unit/px), and the load-time scale
    transform, picks a "nice" tick spacing, and assigns the axes material.
    """
    # NOTE(review): bpy.ops returns a status set, not the object; the real
    # object is fetched from the active object on the next line
    axes_obj = bpy.ops.mesh.primitive_cube_add(location=location)
    axes_obj = bpy.context.view_layer.objects.active
    axes_obj.data.name = 'axes'
    axes_obj.name = 'axes'
    bpy.ops.object.modifier_add(type='NODES')
    node_group = bpy.data.node_groups.new(f'axes', 'GeometryNodeTree')
    axes_obj.modifiers[-1].name = f"[Microscopy Nodes axes]"
    axes_obj.modifiers[-1].node_group = node_group
    nodes = node_group.nodes
    links = node_group.links
    inputnode = node_group.nodes.new('NodeGroupInput')
    inputnode.location = (-400, -200)
    # dataset shape in pixels
    axnode = nodes.new('FunctionNodeInputVector')
    axnode.name = '[Microscopy Nodes size_px]'
    axnode.label = "n pixels"
    axnode.location = (-400, 200)
    for axix in range(len(size_px)):
        axnode.vector[axix] = size_px[axix]
    initscale_node = nodes.new('FunctionNodeInputVector')
    initscale_node.name = '[Microscopy Nodes scale]'
    initscale_node.label = "Scale transform on load"
    initscale_node.location = (-400, 0)
    initscale_node.vector = scale
    scale_node = nodes.new('FunctionNodeInputVector')
    scale_node.label = 'scale (unit/px)'
    scale_node.name = '[Microscopy Nodes pixel_size]'
    scale_node.vector = pixel_size
    scale_node.location = (-800, -200)
    # physical size = n pixels * pixel size
    axnode_um = nodes.new('ShaderNodeVectorMath')
    axnode_um.operation = "MULTIPLY"
    axnode_um.name = "size (µm)"
    axnode_um.label = "size (µm)"
    axnode_um.location = (-50, 200)
    links.new(axnode.outputs[0], axnode_um.inputs[0])
    links.new(scale_node.outputs[0], axnode_um.inputs[1])
    # Blender-world size = n pixels * load-time scale
    axnode_bm = nodes.new('ShaderNodeVectorMath')
    axnode_bm.operation = "MULTIPLY"
    axnode_bm.name = "size (m)"
    axnode_bm.label = "size (m)"
    axnode_bm.location = (-50, 50)
    links.new(axnode.outputs[0], axnode_bm.inputs[0])
    links.new(initscale_node.outputs[0], axnode_bm.inputs[1])
    # compare this object's scale with the container's so interactive
    # rescaling keeps the displayed physical units correct
    selfinfo = nodes.new('GeometryNodeObjectInfo')
    selfinfo.inputs[0].default_value = axes_obj
    selfinfo.location = (-1050, -100)
    containerinfo = nodes.new('GeometryNodeObjectInfo')
    containerinfo.inputs[0].default_value = container
    containerinfo.location = (-1050, 200)
    div_obj_scale = nodes.new('ShaderNodeVectorMath')
    div_obj_scale.operation = "DIVIDE"
    div_obj_scale.location = (-800, 0)
    links.new(selfinfo.outputs.get("Scale"), div_obj_scale.inputs[0])
    links.new(containerinfo.outputs.get("Scale"), div_obj_scale.inputs[1])
    mult_obj_scale = nodes.new('ShaderNodeVectorMath')
    mult_obj_scale.operation = "MULTIPLY"
    mult_obj_scale.location = (-600, 0)
    links.new(div_obj_scale.outputs[0], mult_obj_scale.inputs[0])
    links.new(scale_node.outputs[0], mult_obj_scale.inputs[1])
    # re-link the µm input through the relative-scale correction; this
    # replaces the direct pixel-size link made above
    links.new( mult_obj_scale.outputs[0], axnode_um.inputs[1])
    crosshatch = nodes.new('GeometryNodeGroup')
    crosshatch.node_tree = min_nodes.crosshatch_node_group()
    crosshatch.location = (-50, -140)
    axes_select = nodes.new('GeometryNodeGroup')
    axes_select.node_tree = min_nodes.axes_multiplexer_node_group()
    axes_select.label = "Subselect axes"
    axes_select.name = "Axis Selection"
    axes_select.width = 150
    axes_select.location = (-50, -320)
    # NOTE(review): scale_node is re-bound here to the grid-generating group
    # node; the pixel-size input vector above keeps its existing links
    scale_node = nodes.new('GeometryNodeGroup')
    scale_node.node_tree = min_nodes.scale_node_group()
    scale_node.width = 300
    scale_node.location = (200, 100)
    # modifier-facing inputs: tick spacing, grid toggle, line thickness
    node_group.interface.new_socket(name=f"{get_readable_enum('MiN_unit', bpy.context.scene.MiN_unit)} per tick", in_out="INPUT",socket_type='NodeSocketFloat')
    node_group.interface.new_socket(name='Grid', in_out="INPUT",socket_type='NodeSocketBool')
    links.new(inputnode.outputs[0], scale_node.inputs.get(f'µm per tick'))
    links.new(inputnode.outputs[1], scale_node.inputs.get('Grid'))
    node_group.interface.new_socket(name='Line thickness', in_out="INPUT",socket_type='NodeSocketFloat')
    links.new(inputnode.outputs[2], scale_node.inputs.get('Line thickness'))
    axes_obj.modifiers[-1][node_group.interface.items_tree[-1].identifier] =1.0
    links.new(axnode_bm.outputs[0], scale_node.inputs.get('Size (m)'))
    links.new(axnode_um.outputs[0], scale_node.inputs.get('Size (µm)'))
    links.new(axes_select.outputs[0], scale_node.inputs.get('Axis Selection'))
    axes_mat = init_material_axes()
    scale_node.inputs.get("Material").default_value = axes_mat
    # crude version of Heckbert 1990 tick number algorithm, with minimum for perspective
    max_um = np.max(size_px * pixel_size)
    target_nr_of_ticks = 7
    min_ticks = 3
    # candidate spacings: 1/2/5 times powers of ten
    nice_nrs = np.outer(np.array([1,2,5]), np.array([10**mag for mag in range(-4, 8)]))
    ticks = max_um // nice_nrs
    dists = np.abs(ticks[ticks >= min_ticks] - target_nr_of_ticks)
    tick_um = nice_nrs[ticks >= min_ticks].flatten()[np.argmin(dists)]
    scale_node.inputs.get(f'µm per tick').default_value = tick_um
    # set input values
    axes_obj.modifiers[-1][node_group.interface.items_tree[0].identifier] = tick_um
    axes_obj.modifiers[-1][node_group.interface.items_tree[1].identifier] = True
    # one boolean modifier input per selectable axis
    for ax_input in axes_select.inputs:
        node_group.interface.new_socket(name=ax_input.name, in_out="INPUT",socket_type='NodeSocketBool')
        links.new(inputnode.outputs.get(ax_input.name), ax_input)
        axes_obj.modifiers[-1][node_group.interface.items_tree[-1].identifier] = True
    node_group.interface.new_socket("Geometry",in_out="OUTPUT", socket_type='NodeSocketGeometry')
    outnode = nodes.new('NodeGroupOutput')
    outnode.location = (800,0)
    links.new(scale_node.outputs[0], outnode.inputs[0])
    if axes_obj.data.materials:
        axes_obj.data.materials[0] = axes_mat
    else:
        axes_obj.data.materials.append(axes_mat)
    return axes_obj
def init_material_axes():
    """Create the axes material with view-dependent frontface culling.

    The shader mixes a flat color with a transparent BSDF; faces whose
    stored original normal points toward the camera can be hidden when the
    'frontface culling' attribute is set, so grid walls never occlude the
    data from the viewer's side.
    """
    mat = bpy.data.materials.new('axes')
    mat.blend_method = "BLEND"
    mat.use_nodes = True
    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    if nodes.get("Principled BSDF") is not None:
        nodes.remove(nodes.get("Principled BSDF"))
    # original face normal, written by the geometry nodes tree
    gridnormal = nodes.new("ShaderNodeAttribute")
    gridnormal.attribute_name = 'orig_normal'
    gridnormal.location = (-800, -100)
    viewvec = nodes.new("ShaderNodeCameraData")
    viewvec.location = (-800, -300)
    # bring the camera view vector into object space to compare with the normal
    vectransform = nodes.new("ShaderNodeVectorTransform")
    vectransform.location = (-600, -300)
    vectransform.vector_type = 'VECTOR'
    vectransform.convert_from = "CAMERA"
    vectransform.convert_to = "OBJECT"
    links.new(viewvec.outputs[0], vectransform.inputs[0])
    dot = nodes.new("ShaderNodeVectorMath")
    dot.operation = "DOT_PRODUCT"
    dot.location = (-400, -200)
    links.new(gridnormal.outputs[1], dot.inputs[0])
    links.new(vectransform.outputs[0], dot.inputs[1])
    # dot < 0 means the face points toward the camera
    lesst = nodes.new("ShaderNodeMath")
    lesst.operation = "LESS_THAN"
    lesst.location =(-200, -200)
    links.new(dot.outputs.get("Value"), lesst.inputs[0])
    lesst.inputs[1].default_value = 0
    # per-face toggle controlling whether culling applies at all
    culling_bool = nodes.new("ShaderNodeAttribute")
    culling_bool.attribute_name = 'frontface culling'
    culling_bool.location = (-200, -400)
    comb = nodes.new("ShaderNodeMath")
    comb.operation = "ADD"
    comb.location =(0, -300)
    links.new(lesst.outputs[0], comb.inputs[0])
    links.new(culling_bool.outputs[2], comb.inputs[1])
    # AND implemented as (a + b) == 2 within tolerance
    and_op = nodes.new("ShaderNodeMath")
    and_op.operation = "COMPARE"
    and_op.location =(200, -300)
    links.new(comb.outputs[0], and_op.inputs[0])
    and_op.inputs[1].default_value = 2.0
    and_op.inputs[2].default_value = 0.01
    colorattr = nodes.new("ShaderNodeRGB")
    colorattr.location = (200, 150)
    trbsdf = nodes.new("ShaderNodeBsdfTransparent")
    trbsdf.location = (200, -100)
    # culled faces switch from the flat color to fully transparent
    mix = nodes.new("ShaderNodeMixShader")
    mix.location = (450, 0)
    links.new(colorattr.outputs[0], mix.inputs[1])
    mix.inputs[1].show_expanded = True
    links.new(trbsdf.outputs[0], mix.inputs[2])
    links.new(and_op.outputs[0], mix.inputs[0])
    if nodes.get("Material Output") is None:
        outnode = nodes.new(type='ShaderNodeOutputMaterial')
        outnode.name = 'Material Output'
    out = nodes.get("Material Output")
    out.location = (650, 0)
    links.new(mix.outputs[0], out.inputs[0])
    return mat
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/load_slice_cube.py | .py | 1,546 | 34 | import bpy
from .. import handle_blender_structs
import numpy as np
def load_slice_cube(size_px, scale, scale_factor, container, slicecube=None):
    """Create (or rescale) the invisible slicing cube bounding the dataset.

    A new cube spans the dataset extent, carries an empty Microscopy Nodes
    geometry-nodes modifier (used as a marker on reload) and a fully
    transparent material. Location and scale are then adjusted by
    `scale_factor` to follow pixel-size changes.
    """
    if slicecube is None:
        bpy.ops.mesh.primitive_cube_add(location=size_px*scale/2)
        slicecube = bpy.context.active_object
        slicecube.name = "slice cube"
        # default cube is 2 units wide, so half-extent equals the scale
        slicecube.scale = size_px * scale /2
        bpy.ops.object.modifier_add(type='NODES')
        # slicecube.modifiers[-1].name = f"Slice cube empty modifier (for reloading)"
        slicecube.modifiers[-1].name = f"[Microscopy Nodes slicecube]"
        mat = bpy.data.materials.new(f'Slice Cube')
        mat.blend_method = "HASHED"
        mat.use_nodes = True
        if mat.node_tree.nodes.get("Principled BSDF") is None:
            mat.node_tree.nodes.new('ShaderNodeBsdfPrincipled')
        if mat.node_tree.nodes.get("Material Output") is None:
            out = mat.node_tree.nodes.new(type="ShaderNodeOutputMaterial")
            out.location = (400,0)
        mat.node_tree.links.new(
            mat.node_tree.nodes['Principled BSDF'].outputs['BSDF'],
            mat.node_tree.nodes['Material Output'].inputs['Surface']
        )
        # alpha 0: the cube itself never renders, it only drives slicing
        mat.node_tree.nodes['Principled BSDF'].inputs.get("Alpha").default_value = 0
        slicecube.data.materials.append(mat)
        slicecube.parent = container
    slicecube.location = np.array(slicecube.location)+ ( np.array(slicecube.location)*(scale_factor - 1))
    slicecube.scale = np.array(slicecube.scale) * scale_factor
    return slicecube
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/load_surfaces.py | .py | 5,457 | 129 | import bpy
from .load_generic import *
from ..handle_blender_structs import *
from .. import min_nodes
class SurfaceIO(DataIO):
    """Surfaces are meshed from volume data, so import defers to the volume loader."""

    def import_data(self, ch, scale):
        """Reuse the channel's already-imported volume when present, else import it."""
        collections = ch['collections']
        if min_keys.VOLUME not in collections:
            # local import avoids a circular module dependency
            from .load_volume import VolumeIO
            return VolumeIO().import_data(ch, scale)
        return collections[min_keys.VOLUME], ch['metadata'][min_keys.VOLUME]
class SurfaceObject(ChannelObject):
    """Holder object for surface channels: meshes volumes via Volume-to-Mesh nodes."""
    min_type = min_keys.SURFACE

    def add_material(self, ch):
        """Create the surface material: color ramp LUT feeding a Principled BSDF."""
        mat = super().add_material(ch)
        mat.blend_method = "HASHED"
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        links = mat.node_tree.links
        if nodes.get("Principled BSDF") is None:
            try:
                # replace a default volume shader if Blender created one
                nodes.remove(nodes.get("Principled Volume"))
            except Exception as e:
                print(e)
                pass
            princ = nodes.new("ShaderNodeBsdfPrincipled")
            links.new(princ.outputs[0], nodes.get('Material Output').inputs[0])
        princ = nodes.get("Principled BSDF")
        princ.name = f"[{ch['identifier']}] principled"
        color_lut = nodes.new(type="ShaderNodeValToRGB")
        color_lut.location = (princ.location[0]-400, princ.location[1])
        color_lut.width = 300
        color_lut.name = "[color_lut]"
        color_lut.outputs[1].hide = True
        color_lut.inputs[0].default_value = 1
        # LUT drives base color and inputs[27] (emission color)
        links.new(color_lut.outputs[0], princ.inputs.get('Base Color'))
        links.new(color_lut.outputs[0], princ.inputs[27])
        princ.inputs.get('Alpha').default_value = 0.8
        return mat

    def append_channel_to_holder(self, ch):
        """Insert a Volume-to-Mesh node for this channel and expose its threshold input."""
        super().append_channel_to_holder(ch)
        in_node = get_safe_node_input(self.node_group)
        nodes = self.node_group.nodes
        links = self.node_group.links
        # can be explicitly named as this should only be called upon appending a channel
        edit_in = nodes[f"edit_in_{ch['identifier']}"]
        edit_out = nodes[f"edit_out_{ch['identifier']}"]
        editframe = nodes[f"editframe_{ch['identifier']}"]
        v2m = nodes.new('GeometryNodeVolumeToMesh')
        v2m.name = f"VOL_TO_MESH_{ch['identifier']}"
        v2m.location = (edit_in.location[0] + 400, edit_in.location[1])
        v2m.parent = editframe
        links.new(edit_in.outputs[0], v2m.inputs.get('Volume'))
        links.new(v2m.outputs.get('Mesh'), edit_out.inputs[0])
        # threshold socket goes right after the channel's switch socket
        socket_ix = get_socket(self.node_group, ch, return_ix=True, min_type="SWITCH")[1]
        threshold_socket = new_socket(self.node_group, ch, 'NodeSocketFloat', min_type='THRESHOLD', ix=socket_ix+1)
        threshold_socket.min_value = 0.0
        threshold_socket.max_value = 1.001
        threshold_socket.attribute_domain = 'POINT'
        self.gn_mod[threshold_socket.identifier] = ch['metadata'][self.min_type]['threshold']
        # user threshold is in normalized [0, 1]; map it back to the data range
        normnode = self.node_group.nodes.new(type="ShaderNodeMapRange")
        normnode.location =(edit_in.location[0] + 200, edit_in.location[1]-150)
        normnode.label = "Normalize data"
        normnode.inputs[3].default_value = ch['metadata'][self.min_type]['range'][0]
        normnode.inputs[4].default_value = ch['metadata'][self.min_type]['range'][1]
        links.new(in_node.outputs.get(threshold_socket.name), normnode.inputs[0])
        links.new(normnode.outputs[0], v2m.inputs.get("Threshold"))
        normnode.hide = True
        return

    def update_gn(self, ch):
        """Apply the channel's surface-resolution setting to its Volume-to-Mesh node.

        Resolution 0 uses the grid resolution directly; 1-3 switch to voxel
        size mode with a per-level socket (recreated when the level changes).
        """
        if f"VOL_TO_MESH_{ch['identifier']}" not in [node.name for node in self.node_group.nodes]:
            return
        v2m = self.node_group.nodes[f"VOL_TO_MESH_{ch['identifier']}"]
        if ch['surf_resolution'] == 0:
            v2m.resolution_mode='GRID'
            return
        else:
            v2m.resolution_mode='VOXEL_SIZE'
        # drop voxel-size sockets of other resolution levels; keep a matching one
        for i in range(4):
            socket = get_socket(self.node_group, ch, min_type='VOXEL_SIZE', internal_append=str(i))
            if socket is not None:
                if i == ch['surf_resolution']:
                    return
                self.node_group.interface.remove(item=socket)
        socket_ix = get_socket(self.node_group, ch, min_type="SWITCH",return_ix=True)[1]
        socket = new_socket(self.node_group, ch, 'NodeSocketFloat', min_type='VOXEL_SIZE',internal_append=f"{ch['surf_resolution']}", ix=socket_ix+1)
        default_settings = [None, 0.5, 4, 15] # resolution step sizes
        in_node = get_safe_node_input(self.node_group)
        self.node_group.links.new(in_node.outputs.get(socket.name), v2m.inputs.get('Voxel Size'))
        self.gn_mod[socket.identifier] = default_settings[ch['surf_resolution']]
        return

    def update_material(self, mat, ch):
        """Recolor the LUT and toggle emission strength; failures are printed and ignored."""
        try:
            princ = mat.node_tree.nodes.get(f"[{ch['identifier']}] principled")
            # NOTE(review): `color` is unused
            color = min_nodes.shader_nodes.get_lut(ch['cmap'], ch['single_color'])[-1]
            colornode = mat.node_tree.nodes.get(f"[color_lut]")
            min_nodes.shader_nodes.set_color_ramp_from_ch(ch, colornode)
            # inputs[28] is emission strength; only toggle between the two known states
            if ch['emission'] and princ.inputs[28].default_value == 0.0:
                princ.inputs[28].default_value = 0.5
            elif not ch['emission'] and princ.inputs[28].default_value == 0.5:
                princ.inputs[28].default_value = 0
        except Exception as e:
            print(e, 'in update surface shader')
            pass
        return
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/load_components/load_generic.py | .py | 8,309 | 218 | import bpy, bpy_types
from ..handle_blender_structs import *
import numpy as np
def ChannelObjectFactory(min_key, obj):
    """Instantiate the ChannelObject subclass matching min_key (VOLUME / SURFACE / LABELMASK)."""
    # local imports avoid circular module dependencies
    if min_key == min_keys.LABELMASK:
        from .load_labelmask import LabelmaskObject
        return LabelmaskObject(obj)
    if min_key == min_keys.SURFACE:
        from .load_surfaces import SurfaceObject
        return SurfaceObject(obj)
    if min_key == min_keys.VOLUME:
        from .load_volume import VolumeObject
        return VolumeObject(obj)
def DataIOFactory(min_key):
    """Instantiate the DataIO subclass matching min_key (VOLUME / SURFACE / LABELMASK)."""
    # local imports avoid circular module dependencies
    if min_key == min_keys.LABELMASK:
        from .load_labelmask import LabelmaskIO
        return LabelmaskIO()
    if min_key == min_keys.SURFACE:
        from .load_surfaces import SurfaceIO
        return SurfaceIO()
    if min_key == min_keys.VOLUME:
        from .load_volume import VolumeIO
        return VolumeIO()
class DataIO():
    """Base class for per-datatype exporters/importers; subclasses override both methods."""
    # which min_keys entry the subclass handles
    min_type = min_keys.NONE

    def export_ch(self, ch, axes_order, remake, cache_dir):
        # NOTE(review): VolumeIO.export_ch declares (ch, cache_dir, remake, axes_order);
        # the parameter order differs from this base signature — confirm call sites use keywords.
        # return paths to local files with metadata in list of dcts
        return []

    def import_data(self, ch, scale):
        # return collection, metadata
        return None, None
class ChannelObject():
    """Base wrapper around a Blender object whose geometry-nodes tree renders
    one Microscopy Nodes data type (volume, mesh, labelmask, ...).

    Subclasses set `min_type` and specialize the hook methods
    (`channel_nodes`, `update_material`, `update_gn`).
    """
    # data type handled by this wrapper; overridden per subclass
    min_type = min_keys.NONE
    # Blender object carrying the geometry-nodes modifier
    obj = None
    # the "[Microscopy Nodes ...]" geometry-nodes modifier on `obj`
    gn_mod = None
    # node tree attached to `gn_mod`
    node_group = None
    def __init__(self, obj):
        """Wrap `obj`; create a fresh object + node tree when obj is None."""
        if obj is None:
            obj = self.init_obj()
        self.obj = obj
        self.gn_mod = get_min_gn(obj)
        self.node_group =self.gn_mod.node_group
    def init_obj(self):
        """Create and return a new transform-locked Blender object with an
        empty Microscopy Nodes geometry-nodes modifier named after `min_type`."""
        if self.min_type == min_keys.VOLUME: # makes the icon show up
            bpy.ops.object.volume_add(align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
        else:
            bpy.ops.mesh.primitive_cube_add()
        obj = bpy.context.view_layer.objects.active
        name = self.min_type.name.lower()
        obj.name = name
        bpy.ops.object.modifier_add(type='NODES')
        node_group = bpy.data.node_groups.new(name, 'GeometryNodeTree')
        obj.modifiers[-1].node_group = node_group
        obj.modifiers[-1].name = f"[Microscopy Nodes {name}]"
        node_group.interface.new_socket(name='Geometry', in_out="OUTPUT",socket_type='NodeSocketGeometry')
        inputnode = node_group.nodes.new('NodeGroupInput')
        inputnode.location = (-900, 0)
        outnode = node_group.nodes.new('NodeGroupOutput')
        outnode.location = (800, -100)
        # lock all transforms; presumably users move/scale via a parent object
        for dim in range(3):
            obj.lock_location[dim] = True
            obj.lock_rotation[dim] = True
            obj.lock_scale[dim] = True
        return obj
    def add_material(self, ch):
        """Create a new material for channel `ch`, append it to this object's
        material slots, and return it."""
        mat = bpy.data.materials.new(f"{ch['name']} {self.min_type.name.lower()}")
        self.obj.data.materials.append(mat)
        return mat
    def update_ch_data(self, ch):
        """(Re)point the channel's collection-load node at the data collection
        in ch['collections'], building the channel subgraph on first use."""
        if self.min_type in ch['collections'] and not self.ch_present(ch):
            self.append_channel_to_holder(ch)
        loadnode = self.node_group.nodes[f"channel_load_{ch['identifier']}"]
        loadnode.label = ch['name']
        if loadnode.parent is not None:
            loadnode.parent.label = f"{ch['name']} data"
        # release the previously loaded collection before swapping in the new one
        clear_collection(loadnode.inputs[0].default_value)
        loadnode.inputs[0].default_value = ch['collections'][self.min_type]
        return
    def update_ch_settings(self, ch):
        """Sync socket names, node-group settings, channel materials, and the
        per-channel enable switch with the current settings in `ch`."""
        if not self.ch_present(ch):
            return
        # rename interface sockets that belong to this channel
        for ix, socket in enumerate(self.node_group.interface.items_tree):
            if isinstance(socket, bpy.types.NodeTreeInterfaceSocket) and ch['identifier'] in socket.default_attribute_name:
                set_name_socket(socket, ch['name'])
        self.update_gn(ch)
        for mat in self.obj.data.materials:
            # only touch materials containing nodes tagged with this channel's id
            if any([ch['identifier'] in node.name for node in mat.node_tree.nodes]):
                self.update_material(mat, ch)
        socket = get_socket(self.node_group, ch, min_type="SWITCH")
        if socket is not None:
            # drive the modifier-level switch from the channel's enabled flag
            self.gn_mod[socket.identifier] = bool(ch[self.min_type])
        return
    def ch_present(self, ch):
        """Return True when this channel already has a load node in the tree."""
        return f"channel_load_{ch['identifier']}" in [node.name for node in self.node_group.nodes]
    def update_material(self, mat, ch):
        """Hook: update channel material `mat`; overridden in subclasses."""
        return
    def update_gn(self, ch):
        """Hook: update channel geometry-node settings; overridden in subclasses."""
        return
    def append_channel_to_holder(self, ch):
        """Wire a new channel subgraph into the tree's shared Join Geometry
        node and expose a per-channel enable switch on the group input."""
        # assert that layout is reasonable or make this:
        joingeo, out_node, out_input = get_safe_nodes_last_output(self.node_group, make=True)
        in_node = get_safe_node_input(self.node_group, make=True)
        # step back past a trailing Realize Instances to find the join node
        if joingeo is not None and joingeo.type == "REALIZE_INSTANCES":
            joingeo = joingeo.inputs[0].links[0].from_node
        if joingeo is None or joingeo.type != "JOIN_GEOMETRY":
            joingeo = self.node_group.nodes.new('GeometryNodeJoinGeometry')
            insert_last_node(self.node_group, joingeo, safe=True)
            if self.min_type != min_keys.VOLUME:
                realize = self.node_group.nodes.new('GeometryNodeRealizeInstances')
                insert_last_node(self.node_group, realize, safe=True)
        if out_node.location[0] - 1200 < in_node.location[0]: # make sure there is enough space
            out_node.location[0] = in_node.location[0]+1200
        # add switch socket
        socket = new_socket(self.node_group, ch, 'NodeSocketBool', min_type="SWITCH")
        node_socket = in_node.outputs.get(socket.name)
        # make new channel, placed below all existing channel nodes
        min_y_loc = in_node.location[1] + 300
        for node in self.node_group.nodes:
            if node.name not in [in_node.name, out_node.name, joingeo.name]:
                min_y_loc = min(min_y_loc, node.location[1])
        in_ch, out_ch = self.channel_nodes(in_node.location[0] + 400, min_y_loc - 300, ch)
        self.node_group.links.new(node_socket, in_ch)
        self.node_group.links.new(out_ch, joingeo.inputs[-1])
        return
    def channel_nodes(self, x, y, ch):
        """Build one channel's subgraph starting at (x, y): collection load ->
        include-switch -> edit-frame reroutes -> set-material.

        Returns (switch_input_socket, set_material_output_socket) for wiring
        to the group input switch and the shared join node respectively.
        """
        nodes = self.node_group.nodes
        links = self.node_group.links
        interface = self.node_group.interface
        loadnode = nodes.new('GeometryNodeCollectionInfo')
        loadnode.location = (x , y + 100)
        loadnode.hide = True
        loadnode.label = ch['name']
        loadnode.transform_space='RELATIVE'
        # reload-func: node name is the stable handle used by update_ch_data
        loadnode.name = f"channel_load_{ch['identifier']}"
        switch = nodes.new('GeometryNodeSwitch')
        switch.location = (x, y + 50)
        switch.input_type = 'GEOMETRY'
        links.new(loadnode.outputs.get('Instances'), switch.inputs.get("True"))
        switch.hide = True
        switch.label = "Include channel"
        dataframe = nodes.new('NodeFrame')
        loadnode.parent = dataframe
        switch.parent = dataframe
        dataframe.label = f"{ch['name']} data"
        dataframe.name = f"dataframe_{ch['identifier']}"
        # reroute chain gives users a stable place to insert their own edits
        reroutes = [switch]
        for x_, y_ in [(220, 40), (0, -150), (850,0), (0, 150)]:
            x += x_
            y += y_
            reroutes.append(nodes.new('NodeReroute'))
            reroutes[-1].location= (x, y)
            links.new(reroutes[-2].outputs[0], reroutes[-1].inputs[0])
        x += 50
        editframe = nodes.new('NodeFrame')
        reroutes[2].parent = editframe
        reroutes[2].name = f"edit_in_{ch['identifier']}"
        reroutes[3].parent = editframe
        reroutes[3].name = f"edit_out_{ch['identifier']}"
        editframe.label = f"edit geometry"
        editframe.name = f"editframe_{ch['identifier']}"
        setmat = nodes.new('GeometryNodeSetMaterial')
        setmat.name = f"set_material_{ch['identifier']}"
        setmat.inputs.get('Material').default_value = self.add_material(ch)
        links.new(reroutes[-1].outputs[0], setmat.inputs.get('Geometry'))
        setmat.location = (x, y)
        setmat.hide= True
        return switch.inputs.get("Switch"), setmat.outputs[0]
    def set_parent_and_slicer(self, parent, slice_cube, ch):
        """Parent this object and the channel's data objects to `parent`, and
        insert slice-cube slicing into any material that lacks it."""
        self.obj.parent = parent
        for mat in self.obj.data.materials:
            if mat.node_tree.nodes.get("Slice Cube") is None:
                node_handling.insert_slicing(mat.node_tree, slice_cube)
        if self.min_type in ch['collections']:
            for obj in ch['collections'][self.min_type].all_objects:
                obj.parent = parent
import bpy
from .nodeElementWiseCompare import element_wise_compare_node_group
def slice_cube_node_group():
    """Return (creating on first call) the shared 'Slice Cube' shader group.

    Shows the input Shader only where the 'Slicing Object' coordinate lies
    inside the [-1, 1] cube on every axis; outside it becomes transparent.
    'Invert' flips which side is shown.
    """
    node_group = bpy.data.node_groups.get("Slice Cube")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type = 'ShaderNodeTree', name = "Slice Cube")
    links = node_group.links
    interface = node_group.interface
    nodes = node_group.nodes
    # group interface: shader in/out, slicing-space coordinate, invert toggle
    interface.new_socket("Shader",in_out="INPUT",socket_type='NodeSocketShader')
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Slicing Object",in_out="INPUT",socket_type='NodeSocketVector')
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Invert",in_out="INPUT",socket_type='NodeSocketBool')
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Shader",in_out="OUTPUT",socket_type='NodeSocketShader')
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (-200,0)
    # element-wise out-of-bounds tests: coordinate < -1 or coordinate > 1
    less_than = nodes.new('ShaderNodeGroup')
    less_than.node_tree = element_wise_compare_node_group("LESS_THAN")
    less_than.width = 250
    less_than.location = (0, 150)
    links.new(group_input.outputs.get('Slicing Object'),less_than.inputs.get('Vector'))
    less_than.inputs.get('Value').default_value = -1
    greater_than = nodes.new('ShaderNodeGroup')
    greater_than.node_tree = element_wise_compare_node_group("GREATER_THAN")
    greater_than.width = 250
    greater_than.location = (0, -150)
    links.new(group_input.outputs.get('Slicing Object'),greater_than.inputs.get('Vector'))
    greater_than.inputs.get('Value').default_value = 1
    # sum both flag vectors; any non-zero component means "outside the cube"
    add = nodes.new('ShaderNodeVectorMath')
    add.location = (300, 100)
    add.operation = "ADD"
    links.new(less_than.outputs[0], add.inputs[0])
    links.new(greater_than.outputs[0], add.inputs[1])
    # boolean NOT of Invert, expressed as (Invert < 0.5)
    boolnot = nodes.new('ShaderNodeMath')
    boolnot.location = (300, -100)
    boolnot.operation = 'LESS_THAN'
    boolnot.inputs[1].default_value = 0.5
    links.new(group_input.outputs.get('Invert'), boolnot.inputs[0])
    # comp == 1 when the outside-flags are ~0 (within epsilon 0.1), i.e. inside
    comp = nodes.new("ShaderNodeMath")
    comp.location = (450, 100)
    comp.operation = 'COMPARE'
    links.new(add.outputs[0], comp.inputs[0])
    comp.inputs[1].default_value = 0
    comp.inputs[2].default_value = 0.1
    # comp2 == 1 when inside-ness agrees with NOT Invert: keep the shader
    comp2 = nodes.new("ShaderNodeMath")
    comp2.location = (550, 0)
    comp2.operation = 'COMPARE'
    links.new(boolnot.outputs[0], comp2.inputs[0])
    links.new(comp.outputs[0], comp2.inputs[1])
    # mix factor 0 -> transparent BSDF, factor 1 -> original shader
    transparent = nodes.new(type='ShaderNodeBsdfTransparent')
    transparent.location = (450, -200)
    mix = nodes.new(type='ShaderNodeMixShader')
    mix.location =(650, -200)
    links.new(group_input.outputs.get("Shader"), mix.inputs[2])
    links.new(transparent.outputs[0], mix.inputs[1])
    links.new(comp2.outputs[0], mix.inputs[0])
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (800,-100)
    links.new(mix.outputs[0], group_output.inputs[0])
    return node_group
| Python |
import bpy
import numpy as np
from .nodeScaleBox import scalebox_node_group
from .nodeGridVerts import grid_verts_node_group
from .nodesBoolmultiplex import axes_demultiplexer_node_group
#initialize scale node group
def scale_node_group():
    """Return (creating on first call) the 'Scale bars (arbitrary pixel unit)'
    geometry node group.

    Builds a scale box around the data sized both in µm (data units) and in
    world meters, with tick instances at grid vertices, optional grid lines
    drawn as thin tubes, per-axis visibility, and a user material.
    """
    node_group = bpy.data.node_groups.get("Scale bars (arbitrary pixel unit)")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type = 'GeometryNodeTree', name = "Scale bars (arbitrary pixel unit)")
    links = node_group.links
    interface = node_group.interface
    # -- get Input --
    interface.new_socket("Size (µm)",in_out="INPUT",socket_type='NodeSocketVector')
    interface.items_tree[-1].default_value = (7.0, 5.0, 4.0)
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 10000000.0
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Size (m)",in_out="INPUT",socket_type='NodeSocketVector')
    interface.items_tree[-1].default_value = (13.0, 10.0, 6.0)
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 10000000.0
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("µm per tick",in_out="INPUT",socket_type='NodeSocketFloat')
    interface.items_tree[-1].default_value = 10
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 3.4028234663852886e+38
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Grid",in_out="INPUT",socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = True
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Line thickness",in_out="INPUT",socket_type='NodeSocketFloat')
    interface.items_tree[-1].default_value = 0.2
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 3.4028234663852886e+38
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Tick Geometry",in_out="INPUT",socket_type='NodeSocketGeometry')
    interface.items_tree[-1].attribute_domain = 'POINT'
    # Axis Selection is 7 binary digits packed in one int (demultiplexed below)
    interface.new_socket("Axis Selection",in_out="INPUT",socket_type='NodeSocketInt')
    interface.items_tree[-1].default_value = 1111111
    interface.items_tree[-1].min_value = 0
    interface.items_tree[-1].max_value = 1111111
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Material",in_out="INPUT",socket_type='NodeSocketMaterial')
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (-1000,0)
    # -- make scale box and read out/store normals
    scalebox = node_group.nodes.new('GeometryNodeGroup')
    scalebox.node_tree = scalebox_node_group()
    scalebox.location = (-500, 500)
    links.new(group_input.outputs.get("Size (m)"), scalebox.inputs.get("Size (m)"))
    links.new(group_input.outputs.get("Size (µm)"), scalebox.inputs.get("Size (µm)"))
    links.new(group_input.outputs.get("µm per tick"), scalebox.inputs.get("µm per tick"))
    links.new(group_input.outputs.get("Axis Selection"), scalebox.inputs.get("Axis Selection"))
    normal = node_group.nodes.new("GeometryNodeInputNormal")
    normal.location = (-320, 450)
    # original normals stored as named attribute (edges) for the line geometry
    store_normal = node_group.nodes.new("GeometryNodeStoreNamedAttribute")
    store_normal.inputs.get("Name").default_value = "orig_normal"
    store_normal.data_type = 'FLOAT_VECTOR'
    store_normal.domain = 'EDGE'
    store_normal.location = (-150, 700)
    links.new(scalebox.outputs[0], store_normal.inputs[0])
    links.new(normal.outputs[0], store_normal.inputs.get("Value"))
    # ...and captured as anonymous attribute for the tick instances
    cap_normal = node_group.nodes.new("GeometryNodeCaptureAttribute")
    cap_normal.location = (-150, 400)
    links.new(scalebox.outputs[0], cap_normal.inputs[0])
    links.new(normal.outputs[0], cap_normal.inputs[1])
    # -- read out grid positions --
    grid_verts = node_group.nodes.new('GeometryNodeGroup')
    grid_verts.node_tree = grid_verts_node_group()
    grid_verts.location = (-500, 100)
    links.new(group_input.outputs.get("Size (m)"), grid_verts.inputs.get("Size (m)"))
    not_grid = node_group.nodes.new("FunctionNodeBooleanMath")
    not_grid.operation = 'NOT'
    links.new(group_input.outputs.get("Grid"), not_grid.inputs[0])
    not_grid.location = (-500, -100)
    and_grid = node_group.nodes.new("FunctionNodeBooleanMath")
    and_grid.operation = 'AND'
    links.new(grid_verts.outputs[0], and_grid.inputs[0])
    links.new(not_grid.outputs[0], and_grid.inputs[1])
    and_grid.location = (-320, 0)
    # -- scale down thickness --
    thickness = node_group.nodes.new("ShaderNodeMath")
    thickness.operation = "DIVIDE"
    thickness.location = (-500, -200)
    thickness.label = "line thickness scaled down"
    links.new(group_input.outputs.get("Line thickness"), thickness.inputs[0])
    thickness.inputs[1].default_value = 100
    # -- instantiate ticks --
    ax_grid = node_group.nodes.new("FunctionNodeBooleanMath")
    ax_grid.operation = 'NOT'
    links.new(grid_verts.outputs[0], ax_grid.inputs[0])
    ax_grid.location = (-250, -100)
    iop = node_group.nodes.new("GeometryNodeInstanceOnPoints")
    iop.location = (500, 100)
    links.new(cap_normal.outputs[0], iop.inputs[0])
    links.new(ax_grid.outputs[0], iop.inputs[1])
    links.new(group_input.outputs.get("Tick Geometry"), iop.inputs[2])
    # carry the captured normal onto each tick instance as a named attribute
    store_normaltick = node_group.nodes.new("GeometryNodeStoreNamedAttribute")
    store_normaltick.inputs.get("Name").default_value = "orig_normal"
    store_normaltick.data_type = 'FLOAT_VECTOR'
    store_normaltick.domain = 'INSTANCE'
    store_normaltick.location = (750, 100)
    links.new(iop.outputs[0], store_normaltick.inputs[0])
    links.new(cap_normal.outputs[1], store_normaltick.inputs.get("Value"))
    realize = node_group.nodes.new("GeometryNodeRealizeInstances")
    realize.location = (900, 100)
    links.new(store_normaltick.outputs[0], realize.inputs[0])
    # -- make edges --
    delgrid = node_group.nodes.new("GeometryNodeDeleteGeometry")
    delgrid.mode = 'ALL'
    delgrid.domain = 'POINT'
    delgrid.location = (400, 600)
    links.new(store_normal.outputs[0], delgrid.inputs[0])
    links.new(and_grid.outputs[0], delgrid.inputs.get("Selection"))
    # turn the remaining edges into thin square tubes
    m2c = node_group.nodes.new("GeometryNodeMeshToCurve")
    m2c.location = (600, 600)
    links.new(delgrid.outputs[0], m2c.inputs[0])
    profile = node_group.nodes.new("GeometryNodeCurvePrimitiveQuadrilateral")
    profile.location = (600, 400)
    profile.mode = "RECTANGLE"
    links.new(thickness.outputs[0], profile.inputs[0])
    links.new(thickness.outputs[0], profile.inputs[1])
    c2m = node_group.nodes.new("GeometryNodeCurveToMesh")
    c2m.location = (800, 500)
    links.new(m2c.outputs[0], c2m.inputs[0])
    links.new(profile.outputs[0], c2m.inputs[1])
    # -- output and passthrough values --
    join = node_group.nodes.new("GeometryNodeJoinGeometry")
    join.location = (1400, 0)
    links.new(c2m.outputs[0], join.inputs[0])
    links.new(realize.outputs[0], join.inputs[-1])
    # -- pass axis selection through to the shader for frontface culling --
    demultiplex_axes = node_group.nodes.new('GeometryNodeGroup')
    demultiplex_axes.node_tree = axes_demultiplexer_node_group()
    demultiplex_axes.location = (1400, -300)
    links.new(group_input.outputs.get('Axis Selection'), demultiplex_axes.inputs[0])
    culling = node_group.nodes.new("GeometryNodeStoreNamedAttribute")
    culling.label = "passthrough frontface culling to shader"
    culling.inputs.get("Name").default_value = "frontface culling"
    culling.data_type = 'BOOLEAN'
    culling.domain = 'POINT'
    culling.location = (1600, 10)
    links.new(join.outputs[0], culling.inputs[0])
    links.new(demultiplex_axes.outputs[0], culling.inputs.get('Value'))
    material = node_group.nodes.new("GeometryNodeSetMaterial")
    material.location = (2000,0)
    links.new(culling.outputs[0], material.inputs[0])
    links.new(group_input.outputs.get('Material'), material.inputs[2])
    interface.new_socket("Geometry",in_out="OUTPUT",socket_type='NodeSocketGeometry')
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (2200,0)
    links.new(material.outputs[0], group_output.inputs[0])
    return node_group
| Python |
from .nodeScale import scale_node_group
from .nodesBoolmultiplex import axes_multiplexer_node_group
from .nodeCrosshatch import crosshatch_node_group
from .nodeGridVerts import grid_verts_node_group
from .nodeScaleBox import scalebox_node_group
from .nodeBoundedMapRange import bounded_map_range_node_group
from .nodeSliceCube import slice_cube_node_group
from . import shader_nodes
CLASSES =shader_nodes.CLASSES | Python |
import bpy
def element_wise_compare_node_group(operation):
    """Return (creating on first call) a shader node group that applies the
    math compare `operation` to each component of a vector against a scalar,
    producing a vector of per-component results."""
    cached = bpy.data.node_groups.get(f"Element-wise {operation}")
    if cached:
        return cached
    group = bpy.data.node_groups.new(type='ShaderNodeTree', name=f"Element-wise {operation}")
    links = group.links
    iface = group.interface
    nodes = group.nodes

    # declare the group interface; every socket lives on the point domain
    for sock_name, direction, sock_type in (
        ("Vector", "INPUT", 'NodeSocketVector'),
        ("Value", "INPUT", 'NodeSocketFloat'),
        ("Vector", "OUTPUT", 'NodeSocketVector'),
    ):
        iface.new_socket(sock_name, in_out=direction, socket_type=sock_type)
        iface.items_tree[-1].attribute_domain = 'POINT'

    grp_in = nodes.new("NodeGroupInput")
    grp_in.location = (-200, 0)

    # split the vector, compare each component against the scalar, recombine
    split = nodes.new(type='ShaderNodeSeparateXYZ')
    split.location = (0, 0)
    links.new(grp_in.outputs.get('Vector'), split.inputs.get('Vector'))
    merge = nodes.new(type='ShaderNodeCombineXYZ')
    merge.location = (400, 0)
    for ix, axis in enumerate("XYZ"):
        cmp_node = nodes.new(type='ShaderNodeMath')
        cmp_node.location = (200, ix * -200)
        cmp_node.operation = operation
        links.new(grp_in.outputs.get('Value'), cmp_node.inputs[1])
        links.new(split.outputs.get(axis), cmp_node.inputs.get('Value'))
        links.new(cmp_node.outputs[0], merge.inputs.get(axis))

    grp_out = nodes.new("NodeGroupOutput")
    grp_out.location = (600, 0)
    links.new(merge.outputs.get('Vector'), grp_out.inputs[0])
    return group
| Python |
import bpy
def crosshatch_node_group():
    """Return (creating on first call) the 'crosshatch' geometry node group:
    three thin axis-aligned cuboids joined into a 3D tick-mark cross."""
    cached = bpy.data.node_groups.get("crosshatch")
    if cached:
        return cached
    group = bpy.data.node_groups.new(type='GeometryNodeTree', name="crosshatch")
    links = group.links
    iface = group.interface

    # float inputs: overall tick size and line thickness
    for sock_name, default in (("size", 15.0), ("thickness", 3.0)):
        iface.new_socket(sock_name, in_out="INPUT", socket_type='NodeSocketFloat')
        sock = iface.items_tree[-1]
        sock.default_value = default
        sock.min_value = 0.0
        sock.max_value = 3.4028234663852886e+38
        sock.attribute_domain = 'POINT'

    grp_in = group.nodes.new("NodeGroupInput")
    grp_in.location = (-1000, 0)

    def _divide_by_100(input_name, location, label):
        # scale a UI-facing input down so values stay in a comfortable range
        div = group.nodes.new("ShaderNodeMath")
        div.operation = "DIVIDE"
        div.location = location
        div.label = label
        links.new(grp_in.outputs.get(input_name), div.inputs[0])
        div.inputs[1].default_value = 100
        return div

    tick_len = _divide_by_100("size", (-500, -500), "tick length per direction")
    tick_thick = _divide_by_100("thickness", (-500, -700), "tick thickness scaled down")

    # one elongated cube per axis: thickness on all sides, length on that axis
    cube_nodes = []
    for axis_ix in range(3):
        dims = group.nodes.new("ShaderNodeCombineXYZ")
        dims.location = (-200, -300 - 200 * axis_ix)
        for comp in range(3):
            links.new(tick_thick.outputs[0], dims.inputs[comp])
        links.new(tick_len.outputs[0], dims.inputs[axis_ix])
        cube = group.nodes.new("GeometryNodeMeshCube")
        cube.location = (0, -300 - 200 * axis_ix)
        links.new(dims.outputs[0], cube.inputs[0])
        cube_nodes.append(cube)

    joined = group.nodes.new("GeometryNodeJoinGeometry")
    joined.location = (300, -500)
    for cube in reversed(cube_nodes):
        links.new(cube.outputs[0], joined.inputs[0])

    iface.new_socket("Geometry", in_out="OUTPUT", socket_type='NodeSocketGeometry')
    iface.items_tree[-1].attribute_domain = 'POINT'
    grp_out = group.nodes.new("NodeGroupOutput")
    grp_out.location = (500, -200)
    links.new(joined.outputs[0], grp_out.inputs[0])
    return group
| Python |
import bpy
# UNUSED currently
# This is a reskin of Map Range node to make it easier to understand for novice users
# by removing things unnecessary for microscopy data, and bounding values to microscopynodes
# ranges
def bounded_map_range_node_group():
    """Return (creating on first call) the 'Bounded Map Range' shader group.

    A simplified reskin of the Map Range node: linearly remaps Data from
    [Minimum, Maximum] to [0, Intensity], with the inputs bounded to ranges
    sensible for normalized microscopy intensities.
    """
    cached = bpy.data.node_groups.get("Bounded Map Range")
    if cached:
        return cached
    group = bpy.data.node_groups.new(type='ShaderNodeTree', name="Bounded Map Range")
    links = group.links
    iface = group.interface
    nodes = group.nodes

    def _float_in(sock_name, default=None, lo=None, hi=None):
        # declare one float input socket, optionally bounded with a default
        iface.new_socket(sock_name, in_out="INPUT", socket_type='NodeSocketFloat')
        sock = iface.items_tree[-1]
        if default is not None:
            sock.default_value = default
        if lo is not None:
            sock.min_value = lo
        if hi is not None:
            sock.max_value = hi
        sock.attribute_domain = 'POINT'

    _float_in("Data")
    _float_in("Minimum", default=0.0, lo=0.0, hi=1.0)
    _float_in("Maximum", default=1.0, lo=0.0, hi=1.0)
    _float_in("Intensity", default=1, lo=0.0, hi=10000.0)
    iface.new_socket("Rescaled Data", in_out="OUTPUT", socket_type='NodeSocketFloat')
    iface.items_tree[-1].attribute_domain = 'POINT'

    grp_in = nodes.new("NodeGroupInput")
    grp_in.location = (-200, 0)
    # the actual work happens in a single Map Range node
    remap = nodes.new(type='ShaderNodeMapRange')
    remap.location = (0, 0)
    links.new(grp_in.outputs.get('Data'), remap.inputs.get('Value'))
    links.new(grp_in.outputs.get('Minimum'), remap.inputs.get('From Min'))
    links.new(grp_in.outputs.get('Maximum'), remap.inputs.get('From Max'))
    links.new(grp_in.outputs.get('Intensity'), remap.inputs.get('To Max'))
    grp_out = nodes.new("NodeGroupOutput")
    grp_out.location = (200, 0)
    links.new(remap.outputs.get('Result'), grp_out.inputs[0])
    return group
import bpy
def grid_verts_node_group():
    """Return (creating on first call) the internal '_grid_verts' node group.

    Outputs a per-point boolean over a box of "Size (m)" (z from 0 to size,
    x/y centered on 0). A point tests True when it does NOT lie on two or
    more min/max faces at once — i.e. box edges/corners are excluded while
    face-interior grid vertices are kept.
    """
    node_group = bpy.data.node_groups.get("_grid_verts")
    if node_group:
        return node_group
    node_group= bpy.data.node_groups.new(type = 'GeometryNodeTree', name = "_grid_verts")
    links = node_group.links
    interface = node_group.interface
    # -- get IO --
    #input Vector
    interface.new_socket("Size (m)",in_out="INPUT",socket_type='NodeSocketVector')
    interface.items_tree[-1].default_value = (13.0, 10.0, 6.0)
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 10000000.0
    interface.items_tree[-1].attribute_domain = 'POINT'
    #node Group Input
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (-800,0)
    pos = node_group.nodes.new("GeometryNodeInputPosition")
    pos.location = (-600, 130)
    posXYZ = node_group.nodes.new("ShaderNodeSeparateXYZ")
    posXYZ.location = (-400, 130)
    posXYZ.label = "posXYZ"
    links.new(pos.outputs[0], posXYZ.inputs[0])
    # per axis, test position equality against the min and max box corners:
    # min corner = size * (-0.5, -0.5, 0), max corner = size * (0.5, 0.5, 1)
    compnodes = [[],[],[]]
    for ix, side in enumerate(['min', 'max']):
        loc = node_group.nodes.new("ShaderNodeVectorMath")
        loc.operation = "MULTIPLY"
        loc.location = (-600, -200 * ix)
        loc.label = "location 0,0,0"
        links.new(group_input.outputs.get("Size (m)"), loc.inputs[0])
        if side == 'min':
            loc.inputs[1].default_value = (-0.5,-0.5,0)
        else:
            loc.inputs[1].default_value = (0.5,0.5,1)
        locXYZ = node_group.nodes.new("ShaderNodeSeparateXYZ")
        locXYZ.location = (-400, -130*ix)
        locXYZ.label = side + "XYZ"
        links.new(loc.outputs[0], locXYZ.inputs[0])
        for axix, ax in enumerate("XYZ"):
            # element wise compare
            compare = node_group.nodes.new("FunctionNodeCompare")
            compare.data_type = 'FLOAT'
            compare.operation = 'EQUAL'
            compare.mode = 'ELEMENT'
            compare.label = "value on " + side + " in " + ax
            compare.location = (-200, ((ix*3)+axix) * -200 +300)
            links.new(posXYZ.outputs[axix], compare.inputs[0])
            links.new(locXYZ.outputs[axix], compare.inputs[1])
            compnodes[axix].append(compare)
    # ornodes[axis]: point sits on the min OR max face along that axis
    ornodes = []
    for axix, ax in enumerate("XYZ"):
        ornode = node_group.nodes.new("FunctionNodeBooleanMath")
        ornode.operation = 'OR'
        for nix,compnode in enumerate(compnodes[axix]):
            links.new(compnode.outputs[0], ornode.inputs[nix])
        ornode.location = (0, (axix) * -200 +100)
        ornode.label = "vert in min or max of " + ax
        ornodes.append(ornode)
    # andnodes: on boundaries of two axes at once, i.e. on a box edge
    andnodes = []
    for i in range(3):
        andnode = node_group.nodes.new("FunctionNodeBooleanMath")
        andnode.operation = 'AND'
        links.new(ornodes[i].outputs[0], andnode.inputs[0])
        links.new(ornodes[i-1].outputs[0], andnode.inputs[1])
        andnode.location = (200, i * -200 +100)
        andnodes.append(andnode)
    # collapse the three edge tests, then invert: True = not on any edge
    ornodes2 = []
    ornode = node_group.nodes.new("FunctionNodeBooleanMath")
    ornode.operation = 'OR'
    links.new(andnodes[0].outputs[0], ornode.inputs[0])
    links.new(andnodes[1].outputs[0], ornode.inputs[1])
    ornode.location = (400, 100)
    ornodes2.append(ornode)
    ornode = node_group.nodes.new("FunctionNodeBooleanMath")
    ornode.operation = 'OR'
    links.new(andnodes[1].outputs[0], ornode.inputs[0])
    links.new(andnodes[2].outputs[0], ornode.inputs[1])
    ornode.location = (400, -100)
    ornodes2.append(ornode)
    nornode = node_group.nodes.new("FunctionNodeBooleanMath")
    nornode.operation = 'NOR'
    links.new(ornodes2[0].outputs[0], nornode.inputs[0])
    links.new(ornodes2[1].outputs[0], nornode.inputs[1])
    nornode.location = (600, 100)
    #output Geometry
    # interface.new_socket("Size (m)",in_out="INPUT",socket_type='NodeSocketVector')
    # interface.items_tree[-1].default_value = (13.0, 10.0, 6.0)
    # interface.items_tree[-1].min_value = 0.0
    # interface.items_tree[-1].max_value = 10000000.0
    # interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Boolean",in_out="OUTPUT",socket_type='NodeSocketBool')
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (800,100)
    links.new(nornode.outputs[0], group_output.inputs[0])
    return node_group
| Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.