keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | Autodesk/molecular-design-toolkit | deployment/pull-cache.sh | .sh | 1,125 | 43 | #!/usr/bin/env bash
if [ -z ${CI_BRANCH} ]; then
echo "\$CI_BRANCH" var not set.
exit 10
fi
function echocmd() {
echo "> $@"
$@
}
function run-pull(){
# pull an image. If successful, retags the image with the "cache" tag
img=$1
tag=$2
imgpath="${REPO}${img}-${tag}"
echocmd docker pull ${imgpath} | tee -a pull.log | egrep -i 'pull|already';
success=${PIPESTATUS[0]}
if [ "$success" -ne 0 ]; then
return ${success};
else
docker tag ${imgpath} ${img}:cache
fi
}
# we copy binaries out of this one for our build
chemdocker_tag=$(cat /opt/molecular-design-toolkit/moldesign/compute/CHEMDOCKER_TAG)
echocmd docker pull chemdocker/pyscf-build-1.3.1:${chemdocker_tag} | tee -a pull.log | egrep -i 'pull|already';
for img in moldesign_minimal \
moldesign_minimal_py2 \
moldesign_complete \
moldesign_complete_py2 \
moldesign_py_build \
moldesign_notebook; do
run-pull ${img} ${CI_BRANCH}-devbuild || run-pull ${img} master || \
echo " --> Failed to pull cache for ${img}"
echo
done
| Shell |
3D | Autodesk/molecular-design-toolkit | deployment/run-ci-tests.sh | .sh | 3,072 | 110 | #!/usr/bin/env bash
# Drives tests for our CI system. This looks for the following environment variables:
# Defined by codeship
# - CI_BRANCH
# - CI_COMMIT_MESSAGE
# - PROJECT_ID
# Defined in ../codeship-services.yml
# - TESTENV
# - PYVERSION
set -e # fail immediately if any command fails
if [ -z "${CI_BRANCH}" ]; then
echo "FAILURE: Variable \$CI_BRANCH not defined."
exit 1
fi
install_location=$(python -c "import moldesign, os; print(moldesign.__path__[0])")
test_location=$(dirname "${install_location}")
VERSION="${TESTENV}.py${PYVERSION}"
PYTESTFLAGS="moldesign/_tests/ -n 2 --spec --durations=20
--junit-xml=/opt/reports/junit.${VERSION}.xml --timeout=3600 --tb=short
--cov moldesign --cov-config /opt/molecular-design-toolkit/.coveragerc"
function send_status_update(){
python /opt/molecular-design-toolkit/deployment/send-test-status.py "${1}" "${2}"
}
function check_if_tests_should_run(){
echo "Should I run the tests in this environment?"
if [[ "${CI_COMMIT_MESSAGE}" == *"--no-tests"* ]]; then
echo "NO: found \"--skip-ci-tests\" flag in commit message; will not run any test suites"
exit 0
fi
if [[ "${CI_COMMIT_MESSAGE}" == *"--fast-tests"* && "${VERSION}" != "complete.py3" ]]; then
echo "NO: found \"--fast-ci-tests\" flag in commit message; run complete.py3 only"
exit 0
fi
if [[ "${CI_BRANCH}" =~ ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)((a|rc|b)(0|[1-9]\d*))?$ ]]
then
echo "YES: this is a release version: \"${CI_BRANCH}\""
return 0
else # otherwise, point to the appropriate docker image tag
mkdir -p ~/.moldesign
echo "default_version_tag: ${CI_BRANCH}" >> ~/.moldesign/moldesign.yml
fi
if [ "${TESTENV}" == "complete" ]; then
echo "YES: always run in 'complete' environment"
return 0
fi
case "${CI_BRANCH}" in
master|deploy|dev)
echo "YES: always run in branch \"${CI_BRANCH}\""
return 0
;;
esac
if [[ "${CI_COMMIT_MESSAGE}" == *"--testall"* ]]; then
runthem=true
echo "YES: found \"--testall\" flag in commit message"
fi
if [ "${runthem}" != "true" ]; then
echo "SKIPPING tests in this environment."
echo "To run these tests, add \"--testall\" to your commit message"
echo "(or work in the dev or deploy branches)"
exit 0
fi
}
function run_tests(){
send_status_update "na" "Starting tests for ${VERSION}"
cd ${test_location}
echo
echo "Test command running in working dir '$(pwd)':"
echo "py.test ${PYTESTFLAGS}"
echo
py.test ${PYTESTFLAGS} | tee /opt/reports/pytest.${VERSION}.log
exitstat=${PIPESTATUS[0]}
statline="$(tail -n1 /opt/reports/pytest.${VERSION}.log)"
# Make a copy of the coverage report
mkdir -p /opt/reports/env-coverage/
cp .coverage /opt/reports/env-coverage/coverage.${VERSION}
echo 'Test status:'
echo ${statline}
send_status_update "${exitstat}" "${statline}"
exit ${exitstat}
}
check_if_tests_should_run
run_tests
| Shell |
3D | Autodesk/molecular-design-toolkit | deployment/publish.sh | .sh | 987 | 33 | #!/bin/bash
# Publish a new release (triggered by a git tag that conforms to a PEP440 release)
# Exit 1 if there's a mismatch between the git tag and the package's version
#
# Expects to run in base directory of the repository
# fail immediately if any command fails:
set -e
echo "Now deploying moldesign-${CI_BRANCH}"
docker login -u ${DOCKERHUB_USER} -p ${DOCKERHUB_PASSWORD}
# Copy python package out of the docker image
sdist=moldesign-${CI_BRANCH}.tar.gz
docker run moldesign_py_build:dev cat dist/${sdist} > /opt/dist/${sdist}
# Push images to dockerhub
for img in moldesign_minimal \
moldesign_minimal_py2 \
moldesign_complete \
moldesign_complete_py2 \
moldesign_notebook; do
docker push ${REPO}${img}-${CI_BRANCH} | tee -a push.log | egrep -i 'pull|already'
done
# Push python package to PyPI
echo "Uploading version ${CI_BRANCH} to PyPI:"
twine upload -u ${PYPI_USER} -p ${PYPI_PASSWORD} /opt/dist/${sdist}
| Shell |
3D | Autodesk/molecular-design-toolkit | deployment/send-test-status.py | .py | 1,615 | 58 | #!/usr/bin/env python
"""
This script accesses the github API to send a custom status message
about test results
"""
import os
import sys
import github
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('exitcode', type=str)
parser.add_argument('msg', type=str)
parser.add_argument('--deployed', action='store_true')
args = parser.parse_args()
status = {'0':'success', 'na':'pending'}.get(args.exitcode, 'failure')
missing_env = []
for key in 'CI_COMMIT_ID TESTENV GITHUB_REPO_TOKEN CI_PROJECT_ID CI_BUILD_ID PYVERSION'.split():
if not os.environ.get(key, None):
missing_env.append(key)
# set by codeship CI
sha = os.environ.get('CI_COMMIT_ID', '_no_commitid')
testenv = os.environ.get('TESTENV', '_notestenv')
ghtoken = os.environ.get('GITHUB_REPO_TOKEN', '_notoken')
# projid = os.environ.get('CI_PROJECT_ID', '_no_projid')
projid = '214515' # hardcoded for now
buildid = os.environ.get('CI_BUILD_ID', '_no_buildid')
pyversion = os.environ.get('PYVERSION', '_no_pyversion')
data = dict(state=status,
target_url='https://app.codeship.com/projects/%s/builds/%s' %
(projid, buildid),
description=args.msg.replace("=","").strip(),
context='%s/py%s' % (testenv, pyversion))
if missing_env:
print("Not sending status update b/c of missing env vars: %s" % ','.join(missing_env))
print(data)
sys.exit(0)
g = github.Github(ghtoken)
repo = g.get_repo('autodesk/molecular-design-toolkit')
commit = repo.get_commit(sha)
if args.deployed:
raise NotImplementedError
else:
commit.create_status(**data)
| Python |
3D | Autodesk/molecular-design-toolkit | deployment/pull-chemdocker.sh | .sh | 329 | 11 | #!/usr/bin/env bash
chemdocker_tag=$(cat /opt/molecular-design-toolkit/moldesign/compute/CHEMDOCKER_TAG)
for image in nwchem-6.6 \
ambertools-16 \
ambertools-17 \
opsin-2.1.0; do
docker pull chemdocker/${image}:${chemdocker_tag} | tee -a pull.log | egrep -i 'pull|already';
done
| Shell |
3D | Autodesk/molecular-design-toolkit | deployment/print-environment.sh | .sh | 538 | 19 | #!/usr/bin/env bash
echo
echo " ==== ${TESTENV} Test Environment for Python ${PYVERSION} ==== "
echo
echo " * Python interpreter: $(python -c "import sys; print(sys.version)")"
echo
echo " * Conda version: $(conda --version 2>/dev/null || echo 'not installed')"
echo
# This will print logging messages about optional interfaces
python -c "import moldesign; moldesign.compute.packages.print_env()"
echo
echo " * Conda environment: $(conda list 2>/dev/null || echo "not installed")"
echo
echo " * Installed python packages: "
pip freeze
| Shell |
3D | Autodesk/molecular-design-toolkit | deployment/push-and-tag.sh | .sh | 668 | 30 | #!/bin/bash
set -e
if [ -z ${CI_BRANCH} ]; then
echo "\$CI_BRANCH" var not set.
exit 1
fi
if [ -z ${DOCKERHUB_USER} ] || [ -z ${DOCKERHUB_PASSWORD} ]; then
echo "Dockerhub credentials not provided. Skipping push ..."
exit 0
fi
function echocmd() {
echo "> $@"
$@
}
docker login -u ${DOCKERHUB_USER} -p ${DOCKERHUB_PASSWORD}
for img in $@; do
build_img=${img}:dev
release_tag=${REPO}${img}-${CI_BRANCH}
artifact_tag=${release_tag}-devbuild
echocmd docker tag ${build_img} ${release_tag}
echocmd docker tag ${build_img} ${artifact_tag}
echocmd docker push ${artifact_tag} | tee -a push.log | egrep -i 'push|already';
done | Shell |
3D | rhyan10/G-SchNetOE62 | PCA_functions.py | .py | 32,943 | 714 | import os
import struct
import numpy as np
import ase.db as adb
from sklearn.decomposition import PCA, IncrementalPCA
from dscribe.descriptors import Descriptor, SOAP, MBTR
# Object type imports
from typing import Generator, Tuple, Union, Any, Optional
from numpy.typing import ArrayLike
from ase.db.core import Database
class db_iter_PCA:
'''Contains functions for running PCA on `dbAutoAnalysis`-parsed databases.
Designed to read in a series of ASE databases corresponding to the iterations
of a certain energy type (HOMO, LUMO or HL). These databases must have been
parsed with the `dbAutoAnalysis` class to extract the requisite bonding
features and save them into the database rows. Can also be used to apply a
previously fitted PCA model to a new database, such as the parsed
OE62 database.
The PCA in question reduces the dimensionality of all molecules\' chosen
structural descriptors (either SOAP or MBTR), as well as their bonding
descriptors, assembled from the features in each parsed database. It also
extracts the HOMO, LUMO and HOMO-LUMO gap energies from the databases, for
use in clustering plots.
The output arrays from this PCA can then be saved into a .npz archive for
later use, as the code takes a long time to run and effort should be made
not to rerun calculations unless really neccessary.
'''
def __init__(self,
db_folder: str,
db_type: str,
descriptor_type: str,
n_PCA_components: int,
n_descriptor_jobs: int,
print_descriptor_progress: bool = False):
'''Initialise PCA functions, sanitise user inputs.
Args:
db_folder: Path to directory containing all the relevant databases.
db_type: Type of datasets to load in. Choose from \'HOMO\', \'LUMO\',
\'HL\' or \'OE62\'.
descriptor_type: Type of structural descriptor to generate for each
molecule. Choose from \'SOAP\' or \'MBTR\'
n_PCA_components: Number of principal components to keep from each PCA. If
applying an existing PCA to a new db, this must be the same number which
was used for that PCA.
n_descriptor_jobs: Number of parallel jobs to run when computing
descriptors.
print_descriptor_progress: Whether to write out progress of the
descriptor construction routines. Can cause crashes when constructing
descriptors in parallel (when n_descriptor_jobs > 1)
'''
# Load in arguments as class attributes.
self.db_folder = db_folder
self.db_type = db_type
self.descriptor_type = descriptor_type
self.n_PCA_components = n_PCA_components
self.n_descriptor_jobs = n_descriptor_jobs
self.verbose_descs = print_descriptor_progress
# Set other class variables.
self.implemented_elements = ['H', 'Li', 'B', 'C', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'As', 'Se', 'Te', 'Br', 'I']
self.implemented_bond_orders = [1, 2, 3, 4, 5]
self.implemented_ring_sizes = np.arange(3, 10)
# Sanitise inputs
if not os.path.exists(self.db_folder):
raise ValueError(f'Path {self.db_folder} does not exist.')
if self.db_type not in ['HOMO', 'LUMO', 'HL', 'OE62', 'Initial']:
raise ValueError(f'db_type {self.db_type} not recognised.')
if self.descriptor_type not in ['SOAP', 'MBTR']:
raise ValueError(f'descriptor_type {self.descriptor_type} not recognised.')
if self.n_descriptor_jobs > 1 and self.verbose_descs:
print('Warning: printing descriptor construction progress when constructing descriptors in parallel has been known to cause crashes. Proceed with caution.')
# Create bonding descriptor template.
self.bonding_properties = []
for index, i in enumerate(self.implemented_elements):
self.bonding_properties.append(f'natoms_{i}')
self.bonding_properties.append(f'n_aromatic_{i}')
for j in self.implemented_elements[index:]:
for k in self.implemented_bond_orders:
self.bonding_properties.append(f'nbonds_{i}{k}{j}')
for i in self.implemented_bond_orders:
self.bonding_properties.append(f'nbonds_{i}')
for i in self.implemented_ring_sizes:
self.bonding_properties.append(f'nrings_{i}')
self.bonding_properties.append(f'nrings_{i}_aromatic')
self.bonding_properties.append('aromaticity')
self.n_bonding_features = len(self.bonding_properties)
self.energy_properties = ['HOMO', 'LUMO', 'HL']
def load_db(self, db_iter: Optional[int]=None) -> Tuple[Database, Generator[Any, None, None]]:
'''Loads an ASE database of a specified type and iteration number.
Args:
db_iter: The iteration of the database set to load.
Returns:
The ase.db.connect() object interface and a Generator for iterating
over all database rows (from db.select()).
'''
if self.db_type == 'OE62':
if db_iter is not None:
raise ValueError('db_type OE62 has no iterations, arg db_iter should be None')
else:
db_path = f'{self.db_folder}/{self.db_type}_parsed.db'
elif self.db_type == 'Initial':
if db_iter is not None:
raise ValueError('db_type Initial has no iterations, arg db_iter should be None')
else:
db_path = f'{self.db_folder}/{self.db_type}_parsed.db'
else:
if db_iter is None:
raise ValueError('db_types other than OE62 and Initial require db_iter to be an integer value.')
else:
db_path = f'{self.db_folder}/{self.db_type}{db_iter}_parsed.db'
# Check db file exists
if not os.path.exists(db_path):
raise RuntimeError(f'ASE database not found at {db_path}')
db = adb.connect(db_path)
iterator = db.select()
return db, iterator
def create_desc_gen(self) -> Descriptor:
'''Creates a descriptor generator object which can be called under a common name.
Descriptor generators are created by DScribe and can be used to generate
the structural descriptors (SOAP or MBTR) for large batches of molecules
in parallel.
Returns:
The descriptor generator object, which can be called with `desc.create()`
to generate descriptors for a set of molecules.
'''
if self.descriptor_type == 'SOAP':
desc = SOAP(
species = self.implemented_elements, # List of the elements which the descriptor can accept
periodic = False, # We are only studying isolated molecules
rcut = 3.5, # Cutoff for local region in Angstroms
nmax = 8, # Number of radial basis functions
lmax = 6, # Maximum degree of spherical harmonics
average='inner' # Averaging over all sites to create a global descriptor.
)
else:
desc = MBTR(
species = self.implemented_elements,
k2 = {
"geometry": {"function": "distance"},
"grid": {"min": 0.0, "max": 10.0, "sigma": 0.1, "n": 50},
"weighting": {"function": "exp", "scale": 0.75, "threshold": 1e-2}
},
k3 = {
"geometry": {"function": "angle"},
"grid": {"min": 0, "max": 180, "sigma": 5, "n": 50},
"weighting" : {"function": "exp", "scale": 0.5, "threshold": 1e-3}
},
periodic = False,
normalization = 'l2_each',
flatten=True
)
return desc
def construct_descriptors(self, db_iterator) -> ArrayLike:
'''Construct a SOAP/MBTR descriptor for each molecule in a database selection.
Iterates through a generator to extract an ASE Atoms object from each
database row, then generates a single flattened descriptor vector from
each Atoms object.
Descriptor vector creation can be parallelised by passing a value to
self.n_descriptor_jobs.
When creating SOAP descriptor vectors, this function additionally iterates
through every Atoms object and moves its centre of mass to (0, 0, 0). This
allows the SOAP descriptor to use a common reference point between molecules.
Args:
db_iterator: `Generator` object returned by an ASE database's `.select()`
method.
Returns:
NumPy array of descriptor vectors of size `n_molecules x n_features`.
'''
# Create base descriptor generator.
desc = self.create_desc_gen()
print(f'Created {self.descriptor_type} descriptor generator.')
# Create initial list of Atoms objects (may take up a significant chunk of memory).
atoms_list = [row.toatoms() for row in db_iterator]
n_molecules = len(atoms_list)
print(f'Loaded in {n_molecules} Atoms objects.')
if self.descriptor_type == 'SOAP':
print('Starting generation of SOAP descriptors...')
all_descriptors = desc.create(atoms_list, n_jobs=self.n_descriptor_jobs)
print('Done.')
# Explicitly try to deallocate memory in use by this large array.
del atoms_list
# return all_descriptors[:, 0, :]
return all_descriptors
else:
print('Starting generation of MBTR descriptors...')
all_descriptors = desc.create(atoms_list, n_jobs=self.n_descriptor_jobs)
# Explicitly try to deallocate memory in use by this large array.
del atoms_list
return all_descriptors
def fit_PCAs(self, return_fits=None) -> Tuple[ArrayLike, ArrayLike, ArrayLike, ArrayLike]:
'''Construct and fit PCAs for the structural and bonding descriptors.
Loops through all parsed databases of `self.db_type` in `self.db_folder`,
constructing descriptor vectors for each molecule in each database. These
descriptor vectors are composed of a structural descriptor vector (SOAP or
MBTR) and a bonding descriptor vector (bond order data from parsed database).
Due to the size of the structural descriptor vectors, these get fed into an
incremental PCA (iPCA) model as training data, database by database, in order
to maintain memory efficiency. The bonding descriptor vectors are fed into a
regular PCA as a single batch of training data. These two PCAs are used to
reduce the dimensionality of the two vectors to a `self.n_PCA_components` length
vector for each molecule.
Args:
return_fits: If a str is passed, save the fitted PCA objects to a .npz archive
under this filename.
Returns:
An [n_molecules x 2 x n_PCA_components] ndarray of the final principal
components. The first row along axis 1 represents the principal structural
components, while the second row represents the principal bonding components.
Returns another ndarray of size [n_molecules x 3] with columns representing
the HOMO, LUMO and HOMO-LUMO gap energies of each molecule. Also returns a
third ndarray of size [2 x n_PCA_components] containing the variance ratios
for each principal component. The first column represents variance ratios of
the principal structural components, while the second column represents variance
ratios of the principal bonding components. Finally, returns a [n_molecules]
ndarray of the database iteration that each corresponding molecule came from.
'''
print('----------------------------------------------')
print('Initialising...')
db_iters = 0
for f in os.listdir(self.db_folder):
if f.startswith(self.db_type) and f.endswith('_parsed.db'): db_iters += 1
print(f'Number of {self.db_type} iterations: {db_iters}')
print('Initialisation complete. Starting iterations over databases.')
print('----------------------------------------------')
print()
# Set up global data arrays
all_struct_descs = None
all_bond_descs = None
all_energies = None
iteration_map = None
# Set up iPCA for structural descriptors
structure_PCA = IncrementalPCA(n_components=self.n_PCA_components)
for i in range(1, db_iters+1):
print('----------------------------------------------')
print(f'Database: {self.db_type}{i}')
db, iterator = self.load_db(i)
n_molecules = len(db)
if iteration_map is None:
iteration_map = np.ones(n_molecules, dtype=int)
else:
iteration_map = np.concatenate((iteration_map, np.full(n_molecules, i, dtype=int)))
print('Database loaded, constructing structural descriptors.')
print()
iter_struct_descs = self.construct_descriptors(iterator)
print('Constructing bonding descriptors and collecting energies.')
iter_bond_descs = np.zeros((n_molecules, self.n_bonding_features))
iter_energies = np.zeros((n_molecules, 3))
iterator = db.select()
row_counter = -1
for row in iterator:
row_counter += 1
row_features = row.key_value_pairs.keys()
for j, feature in enumerate(self.bonding_properties):
if feature in row_features:
iter_bond_descs[row_counter, j] = row[feature]
for j, e_type in enumerate(self.energy_properties):
iter_energies[row_counter, j] = row[e_type]
# Concatenate to global arrays
print('Concatenating descriptors to global arrays.')
if all_struct_descs is None:
all_struct_descs = iter_struct_descs
else:
all_struct_descs = np.concatenate((all_struct_descs, iter_struct_descs))
if all_bond_descs is None:
all_bond_descs = iter_bond_descs
else:
all_bond_descs = np.concatenate((all_bond_descs, iter_bond_descs))
if all_energies is None:
all_energies = iter_energies
else:
all_energies = np.concatenate((all_energies, iter_energies))
# Incrementally train PCA
print('Training iPCA on structural descriptors...')
structure_PCA.partial_fit(iter_struct_descs)
print('Done.')
# Try to force Python to free up memory
del iter_struct_descs
del iter_bond_descs
print('Iteration complete.')
print('----------------------------------------------')
print()
# Once all databases have been looped through, train bonding PCA on the overall array.
print('----------------------------------------------')
print('Database loops complete, training PCA on bonding descriptors...')
bonding_PCA = PCA(n_components=self.n_PCA_components)
bonding_PCA.fit(all_bond_descs)
print('Done.')
print('Applying dimensionality reduction to both sets of descriptors...')
n_molecules_tot = len(all_bond_descs)
pca_results = np.zeros((n_molecules_tot, 2, self.n_PCA_components))
ratio_results = np.zeros((2, self.n_PCA_components))
struct_pca_results = structure_PCA.transform(all_struct_descs)
print('Transformed structural descriptors.')
pca_results[:, 0, :] = struct_pca_results
ratio_results[0, :] = structure_PCA.explained_variance_ratio_
# Free up memory
del struct_pca_results
print('Placed structural principal components in final results array.')
bond_pca_results = bonding_PCA.transform(all_bond_descs)
print('Transformed bonding descriptors.')
pca_results[:, 1, :] = bond_pca_results
ratio_results[1, :] = bonding_PCA.explained_variance_ratio_
# Free up memory
del bond_pca_results
print('Placed bonding principal components in final results array.')
if return_fits is not None:
self.save_PCA_objects(return_fits, bonding_PCA, structure_PCA)
print(f'Saved PCA objects to {return_fits}.')
print('Finished.')
print('----------------------------------------------')
return pca_results, all_energies, ratio_results, iteration_map
def fit_OE62_PCA(self, return_fits=None, split_db: bool=False) -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
'''Construct and fit PCAs for the structural and bonding descriptors.
Loops through molecules in parsed OE62 database, constructing descriptor vectors
for each molecule. These descriptor vectors are composed of a structural
descriptor vector (SOAP or MBTR) and a bonding descriptor vector (bond order
data from parsed database).
Both descriptor vectors are fed into a regular PCA as single batches of training
data. These two PCAs are used to reduce the dimensionality of the two vectors to
a `self.n_PCA_components` length vector for each molecule.
Args:
return_fits: If a str is passed, save the fitted PCA objects to a .npz archive
under this filename.
split_db: Whether to divide the database in two when constructing the
structural descriptors. Useful for very large databases.
Returns:
An [n_molecules x 2 x n_PCA_components] ndarray of the final principal
components. The first row along axis 1 represents the principal structural
components, while the second row represents the principal bonding components.
Returns another ndarray of size [n_molecules x 3] with columns representing
the HOMO, LUMO and HOMO-LUMO gap energies of each molecule. Also returns a
third ndarray of size [2 x n_PCA_components] containing the variance ratios
for each principal component. The first column represents variance ratios of
the principal structural components, while the second column represents variance
ratios of the principal bonding components.
'''
print('----------------------------------------------')
print('Initialising...')
db, iterator = self.load_db()
n_molecules = len(db)
print('Initialisation complete. Starting PCA.')
print('----------------------------------------------')
print()
print('Constructing structural descriptors.')
if not split_db:
struct_descs = self.construct_descriptors(iterator)
else:
print(' - split_db enabled, constructing descriptors for first half of database.')
iterator_1 = db.select(f'id<{n_molecules/2}')
struct_descs_1 = self.construct_descriptors(iterator_1)
print(' - Constructing descriptors for second half of database.')
iterator_2 = db.select(f'id>={n_molecules/2}')
struct_descs_2 = self.construct_descriptors(iterator_2)
print(' - Concatenating split descriptor set.')
struct_descs = np.concatenate((struct_descs_1, struct_descs_2))
# Manually free up memory
del struct_descs_1; del struct_descs_2
print('Constructing bonding descriptors and collecting energies.')
bond_descs = np.zeros((n_molecules, self.n_bonding_features))
energies = np.zeros((n_molecules, 3))
iterator = db.select()
row_counter = -1
for row in iterator:
row_counter += 1
row_features = row.key_value_pairs.keys()
for j, feature in enumerate(self.bonding_properties):
if feature in row_features:
bond_descs[row_counter, j] = row[feature]
for j, e_type in enumerate(self.energy_properties):
energies[row_counter, j] = row[e_type]
# Once database has been looped through, train PCAs on the overall arrays.
print('----------------------------------------------')
print('Descriptor generation complete, training PCA on bonding descriptors...')
bonding_PCA = PCA(n_components=self.n_PCA_components)
bonding_PCA.fit(bond_descs)
print('Done.')
print('Training PCA on structural descriptors...')
structure_PCA = PCA(n_components=self.n_PCA_components)
structure_PCA.fit(struct_descs)
print('Applying dimensionality reduction to both sets of descriptors...')
pca_results = np.zeros((n_molecules, 2, self.n_PCA_components))
ratio_results = np.zeros((2, self.n_PCA_components))
struct_pca_results = structure_PCA.transform(struct_descs)
print('Transformed structural descriptors.')
pca_results[:, 0, :] = struct_pca_results
ratio_results[0, :] = structure_PCA.explained_variance_ratio_
# Free up memory
del struct_pca_results
print('Placed structural principal components in final results array.')
bond_pca_results = bonding_PCA.transform(bond_descs)
print('Transformed bonding descriptors.')
pca_results[:, 1, :] = bond_pca_results
ratio_results[1, :] = bonding_PCA.explained_variance_ratio_
# Free up memory
del bond_pca_results
print('Placed bonding principal components in final results array.')
if return_fits is not None:
self.save_PCA_objects(return_fits, bonding_PCA, structure_PCA)
print(f'Saved PCA objects to {return_fits}.')
print('Finished.')
print('----------------------------------------------')
return pca_results, energies, ratio_results
def transform_db(self, bonding_PCA: PCA, structure_PCA: IncrementalPCA, split_db: bool=False):
'''Use a set of existing fitted PCA models to run PCA on an unfitted database.
Currently only supports transforming bonding and structural descriptors
from OE62 dataset or initially generated G-SchNet dataset.
Args:
bonding_PCA: Fitted PCA object to transform a database's bonding
descriptors with.
structure_PCA: Fitted PCA object to transform a database's structural
descriptors with.
split_db: Whether to divide the database in two when constructing the
structural descriptors. Useful for very large databases.
Returns:
An [n_molecules x 2 x n_PCA_components] ndarray of the final principal
components. The first row along axis 1 represents the principal structural
components, while the second row represents the principal bonding components.
Returns another ndarray of size [n_molecules x 3] with columns representing
the HOMO, LUMO and HOMO-LUMO gap energies of each molecule.
'''
# Set up global data arrays
struct_descs = None
bond_descs = None
energies = None
db, iterator = self.load_db()
n_molecules = len(db)
print('----------------------------------------------')
print('Database loaded, constructing structural descriptors.')
if not split_db:
struct_descs = self.construct_descriptors(iterator)
else:
print(' - split_db enabled, constructing descriptors for first half of database.')
iterator_1 = db.select(f'id<{n_molecules/2}')
struct_descs_1 = self.construct_descriptors(iterator_1)
print(' - Constructing descriptors for second half of database.')
iterator_2 = db.select(f'id>={n_molecules/2}')
struct_descs_2 = self.construct_descriptors(iterator_2)
print(' - Concatenating split descriptor set.')
struct_descs = np.concatenate((struct_descs_1, struct_descs_2))
# Manually free up memory
del struct_descs_1; del struct_descs_2
print()
print('Constructing bonding descriptors and collecting energies.')
bond_descs = np.zeros((n_molecules, self.n_bonding_features))
energies = np.zeros((n_molecules, 3))
iterator = db.select()
row_counter = -1
for row in iterator:
row_counter += 1
row_features = row.key_value_pairs.keys()
for j, feature in enumerate(self.bonding_properties):
if feature in row_features:
bond_descs[row_counter, j] = row[feature]
for j, e_type in enumerate(self.energy_properties):
energies[row_counter, j] = row[e_type]
print('Descriptor construction complete.')
print('Applying dimensionality reduction to both sets of descriptors...')
pca_results = np.zeros((n_molecules, 2, self.n_PCA_components))
struct_pca_results = structure_PCA.transform(struct_descs)
print('Transformed structural descriptors.')
pca_results[:, 0, :] = struct_pca_results
# Free up memory
del struct_pca_results
print('Placed structural principal components in final results array.')
bond_pca_results = bonding_PCA.transform(bond_descs)
print('Transformed bonding descriptors.')
pca_results[:, 1, :] = bond_pca_results
# Free up memory
del bond_pca_results
print('Placed bonding principal components in final results array.')
print('Finished.')
print('----------------------------------------------')
return pca_results, energies
    def transform_db_set(self, bonding_PCA: PCA, structure_PCA: PCA) -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
        '''Use a set of existing fitted PCA models to run PCA on an unfitted set of databases.
        Can be used to transform a set of parsed G-SchNet iteration databases
        with a set of bonding/structural PCAs fitted on other data, e.g. on
        the parsed OE26 database.

        The databases are discovered by scanning ``self.db_folder`` for files
        named ``{self.db_type}<i>_parsed.db`` and are processed in iteration
        order (i = 1 .. n), so the rows of the returned arrays are grouped by
        iteration.

        Args:
            bonding_PCA: Fitted PCA object to transform a database's bonding
                descriptors with.
            structure_PCA: Fitted PCA object to transform a database's structural
                descriptors with.
        Returns:
            An [n_molecules x 2 x n_PCA_components] ndarray of the final principal
            components. The first row along axis 1 represents the principal structural
            components, while the second row represents the principal bonding components.
            Returns another ndarray of size [n_molecules x 3] with columns representing
            the HOMO, LUMO and HOMO-LUMO gap energies of each molecule. Finally, returns
            a [n_molecules] ndarray of the database iteration that each corresponding
            molecule came from.
        '''
        print('----------------------------------------------')
        print('Initialising...')
        # Count how many parsed iteration databases exist in the folder; the
        # loop below assumes they are numbered contiguously from 1.
        db_iters = 0
        for f in os.listdir(self.db_folder):
            if f.startswith(self.db_type) and f.endswith('_parsed.db'): db_iters += 1
        print(f'Number of {self.db_type} iterations: {db_iters}')
        print('Initialisation complete. Starting iterations over databases.')
        print('----------------------------------------------')
        print()
        # Set up global data arrays
        all_struct_descs = None
        all_bond_descs = None
        all_energies = None
        iteration_map = None
        for i in range(1, db_iters+1):
            print('----------------------------------------------')
            print(f'Database: {self.db_type}{i}')
            db, iterator = self.load_db(i)
            n_molecules = len(db)
            if iteration_map is None:
                # First database: the loop starts at i=1, so a vector of ones
                # correctly labels every molecule as coming from iteration 1.
                iteration_map = np.ones(n_molecules, dtype=int)
            else:
                iteration_map = np.concatenate((iteration_map, np.full(n_molecules, i, dtype=int)))
            print('Database loaded, constructing structural descriptors.')
            print()
            iter_struct_descs = self.construct_descriptors(iterator)
            print('Constructing bonding descriptors and collecting energies.')
            iter_bond_descs = np.zeros((n_molecules, self.n_bonding_features))
            iter_energies = np.zeros((n_molecules, 3))
            # Re-open the row iterator: the first one was consumed by
            # construct_descriptors above.
            iterator = db.select()
            row_counter = -1
            for row in iterator:
                row_counter += 1
                row_features = row.key_value_pairs.keys()
                # Bonding features absent from a row are left at 0.
                for j, feature in enumerate(self.bonding_properties):
                    if feature in row_features:
                        iter_bond_descs[row_counter, j] = row[feature]
                # NOTE(review): energy properties are read unconditionally --
                # presumably every parsed row carries all three; a row missing
                # one would raise here. Confirm against the parsing script.
                for j, e_type in enumerate(self.energy_properties):
                    iter_energies[row_counter, j] = row[e_type]
            # Concatenate to global arrays
            print('Concatenating descriptors to global arrays.')
            if all_struct_descs is None:
                all_struct_descs = iter_struct_descs
            else:
                all_struct_descs = np.concatenate((all_struct_descs, iter_struct_descs))
            if all_bond_descs is None:
                all_bond_descs = iter_bond_descs
            else:
                all_bond_descs = np.concatenate((all_bond_descs, iter_bond_descs))
            if all_energies is None:
                all_energies = iter_energies
            else:
                all_energies = np.concatenate((all_energies, iter_energies))
            # Try to force Python to free up memory
            del iter_struct_descs
            del iter_bond_descs
            print('Iteration complete.')
            print('----------------------------------------------')
            print()
        print('Descriptor construction complete.')
        print('Applying dimensionality reduction to both sets of descriptors...')
        n_molecules_tot = len(all_bond_descs)
        # Axis 1: index 0 = structural components, index 1 = bonding components.
        pca_results = np.zeros((n_molecules_tot, 2, self.n_PCA_components))
        struct_pca_results = structure_PCA.transform(all_struct_descs)
        print('Transformed structural descriptors.')
        pca_results[:, 0, :] = struct_pca_results
        # Free up memory
        del struct_pca_results
        print('Placed structural principal components in final results array.')
        bond_pca_results = bonding_PCA.transform(all_bond_descs)
        print('Transformed bonding descriptors.')
        pca_results[:, 1, :] = bond_pca_results
        # Free up memory
        del bond_pca_results
        print('Placed bonding principal components in final results array.')
        print('Finished.')
        print('----------------------------------------------')
        return pca_results, all_energies, iteration_map
def save_PCA_results(self, filename: str, pca_results: ArrayLike, energies: ArrayLike,
pca_variance_ratios: Optional[ArrayLike]=None, iter_map: Optional[ArrayLike]=None):
'''Save final results as an uncompressed .npz archive.
Args:
filename: Filename to save archive to.
pca_results: First return from fit_PCAs() or transform_db().
energies: Second return from fit_PCAs() or transform_db().
pca_variance_ratios: Third return from fit_PCAs().
iter_map: Fourth return from fit_PCAs()
'''
np.savez(filename, pca_results=pca_results, energies=energies, pca_variance_ratios=pca_variance_ratios, iter_map=iter_map)
def save_PCA_objects(self, filename: str, b_pca: PCA, s_pca: PCA):
'''Save final trained PCA objects as an uncompressed .npz archive.
These can then be used to retrieve principal components of other
databases, using the PCA fitting done here.
Args:
filename: Filename to save archive to.
b_pca: Trained bonding PCA object from fit_PCAs().
s_pca: Trained structural PCA object from fit_PCAs().
'''
np.savez(filename, b_pca=b_pca, s_pca=s_pca)
| Python |
3D | rhyan10/G-SchNetOE62 | qm9_data.py | .py | 10,151 | 252 | import logging
import os
import re
import shutil
import tarfile
import tempfile
from urllib import request as request
from urllib.error import HTTPError, URLError
from base64 import b64encode, b64decode
import numpy as np
import torch
from ase.db import connect
from ase.io.extxyz import read_xyz
from ase.units import Debye, Bohr, Hartree, eV
from schnetpack import Properties
from schnetpack.datasets import DownloadableAtomsData
from utility_classes import ConnectivityCompressor
from qm9_preprocess_dataset import preprocess_dataset
class QM9gen(DownloadableAtomsData):
    """ QM9 benchmark dataset for organic molecules with up to nine non-hydrogen atoms
    from {C, O, N, F}.
    This class adds convenience functions to download QM9 from figshare,
    pre-process the data such that it can be used for molecule generation with the
    G-SchNet model, and load the data into pytorch.
    Args:
        path (str): path to directory containing qm9 database
        subset (list, optional): indices of subset, set to None for entire dataset
            (default: None).
        download (bool, optional): enable downloading if qm9 database does not
            exist (default: True)
        precompute_distances (bool, optional): if True and the pre-processed
            database does not yet exist, the pairwise distances of atoms in the
            dataset's molecules will be computed during pre-processing and stored in
            the database (increases storage demand of the dataset but decreases
            computational cost during training as otherwise the distances will be
            computed once in every epoch, default: True)
        remove_invalid (bool, optional): if True QM9 molecules that do not pass the
            valence check will be removed from the training data (note 1: the
            validity is per default inferred from a pre-computed list in our
            repository but will be assessed locally if the download fails,
            note 2: only works if the pre-processed database does not yet exist,
            default: True)
    References:
        .. [#qm9_1] https://ndownloader.figshare.com/files/3195404
    """
    # general settings for the dataset
    # NOTE(review): the type/valence lists below cover far more elements than
    # QM9's {H, C, N, O, F} -- presumably extended for OE62-style data; the
    # class docstring still describes plain QM9.
    available_atom_types = [1, 3, 5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 52, 53]  # all atom types found in the dataset
    atom_types_valence = [1, 1, 3, 4, 3, 2, 1, 4, 5, 6, 1, 5, 6, 1, 6, 1]  # valence constraints of the atom types
    radial_limits = [0.85, 2]  # minimum and maximum distance between neighboring atoms
    # names of the properties stored per molecule in the ase database
    A = 'rotational_constant_A'
    B = 'rotational_constant_B'
    C = 'rotational_constant_C'
    mu = 'dipole_moment'
    alpha = 'isotropic_polarizability'
    homo = 'homo'
    lumo = 'lumo'
    gap = 'gap'
    r2 = 'electronic_spatial_extent'
    zpve = 'zpve'
    U0 = 'energy_U0'
    U = 'energy_U'
    H = 'enthalpy_H'
    G = 'free_energy'
    Cv = 'heat_capacity'
    properties = [
        A, B, C, mu, alpha,
        homo, lumo, gap, r2, zpve,
        U0, U, H, G, Cv
    ]
    # unit conversion factors, aligned index-by-index with `properties`
    units = [1., 1., 1., Debye, Bohr ** 3,
             Hartree, Hartree, Hartree,
             Bohr ** 2, Hartree,
             Hartree, Hartree, Hartree,
             Hartree, 1.,
             ]
    # name -> conversion factor lookup (used when parsing the raw xyz files)
    units_dict = dict(zip(properties, units))
    connectivity_compressor = ConnectivityCompressor()

    def __init__(self, path, subset=None, download=True, precompute_distances=True,
                 remove_invalid=True):
        self.path = path
        self.dbpath = os.path.join(self.path, 'qm9gen.db')
        self.precompute_distances = precompute_distances
        self.remove_invalid = remove_invalid
        super().__init__(self.dbpath, subset=subset,
                         available_properties=self.properties,
                         units=self.units, download=download)

    def create_subset(self, idx):
        """
        Returns a new dataset that only consists of provided indices.
        Args:
            idx (numpy.ndarray): subset indices
        Returns:
            schnetpack.data.AtomsData: dataset with subset of original data
        """
        idx = np.array(idx)
        # translate indices relative to the current subset into absolute ones
        subidx = idx if self.subset is None or len(idx) == 0 \
            else np.array(self.subset)[idx]
        return type(self)(self.path, subidx, download=False)

    def get_properties(self, idx):
        """Load the idx-th molecule and assemble its property dictionary.

        Returns:
            tuple: the ase.Atoms object and a dict of torch tensors (atom
            types, centered positions, cell, connectivity matrix, neighbor
            lists and, if precomputed, pairwise distances).
        """
        _idx = self._subset_index(idx)
        with connect(self.dbpath) as conn:
            row = conn.get(_idx + 1)
        at = row.toatoms()
        # extract/calculate structure
        properties = {}
        # FIX: use builtin int -- the np.int alias was deprecated in NumPy
        # 1.20 and removed in 1.24
        properties[Properties.Z] = torch.LongTensor(at.numbers.astype(int))
        positions = at.positions.astype(np.float32)
        positions -= at.get_center_of_mass()  # center positions
        properties[Properties.R] = torch.FloatTensor(positions)
        properties[Properties.cell] = torch.FloatTensor(at.cell.astype(np.float32))
        # recover connectivity matrix from compressed format
        con_mat = self.connectivity_compressor.decompress(row.data['con_mat'])
        # save in dictionary
        properties['_con_mat'] = torch.FloatTensor(con_mat.astype(np.float32))
        # extract pre-computed distances (if they exist)
        if 'dists' in row.data:
            properties['dists'] = row.data['dists']
        # get atom environment
        nbh_idx, offsets = self.environment_provider.get_environment(at)
        # store neighbors, cell, and index
        properties[Properties.neighbors] = torch.LongTensor(nbh_idx.astype(int))
        properties[Properties.cell_offset] = torch.FloatTensor(
            offsets.astype(np.float32))
        properties["_idx"] = torch.LongTensor(np.array([idx], dtype=int))
        return at, properties

    def _download(self):
        """Download and pre-process the raw data if no database exists yet.

        Returns:
            bool: True if every required step succeeded.
        """
        works = True
        if not os.path.exists(self.dbpath):
            qm9_path = os.path.join(self.path, 'qm9.db')
            if not os.path.exists(qm9_path):
                works = works and self._load_data()
            works = works and self._preprocess_qm9()
        return works

    def _load_data(self):
        """Download the raw GDB-9 tarball and parse it into qm9.db.

        Returns:
            bool: False if the download failed, True otherwise.
        """
        logging.info('Downloading GDB-9 data...')
        tmpdir = tempfile.mkdtemp('gdb9')
        tar_path = os.path.join(tmpdir, 'gdb9.tar.gz')
        raw_path = os.path.join(tmpdir, 'gdb9_xyz')
        url = 'https://ndownloader.figshare.com/files/3195389'
        try:
            request.urlretrieve(url, tar_path)
            logging.info('Done.')
        except HTTPError as e:
            # FIX: use %-style lazy formatting -- extra positional args to
            # logging.error are format arguments, and the old call
            # ('HTTP Error:', e.code, url) had no placeholders for them
            logging.error('HTTP Error: %s %s', e.code, url)
            return False
        except URLError as e:
            logging.error('URL Error: %s %s', e.reason, url)
            return False
        logging.info('Extracting data from tar file...')
        tar = tarfile.open(tar_path)
        tar.extractall(raw_path)
        tar.close()
        logging.info('Done.')
        logging.info('Parsing xyz files...')
        with connect(os.path.join(self.path, 'qm9.db')) as con:
            # sort files by the integer embedded in their name
            ordered_files = sorted(os.listdir(raw_path),
                                   key=lambda x: (int(re.sub(r'\D', '', x)), x))
            for i, xyzfile in enumerate(ordered_files):
                xyzfile = os.path.join(raw_path, xyzfile)
                if (i + 1) % 10000 == 0:
                    logging.info('Parsed: {:6d} / 133885'.format(i + 1))
                properties = {}
                tmp = os.path.join(tmpdir, 'tmp.xyz')
                with open(xyzfile, 'r') as f:
                    lines = f.readlines()
                    # line 2 of each xyz file holds the property values
                    l = lines[1].split()[2:]
                    for pn, p in zip(self.properties, l):
                        # FIX: look up the conversion factor by property name
                        # via units_dict (self.units is a list and raises
                        # TypeError when indexed with a str)
                        properties[pn] = float(p) * self.units_dict[pn]
                    with open(tmp, "wt") as fout:
                        for line in lines:
                            # normalize the '*^' exponent notation used in QM9
                            fout.write(line.replace('*^', 'e'))
                with open(tmp, 'r') as f:
                    ats = list(read_xyz(f, 0))[0]
                con.write(ats, data=properties)
        logging.info('Done.')
        shutil.rmtree(tmpdir)
        return True

    def _preprocess_qm9(self):
        """Run the pre-processing step that builds the generation database.

        Tries to download the pre-computed list of invalid molecules first and
        falls back to local validity assessment if that fails.

        Returns:
            bool: True (pre-processing errors raise inside preprocess_dataset).
        """
        # try to download pre-computed list of invalid molecules
        logging.info('Downloading pre-computed list of invalid QM9 molecules...')
        raw_path = os.path.join(self.path, 'qm9_invalid.txt')
        url = 'https://github.com/atomistic-machine-learning/G-SchNet/blob/master/' \
              'qm9_invalid.txt?raw=true'
        try:
            request.urlretrieve(url, raw_path)
            logging.info('Done.')
            invalid_list = np.loadtxt(raw_path)
        except HTTPError as e:
            # FIX: %-style lazy formatting (see _load_data)
            logging.error('HTTP Error: %s %s', e.code, url)
            logging.info('CAUTION: Could not download pre-computed list, will assess '
                         'validity during pre-processing.')
            invalid_list = None
        except URLError as e:
            logging.error('URL Error: %s %s', e.reason, url)
            logging.info('CAUTION: Could not download pre-computed list, will assess '
                         'validity during pre-processing.')
            invalid_list = None
        # check validity of molecules and store connectivity matrices and inter-atomic
        # distances in database as a pre-processing step
        qm9_db = os.path.join(self.path, 'qm9.db')
        # interleave types and valences: [type1, valence1, type2, valence2, ...]
        valence_list = \
            np.array([self.available_atom_types, self.atom_types_valence]).flatten('F')
        preprocess_dataset(datapath=qm9_db, valence_list=valence_list,
                           n_threads=8, n_mols_per_thread=125, logging_print=True,
                           new_db_path=self.dbpath,
                           precompute_distances=self.precompute_distances,
                           remove_invalid=self.remove_invalid,
                           invalid_list=invalid_list)
        return True

    def get_available_properties(self, available_properties):
        # we don't use properties other than stored connectivity matrices (and
        # distances, if they were precomputed) so we skip this part
        return available_properties
| Python |
3D | rhyan10/G-SchNetOE62 | loopLUMO.py | .py | 6,231 | 115 | import numpy as np
import statistics
import ase.io
import ase
import ase.io.xyz
import argparse
import subprocess
import ase.io
import pickle
import sys
import shutil
import time
sys.path.append('../G-SchNetOE62')
import utility_functions
from utility_functions import print_atom_bond_ring_stats
from ase import neighborlist
from ase.build import molecule
import numpy as np
import os
from scipy import sparse
from analysis import MoleculeAnalysis
from analysis import SchNetHAnalysis
import matplotlib.pyplot as plt
# Global matplotlib styling shared by all analysis plots in this script.
size=22
params={'legend.fontsize': 'large',
        'figure.figsize': (7,5),
        'axes.labelsize':size,
        'axes.titlesize':size,
        'ytick.labelsize':size,
        'xtick.labelsize':size,
        'axes.titlepad':10
       }
plt.rcParams.update(params)
#fig=plt.figure()
# Location of the SchNarc helper scripts (hard-coded cluster path; the
# subprocess calls below reference it with absolute paths as well).
SCHNARC="/home/chem/mssdjc/software/SchNarc/src/scripts/"
# Active-learning driver: in each iteration it (1) retrains G-SchNet on the
# current training set, (2) generates and filters molecules, (3) analyses their
# geometry statistics, (4) predicts orbital energies with SchNet models, and
# (5) biases the next training set towards low-LUMO molecules.
if __name__ == "__main__":
    # element pairs/trios whose bond-length/angle distributions are analysed
    bond_length_pairs = [['C','C'],['C','O'],['C','H']]
    bond_angle_trios = [['C','C','C'],['C','O','C'],['C','C','O']]
    number_of_loops = 20
    generated_database_size = 200000
    datapath = "./data/"
    database_size = 11152  # size of the current training database (updated per loop)
    # NOTE(review): the loop starts at 12, i.e. this run resumes a previous
    # campaign after iteration 11 -- reset to 0/1 for a fresh run.
    for i in range(12,number_of_loops):
        # retrain G-SchNet starting from the previous iteration's model
        # (skipped for the resumed iteration, assumed to be trained already)
        if i != 12:
            train = subprocess.Popen(['python ../G-SchNetOE62/gschnet_script.py train gschnet '+datapath+' ./models/iteration'+str(i)+'/ --pretrained_path ./models/iteration'+str(i-1)+'/ --dataset_name template_data --split '+str(round(database_size*0.8))+' '+str(round(database_size*0.1))+' --cuda --draw_random_samples 10 --batch_size 1 --max_epochs 5000'],shell=True)
            train.wait()
        # sample new molecules from the freshly trained model
        generate = subprocess.Popen(['python ../G-SchNetOE62/gschnet_script.py generate gschnet ./models/iteration'+str(i)+'/ '+str(generated_database_size)+' --cuda'],shell=True)
        generate.wait()
        # filter the raw generator output into generated_molecules.db
        filter_ = subprocess.Popen(['python ../G-SchNetOE62/template_filter_generated.py ./models/iteration'+str(i)+'/generated/generated.mol_dict'],shell=True)
        filter_.wait()
        # geometry analysis of the filtered molecules
        path = os.path.join("./models/iteration"+str(i)+"/", "analysis")
        os.system("mkdir %s"%path)
        geoms = ase.io.read('./models/iteration'+str(i)+'/generated/generated_molecules.db',':')
        number_of_molecules = MoleculeAnalysis.get_molecule_sizes(geoms)
        ring_data = MoleculeAnalysis.get_rings(geoms)
        #os.system("mkdir ./models/iteration"+str(i)+"/analysis")
        with open('./models/iteration'+str(i)+'/analysis/number_of_molecules.pkl', 'wb') as f:
            pickle.dump(number_of_molecules, f)
        with open('./models/iteration'+str(i)+'/analysis/rings.pkl', 'wb') as f:
            pickle.dump(ring_data, f)
        for bond in bond_length_pairs:
            bond_lengths = MoleculeAnalysis.get_bond_distances(geoms,bond)
            with open('./models/iteration'+str(i)+'/analysis/'+(bond[0]+bond[1])+'.pkl', 'wb') as f:
                pickle.dump(bond_lengths, f)
        for bond_angle in bond_angle_trios:
            bond_lengths = MoleculeAnalysis.get_angles(geoms,bond_angle)
            with open('./models/iteration'+str(i)+'/analysis/'+(bond_angle[0]+bond_angle[1]+bond_angle[2])+'.pkl', 'wb') as f:
                pickle.dump(bond_lengths, f)
        # orbital-energy predictions: two independent PBE0 models (ensemble)
        # plus a delta model towards GW-quality eigenvalues
        orbital_energies_prediction1 = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/PBE0_1 --parallel --batch_size 1 --cuda'],shell=True)
        orbital_energies_prediction1.wait()
        orbital_energies_prediction2 = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/PBE0_2 --parallel --batch_size 1 --cuda'],shell=True)
        orbital_energies_prediction2.wait()
        quasi_energies_prediction = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/Delta --parallel --batch_size 1 --cuda'],shell=True)
        quasi_energies_prediction.wait()
        # collect the prediction archives into this iteration's folder
        path = os.path.join("./models/iteration"+str(i)+"/", "energy_predictions")
        os.system("mkdir %s"%path)
        shutil.move("./Models/PBE0_1/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/PBE01_predictions.npz")
        shutil.move("./Models/PBE0_2/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/PBE02_predictions.npz")
        shutil.move("./Models/Delta/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/Delta_predictions.npz")
        dbname1="./models/iteration"+str(i)+"/energy_predictions/PBE01_predictions.npz"
        dbname2="./models/iteration"+str(i)+"/energy_predictions/PBE02_predictions.npz"
        dbname3="./models/iteration"+str(i)+"/energy_predictions/Delta_predictions.npz"
        pbe0 = np.load(dbname1,allow_pickle=True)["eigenvalues_pbe0"]
        pbe0_2 =np.load(dbname2,allow_pickle=True)["eigenvalues_pbe0"]
        delta = np.load(dbname3,allow_pickle=True)["delta_eigenvalues_pbe0_gbw"]
        geoms = ase.io.read('./models/iteration'+str(i)+'/generated/generated_molecules.db',':')
        sorted_gw,geoms = SchNetHAnalysis.energy_analysis(pbe0,pbe0_2,delta,geoms)
        # NOTE(review): columns 50/51 are assumed to be the HOMO/LUMO levels in
        # the sorted eigenvalue array -- confirm against energy_analysis().
        HOMO = sorted_gw[:,50].reshape(-1)
        LUMO = sorted_gw[:,51].reshape(-1)
        with open('./models/iteration'+str(i)+'/energy_predictions/HOMO_energies.pkl', 'wb') as f:
            pickle.dump(HOMO, f)
        with open('./models/iteration'+str(i)+'/energy_predictions/LUMO_energies.pkl', 'wb') as f:
            pickle.dump(LUMO, f)
        ####### Edit below for HOMO or HLGAP #######
        # Bias selection: keep only molecules whose LUMO lies more than one
        # standard deviation below the mean; they form the next training set.
        std = np.std(LUMO)
        mean = np.mean(LUMO)
        new_db = []
        for j, energy in enumerate(LUMO):
            if energy < mean - std:
                new_db.append(geoms[j])
        # archive the old training db, install the new one, and drop the
        # cached pre-processed db so it is rebuilt from the new data
        os.system("mv ./data/train.db ./data/train%i.db"%(i-1))
        ase.io.write("./data/train.db", new_db)
        os.remove("./data/train_gschnet.db")
        database_size = len(new_db)
        print(database_size)
        ############################################
| Python |
3D | rhyan10/G-SchNetOE62 | template_filter_generated_working.py | .py | 16,663 | 347 |
import numpy as np
import pickle
import os
import argparse
import time
from scipy.spatial.distance import pdist
from schnetpack import Properties
from utility_classes import Molecule, ConnectivityCompressor
from utility_functions import update_dict
from ase import Atoms
from ase.db import connect
def get_parser():
    """ Setup parser for command line arguments """
    parser = argparse.ArgumentParser()
    # (argument name(s), keyword arguments) pairs, registered in order below
    arg_specs = [
        (('data_path',),
         dict(help='Path to generated molecules in .mol_dict format, '
                   'a database called "generated_molecules.db" with the '
                   'filtered molecules along with computed statistics '
                   '("generated_molecules_statistics.npz") will be '
                   'stored in the same directory as the input file/s '
                   '(if the path points to a directory, all .mol_dict '
                   'files in the directory will be merged and filtered '
                   'in one pass)')),
        (('--valence',),
         dict(default=[1,1,3,1, 5,3, 6,4, 7,3, 8,2, 9,1, 14,4, 15,5, 16,6, 17,1, 33,5, 34,6, 35,1, 52,6, 53,1],
              type=int,
              nargs='+',
              help='the valence of atom types in the form '
                   '[type1 valence type2 valence ...] '
                   '(default: %(default)s)')),
        (('--filters',),
         dict(type=str, nargs='*',
              default=['valence', 'disconnected', 'unique'],
              choices=['valence', 'disconnected', 'unique'],
              help='Select the filters applied to identify '
                   'invalid molecules (default: %(default)s)')),
        (('--store',),
         dict(type=str, default='valid',
              choices=['all', 'valid'],
              help='How much information shall be stored '
                   'after filtering: \n"all" keeps all '
                   'generated molecules and statistics, '
                   '\n"valid" keeps only valid molecules'
                   '(default: %(default)s)')),
        (('--print_file',),
         dict(help='Use to limit the printing if results are '
                   'written to a file instead of the console ('
                   'e.g. if running on a cluster)',
              action='store_true')),
    ]
    for names, kwargs in arg_specs:
        parser.add_argument(*names, **kwargs)
    return parser
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    print_file = args.print_file
    printed_todos = False

    # read input file or fuse dictionaries if data_path is a folder
    if not os.path.isdir(args.data_path):
        if not os.path.isfile(args.data_path):
            print(f'\n\nThe specified data path ({args.data_path}) is neither a file '
                  f'nor a directory! Please specify a different data path.')
            raise FileNotFoundError
        else:
            with open(args.data_path, 'rb') as f:
                res = pickle.load(f)  # read input file
            target_db = os.path.join(os.path.dirname(args.data_path),
                                     'generated_molecules.db')
    else:
        print(f'\n\nFusing .mol_dict files in folder {args.data_path}...')
        mol_files = [f for f in os.listdir(args.data_path)
                     if f.endswith(".mol_dict")]
        if len(mol_files) == 0:
            print(f'Could not find any .mol_dict files at {args.data_path}! Please '
                  f'specify a different data path!')
            raise FileNotFoundError
        res = {}
        for file in mol_files:
            with open(os.path.join(args.data_path, file), 'rb') as f:
                cur_res = pickle.load(f)
                update_dict(res, cur_res)
        res = dict(sorted(res.items()))  # sort dictionary keys
        print('...done!')
        target_db = os.path.join(args.data_path, 'generated_molecules.db')

    # compute array with valence of provided atom types
    # (valence[z] holds the valence of atomic number z, 0 if not specified)
    max_type = max(args.valence[::2])
    valence = np.zeros(max_type + 1, dtype=int)
    valence[args.valence[::2]] = args.valence[1::2]

    # print the chosen settings
    valence_str = ''
    for i in range(max_type + 1):
        if valence[i] > 0:
            valence_str += f'type {i}: {valence[i]}, '
    filters = []
    if 'valence' in args.filters:
        filters += ['valency']
    if 'disconnected' in args.filters:
        filters += ['connectedness']
    if 'unique' in args.filters:
        filters += ['uniqueness']
    # join filter names into a readable enumeration ("a, b, and c")
    if len(filters) >= 3:
        edit = ', '
    else:
        edit = ' '
    for i in range(len(filters) - 1):
        filters[i] = filters[i] + edit
    if len(filters) >= 2:
        filters = filters[:-1] + ['and '] + filters[-1:]
    string = ''.join(filters)
    print(f'\n\n1. Filtering molecules according to {string}...')
    print(f'\nTarget valence:\n{valence_str[:-2]}\n')

    # initial setup of array for statistics and some counters
    n_generated = 0
    n_valid = 0
    n_non_unique = 0
    stat_heads = ['n_atoms', 'id', 'valid', 'duplicating', 'n_duplicates',
                  'known', 'equals', 'C', 'N', 'O', 'F', 'H', 'B', 'Li', 'Si',
                  'P', 'S', 'Cl', 'As', 'Se', 'Br', 'Te', 'I']
    # atomic numbers matching the per-type columns of stat_heads above
    # FIX: B (5) now precedes Li (3) -- the old order [..., 1, 3, 5, ...]
    # silently swapped the B and Li statistics columns
    stat_type_numbers = [6, 7, 8, 9, 1, 5, 3, 14, 15, 16, 17, 33, 34, 35, 52, 53]
    stats = np.empty((len(stat_heads), 0))
    all_mols = []
    connectivity_compressor = ConnectivityCompressor()

    # iterate over generated molecules by length (all generated molecules with n
    # atoms are stored in one batch, so we loop over all available lengths n)
    # this is useful e.g. for finding duplicates, since we only need to compare
    # molecules of the same length (and can actually further narrow down the
    # candidates by looking at the exact atom type composition of each molecule)
    start_time = time.time()
    for n_atoms in res:
        if not isinstance(n_atoms, int) or n_atoms == 0:
            continue

        prog_str = lambda x: f'Checking {x} for molecules of length {n_atoms}'
        work_str = 'valence' if 'valence' in args.filters else 'dictionary'
        if not print_file:
            print('\033[K', end='\r', flush=True)
            print(prog_str(work_str) + ' (0.00%)', end='\r', flush=True)
        else:
            print(prog_str(work_str), flush=True)

        d = res[n_atoms]  # dictionary containing molecules of length n_atoms
        all_pos = d[Properties.R]  # n_mols x n_atoms x 3 matrix with atom positions
        all_numbers = d[Properties.Z]  # n_mols x n_atoms matrix with atom types
        n_mols = len(all_pos)
        valid = np.ones(n_mols, dtype=int)  # all molecules are valid in the beginning

        # check valency of molecules with length n
        if 'valence' in args.filters:
            if not printed_todos:
                print('Please implement a procedure to check the valence in generated '
                      'molecules! Skipping valence check...')
            # TODO: implement a valence check (see utility_classes.Molecule);
            # OpenBabel's kekulization is unreliable beyond {C, N, O, F}, so a
            # tailored procedure is needed for the extended atom-type set.

        # detect molecules with disconnected parts if desired
        if 'disconnected' in args.filters:
            if not print_file:
                print('\033[K', end='\r', flush=True)
                print(prog_str("connectedness") + '...', end='\r', flush=True)
            if not printed_todos:
                print('Please implement a procedure to check the connectedness of '
                      'generated molecules! In this template script we will now remove '
                      'molecules where two atoms are closer than 0.3 angstrom as an '
                      'example processing step...')
            # TODO: implement a proper connectedness check (e.g. via a
            # connectivity matrix from kekulized bond orders or a distance
            # cutoff, see get_connectivity in template_preprocess_dataset).
            # As a placeholder, remove molecules with atoms closer than 0.3 A.
            # FIX: this loop now only runs when the 'disconnected' filter is
            # selected (it previously sat outside the guard and always ran).
            for i in range(len(all_pos)):
                positions = all_pos[i]  # extract atom positions
                dists = pdist(positions)  # compute pair-wise distances
                # FIX: the comparison belongs inside np.any -- the previous
                # 'np.any(dists) < 0.3' compared a boolean to 0.3 and never
                # flagged close contacts
                if np.any(dists < 0.3):
                    valid[i] = 0  # mark current molecule as invalid

        # identify identical molecules (e.g. using fingerprints)
        if not print_file:
            print('\033[K', end='\r', flush=True)
            print(prog_str('uniqueness') + '...', end='\r', flush=True)
        if not printed_todos:
            print('Please implement a procedure to check the uniqueness of '
                  'generated molecules! Skipping check for uniqueness...')
            printed_todos = True
        # TODO: implement duplicate detection (e.g. fingerprints + canonical
        # SMILES as in the QM9 experiments, learned embeddings, or RMSD with
        # optimal atom matching -- the right choice depends on the data).
        # duplicate_count contains the number of duplicates found for each structure
        duplicate_count = np.zeros(n_mols, dtype=int)
        # duplicating contains -1 for original structures and the id of the duplicated
        # original structure for duplicates
        duplicating = -np.ones(n_mols, dtype=int)
        # remove duplicate structures from list of valid molecules if desired
        if 'unique' in args.filters:
            valid[duplicating != -1] = 0
        # count number of non-unique structures
        n_non_unique += np.sum(duplicate_count)

        # store list of valid molecules in dictionary
        d.update({'valid': valid})

        # collect statistics of generated data
        n_generated += len(valid)
        n_valid += np.sum(valid)
        # count number of atoms per type (columns aligned with stat_heads)
        n_of_types = [np.sum(all_numbers == i, axis=1) for i in stat_type_numbers]
        stats_new = np.stack(
            (np.ones(len(valid)) * n_atoms,  # n_atoms
             np.arange(0, len(valid)),       # id
             valid,                          # valid
             duplicating,                    # id of duplicated molecule
             duplicate_count,                # number of duplicates
             -np.ones(len(valid)),           # known
             -np.ones(len(valid)),           # equals
             *n_of_types,                    # n_atoms per type
             ),
            axis=0)
        stats = np.hstack((stats, stats_new))

    if not print_file:
        print('\033[K', end='\r', flush=True)
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    print(f'Needed {h:d}h{m:02d}m{s:02d}s.')

    # Update and print results
    res.update({'n_generated': n_generated,
                'n_valid': n_valid,
                'stats': stats,
                'stat_heads': stat_heads})
    print(f'Number of generated molecules: {n_generated}\n'
          f'Number of duplicate molecules: {n_non_unique}')
    if 'unique' in args.filters:
        print(f'Number of unique and valid molecules: {n_valid}')
    else:
        print(f'Number of valid molecules (including duplicates): {n_valid}')

    # Remove invalid molecules from results if desired
    if args.store != 'all':
        shrunk_res = {}
        shrunk_stats = np.empty((len(stats), 0))
        i = 0  # running column offset of the current length batch in stats
        for key in res:
            if isinstance(key, str):
                shrunk_res[key] = res[key]
                continue
            if key == 0:
                continue
            d = res[key]
            start = i
            end = i + len(d['valid'])
            idcs = np.where(d['valid'])[0]
            if len(idcs) < 1:
                i = end
                continue
            # shrink stats
            idx_id = stat_heads.index('id')
            idx_known = stat_heads.index('known')
            new_stats = stats[:, start:end]
            new_stats = new_stats[:, idcs]
            new_stats[idx_id] = np.arange(len(new_stats[idx_id]))  # adjust ids
            shrunk_stats = np.hstack((shrunk_stats, new_stats))
            # shrink positions and atomic numbers
            shrunk_res[key] = {Properties.R: d[Properties.R][idcs],
                               Properties.Z: d[Properties.Z][idcs]}
            i = end
        shrunk_res['stats'] = shrunk_stats
        res = shrunk_res

    # transfer results to ASE db
    # get filename that is not yet taken for db
    if os.path.isfile(target_db):
        file_name, _ = os.path.splitext(target_db)
        expand = 0
        while True:
            expand += 1
            new_file_name = file_name + '_' + str(expand)
            if os.path.isfile(new_file_name + '.db'):
                continue
            else:
                target_db = new_file_name + '.db'
                break
    print(f'Transferring generated molecules to database at {target_db}...')
    # open db
    with connect(target_db) as conn:
        # store metadata
        conn.metadata = {'n_generated': int(n_generated),
                         'n_non_unique': int(n_non_unique),
                         'n_valid': int(n_valid),
                         'non_unique_removed_from_valid': 'unique' in args.filters}
        # store molecules
        for n_atoms in res:
            if isinstance(n_atoms, str) or n_atoms == 0:
                continue
            d = res[n_atoms]
            all_pos = d[Properties.R]
            all_numbers = d[Properties.Z]
            for pos, num in zip(all_pos, all_numbers):
                at = Atoms(num, positions=pos)
                conn.write(at)

    # store gathered statistics in separate file
    np.savez_compressed(os.path.splitext(target_db)[0] + '_statistics.npz',
                        stats=res['stats'], stat_heads=res['stat_heads'])
| Python |
3D | rhyan10/G-SchNetOE62 | display_molecules.py | .py | 19,675 | 383 | import argparse
import sys
import os
import subprocess
import numpy as np
import tempfile
from ase.db import connect
from ase.io import write
from utility_classes import IndexProvider
def get_parser():
    """ Setup parser for command line arguments """
    parser = argparse.ArgumentParser()
    # (flag, keyword-argument) specs; registered with the parser in order below
    arg_specs = [
        ('--data_path',
         dict(type=str, default=None,
              help='Path to database with filtered, generated molecules '
                   '(.db format, needs to be provided if generated '
                   'molecules shall be displayed, default: %(default)s)')),
        ('--train_data_path',
         dict(type=str, default=None,
              help='Path to training data base (.db format, needs to be '
                   'provided if molecules from the training data set '
                   'shall be displayed, e.g. when using --train or '
                   '--test, default: %(default)s)')),
        ('--select',
         dict(type=str, nargs='*', default=None,
              help='Selection strings that specify which molecules '
                   'shall be shown, if None all molecules from '
                   'data_path and/or train_data_path are shown, '
                   'providing multiple strings'
                   ' will open multiple windows (one per string), '
                   '(default: %(default)s). The selection string has '
                   'the general format "Property,OperatorTarget" (e.g. '
                   '"C,>8"to filter for all molecules with more than '
                   'eight carbon atoms where "C" is the statistic '
                   'counting the number of carbon atoms in a molecule, '
                   '">" is the operator, and "8" is the target value). '
                   'Multiple conditions can be combined to form one '
                   'selection string using "&" (e.g "C,>8&R5,>0" to '
                   'get all molecules with more than 8 carbon atoms '
                   'and at least 1 ring of size 5). Prepending '
                   '"training" to the selection string will filter and '
                   'display molecules from the training data base '
                   'instead of generated molecules (e.g. "training C,>8"'
                   '). An overview of the available properties for '
                   'molecuels generated with G-SchNet trained on QM9 can'
                   ' be found in the README.md.')),
        ('--print_indices',
         dict(action='store_true',
              help='For each provided selection print out the indices '
                   'of molecules that match the respective selection '
                   'string')),
        ('--export_to_dir',
         dict(type=str, default=None,
              help='Optionally, provide a path to an directory to which '
                   'indices of molecules matching the corresponding '
                   'query shall be written (one .npy-file (numpy) per '
                   'selection string, if None is provided, the '
                   'indices will not be exported, default: %(default)s)')),
        ('--train',
         dict(action='store_true',
              help='Display all generated molecules that match '
                   'structures used during training and the '
                   'corresponding molecules from the training data set.')),
        ('--test',
         dict(action='store_true',
              help='Display all generated molecules that match '
                   'held out test data structures and the '
                   'corresponding molecules from the training data set.')),
        ('--novel',
         dict(action='store_true',
              help='Display all generated molecules that match neither '
                   'structures used during training nor those held out '
                   'as test data.')),
        ('--block',
         dict(action='store_true',
              help='Make the call to ASE GUI blocking (such that the '
                   'script stops until the GUI window is closed).')),
    ]
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    return parser
def view_ase(mols, name, block=False):
    '''
    Display a list of molecules using the ASE GUI.

    Args:
        mols (list of ase.Atoms): molecules as ase.Atoms objects
        name (str): the name that shall be displayed in the windows top bar
            (also used as the temporary file name)
        block (bool, optional): whether the call to ase gui shall block or not block
            the script (default: False)
    '''
    # NOTE: locals were renamed from `dir`/`format`, which shadowed builtins
    tmp_dir = tempfile.mkdtemp('', 'generated_molecules_')  # make temporary directory
    filename = os.path.join(tmp_dir, name)  # path of temporary file
    file_format = 'traj'  # use trajectory format for temporary file
    command = sys.executable + ' -m ase gui -b'  # command to execute ase gui viewer
    write(filename, mols, format=file_format)  # write molecules to temporary file
    # show molecules in ase gui and remove temporary file and directory afterwards
    if block:
        subprocess.call(command.split() + [filename])
        os.remove(filename)
        os.rmdir(tmp_dir)
    else:
        # non-blocking: clean up via delayed shell commands so the GUI has
        # time to read the file before it is deleted
        subprocess.Popen(command.split() + [filename])
        subprocess.Popen(['sleep 60; rm "{0}"'.format(filename)], shell=True)
        subprocess.Popen(['sleep 65; rmdir "{0}"'.format(tmp_dir)], shell=True)
def print_indices(idcs, name='', per_line=10):
    '''
    Prints provided indices in a clean formatting.

    Args:
        idcs (list of int): indices that shall be printed
        name (str): the selection string that was used to obtain the indices
        per_line (int, optional): the number of indices that are printed per line (
            default: 10)
    '''
    # guard: max() below would raise ValueError on an empty list
    if len(idcs) == 0:
        print(f'\nNo indices to print for selection {name}!')
        return
    biggest = len(str(max(idcs)))  # pad every index to the width of the largest
    new_line = '\n'
    fmt = f'>{biggest}d'  # renamed from `format`, which shadowed the builtin
    str_idcs = [f'{j:{fmt}} ' + (new_line if (i+1) % per_line == 0 else '')
                for i, j in enumerate(idcs)]
    print(f'\nAll {len(idcs)} indices for selection {name}:')
    print(''.join(str_idcs))
if __name__ == '__main__':
    # Entry point: parse CLI arguments and validate that the combination of
    # flags is consistent before touching any databases.
    parser = get_parser()
    args = parser.parse_args()
    # make sure that at least one path was provided
    if args.data_path is None and args.train_data_path is None:
        print(f'\nPlease specify --data_path to display generated molecules or '
              f'--train_data_path to display training molecules (or both)!')
        sys.exit(0)
    # sort queries into those concerning generated structures and those concerning
    # training data molecules (selection strings prefixed with "training" target
    # the training database)
    train_selections = []
    gen_selections = []
    if args.select is not None:
        for selection in args.select:
            if selection.startswith('training'):
                # put queries concerning training structures aside for later
                train_selections += [selection]
            else:
                gen_selections += [selection]
    # make sure that the required paths were provided
    if args.train or args.test:
        if args.data_path is None:
            print('\nYou need to specify --data_path (and optionally '
                  '--train_data_path) if using --train or --test!')
            sys.exit(0)
    if args.novel:
        if args.data_path is None:
            print('\nYou need to specify --data_path if you want to display novel '
                  'molecules!')
            sys.exit(0)
    if len(gen_selections) > 0:
        if args.data_path is None:
            print(f'\nYou need to specify --data_path to process the selections '
                  f'{gen_selections}!')
            sys.exit(0)
    if len(train_selections) > 0:
        if args.train_data_path is None:
            print(f'\nYou need to specify --train_data_path to process the selections '
                  f'{train_selections}!')
            sys.exit(0)
    # check if statistics files are needed (selections and train/test/novel
    # matching both rely on the precomputed *_statistics.npz files)
    need_gen_stats = (len(gen_selections) > 0) or args.train or args.test or args.novel
    need_train_stats = (len(train_selections) > 0) or args.train or args.test
    # check if there is a database with generated molecules at the provided path
    # and load accompanying statistics file
    if args.data_path is not None:
        if not os.path.isfile(args.data_path):
            print(f'\nThe specified data path ({args.data_path}) is not a file! Please '
                  f'specify a different data path.')
            raise FileNotFoundError
        elif need_gen_stats:
            # statistics are expected next to the db, e.g. gen.db -> gen_statistics.npz
            stats_path = os.path.splitext(args.data_path)[0] + f'_statistics.npz'
            if not os.path.isfile(stats_path):
                print(f'\nCannot find statistics file belonging to {args.data_path} ('
                      f'expected it at {stats_path}. Please make sure that the file '
                      f'exists.')
                raise FileNotFoundError
            else:
                stats_dict = np.load(stats_path)
                # IndexProvider resolves selection strings against the stats matrix
                index_provider = IndexProvider(stats_dict['stats'],
                                               stats_dict['stat_heads'])
    # check if there is a database with training molecules at the provided path
    # and load accompanying statistics file
    if args.train_data_path is not None:
        if not os.path.isfile(args.train_data_path):
            print(f'\nThe specified training data path ({args.train_data_path}) is '
                  f'not a file! Please specify --train_data_path correctly.')
            raise FileNotFoundError
        elif need_train_stats:
            stats_path = os.path.splitext(args.train_data_path)[0] + f'_statistics.npz'
            # NOTE(review): if the stats file is missing while only --train/--test
            # (and no "training ..." selections) were given, this branch falls
            # through to np.load below and fails there — confirm intended.
            if not os.path.isfile(stats_path) and len(train_selections) > 0:
                print(f'\nCannot find statistics file belonging to '
                      f'{args.train_data_path} (expected it at {stats_path}. Please '
                      f'make sure that the file exists.')
                raise FileNotFoundError
            else:
                train_stats_dict = np.load(stats_path)
                train_index_provider = IndexProvider(train_stats_dict['stats'],
                                                     train_stats_dict['stat_heads'])
    # create folder(s) for export of indices if necessary
    if args.export_to_dir is not None:
        if not os.path.isdir(args.export_to_dir):
            print(f'\nDirectory {args.export_to_dir} does not exist, creating '
                  f'it to store indices of molecules matching the queries!')
            os.makedirs(args.export_to_dir)
        else:
            print(f'\nWill store indices of molecules matching the queries at '
                  f'{args.export_to_dir}!')
    # display all generated molecules if desired
    if (len(gen_selections) == 0) and not (args.train or args.test or args.novel) and\
            args.data_path is not None:
        with connect(args.data_path) as con:
            # ASE db rows are 1-based, hence the +1 on every lookup
            _ats = [con.get(int(idx) + 1).toatoms() for idx in range(con.count())]
        view_ase(_ats, 'all generated molecules', args.block)
    # display generated molecules matching selection strings
    if len(gen_selections) > 0:
        for selection in gen_selections:
            # display queries concerning generated molecules
            idcs = index_provider.get_selected(selection)
            if len(idcs) == 0:
                print(f'\nNo molecules match selection {selection}!')
                continue
            with connect(args.data_path) as con:
                _ats = [con.get(int(idx) + 1).toatoms() for idx in idcs]
            if args.print_indices:
                print_indices(idcs, selection)
            view_ase(_ats, f'generated molecules ({selection})', args.block)
            if args.export_to_dir is not None:
                np.save(os.path.join(args.export_to_dir, selection), idcs)
    # display all training molecules if desired
    if (len(train_selections) == 0) and not (args.train or args.test) and \
            args.train_data_path is not None:
        with connect(args.train_data_path) as con:
            _ats = [con.get(int(idx) + 1).toatoms() for idx in range(con.count())]
        view_ase(_ats, 'all molecules in the training data set', args.block)
    # display training molecules matching selection strings
    if len(train_selections) > 0:
        # display training molecules that match the selection strings
        for selection in train_selections:
            # strip the leading "training" token; assumes the format
            # "training <query>" — an IndexError here means the prefix had
            # no query after it
            _selection = selection.split()[1]
            stats_queries = []
            db_queries = []
            # sort into queries handled by looking into the statistics or the db
            for _sel_str in _selection.split('&'):
                prop = _sel_str.split(',')[0]
                if prop in train_stats_dict['stat_heads']:
                    stats_queries += [_sel_str]
                # NOTE(review): str.split always returns at least one element,
                # so this elif is always true and the final else below is
                # unreachable — confirm intended ('+' combines several heads).
                elif len(prop.split('+')) > 0:
                    found = True
                    for p in prop.split('+'):
                        if p not in train_stats_dict['stat_heads']:
                            found = False
                            break
                    if found:
                        stats_queries += [_sel_str]
                    else:
                        db_queries += [_sel_str]
                else:
                    db_queries += [_sel_str]
            # process queries concerning the statistics
            if len(stats_queries) > 0:
                idcs = train_index_provider.get_selected('&'.join(stats_queries))
            else:
                idcs = range(connect(args.train_data_path).count())
            # process queries concerning the db entries (per-row data fields)
            if len(db_queries) > 0:
                with connect(args.train_data_path) as con:
                    for query in db_queries:
                        head, condition = query.split(',')
                        if head not in con.get(1).data:
                            print(f'Entry {head} not found for molecules in the '
                                  f'database, skipping query {query}.')
                            continue
                        else:
                            op = train_index_provider.rel_re.search(condition).group(0)
                            op = train_index_provider.op_dict[op] # extract operator
                            num = float(train_index_provider.num_re.search(
                                condition).group(0)) # extract numerical value
                            # keep only rows whose data field satisfies op(value, num)
                            remaining_idcs = []
                            for idx in idcs:
                                if op(con.get(int(idx)+1).data[head], num):
                                    remaining_idcs += [idx]
                            idcs = remaining_idcs
            # extract molecules matching the query from db and display them
            if len(idcs) == 0:
                print(f'\nNo training molecules match selection {_selection}!')
                continue
            with connect(args.train_data_path) as con:
                _ats = [con.get(int(idx)+1).toatoms() for idx in idcs]
            if args.print_indices:
                print_indices(idcs, selection)
            view_ase(_ats, f'training data set molecules ({_selection})', args.block)
            if args.export_to_dir is not None:
                np.save(os.path.join(args.export_to_dir, selection), idcs)
    # display generated molecules that match structures used for training
    # (the 'known' statistic encodes match status; values 1-2 -> train,
    # 3 -> test, 0 -> novel, per the queries below)
    if args.train:
        idcs = index_provider.get_selected('known,>=1&known,<=2')
        if len(idcs) == 0:
            print(f'\nNo generated molecules found that match structures used '
                  f'during training!')
        else:
            with connect(args.data_path) as con:
                _ats = [con.get(int(idx) + 1).toatoms() for idx in idcs]
            if args.print_indices:
                print_indices(idcs, 'generated train')
            view_ase(_ats, f'generated molecules (matching train structures)',
                     args.block)
            if args.export_to_dir is not None:
                np.save(os.path.join(args.export_to_dir, 'generated train'), idcs)
            # display corresponding training structures; the 'equals' row of the
            # stats matrix maps each generated molecule to its training-db index
            if args.train_data_path is not None:
                _row_idx = list(stats_dict['stat_heads']).index('equals')
                t_idcs = stats_dict['stats'][_row_idx, idcs].astype(int)
                with connect(args.train_data_path) as con:
                    _ats = [con.get(int(idx) + 1).toatoms() for idx in t_idcs]
                if args.print_indices:
                    print_indices(t_idcs, 'reference train')
                view_ase(_ats, f'training molecules (train structures)', args.block)
                if args.export_to_dir is not None:
                    np.save(os.path.join(args.export_to_dir, 'reference train'), t_idcs)
    # display generated molecules that match held out test structures
    if args.test:
        idcs = index_provider.get_selected('known,==3')
        if len(idcs) == 0:
            print(f'\nNo generated molecules found that match held out test '
                  f'structures!')
        else:
            with connect(args.data_path) as con:
                _ats = [con.get(int(idx) + 1).toatoms() for idx in idcs]
            if args.print_indices:
                print_indices(idcs, 'generated test')
            view_ase(_ats, f'generated molecules (matching test structures)',
                     args.block)
            if args.export_to_dir is not None:
                np.save(os.path.join(args.export_to_dir, 'generated test'), idcs)
            # display corresponding training structures
            if args.train_data_path is not None:
                _row_idx = list(stats_dict['stat_heads']).index('equals')
                t_idcs = stats_dict['stats'][_row_idx, idcs].astype(int)
                with connect(args.train_data_path) as con:
                    _ats = [con.get(int(idx) + 1).toatoms() for idx in t_idcs]
                if args.print_indices:
                    print_indices(t_idcs, 'reference test')
                view_ase(_ats, f'training molecules (test structures)', args.block)
                if args.export_to_dir is not None:
                    np.save(os.path.join(args.export_to_dir, 'reference test'), t_idcs)
    # display generated molecules that are novel (i.e. that do not match held out
    # test structures or structures used during training)
    if args.novel:
        idcs = index_provider.get_selected('known,==0')
        if len(idcs) == 0:
            print(f'\nNo novel molecules found!')
        else:
            with connect(args.data_path) as con:
                _ats = [con.get(int(idx) + 1).toatoms() for idx in idcs]
            if args.print_indices:
                print_indices(idcs, 'novel')
            view_ase(_ats, f'generated molecules (novel)', args.block)
            if args.export_to_dir is not None:
                np.save(os.path.join(args.export_to_dir, 'generated novel'), idcs)
| Python |
3D | rhyan10/G-SchNetOE62 | analysis.py | .py | 9,202 | 235 | import ase.io
import pickle
import sys
import time
sys.path.append('./GSchNetOE62')
from ase import neighborlist
from utility_classes import Molecule
import numpy as np
class MoleculeAnalysis():
    '''
    Static analysis routines for iterables of ase.Atoms geometries:
    neighbor statistics, composition fractions, molecule sizes, bond
    lengths, bond angles and ring counts.
    '''
    @staticmethod
    def get_neighbours(geoms,element):
        '''
        Collect neighbor statistics for all atoms of a given element.

        Args:
            geoms (iterable of ase.Atoms): molecules to analyse
            element (str): chemical symbol to select center atoms by
                # NOTE(review): only the FIRST character of each symbol is
                # compared below, so e.g. element 'C' would also match 'Cl'
                # — confirm this is only used with single-letter elements.

        Returns:
            tuple: (neighbor element symbols, neighbor counts per selected
            atom, distances center->neighbor with neighbor index > center)
        '''
        dist=[]
        nneighbours = []
        ntype=[]
        #g=[]
        for mol in geoms:
            # NOTE(review): unlike get_bond_distances/get_angles, no +1 A is
            # added to the natural cutoffs here despite the comment below —
            # confirm which cutoff is intended.
            # we add 1 A to the natural cutoffs defined in ase
            found_geom = False
            found_geom2= False
            cutOff = np.array(neighborlist.natural_cutoffs(mol))
            nl = neighborlist.NeighborList(cutOff, self_interaction=False, bothways=True)
            nl.update(mol)
            distances = mol.get_all_distances()
            # iterate over all atoms in a molecule
            atomtypes = mol.get_chemical_symbols()
            n_neighbor = 0
            for iatom_index, iatom_type in enumerate(atomtypes):
                # ask if atom is O
                # NOTE(review): nO is reset/incremented but never read — dead code
                nO=0
                if iatom_type[0] == element:
                    nO+=1
                    n_neighbor=0
                    # get neighbors
                    neighborlist_mol = nl.get_neighbors(iatom_index)
                    n_neighbor+=len(neighborlist_mol[0])
                    for neighbor in neighborlist_mol[0]:
                        # avoid double counting
                        if neighbor < iatom_index:
                            pass
                        else:
                            # Do your routine here
                            # Ask if atom neighbor is a
                            dist.append(distances[iatom_index][neighbor])
                            ntype.append(atomtypes[neighbor])
                    nneighbours.append(n_neighbor)
        #ase.io.write("outlier.db",g)
        return ntype,nneighbours,dist
    @staticmethod
    def get_ntype(db):
        '''
        Compute the per-molecule fraction of each supported atom type.

        Args:
            db (iterable of ase.Atoms): molecules to analyse

        Returns:
            dict: atomic number -> list of fractions (one entry per molecule)
        '''
        natom = {}
        # atomic numbers supported by the model (H, Li, B, C, N, O, F, Si,
        # P, S, Cl, As, Se, Te, Br, I)
        available_atom_types = [1, 3, 5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 52, 53]
        for iatom in available_atom_types:
            natom[iatom]=[]
        for mol in db:
            atom = {}
            for iatom in available_atom_types:
                atom[iatom]=0
            atypes=mol.get_atomic_numbers()
            natoms = len(atypes)
            for iatom in atypes:
                atom[iatom]+=1
            for iatom in available_atom_types:
                # fraction of this element among all atoms of the molecule
                natom[iatom].append(atom[iatom]/natoms)
        return natom
    @staticmethod
    def get_molecule_sizes(geoms):
        """
        Finds sizes of molecules for whole database

        Returns:
            list of int: number of atoms per molecule
        """
        molecule_sizes = []
        for geom in geoms:
            #print(geom)
            #print(type(geom))
            distances = geom.get_positions()
            molecule_sizes.append(len(distances))
        return molecule_sizes
    @staticmethod
    def get_bond_distances(geoms,elements):
        """
        Finds bond lengths between two elements for all molecules in the database

        Args:
            geoms (iterable of ase.Atoms): molecules to analyse
            elements (sequence of str): pair of element symbols; as in
                get_neighbours, only the first character of the center atom's
                symbol is compared (NOTE(review): 'C' also matches 'Cl').

        :return: List of bond lengths for a particular atom type
        """
        dist = []
        selectmolec=[]
        selectmolec2=[]
        for mol in geoms:
            # we add 1 A to the natural cutoffs defined in ase
            found_geom = False
            found_geom2= False
            cutOff = np.array(neighborlist.natural_cutoffs(mol)) + 1
            nl = neighborlist.NeighborList(cutOff, self_interaction=False, bothways=True)
            nl.update(mol)
            distances = mol.get_all_distances()
            # iterate over all atoms in a molecule
            atomtypes = mol.get_chemical_symbols()
            for iatom_index, iatom_type in enumerate(atomtypes):
                # ask if atom is C
                if iatom_type[0] == elements[0]:
                    # get neighbors
                    neighborlist_mol = nl.get_neighbors(iatom_index)
                    for neighbor in neighborlist_mol[0]:
                        # avoid double counting
                        if neighbor < iatom_index:
                            pass
                        else:
                            # Do your routine here
                            # Ask if atom neighbor is a
                            if atomtypes[neighbor] == elements[1]:
                                dist.append(distances[iatom_index][neighbor])
                                """if distances[iatom_index][neighbor] < 1.55 and distances[iatom_index][neighbor] > 1.5:
                                    if found_geom == False:
                                        found_geom = True
                                        selectmolec.append(mol)
                                if distances[iatom_index][neighbor]<1.35:
                                    if found_geom2 == False:
                                        found_geom2 = True
                                        selectmolec2.append(mol)"""
        #import ase.io
        #ase.io.write("CCmolecs.db",selectmolec)
        #ase.io.write("COmolecs.db",selectmolec2)
        return dist
    @staticmethod
    def get_angles(geoms,elements):
        """
        Finds bond angle between three consecutive atoms for all molecules in the database

        Args:
            geoms (iterable of ase.Atoms): molecules to analyse
            elements (sequence of str): triple (center-matched by first
                character, neighbor1, neighbor2) defining the X-Y-Z angle

        :return: List of angles
        """
        angles = []
        for mol in geoms:
            # we add 0.5 A to the natural cutoffs defined in ase
            # NOTE(review): the code actually adds 1, not 0.5 — confirm.
            cutOff = np.array(neighborlist.natural_cutoffs(mol))+1
            nl = neighborlist.NeighborList(cutOff, self_interaction=False, bothways=True)
            nl.update(mol)
            # iterate over all atoms in a molecule
            atomtypes = mol.get_chemical_symbols()
            for iatom_index,iatom_type in enumerate(atomtypes):
                # ask if atom is C
                if iatom_type[0] == elements[0]:
                    #get neighbors
                    neighborlist_mol=nl.get_neighbors(iatom_index)
                    for neighbor in neighborlist_mol[0]:
                        #avoid double counting
                        if neighbor < iatom_index:
                            pass
                        else:
                            #Do your routine here
                            #Ask if atom neighbor is a
                            if atomtypes[neighbor] == elements[1]:
                                for neighbor2 in neighborlist_mol[0]:
                                    if neighbor==neighbor2:
                                        pass
                                    else:
                                        if atomtypes[neighbor2] == elements[2]:
                                            angles.append(mol.get_angle(neighbor, iatom_index, neighbor2))
                                        else:
                                            pass
        return angles
    @staticmethod
    def get_rings(geoms):
        '''
        Count ring sizes over all molecules with more than 7 atoms and
        normalize the counts by the number of such molecules.

        NOTE(review): if no molecule has more than 7 atoms but rings were
        counted, length_of_database stays 0; rings would then be empty, so
        the division is never reached — but confirm for partially-filled
        inputs.

        Returns:
            dict: ring size (as str) -> average count per considered molecule
        '''
        rings = {}
        length_of_database = 0
        for geom in geoms:
            pos = geom.get_positions()
            atypes = geom.get_atomic_numbers()
            mol = Molecule(pos, atypes, store_positions=False)
            ring_counts = mol.get_ring_counts()
            #Only considers molecules of size more than 7
            if len(pos) > 7:
                length_of_database += 1
                for i in ring_counts:
                    if str(i) in rings.keys():
                        rings[str(i)] = rings[str(i)] + 1
                    else:
                        rings[str(i)] = 1
        for key in rings.keys():
            rings[key] = rings[key] / length_of_database
        return rings
class SchNetHAnalysis():
    '''
    Post-processing of SchNet+H orbital-energy predictions.

    Two independent model predictions (``pred`` and ``pred2``) are compared on
    eigenvalue columns 48:51 (per the original comments, the HOMO/LUMO region
    used for sorting — TODO confirm the exact orbital mapping). Molecules where
    the two models disagree by more than twice the overall mean absolute error,
    or that contain two atoms or fewer, are discarded.
    '''

    # constant shift applied to move predictions onto the GW scale
    SHIFT = 13.70494

    @staticmethod
    def _filter(pred, pred2, delta, geoms):
        '''
        Shared filtering routine for both public entry points.

        Args:
            pred (np.ndarray): eigenvalue predictions, shape (n_mols, n_orbitals)
            pred2 (np.ndarray): second model's predictions, same shape as pred
            delta (np.ndarray or float): delta-learning correction added to pred
            geoms (sequence): geometries; len(geoms[i]) gives the atom count

        Returns:
            tuple: (kept gw energies as np.ndarray, kept geometries,
            kept indices into the input arrays)
        '''
        gw = pred + delta - SchNetHAnalysis.SHIFT
        # let's consider HOMO, HOMO-1, HOMO-2, and LUMO for sorting
        # We used a mask for training to consider only values down to PBE0 eigenvalues -10
        MAE = np.mean(np.abs(pred[:, 48:51] - pred2[:, 48:51]))
        sorted_gw = []
        geomsnew = []
        indices = []
        for i in range(len(gw)):
            # drop outliers (model disagreement > 2*MAE) and tiny fragments
            if np.mean(np.abs(pred[i, 48:51] - pred2[i, 48:51])) > 2 * MAE \
                    or len(geoms[i]) <= 2:
                continue
            sorted_gw.append(gw[i])
            geomsnew.append(geoms[i])
            indices.append(i)
        return np.array(sorted_gw), geomsnew, indices

    @staticmethod
    def energy_analysis(pred, pred2, delta, geoms):
        '''Filter predictions; returns (sorted_gw, kept_geometries).'''
        sorted_gw, geomsnew, _ = SchNetHAnalysis._filter(pred, pred2, delta, geoms)
        return sorted_gw, geomsnew

    # BUG FIX: @staticmethod was missing here, so instance-bound calls would
    # have passed the instance as `pred`; class-attribute calls keep working.
    @staticmethod
    def energy_analysis_indices(pred, pred2, delta, geoms):
        '''Filter predictions; returns (sorted_gw, kept_geometries, kept_indices).'''
        return SchNetHAnalysis._filter(pred, pred2, delta, geoms)
| Python |
3D | rhyan10/G-SchNetOE62 | dbanalysis.py | .py | 23,022 | 545 | import ase.io
import ase.db
import sys
import os
import re
import numpy as np
import argparse
sys.path.append('./GSchNetOE62')
from utility_classes import Molecule
import math, sys, random, os
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
import json
import six
import sys
project_root = "/storage/chem/mssdjc/share/DesignJoe/screenedDBs/scscore/"
sys.path.append(project_root+"/scscore")
from standalone_model_numpy import SCScorer
#import SCScorer
class dbAutoAnalysis:
'''
Automatically runs the molecular analysis routines defined in MoleculeAnalysis()
on a given database, parsing the molecules inside and returning a new database
which is tagged with the implemented analysis metrics. Currently planned metrics
include:
* Number of X atoms
* Number of X-Y bonds
* Total number of rings
* Number of 3-, 4-, 5-, 6-, 7- and 8-rings
* Number of double bonds(?)
* Number of triple bonds(?)
* SMILES string
Each database entry also gets saved with its corresponding bond lengths for every
X-Y bond pair, and its corresponding bond angles for every X-Y-Z bond triplet.
Args:
verbose (boolean, optional): Whether to output all parsing steps or stay quiet.
save_chunksize (int, optional): Chunk size to break the database up into for more efficient writing to disk.
'''
    def __init__(self, verbose=True, save_chunksize=50):
        '''
        Set up the parser configuration: supported elements, bond orders and
        ring sizes, the property -> analysis-function dispatch table, and the
        metadata (columns and key descriptions) of the parsed database.

        Args:
            verbose (boolean, optional): Whether to output all parsing steps or stay quiet.
            save_chunksize (int, optional): Chunk size to break the database up into
                for more efficient writing to disk.
        '''
        self.verbose = verbose
        self.save_chunksize = save_chunksize
        # elements / bond orders / ring sizes covered by the generated metadata
        self.implemented_elements = ['H', 'Li', 'B', 'C', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'As', 'Se', 'Te', 'Br', 'I']
        self.implemented_bond_orders = [1, 2, 3, 4, 5]
        self.implemented_bond_orders_string = ['single', 'double', 'triple', 'quadruple', 'quintuple']
        self.implemented_ring_counts = [3, 4, 5, 6, 7, 8]
        # property groups this parser version knows how to compute; also stored
        # per row so `update` runs can detect what is missing
        self.implemented_properties = [
            'natoms',
            'nbonds_and_nrings',
            'aromaticity',
            'SMILES',
            'InChI',
            'SCscore'
        ]
        # Create dict of analysis functions, sorted by key.
        self.analysis_functions = {
            'natoms': self.get_natoms,
            'nbonds_and_nrings': self.get_nbonds_and_nrings,
            'aromaticity': self.get_aromaticity,
            'SMILES': self.get_SMILES,
            'InChI': self.get_InChI,
            'SCscore': self.get_SCscore
        }
        # Create pdb metadata.
        # default columns shown in the ASE db web interface / CLI
        self.db_meta_columns = [
            'id',
            'formula',
            'HOMO',
            'LUMO',
            'HL',
            'natoms',
            'nbonds_1',
            'nbonds_2',
            'nbonds_3',
            'nrings_4',
            'nrings_5',
            'nrings_6',
            'nrings_7',
            'aromaticity',
            'SCscore'
        ]
        # key -> (short name, description, unit) as expected by ase.db metadata
        self.db_meta_descs = {
            'HOMO': ('HOMO', 'Predicted HOMO energy', 'eV'),
            'LUMO': ('LUMO', 'Predicted LUMO energy', 'eV'),
            'HL': ('HL', 'HOMO-LUMO gap energy', 'eV'),
            'SMILES': ('SMILES', 'Canonical SMILES string', ''),
            'InChI': ('InChI', 'InChI string', ''),
            'aromaticity': ('aromaticity', '% non-H aromaticity of atoms', ''),
            'SCscore': ('SCscore', 'SCscore Synthesisability', '')
        }
        # per-pair bond-count descriptions, e.g. 'nbonds_C2O' (unordered pairs)
        for index, i in enumerate(self.implemented_elements):
            for j in self.implemented_elements[index:]:
                for k in self.implemented_bond_orders:
                    self.db_meta_descs[f'nbonds_{i}{k}{j}'] = (f'nbonds_{i}{k}{j}', f'Number of {i}-{j} {self.implemented_bond_orders_string[k-1]} bonds', '')
        # aggregated per-order bond counts, e.g. 'nbonds_2'
        for i in self.implemented_bond_orders:
            self.db_meta_descs[f'nbonds_{i}'] = (f'nbonds_{i}', f'Overall number of {self.implemented_bond_orders_string[i-1]} bonds', '')
        # per-element atom counts, plus aromatic counts for ring-capable elements
        for i in self.implemented_elements:
            self.db_meta_descs[f'natoms_{i}'] = (f'natoms_{i}', f'Number of {i} atoms', '')
            if i not in ['H', 'Li', 'F', 'Cl', 'Br', 'I']:
                self.db_meta_descs[f'n_aromatic_{i}'] = (f'n_aromatic_{i}', f'Number of aromatic {i} atoms', '')
        # per-size ring counts (total and aromatic)
        for i in self.implemented_ring_counts:
            self.db_meta_descs[f'nrings_{i}'] = (f'nrings_{i}', f'Number of {i}-rings', '')
            self.db_meta_descs[f'nrings_{i}_aromatic'] = (f'nrings_{i}_aromatic', f'Number of aromatic {i}-rings', '')
        if self.verbose:
            print('Properties currently implemented in parser:')
            for prop in self.implemented_properties:
                print(f'  {prop}')
    def __call__(self, db_file, action):
        '''
        Load in database, choose analysis type based on action.

        Args:
            db_file (string): Path to ASE database file.
            action (string, one of [parse, update]): Action to perform on database. 'parse'
                takes an unparsed database, such as the raw output database from an iteration,
                and adds all implemented properties as database keys. 'update' takes an
                existing parsed database and updates its keys to include any newly implemented
                properties.

        Raises:
            ValueError: for an unknown action or a missing db file.
            RuntimeError: when parse/update is used on the wrong kind of database.
        '''
        # Check args
        if action not in ['parse', 'update']:
            raise ValueError('Unknown value for argument "action". Must be one of ["parse", "update"].')
        if not os.path.exists(db_file):
            raise ValueError(f'No file found at {db_file}. Check your path is correct.')
        if action == 'parse':
            # Connect to unparsed database.
            self.db = ase.db.connect(db_file)
            if self.verbose:
                print(f'Connected to existing ASE database at {db_file}.')
                print(f'Database contains {len(self.db)} entries.')
            # Error if trying to use a database which has already been parsed.
            if 'has_been_parsed' in self.db.metadata.keys():
                raise RuntimeError(f'Database in {db_file} has already been parsed before! If you are trying to update this database, use "action=\'update\'" instead.')
            # Create new parsed database next to the input, named <name>_parsed.<ext>.
            db_folder, db_name_ext = os.path.split(db_file)
            db_name, db_ext = os.path.splitext(db_name_ext)
            pdb_file = f'{db_folder}/{db_name}_parsed{db_ext}'
            self.pdb = ase.db.connect(pdb_file)
            # ase.db metadata must be reassigned as a whole dict to persist
            _metadata = self.pdb.metadata
            _metadata.update({
                'title': f'dbAutoAnalysis Parsed Version of {db_name_ext}',
                'has_been_parsed': 'True'
            })
            _metadata.update({
                'key_descriptions': self.db_meta_descs,
                'default_columns': self.db_meta_columns
            })
            self.pdb.metadata = _metadata
            # Hacky fix to update metadata now.
            self.pdb = ase.db.connect(pdb_file)
            if self.verbose: print(f'Created empty parsed database at {pdb_file}.')
            # Analyse database and add parsed rows to pdb.
            self.parse_db(update_only=False, db_write_file=pdb_file)
            print(f'Parsing complete, parsed database saved to {pdb_file}')
        else:
            # Connect to outdated parsed database.
            self.pdb = ase.db.connect(db_file)
            if self.verbose:
                print(f'Connected to existing ASE database at {db_file}.')
                print(f'Database contains {len(self.pdb)} entries.')
            # Error if trying to use a database which hasn't been parsed before.
            if not self.pdb.metadata['has_been_parsed']:
                raise RuntimeError(f'Database in {db_file} has not been parsed yet! If you are trying to parse this database for the first time, use "action=\'parse\'" instead.')
            # Analyse pdb only where necessary and update with new tags.
            self.parse_db(update_only=True, db_write_file=db_file)
            # refresh metadata so descriptions of newly implemented keys appear
            _metadata = self.pdb.metadata
            _metadata.update({
                'key_descriptions': self.db_meta_descs,
                'default_columns': self.db_meta_columns
            })
            self.pdb.metadata = _metadata
            self.pdb = ase.db.connect(db_file)
            print(f'Re-parsing complete, updated database saved.')
def parse_db(self, update_only, db_write_file):
'''
Run through a database, generating data for all tags in each molecule if update_only==False,
or generating only the data for newly implemented tags if update_only==True.
'''
# If parsing a new database.
if not update_only:
if self.verbose: print('Starting to parse properties from database...')
row_iterator = self.db.select()
# Divide the database up into savepoints, so that it doesn't have to save after each row.
num_rows = self.db.count()
if self.save_chunksize > num_rows:
raise ValueError('save_chunksize is greater than the number of rows in the database! Lower save_chunksize and try again.')
num_rows_divisible = num_rows - (num_rows % self.save_chunksize)
num_rows_remainder = num_rows - num_rows_divisible
savepoints = [i for i in range(self.save_chunksize, num_rows_divisible + self.save_chunksize, self.save_chunksize)]
if self.save_chunksize != num_rows:
savepoints.append(num_rows)
savepoints = [sp - 1 for sp in savepoints]
pdb_cache = None
for i, row in enumerate(row_iterator):
if self.verbose:
print('\n------------------------------------------')
print(f'| Molecule {i+1} |')
print('------------------------------------------')
tag_dict = {}
atoms = row.toatoms(add_additional_information=True)
# Loop through all properties
for prop in self.implemented_properties:
# Call correct analysis function to get the desired group of properties.
tags = self.analysis_functions[prop](atoms)
if self.verbose:
prop_keys = tags.keys()
for key in prop_keys:
print(f'{key}: {tags[key]}')
# Insert these properties into the tag dictionary.
tag_dict.update(tags)
# If there is any data in the row's current data_dict (eg. energies), place it in the tag_dict.
row_data = row.data
if row_data is not None:
for key in row_data.keys():
if isinstance(row_data[key], (np.ndarray)):
tag_dict[key] = float(row_data[key])
else:
tag_dict[key] = row_data[key]
# If there is any data in the row's current key_value_pairs (eg. energies), place it in the tag_dict.
row_kvp = row.key_value_pairs
if row_kvp is not None:
for key in row_kvp.keys():
if isinstance(row_kvp[key], (np.ndarray)):
tag_dict[key] = float(row_kvp[key])
else:
tag_dict[key] = row_kvp[key]
# Obtain extra arrays of all unique bond lengths and bond angles (not searchable)
if self.verbose: print('\nObtaining all bond lengths and bond angles...')
# bond_lengths, bond_angles = self.get_bond_lengths_and_angles(atoms)
# ext_table = {
# 'bond lengths': bond_lengths,
# 'bond angles': bond_angles
# }
# if self.verbose: print('Done.')
if self.verbose: print('This has not been implemented yet.')
ext_table = None
# Write implemented properties to non-searchable data dict.
data_dict = {'implemented_properties': self.implemented_properties}
# Cache this row.
rowdata = [atoms, tag_dict, data_dict, ext_table]
if pdb_cache is None:
pdb_cache = [rowdata]
else:
pdb_cache.append(rowdata)
if self.verbose:
print(f'\nSaved molecule {i+1} to the row cache.')
print('------------------------------------------')
# Check if we are at a savepoint.
if i in savepoints:
# If yes, write the cached rows to the parsed database.
with ase.db.connect(db_write_file) as db_writer:
for rowdata in pdb_cache:
if rowdata[3] is not None:
db_writer.write(rowdata[0], rowdata[1], data=rowdata[2], external_tables=rowdata[3])
else:
db_writer.write(rowdata[0], rowdata[1], data=rowdata[2])
if self.verbose:
print(f'\nSaved {len(pdb_cache)} cached rows to parsed database.')
# Reset database cache.
pdb_cache = None
# If updating an existing database.
else:
if self.verbose: print('Starting to parse unimplemented properties from database...')
row_iterator = self.pdb.select()
# Divide the database up into savepoints, so that it doesn't have to save after each row.
num_rows = self.pdb.count()
if self.save_chunksize > num_rows:
raise ValueError('save_chunksize is greater than the number of rows in the database! Lower save_chunksize and try again.')
num_rows_divisible = num_rows - (num_rows % self.save_chunksize)
num_rows_remainder = num_rows - num_rows_divisible
savepoints = [i for i in range(self.save_chunksize, num_rows_divisible + self.save_chunksize, self.save_chunksize)]
if self.save_chunksize != num_rows:
savepoints.append(num_rows)
savepoints = [sp - 1 for sp in savepoints]
pdb_cache = None
for i, row in enumerate(row_iterator):
if self.verbose:
print('\n------------------------------------------')
print(f'| Molecule {i+1} |')
print('------------------------------------------')
print('Searching for properties not implemented in database...')
row_properties = row.data['implemented_properties']
missing_properties = []
for prop in self.implemented_properties:
if prop not in row_properties:
missing_properties.append(prop)
if missing_properties == []:
print('No unimplemented properties detected!')
continue
else:
if self.verbose:
print(f'{len(missing_properties)} unimplemented properties detected:')
for prop in missing_properties:
print(f' {prop}')
print('')
tag_dict = {}
rowid = row.id
atoms = row.toatoms()
# Loop through necessary properties
for prop in missing_properties:
# Call correct analysis function to get the desired group of properties.
tags = self.analysis_functions[prop](atoms)
if self.verbose:
prop_keys = tags.keys()
for key in prop_keys:
print(f'{key}: {tags[key]}')
# Insert these properties into the tag dictionary.
tag_dict.update(tags)
# Update the row's implemented properties list.
data_dict = {'implemented_properties': row_properties.append(prop)}
# Cache this row.
rowdata = [rowid, tag_dict, data_dict]
if pdb_cache is None:
pdb_cache = [rowdata]
else:
pdb_cache.append(rowdata)
if self.verbose:
print(f'\nSaved molecule {i+1} to the row cache.')
print('------------------------------------------')
# Check if we are at a savepoint.
if i in savepoints:
# If yes, write the cached rows to the parsed database.
with ase.db.connect(db_write_file) as db_writer:
for rowdata in pdb_cache:
db_writer.update(rowdata[0], **rowdata[1], data=rowdata[2])
if self.verbose:
print(f'\nUpdated {len(pdb_cache)} cached rows in parsed database.')
# Reset database cache.
pdb_cache = None
def get_natoms(self, atoms):
'''
Gets numbers of atoms of each element in a molecular structure.
'''
element_count = None
for atom in atoms.get_chemical_symbols():
if element_count is None:
element_count = {atom: 1}
else:
if atom not in element_count.keys():
element_count.update({atom: 1})
else:
element_count[atom] += 1
formatted_count = {}
for elem in element_count.keys():
formatted_count[f'natoms_{elem}'] = element_count[elem]
return formatted_count
def get_nbonds_and_nrings(self, atoms):
'''
Gets numbers of bonds, separated by type, and numbers of rings, separated
by ring size, from a molecular structure.
'''
positions = atoms.positions
numbers = atoms.numbers
analysis = Molecule(positions, numbers)
nbonds_and_nrings = analysis.get_bond_stats(ring_analysis='OpenBabel')
formatted_nbonds_and_nrings = {}
for key in nbonds_and_nrings.keys():
bond_order = None
# If key belongs to an nrings descriptor (RDKit)...
if key[0] == 'R':
if key[1] == '>':
num_rings = key[2:]
else:
num_rings = key[1:]
formatted_nbonds_and_nrings[f'nrings_{num_rings}'] = nbonds_and_nrings[key]
# Else if key belongs to an nrings descriptor(OpenBabel)...
elif 'nrings_' in key:
formatted_nbonds_and_nrings[key] = nbonds_and_nrings[key]
# Otherwise, must be an nbonds descriptor.
else:
formatted_nbonds_and_nrings[f'nbonds_{key}'] = nbonds_and_nrings[key]
bond_order = re.findall('[0-9]+', key)[0]
if bond_order is None:
print(f'Could not extract bond order from key {key}.')
else:
bond_order = f'nbonds_{bond_order}'
if bond_order not in formatted_nbonds_and_nrings.keys():
formatted_nbonds_and_nrings[bond_order] = nbonds_and_nrings[key]
else:
formatted_nbonds_and_nrings[bond_order] += nbonds_and_nrings[key]
return formatted_nbonds_and_nrings
def get_aromaticity(self, atoms):
'''
Gets percentage of non-hydrogen aromatic atoms in a molecule, alongside
the numbers of aromatic atoms of each element type.
'''
positions = atoms.positions
numbers = atoms.numbers
analysis = Molecule(positions, numbers)
aro_percent, aro_dict = analysis.get_aromaticity()
aro_dict['aromaticity'] = aro_percent
return aro_dict
def get_SMILES(self, atoms):
'''
Gets a SMILES string from a molecular structure.
'''
positions = atoms.positions
numbers = atoms.numbers
analysis = Molecule(positions, numbers)
smiles = analysis.get_can()
return {'SMILES': smiles}
def get_InChI(self, atoms):
'''
Gets an InChI string from a molecular structure.
'''
positions = atoms.positions
numbers = atoms.numbers
analysis = Molecule(positions, numbers)
inchi = analysis.get_inchi_string()
return {'InChI': inchi}
def get_SCscore(self, atoms):
'''
Gets a molecule's synthesisability (SCscore) from its structure.
'''
positions = atoms.positions
numbers = atoms.numbers
analysis = Molecule(positions, numbers)
smiles = analysis.get_can()
# INSERT SCscore CODE HERE
model = SCScorer()
model.restore(os.path.join(project_root, 'models', 'full_reaxys_model_1024bool', 'model.ckpt-10654.as_numpy.json.gz'))
smiles, SCscore = model.get_score_from_smi(smiles)
print(SCscore)
return {'SCscore': SCscore}
def get_bond_lengths_and_angles(self, atoms):
'''
Gets arrays of all unique bond lengths and angles from a molecular structure.
'''
raise NotImplementedError()
# Check if being called as a standalone script.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run dbAutoAnalysis on a given database.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'db',
        help='Path to the database file to be parsed/updated.'
    )
    parser.add_argument(
        'mode',
        choices=['parse', 'update'],
        # Fixed missing spaces at the line joins of the concatenated help text.
        help=('Which mode to run dbAutoAnalysis in. Running in parse mode will take an '
              'unparsed database and fully parse it to attach the currently implemented '
              'properties. Running in update mode will search each database entry for '
              'missing properties, calculating them and updating database rows if needs be.')
    )
    parser.add_argument(
        '--verbose',
        default='True',
        choices=['True', 'False'],
        help='Whether to output all parsing steps or stay quiet.'
    )
    parser.add_argument(
        '--chunksize',
        default=50,
        type=int,
        help='Chunk size to break the database up into for more efficient writing to disk.'
    )
    args = parser.parse_args()
    # String choices kept for CLI backward compatibility; convert to bool here.
    db_verb = args.verbose == 'True'
    dbAA = dbAutoAnalysis(db_verb, args.chunksize)
    dbAA(args.db, args.mode)
    # Removed stray bare `exit` (a no-op expression, not a call).
| Python |
3D | rhyan10/G-SchNetOE62 | template_data_bias.py | .py | 7,774 | 163 | import logging
from pathlib import Path
import numpy as np
import torch
from ase.db import connect
from schnetpack import Properties
from schnetpack.datasets import AtomsData
from utility_classes import ConnectivityCompressor
from template_preprocess_dataset import preprocess_dataset
class TemplateData(AtomsData):
    """ Simple template dataset class. We assume molecules made of C, N, O, F,
    and H atoms as illustration here.
    The class basically serves as interface to a database. It initiates
    pre-processing of the data in order to prepare it for usage with G-SchNet.
    To this end, it calls the template_preprocess_dataset script which provides
    very basic pre-processing (e.g. calculation of connectivity matrices) and can
    also be adapted to the data at hand.
    Single (pre-processed) data points are read from the database in the
    get_properties method (which is called in __getitem__). The class builds upon
    the AtomsData class from SchNetPack.

    Args:
        path (str): path to directory containing database
        subset (list, optional): indices of subset, set to None for entire dataset
            (default: None).
        precompute_distances (bool, optional): if True and the pre-processed
            database does not yet exist, the pairwise distances of atoms in the
            dataset's molecules will be computed during pre-processing and stored in
            the database (increases storage demand of the dataset but decreases
            computational cost during training as otherwise the distances will be
            computed once in every epoch, default: True)
        remove_invalid (bool, optional): if True, molecules that do not pass the
            implemented validity checks will be removed from the training data (
            in the simple template_preprocess_dataset script this is only a check
            for disconnectedness, i.e. if all atoms are connected by some path as
            otherwise no proper generation trace can be sampled,
            note: only works if the pre-processed database does not yet exist,
            default: True)
    """
    ##### Adjust the following settings to fit your data: #####
    # name of the database
    db_name = 'OE62_gschnet.db'
    # name of the database after pre-processing (if the same as db_name, the original
    # database will be renamed to <db_name>.bak.db)
    preprocessed_db_name = 'OE62_gschnet.db'
    # all atom types found in molecules of the dataset (OE62 element set)
    available_atom_types = [1, 3, 5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 52, 53]
    # valence constraints of the atom types (does not need to be provided unless a
    # valence check is implemented, but this is not the case in the template script)
    atom_types_valence = [1, 1, 3, 4, 3, 2, 1, 4, 5, 6, 1, 5, 6, 1, 6, 1]
    # minimum and maximum distance between neighboring atoms in angstrom (this is
    # used to determine which atoms are considered as connected in the connectivity
    # matrix, i.e. for sampling generation traces during training, and also to restrict
    # the grid around the focused atom during generation, as the next atom will always
    # be a neighbor of the focused atom)
    radial_limits = [0.5, 2]
    # used to decompress connectivity matrices
    connectivity_compressor = ConnectivityCompressor()

    def __init__(self, path, subset=None, precompute_distances=True,
                 remove_invalid=True):
        self.path_to_dir = Path(path)
        self.db_path = self.path_to_dir / self.preprocessed_db_name
        self.source_db_path = self.path_to_dir / self.db_name
        self.precompute_distances = precompute_distances
        self.remove_invalid = remove_invalid
        # do pre-processing (if database is not already pre-processed)
        found_connectivity = False
        if self.db_path.is_file():
            with connect(self.db_path) as conn:
                n_mols = conn.count()
                if n_mols > 0:
                    # query the first row while the connection is still open
                    # (previously conn was used after the with-block had exited)
                    first_row = conn.get(1)
                    found_connectivity = 'con_mat' in first_row.data
        if not found_connectivity:
            self._preprocess_data()
        super().__init__(str(self.db_path), subset=subset)

    def create_subset(self, idx):
        """
        Returns a new dataset that only consists of provided indices.

        Args:
            idx (numpy.ndarray): subset indices

        Returns:
            schnetpack.data.AtomsData: dataset with subset of original data
        """
        idx = np.array(idx)
        subidx = idx if self.subset is None or len(idx) == 0 \
            else np.array(self.subset)[idx]
        return type(self)(self.path_to_dir, subidx)

    def get_properties(self, idx):
        """
        Load one molecule and assemble the property dictionary used by G-SchNet.

        Args:
            idx (int): index of the data point (relative to the subset, if any)

        Returns:
            tuple: (ase.Atoms object, dict of torch tensors with structure,
            connectivity, optional pre-computed distances, and neighborhoods)
        """
        _idx = self._subset_index(idx)
        with connect(self.db_path) as conn:
            row = conn.get(_idx + 1)
        at = row.toatoms()
        # extract/calculate structure (atom positions, types and cell)
        properties = {}
        # np.int was removed in NumPy >= 1.24; use the explicit np.int64
        properties[Properties.Z] = torch.LongTensor(at.numbers.astype(np.int64))
        positions = at.positions.astype(np.float32)
        positions -= at.get_center_of_mass()  # center positions
        properties[Properties.R] = torch.FloatTensor(positions)
        properties[Properties.cell] = torch.FloatTensor(at.cell.astype(np.float32))
        # recover connectivity matrix from compressed format
        con_mat = self.connectivity_compressor.decompress(row.data['con_mat'])
        # save in dictionary
        properties['_con_mat'] = torch.FloatTensor(con_mat.astype(np.float32))
        # extract pre-computed distances (if they exist)
        if 'dists' in row.data:
            properties['dists'] = row.data['dists'][:, None]
        # get atom environment
        nbh_idx, offsets = self.environment_provider.get_environment(at)
        # store neighbors, cell, and index
        properties[Properties.neighbors] = torch.LongTensor(nbh_idx.astype(np.int64))
        properties[Properties.cell_offset] = torch.FloatTensor(
            offsets.astype(np.float32))
        properties["_idx"] = torch.LongTensor(np.array([idx], dtype=np.int64))
        return at, properties

    def _preprocess_data(self):
        """
        Prepare the raw database for training (renames the source database if it
        shares the target name and collects inputs for the pre-processing script).

        NOTE(review): the call to preprocess_dataset below is commented out in
        this version, so no actual pre-processing happens here — confirm this is
        intentional before training on a fresh database.
        """
        # check if pre-processing source db has different name than target db (if
        # not, rename it)
        source_db = self.path_to_dir / self.db_name
        if self.db_name == self.preprocessed_db_name:
            new_name = self.path_to_dir / (self.db_name + '.bak.db')
            source_db.rename(new_name)
            source_db = new_name
        # look for pre-computed list of invalid molecules
        invalid_list_path = self.source_db_path.parent / \
            (self.source_db_path.stem + '_invalid.txt')
        if invalid_list_path.is_file():
            invalid_list = np.loadtxt(invalid_list_path)
        else:
            invalid_list = None
        # initialize pre-processing (calculation and validation of connectivity
        # matrices as well as computation of pairwise distances between atoms)
        valence_list = \
            np.array([self.available_atom_types, self.atom_types_valence]).flatten('F')
        #preprocess_dataset(datapath=source_db,
        #                   cutoff=self.radial_limits[-1],
        #                   valence_list=list(valence_list),
        #                   logging_print=True,
        #                   new_db_path=self.db_path,
        #                   precompute_distances=self.precompute_distances,
        #                   remove_invalid=self.remove_invalid,
        #                   invalid_list=invalid_list)
        return True
| Python |
3D | rhyan10/G-SchNetOE62 | utility_classes_ob3.py | .py | 36,255 | 876 | '''
Functionally identical to the regular utility_classes.py, except updated to use
OpenBabel 3 rather than OpenBabel 2 (tested on ver. 3.1.1)
'''
import operator
import re
import numpy as np
from openbabel import openbabel as ob
from openbabel import pybel
from multiprocessing import Process
from rdkit import Chem
from scipy.spatial.distance import squareform
class Molecule:
'''
Molecule class that allows to get statistics such as the connectivity matrix,
molecular fingerprint, canonical smiles representation, or ring count given
positions of atoms and their atomic numbers. Currently supports molecules made of
carbon, nitrogen, oxygen, fluorine, and hydrogen (such as in the QM9 benchmark
dataset). Mainly relies on routines from Open Babel and RdKit.
Args:
pos (numpy.ndarray): positions of atoms in euclidean space (n_atoms x 3)
atomic_numbers (numpy.ndarray): list with nuclear charge/type of each atom
(e.g. 1 for hydrogens, 6 for carbons etc.).
connectivity_matrix (numpy.ndarray, optional): optionally, a pre-calculated
connectivity matrix (n_atoms x n_atoms) containing the bond order between
atom pairs can be provided (default: None).
store_positions (bool, optional): set True to store the positions of atoms in
self.positions (only for convenience, not needed for computations, default:
False).
'''
type_infos = {1: {'name': 'H',
'n_bonds': 1},
3: {'name': 'Li',
'n_bonds': 1},
5: {'name': 'B',
'n_bonds': 3},
6: {'name': 'C',
'n_bonds': 4},
7: {'name': 'N',
'n_bonds': 3},
8: {'name': 'O',
'n_bonds': 2},
9: {'name': 'F',
'n_bonds': 1},
14: {'name':'Si',
'n_bonds': 4},
15: {'name': 'P',
'n_bonds': 5},
16: {'name': 'S',
'n_bonds': 6},
17: {'name': 'Cl',
'n_bonds': 1},
33: {'name': 'As',
'n_bonds': 5},
34: {'name': 'Se',
'n_bonds': 6},
35: {'name': 'Br',
'n_bonds': 1},
52: {'name': 'Te',
'n_bonds': 6},
53: {'name': 'I',
'n_bonds': 1},
}
type_charges = {'H': 1, 'Li':3,'B':5,'C': 6, 'N': 7, 'O': 8, 'F': 9,'Si':14,'P':15,'S':16,'Cl':17,'As':33,'Se':34,'Br':35,'Te':52,'I':53}
    def __init__(self, pos, atomic_numbers, connectivity_matrix=None,
                 store_positions=False):
        # set comparison metrics to None (will be computed just in time)
        self._fp = None  # pybel fingerprint (FP2)
        self._fp_bits = None  # set of bits set in the fingerprint
        self._can = None  # canonical SMILES string
        self._mirror_can = None  # canonical SMILES of the mirrored molecule
        self._inchi_key = None  # InChI-key string
        self._bond_stats = None  # dict with bond/ring counts
        self._fixed_connectivity = False  # True once get_fixed_connectivity ran
        self._row_indices = {}  # cache: atom type -> indices of atoms of that type
        self._obmol = None  # underlying Open Babel OBMol object
        self._rings = None  # list of ring sizes (symmetric SSSR)
        self._n_atoms_per_type = None  # atom counts in the order of type_infos
        self._connectivity = connectivity_matrix
        # set statistics
        self.n_atoms = len(pos)
        self.numbers = atomic_numbers
        self._unique_numbers = {*self.numbers} # set for fast query
        self.positions = pos
        if not store_positions:
            # the OBMol object is built eagerly here because the positions are
            # dropped afterwards to save memory
            self._obmol = self.get_obmol() # create obmol before removing pos
            self.positions = None
def sanity_check(self):
'''
Check whether the sum of valence of all atoms can be divided by 2.
Returns:
bool: True if the test is passed, False otherwise
'''
count = 0
for atom in self.numbers:
count += self.type_infos[atom]['n_bonds']
if count % 2 == 0:
return True
else:
return False
    def get_obmol(self):
        '''
        Retrieve the underlying Open Babel OBMol object (built lazily from the
        stored positions and atomic numbers).

        Returns:
            OBMol object: Open Babel OBMol representation (None if positions or
            atomic numbers are missing)
        '''
        if self._obmol is None:
            if self.positions is None:
                print('Error, cannot create obmol without positions!')
                return
            if self.numbers is None:
                print('Error, cannot create obmol without atomic numbers!')
                return
            # use openbabel to infer bonds and bond order:
            obmol = ob.OBMol()
            obmol.BeginModify()
            # set positions and atomic numbers of all atoms in the molecule
            for p, n in zip(self.positions, self.numbers):
                obatom = obmol.NewAtom()
                obatom.SetAtomicNum(int(n))
                obatom.SetVector(*p.tolist())
            # infer bonds and bond order
            obmol.ConnectTheDots()
            obmol.PerceiveBondOrders()
            obmol.EndModify()
            self._obmol = obmol
        return self._obmol
def get_fp(self):
'''
Retrieve the molecular fingerprint (the path-based FP2 from Open Babel is used,
which means that paths of length up to 7 are considered).
Returns:
pybel.Fingerprint object: moleculer fingerprint (use "fp1 | fp2" to
calculate the Tanimoto coefficient of two fingerprints)
'''
if self._fp is None:
# calculate fingerprint
self._fp = pybel.Molecule(self.get_obmol()).calcfp()
return self._fp
def get_fp_bits(self):
'''
Retrieve the bits set in the molecular fingerprint.
Returns:
Set of int: object containing the bits set in the molecular fingerprint
'''
if self._fp_bits is None:
self._fp_bits = {*self.get_fp().bits}
return self._fp_bits
def get_can(self):
'''
Retrieve the canonical SMILES representation of the molecule.
Returns:
String: canonical SMILES string
'''
if self._can is None:
# calculate canonical SMILES
self._can = pybel.Molecule(self.get_obmol()).write('can')
return self._can
def get_mirror_can(self):
'''
Retrieve the canonical SMILES representation of the mirrored molecule (the
z-coordinates are flipped).
Returns:
String: canonical SMILES string of the mirrored molecule
'''
if self._mirror_can is None:
# calculate canonical SMILES of mirrored molecule
self._flip_z() # flip z to mirror molecule using x-y plane
self._mirror_can = pybel.Molecule(self.get_obmol()).write('can')
self._flip_z() # undo mirroring
return self._mirror_can
def get_inchi_key(self):
'''
Retrieve the InChI-key of the molecule.
Returns:
String: InChI-key
'''
if self._inchi_key is None:
# calculate inchi key
self._inchi_key = pybel.Molecule(self.get_obmol()).\
write('inchikey')
return self._inchi_key
def _flip_z(self):
'''
Flips the z-coordinates of atom positions (to get a mirrored version of the
molecule).
'''
if self._obmol is None:
self.get_obmol()
for atom in ob.OBMolAtomIter(self._obmol):
x, y, z = atom.x(), atom.y(), atom.z()
atom.SetVector(x, y, -z)
self._obmol.ConnectTheDots()
self._obmol.PerceiveBondOrders()
    def get_connectivity(self):
        '''
        Retrieve the connectivity matrix of the molecule (computed lazily from
        the Open Babel representation on first access).

        Returns:
            numpy.ndarray: (n_atoms x n_atoms) array containing the pairwise bond orders
            between atoms (0 for no bond).
        '''
        if self._connectivity is None:
            # get connectivity matrix
            connectivity = np.zeros((self.n_atoms, len(self.numbers)))
            # OBMol atom indices are 1-based, hence the -1 below
            for atom in ob.OBMolAtomIter(self.get_obmol()):
                index = atom.GetIdx() - 1
                # loop over all neighbors of atom
                for neighbor in ob.OBAtomAtomIter(atom):
                    idx = neighbor.GetIdx() - 1
                    bond_order = neighbor.GetBond(atom).GetBondOrder() # Updated to new standard.
                    #print(f'{index}-{idx}: {bond_order}')
                    # do not count bonds between two hydrogen atoms
                    if (self.numbers[index] == 1 and self.numbers[idx] == 1
                            and bond_order > 0):
                        bond_order = 0
                    connectivity[index, idx] = bond_order
            self._connectivity = connectivity
        return self._connectivity
def get_ring_counts(self):
'''
Retrieve a list containing the sizes of rings in the symmetric smallest set
of smallest rings (S-SSSR from RdKit) in the molecule (e.g. [5, 6, 5] for two
rings of size 5 and one ring of size 6).
Returns:
List of int: list with ring sizes
'''
if self._rings is None:
# calculate symmetric SSSR with RdKit using the canonical smiles
# representation as input
can = self.get_can()
mol = Chem.MolFromSmiles(can)
if mol is not None:
ssr = Chem.GetSymmSSSR(mol)
self._rings = [len(ssr[i]) for i in range(len(ssr))]
else:
self._rings = [] # cannot count rings
return self._rings
def get_n_atoms_per_type(self):
'''
Retrieve the number of atoms in the molecule per type.
Returns:
numpy.ndarray: number of atoms in the molecule per type, where the order
corresponds to the order specified in Molecule.type_infos
'''
if self._n_atoms_per_type is None:
_types = np.array(list(self.type_infos.keys()), dtype=int)
self._n_atoms_per_type =\
np.bincount(self.numbers, minlength=np.max(_types)+1)[_types]
return self._n_atoms_per_type
def remove_unpicklable_attributes(self, restorable=True):
'''
Some attributes of the class cannot be processed by pickle. This method
allows to remove these attributes prior to pickling.
Args:
restorable (bool, optional): Set True to allow restoring the deleted
attributes later on (default: True)
'''
# set attributes which are not picklable (SwigPyObjects) to None
if restorable and self.positions is None and self._obmol is not None:
# store positions to allow restoring obmol object later on
pos = [atom.coords for atom in pybel.Molecule(self._obmol).atoms]
self.positions = np.array(pos)
self._obmol = None
self._fp = None
def tanimoto_similarity(self, other_mol, use_bits=True):
'''
Get the Tanimoto (fingerprint) similarity to another molecule.
Args:
other_mol (Molecule or pybel.Fingerprint/list of bits set):
representation of the second molecule (if it is not a Molecule object,
it needs to be a pybel.Fingerprint if use_bits is False and a list of bits
set in the fingerprint if use_bits is True).
use_bits (bool, optional): set True to calculate Tanimoto similarity
from bits set in the fingerprint (default: True)
Returns:
float: Tanimoto similarity to the other molecule
'''
if use_bits:
a = self.get_fp_bits()
b = other_mol.get_fp_bits() if isinstance(other_mol, Molecule) \
else other_mol
n_equal = len(a.intersection(b))
if len(a) + len(b) == 0: # edge case with no set bits
return 1.
return n_equal / (len(a)+len(b)-n_equal)
else:
fp_other = other_mol.get_fp() if isinstance(other_mol, Molecule)\
else other_mol
return self.get_fp() | fp_other
def _update_bond_orders(self, idc_lists):
'''
Updates the bond orders in the underlying OBMol object.
Args:
idc_lists (list of list of int): nested list containing bonds, i.e. pairs
of row indices (list1) and column indices (list2) which shall be updated
'''
con_mat = self.get_connectivity()
self._obmol.BeginModify()
for i in range(len(idc_lists[0])):
idx1 = idc_lists[0][i]
idx2 = idc_lists[1][i]
obbond = self._obmol.GetBond(int(idx1+1), int(idx2+1))
obbond.SetBO(int(con_mat[idx1, idx2]))
self._obmol.EndModify()
# reset fingerprints etc
self._fp = None
self._can = None
self._mirror_can = None
self._inchi_key = None
    def get_fixed_connectivity(self, recursive_call=False):
        '''
        Attempts to fix the connectivity matrix using some heuristics (as some valid
        QM9 molecules do not pass the valency check using the connectivity matrix
        obtained with Open Babel, which seems to have problems with assigning correct
        bond orders to aromatic rings containing Nitrogen).

        Args:
            recursive_call (bool, do not set True): flag that indicates a recursive
                call (used internally, do not set to True)

        Returns:
            numpy.ndarray: (n_atoms x n_atoms) array containing the pairwise bond orders
            between atoms (0 for no bond) after the attempted fix.
        '''
        # if fix has already been attempted, return the connectivity matrix
        if self._fixed_connectivity:
            return self._connectivity
        # define helpers:
        # increases bond order between two atoms in connectivity matrix
        def increase_bond(con_mat, idx1, idx2):
            con_mat[idx1, idx2] += 1
            con_mat[idx2, idx1] += 1
            return con_mat
        # decreases bond order between two atoms in connectivity matrix
        def decrease_bond(con_mat, idx1, idx2):
            con_mat[idx1, idx2] -= 1
            con_mat[idx2, idx1] -= 1
            return con_mat
        # returns only the rows of the connectivity matrix corresponding to atoms of
        # certain types (and the indices of these atoms)
        def get_typewise_connectivity(con_mat, types):
            idcs = []
            for type in types:
                idcs += list(self._get_row_idcs(type))
            return con_mat[idcs], np.array(idcs).astype(int)
        # store old connectivity matrix for later comparison
        old_mat = self.get_connectivity().copy()
        # get connectivity matrix and find indices of N and C atoms
        con_mat = self.get_connectivity()
        if 6 not in self._unique_numbers and 7 not in self._unique_numbers:
            # do not attempt fixing if there is no carbon and no nitrogen
            return con_mat
        N_mat, N_idcs = get_typewise_connectivity(con_mat, [7])
        C_mat, C_idcs = get_typewise_connectivity(con_mat, [6])
        NC_idcs = np.hstack((N_idcs, C_idcs)) # indices of all N and C atoms
        NC_valences = self._get_valences()[NC_idcs] # array with valency constraints
        # return connectivity if valency constraints of N and C atoms are already met
        if np.all(np.sum(con_mat[NC_idcs], axis=1) == NC_valences):
            return con_mat
        # if a C or N atom is "overcharged" (total bond order too high) we decrease
        # double to single bonds between N-N or N-C until it is not overcharged anymore
        # (e.g. C=N=C -> C=N-C)
        if 7 in self._unique_numbers: # only necessary if molecule contains N
            for cur in NC_idcs:
                type = self.numbers[cur]
                if np.sum(con_mat[cur]) <= self.type_infos[type]['n_bonds']:
                    continue
                if type == 6: # for carbon look only at nitrogen neighbors
                    neighbors = self._get_neighbors(cur, types=[7], strength=2)
                else:
                    neighbors = self._get_neighbors(cur, types=[6, 7],
                                                    strength=2)
                for neighbor in neighbors:
                    con_mat = decrease_bond(con_mat, cur, neighbor)
                    self._connectivity = con_mat
                    # stop as soon as the atom's valency constraint is met
                    if np.sum(con_mat[cur]) == \
                            self.type_infos[type]['n_bonds']:
                        break
        # get updated partial connectivity matrices for N and C
        N_mat, _ = get_typewise_connectivity(con_mat, [7])
        C_mat, _ = get_typewise_connectivity(con_mat, [6])
        # increase total number of bonds by transferring the strength of a
        # double C-N bond to two neighboring bonds, if the involved atoms
        # are not yet saturated (e.g. H2C-H2C=N-H2C -> H2C=H2C-N=H2C)
        if (np.sum(N_mat) < len(N_idcs) * 3 or np.sum(C_mat) < len(C_idcs) * 4) \
                and 7 in self._unique_numbers:
            for cur in NC_idcs:
                type = self.numbers[cur]
                if sum(con_mat[cur]) >= self.type_infos[type]['n_bonds']:
                    continue
                CN_nbors = self._get_CN_neighbors(cur)
                for nbor_1, nbor_2 in CN_nbors:
                    if con_mat[nbor_1, nbor_2] <= 1:
                        continue
                    else:
                        nbor_2_nbors = np.where(con_mat[nbor_2] == 1)[0]
                        for nbor_2_nbor in nbor_2_nbors:
                            nbor_2_nbor_type = self.numbers[nbor_2_nbor]
                            if (np.sum(con_mat[nbor_2_nbor]) <
                                    self.type_infos[nbor_2_nbor_type]['n_bonds']):
                                con_mat = increase_bond(con_mat, cur, nbor_1)
                                con_mat = increase_bond(con_mat, nbor_2, nbor_2_nbor)
                                con_mat = decrease_bond(con_mat, nbor_1, nbor_2)
                                self._connectivity = con_mat
        # increase bond strength between two undercharged neighbors C-N,
        # C-C or N-N (e.g HN-CH2 -> HN=CH2, starting from those atoms with least
        # available neighbors if there are multiple undercharged neighbors)
        # NOTE(review): the flag below is never set to False; the loop only
        # exits through the break when no undercharged atoms remain adjacent.
        undercharged_pairs = True
        while (undercharged_pairs):
            NC_charges = np.sum(con_mat[NC_idcs], axis=1)
            undercharged = NC_idcs[np.where(NC_charges < NC_valences)[0]]
            partial_con_mat = con_mat[undercharged][:, undercharged]
            # if non of the undercharged atoms are neighbors, stop
            if np.sum(partial_con_mat) == 0:
                break
            # sort by number of undercharged neighbors
            n_nbors = np.sum(partial_con_mat > 0, axis=0)
            # mask indices with zero undercharged neighbors to ignore them when sorting
            n_nbors[np.where(n_nbors == 0)[0]] = 1000
            cur = np.argmin(n_nbors)
            cur_nbor = np.where(partial_con_mat[cur] > 0)[0][0]
            con_mat = increase_bond(con_mat, undercharged[cur], undercharged[cur_nbor])
            self._connectivity = con_mat
        # if the molecule still is not valid, try to flip double bonds if an atom
        # forms a double bond and has at least one other neighbor that has too few bonds
        # (e.g. C-N=C -> C=N-C) and repeat above heuristics with a recursive call of
        # this function
        if not recursive_call and \
                not np.all(np.sum(con_mat[NC_idcs], axis=1) == NC_valences):
            changed = False
            candidates = np.where(np.any(con_mat[NC_idcs][:, NC_idcs] == 2, axis=0))[0]
            for cand in NC_idcs[candidates]:
                if np.sum(con_mat[cand, NC_idcs] == 2) == 0:
                    continue
                NC_charges = np.sum(con_mat[NC_idcs], axis=1)
                undercharged = NC_charges < NC_valences
                uc_neighbors = np.logical_and(con_mat[cand, NC_idcs] == 1, undercharged)
                if np.any(uc_neighbors):
                    # move the double bond from the overcharged neighbor to the
                    # undercharged one
                    uc_neighbor = NC_idcs[np.where(uc_neighbors)[0][0]]
                    oc_neighbor = NC_idcs[
                        np.where(con_mat[cand, NC_idcs] == 2)[0][0]]
                    con_mat = increase_bond(con_mat, cand, uc_neighbor)
                    con_mat = decrease_bond(con_mat, cand, oc_neighbor)
                    self._connectivity = con_mat
                    changed = True
            if changed:
                self._connectivity = self.get_fixed_connectivity(
                    recursive_call=True)
        # store that fixing the connectivity matrix has already been attempted
        if not recursive_call:
            self._fixed_connectivity = True
            if np.any(old_mat != self._connectivity):
                # update bond orders in underlying OBMol object (where they changed)
                self._update_bond_orders(np.where(old_mat != self._connectivity))
        return self._connectivity
def _get_valences(self):
'''
Retrieve the valency constraints of all atoms in the molecule.
Returns:
numpy.ndarray: valency constraints (one per atom)
'''
valence = []
for atom in self.numbers:
valence += [self.type_infos[atom]['n_bonds']]
return np.array(valence)
    def _get_CN_neighbors(self, idx):
        '''
        For a focus atom of type K returns indices of atoms C (carbon) and N (nitrogen)
        on two-step paths of the form K-C-N (and K-C-C only for K=N since one atom
        needs to be nitrogen).

        Args:
            idx (int): the index of the focus atom from which paths are examined

        Returns:
            list of lists: list1[i] contains an index of a direct neighbor of the
                focus atom and list2[i] contains the index of a second neighbor on the
                i-th identified two-step path
        '''
        con_mat = self.get_connectivity()
        nbors = con_mat[idx] > 0
        # direct carbon neighbors of the focus atom (first step of the path)
        C_nbors = np.where(np.logical_and(self.numbers == 6, nbors))[0]
        type = self.numbers[idx]
        # mask types to exclude idx from neighborhood
        _numbers = self.numbers.copy()
        _numbers[idx] = 0
        # nitrogen atoms bonded to any of the carbon neighbors (second step)
        CN_nbors = np.where(np.logical_and(_numbers == 7, con_mat[C_nbors] > 0))
        CN_nbors = [(C_nbors[CN_nbors[0][i]], CN_nbors[1][i])
                    for i in range(len(CN_nbors[0]))]
        if type == 7: # for N atoms, also add C-C neighbors
            CC_nbors = np.where(np.logical_and(
                _numbers == 6, con_mat[C_nbors] > 0))
            CC_nbors = [
                (C_nbors[CC_nbors[0][i]], CC_nbors[1][i])
                for i in range(len(CC_nbors[0]))]
            CN_nbors += CC_nbors
        return CN_nbors
def _get_neighbors(self, idx, types=None, strength=1):
'''
Retrieve the indices of neighbors of an atom.
Args:
idx (int): index of the atom
types (list of int, optional): restrict the returned neighbors to
contain only atoms of the specified types (set None to apply no type
filter, default: None)
strength (int, optional): restrict the returned neighbors to contain
only atoms with a certain minimal bond order to the atom at idx
(default: 1)
Returns:
list of int: indices of all neighbors that meet the requirements
'''
con_mat = self.get_connectivity()
neighbors = con_mat[idx] >= strength
if types is not None:
type_arr = np.zeros(len(neighbors)).astype(bool)
for type in types:
type_arr = np.logical_or(type_arr, self.numbers == type)
return np.where(np.logical_and(neighbors, type_arr))[0]
    def get_bond_stats(self):
        '''
        Retrieve the bond and ring count of the molecule. The bond count is
        calculated for every pair of types (e.g. C1N are all single bonds between
        carbon and nitrogen atoms in the molecule, C2N are all double bonds between
        such atoms etc.). The ring count is provided for rings from size 3 to 8 (R3,
        R4, ..., R8) and for rings greater than size eight (R>8).

        Returns:
            dict (str->int): bond and ring counts
        '''
        if self._bond_stats is None:
            # 1st analyze bonds
            unique_types = np.sort(list(self._unique_numbers))
            # get connectivity and read bonds from matrix
            con_mat = self.get_connectivity()
            d = {}
            for i, type1 in enumerate(unique_types):
                row_idcs = self._get_row_idcs(type1)
                n_bonds1 = self.type_infos[type1]['n_bonds']
                for type2 in unique_types[i:]:
                    col_idcs = self._get_row_idcs(type2)
                    n_bonds2 = self.type_infos[type2]['n_bonds']
                    # maximum possible bond order between the two types
                    max_bond_strength = min(n_bonds1, n_bonds2)
                    if n_bonds1 == n_bonds2: # exclude small trivial molecules
                        max_bond_strength -= 1
                    for n in range(1, max_bond_strength + 1):
                        # key format: <symbol1><order><symbol2>, e.g. 'C2N'
                        id = self.type_infos[type1]['name'] + str(n) + \
                             self.type_infos[type2]['name']
                        d[id] = np.sum(con_mat[row_idcs][:, col_idcs] == n)
                        if type1 == type2:
                            d[id] = int(d[id]/2) # remove twice counted bonds
            # 2nd analyze rings
            ring_counts = self.get_ring_counts()
            if len(ring_counts) > 0:
                ring_counts = np.bincount(np.array(ring_counts))
                n_bigger_8 = 0
                for i in np.nonzero(ring_counts)[0]:
                    if i < 9:
                        d[f'R{i}'] = ring_counts[i]
                    else:
                        n_bigger_8 += ring_counts[i]
                if n_bigger_8 > 0:
                    d[f'R>8'] = n_bigger_8
            self._bond_stats = d
        return self._bond_stats
def _get_row_idcs(self, type):
'''
Retrieve the indices of all atoms in the molecule corresponding to a selected
type.
Args:
type (int): the atom type (atomic number, e.g. 6 for carbon)
Returns:
list of int: indices of all atoms with the selected type
'''
if type not in self._row_indices:
self._row_indices[type] = np.where(self.numbers == type)[0]
return self._row_indices[type]
class ConnectivityCompressor():
    '''
    Utility class that provides methods to compress and decompress connectivity
    matrices (stored as per-bond-order index lists over the upper triangle).
    '''

    def __init__(self):
        pass

    def compress(self, connectivity_matrix):
        '''
        Compresses a single connectivity matrix.

        Args:
            connectivity_matrix (numpy.ndarray): array (n_atoms x n_atoms)
                containing the bond orders of bonds between atoms of a molecule

        Returns:
            dict (str/int->int): the length of the non-redundant connectivity
            matrix (list with upper triangular part) and the indices of that list
            for bond orders > 0
        '''
        # squareform yields the flattened upper triangle (diagonal excluded)
        upper = squareform(connectivity_matrix)
        compressed = {'n_entries': len(upper)}
        # record, per non-zero bond order, where that order occurs
        for order in np.unique(upper).astype(int):
            if order > 0:
                compressed[int(order)] = np.where(upper == order)[0]
        return compressed

    def decompress(self, idcs_dict):
        '''
        Retrieve the full (n_atoms x n_atoms) connectivity matrix from compressed
        format.

        Args:
            idcs_dict (dict str/int->int): compressed connectivity matrix
                (obtained with the compress method)

        Returns:
            numpy.ndarray: full connectivity matrix as an array of shape (n_atoms
            x n_atoms)
        '''
        flat = np.zeros(idcs_dict['n_entries'])
        for key in idcs_dict:
            # skip the 'n_entries' bookkeeping key; bond-order keys are ints or
            # digit strings
            if isinstance(key, int) or key.isdigit():
                flat[idcs_dict[key]] = int(key)
        return squareform(flat)

    def compress_batch(self, connectivity_batch):
        '''
        Compress a batch of connectivity matrices.

        Args:
            connectivity_batch (list of numpy.ndarray): list of connectivity matrices

        Returns:
            list of dict: batch of compressed connectivity matrices (see compress)
        '''
        return [self.compress(matrix) for matrix in connectivity_batch]

    def decompress_batch(self, idcs_dict_batch):
        '''
        Retrieve a list of full connectivity matrices from a batch of compressed
        connectivity matrices.

        Args:
            idcs_dict_batch (list of dict): list with compressed connectivity
                matrices

        Return:
            list numpy.ndarray: batch of full connectivity matrices (see decompress)
        '''
        return [self.decompress(d) for d in idcs_dict_batch]
class IndexProvider():
    '''
    Class which allows to filter a large set of molecules for desired structures
    according to provided statistics. The filtering is done using a selection string
    of the general format 'Statistics_nameDelimiterOperatorTarget_value'
    (e.g. 'C,>8' to filter for all molecules with more than eight carbon atoms where
    'C' is the statistic counting the number of carbon atoms in a molecule, ',' is the
    delimiter, '>' is the operator, and '8' is the target value).

    Args:
        statistics (numpy.ndarray):
            statistics of all molecules where columns correspond to molecules and rows
            correspond to available statistics (n_statistics x n_molecules)
        row_headlines (numpy.ndarray):
            the names of the statistics stored in each row (e.g. 'F' for the number of
            fluorine atoms or 'R5' for the number of rings of size 5)
        default_filter (str, optional):
            the default behaviour of the filter if no operator and target value are
            given (e.g. filtering for 'F' will give all molecules with at least 1
            fluorine atom if default_filter='>0' or all molecules with exactly 2
            fluorine atoms if default_filter='==2', default: '>0')
        delimiter (str, optional):
            the delimiter used to separate names of statistics from the operator and
            target value in the selection strings (default: ',')
    '''
    # dictionary mapping strings of available operators to corresponding function:
    op_dict = {'<': operator.lt,
               '<=': operator.le,
               '==': operator.eq,
               '=': operator.eq,
               '!=': operator.ne,
               '>': operator.gt,
               '>=': operator.ge}
    # raw strings avoid invalid escape sequence warnings (e.g. '\-' in non-raw
    # strings is deprecated, W605) while matching exactly the same patterns
    rel_re = re.compile(r'<=|<|={1,2}|!=|>=|>')  # regular expression for operators
    num_re = re.compile(r'[\-]*[0-9]+[.]*[0-9]*')  # regular expression for target values

    def __init__(self, statistics, row_headlines, default_filter='>0', delimiter=','):
        self.statistics = np.array(statistics)
        self.headlines = list(row_headlines)
        # pre-parse the default operator and target value once
        self.default_relation = self.rel_re.search(default_filter).group(0)
        self.default_number = float(self.num_re.search(default_filter).group(0))
        self.delimiter = delimiter

    def get_selected(self, selection_str, idcs=None):
        '''
        Retrieve the indices of all molecules which fulfill the selection criteria.
        The selection string is of the general format
        'Statistics_nameDelimiterOperatorTarget_value' (e.g. 'C,>8' to filter for all
        molecules with more than eight carbon atoms where 'C' is the statistic counting
        the number of carbon atoms in a molecule, ',' is the delimiter, '>' is the
        operator, and '8' is the target value).
        The following operators are available:
        '<'
        '<='
        '=='
        '!='
        '>='
        '>'
        The target value can be any positive or negative integer or float value.
        Multiple statistics can be summed using '+' (e.g. 'F+N,=0' gives all
        molecules that have no fluorine and no nitrogen atoms).
        Multiple filters can be concatenated using '&' (e.g. 'H,>8&C,=5' gives all
        molecules that have more than 8 hydrogen atoms and exactly 5 carbon atoms).

        Args:
            selection_str (str): string describing the criterion(s) for filtering (build
                as described above)
            idcs (numpy.ndarray, optional): if provided, only this subset of all
                molecules is filtered for structures fulfilling the selection criteria

        Returns:
            list of int: indices of all the molecules in the dataset that fulfill the
                selection criterion(s)

        Raises:
            ValueError: if a criterion contains more than one delimiter and
                therefore cannot be parsed
        '''
        delimiter = self.delimiter
        if idcs is None:
            idcs = np.arange(len(self.statistics[0]))  # take all to begin with
        criterions = selection_str.split('&')  # split concatenated criteria
        for criterion in criterions:
            rel_strs = criterion.split(delimiter)
            # add multiple statistics if specified (e.g. 'F+N')
            heads = rel_strs[0].split('+')
            statistics = self.statistics[self.headlines.index(heads[0])][idcs]
            for head in heads[1:]:
                statistics += self.statistics[self.headlines.index(head)][idcs]
            if len(rel_strs) == 1:
                # no operator/target provided -> fall back to the default filter
                relation = self.op_dict[self.default_relation](
                    statistics, self.default_number)
            elif len(rel_strs) == 2:
                rel = self.rel_re.search(rel_strs[1]).group(0)
                num = float(self.num_re.search(rel_strs[1]).group(0))
                relation = self.op_dict[rel](statistics, num)
            else:
                # previously this case left `relation` unbound and crashed with
                # an UnboundLocalError further down -> fail with a clear message
                raise ValueError(f'Cannot parse selection criterion '
                                 f'"{criterion}" (too many delimiters)!')
            new_idcs = np.where(relation)[0]
            idcs = idcs[new_idcs]  # narrow down the selection for the next criterion
        return idcs
class ProcessQ(Process):
    '''
    Multiprocessing.Process class that runs a provided function using provided
    (keyword) arguments and puts the result into a provided Multiprocessing.Queue
    object (such that the result of the function can easily be obtained by the host
    process).

    Args:
        queue (Multiprocessing.Queue): the queue into which the results of running
            the target function will be put (the object in the queue will be a tuple
            containing the provided name as first entry and the function return as
            second entry).
        name (str): name of the object (is returned as first value in the tuple put
            into the queue.
        target (callable object): the function that is executed in the process's run
            method
        args (list of any): sequential arguments target is called with
        kwargs (dict (str->any)): keyword arguments target is called with
    '''

    def __init__(self, queue, name=None, target=None, args=(), kwargs=None):
        # use None as sentinel instead of a mutable default dict: the previous
        # `kwargs={}` default was shared between all instances (classic Python
        # mutable-default-argument pitfall)
        if kwargs is None:
            kwargs = {}
        super(ProcessQ, self).__init__(None, target, name, args, kwargs)
        self._name = name
        self._q = queue
        self._target = target
        self._args = args
        self._kwargs = kwargs

    def run(self):
        '''
        Method representing the process's activity.
        Invokes the callable object passed as the target argument, if any, with
        sequential and keyword arguments taken from the args and kwargs arguments,
        respectively. Puts the string passed as name argument and the returned result
        of the callable object into the queue as (name, result).
        '''
        if self._target is not None:
            res = (self.name, self._target(*self._args, **self._kwargs))
            self._q.put(res)
| Python |
3D | rhyan10/G-SchNetOE62 | radial_calculation.py | .py | 1,692 | 40 | import ase.io
import ase
import numpy as np
import ase.neighborlist
# Scan the OE62 database and gather, for every molecule, the largest distance
# between an atom and any of its neighbours (as determined by ase's neighbour
# list). The global max/min over all molecules then bound the radial cutoff.
db = ase.io.read("OE62.db", ":")
maxdists = []  # per-molecule maximum neighbour distance
valencedict = {}  # atomic number -> list of observed neighbour counts
# make a dict that has an entry for each atom type expected in OE62
available_atom_types = [1, 3, 5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 52, 53]
for atom_type in available_atom_types:
    valencedict[atom_type] = []
for i in range(len(db)):
    dists = db[i].get_all_distances()
    # this gives you an ase object with some properties
    neighbors = ase.neighborlist.build_neighbor_list(db[i])
    neighborlist = neighbors.nl.neighbors
    # distances of all neighbors within this molecule
    neighbordist = []
    # get atom types
    atypes = db[i].get_atomic_numbers()
    # every atom has a list of neighbors, we iterate over those
    for index, iatom in enumerate(neighborlist):
        # the first index is always the atom we look at,
        # the other indices contain the neighbors
        # check if we have not missed an atom type; previously this only
        # printed a warning and then crashed with a KeyError on the append,
        # now the missing entry is created so processing can continue
        if atypes[index] not in valencedict:
            print(atypes[index], "not an entry of the dictionary, check if this atom type is missed in the OE62-publication")
            valencedict[atypes[index]] = []
        # append the dict entry for the atom type with the len-1 of the neighbor list
        valencedict[atypes[index]].append(len(iatom) - 1)
        for ineighbor in iatom:
            # append all distances of one atom to its neighbors
            neighbordist.append(dists[iatom[0], ineighbor])
    # now we want the maximum distance of all neighbour-distances
    maxdists.append(np.max(neighbordist))
radial_limit_upper = np.max(maxdists)
print(radial_limit_upper)
radial_limit_lower = np.min(maxdists)
print(radial_limit_lower)
3D | rhyan10/G-SchNetOE62 | radical_filtering.py | .py | 1,469 | 46 | import ase.io
import pickle
import sys
import time
import numpy as np
# Filter generated molecules for radicals: a neutral molecule whose total
# electron count (= sum of atomic numbers) is odd is an open-shell radical and
# is discarded; even-electron molecules are kept and written to a new database.
#geoms = ase.io.read("./data/OE62.db")
#geoms = ase.io.read("generated_molecules.db",":100")
geoms=ase.io.read("./models/model1/generated/generated_molecules_11.db",":100")
sys.path.append('./GSchNetOE62')
from GSchNetOE62 import utility_functions
from utility_functions import print_atom_bond_ring_stats
from utility_classes import Molecule
even = 0  # number of molecules with even electron count (kept)
odd = 0  # number of molecules with odd electron count (discarded radicals)
index = 0  # number of molecules processed so far
#predictions = np.load('predictions.npz')
molecule_to_keep = []
#filtered_predictions = []
#import os, psutil
#process = psutil.Process(os.getpid())
#print(len(predictions['eigenvalues_pbe0'][0]))
time.sleep(5)
for geom in geoms:
    #os = geom.get_positions()
    atypes = geom.get_atomic_numbers()
    # for a neutral molecule the sum of atomic numbers equals the electron count
    if(sum(atypes)%2 == 0):
        even +=1
        molecule_to_keep.append(geom)
        #filtered_predictions.append(predictions['eigenvalues_pbe0'][index])
    else:
        odd +=1
    index +=1
print(index)
#print(len(filtered_predictions))
#print(len(filtered_predictions[-1]))
#print(process.memory_info().rss) # in bytes
print(molecule_to_keep)
#print(len(molecule_to_keep))
#print(even)
#filtered_predictions = predictions['eigenvalues_pbe0'][molecule_to_keep]
#print(len(filtered_predictions))
#np.savez_compressed('filtered_predictions2.npz',eigenvalues_pbe0 = filtered_predictions)
ase.io.write("FullGenerated.db", molecule_to_keep)
#ase.io.write("generated_molecules_no_rad.db", molecule_to_keep)
| Python |
3D | rhyan10/G-SchNetOE62 | template_preprocess_dataset.py | .py | 12,931 | 272 | import collections
import argparse
import sys
import time
import numpy as np
import logging
from ase.db import connect
from scipy.spatial.distance import pdist, squareform
from utility_classes import ConnectivityCompressor, Molecule
from multiprocessing import Process, Queue
from pathlib import Path
# list names of collected statistics here (e.g. the number of atoms of each type)
stat_heads = ['n_atoms', 'C', 'N', 'O', 'F', 'H']
# atomic numbers in the same order as stat_heads[1:] (C=6, N=7, O=8, F=9, H=1);
# used to index the bincount of atomic numbers in preprocess_dataset
atom_types = [6, 7, 8, 9, 1]  # atom type charges in the same order as in stat_heads
def preprocess_dataset(datapath, new_db_path=None, cutoff=2.0,
                       precompute_distances=True, remove_invalid=True,
                       invalid_list=None, valence_list=None, logging_print=True):
    '''
    Pre-processes all molecules of a dataset.
    Along with a new database containing the pre-processed molecules, a
    "input_db_invalid.txt" file holding the indices of removed molecules and a
    "new_db_statistics.npz" file (containing atom count statistics for all molecules in
    the new database) are stored.

    Args:
        datapath (str): full path to dataset (ase.db database)
        new_db_path (str, optional): full path to new database where pre-processed
            molecules shall be stored (None to simply append "gen" to the name in
            datapath, default: None)
        cutoff (float, optional): cutoff value in angstrom used to determine which
            atoms in a molecule are considered as neighbors (i.e. connected, default:
            2.0)
        precompute_distances (bool, optional): if True, the pairwise distances between
            atoms in each molecule are computed and stored in the database (default:
            True)
        remove_invalid (bool, optional): if True, molecules that do not pass the
            validity or connectivity checks are removed from the new database (default:
            True)
        invalid_list (list of int, optional): precomputed list containing indices of
            molecules that are marked as invalid (default: None)
        valence_list (list, optional): the valence of atom types in the form
            [type1 valence type2 valence ...] which could be used for valence checks
            (not implemented, default: None)
        logging_print (bool, optional): set True to show output with logging.info
            instead of standard printing (default: True)
    '''
    # convert paths
    datapath = Path(datapath)
    if new_db_path is None:
        new_db_path = datapath.parent / (datapath.stem + 'gen.db')
    else:
        new_db_path = Path(new_db_path)

    def _print(x, end='\n', flush=False):
        # route output through logging or stdout depending on logging_print
        if logging_print:
            logging.info(x)
        else:
            print(x, end=end, flush=flush)

    with connect(datapath) as db:
        n_all = db.count()
        if n_all == 0:
            _print('No molecules found in data base!')
            sys.exit(0)
    _print('\nPre-processing data...')
    if logging_print:
        _print(f'Processed: 0 / {n_all}...')
    else:
        _print('0.00%', end='', flush=True)
    # setup counter etc.
    count = 0  # count number of discarded (invalid etc.) molecules
    disc = []  # indices of disconnected structures
    inval = []  # indices of invalid structures
    stats = np.empty((len(stat_heads), 0))  # scaffold for statistics
    start_time = time.time()
    compressor = ConnectivityCompressor()  # used to compress connectivity matrices
    # check if list of invalid molecules was provided and cast it into a set (allows
    # for faster lookup)
    if invalid_list is not None and remove_invalid:
        invalid_list = {*invalid_list}
        n_inval = len(invalid_list)
    else:
        n_inval = 0
    # preprocess each structure in the source db and write results into target db
    with connect(datapath) as source_db:
        with connect(new_db_path) as target_db:
            for i in range(source_db.count()):
                # skip molecule if index is present in precomputed list of invalid
                # molecules and if remove_invalid is True
                if remove_invalid and invalid_list is not None:
                    if i in invalid_list:
                        continue
                # get molecule from database (ase db rows are 1-indexed)
                row = source_db.get(i + 1)
                # extract additional data stored with molecule
                data = row.data
                # get ase.Atoms object
                at = row.toatoms()
                # get positions and atomic numbers
                pos = at.positions
                numbers = at.numbers
                # the algorithm to sample generation traces (atom placement steps)
                # assumes that the atoms in our structures are ordered by their
                # distance to the center of mass, thus we order them in that way here:
                # center positions (using center of mass)
                pos = pos - at.get_center_of_mass()
                # order atoms by distance to center of mass
                center_dists = np.sqrt(np.maximum(np.sum(pos ** 2, axis=1), 0))
                idcs_sorted = np.argsort(center_dists)
                pos = pos[idcs_sorted]
                numbers = numbers[idcs_sorted]
                # update positions and atomic numbers accordingly in ase.Atoms object
                at.positions = pos
                at.numbers = numbers
                # retrieve connectivity matrix (and pairwise distances)
                connectivity, pairwise_distances = get_connectivity(at, cutoff)
                # check if the connectivity matrix represents a proper structure (i.e.
                # if all atoms are connected to each other via some path) as
                # disconnected structures cannot be used for training (there must be
                # an atom placement trajectory for G-SchNet)
                if is_disconnected(connectivity):
                    count += 1
                    disc += [i]
                    continue
                # you could potentially implement some valency constraint checking here
                # and remove or mark molecules that do not pass the test
                # val = [check validity e.g. with connectivity and valence list]
                # if remove_invalid:
                #     if not val:
                #         count += 1
                #         inval += [i]
                #         continue
                # update data stored in db with a compressed version of the
                # connectivity matrix (we store only indices of entries >= 1)
                data.update({'con_mat': compressor.compress(connectivity)})
                # if desired, also store precomputed distances (in condensed format)
                if precompute_distances:
                    data.update({'dists': pairwise_distances})
                # write preprocessed molecule and data to target database
                target_db.write(at, data=data)
                # gather some statistics about the training data (these can e.g. be
                # used to filter molecules when displaying them with the
                # display_molecules.py script); here we simply count the number of
                # atoms of each type
                atom_type_counts = np.bincount(numbers, minlength=10)
                # store counts [n_atoms, C, N, O, F, H] as listed in stat_heads
                statistics = np.array([len(numbers), *atom_type_counts[atom_types]])
                # update stats array with statistics of current molecule
                stats = np.hstack((stats, statistics[:, None]))
                # print progress every 1000 molecules
                if (i+1) % 1000 == 0:
                    _print(f'Processed: {i+1:6d} / {n_all}...')
    if not logging_print:
        _print('\033[K', end='\n', flush=True)
    _print(f'... successfully validated {n_all - count - n_inval} data '
           f'points!', flush=True)
    if invalid_list is not None:
        _print(f'{n_inval} structures were removed because they are on the '
               f'pre-computed list of invalid molecules!', flush=True)
    if len(disc)+len(inval) > 0:
        _print(f'CAUTION: Could not validate {len(disc)+len(inval)} additional '
               f'molecules. You might want to increase the cutoff (currently '
               f'{cutoff} angstrom) in order to have less disconnected structures. '
               f'The molecules were removed and their indices are '
               f'appended to the list of invalid molecules stored at '
               f'{datapath.parent / (datapath.stem + f"_invalid.txt")}',
               flush=True)
        # BUGFIX: the previous code always called np.sort(list(invalid_list))
        # here, which raised a TypeError when no precomputed invalid_list was
        # provided (invalid_list is None) -> only append to it when present
        new_invalid = np.sort(inval + disc)
        if invalid_list is not None:
            all_invalid = np.append(np.sort(list(invalid_list)), new_invalid)
        else:
            all_invalid = new_invalid
        np.savetxt(datapath.parent / (datapath.stem + f'_invalid.txt'),
                   all_invalid, fmt='%d')
    elif remove_invalid:
        _print(f'Identified {len(disc)} disconnected structures, and {len(inval)} '
               f'invalid structures! You might want to increase the cutoff (currently '
               f'{cutoff} angstrom) in order to have less disconnected structures.',
               flush=True)
        np.savetxt(datapath.parent / (datapath.stem + f'_invalid.txt'),
                   np.sort(inval + disc), fmt='%d')
    _print('\nCompressing and storing statistics with numpy...')
    np.savez_compressed(new_db_path.parent/(new_db_path.stem+f'_statistics.npz'),
                        stats=stats,
                        stat_heads=stat_heads)
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    _print(f'Done! Pre-processing needed {h:d}h{m:02d}m{s:02d}s.')
def get_connectivity(mol, cutoff=2.0):
    '''
    Compute a binary connectivity matrix for a molecule using a simple radial
    cutoff: two atoms whose pairwise distance does not exceed the cutoff are
    labeled as connected. The matrix only needs to be binary as it is only used
    to sample generation traces, i.e. an order of atom placement steps for
    training. One could alternatively use chemoinformatics tools to obtain bond
    order information, but a radial cutoff is more robust and more general as it
    does not depend on usually unreliable bond order assignment algorithms and
    can be used for all kinds of materials or molecules.

    Args:
        mol (ase.Atoms): one molecule from the database
        cutoff (float, optional): cutoff value in angstrom used to determine which
            atoms are connected

    Returns:
        numpy.ndarray: the computed connectivity matrix (n_atoms x n_atoms, float)
        numpy.ndarray: the computed pairwise distances in a condensed format
            (length is n_atoms*(n_atoms-1)/2), see scipy.spatial.distance.pdist for
            more information
    '''
    # condensed pairwise distance vector over all atom positions
    pairwise_distances = pdist(mol.get_positions())
    # atoms within the cutoff count as connected (binary "bond order" of 1.0)
    within_cutoff = (pairwise_distances <= cutoff).astype(float)
    # expand the condensed vector to the redundant square matrix form
    adjacency = squareform(within_cutoff)
    # atoms are not considered to be their own neighbors
    np.fill_diagonal(adjacency, 0)
    return adjacency, pairwise_distances
def is_disconnected(connectivity):
    '''
    Assess whether all atoms of a molecule are connected using a connectivity matrix

    Args:
        connectivity (numpy.ndarray): matrix (n_atoms x n_atoms) indicating bonds
            between atoms

    Returns
        bool: True if the molecule consists of at least two disconnected graphs,
            False if all atoms are connected by some path
    '''
    n_nodes = len(connectivity)
    # graph traversal starting from node (atom) 0, tracking visited nodes
    visited = np.zeros(n_nodes, dtype=bool)
    visited[0] = True
    frontier = [0]
    while frontier:
        current = frontier.pop()
        # enqueue all bonded, not yet visited neighbors of the current node
        for neighbor in np.flatnonzero(connectivity[current] > 0):
            if not visited[neighbor]:
                visited[neighbor] = True
                frontier.append(neighbor)
    # if any node was never reached, the graph has disconnected parts
    return not visited.all()
| Python |
3D | rhyan10/G-SchNetOE62 | utility_classes.py | .py | 39,030 | 956 | import operator
import re
import numpy as np
import openbabel as ob
import pybel
from multiprocessing import Process
from rdkit import Chem
from scipy.spatial.distance import squareform
class Molecule:
'''
Molecule class that allows to get statistics such as the connectivity matrix,
molecular fingerprint, canonical smiles representation, or ring count given
positions of atoms and their atomic numbers. Currently supports molecules made of
carbon, nitrogen, oxygen, fluorine, and hydrogen (such as in the QM9 benchmark
dataset). Mainly relies on routines from Open Babel and RdKit.
Args:
pos (numpy.ndarray): positions of atoms in euclidean space (n_atoms x 3)
atomic_numbers (numpy.ndarray): list with nuclear charge/type of each atom
(e.g. 1 for hydrogens, 6 for carbons etc.).
connectivity_matrix (numpy.ndarray, optional): optionally, a pre-calculated
connectivity matrix (n_atoms x n_atoms) containing the bond order between
atom pairs can be provided (default: None).
store_positions (bool, optional): set True to store the positions of atoms in
self.positions (only for convenience, not needed for computations, default:
False).
'''
type_infos = {1: {'name': 'H',
'n_bonds': 1},
3: {'name': 'Li',
'n_bonds': 1},
5: {'name': 'B',
'n_bonds': 3},
6: {'name': 'C',
'n_bonds': 4},
7: {'name': 'N',
'n_bonds': 3},
8: {'name': 'O',
'n_bonds': 2},
9: {'name': 'F',
'n_bonds': 1},
14: {'name':'Si',
'n_bonds': 4},
15: {'name': 'P',
'n_bonds': 5},
16: {'name': 'S',
'n_bonds': 6},
17: {'name': 'Cl',
'n_bonds': 1},
33: {'name': 'As',
'n_bonds': 5},
34: {'name': 'Se',
'n_bonds': 6},
35: {'name': 'Br',
'n_bonds': 1},
52: {'name': 'Te',
'n_bonds': 6},
53: {'name': 'I',
'n_bonds': 1},
}
type_charges = {'H': 1, 'Li':3,'B':5,'C': 6, 'N': 7, 'O': 8, 'F': 9,'Si':14,'P':15,'S':16,'Cl':17,'As':33,'Se':34,'Br':35,'Te':52,'I':53}
def __init__(self, pos, atomic_numbers, connectivity_matrix=None,
store_positions=False):
# set comparison metrics to None (will be computed just in time)
self._fp = None
self._fp_bits = None
self._can = None
self._mirror_can = None
self._inchi_key = None
self._inchi_string = None
self._bond_stats = None
self._fixed_connectivity = False
self._row_indices = {}
self._obmol = None
self._rings = None
self._n_atoms_per_type = None
self._connectivity = connectivity_matrix
# set statistics
self.n_atoms = len(pos)
self.numbers = atomic_numbers
self._unique_numbers = {*self.numbers} # set for fast query
self.positions = pos
if not store_positions:
self._obmol = self.get_obmol() # create obmol before removing pos
self.positions = None
def sanity_check(self):
'''
Check whether the sum of valence of all atoms can be divided by 2.
Returns:
bool: True if the test is passed, False otherwise
'''
count = 0
for atom in self.numbers:
count += self.type_infos[atom]['n_bonds']
if count % 2 == 0:
return True
else:
return False
    def get_obmol(self):
        '''
        Retrieve the underlying Open Babel OBMol object (built lazily from the
        stored positions and atomic numbers on first access and cached).

        Returns:
            OBMol object: Open Babel OBMol representation
        '''
        if self._obmol is None:
            if self.positions is None:
                # NOTE(review): returns None implicitly here — callers must
                # handle a missing OBMol
                print('Error, cannot create obmol without positions!')
                return
            if self.numbers is None:
                print('Error, cannot create obmol without atomic numbers!')
                return
            # use openbabel to infer bonds and bond order:
            obmol = ob.OBMol()
            obmol.BeginModify()
            # set positions and atomic numbers of all atoms in the molecule
            for p, n in zip(self.positions, self.numbers):
                obatom = obmol.NewAtom()
                obatom.SetAtomicNum(int(n))
                obatom.SetVector(*p.tolist())
            # infer bonds and bond order
            obmol.ConnectTheDots()
            obmol.PerceiveBondOrders()
            obmol.EndModify()
            self._obmol = obmol
        return self._obmol
def get_fp(self):
'''
Retrieve the molecular fingerprint (the path-based FP2 from Open Babel is used,
which means that paths of length up to 7 are considered).
Returns:
pybel.Fingerprint object: moleculer fingerprint (use "fp1 | fp2" to
calculate the Tanimoto coefficient of two fingerprints)
'''
if self._fp is None:
# calculate fingerprint
self._fp = pybel.Molecule(self.get_obmol()).calcfp()
return self._fp
def get_fp_bits(self):
'''
Retrieve the bits set in the molecular fingerprint.
Returns:
Set of int: object containing the bits set in the molecular fingerprint
'''
if self._fp_bits is None:
self._fp_bits = {*self.get_fp().bits}
return self._fp_bits
def get_can(self):
'''
Retrieve the canonical SMILES representation of the molecule.
Returns:
String: canonical SMILES string
'''
if self._can is None:
# calculate canonical SMILES
self._can = pybel.Molecule(self.get_obmol()).write('can')
return self._can
def get_mirror_can(self):
'''
Retrieve the canonical SMILES representation of the mirrored molecule (the
z-coordinates are flipped).
Returns:
String: canonical SMILES string of the mirrored molecule
'''
if self._mirror_can is None:
# calculate canonical SMILES of mirrored molecule
self._flip_z() # flip z to mirror molecule using x-y plane
self._mirror_can = pybel.Molecule(self.get_obmol()).write('can')
self._flip_z() # undo mirroring
return self._mirror_can
def get_inchi_string(self):
'''
Retrieve the InChI string of the molecule.
Returns:
String: InChI string
'''
if self._inchi_string is None:
# calculate inchi string
self._inchi_string = pybel.Molecule(self.get_obmol()).\
write('inchi')
return self._inchi_string
def get_inchi_key(self):
'''
Retrieve the InChI-key of the molecule.
Returns:
String: InChI-key
'''
if self._inchi_key is None:
# calculate inchi key
self._inchi_key = pybel.Molecule(self.get_obmol()).\
write('inchikey')
return self._inchi_key
def _flip_z(self):
'''
Flips the z-coordinates of atom positions (to get a mirrored version of the
molecule).
'''
if self._obmol is None:
self.get_obmol()
for atom in ob.OBMolAtomIter(self._obmol):
x, y, z = atom.x(), atom.y(), atom.z()
atom.SetVector(x, y, -z)
self._obmol.ConnectTheDots()
self._obmol.PerceiveBondOrders()
def get_connectivity(self):
'''
Retrieve the connectivity matrix of the molecule.
Returns:
numpy.ndarray: (n_atoms x n_atoms) array containing the pairwise bond orders
between atoms (0 for no bond).
'''
if self._connectivity is None:
# get connectivity matrix
connectivity = np.zeros((self.n_atoms, len(self.numbers)))
for atom in ob.OBMolAtomIter(self.get_obmol()):
index = atom.GetIdx() - 1
# loop over all neighbors of atom
for neighbor in ob.OBAtomAtomIter(atom):
idx = neighbor.GetIdx() - 1
bond_order = neighbor.GetBond(atom).GetBO()
#print(f'{index}-{idx}: {bond_order}')
# do not count bonds between two hydrogen atoms
if (self.numbers[index] == 1 and self.numbers[idx] == 1
and bond_order > 0):
bond_order = 0
connectivity[index, idx] = bond_order
self._connectivity = connectivity
return self._connectivity
def get_ring_counts(self):
'''
Retrieve a list containing the sizes of rings in the symmetric smallest set
of smallest rings (S-SSSR from RdKit) in the molecule (e.g. [5, 6, 5] for two
rings of size 5 and one ring of size 6).
Returns:
List of int: list with ring sizes
'''
if self._rings is None:
# calculate symmetric SSSR with RdKit using the canonical smiles
# representation as input
can = self.get_can()
mol = Chem.MolFromSmiles(can)
# # Try calculating symmetric SSSR with RdKit using InChI instead of SMILES
# # to avoid incorrect SMILES representation.
# inchi = self.get_inchi_string()
# mol = Chem.MolFromInchi(inchi)
if mol is not None:
ssr = Chem.GetSymmSSSR(mol)
self._rings = [len(ssr[i]) for i in range(len(ssr))]
else:
self._rings = [] # cannot count rings
return self._rings
def get_ring_counts_and_aromaticity(self):
'''
Retrieve a dictionary of the number of rings of each size present in a
molecule, determined by OpenBabel's smallest set of smallest rings. Also
retrieve if these rings are considered to be aromatic.
'''
if self._obmol is None:
self.get_obmol()
ring_dict = {}
ring_list = self._obmol.GetSSSR()
for ring in ring_list:
ring_size = ring.Size()
ring_key = f'nrings_{ring_size}'
if ring_key not in ring_dict.keys():
ring_dict[ring_key] = 1
else:
ring_dict[ring_key] += 1
ring_aro = ring.IsAromatic()
if ring_aro:
ring_key = f'nrings_{ring_size}_aromatic'
if ring_key not in ring_dict.keys():
ring_dict[ring_key] = 1
else:
ring_dict[ring_key] += 1
return ring_dict
def get_aromaticity(self):
'''
Retrieve the percentage of atoms in the molecule (excluding hydrogens) which
are perceived as aromatic by OpenBabel. Also determine the number of atoms
of each element type which are aromatic.
Returns:
Tuple of float and dict.
'''
if self._obmol is None:
self.get_obmol()
natoms = 0.0
natoms_aro = 0.0
aro_dict = {}
for atom in ob.OBMolAtomIter(self._obmol):
atom_number = atom.GetAtomicNum()
atom_elem = self.type_infos[atom_number]['name']
if atom_elem != 'H':
natoms += 1.0
if atom.IsAromatic():
natoms_aro += 1.0
aro_key = f'n_aromatic_{atom_elem}'
if aro_key in aro_dict.keys():
aro_dict[aro_key] += 1
else:
aro_dict[aro_key] = 1
aro_percent = (natoms_aro/natoms)*100
return aro_percent, aro_dict
def get_n_atoms_per_type(self):
'''
Retrieve the number of atoms in the molecule per type.
Returns:
numpy.ndarray: number of atoms in the molecule per type, where the order
corresponds to the order specified in Molecule.type_infos
'''
if self._n_atoms_per_type is None:
_types = np.array(list(self.type_infos.keys()), dtype=int)
self._n_atoms_per_type =\
np.bincount(self.numbers, minlength=np.max(_types)+1)[_types]
return self._n_atoms_per_type
def remove_unpicklable_attributes(self, restorable=True):
'''
Some attributes of the class cannot be processed by pickle. This method
allows to remove these attributes prior to pickling.
Args:
restorable (bool, optional): Set True to allow restoring the deleted
attributes later on (default: True)
'''
# set attributes which are not picklable (SwigPyObjects) to None
if restorable and self.positions is None and self._obmol is not None:
# store positions to allow restoring obmol object later on
pos = [atom.coords for atom in pybel.Molecule(self._obmol).atoms]
self.positions = np.array(pos)
self._obmol = None
self._fp = None
def tanimoto_similarity(self, other_mol, use_bits=True):
'''
Get the Tanimoto (fingerprint) similarity to another molecule.
Args:
other_mol (Molecule or pybel.Fingerprint/list of bits set):
representation of the second molecule (if it is not a Molecule object,
it needs to be a pybel.Fingerprint if use_bits is False and a list of bits
set in the fingerprint if use_bits is True).
use_bits (bool, optional): set True to calculate Tanimoto similarity
from bits set in the fingerprint (default: True)
Returns:
float: Tanimoto similarity to the other molecule
'''
if use_bits:
a = self.get_fp_bits()
b = other_mol.get_fp_bits() if isinstance(other_mol, Molecule) \
else other_mol
n_equal = len(a.intersection(b))
if len(a) + len(b) == 0: # edge case with no set bits
return 1.
return n_equal / (len(a)+len(b)-n_equal)
else:
fp_other = other_mol.get_fp() if isinstance(other_mol, Molecule)\
else other_mol
return self.get_fp() | fp_other
    def _update_bond_orders(self, idc_lists):
        '''
        Updates the bond orders in the underlying OBMol object from the current
        connectivity matrix.

        Args:
            idc_lists (list of list of int): nested list containing bonds, i.e. pairs
                of row indices (list1) and column indices (list2) which shall be updated
        '''
        con_mat = self.get_connectivity()
        self._obmol.BeginModify()
        for i in range(len(idc_lists[0])):
            idx1 = idc_lists[0][i]
            idx2 = idc_lists[1][i]
            # Open Babel atom indices are 1-based, hence the +1 offset
            obbond = self._obmol.GetBond(int(idx1+1), int(idx2+1))
            obbond.SetBO(int(con_mat[idx1, idx2]))
        self._obmol.EndModify()
        # reset fingerprints etc
        # NOTE(review): _fp_bits and _inchi_string are not reset here although
        # they also depend on bond orders — confirm whether that is intended
        self._fp = None
        self._can = None
        self._mirror_can = None
        self._inchi_key = None
    def get_fixed_connectivity(self, recursive_call=False):
        '''
        Attempts to fix the connectivity matrix using some heuristics (as some valid
        QM9 molecules do not pass the valency check using the connectivity matrix
        obtained with Open Babel, which seems to have problems with assigning correct
        bond orders to aromatic rings containing Nitrogen).
        The heuristics only touch bonds between carbon (6) and nitrogen (7) atoms
        and are applied in a fixed order; results are cached in
        self._connectivity and the attempt is only made once
        (self._fixed_connectivity flag).
        Args:
            recursive_call (bool, do not set True): flag that indicates a recursive
                call (used internally, do not set to True)
        Returns:
            numpy.ndarray: (n_atoms x n_atoms) array containing the pairwise bond orders
                between atoms (0 for no bond) after the attempted fix.
        '''
        # if fix has already been attempted, return the connectivity matrix
        if self._fixed_connectivity:
            return self._connectivity
        # define helpers:
        # increases bond order between two atoms in connectivity matrix
        def increase_bond(con_mat, idx1, idx2):
            con_mat[idx1, idx2] += 1
            con_mat[idx2, idx1] += 1
            return con_mat
        # decreases bond order between two atoms in connectivity matrix
        def decrease_bond(con_mat, idx1, idx2):
            con_mat[idx1, idx2] -= 1
            con_mat[idx2, idx1] -= 1
            return con_mat
        # returns only the rows of the connectivity matrix corresponding to atoms of
        # certain types (and the indices of these atoms)
        def get_typewise_connectivity(con_mat, types):
            idcs = []
            for type in types:
                idcs += list(self._get_row_idcs(type))
            return con_mat[idcs], np.array(idcs).astype(int)
        # store old connectivity matrix for later comparison
        old_mat = self.get_connectivity().copy()
        # get connectivity matrix and find indices of N and C atoms
        con_mat = self.get_connectivity()
        if 6 not in self._unique_numbers and 7 not in self._unique_numbers:
            # do not attempt fixing if there is no carbon and no nitrogen
            return con_mat
        N_mat, N_idcs = get_typewise_connectivity(con_mat, [7])
        C_mat, C_idcs = get_typewise_connectivity(con_mat, [6])
        NC_idcs = np.hstack((N_idcs, C_idcs))  # indices of all N and C atoms
        NC_valences = self._get_valences()[NC_idcs]  # array with valency constraints
        # return connectivity if valency constraints of N and C atoms are already met
        if np.all(np.sum(con_mat[NC_idcs], axis=1) == NC_valences):
            return con_mat
        # if a C or N atom is "overcharged" (total bond order too high) we decrease
        # double to single bonds between N-N or N-C until it is not overcharged anymore
        # (e.g. C=N=C -> C=N-C)
        if 7 in self._unique_numbers:  # only necessary if molecule contains N
            for cur in NC_idcs:
                type = self.numbers[cur]
                if np.sum(con_mat[cur]) <= self.type_infos[type]['n_bonds']:
                    continue  # atom is not overcharged, nothing to do
                if type == 6:  # for carbon look only at nitrogen neighbors
                    neighbors = self._get_neighbors(cur, types=[7], strength=2)
                else:
                    neighbors = self._get_neighbors(cur, types=[6, 7],
                                                    strength=2)
                # weaken double bonds one by one until the valency target is hit
                for neighbor in neighbors:
                    con_mat = decrease_bond(con_mat, cur, neighbor)
                    self._connectivity = con_mat
                    if np.sum(con_mat[cur]) == \
                            self.type_infos[type]['n_bonds']:
                        break
        # get updated partial connectivity matrices for N and C
        N_mat, _ = get_typewise_connectivity(con_mat, [7])
        C_mat, _ = get_typewise_connectivity(con_mat, [6])
        # increase total number of bonds by transferring the strength of a
        # double C-N bond to two neighboring bonds, if the involved atoms
        # are not yet saturated (e.g. H2C-H2C=N-H2C -> H2C=H2C-N=H2C)
        if (np.sum(N_mat) < len(N_idcs) * 3 or np.sum(C_mat) < len(C_idcs) * 4) \
                and 7 in self._unique_numbers:
            for cur in NC_idcs:
                type = self.numbers[cur]
                if sum(con_mat[cur]) >= self.type_infos[type]['n_bonds']:
                    continue  # atom is already saturated
                # candidate two-step paths cur-C-N (or cur-C-C for nitrogen)
                CN_nbors = self._get_CN_neighbors(cur)
                for nbor_1, nbor_2 in CN_nbors:
                    if con_mat[nbor_1, nbor_2] <= 1:
                        continue  # need a double bond to redistribute
                    else:
                        # look for a singly-bonded, unsaturated neighbor of nbor_2
                        nbor_2_nbors = np.where(con_mat[nbor_2] == 1)[0]
                        for nbor_2_nbor in nbor_2_nbors:
                            nbor_2_nbor_type = self.numbers[nbor_2_nbor]
                            if (np.sum(con_mat[nbor_2_nbor]) <
                                    self.type_infos[nbor_2_nbor_type]['n_bonds']):
                                con_mat = increase_bond(con_mat, cur, nbor_1)
                                con_mat = increase_bond(con_mat, nbor_2, nbor_2_nbor)
                                con_mat = decrease_bond(con_mat, nbor_1, nbor_2)
                                self._connectivity = con_mat
        # increase bond strength between two undercharged neighbors C-N,
        # C-C or N-N (e.g HN-CH2 -> HN=CH2, starting from those atoms with least
        # available neighbors if there are multiple undercharged neighbors)
        undercharged_pairs = True
        while (undercharged_pairs):
            NC_charges = np.sum(con_mat[NC_idcs], axis=1)
            undercharged = NC_idcs[np.where(NC_charges < NC_valences)[0]]
            partial_con_mat = con_mat[undercharged][:, undercharged]
            # if none of the undercharged atoms are neighbors, stop
            if np.sum(partial_con_mat) == 0:
                break
            # sort by number of undercharged neighbors
            n_nbors = np.sum(partial_con_mat > 0, axis=0)
            # mask indices with zero undercharged neighbors to ignore them when sorting
            n_nbors[np.where(n_nbors == 0)[0]] = 1000
            cur = np.argmin(n_nbors)
            cur_nbor = np.where(partial_con_mat[cur] > 0)[0][0]
            con_mat = increase_bond(con_mat, undercharged[cur], undercharged[cur_nbor])
            self._connectivity = con_mat
        # if the molecule still is not valid, try to flip double bonds if an atom
        # forms a double bond and has at least one other neighbor that has too few bonds
        # (e.g. C-N=C -> C=N-C) and repeat above heuristics with a recursive call of
        # this function
        if not recursive_call and \
                not np.all(np.sum(con_mat[NC_idcs], axis=1) == NC_valences):
            changed = False
            candidates = np.where(np.any(con_mat[NC_idcs][:, NC_idcs] == 2, axis=0))[0]
            for cand in NC_idcs[candidates]:
                if np.sum(con_mat[cand, NC_idcs] == 2) == 0:
                    continue  # double bond may have vanished in a previous flip
                NC_charges = np.sum(con_mat[NC_idcs], axis=1)
                undercharged = NC_charges < NC_valences
                uc_neighbors = np.logical_and(con_mat[cand, NC_idcs] == 1, undercharged)
                if np.any(uc_neighbors):
                    # move one bond order from the doubly-bonded neighbor to the
                    # undercharged one
                    uc_neighbor = NC_idcs[np.where(uc_neighbors)[0][0]]
                    oc_neighbor = NC_idcs[
                        np.where(con_mat[cand, NC_idcs] == 2)[0][0]]
                    con_mat = increase_bond(con_mat, cand, uc_neighbor)
                    con_mat = decrease_bond(con_mat, cand, oc_neighbor)
                    self._connectivity = con_mat
                    changed = True
            if changed:
                # rerun the heuristics once on the flipped configuration
                self._connectivity = self.get_fixed_connectivity(
                    recursive_call=True)
        # store that fixing the connectivity matrix has already been attempted
        if not recursive_call:
            self._fixed_connectivity = True
            if np.any(old_mat != self._connectivity):
                # update bond orders in underlying OBMol object (where they changed)
                self._update_bond_orders(np.where(old_mat != self._connectivity))
        return self._connectivity
def _get_valences(self):
'''
Retrieve the valency constraints of all atoms in the molecule.
Returns:
numpy.ndarray: valency constraints (one per atom)
'''
valence = []
for atom in self.numbers:
valence += [self.type_infos[atom]['n_bonds']]
return np.array(valence)
    def _get_CN_neighbors(self, idx):
        '''
        For a focus atom of type K returns indices of atoms C (carbon) and N (nitrogen)
        on two-step paths of the form K-C-N (and K-C-C only for K=N since one atom
        needs to be nitrogen).
        Args:
            idx (int): the index of the focus atom from which paths are examined
        Returns:
            list of lists: list1[i] contains an index of a direct neighbor of the
                focus atom and list2[i] contains the index of a second neighbor on the
                i-th identified two-step path
        '''
        con_mat = self.get_connectivity()
        # boolean mask of direct neighbors of the focus atom
        nbors = con_mat[idx] > 0
        # carbon atoms directly bonded to the focus atom (first path step)
        C_nbors = np.where(np.logical_and(self.numbers == 6, nbors))[0]
        type = self.numbers[idx]
        # mask types to exclude idx from neighborhood
        _numbers = self.numbers.copy()
        _numbers[idx] = 0
        # nitrogen atoms bonded to any of the carbon neighbors; the 1-D type mask
        # broadcasts against the (len(C_nbors) x n_atoms) connectivity rows
        CN_nbors = np.where(np.logical_and(_numbers == 7, con_mat[C_nbors] > 0))
        # translate (row in C_nbors, column) pairs back to atom index pairs
        CN_nbors = [(C_nbors[CN_nbors[0][i]], CN_nbors[1][i])
                    for i in range(len(CN_nbors[0]))]
        if type == 7:  # for N atoms, also add C-C neighbors
            CC_nbors = np.where(np.logical_and(
                _numbers == 6, con_mat[C_nbors] > 0))
            CC_nbors = [
                (C_nbors[CC_nbors[0][i]], CC_nbors[1][i])
                for i in range(len(CC_nbors[0]))]
            CN_nbors += CC_nbors
        return CN_nbors
def _get_neighbors(self, idx, types=None, strength=1):
'''
Retrieve the indices of neighbors of an atom.
Args:
idx (int): index of the atom
types (list of int, optional): restrict the returned neighbors to
contain only atoms of the specified types (set None to apply no type
filter, default: None)
strength (int, optional): restrict the returned neighbors to contain
only atoms with a certain minimal bond order to the atom at idx
(default: 1)
Returns:
list of int: indices of all neighbors that meet the requirements
'''
con_mat = self.get_connectivity()
neighbors = con_mat[idx] >= strength
if types is not None:
type_arr = np.zeros(len(neighbors)).astype(bool)
for type in types:
type_arr = np.logical_or(type_arr, self.numbers == type)
return np.where(np.logical_and(neighbors, type_arr))[0]
    def get_bond_stats(self, ring_analysis='RDKit'):
        '''
        Retrieve the bond and ring count of the molecule. The bond count is
        calculated for every pair of types (e.g. C1N are all single bonds between
        carbon and nitrogen atoms in the molecule, C2N are all double bonds between
        such atoms etc.). The ring count is provided for rings from size 3 to 8 (R3,
        R4, ..., R8) and for rings greater than size eight (R>8).
        Args:
            ring_analysis (str, optional): backend for ring counting, 'RDKit' uses
                self.get_ring_counts and 'OpenBabel' uses
                self.get_ring_counts_and_aromaticity; any other value skips ring
                statistics entirely (default: 'RDKit')
        Returns:
            dict (str->int): bond and ring counts
        '''
        # results are computed once and cached in self._bond_stats
        if self._bond_stats is None:
            # 1st analyze bonds
            unique_types = np.sort(list(self._unique_numbers))
            # get connectivity and read bonds from matrix
            con_mat = self.get_connectivity()
            d = {}
            # iterate over unordered pairs of atom types (type2 >= type1)
            for i, type1 in enumerate(unique_types):
                row_idcs = self._get_row_idcs(type1)
                n_bonds1 = self.type_infos[type1]['n_bonds']
                for type2 in unique_types[i:]:
                    col_idcs = self._get_row_idcs(type2)
                    n_bonds2 = self.type_infos[type2]['n_bonds']
                    # bond order cannot exceed the smaller valency of the pair
                    max_bond_strength = min(n_bonds1, n_bonds2)
                    if n_bonds1 == n_bonds2:  # exclude small trivial molecules
                        max_bond_strength -= 1
                    for n in range(1, max_bond_strength + 1):
                        # key e.g. 'C2N' = double bonds between carbon and nitrogen
                        id = self.type_infos[type1]['name'] + str(n) + \
                             self.type_infos[type2]['name']
                        d[id] = np.sum(con_mat[row_idcs][:, col_idcs] == n)
                        if type1 == type2:
                            d[id] = int(d[id]/2)  # remove twice counted bonds
            # 2nd analyze rings
            if ring_analysis == 'RDKit':
                ring_counts = self.get_ring_counts()
                if len(ring_counts) > 0:
                    # bincount turns the list of ring sizes into per-size counts
                    ring_counts = np.bincount(np.array(ring_counts))
                    n_bigger_8 = 0
                    for i in np.nonzero(ring_counts)[0]:
                        if i < 9:
                            d[f'R{i}'] = ring_counts[i]
                        else:
                            n_bigger_8 += ring_counts[i]
                    if n_bigger_8 > 0:
                        d[f'R>8'] = n_bigger_8
            elif ring_analysis == 'OpenBabel':
                ring_dict = self.get_ring_counts_and_aromaticity()
                d.update(ring_dict)
            self._bond_stats = d
        return self._bond_stats
def _get_row_idcs(self, type):
'''
Retrieve the indices of all atoms in the molecule corresponding to a selected
type.
Args:
type (int): the atom type (atomic number, e.g. 6 for carbon)
Returns:
list of int: indices of all atoms with the selected type
'''
if type not in self._row_indices:
self._row_indices[type] = np.where(self.numbers == type)[0]
return self._row_indices[type]
class ConnectivityCompressor():
    '''
    Utility class that provides methods to compress and decompress connectivity
    matrices.
    '''

    def __init__(self):
        pass

    def compress(self, connectivity_matrix):
        '''
        Compresses a single connectivity matrix.
        Args:
            connectivity_matrix (numpy.ndarray): array (n_atoms x n_atoms)
                containing the bond orders of bonds between atoms of a molecule
        Returns:
            dict (str/int->int): the length of the non-redundant connectivity
                matrix (list with upper triangular part) and the indices of that
                list for bond orders > 0
        '''
        # flatten to the upper triangular part (the matrix is symmetric)
        upper = squareform(connectivity_matrix)
        compressed = {'n_entries': len(upper)}
        # record, per bond order > 0, at which positions it occurs
        for order in np.unique(upper).astype(int):
            if order > 0:
                compressed[int(order)] = np.where(upper == order)[0]
        return compressed

    def decompress(self, idcs_dict):
        '''
        Retrieve the full (n_atoms x n_atoms) connectivity matrix from compressed
        format.
        Args:
            idcs_dict (dict str/int->int): compressed connectivity matrix
                (obtained with the compress method)
        Returns:
            numpy.ndarray: full connectivity matrix as an array of shape (n_atoms x
                n_atoms)
        '''
        # rebuild the flattened upper triangular part, then unfold it
        flat = np.zeros(idcs_dict['n_entries'])
        for key in idcs_dict:
            # numeric keys hold index lists per bond order; skip 'n_entries'
            if isinstance(key, int) or key.isdigit():
                flat[idcs_dict[key]] = int(key)
        return squareform(flat)

    def compress_batch(self, connectivity_batch):
        '''
        Compress a batch of connectivity matrices.
        Args:
            connectivity_batch (list of numpy.ndarray): list of connectivity matrices
        Returns:
            list of dict: batch of compressed connectivity matrices (see compress)
        '''
        return [self.compress(matrix) for matrix in connectivity_batch]

    def decompress_batch(self, idcs_dict_batch):
        '''
        Retrieve a list of full connectivity matrices from a batch of compressed
        connectivity matrices.
        Args:
            idcs_dict_batch (list of dict): list with compressed connectivity
                matrices
        Returns:
            list numpy.ndarray: batch of full connectivity matrices (see decompress)
        '''
        return [self.decompress(idcs_dict) for idcs_dict in idcs_dict_batch]
class IndexProvider():
    '''
    Class which allows to filter a large set of molecules for desired structures
    according to provided statistics. The filtering is done using a selection string
    of the general format 'Statistics_nameDelimiterOperatorTarget_value'
    (e.g. 'C,>8' to filter for all molecules with more than eight carbon atoms where
    'C' is the statistic counting the number of carbon atoms in a molecule, ',' is the
    delimiter, '>' is the operator, and '8' is the target value).
    Args:
        statistics (numpy.ndarray):
            statistics of all molecules where columns correspond to molecules and rows
            correspond to available statistics (n_statistics x n_molecules)
        row_headlines (numpy.ndarray):
            the names of the statistics stored in each row (e.g. 'F' for the number of
            fluorine atoms or 'R5' for the number of rings of size 5)
        default_filter (str, optional):
            the default behaviour of the filter if no operator and target value are
            given (e.g. filtering for 'F' will give all molecules with at least 1
            fluorine atom if default_filter='>0' or all molecules with exactly 2
            fluorine atoms if default_filter='==2', default: '>0')
        delimiter (str, optional):
            the delimiter used to separate names of statistics from the operator and
            target value in the selection strings (default: ',')
    '''
    # dictionary mapping strings of available operators to corresponding function:
    op_dict = {'<': operator.lt,
               '<=': operator.le,
               '==': operator.eq,
               '=': operator.eq,
               '!=': operator.ne,
               '>': operator.gt,
               '>=': operator.ge}
    # raw strings prevent invalid escape sequence warnings (e.g. for '\-') that
    # plain string literals trigger in recent Python versions
    rel_re = re.compile(r'<=|<|={1,2}|!=|>=|>')  # regular expression for operators
    num_re = re.compile(r'[\-]*[0-9]+[.]*[0-9]*')  # regular expression for targets

    def __init__(self, statistics, row_headlines, default_filter='>0', delimiter=','):
        self.statistics = np.array(statistics)
        self.headlines = list(row_headlines)
        # pre-parse the operator and target value of the default filter once
        self.default_relation = self.rel_re.search(default_filter).group(0)
        self.default_number = float(self.num_re.search(default_filter).group(0))
        self.delimiter = delimiter

    def get_selected(self, selection_str, idcs=None):
        '''
        Retrieve the indices of all molecules which fulfill the selection criteria.
        The selection string is of the general format
        'Statistics_nameDelimiterOperatorTarget_value' (e.g. 'C,>8' to filter for all
        molecules with more than eight carbon atoms where 'C' is the statistic counting
        the number of carbon atoms in a molecule, ',' is the delimiter, '>' is the
        operator, and '8' is the target value).
        The following operators are available:
            '<'
            '<='
            '=='
            '!='
            '>='
            '>'
        The target value can be any positive or negative integer or float value.
        Multiple statistics can be summed using '+' (e.g. 'F+N,=0' gives all
        molecules that have no fluorine and no nitrogen atoms).
        Multiple filters can be concatenated using '&' (e.g. 'H,>8&C,=5' gives all
        molecules that have more than 8 hydrogen atoms and exactly 5 carbon atoms).
        Args:
            selection_str (str): string describing the criterion(s) for filtering
                (build as described above)
            idcs (numpy.ndarray, optional): if provided, only this subset of all
                molecules is filtered for structures fulfilling the selection criteria
        Returns:
            list of int: indices of all the molecules in the dataset that fulfill the
                selection criterion(s)
        Raises:
            ValueError: if a criterion contains more than one delimiter
        '''
        delimiter = self.delimiter
        if idcs is None:
            idcs = np.arange(len(self.statistics[0]))  # take all to begin with
        criterions = selection_str.split('&')  # split criteria
        for criterion in criterions:
            rel_strs = criterion.split(delimiter)
            # add multiple statistics if specified
            heads = rel_strs[0].split('+')
            statistics = self.statistics[self.headlines.index(heads[0])][idcs]
            for head in heads[1:]:
                statistics += self.statistics[self.headlines.index(head)][idcs]
            if len(rel_strs) == 1:
                # no operator/target provided -> fall back to the default filter
                relation = self.op_dict[self.default_relation](
                    statistics, self.default_number)
            elif len(rel_strs) == 2:
                rel = self.rel_re.search(rel_strs[1]).group(0)
                num = float(self.num_re.search(rel_strs[1]).group(0))
                relation = self.op_dict[rel](statistics, num)
            else:
                # previously this case fell through to a NameError on 'relation';
                # fail with an explicit, descriptive error instead
                raise ValueError(
                    f'Invalid filter criterion "{criterion}": expected at most '
                    f'one delimiter "{delimiter}" per criterion.')
            new_idcs = np.where(relation)[0]
            idcs = idcs[new_idcs]
        return idcs
class ProcessQ(Process):
    '''
    Multiprocessing.Process class that runs a provided function using provided
    (keyword) arguments and puts the result into a provided Multiprocessing.Queue
    object (such that the result of the function can easily be obtained by the host
    process).
    Args:
        queue (Multiprocessing.Queue): the queue into which the results of running
            the target function will be put (the object in the queue will be a tuple
            containing the provided name as first entry and the function return as
            second entry)
        name (str): name of the object (is returned as first value in the tuple put
            into the queue)
        target (callable object): the function that is executed in the process's run
            method
        args (list of any): sequential arguments target is called with
        kwargs (dict (str->any)): keyword arguments target is called with
    '''
    def __init__(self, queue, name=None, target=None, args=(), kwargs=None):
        # avoid a mutable default argument ({}) that would be shared between
        # all instances constructed without explicit kwargs
        if kwargs is None:
            kwargs = {}
        super(ProcessQ, self).__init__(None, target, name, args, kwargs)
        self._name = name
        self._q = queue
        self._target = target
        self._args = args
        self._kwargs = kwargs

    def run(self):
        '''
        Method representing the process's activity.
        Invokes the callable object passed as the target argument, if any, with
        sequential and keyword arguments taken from the args and kwargs arguments,
        respectively. Puts the string passed as name argument and the returned result
        of the callable object into the queue as (name, result).
        '''
        if self._target is not None:
            res = (self.name, self._target(*self._args, **self._kwargs))
            self._q.put(res)
| Python |
3D | rhyan10/G-SchNetOE62 | template_filter_generated.py | .py | 29,634 | 620 |
import numpy as np
import pickle
import os
import argparse
import time
import collections
from scipy.spatial.distance import pdist
from schnetpack import Properties
from utility_classes import Molecule, ConnectivityCompressor
from utility_functions import update_dict
from ase import Atoms
from ase.db import connect
import ase
def get_parser():
    """ Setup parser for command line arguments.

    Returns:
        argparse.ArgumentParser: parser with the arguments data_path, --valence,
            --filters, --store, and --print_file
    """
    main_parser = argparse.ArgumentParser()
    main_parser.add_argument('data_path',
                             help='Path to generated molecules in .mol_dict format, '
                                  'a database called "generated_molecules.db" with the '
                                  'filtered molecules along with computed statistics '
                                  '("generated_molecules_statistics.npz") will be '
                                  'stored in the same directory as the input file/s '
                                  '(if the path points to a directory, all .mol_dict '
                                  'files in the directory will be merged and filtered '
                                  'in one pass)')
    # the default list encodes (atomic number, valence) pairs, e.g. 6,4 means
    # carbon forms 4 bonds (see help text for the expected format)
    main_parser.add_argument('--valence',
                             default=[1,1,3,1, 5,3, 6,4, 7,3, 8,2, 9,1, 14,4, 15,5, 16,6, 17,1, 33,5, 34,6, 35,1, 52,6, 53,1], type=int,
                             nargs='+',
                             help='the valence of atom types in the form '
                                  '[type1 valence type2 valence ...] '
                                  '(default: %(default)s)')
    main_parser.add_argument('--filters', type=str, nargs='*',
                             default=['valence', 'disconnected', 'unique'],
                             choices=['valence', 'disconnected', 'unique'],
                             help='Select the filters applied to identify '
                                  'invalid molecules (default: %(default)s)')
    main_parser.add_argument('--store', type=str, default='valid',
                             choices=['all', 'valid'],
                             help='How much information shall be stored '
                                  'after filtering: \n"all" keeps all '
                                  'generated molecules and statistics, '
                                  '\n"valid" keeps only valid molecules'
                                  '(default: %(default)s)')
    # boolean flag, False unless provided on the command line
    main_parser.add_argument('--print_file',
                             help='Use to limit the printing if results are '
                                  'written to a file instead of the console ('
                                  'e.g. if running on a cluster)',
                             action='store_true')
    return main_parser
def _get_atoms_per_type_str(mol):
    '''
    Get a string representing the atomic composition of a molecule (i.e. the number
    of atoms per type in the molecule, e.g. H2C3O1, where the order of types is
    determined by increasing nuclear charge).
    Args:
        mol (utility_classes.Molecule or numpy.ndarray: the molecule (or an array of
            its atomic numbers)
    Returns:
        str: the atomic composition of the molecule
    '''
    if isinstance(mol, Molecule):
        n_atoms_per_type = mol.get_n_atoms_per_type()
    else:
        # an array of atomic numbers was provided instead of a Molecule object;
        # count occurrences of each known type via bincount
        n_atoms_per_type = np.bincount(mol, minlength=10)[
            np.array(list(Molecule.type_infos.keys()), dtype=int)]
    # concatenate e.g. 'H2', 'C3', ... in the order of Molecule.type_infos
    return ''.join(
        f'{Molecule.type_infos[t]["name"]}{int(n):d}'
        for t, n in zip(Molecule.type_infos.keys(), n_atoms_per_type))
def _update_dict(old_dict, **kwargs):
'''
Update an existing dictionary (any->list of any) with new entries where the new
values are either appended to the existing lists if the corresponding key already
exists in the dictionary or a new list under the new key is created.
Args:
old_dict (dict (any->list of any)): original dictionary that shall be updated
**kwargs: keyword arguments that can either be a dictionary of the same format
as old_dict (new_dict=dict (any->list of any)) which will be merged into
old_dict or a single key-value pair that shall be added (key=any, val=any)
Returns:
dict (any->list of any): the updated dictionary
'''
if 'new_dict' in kwargs:
for key in kwargs['new_dict']:
if key in old_dict:
old_dict[key] += kwargs['new_dict'][key]
else:
old_dict[key] = kwargs['new_dict'][key]
if 'val' in kwargs and 'key' in kwargs:
if kwargs['key'] in old_dict:
old_dict[kwargs['key']] += [kwargs['val']]
else:
old_dict[kwargs['key']] = [kwargs['val']]
return old_dict
def remove_disconnected(connectivity_batch, valid=None):
    '''
    Identify structures which are actually more than one molecule (as they consist of
    disconnected structures) and mark them as invalid.
    Args:
        connectivity_batch (numpy.ndarray): batch of connectivity matrices
        valid (numpy.ndarray, optional): array of the same length as
            connectivity_batch which flags molecules as valid, if None all
            connectivity matrices are considered to correspond to valid molecules
            in the beginning (default: None)
    Returns:
        dict (str->numpy.ndarray): a dictionary containing an array which marks
            molecules as valid under the key 'valid' (identified disconnected
            structures will now be marked as invalid in contrast to the flag in
            input argument valid)
    '''
    if valid is None:
        valid = np.ones(len(connectivity_batch), dtype=bool)
    for idx, con_mat in enumerate(connectivity_batch):
        if not valid[idx]:
            continue  # skip molecules that already failed earlier checks
        # breadth-first search over the bond graph starting from atom 0
        visited = {0}
        frontier = collections.deque([0])
        while frontier:
            current = frontier.popleft()
            for neighbor in np.argwhere(con_mat[current] > 0).flatten():
                if neighbor not in visited:
                    visited.add(neighbor)
                    frontier.append(neighbor)
        # the structure is disconnected if the search missed at least one atom
        if len(visited) < len(con_mat):
            valid[idx] = False
    return {'valid': valid}
def check_valency(positions, numbers, valence, filter_by_valency=True,
                  print_file=True, prog_str=None, picklable_mols=False):
    '''
    Build utility_classes.Molecule objects from provided atom positions and types
    of a set of molecules and assess whether they are meeting the valency
    constraints or not (i.e. all of their atoms have the correct number of bonds).
    Note that all input molecules need to have the same number of atoms.
    Args:
        positions (list of numpy.ndarray): list of positions of atoms in euclidean
            space (n_atoms x 3) for each molecule
        numbers (numpy.ndarray): list of nuclear charges/types of atoms
            (e.g. 1 for hydrogens, 6 for carbons etc.) for each molecule
        valence (numpy.ndarray): list of valency of each atom type where the index in
            the list corresponds to the type (e.g. [0, 1, 0, 0, 0, 0, 2, 3, 4, 1] for
            qm9 molecules as H=type 1 has valency of 1, O=type 6 has valency of 2,
            N=type 7 has valency of 3 etc.)
        filter_by_valency (bool, optional): whether molecules that fail the valency
            check should be marked as invalid, else all input molecules will be
            classified as valid but the connectivity matrix is still computed and
            returned (default: True)
        print_file (bool, optional): set True to suppress printing of progress string
            (default: True)
        prog_str (str, optional): specify a custom progress string (default: None)
        picklable_mols (bool, optional): set True to remove all the information in
            the returned list of utility_classes.Molecule objects that can not be
            serialized with pickle (e.g. the underlying Open Babel ob.Mol object,
            default: False)
    Returns:
        dict (str->list/numpy.ndarray): a dictionary containing a list of
            utility_classes.Molecule objects under the key 'mols', a numpy.ndarray
            with the corresponding (n_atoms x n_atoms) connectivity matrices under
            the key 'connectivity', and a numpy.ndarray (key 'valid') that marks
            whether a molecule has passed (entry=1) or failed (entry=0) the valency
            check if filter_by_valency is True (otherwise it will be 1 everywhere)
    '''
    # all molecules in the batch share the same atom count (see docstring)
    n_atoms = len(numbers[0])
    n_mols = len(numbers)
    # print progress at most every 30 molecules
    thresh = n_mols if n_mols < 30 else 30
    connectivity = np.zeros((len(positions), n_atoms, n_atoms))
    valid = np.ones(len(positions), dtype=bool)
    mols = []
    for i, (pos, num) in enumerate(zip(positions, numbers)):
        mol = Molecule(pos, num, store_positions=False)
        con_mat = mol.get_connectivity()
        random_ord = range(len(pos))
        # filter incorrect valence if desired
        if filter_by_valency:
            nums = num
            # try to fix connectivity if it isn't correct already; each retry
            # rebuilds the Molecule from a random permutation of its atoms
            for _ in range(10):
                if np.all(np.sum(con_mat, axis=0) == valence[nums]):
                    val = True
                    break
                else:
                    val = False
                    con_mat = mol.get_fixed_connectivity()
                    if np.all(
                            np.sum(con_mat, axis=0) == valence[nums]):
                        val = True
                        break
                    random_ord = np.random.permutation(range(len(pos)))
                    mol = Molecule(pos[random_ord], num[random_ord])
                    con_mat = mol.get_connectivity()
                    nums = num[random_ord]
            valid[i] = val
            if ((i + 1) % thresh == 0) and not print_file \
                    and prog_str is not None:
                # '\033[K' clears the current console line before re-printing
                print('\033[K', end='\r', flush=True)
                print(f'{prog_str} ({100 * (i + 1) / n_mols:.2f}%)',
                      end='\r', flush=True)
        # reverse random order and save fixed connectivity matrix
        rand_ord_rev = np.argsort(random_ord)
        connectivity[i] = con_mat[rand_ord_rev][:, rand_ord_rev]
        if picklable_mols:
            # populate cached representations before dropping SwigPyObjects so
            # they remain available after pickling
            mol.get_fp_bits()
            mol.get_can()
            mol.get_mirror_can()
            mol.remove_unpicklable_attributes(restorable=False)
        mols += [mol]
    return {'mols': mols, 'connectivity': connectivity, 'valid': valid}
# NOTE(review): this is an exact, byte-for-byte duplicate of the
# remove_disconnected definition earlier in this file; at import time this
# second definition silently rebinds the name to an identical function.
# Consider deleting one of the two copies.
def remove_disconnected(connectivity_batch, valid=None):
    '''
    Identify structures which are actually more than one molecule (as they consist of
    disconnected structures) and mark them as invalid.
    Args:
        connectivity_batch (numpy.ndarray): batch of connectivity matrices
        valid (numpy.ndarray, optional): array of the same length as connectivity_batch
            which flags molecules as valid, if None all connectivity matrices are
            considered to correspond to valid molecules in the beginning (default:
            None)
    Returns:
        dict (str->numpy.ndarray): a dictionary containing an array which marks
            molecules as valid under the key 'valid' (identified disconnected
            structures will now be marked as invalid in contrast to the flag in input
            argument valid)
    '''
    if valid is None:
        valid = np.ones(len(connectivity_batch), dtype=bool)
    # find disconnected parts for every given connectivity matrix
    for i, con_mat in enumerate(connectivity_batch):
        # only work with molecules categorized as valid
        if not valid[i]:
            continue
        # breadth-first search over the bond graph starting from atom 0
        seen, queue = {0}, collections.deque([0])
        while queue:
            vertex = queue.popleft()
            for node in np.argwhere(con_mat[vertex] > 0).flatten():
                if node not in seen:
                    seen.add(node)
                    queue.append(node)
        # if the seen nodes do not include all nodes, there are disconnected
        # parts and the molecule is invalid
        if seen != {*range(len(con_mat))}:
            valid[i] = False
    return {'valid': valid}
def filter_unique(mols, valid=None, use_bits=False):
    '''
    Identify duplicate molecules among a large amount of generated structures.
    The first found structure of each kind is kept as valid original and all following
    duplicating structures are marked as invalid (the molecular fingerprint and
    canonical smiles representation is used which means that different spatial
    conformers of the same molecular graph cannot be distinguished).
    Args:
        mols (list of utility_classes.Molecule): list of all generated molecules
        valid (numpy.ndarray, optional): array of the same length as mols which flags
            molecules as valid (invalid molecules are not considered in the
            comparison process), if None, all molecules in mols are considered as
            valid (default: None)
        use_bits (bool, optional): set True to use the list of non-zero bits instead
            of the pybel.Fingerprint object when comparing molecules (results are
            identical, default: False)
    Returns:
        numpy.ndarray: array of the same length as mols which flags molecules as
            valid (identified duplicates are now marked as invalid in contrast to
            the flag in input argument valid)
        numpy.ndarray: array of length n_mols where entry i is -1 if molecule i is
            an original structure (not a duplicate) and otherwise it is the index j
            of the original structure that molecule i duplicates (j<i)
        numpy.ndarray: array of length n_mols that is 0 for all duplicates and the
            number of identified duplicates for all original structures (therefore
            the sum over this array is the total number of identified duplicates)
    '''
    if valid is None:
        valid = np.ones(len(mols), dtype=bool)
    else:
        valid = valid.copy()
    # accepted originals, grouped by atomic composition string for fast lookup
    seen_by_composition = {}
    duplicating = -np.ones(len(mols), dtype=int)
    duplicate_count = np.zeros(len(mols), dtype=int)
    for idx, mol in enumerate(mols):
        if not valid[idx]:
            continue
        composition = _get_atoms_per_type_str(mol)
        original_idx = None
        # only molecules with identical composition can be duplicates
        for candidate_idx, candidate in seen_by_composition.get(composition, []):
            # compare fingerprints first, then canonical (and mirrored) smiles
            if mol.tanimoto_similarity(candidate, use_bits=use_bits) >= 1:
                if (mol.get_can() == candidate.get_can()
                        or mol.get_can() == candidate.get_mirror_can()):
                    original_idx = candidate_idx
                    break
        if original_idx is None:
            # first occurrence of this structure -> register it as an original
            seen_by_composition = _update_dict(
                seen_by_composition, key=composition, val=(idx, mol))
        else:
            valid[idx] = False
            duplicating[idx] = original_idx
            duplicate_count[original_idx] += 1
    return valid, duplicating, duplicate_count
if __name__ == '__main__':
    # Entry point: load generated molecules (.mol_dict pickle or a folder of
    # them), filter them according to --filters (valence / disconnected /
    # unique), gather per-molecule statistics, and export the survivors to an
    # ASE database next to the input data.
    parser = get_parser()
    args = parser.parse_args()
    print_file = args.print_file
    printed_todos = False
    print(args.data_path)
    # read input file or fuse dictionaries if data_path is a folder
    if not os.path.isdir(args.data_path):
        if not os.path.isfile(args.data_path):
            print(f'\n\nThe specified data path ({args.data_path}) is neither a file '
                  f'nor a directory! Please specify a different data path.')
            raise FileNotFoundError
        else:
            # print(args.data_path)
            with open(args.data_path, 'rb') as f:
                res = pickle.load(f)  # read input file
            # with open("./models/model1/generated/generated.mol_dict", 'rb') as f:
            #    res2 = pickle.load(f)
            # for i in res2.keys():
            #    try:
            #        res[i]['_positions'] = np.array(list(res[i]['_positions']) + list(res2[i]['_positions']))
            #        res[i]['_atomic_numbers'] = np.array(list(res[i]['_atomic_numbers']) + list(res2[i]['_atomic_numbers']))
            #    except:
            #        res[i] = {}
            #        res[i]['_positions'] = list(res2[i]['_positions'])
            #        res[i]['_atomic_numbers'] = list(res2[i]['_atomic_numbers'])
            #print(len(res)))
            # output db is placed next to the input file
            target_db = os.path.join(os.path.dirname(args.data_path),
                                     'generated_molecules.db')
    else:
        print("here")
        print(f'\n\nFusing .mol_dict files in folder {args.data_path}...')
        mol_files = [f for f in os.listdir(args.data_path)
                     if f.endswith(".mol_dict")]
        if len(mol_files) == 0:
            print(f'Could not find any .mol_dict files at {args.data_path}! Please '
                  f'specify a different data path!')
            raise FileNotFoundError
        res = {}
        res2 = {}
        # merge all per-run .mol_dict files into a single dictionary
        for file in mol_files:
            with open(os.path.join(args.data_path, file), 'rb') as f:
                cur_res = pickle.load(f)
                update_dict(res, cur_res)
            # NOTE(review): res2 is filled from a hard-coded path here but never
            # used afterwards — looks like leftover experimental code; confirm.
            with open("./models/model1/generated/generated.mol_dict", 'rb') as f:
                cur_res2 = pickle.load(f)
                update_dict(res2, cur_res2)
        target_db = os.path.join(args.data_path, 'generated_molecules.db')
        print("Done")
    # compute array with valence of provided atom types
    # (args.valence alternates [type, valence, type, valence, ...])
    max_type = max(args.valence[::2])
    valence = np.zeros(max_type+1, dtype=int)
    valence[args.valence[::2]] = args.valence[1::2]
    # print the chosen settings
    valence_str = ''
    for i in range(max_type+1):
        if valence[i] > 0:
            valence_str += f'type {i}: {valence[i]}, '
    filters = []
    if 'valence' in args.filters:
        filters += ['valency']
    if 'disconnected' in args.filters:
        filters += ['connectedness']
    if 'unique' in args.filters:
        filters += ['uniqueness']
    # build a human-readable, comma/and-separated list of active filters
    if len(filters) >= 3:
        edit = ', '
    else:
        edit = ' '
    for i in range(len(filters) - 1):
        filters[i] = filters[i] + edit
    if len(filters) >= 2:
        filters = filters[:-1] + ['and '] + filters[-1:]
    string = ''.join(filters)
    print(f'\n\n1. Filtering molecules according to {string}...')
    print(f'\nTarget valence:\n{valence_str[:-2]}\n')
    # initial setup of array for statistics and some counters
    n_generated = 0
    n_valid = 0
    n_non_unique = 0
    # row headers of the statistics matrix: counts of atoms per element,
    # bonds per type pair (e.g. C2C = double C-C bond), and ring sizes (RX)
    stat_heads = ['n_atoms', 'id', 'valid', 'duplicating', 'n_duplicates',
                  'known', 'equals', 'C', 'N', 'O', 'F', 'H','B','Li','Si','P','S','Cl','As','Se','Br','Te','I', 'H1C', 'H1N',
                  'H1O', 'C1C', 'C2C', 'C3C', 'C1N', 'C2N', 'C3N', 'C1O',
                  'C2O', 'C1F', 'N1N', 'N2N', 'N1O', 'N2O', 'N1F', 'O1O',
                  'O1F', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R>8']
    stats = np.empty((len(stat_heads), 0))
    all_mols = []
    connectivity_compressor = ConnectivityCompressor()
    # iterate over generated molecules by length (all generated molecules with n
    # atoms are stored in one batch, so we loop over all available lengths n)
    # this is useful e.g. for finding duplicates, since we only need to compare
    # molecules of the same length (and can actually further narrow down the
    # candidates by looking at the exact atom type composition of each molecule)
    start_time = time.time()
    for n_atoms in res:
        # skip non-batch keys (strings like 'n_generated') and empty batch 0
        if not isinstance(n_atoms, int) or n_atoms == 0:
            continue
        prog_str = lambda x: f'Checking {x} for molecules of length {n_atoms}'
        work_str = 'valence' if 'valence' in args.filters else 'dictionary'
        if not print_file:
            print('\033[K', end='\r', flush=True)
            print(prog_str(work_str) + ' (0.00%)', end='\r', flush=True)
        else:
            print(prog_str(work_str), flush=True)
        d = res[n_atoms]  # dictionary containing molecules of length n_atoms
        all_pos = d[Properties.R]  # n_mols x n_atoms x 3 matrix with atom positions
        all_numbers = d[Properties.Z]  # n_mols x n_atoms matrix with atom types
        n_mols = len(all_pos)
        #if args.threads <= 0:
        # check_valency also yields connectivity matrices and Molecule objects
        results = check_valency(all_pos, all_numbers, valence,
                                'valence' in args.filters, print_file,
                                prog_str(work_str))
        connectivity = results['connectivity']
        mols = results['mols']
        valid = np.ones(n_mols, dtype=int)  # all molecules are valid in the beginning
        # check valency of molecules with length n
        if 'valence' in args.filters:
            if not printed_todos:
                print('Please implement a procedure to check the valence in generated '
                      'molecules! Skipping valence check...')
            # TODO
            # Implement a procedure to assess the valence of generated molecules here!
            # You can adapt and use the Molecule class in utility_classes.py,
            # but the current code is tailored towards the QM9 dataset. In fact,
            # the OpenBabel algorithm to kekulize bond orders is not very reliable
            # and we implemented some heuristics in the Molecule class to fix these
            # flaws for structures made of C, N, O, and F atoms. However, when using
            # more complex structures with a more diverse set of atom types, we think
            # that the reliability of bond assignment in OpenBabel might further
            # degrade and therefore do no recommend to use valence checks for
            # analysis unless it is very important for your use case.
        # detect molecules with disconnected parts if desired
        valid = remove_disconnected(connectivity, valid)['valid']
        valid, duplicating, duplicate_count = \
            filter_unique(mols, valid, use_bits=False)
        #print(valid)
        #time.sleep(100)
        # TODO
        # Implement a procedure to assess the connectedness of generated
        # molecules here! You can for example use a connectivity matrix obtained
        # from kekulized bond orders (as we do in our QM9 experiments) or
        # calculate the connectivity with a simple cutoff (e.g. all atoms less
        # then 2.0 angstrom apart are connected, see get_connectivity function in
        # template_preprocess_dataset script).
        # We will remove all molecules where two atoms are closer than 0.3
        # identify identical molecules (e.g. using fingerprints)
        # TODO
        # Implement procedure to identify duplicate structures here.
        # This can (heuristically) be achieved in many ways but perfectly identifying
        # all duplicate structures without false positives or false negatives is
        # probably impossible (or computationally prohibitive).
        # For our QM9 experiments, we compared fingerprints and canonical smiles
        # strings of generated molecules using the Molecule class in utility_classes.py
        # that provides functions to obtain these. It would also be possible to compare
        # learned embeddings, e.g. from SchNet or G-SchNet, either as an average over
        # all atoms, over all atoms of the same type, or combined with an algorithm
        # to find the best match between atoms of two molecules considering the
        # distances between embeddings. A similar procedure could be implemented
        # using the root-mean-square deviation (RMSD) of atomic positions. Then it
        # would be required to find the best match between atoms of two structures if
        # they are rotated such that the RMSD given the match is minimal. Again,
        # the best procedure really depends on the experimental setup, e.g. the
        # goals of the experiment, used data and size of molecules in the dataset etc.
        # duplicate_count contains the number of duplicates found for each structure
        # NOTE(review): the two resets below clobber the duplicating /
        # duplicate_count arrays just computed by filter_unique above, so the
        # 'unique' filter and the duplicate counters become no-ops — this looks
        # like leftover template code; confirm intended behavior.
        duplicate_count = np.zeros(n_mols, dtype=int)
        # duplicating contains -1 for original structures and the id of the duplicated
        # original structure for duplicates
        duplicating = -np.ones(n_mols, dtype=int)
        # remove duplicate structures from list of valid molecules if desired
        if 'unique' in args.filters:
            valid[duplicating != -1] = 0
        # count number of non-unique structures
        n_non_unique += np.sum(duplicate_count)
        # store list of valid molecules in dictionary
        d.update({'valid': valid})
        # collect statistics of generated data
        n_generated += len(valid)
        n_valid += np.sum(valid)
        # count number of atoms per type (here for C, N, O, F, and H as example)
        n_of_types = [np.sum(all_numbers == i, axis=1) for i in [6, 7, 8, 9, 1,3,5,14,15,16, 17,33,34, 35, 52, 53]]
        # one statistics column per molecule; bond and ring rows are left at
        # zero here (they are filled by other tooling in the pipeline)
        stats_new = np.stack(
            (np.ones(len(valid)) * n_atoms,  # n_atoms
             np.arange(0, len(valid)),  # id
             valid,  # valid
             duplicating,  # id of duplicated molecule
             duplicate_count,  # number of duplicates
             -np.ones(len(valid)),  # known
             -np.ones(len(valid)),  # equals
             *n_of_types,  # n_atoms per type
             *np.zeros((19, len(valid))),  # n_bonds per type pairs
             *np.zeros((7, len(valid)))
             ),
            axis=0)
        stats = np.hstack((stats, stats_new))
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    print(f'Needed {h:d}h{m:02d}m{s:02d}s.')
    # Update and print results
    res.update({'n_generated': n_generated,
                'n_valid': n_valid,
                'stats': stats,
                'stat_heads': stat_heads})
    print(f'Number of generated molecules: {n_generated}\n'
          f'Number of duplicate molecules: {n_non_unique}')
    if 'unique' in args.filters:
        print(f'Number of unique and valid molecules: {n_valid}')
    else:
        print(f'Number of valid molecules (including duplicates): {n_valid}')
    # Remove invalid molecules from results if desired
    if args.store != 'all':
        shrunk_res = {}
        shrunk_stats = np.empty((len(stats), 0))
        i = 0
        for key in res:
            # string keys carry metadata and are copied through unchanged
            if isinstance(key, str):
                shrunk_res[key] = res[key]
                continue
            if key == 0:
                continue
            d = res[key]
            # stats columns for this batch occupy the range [start, end)
            start = i
            end = i + len(d['valid'])
            idcs = np.where(d['valid'])[0]
            if len(idcs) < 1:
                i = end
                continue
            # shrink stats
            idx_id = stat_heads.index('id')
            idx_known = stat_heads.index('known')
            new_stats = stats[:, start:end]
            new_stats = new_stats[:, idcs]
            new_stats[idx_id] = np.arange(len(new_stats[idx_id]))  # adjust ids
            shrunk_stats = np.hstack((shrunk_stats, new_stats))
            # shrink positions and atomic numbers
            shrunk_res[key] = {Properties.R: d[Properties.R][idcs],
                               Properties.Z: d[Properties.Z][idcs]}
            i = end
        shrunk_res['stats'] = shrunk_stats
        res = shrunk_res
    # transfer results to ASE db
    # get filename that is not yet taken for db
    if os.path.isfile(target_db):
        file_name, _ = os.path.splitext(target_db)
        expand = 0
        # append _1, _2, ... until an unused filename is found
        while True:
            expand += 1
            new_file_name = file_name + '_' + str(expand)
            if os.path.isfile(new_file_name + '.db'):
                continue
            else:
                target_db = new_file_name + '.db'
                break
    print(f'Transferring generated molecules to database at {target_db}...')
    # open db
    with connect(target_db) as conn:
        # store metadata
        conn.metadata = {'n_generated': int(n_generated),
                         'n_non_unique': int(n_non_unique),
                         'n_valid': int(n_valid),
                         'non_unique_removed_from_valid': 'unique' in args.filters}
        # store molecules
        for n_atoms in res:
            if isinstance(n_atoms, str) or n_atoms == 0:
                continue
            d = res[n_atoms]
            all_pos = d[Properties.R]
            all_numbers = d[Properties.Z]
            for pos, num in zip(all_pos, all_numbers):
                at = Atoms(num, positions=pos)
                conn.write(at)
    # store gathered statistics in separate file
    np.savez_compressed(os.path.splitext(target_db)[0] + f'_statistics.npz',
                        stats=res['stats'], stat_heads=res['stat_heads'])
    print(target_db)
    # re-read the db and keep only molecules with an even electron count
    # (sum of atomic numbers) and more than two atoms
    geoms = ase.io.read(target_db,":")
    print(len(geoms))
    molecule_to_keep = []
    for geom in geoms:
        #os = geom.get_positions()
        atypes = geom.get_atomic_numbers()
        if(sum(atypes)%2 == 0) and len(atypes) > 2:
            molecule_to_keep.append(geom)
        else:
            pass
    print(len(molecule_to_keep))
    # overwrite the db with the filtered set
    ase.io.write(target_db, molecule_to_keep)
    print("Done")
| Python |
3D | rhyan10/G-SchNetOE62 | nn_classes.py | .py | 12,412 | 334 | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
# "Iterable" must come from collections.abc: the alias at the top level of
# "collections" was deprecated since Python 3.3 and removed in Python 3.10,
# so "from collections import Iterable" raises ImportError on modern Python.
from collections.abc import Iterable
import schnetpack as spk
from schnetpack.nn import MLP
from schnetpack.metrics import Metric
### OUTPUT MODULE ###
class AtomwiseWithProcessing(nn.Module):
    r"""
    Atom-wise output network with optional pre- and post-processing stages.

    The representation produced by an upstream model is (optionally) passed
    through pre-processing layers, fed into an atom-wise MLP, and the MLP
    output is (optionally) passed through post-processing layers.

    Args:
        n_in (int): input dimension of representation (default: 128)
        n_out (int): output dimension (default: 1)
        n_layers (int): number of atom-wise dense layers in output network
            (default: 5)
        n_neurons (list of int or None): number of neurons in each hidden layer
            of the output network; if `None`, sizes are interpolated linearly
            between n_in and n_out.
        activation (function): activation function for hidden layers
            (default: spk.nn.activations.shifted_softplus).
        preprocess_layers (nn.Module or list of nn.Module): layers applied to
            the inputs before the output network (default: None).
        postprocess_layers (nn.Module or list of nn.Module): layers applied to
            the output of the output network (default: None).
        in_key (str): key under which the representation is found in the inputs
            dictionary; automatically taken from the last pre-processing layer
            if any are given (default: 'representation').
        out_key (str): key under which the result is stored in the returned
            dictionary; if `None`, the raw tensor is returned (default: 'y')

    Returns:
        result: dictionary with predictions stored in result[out_key]
    """

    def __init__(self, n_in=128, n_out=1, n_layers=5, n_neurons=None,
                 activation=spk.nn.activations.shifted_softplus,
                 preprocess_layers=None, postprocess_layers=None,
                 in_key='representation', out_key='y'):
        super(AtomwiseWithProcessing, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.n_layers = n_layers
        self.in_key = in_key
        self.out_key = out_key

        # wrap a list of pre-processing layers in a ModuleList and read the
        # representation from the key produced by the last of them
        if isinstance(preprocess_layers, Iterable):
            self.preprocess_layers = nn.ModuleList(preprocess_layers)
            self.in_key = self.preprocess_layers[-1].out_key
        elif preprocess_layers is None:
            self.preprocess_layers = None
        else:
            self.preprocess_layers = preprocess_layers
            self.in_key = preprocess_layers.out_key

        if isinstance(postprocess_layers, Iterable):
            self.postprocess_layers = nn.ModuleList(postprocess_layers)
        else:
            self.postprocess_layers = postprocess_layers

        if n_neurons is None:
            # linearly interpolate hidden-layer sizes between n_in and n_out
            sizes = np.linspace(n_in, n_out, n_layers + 1).astype(int)
            n_neurons = list(sizes[1:-1])
        self.out_net = MLP(n_in, n_out, n_neurons, n_layers, activation)

        self.derivative = None  # don't compute derivative w.r.t. inputs

    def forward(self, inputs):
        """
        Compute layer output and apply pre-/postprocessing if specified.

        Args:
            inputs (dict of torch.Tensor): batch of input values.

        Returns:
            torch.Tensor or dict: layer output, wrapped in a dictionary under
            `self.out_key` unless that key is `None`.
        """
        # run pre-processing layers (single module or ModuleList)
        pre = self.preprocess_layers
        if pre is not None:
            if isinstance(pre, Iterable):
                for layer in pre:
                    inputs = layer(inputs)
            else:
                inputs = pre(inputs)

        # extract the (pre-processed) representation; some layers return tuples
        rep = inputs[self.in_key]
        if isinstance(rep, tuple):
            rep = rep[0]

        # atom-wise output network
        result = self.out_net(rep)

        # run post-processing layers (each receives inputs and current result)
        post = self.postprocess_layers
        if post is not None:
            if isinstance(post, Iterable):
                for layer in post:
                    result = layer(inputs, result)
            else:
                result = post(inputs, result)

        # wrap in a dictionary unless out_key was explicitly disabled
        if self.out_key is not None:
            result = {self.out_key: result}
        return result
### METRICS ###
class KLDivergence(Metric):
    r"""
    Running mean KL-divergence metric.

    The model output is log-softmax-normalized and compared against the target
    distributions with the KL divergence; results are accumulated over batches
    and averaged in `aggregate`.

    Args:
        target (str): name of target property in the batch dictionary.
        model_output ([int], [str]): index/key (or list of them) used to unpack
            the desired output from the model result, e.g. ['x', 'y'] to get
            output['x']['y'] (default: 'y').
        name (str): name used in logging for this metric; defaults to
            `KLD_[target]` when `None` (default: None).
        mask (str): key of a mask in the examined batch which hides irrelevant
            output values; no mask is applied when `None` (default: None).
        inverse_mask (bool): whether the mask needs to be inverted prior to
            application (default: False).
    """

    def __init__(self, target='_labels', model_output='y', name=None,
                 mask=None, inverse_mask=False):
        if name is None:
            name = 'KLD_' + target
        super(KLDivergence, self).__init__(name)
        self.target = target
        self.model_output = model_output
        self.loss = 0.
        self.n_entries = 0.
        self.mask_str = mask
        self.inverse_mask = inverse_mask

    def reset(self):
        # start a fresh accumulation period
        self.loss = 0.
        self.n_entries = 0.

    def add_batch(self, batch, result):
        # true label distributions
        labels = batch[self.target]

        # drill down into the (possibly nested) model output
        pred = result
        if self.model_output is not None:
            keys = self.model_output
            if not isinstance(keys, list):
                keys = [keys]
            for key in keys:
                pred = pred[key]

        # normalize predictions and compute entry-wise KL divergence,
        # then sum over the distribution axis
        log_pred = F.log_softmax(pred, -1)
        kld = F.kl_div(log_pred, labels, reduction='none')
        kld = torch.sum(kld, -1)

        # mask out padded entries if a mask key was configured
        if self.mask_str is not None:
            atom_mask = batch[self.mask_str]
            if self.inverse_mask:
                atom_mask = 1. - atom_mask
            kld = torch.where(atom_mask > 0, kld, torch.zeros_like(kld))
            n_entries = torch.sum(atom_mask > 0)
        else:
            n_entries = torch.prod(torch.tensor(kld.size()))

        # accumulate totals on the CPU
        self.n_entries += n_entries.detach().cpu().data.numpy()
        self.loss += torch.sum(kld).detach().cpu().data.numpy()

    def aggregate(self):
        # mean KL divergence over all counted entries (guard against /0)
        return self.loss / max(self.n_entries, 1.)
### PRE- AND POST-PROCESSING LAYERS ###
class EmbeddingMultiplication(nn.Module):
    r"""
    Layer that multiplies embeddings of given types with the representation.

    Args:
        embedding (torch.nn.Embedding instance): the embedding layer used to embed
            atom types.
        in_key_types (str): the keyword to obtain types for embedding from inputs.
        in_key_representation (str): the keyword to obtain the representation from
            inputs.
        out_key (str): the keyword used to store the calculated product in the
            inputs dictionary.
    """

    def __init__(self, embedding, in_key_types='_next_types',
                 in_key_representation='representation',
                 out_key='preprocessed_representation'):
        super(EmbeddingMultiplication, self).__init__()
        self.embedding = embedding
        self.in_key_types = in_key_types
        self.in_key_representation = in_key_representation
        self.out_key = out_key

    def forward(self, inputs):
        """
        Compute layer output.

        Args:
            inputs (dict of torch.Tensor): batch of input values containing the
                atomic numbers for embedding as well as the representation.

        Returns:
            dict of torch.Tensor: the inputs dictionary, updated in place with
            the product stored under `self.out_key`.

        Raises:
            ValueError: if the tensor of types is neither 1- nor 2-dimensional.
        """
        # get types to embed from inputs
        types = inputs[self.in_key_types]
        st = types.size()
        # embed types (1d: one type per molecule, 2d: multiple types per
        # molecule); previously an unsupported rank fell through both branches
        # and caused a confusing NameError on `emb` below — fail early instead
        if len(st) == 1:
            emb = self.embedding(types.view(st[0], 1))
        elif len(st) == 2:
            emb = self.embedding(types.view(*st[:-1], 1, st[-1]))
        else:
            raise ValueError(
                f'Expected a 1d or 2d tensor of types, got {len(st)}d.')
        # get representation (may be wrapped in a tuple by upstream layers)
        if isinstance(inputs[self.in_key_representation], tuple):
            repr = inputs[self.in_key_representation][0]
        else:
            repr = inputs[self.in_key_representation]
        if len(st) == 2:
            # if multiple types are provided per molecule, expand
            # dimensionality of representation
            repr = repr.view(*repr.size()[:-1], 1, repr.size()[-1])
        # multiply embedded types with representation
        features = repr * emb
        # store result in input dictionary
        inputs.update({self.out_key: features})
        return inputs
class NormalizeAndAggregate(nn.Module):
    r"""
    Layer that normalizes and aggregates given input along specifiable axes.

    Args:
        normalize (bool): set True to normalize the input (default: True).
        normalization_axis (int): axis along which normalization is applied
            (default: -1).
        normalization_mode (str): which normalization to apply (currently only
            'logsoftmax' is supported, default: 'logsoftmax').
        aggregate (bool): set True to aggregate the input (default: True).
        aggregation_axis (int): axis along which aggregation is applied
            (default: -1).
        aggregation_mode (str): which aggregation to apply (currently 'sum' and
            'mean' are supported, default: 'sum').
        keepdim (bool): set True to keep the number of dimensions after
            aggregation (default: True).
        mask (str): key to extract a mask from the inputs dictionary,
            which hides values during aggregation (default: None).
        squeeze (bool): whether to squeeze the input before applying
            normalization (default: False).

    Returns:
        torch.Tensor: input after normalization and aggregation along the
        specified axes.
    """

    def __init__(self, normalize=True, normalization_axis=-1,
                 normalization_mode='logsoftmax', aggregate=True,
                 aggregation_axis=-1, aggregation_mode='sum', keepdim=True,
                 mask=None, squeeze=False):
        super(NormalizeAndAggregate, self).__init__()
        # Default both stages to None (= no-op) before checking the modes.
        # Previously, normalize=True with an unsupported mode (or aggregate=True
        # with an unsupported mode) left the attribute unset, so forward()
        # crashed with AttributeError; now unsupported modes degrade to a no-op.
        self.normalization = None
        if normalize and normalization_mode.lower() == 'logsoftmax':
            self.normalization = nn.LogSoftmax(normalization_axis)
        self.aggregation = None
        if aggregate:
            mode = aggregation_mode.lower()
            if mode == 'sum':
                self.aggregation = \
                    spk.nn.base.Aggregate(aggregation_axis, mean=False,
                                          keepdim=keepdim)
            elif mode == 'mean':
                self.aggregation = \
                    spk.nn.base.Aggregate(aggregation_axis, mean=True,
                                          keepdim=keepdim)
        self.mask = mask
        self.squeeze = squeeze

    def forward(self, inputs, result):
        """
        Compute layer output.

        Args:
            inputs (dict of torch.Tensor): batch of input values containing the
                mask (if `self.mask` is set).
            result (torch.Tensor): batch of result values to which
                normalization and aggregation is applied.

        Returns:
            torch.Tensor: normalized and aggregated result.
        """
        res = result
        if self.squeeze:
            res = torch.squeeze(res)
        if self.normalization is not None:
            res = self.normalization(res)
        if self.aggregation is not None:
            # the optional mask hides padded entries during aggregation
            if self.mask is not None:
                mask = inputs[self.mask]
            else:
                mask = None
            res = self.aggregation(res, mask)
        return res
| Python |
3D | rhyan10/G-SchNetOE62 | loopHL.py | .py | 5,941 | 113 | import numpy as np
import statistics
import ase.io
import ase
import ase.io.xyz
import argparse
import subprocess
import ase.io
import pickle
import sys
import shutil
import time
sys.path.append('../G-SchNetOE62')
import utility_functions
from utility_functions import print_atom_bond_ring_stats
from ase import neighborlist
from ase.build import molecule
import numpy as np
import os
from scipy import sparse
from analysis import MoleculeAnalysis
from analysis import SchNetHAnalysis
import matplotlib.pyplot as plt
# Global matplotlib styling for any figures produced downstream.
size=22
params={'legend.fontsize': 'large',
          'figure.figsize': (7,5),
          'axes.labelsize':size,
          'axes.titlesize':size,
          'ytick.labelsize':size,
          'xtick.labelsize':size,
          'axes.titlepad':10
}
plt.rcParams.update(params)
#fig=plt.figure()
if __name__ == "__main__":
    # Iterative biasing loop: train G-SchNet, generate molecules, filter them,
    # predict orbital energies with SchNet models, keep only structures with a
    # small HOMO-LUMO gap, and use them as training data for the next round.
    bond_length_pairs = [['C','C'],['C','O'],['C','H']]
    bond_angle_trios = [['C','C','C'],['C','O','C'],['C','C','O']]
    number_of_loops = 20
    generated_database_size = 200000
    datapath = "./data/"
    database_size = 13635  # size of the initial training database
    for i in range(7,number_of_loops):
        # skip training in the first processed iteration (resumes from i=7)
        if i!=7:
            train = subprocess.Popen(['python ../G-SchNetOE62/gschnet_script.py train gschnet '+datapath+' ./models/iteration'+str(i)+'/ --pretrained_path ./models/iteration'+str(i-1)+'/ --dataset_name template_data --split '+str(round(database_size*0.8))+' '+str(round(database_size*0.1))+' --cuda --draw_random_samples 10 --batch_size 1 --max_epochs 5000'],shell=True)
            train.wait()
        # generate candidate molecules with the current model
        generate = subprocess.Popen(['python ../G-SchNetOE62/gschnet_script.py generate gschnet ./models/iteration'+str(i)+'/ '+str(generated_database_size)+' --cuda'],shell=True)
        generate.wait()
        # filter generated molecules (validity/uniqueness) into a .db file
        filter_ = subprocess.Popen(['python ../G-SchNetOE62/template_filter_generated.py ./models/iteration'+str(i)+'/generated/generated.mol_dict'],shell=True)
        filter_.wait()
        path = os.path.join("./models/iteration"+str(i)+"/", "analysis")
        os.mkdir(path)
        geoms = ase.io.read('./models/iteration'+str(i)+'/generated/generated_molecules.db',':')
        # structural statistics of the generated set
        number_of_molecules = MoleculeAnalysis.get_molecule_sizes(geoms)
        ring_data = MoleculeAnalysis.get_rings(geoms)
        with open('./models/iteration'+str(i)+'/analysis/number_of_molecules.pkl', 'wb') as f:
            pickle.dump(number_of_molecules, f)
        with open('./models/iteration'+str(i)+'/analysis/rings.pkl', 'wb') as f:
            pickle.dump(ring_data, f)
        for bond in bond_length_pairs:
            bond_lengths = MoleculeAnalysis.get_bond_distances(geoms,bond)
            with open('./models/iteration'+str(i)+'/analysis/'+(bond[0]+bond[1])+'.pkl', 'wb') as f:
                pickle.dump(bond_lengths, f)
        for bond_angle in bond_angle_trios:
            bond_lengths = MoleculeAnalysis.get_angles(geoms,bond_angle)
            with open('./models/iteration'+str(i)+'/analysis/'+(bond_angle[0]+bond_angle[1]+bond_angle[2])+'.pkl', 'wb') as f:
                pickle.dump(bond_lengths, f)
        # predict orbital energies with two PBE0 models and a delta model
        orbital_energies_prediction1 = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/PBE0_1 --parallel --batch_size 1 --cuda'],shell=True)
        orbital_energies_prediction1.wait()
        orbital_energies_prediction2 = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/PBE0_2 --parallel --batch_size 1 --cuda'],shell=True)
        orbital_energies_prediction2.wait()
        quasi_energies_prediction = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/Delta --parallel --batch_size 1 --cuda'],shell=True)
        quasi_energies_prediction.wait()
        path = os.path.join("./models/iteration"+str(i)+"/", "energy_predictions")
        os.mkdir(path)
        # collect the prediction files produced by the three models
        shutil.move("./Models/PBE0_1/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/PBE01_predictions.npz")
        shutil.move("./Models/PBE0_2/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/PBE02_predictions.npz")
        shutil.move("./Models/Delta/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/Delta_predictions.npz")
        dbname1="./models/iteration"+str(i)+"/energy_predictions/PBE01_predictions.npz"
        dbname2="./models/iteration"+str(i)+"/energy_predictions/PBE02_predictions.npz"
        dbname3="./models/iteration"+str(i)+"/energy_predictions/Delta_predictions.npz"
        pbe0 = np.load(dbname1,allow_pickle=True)["eigenvalues_pbe0"]
        pbe0_2 =np.load(dbname2,allow_pickle=True)["eigenvalues_pbe0"]
        delta = np.load(dbname3,allow_pickle=True)["delta_eigenvalues_pbe0_gbw"]
        sorted_gw,geoms = SchNetHAnalysis.energy_analysis(pbe0,pbe0_2,delta,geoms)
        # assumes orbital columns 50/51 are HOMO/LUMO — TODO confirm
        HOMO = sorted_gw[:,50].reshape(-1)
        LUMO = sorted_gw[:,51].reshape(-1)
        HL=np.abs(HOMO-LUMO)
        with open('./models/iteration'+str(i)+'/energy_predictions/HOMO_energies.pkl', 'wb') as f:
            pickle.dump(HOMO, f)
        with open('./models/iteration'+str(i)+'/energy_predictions/LUMO_energies.pkl', 'wb') as f:
            pickle.dump(LUMO, f)
        ####### Edit below for HOMO or HLGAP #######
        # keep molecules whose HOMO-LUMO gap is more than one std below the
        # mean, biasing the next training round towards small-gap structures
        std = np.std(HL)
        mean = np.mean(HL)
        new_db = []
        for j, energy in enumerate(HL):
            if energy < mean - std:
                new_db.append(geoms[j])
        # archive the previous training db and install the new selection
        os.system("mv ./data/train.db ./data/train%i.db"%(i-1))
        ase.io.write("./data/train.db", new_db)
        os.remove("./data/train_gschnet.db")
        database_size = len(new_db)
        print(database_size)
        ############################################
| Python |
3D | rhyan10/G-SchNetOE62 | loopHOMO.py | .py | 5,974 | 113 | import numpy as np
import statistics
import ase.io
import ase
import ase.io.xyz
import argparse
import subprocess
import ase.io
import pickle
import sys
import shutil
import time
sys.path.append('../G-SchNetOE62')
import utility_functions
from utility_functions import print_atom_bond_ring_stats
from ase import neighborlist
from ase.build import molecule
import numpy as np
import os
from scipy import sparse
from analysis import MoleculeAnalysis
from analysis import SchNetHAnalysis
import matplotlib.pyplot as plt
# Global matplotlib styling for any figures produced downstream.
size=22
params={'legend.fontsize': 'large',
          'figure.figsize': (7,5),
          'axes.labelsize':size,
          'axes.titlesize':size,
          'ytick.labelsize':size,
          'xtick.labelsize':size,
          'axes.titlepad':10
}
plt.rcParams.update(params)
#fig=plt.figure()
if __name__ == "__main__":
    # Iterative biasing loop: train G-SchNet, generate molecules, filter them,
    # predict orbital energies with SchNet models, keep only structures with a
    # high HOMO energy, and use them as training data for the next round.
    bond_length_pairs = [['C','C'],['C','O'],['C','H']]
    bond_angle_trios = [['C','C','C'],['C','O','C'],['C','C','O']]
    number_of_loops = 20
    generated_database_size = 20000
    datapath = "./data/"
    database_size = 7366  # size of the initial training database
    for i in range(3,number_of_loops):
        # skip training in the first processed iteration (resumes from i=3)
        if i!=3:
            train = subprocess.Popen(['python ../G-SchNetOE62/gschnet_script.py train gschnet '+datapath+' ./models/iteration'+str(i)+'/ --pretrained_path ./models/iteration'+str(i-1)+'/ --dataset_name template_data --split '+str(round(database_size*0.8))+' '+str(round(database_size*0.1))+' --cuda --draw_random_samples 10 --batch_size 1 --max_epochs 5000'],shell=True)
            train.wait()
        # generate candidate molecules with the current model
        generate = subprocess.Popen(['python ../G-SchNetOE62/gschnet_script.py generate gschnet ./models/iteration'+str(i)+'/ '+str(generated_database_size)+' --cuda --max_length 70'],shell=True)
        generate.wait()
        # filter generated molecules (validity/uniqueness) into a .db file
        filter_ = subprocess.Popen(['python ../G-SchNetOE62/template_filter_generated.py ./models/iteration'+str(i)+'/generated/generated.mol_dict'],shell=True)
        filter_.wait()
        path = os.path.join("./models/iteration"+str(i)+"/", "analysis")
        os.mkdir(path)
        #os.system("mkdir ./models/iteration"+str(i)+"/analysis")
        geoms = ase.io.read('./models/iteration'+str(i)+'/generated/generated_molecules.db',':')
        # structural statistics of the generated set
        number_of_molecules = MoleculeAnalysis.get_molecule_sizes(geoms)
        ring_data = MoleculeAnalysis.get_rings(geoms)
        with open('./models/iteration'+str(i)+'/analysis/number_of_molecules.pkl', 'wb') as f:
            pickle.dump(number_of_molecules, f)
        with open('./models/iteration'+str(i)+'/analysis/rings.pkl', 'wb') as f:
            pickle.dump(ring_data, f)
        for bond in bond_length_pairs:
            bond_lengths = MoleculeAnalysis.get_bond_distances(geoms,bond)
            with open('./models/iteration'+str(i)+'/analysis/'+(bond[0]+bond[1])+'.pkl', 'wb') as f:
                pickle.dump(bond_lengths, f)
        for bond_angle in bond_angle_trios:
            bond_lengths = MoleculeAnalysis.get_angles(geoms,bond_angle)
            with open('./models/iteration'+str(i)+'/analysis/'+(bond_angle[0]+bond_angle[1]+bond_angle[2])+'.pkl', 'wb') as f:
                pickle.dump(bond_lengths, f)
        # predict orbital energies with two PBE0 models and a delta model
        orbital_energies_prediction1 = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/PBE0_1 --parallel --batch_size 1 --cuda'],shell=True)
        orbital_energies_prediction1.wait()
        orbital_energies_prediction2 = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/PBE0_2 --parallel --batch_size 1 --cuda'],shell=True)
        orbital_energies_prediction2.wait()
        quasi_energies_prediction = subprocess.Popen(['python /home/chem/mssdjc/software/SchNarc/src/scripts/run_schnet_ev.py pred ./models/iteration'+str(i)+'/generated/generated_molecules.db ./Models/Delta --parallel --batch_size 1 --cuda'],shell=True)
        quasi_energies_prediction.wait()
        path = os.path.join("./models/iteration"+str(i)+"/", "energy_predictions")
        os.mkdir(path)
        # collect the prediction files produced by the three models
        shutil.move("./Models/PBE0_1/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/PBE01_predictions.npz")
        shutil.move("./Models/PBE0_2/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/PBE02_predictions.npz")
        shutil.move("./Models/Delta/predictions.npz", "./models/iteration"+str(i)+"/energy_predictions/Delta_predictions.npz")
        dbname1="./models/iteration"+str(i)+"/energy_predictions/PBE01_predictions.npz"
        dbname2="./models/iteration"+str(i)+"/energy_predictions/PBE02_predictions.npz"
        dbname3="./models/iteration"+str(i)+"/energy_predictions/Delta_predictions.npz"
        pbe0 = np.load(dbname1,allow_pickle=True)["eigenvalues_pbe0"]
        pbe0_2 =np.load(dbname2,allow_pickle=True)["eigenvalues_pbe0"]
        delta = np.load(dbname3,allow_pickle=True)["delta_eigenvalues_pbe0_gbw"]
        sorted_gw,geoms = SchNetHAnalysis.energy_analysis(pbe0,pbe0_2,delta,geoms)
        # assumes orbital columns 50/51 are HOMO/LUMO — TODO confirm
        HOMO = sorted_gw[:,50].reshape(-1)
        LUMO = sorted_gw[:,51].reshape(-1)
        with open('./models/iteration'+str(i)+'/energy_predictions/HOMO_energies.pkl', 'wb') as f:
            pickle.dump(HOMO, f)
        with open('./models/iteration'+str(i)+'/energy_predictions/LUMO_energies.pkl', 'wb') as f:
            pickle.dump(LUMO, f)
        ####### Edit below for HOMO or HLGAP #######
        # keep molecules whose HOMO is more than one std above the mean,
        # biasing the next training round towards high-HOMO structures
        std = np.std(HOMO)
        mean = np.mean(HOMO)
        new_db = []
        for j, energy in enumerate(HOMO):
            if energy > mean + std:
                new_db.append(geoms[j])
        # archive the previous training db and install the new selection
        os.system("mv ./data/train.db ./data/train%i.db"%(i-1))
        ase.io.write("./data/train.db", new_db)
        os.remove("./data/train_gschnet.db")
        database_size = len(new_db)
        print(database_size)
        ############################################
| Python |
3D | rhyan10/G-SchNetOE62 | qm9_preprocess_dataset.py | .py | 22,236 | 498 | import collections
import argparse
import sys
import time
import numpy as np
import logging
from ase.db import connect
from scipy.spatial.distance import pdist
from utility_classes import ConnectivityCompressor, Molecule
from multiprocessing import Process, Queue
from pathlib import Path
def get_parser():
    """ Setup parser for command line arguments """
    parser = argparse.ArgumentParser()
    # positional: path to the dataset that shall be preprocessed
    parser.add_argument(
        'datapath',
        help='Full path to dataset (e.g. /home/qm9.db)')
    # valences are given as a flat, alternating [type, valence, ...] list
    parser.add_argument(
        '--valence_list', type=int, nargs='+',
        default=[1, 1, 6, 4, 7, 3, 8, 2, 9, 1],
        help='The valence of atom types in the form '
             '[type1 valence type2 valence ...] '
             '(default: %(default)s)')
    parser.add_argument(
        '--n_threads', type=int, default=16,
        help='Number of extra threads used while processing the data')
    parser.add_argument(
        '--n_mols_per_thread', type=int, default=100,
        help='Number of molecules processed by each thread in one iteration')
    return parser
def is_disconnected(connectivity):
    '''
    Assess whether all atoms of a molecule are connected using a connectivity matrix

    Args:
        connectivity (numpy.ndarray): matrix (n_atoms x n_atoms) indicating bonds
            between atoms

    Returns
        bool: True if the molecule consists of at least two disconnected graphs,
        False if all atoms are connected by some path (an empty matrix is treated
        as connected, i.e. False is returned)
    '''
    n_atoms = len(connectivity)
    # guard against an empty matrix: the BFS below starts at atom 0 and would
    # otherwise raise an IndexError
    if n_atoms == 0:
        return False
    # breadth-first search over the bond graph, starting at atom 0
    seen, queue = {0}, collections.deque([0])
    while queue:
        vertex = queue.popleft()
        # iterate over (bonded) neighbors of current node
        for node in np.argwhere(connectivity[vertex] > 0).flatten():
            # add node to queue and list of seen nodes if it has not been seen before
            if node not in seen:
                seen.add(node)
                queue.append(node)
    # if the seen nodes do not include all nodes, there are disconnected parts
    return seen != {*range(n_atoms)}
def get_count_statistics(mol=None, get_stat_heads=False):
    '''
    Collect atom, bond, and ring count statistics of a provided molecule.

    Args:
        mol (utility_classes.Molecule): molecule to be examined (returns None
            if omitted)
        get_stat_heads (bool, optional): set True to only return the headers of
            gathered statistics (default: False)

    Returns:
        numpy.ndarray: (n_statistics x 1) array with the gathered statistics
        (row order matches the headers, where RX is the number of X-membered
        rings and CXC the number of carbon-carbon bonds of order X etc.)
    '''
    stat_heads = ['n_atoms', 'C', 'N', 'O', 'F', 'H', 'H1C', 'H1N',
                  'H1O', 'C1C', 'C2C', 'C3C', 'C1N', 'C2N', 'C3N', 'C1O',
                  'C2O', 'C1F', 'N1N', 'N2N', 'N1O', 'N2O', 'N1F', 'O1O',
                  'O1F', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R>8']
    if get_stat_heads:
        return stat_heads
    if mol is None:
        return None
    # map each statistics header to its row index
    idx_of = {head: row for row, head in enumerate(stat_heads)}
    stats = np.zeros((len(stat_heads), 1))
    # fill in bond and ring counts (ignore keys without a matching header)
    for key, value in mol.get_bond_stats().items():
        if key in idx_of:
            stats[idx_of[key], 0] = value
    # total number of atoms
    stats[idx_of['n_atoms'], 0] = mol.n_atoms
    # per-type atom counts
    for key in ['C', 'N', 'O', 'F', 'H']:
        charge = mol.type_charges[key]
        if charge in mol._unique_numbers:
            stats[idx_of[key], 0] = np.sum(mol.numbers == charge)
    return stats
def preprocess_molecules(mol_idcs, source_db, valence,
                         precompute_distances=True, remove_invalid=True,
                         invalid_list=None, print_progress=False):
    '''
    Checks the validity of selected molecules and collects atom, bond,
    and ring count statistics for the valid structures. Molecules are classified as
    invalid if they consist of disconnected parts or fail a valence check, where the
    valency constraints of all atoms in a molecule have to be satisfied (e.g. carbon
    has four bonds, nitrogen has three bonds etc.)

    Args:
        mol_idcs (array): the indices of molecules from the source database that
            shall be examined (0-based; the db itself is accessed 1-based)
        source_db (str): full path to the source database (in ase.db sqlite format)
        valence (array): an array where the i-th entry contains the valency
            constraint of atoms with atomic charge i (e.g. a valency of 4 at array
            position 6 representing carbon)
        precompute_distances (bool, optional): if True, the pairwise distances between
            atoms in each molecule are computed and stored in the database (default:
            True)
        remove_invalid (bool, optional): if True, molecules that do not pass the
            valency or connectivity checks (or are on the invalid_list) are removed
            from the new database (default: True)
        invalid_list (list of int, optional): precomputed list containing indices of
            molecules that are marked as invalid (because they did not pass the
            valency or connectivity checks in earlier runs, default: None)
        print_progress (bool, optional): set True to print the progress in percent
            (default: False)

    Returns
        list of ase.Atoms: list of all valid molecules
        list of dict: list of corresponding dictionaries with data of each molecule
        numpy.ndarray: (n_statistics x n_valid_molecules) matrix with atom, bond,
            and ring count statistics
        list of int: list with indices of molecules that failed the valency check
        list of int: list with indices of molecules that consist of disconnected parts
        int: number of molecules that were discarded as invalid (disconnected or
            failed the valence check) -- not the number processed
    '''
    # initial setup
    count = 0  # count the number of invalid molecules
    disc = []  # store indices of disconnected molecules
    inval = []  # store indices of invalid molecules
    data_list = []  # store data fields of molecules for new db
    mols = []  # store molecules (as ase.Atoms objects)
    compressor = ConnectivityCompressor()  # (de)compress sparse connectivity matrices
    stats = np.empty((len(get_count_statistics(get_stat_heads=True)), 0))
    n_all = len(mol_idcs)
    # NOTE: the db handle deliberately shadows the source_db path argument below
    with connect(source_db) as source_db:
        # iterate over provided indices
        for i in mol_idcs:
            i = int(i)
            # skip molecule if present in invalid_list and remove_invalid is True
            if remove_invalid and invalid_list is not None:
                if i in invalid_list:
                    continue
            # get molecule from database (ase.db row ids are 1-based)
            row = source_db.get(i + 1)
            data = row.data
            at = row.toatoms()
            # get positions and atomic numbers
            pos = at.positions
            numbers = at.numbers
            # center positions (using center of mass)
            pos = pos - at.get_center_of_mass()
            # order atoms by distance to center of mass
            # (np.maximum guards against tiny negative values from rounding)
            center_dists = np.sqrt(np.maximum(np.sum(pos ** 2, axis=1), 0))
            idcs_sorted = np.argsort(center_dists)
            pos = pos[idcs_sorted]
            numbers = numbers[idcs_sorted]
            # update positions and atomic numbers accordingly in Atoms object
            at.positions = pos
            at.numbers = numbers
            # instantiate utility_classes.Molecule object
            mol = Molecule(pos, numbers)
            # get connectivity matrix (detecting bond orders with Open Babel)
            con_mat = mol.get_connectivity()
            # stop if molecule is disconnected (and therefore invalid)
            if remove_invalid:
                if is_disconnected(con_mat):
                    count += 1
                    disc += [i]
                    continue
            # check if valency constraints of all atoms in molecule are satisfied:
            # since the detection of bond orders for the connectivity matrix with Open
            # Babel is unreliable for certain cases (e.g. some aromatic rings) we
            # try to fix it manually (with heuristics) or by reshuffling the atom
            # order (as the bond order detection of Open Babel is sensitive to the
            # order of atoms)
            nums = numbers
            random_ord = np.arange(len(numbers))
            for _ in range(10):  # try 10 times before dismissing as invalid
                if np.all(np.sum(con_mat, axis=0) == valence[nums]):
                    # valency is correct -> mark as valid and stop check
                    val = True
                    break
                else:
                    # try to fix bond orders using heuristics
                    val = False
                    con_mat = mol.get_fixed_connectivity()
                    if np.all(np.sum(con_mat, axis=0) == valence[nums]):
                        # valency is now correct -> mark as valid and stop check
                        val = True
                        break
                    # shuffle atom order before checking valency again
                    random_ord = np.random.permutation(range(len(pos)))
                    mol = Molecule(pos[random_ord], numbers[random_ord])
                    con_mat = mol.get_connectivity()
                    nums = numbers[random_ord]
            if remove_invalid:
                if not val:
                    # stop if molecule is invalid (it failed the repeated valence checks)
                    count += 1
                    inval += [i]
                    continue
            if precompute_distances:
                # calculate pairwise distances of atoms and store them in data
                dists = pdist(pos)[:, None]
                data.update({'dists': dists})
            # store compressed connectivity matrix in data
            # (undo the random shuffling first so the matrix matches the sorted
            # atom order stored in the Atoms object)
            rand_ord_rev = np.argsort(random_ord)
            con_mat = con_mat[rand_ord_rev][:, rand_ord_rev]
            data.update(
                {'con_mat': compressor.compress(con_mat)})
            # update atom, bond, and ring count statistics
            stats = np.hstack((stats, get_count_statistics(mol=mol)))
            # add results to the lists
            mols += [at]
            data_list += [data]
            # print progress if desired (only every 100th molecule)
            if print_progress:
                if i % 100 == 0:
                    print('\033[K', end='\r', flush=True)
                    print(f'{100 * (i + 1) / n_all:.2f}%', end='\r', flush=True)
    return mols, data_list, stats, inval, disc, count
def _processing_worker(q_in, q_out, task):
'''
Simple worker function that repeatedly fulfills a task using transmitted input and
sends back the results until a stop signal is received. Can be used as target in
a multiprocessing.Process object.
Args:
q_in (multiprocessing.Queue): queue to receive a list with data. The first
entry signals whether worker can stop and the remaining entries are used as
input arguments to the task function
q_out (multiprocessing.Queue): queue to send results from task back
task (callable function): function that is called using the received data
'''
while True:
data = q_in.get(True) # receive data
if data[0]: # stop if stop signal is received
break
results = task(*data[1:]) # fulfill task with received data
q_out.put(results) # send back results
def _submit_jobs(qs_out, count, chunk_size, n_all, working_flag,
n_per_thread):
'''
Function that submits a job to preprocess molecules to every provided worker.
Args:
qs_out (list of multiprocessing.Queue): queues used to send data to workers (one
queue per worker)
count (int): index of the earliest, not yet preprocessed molecule in the db
chunk_size (int): number of molecules to be divided amongst workers
n_all (int): total number of molecules in the db
working_flag (array): flags indicating whether workers are running
n_per_thread (int): number of molecules to be given to each thread
Returns:
numpy.ndarray: array with flags indicating whether workers got
a job
int: index of the new earliest, not yet preprocessed molecule in
the db (after the submitted preprocessing jobs have been done)
'''
# calculate indices of molecules that shall be preprocessed by workers
idcs = np.arange(count, min(n_all, count + chunk_size))
start = 0
for i, q in enumerate(qs_out):
if start >= len(idcs):
# stop if no more indices are left to submit
break
end = start + n_per_thread
q.put((False, idcs[start:end])) # submit indices (and signal to not stop)
working_flag[i] = 1 # set flag that current worker got a job
start = end
new_count = count + len(idcs)
return working_flag, new_count
def preprocess_dataset(datapath, valence_list, n_threads, n_mols_per_thread=100,
                       logging_print=True, new_db_path=None, precompute_distances=True,
                       remove_invalid=True, invalid_list=None):
    '''
    Pre-processes all molecules of a dataset using the provided valency information.
    Multi-threading is used to speed up the process.
    Along with a new database containing the pre-processed molecules, a
    "input_db_invalid.txt" file holding the indices of removed molecules (which
    do not pass the valence or connectivity checks, omitted if remove_invalid is False)
    and a "new_db_statistics.npz" file (containing atom, bond, and ring count statistics
    for all molecules in the new database) are stored.

    Args:
        datapath (str): full path to dataset (ase.db database)
        valence_list (list): the valence of atom types in the form
            [type1 valence type2 valence ...]
        n_threads (int): number of threads used (0 for no extra threads)
        n_mols_per_thread (int, optional): number of molecules processed by each
            thread at each iteration (default: 100)
        logging_print (bool, optional): set True to show output with logging.info
            instead of standard printing (default: True)
        new_db_path (str, optional): full path to new database where pre-processed
            molecules shall be stored (None to simply append "gen" to the name in
            datapath, default: None)
        precompute_distances (bool, optional): if True, the pairwise distances between
            atoms in each molecule are computed and stored in the database (default:
            True)
        remove_invalid (bool, optional): if True, molecules that do not pass the
            valency or connectivity check are removed from the new database (default:
            True)
        invalid_list (list of int, optional): precomputed list containing indices of
            molecules that are marked as invalid (because they did not pass the
            valency or connectivity checks in earlier runs, default: None)
    '''
    # convert paths
    datapath = Path(datapath)
    if new_db_path is None:
        new_db_path = datapath.parent / (datapath.stem + 'gen.db')
    else:
        new_db_path = Path(new_db_path)
    # compute array where the valency constraint of atom type i is stored at entry i
    max_type = max(valence_list[::2])
    valence = np.zeros(max_type + 1, dtype=int)
    valence[valence_list[::2]] = valence_list[1::2]

    def _print(x, end='\n', flush=False):
        # route output to logging.info or stdout depending on logging_print
        if logging_print:
            logging.info(x)
        else:
            print(x, end=end, flush=flush)

    # only query the molecule count here; workers open their own connections
    with connect(datapath) as db:
        n_all = db.count()
    if n_all == 0:
        _print('No molecules found in data base!')
        sys.exit(0)
    _print('\nPre-processing data...')
    if logging_print:
        _print(f'Processed: 0 / {n_all}...')
    else:
        _print(f'0.00%', end='', flush=True)
    # initial setup
    n_iterations = 0
    chunk_size = n_threads * n_mols_per_thread
    current = 0  # earliest molecule index not yet submitted to a worker
    count = 0  # count number of discarded (invalid etc.) molecules
    disc = []  # indices of disconnected molecules
    inval = []  # indices of molecules with invalid valence
    stats = np.empty((len(get_count_statistics(get_stat_heads=True)), 0))
    working_flag = np.zeros(n_threads, dtype=bool)
    start_time = time.time()
    if invalid_list is not None and remove_invalid:
        # use a set for fast membership tests in the workers
        invalid_list = {*invalid_list}
        n_inval = len(invalid_list)
    else:
        n_inval = 0
    with connect(new_db_path) as new_db:
        if n_threads >= 1:
            # set up threads and queues (capacity 1 keeps workers and the main
            # loop in lock-step)
            threads = []
            qs_in = []
            qs_out = []
            for i in range(n_threads):
                qs_in += [Queue(1)]
                qs_out += [Queue(1)]
                # NOTE(review): a lambda as Process target argument is only
                # picklable with the 'fork' start method -- verify on platforms
                # that default to 'spawn'
                threads += \
                    [Process(target=_processing_worker,
                             name=str(i),
                             args=(qs_out[-1],
                                   qs_in[-1],
                                   lambda x:
                                   preprocess_molecules(x,
                                                        datapath,
                                                        valence,
                                                        precompute_distances,
                                                        remove_invalid,
                                                        invalid_list)))]
                threads[-1].start()
            # submit first round of jobs
            working_flag, current = \
                _submit_jobs(qs_out, current, chunk_size, n_all,
                             working_flag, n_mols_per_thread)
            while np.any(working_flag == 1):
                n_iterations += 1
                # initialize new iteration
                results = []
                # gather results
                for i, q in enumerate(qs_in):
                    if working_flag[i]:
                        results += [q.get()]
                        working_flag[i] = 0
                # submit new jobs
                working_flag, current_new = \
                    _submit_jobs(qs_out, current, chunk_size, n_all, working_flag,
                                 n_mols_per_thread)
                # store gathered results
                for res in results:
                    mols, data_list, _stats, _inval, _disc, _c = res
                    for (at, data) in zip(mols, data_list):
                        new_db.write(at, data=data)
                    stats = np.hstack((stats, _stats))
                    inval += _inval
                    disc += _disc
                    count += _c
                # print progress
                if logging_print and n_iterations % 10 == 0:
                    _print(f'Processed: {current:6d} / {n_all}...')
                elif not logging_print:
                    _print('\033[K', end='\r', flush=True)
                    _print(f'{100 * current / n_all:.2f}%', end='\r',
                           flush=True)
                current = current_new  # update current position in database
            # stop worker threads and join
            for i, q_out in enumerate(qs_out):
                q_out.put((True,))
                threads[i].join()
                threads[i].terminate()
            if logging_print:
                _print(f'Processed: {n_all} / {n_all}...')
        else:
            # sequential fallback: process everything in the main thread
            results = preprocess_molecules(range(n_all), datapath, valence,
                                           precompute_distances, remove_invalid,
                                           invalid_list, print_progress=True)
            mols, data_list, stats, inval, disc, count = results
            for (at, data) in zip(mols, data_list):
                new_db.write(at, data=data)
    if not logging_print:
        _print('\033[K', end='\n', flush=True)
    _print(f'... successfully validated {n_all - count - n_inval} data '
           f'points!', flush=True)
    if invalid_list is not None:
        _print(f'{n_inval} structures were removed because they are on the '
               f'pre-computed list of invalid molecules!', flush=True)
        if len(disc)+len(inval) > 0:
            _print(f'CAUTION: Could not validate {len(disc)+len(inval)} additional '
                   f'molecules. These were also removed and their indices are '
                   f'appended to the list of invalid molecules stored at '
                   f'{datapath.parent / (datapath.stem + f"_invalid.txt")}',
                   flush=True)
            np.savetxt(datapath.parent / (datapath.stem + f'_invalid.txt'),
                       np.append(np.sort(list(invalid_list)), np.sort(inval + disc)),
                       fmt='%d')
    elif remove_invalid:
        _print(f'Identified {len(disc)} disconnected structures, and {len(inval)} '
               f'structures with invalid valence!', flush=True)
        np.savetxt(datapath.parent / (datapath.stem + f'_invalid.txt'),
                   np.sort(inval + disc), fmt='%d')
    _print('\nCompressing and storing statistics with numpy...')
    np.savez_compressed(new_db_path.parent/(new_db_path.stem+f'_statistics.npz'),
                        stats=stats,
                        stat_heads=get_count_statistics(get_stat_heads=True))
    # report total wall-clock time in h/m/s
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    _print(f'Done! Pre-processing needed {h:d}h{m:02d}m{s:02d}s.')
if __name__ == '__main__':
    # entry point: parse CLI arguments and run the full pre-processing pipeline
    cli_args = get_parser().parse_args()
    preprocess_dataset(**vars(cli_args))
| Python |
3D | rhyan10/G-SchNetOE62 | qm9_filter_generated.py | .py | 59,408 | 1,291 | import numpy as np
import collections
import pickle
import os
import argparse
import openbabel as ob
import pybel
import time
import json
from schnetpack import Properties
from utility_classes import Molecule, ConnectivityCompressor
from utility_functions import run_threaded, print_atom_bond_ring_stats, update_dict
from multiprocessing import Process, Queue
from ase import Atoms
from ase.db import connect
def get_parser():
    """Build the command line argument parser for the filtering script.

    Returns:
        argparse.ArgumentParser: parser with the path to the generated
        molecules and options controlling filtering, storage, and threading.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'data_path',
        help='Path to generated molecules in .mol_dict format, '
             'a database called "generated_molecules.db" with the '
             'filtered molecules along with computed statistics '
             '("generated_molecules_statistics.npz") will be '
             'stored in the same directory as the input file/s '
             '(if the path points to a directory, all .mol_dict '
             'files in the directory will be merged and filtered '
             'in one pass)')
    parser.add_argument(
        '--train_data_path', default=None,
        help='Path to training data base (if provided, '
             'generated molecules can be compared/matched with '
             'those in the training data set)')
    parser.add_argument(
        '--model_path', default=None,
        help='Path of directory containing the model that '
             'generated the molecules. It should contain a '
             'split.npz file with training data splits and a '
             'args.json file with the arguments used during '
             'training (if this and --train_data_path '
             'are provided, the generated molecules will be '
             'filtered for new structures which were not included '
             'in the training or validation data)')
    # valency constraints as alternating (type, valence) pairs
    parser.add_argument(
        '--valence', type=int, nargs='+',
        default=[1, 1, 3, 1, 5, 3, 6, 4, 7, 3, 8, 2, 9, 1, 14, 4, 15, 5,
                 16, 6, 17, 1, 33, 5, 34, 6, 35, 1, 52, 6, 53, 1],
        help='the valence of atom types in the form '
             '[type1 valence type2 valence ...] '
             '(default: %(default)s)')
    parser.add_argument(
        '--filters', type=str, nargs='*',
        default=['valence', 'disconnected', 'unique'],
        choices=['valence', 'disconnected', 'unique'],
        help='Select the filters applied to identify '
             'invalid molecules (default: %(default)s)')
    parser.add_argument(
        '--store', type=str, default='valid_connectivity',
        choices=['all', 'valid', 'new', 'valid_connectivity',
                 'new_connectivity'],
        help='How much information shall be stored '
             'after filtering: \n"all" keeps all '
             'generated molecules and statistics '
             'including the connectivity matrices,'
             '\n"valid" keeps only valid molecules and '
             'discards connectivity matrices,\n'
             '"new" furthermore discards all validly '
             'generated molecules that match training '
             'data (corresponds to "valid" if '
             'model_path is not provided), '
             '\n"new_connectivity" and '
             '"valid_connectivity" store only new or '
             'valid molecules and the corresponding '
             'connectivity matrices '
             '(default: %(default)s)')
    parser.add_argument(
        '--print_file', action='store_true',
        help='Use to limit the printing if results are '
             'written to a file instead of the console ('
             'e.g. if running on a cluster)')
    parser.add_argument(
        '--threads', type=int, default=8,
        help='Number of threads used (set to 0 to run '
             'everything sequentially in the main thread,'
             ' default: %(default)s)')
    return parser
def remove_disconnected(connectivity_batch, valid=None):
    '''
    Mark structures that consist of several disconnected fragments as invalid.

    Args:
        connectivity_batch (numpy.ndarray): batch of connectivity matrices
        valid (numpy.ndarray, optional): validity flags of the same length as the
            batch; entries already marked invalid are skipped (if None, all
            structures are initially considered valid; note the array is updated
            in place, default: None)

    Returns:
        dict (str->numpy.ndarray): dictionary holding the updated validity flags
        under the key 'valid' (disconnected structures are now flagged invalid)
    '''
    if valid is None:
        valid = np.ones(len(connectivity_batch), dtype=bool)
    for idx, con_mat in enumerate(connectivity_batch):
        # skip structures that are already known to be invalid
        if not valid[idx]:
            continue
        # breadth-first search over the bond graph, starting at atom 0
        reached = {0}
        frontier = collections.deque([0])
        while frontier:
            current = frontier.popleft()
            for neighbor in np.argwhere(con_mat[current] > 0).flatten():
                if neighbor not in reached:
                    reached.add(neighbor)
                    frontier.append(neighbor)
        # if BFS did not visit every atom, the structure is disconnected
        if reached != {*range(len(con_mat))}:
            valid[idx] = False
    return {'valid': valid}
def filter_unique(mols, valid=None, use_bits=False):
    '''
    Detect duplicates among generated molecules. The first occurrence of each
    structure is kept as the valid original; every later match is flagged invalid.
    Matching uses the molecular fingerprint plus the canonical (or mirrored
    canonical) SMILES, so distinct spatial conformers of the same molecular graph
    are treated as duplicates.

    Args:
        mols (list of utility_classes.Molecule): all generated molecules
        valid (numpy.ndarray, optional): validity flags of the same length as mols
            (invalid entries are skipped; if None, everything starts valid;
            the input array is not modified, default: None)
        use_bits (bool, optional): set True to compare via the list of non-zero
            fingerprint bits instead of the pybel.Fingerprint object (results
            are identical, default: False)

    Returns:
        numpy.ndarray: updated validity flags (duplicates now marked invalid)
        numpy.ndarray: per-molecule index of the duplicated original (-1 for
        originals)
        numpy.ndarray: per-molecule number of found duplicates (0 for duplicates,
        so its sum is the total duplicate count)
    '''
    valid = np.ones(len(mols), dtype=bool) if valid is None else valid.copy()
    # composition string -> list of (index, molecule) of accepted originals
    originals = {}
    duplicating = -np.ones(len(mols), dtype=int)
    duplicate_count = np.zeros(len(mols), dtype=int)
    for idx, candidate in enumerate(mols):
        if not valid[idx]:
            continue
        comp_key = _get_atoms_per_type_str(candidate)
        matched = False
        # only compare against originals with the same atomic composition
        for orig_idx, original in originals.get(comp_key, []):
            if candidate.tanimoto_similarity(original, use_bits=use_bits) >= 1:
                if (candidate.get_can() == original.get_can()
                        or candidate.get_can() == original.get_mirror_can()):
                    matched = True
                    valid[idx] = False
                    duplicating[idx] = orig_idx
                    duplicate_count[orig_idx] += 1
                    break
        if not matched:
            originals = _update_dict(originals, key=comp_key,
                                     val=(idx, candidate))
    return valid, duplicating, duplicate_count
def filter_unique_threaded(mols, valid=None, n_threads=16,
                           n_mols_per_thread=5, print_file=True,
                           prog_str=None):
    '''
    Identify duplicate molecules among a large amount of generated structures using
    multiple CPU-threads. The first found structure of each kind is kept as valid
    original and all following duplicating structures are marked as invalid (the
    molecular fingerprint and canonical smiles representation is used which means that
    different spatial conformers of the same molecular graph cannot be distinguished).

    Args:
        mols (list of utility_classes.Molecule): list of all generated molecules
        valid (numpy.ndarray, optional): array of the same length as mols which flags
            molecules as valid (invalid molecules are not considered in the comparison
            process), if None, all molecules in mols are considered as valid (default:
            None)
        n_threads (int, optional): number of additional threads used (default: 16)
        n_mols_per_thread (int, optional): number of molecules that are processed by
            each thread in each iteration (default: 5)
        print_file (bool, optional): set True to suppress printing of progress string
            (default: True)
        prog_str (str, optional): specify a custom progress string (if None,
            no progress will be printed, default: None)

    Returns:
        numpy.ndarray: array of the same length as mols which flags molecules as
        valid (identified duplicates are now marked as invalid in contrast to the
        flag in input argument valid)
        numpy.ndarray: array of length n_mols where entry i is -1 if molecule i is
        an original structure (not a duplicate) and otherwise it is the index j of
        the original structure that molecule i duplicates (j<i)
        numpy.ndarray: array of length n_mols that is 0 for all duplicates and the
        number of identified duplicates for all original structures (therefore
        the sum over this array is the total number of identified duplicates)
    '''
    if valid is None:
        valid = np.ones(len(mols), dtype=bool)
    else:
        valid = valid.copy()
    # for small inputs the threading overhead is not worth it -> run sequentially
    if len(mols) < 3*n_threads*n_mols_per_thread or n_threads == 0:
        return filter_unique(mols, valid, use_bits=True)
    current = 0  # earliest molecule index not yet put into a mini-batch
    still_valid = np.zeros_like(valid)
    working_flag = np.zeros(n_threads, dtype=bool)
    duplicating = []  # pairs [duplicate_index, original_index]
    goal = n_threads*n_mols_per_thread  # mini-batch size
    # set up threads and queues (capacity 1 keeps workers and main loop in
    # lock-step)
    threads = []
    qs_in = []
    qs_out = []
    for i in range(n_threads):
        qs_in += [Queue(1)]
        qs_out += [Queue(1)]
        threads += [Process(target=_filter_worker, name=str(i),
                            args=(qs_out[-1], qs_in[-1], mols))]
        threads[-1].start()
    # get first two mini-batches (workers do not need to process first one)
    new_idcs, current, dups = _filter_mini_batch(mols, valid, current, goal)
    duplicating += dups  # maintain list of which molecules are duplicated
    newly_accepted = new_idcs
    still_valid[newly_accepted] = 1  # trivially accept first batch
    newly_accepted_dict = _create_mol_dict(mols, newly_accepted)
    new_idcs, current, dups = _filter_mini_batch(mols, valid, current, goal)
    duplicating += dups
    # submit second mini batch to workers
    start = 0
    for i, q_out in enumerate(qs_out):
        if start >= len(new_idcs):
            continue
        end = start+n_mols_per_thread
        q_out.put((False, newly_accepted_dict, new_idcs[start:end]))
        working_flag[i] = 1
        start = end
    # loop while the worker threads have data to process
    k = 1
    while np.any(working_flag == 1):
        # get new mini batch (prepared while the workers are busy)
        new_idcs, current, dups = \
            _filter_mini_batch(mols, valid, current, goal)
        # gather results from workers
        newly_accepted = []
        newly_accepted_dict = {}
        for i, q_in in enumerate(qs_in):
            if working_flag[i]:
                returned = q_in.get()
                newly_accepted += returned[0]
                duplicating += returned[1]
                newly_accepted_dict = _update_dict(newly_accepted_dict,
                                                   new_dict=returned[2])
                working_flag[i] = 0
        # submit gathered results and new mini batch molecules to workers
        start = 0
        for i, q_out in enumerate(qs_out):
            if start >= len(new_idcs):
                continue
            end = start + n_mols_per_thread
            q_out.put((False, newly_accepted_dict, new_idcs[start:end]))
            working_flag[i] = 1
            start = end
        # set validity according to gathered data
        still_valid[newly_accepted] = 1
        duplicating += dups
        k += 1
        # print progress every 10th iteration (and at the end)
        if ((k % 10) == 0 or current >= len(mols)) and not print_file \
                and prog_str is not None:
            print('\033[K', end='\r', flush=True)
            print(f'{prog_str} ({100 * min(current/len(mols), 1):.2f}%)',
                  end='\r', flush=True)
    # stop worker threads and join
    for i, q_out in enumerate(qs_out):
        q_out.put((True,))
        threads[i].join()
        threads[i].terminate()
    # fix statistics about duplicates (resolve transitive duplicate chains)
    duplicating, duplicate_count = _process_duplicates(duplicating, len(mols))
    return still_valid, duplicating, duplicate_count
def _get_atoms_per_type_str(mol):
    '''
    Build a string describing the atomic composition of a molecule (number of
    atoms per type, e.g. H2C3O1; type order follows increasing nuclear charge).

    Args:
        mol (utility_classes.Molecule or numpy.ndarray): the molecule (or an
            array of its atomic numbers)

    Returns:
        str: the atomic composition of the molecule
    '''
    if isinstance(mol, Molecule):
        counts = mol.get_n_atoms_per_type()
    else:
        # an array of atomic numbers was given instead of a Molecule
        # NOTE(review): minlength=10 only pads up to charge 9 -- verify it is
        # sufficient for the largest key in Molecule.type_infos
        type_order = np.array(list(Molecule.type_infos.keys()), dtype=int)
        counts = np.bincount(mol, minlength=10)[type_order]
    return ''.join(f'{Molecule.type_infos[t]["name"]}{int(n):d}'
                   for t, n in zip(Molecule.type_infos.keys(), counts))
def _create_mol_dict(mols, idcs=None):
    '''
    Group molecule indices by atomic composition. The key is the composition
    string (number of atoms per type, e.g. H2C3O1, ordered by increasing nuclear
    charge), so candidate structures with identical composition can be looked up
    quickly while molecules with different compositions are ignored during
    comparison.

    Args:
        mols (list of utility_classes.Molecule or numpy.ndarray): the molecules
            (or their atomic numbers) referenced in the dictionary
        idcs (list of int, optional): subset of indices into mols to include
            (if None, every structure in mols is referenced, default: None)

    Returns:
        dict (str->list of int): indices of molecules grouped by their atomic
        composition
    '''
    if idcs is None:
        idcs = range(len(mols))
    mol_dict = {}
    for i in idcs:
        composition = _get_atoms_per_type_str(mols[i])
        mol_dict = _update_dict(mol_dict, key=composition, val=i)
    return mol_dict
def _update_dict(old_dict, **kwargs):
'''
Update an existing dictionary (any->list of any) with new entries where the new
values are either appended to the existing lists if the corresponding key already
exists in the dictionary or a new list under the new key is created.
Args:
old_dict (dict (any->list of any)): original dictionary that shall be updated
**kwargs: keyword arguments that can either be a dictionary of the same format
as old_dict (new_dict=dict (any->list of any)) which will be merged into
old_dict or a single key-value pair that shall be added (key=any, val=any)
Returns:
dict (any->list of any): the updated dictionary
'''
if 'new_dict' in kwargs:
for key in kwargs['new_dict']:
if key in old_dict:
old_dict[key] += kwargs['new_dict'][key]
else:
old_dict[key] = kwargs['new_dict'][key]
if 'val' in kwargs and 'key' in kwargs:
if kwargs['key'] in old_dict:
old_dict[kwargs['key']] += [kwargs['val']]
else:
old_dict[kwargs['key']] = [kwargs['val']]
return old_dict
def _filter_mini_batch(mols, valid, start, amount):
    '''
    Assemble a mini-batch of molecules that are unique with respect to each other
    (duplicates inside the batch are recorded and skipped). The batch can then be
    divided among worker threads (see _filter_worker) that compare it against the
    database of all previously accepted originals.

    Args:
        mols (list of utility_classes.Molecule): list of all generated molecules
        valid (numpy.ndarray): validity flags of the same length as mols
            (invalid molecules are skipped entirely)
        start (int): index of the first molecule in mols to consider
        amount (int): desired batch size (the batch may be smaller when mols is
            exhausted)

    Returns:
        list of int: indices of the molecules placed into the mini-batch
        int: index of the first molecule not yet considered for any batch
        list of list of int: two-element lists [duplicate_index, original_index]
        for duplicates found inside this batch
    '''
    batch = []          # accepted indices
    batch_dict = {}     # composition string -> accepted indices (for fast lookup)
    duplicating = []    # in-batch duplicates as [dup_idx, orig_idx]
    n_total = len(mols)
    n_added = 0
    while n_added < amount and start < n_total:
        if not valid[start]:
            start += 1
            continue
        candidate = mols[start]
        comp_key = _get_atoms_per_type_str(candidate)
        duplicate_of = None
        # compare only against batch members with identical composition
        for idx in batch_dict.get(comp_key, []):
            other = mols[idx]
            if candidate.tanimoto_similarity(other, use_bits=True) >= 1:
                if (candidate.get_can() == other.get_can()
                        or candidate.get_can() == other.get_mirror_can()):
                    duplicate_of = idx
                    break
        if duplicate_of is None:
            batch += [start]
            batch_dict = _update_dict(batch_dict, key=comp_key, val=start)
            n_added += 1
        else:
            duplicating += [[start, duplicate_of]]
        start += 1
    return batch, start, duplicating
def _filter_worker(q_in, q_out, all_mols):
    '''
    Worker loop for multi-threaded duplicate detection.
    Repeatedly receives a small batch of candidate molecules and compares each
    candidate against the accumulated set of accepted originals (merged from
    the results of all workers after every round). A candidate matches an
    original if their fingerprints have tanimoto similarity of 1 and their
    canonical (or mirrored canonical) smiles agree.
    Args:
        q_in (multiprocessing.Queue): queue delivering jobs as triples of
            (stop_flag, dict of originals accepted in the previous round,
            list of candidate indices to examine in this round)
        q_out (multiprocessing.Queue): queue receiving per-round results as
            triples of (list of candidate indices accepted as originals,
            list of [duplicate_index, original_index] pairs,
            dict of the newly accepted originals keyed by atom composition)
        all_mols (list of utility_classes.Molecule): all generated molecules
    '''
    known = {}  # atoms-per-type string -> indices of accepted originals
    while True:
        job = q_in.get(True)
        if job[0]:  # stop signal received
            break
        # merge originals found by all workers in the previous round
        known = _update_dict(known, new_dict=job[1])
        originals = []
        originals_dict = {}
        dup_pairs = []
        for cand_idx in job[2]:
            cand = all_mols[cand_idx]
            key = _get_atoms_per_type_str(cand)
            is_duplicate = False
            # only compare against originals with the same atom composition
            for ref_idx in known.get(key, []):
                ref = all_mols[ref_idx]
                if cand.tanimoto_similarity(ref, use_bits=True) >= 1:
                    if (cand.get_can() == ref.get_can()
                            or cand.get_can() == ref.get_mirror_can()):
                        is_duplicate = True
                        dup_pairs += [[cand_idx, ref_idx]]
                        break
            if not is_duplicate:
                originals += [cand_idx]
                originals_dict = _update_dict(originals_dict, key=key,
                                              val=cand_idx)
        q_out.put((originals, dup_pairs, originals_dict))
def _process_duplicates(dups, n_mols):
'''
Processes a list of duplicate molecules identified in a multi-threaded run and
infers a proper list with the correct statistics for each molecule (how many
duplicates of the structure are there and which is the first found structure of
that kind)
Args:
dups (list of list of int): list of lists where the inner lists have exactly
two integer entries: the first being the index of an identified duplicate
molecule and the second being the index of the corresponding original
molecule (which can also be a duplicate due to the applied multi-threading
approach, hence this function is needed to identify such cases and fix
the 'original' index to refer to the true original molecule, which is the
first found structure of that kind)
n_mols (int): the overall number of molecules that were examined
Returns:
numpy.ndarray: array of length n_mols where entry i is -1 if molecule i is
an original structure (not a duplicate) and otherwise it is the index j of
the original structure that molecule i duplicates (j<i)
numpy.ndarray: array of length n_mols that is 0 for all duplicates and the
number of identified duplicates for all original structures (therefore
the sum over this array is the total number of identified duplicates)
'''
duplicating = -np.ones(n_mols, dtype=int)
duplicate_count = np.zeros(n_mols, dtype=int)
if len(dups) == 0:
return duplicating, duplicate_count
dups = np.array(dups, dtype=int)
duplicates = dups[:, 0]
originals = dups[:, 1]
duplicating[duplicates] = originals
for original in originals:
wrongly_assigned_originals = []
while duplicating[original] >= 0:
wrongly_assigned_originals += [original]
original = duplicating[original]
duplicating[np.array(wrongly_assigned_originals, dtype=int)] = original
duplicate_count[original] += 1
return duplicating, duplicate_count
def check_valency(positions, numbers, valence, filter_by_valency=True,
                  print_file=True, prog_str=None, picklable_mols=False):
    '''
    Build utility_classes.Molecule objects from provided atom positions and types
    of a set of molecules and assess whether they are meeting the valency
    constraints or not (i.e. all of their atoms have the correct number of bonds).
    Note that all input molecules need to have the same number of atoms.
    Args:
        positions (list of numpy.ndarray): list of positions of atoms in euclidean
            space (n_atoms x 3) for each molecule
        numbers (numpy.ndarray): list of nuclear charges/types of atoms
            (e.g. 1 for hydrogens, 6 for carbons etc.) for each molecule
        valence (numpy.ndarray): list of valency of each atom type where the index in
            the list corresponds to the type (e.g. [0, 1, 0, 0, 0, 0, 2, 3, 4, 1] for
            qm9 molecules as H=type 1 has valency of 1, O=type 6 has valency of 2,
            N=type 7 has valency of 3 etc.)
        filter_by_valency (bool, optional): whether molecules that fail the valency
            check should be marked as invalid, else all input molecules will be
            classified as valid but the connectivity matrix is still computed and
            returned (default: True)
        print_file (bool, optional): set True to suppress printing of progress string
            (default: True)
        prog_str (str, optional): specify a custom progress string (default: None)
        picklable_mols (bool, optional): set True to remove all the information in
            the returned list of utility_classes.Molecule objects that can not be
            serialized with pickle (e.g. the underlying Open Babel ob.Mol object,
            default: False)
    Returns:
        dict (str->list/numpy.ndarray): a dictionary containing a list of
            utility_classes.Molecule objects under the key 'mols', a numpy.ndarray with
            the corresponding (n_atoms x n_atoms) connectivity matrices under the key
            'connectivity', and a numpy.ndarray (key 'valid') that marks whether a
            molecule has passed (entry=1) or failed (entry=0) the valency check if
            filter_by_valency is True (otherwise it will be 1 everywhere)
    '''
    n_atoms = len(numbers[0])
    n_mols = len(numbers)
    # print progress every `thresh` molecules
    thresh = n_mols if n_mols < 30 else 30
    connectivity = np.zeros((len(positions), n_atoms, n_atoms))
    valid = np.ones(len(positions), dtype=bool)
    mols = []
    for i, (pos, num) in enumerate(zip(positions, numbers)):
        mol = Molecule(pos, num, store_positions=False)
        con_mat = mol.get_connectivity()
        # identity ordering; replaced by a random permutation in retries below
        random_ord = range(len(pos))
        # filter incorrect valence if desired
        if filter_by_valency:
            nums = num
            # try to fix connectivity if it isn't correct already: up to 10
            # attempts, re-deriving bonds after randomly permuting the atom
            # order each time (presumably because the inferred connectivity
            # can depend on atom order -- TODO confirm)
            for _ in range(10):
                # valency is correct when each atom's bond-order sum matches
                # its target valence
                if np.all(np.sum(con_mat, axis=0) == valence[nums]):
                    val = True
                    break
                else:
                    val = False
                    con_mat = mol.get_fixed_connectivity()
                    if np.all(
                            np.sum(con_mat, axis=0) == valence[nums]):
                        val = True
                        break
                    random_ord = np.random.permutation(range(len(pos)))
                    mol = Molecule(pos[random_ord], num[random_ord])
                    con_mat = mol.get_connectivity()
                    nums = num[random_ord]
            valid[i] = val
            # progress logging (overwrites the current terminal line)
            if ((i + 1) % thresh == 0) and not print_file \
                    and prog_str is not None:
                print('\033[K', end='\r', flush=True)
                print(f'{prog_str} ({100 * (i + 1) / n_mols:.2f}%)',
                      end='\r', flush=True)
        # reverse random order and save fixed connectivity matrix
        rand_ord_rev = np.argsort(random_ord)
        connectivity[i] = con_mat[rand_ord_rev][:, rand_ord_rev]
        if picklable_mols:
            # trigger computation of derived quantities so they are cached
            # before the unpicklable Open Babel internals are stripped
            mol.get_fp_bits()
            mol.get_can()
            mol.get_mirror_can()
            mol.remove_unpicklable_attributes(restorable=False)
        mols += [mol]
    return {'mols': mols, 'connectivity': connectivity, 'valid': valid}
def filter_new(mols, stats, stat_heads, model_path, train_data_path, print_file=False,
               n_threads=0):
    '''
    Check whether generated molecules correspond to structures in the training database
    used for either training, validation, or as test data and update statistics array of
    generated molecules accordingly.
    Args:
        mols (list of utility_classes.Molecule): generated molecules
        stats (numpy.ndarray): statistics of all generated molecules where columns
            correspond to molecules and rows correspond to available statistics
            (n_statistics x n_molecules)
        stat_heads (list of str): the names of the statistics stored in each row in
            stats (e.g. 'F' for the number of fluorine atoms or 'R5' for the number of
            rings of size 5)
        model_path (str): path to the folder containing the trained model used to
            generate the molecules
        train_data_path (str): full path to the training database
        print_file (bool, optional): set True to limit printing (e.g. if it is
            redirected to a file instead of displayed in a terminal, default: False)
        n_threads (int, optional): number of additional threads to use (default: 0)
    Returns:
        numpy.ndarray: updated statistics of all generated molecules (stats['known']
            is 0 if a generated molecule does not correspond to a structure in the
            training database, it is 1 if it corresponds to a training structure,
            2 if it corresponds to a validation structure, and 3 if it corresponds to a
            test structure, stats['equals'] is -1 if stats['known'] is 0 and otherwise
            holds the index of the corresponding training/validation/test structure in
            the database at train_data_path)
    '''
    print(f'\n\n2. Checking which molecules are new...')
    idx_known = stat_heads.index('known')
    # load training data
    dbpath = train_data_path
    if not os.path.isfile(dbpath):
        print(f'The provided training data base {dbpath} is no file, please specify '
              f'the correct path (including the filename and extension)!')
        raise FileNotFoundError
    print(f'Using data base at {dbpath}...')
    # split.npz records which database entries were used for train/val/test
    split_file = os.path.join(model_path, 'split.npz')
    print(split_file)
    if not os.path.exists(split_file):
        raise FileNotFoundError
    S = np.load(split_file)
    train_idx = S['train_idx']
    val_idx = S['val_idx']
    test_idx = S['test_idx']
    # concatenate as [train, validation, test] -- _compare_fingerprints relies
    # on this ordering to classify which split a match belongs to
    train_idx = np.append(train_idx, val_idx)
    train_idx = np.append(train_idx, test_idx)
    # check if subset was used (and restrict indices accordingly)
    train_args_path = os.path.join(model_path, f'args.json')
    with open(train_args_path) as handle:
        train_args = json.loads(handle.read())
    if 'subset_path' in train_args:
        if train_args['subset_path'] is not None:
            subset = np.load(train_args['subset_path'])
            # map split indices through the subset to database indices
            train_idx = subset[train_idx]
    print('\nComputing fingerprints of training data...')
    start_time = time.time()
    if n_threads <= 0:
        train_fps = _get_training_fingerprints(dbpath, train_idx, print_file,
                                               use_con_mat=True)
    else:
        # pre-allocate result list; worker threads fill disjoint slices
        train_fps = {'fingerprints': [None for _ in range(len(train_idx))]}
        run_threaded(_get_training_fingerprints,
                     {'train_idx': train_idx},
                     {'dbpath': dbpath, 'use_bits': True, 'use_con_mat': True},
                     train_fps,
                     exclusive_kwargs={'print_file': print_file},
                     n_threads=n_threads)
    # group fingerprints by atom composition for fast candidate lookup
    train_fps_dict = _get_training_fingerprints_dict(train_fps['fingerprints'])
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    print(f'...{len(train_fps["fingerprints"])} fingerprints computed '
          f'in {h:d}h{m:02d}m{s:02d}s!')
    print('\nComparing fingerprints...')
    start_time = time.time()
    if n_threads <= 0:
        results = _compare_fingerprints(mols, train_fps_dict, train_idx,
                                        [len(val_idx), len(test_idx)],
                                        stats.T, stat_heads, print_file)
    else:
        results = {'stats': stats.T}
        run_threaded(_compare_fingerprints,
                     {'mols': mols, 'stats': stats.T},
                     {'train_idx': train_idx, 'train_fps': train_fps_dict,
                      'thresh': [len(val_idx), len(test_idx)],
                      'stat_heads': stat_heads, 'use_bits': True},
                     results,
                     exclusive_kwargs={'print_file': print_file},
                     n_threads=n_threads)
    stats = results['stats'].T
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    print(f'... needed {h:d}h{m:02d}m{s:02d}s.')
    # NOTE(review): 'new' deliberately(?) counts both unseen molecules (known=0)
    # and test-set matches (known=3) -- verify this is the intended metric
    print(f'Number of new molecules: '
          f'{sum(stats[idx_known] == 0)+sum(stats[idx_known] == 3)}')
    print(f'Number of molecules matching training data: '
          f'{sum(stats[idx_known] == 1)}')
    print(f'Number of molecules matching validation data: '
          f'{sum(stats[idx_known] == 2)}')
    print(f'Number of molecules matching test data: '
          f'{sum(stats[idx_known] == 3)}')
    return stats
def _get_training_fingerprints(dbpath, train_idx, print_file=True,
                               use_bits=False, use_con_mat=False):
    '''
    Compute fingerprint data (Open Babel FP2, canonical smiles, and atoms per
    type string) for every molecule of the training database referenced in
    train_idx, preserving the order of train_idx.
    Args:
        dbpath (str): path to the training database
        train_idx (list of int): database indices of the training, validation,
            and test molecules (assumed to be ordered as train_idx[0:n_train]
            -> training, train_idx[n_train:n_train+n_validation] -> validation,
            and the remainder -> test data)
        print_file (bool, optional): set True to suppress the progress output
            (default: True)
        use_bits (bool, optional): set True to store the non-zero bits of each
            fingerprint instead of the pybel.Fingerprint object (default: False)
        use_con_mat (bool, optional): set True to use pre-computed connectivity
            matrices stored in compressed format under the key 'con_mat' in the
            database (default: False)
    Returns:
        dict (str->list of tuple): dictionary holding, under the key
            'fingerprints', one tuple per requested molecule as returned by
            get_fingerprint (fingerprint, canonical smiles, atoms per type
            string), in the order given by train_idx
    '''
    fps = []
    decompressor = ConnectivityCompressor() if use_con_mat else None
    n_total = len(train_idx)
    with connect(dbpath) as conn:
        if not print_file:
            print('0.00%', end='\r', flush=True)
        for count, db_idx in enumerate(train_idx):
            # ase db rows are 1-based while split indices are 0-based
            row = conn.get(int(db_idx) + 1)
            atoms = row.toatoms()
            if use_con_mat:
                con_mat = decompressor.decompress(row.data['con_mat'])
            else:
                con_mat = None
            fps += [get_fingerprint(atoms.positions, atoms.numbers,
                                    use_bits, con_mat)]
            # update the progress line every 100 molecules and at the end
            if (count % 100 == 0 or count + 1 == n_total) and not print_file:
                print('\033[K', end='\r', flush=True)
                print(f'{100 * (count + 1) / n_total:.2f}%', end='\r',
                      flush=True)
    return {'fingerprints': fps}
def get_fingerprint(pos, atomic_numbers, use_bits=False, con_mat=None):
    '''
    Compute the molecular fingerprint (Open Babel FP2), canonical smiles
    representation, and number of atoms per type (e.g. H2O1) of a molecule.
    Args:
        pos (numpy.ndarray): positions of the atoms (n_atoms x 3)
        atomic_numbers (numpy.ndarray): types of the atoms (n_atoms)
        use_bits (bool, optional): set True to return the non-zero bits in the
            fingerprint instead of the pybel.Fingerprint object (default: False)
        con_mat (numpy.ndarray, optional): connectivity matrix of the molecule
            containing the pairwise bond orders between all atoms
            (n_atoms x n_atoms) (can be inferred automatically if not provided,
            default: None)
    Returns:
        pybel.Fingerprint or set of int: the fingerprint of the molecule or a set
            containing the non-zero bits of the fingerprint if use_bits=True
        str: the canonical smiles representation of the molecule
        str: the atom types contained in the molecule followed by number of
            atoms per type, e.g. H2C3O1, ordered by increasing atom type (nuclear
            charge)
    '''
    if con_mat is not None:
        # build the molecule from the provided bond orders
        # (bug fix: removed stray debug `print(mol)` that spammed stdout for
        # every molecule when a connectivity matrix was supplied)
        mol = Molecule(pos, atomic_numbers, con_mat)
        idc_lists = np.where(con_mat != 0)
        mol._update_bond_orders(idc_lists)
        mol = pybel.Molecule(mol.get_obmol())
    else:
        # let Open Babel infer the bonds from the raw geometry
        obmol = ob.OBMol()
        obmol.BeginModify()
        for p, n in zip(pos, atomic_numbers):
            obatom = obmol.NewAtom()
            obatom.SetAtomicNum(int(n))
            obatom.SetVector(*p.tolist())
        # infer bonds and bond order
        obmol.ConnectTheDots()
        obmol.PerceiveBondOrders()
        obmol.EndModify()
        mol = pybel.Molecule(obmol)
    # use pybel to get fingerprint
    if use_bits:
        return {*mol.calcfp().bits}, mol.write('can'), \
               _get_atoms_per_type_str(atomic_numbers)
    else:
        return mol.calcfp(), mol.write('can'), \
               _get_atoms_per_type_str(atomic_numbers)
def _get_training_fingerprints_dict(fps):
    '''
    Group a list of fingerprint tuples by atom composition.
    The atoms per type string (e.g. H2C3O1, ordered by increasing nuclear
    charge) serves as the dictionary key, which speeds up later comparisons
    since only molecules built from exactly the same atoms can be identical.
    Args:
        fps (list of tuple): tuples as returned by get_fingerprint (fingerprint,
            canonical smiles representation, atoms per type string)
    Returns:
        dict (str->list of tuple): for each atoms per type string, a list of
            tuples holding the fingerprint, the canonical smiles representation,
            and the position of the molecule in the input list
    '''
    grouped = {}
    for position, fp_tuple in enumerate(fps):
        # key is the atoms per type string (last element of the tuple);
        # the stored value replaces it with the molecule's list position
        grouped = _update_dict(grouped, key=fp_tuple[-1],
                               val=fp_tuple[:-1] + (position,))
    return grouped
def _compare_fingerprints(mols, train_fps, train_idx, thresh, stats,
                          stat_heads, print_file=True, use_bits=False,
                          max_heavy_atoms=9):
    '''
    Compare fingerprints of generated and training data molecules to update the
    statistics of the generated molecules (to which training/validation/test
    molecule it corresponds, if any).
    Args:
        mols (list of utility_classes.Molecule): generated molecules
        train_fps (dict (str->list of tuple)): dictionary with fingerprints of
            training/validation/test data as returned by _get_training_fingerprints_dict
        train_idx (list of int): list that maps the index of fingerprints in the
            train_fps dict to indices of the underlying training database (it is assumed
            that train_idx[0:n_train] corresponds to training data,
            train_idx[n_train:n_train+n_validation] corresponds to validation data,
            and train_idx[n_train+n_validation:] corresponds to test data)
        thresh (tuple of int): tuple containing the number of validation and test
            data molecules (n_validation, n_test)
        stats (numpy.ndarray): statistics of all generated molecules where columns
            correspond to molecules and rows correspond to available statistics
            (n_statistics x n_molecules)
        stat_heads (list of str): the names of the statistics stored in each row in
            stats (e.g. 'F' for the number of fluorine atoms or 'R5' for the number of
            rings of size 5)
        print_file (bool, optional): set True to limit printing (e.g. if it is
            redirected to a file instead of displayed in a terminal, default: True)
        use_bits (bool, optional): set True if the fingerprint is provided as a list of
            non-zero bits instead of the pybel.Fingerprint object (default: False)
        max_heavy_atoms (int, optional): the maximum number of heavy atoms in the
            training data set (i.e. 9 for qm9, default: 9)
    Returns:
        dict (str->numpy.ndarray): dictionary containing the updated statistics under
            the key 'stats'
    '''
    idx_known = stat_heads.index('known')
    idx_equals = stat_heads.index('equals')
    idx_val = stat_heads.index('valid')
    n_val_mols, n_test_mols = thresh
    # get indices of valid molecules (only those are compared)
    idcs = np.where(stats[:, idx_val] == 1)[0]
    if not print_file:
        print(f'0.00%', end='', flush=True)
    for i, idx in enumerate(idcs):
        mol = mols[idx]
        mol_key = _get_atoms_per_type_str(mol)
        # for now the molecule is considered to be new
        stats[idx, idx_known] = 0
        # skip molecules with more heavy (non-hydrogen) atoms than the dataset
        if np.sum(mol.numbers != 1) > max_heavy_atoms:
            continue  # cannot be in dataset
        # only molecules with the same atom composition can be identical
        if mol_key in train_fps:
            for fp_train in train_fps[mol_key]:
                # compare fingerprint
                if mol.tanimoto_similarity(fp_train[0], use_bits=use_bits) >= 1:
                    # compare canonical smiles representation
                    if (mol.get_can() == fp_train[1]
                            or mol.get_mirror_can() == fp_train[1]):
                        # store index of match
                        j = fp_train[-1]
                        stats[idx, idx_equals] = train_idx[j]
                        # train_idx is ordered [train, validation, test], so the
                        # position j determines the matched split
                        if j >= len(train_idx) - np.sum(thresh):
                            # NOTE(review): the strict '>' classifies the very
                            # first test index (j == len(train_idx) -
                            # n_test_mols) as validation -- possible off-by-one,
                            # verify the intended boundary
                            if j > len(train_idx) - n_test_mols:
                                stats[idx, idx_known] = 3  # equals test data
                            else:
                                stats[idx, idx_known] = 2  # equals validation data
                        else:
                            stats[idx, idx_known] = 1  # equals training data
                        break
        if not print_file:
            print('\033[K', end='\r', flush=True)
            print(f'{100 * (i + 1) / len(idcs):.2f}%', end='\r',
                  flush=True)
    if not print_file:
        print('\033[K', end='', flush=True)
    return {'stats': stats}
def collect_bond_and_ring_stats(mols, stats, stat_heads):
    '''
    Fill in bond and ring counts for a list of molecules.
    Only molecules flagged as 'valid' in the stats array are processed, and
    only those counts are stored for which stat_heads already provides a
    column (e.g. 'R3' for rings of size 3 or 'C1N' for single-bonded
    carbon-nitrogen pairs); all other counts reported by a molecule are
    silently dropped. The stats array is updated in place.
    Args:
        mols (list of utility_classes.Molecule): molecules to analyse
        stats (numpy.ndarray): statistics array (n_molecules x n_statistics,
            i.e. one row per molecule)
        stat_heads (list of str): names of the statistics stored in each
            column of stats
    Returns:
        dict (str->numpy.ndarray): the updated statistics array under 'stats'
    '''
    valid_col = stat_heads.index('valid')
    for row, molecule in enumerate(mols):
        # skip molecules that failed earlier filters
        if stats[row, valid_col] != 1:
            continue
        for name, count in molecule.get_bond_stats().items():
            # only keep counts that have a pre-existing column
            if name in stat_heads:
                stats[row, stat_heads.index(name)] = count
    return {'stats': stats}
if __name__ == '__main__':
    # Stand-alone filtering driver: reads generated molecules (a .mol_dict file
    # or a folder of such files), filters them by valency / connectedness /
    # uniqueness, optionally checks novelty against the training database, and
    # stores the surviving structures plus statistics on disk.
    parser = get_parser()
    args = parser.parse_args()
    print_file = args.print_file
    # read input file or fuse dictionaries if data_path is a folder
    if not os.path.isdir(args.data_path):
        if not os.path.isfile(args.data_path):
            print(f'\n\nThe specified data path ({args.data_path}) is neither a file '
                  f'nor a directory! Please specify a different data path.')
            raise FileNotFoundError
        else:
            with open(args.data_path, 'rb') as f:
                res = pickle.load(f)  # read input file
            target_db = os.path.join(os.path.dirname(args.data_path),
                                     'generated_molecules.db')
    else:
        print(f'\n\nFusing .mol_dict files in folder {args.data_path}...')
        mol_files = [f for f in os.listdir(args.data_path)
                     if f.endswith(".mol_dict")]
        if len(mol_files) == 0:
            print(f'Could not find any .mol_dict files at {args.data_path}! Please '
                  f'specify a different data path!')
            raise FileNotFoundError
        res = {}
        for file in mol_files:
            with open(os.path.join(args.data_path, file), 'rb') as f:
                cur_res = pickle.load(f)
                update_dict(res, cur_res)
        res = dict(sorted(res.items()))  # sort dictionary keys
        print(f'...done!')
        target_db = os.path.join(args.data_path, 'generated_molecules.db')
    # compute array with valence of provided atom types
    # (args.valence alternates atom type and valency, e.g. [1, 1, 6, 4, ...])
    max_type = max(args.valence[::2])
    valence = np.zeros(max_type+1, dtype=int)
    valence[args.valence[::2]] = args.valence[1::2]
    # print the chosen settings
    valence_str = ''
    for i in range(max_type+1):
        if valence[i] > 0:
            valence_str += f'type {i}: {valence[i]}, '
    # assemble a human-readable, comma/and-joined list of the active filters
    filters = []
    if 'valence' in args.filters:
        filters += ['valency']
    if 'disconnected' in args.filters:
        filters += ['connectedness']
    if 'unique' in args.filters:
        filters += ['uniqueness']
    if len(filters) >= 3:
        edit = ', '
    else:
        edit = ' '
    for i in range(len(filters) - 1):
        filters[i] = filters[i] + edit
    if len(filters) >= 2:
        filters = filters[:-1] + ['and '] + filters[-1:]
    string = ''.join(filters)
    print(valence_str)
    print(f'\n\n1. Filtering molecules according to {string}...')
    print(f'\nTarget valence:\n{valence_str[:-2]}\n')
    # initial setup of array for statistics and some counters
    n_generated = 0
    n_valid = 0
    n_non_unique = 0
    stat_heads = ['n_atoms', 'id', 'valid', 'duplicating', 'n_duplicates',
                  'known', 'equals', 'C', 'N', 'O', 'F', 'H', 'H1C', 'H1N',
                  'H1O', 'C1C', 'C2C', 'C3C', 'C1N', 'C2N', 'C3N', 'C1O',
                  'C2O', 'C1F', 'N1N', 'N2N', 'N1O', 'N2O', 'N1F', 'O1O',
                  'O1F', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R>8']
    stats = np.empty((len(stat_heads), 0))
    all_mols = []
    connectivity_compressor = ConnectivityCompressor()
    # construct connectivity matrix and fingerprints for filtering
    start_time = time.time()
    # res maps the molecule size (number of atoms) to a batch of molecules
    for n_atoms in res:
        if not isinstance(n_atoms, int) or n_atoms == 0:
            continue
        prog_str = lambda x: f'Checking {x} for molecules of length {n_atoms}'
        work_str = 'valence' if 'valence' in args.filters else 'dictionary'
        if not print_file:
            print('\033[K', end='\r', flush=True)
            print(prog_str(work_str) + ' (0.00%)', end='\r', flush=True)
        else:
            print(prog_str(work_str), flush=True)
        d = res[n_atoms]
        all_pos = d[Properties.R]
        all_numbers = d[Properties.Z]
        n_mols = len(all_pos)
        # check valency
        if args.threads <= 0:
            results = check_valency(all_pos, all_numbers, valence,
                                    'valence' in args.filters, print_file,
                                    prog_str(work_str))
        else:
            # pre-allocate result containers; worker threads fill disjoint parts
            results = {'connectivity': np.zeros((n_mols, n_atoms, n_atoms)),
                       'mols': [None for _ in range(n_mols)],
                       'valid': np.ones(n_mols, dtype=bool)}
            results = run_threaded(check_valency,
                                   {'positions': all_pos,
                                    'numbers': all_numbers},
                                   {'valence': valence,
                                    'filter_by_valency': 'valence' in args.filters,
                                    'picklable_mols': True,
                                    'prog_str': prog_str(work_str)},
                                   results,
                                   n_threads=args.threads,
                                   exclusive_kwargs={'print_file': print_file})
        connectivity = results['connectivity']
        mols = results['mols']
        valid = results['valid']
        # detect molecules with disconnected parts if desired
        if 'disconnected' in args.filters:
            if not print_file:
                print('\033[K', end='\r', flush=True)
                print(prog_str("connectedness")+'...', end='\r', flush=True)
            if args.threads <= 0:
                valid = remove_disconnected(connectivity, valid)['valid']
            else:
                results = {'valid': valid}
                run_threaded(remove_disconnected,
                             {'connectivity_batch': connectivity,
                              'valid': valid},
                             {},
                             results,
                             n_threads=args.threads)
                valid = results['valid']
        # identify molecules with identical fingerprints
        # (always computed so duplicate statistics are available, even when
        # the uniqueness filter is not active)
        if not print_file:
            print('\033[K', end='\r', flush=True)
            print(prog_str('uniqueness')+'...', end='\r', flush=True)
        if args.threads <= 0:
            still_valid, duplicating, duplicate_count = \
                filter_unique(mols, valid, use_bits=False)
        else:
            still_valid, duplicating, duplicate_count = \
                filter_unique_threaded(mols, valid,
                                       n_threads=args.threads,
                                       n_mols_per_thread=5,
                                       print_file=print_file,
                                       prog_str=prog_str('uniqueness'))
        n_non_unique += np.sum(duplicate_count)
        if 'unique' in args.filters:
            valid = still_valid  # remove non-unique from valid if desired
        # store connectivity matrices
        d.update({'connectivity': connectivity_compressor.compress_batch(connectivity),
                  'valid': valid})
        # collect statistics of generated data
        n_generated += len(valid)
        n_valid += np.sum(valid)
        # per-molecule atom counts for C, N, O, F, H (in stat_heads order)
        n_of_types = [np.sum(all_numbers == i, axis=1) for i in
                      [6, 7, 8, 9, 1]]
        stats_new = np.stack(
            (np.ones(len(valid)) * n_atoms,  # n_atoms
             np.arange(0, len(valid)),  # id
             valid,  # valid
             duplicating,  # id of duplicated molecule
             duplicate_count,  # number of duplicates
             -np.ones(len(valid)),  # known
             -np.ones(len(valid)),  # equals
             *n_of_types,  # n_atoms per type
             *np.zeros((19, len(valid))),  # n_bonds per type pairs
             *np.zeros((7, len(valid)))  # ring counts for 3-8 & >8
             ),
            axis=0)
        stats = np.hstack((stats, stats_new))
        all_mols += mols
    if not print_file:
        print('\033[K', end='\r', flush=True)
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    print(f'Needed {h:d}h{m:02d}m{s:02d}s.')
    # fill in bond and ring counts for all valid molecules
    if args.threads <= 0:
        results = collect_bond_and_ring_stats(all_mols, stats.T, stat_heads)
    else:
        results = {'stats': stats.T}
        run_threaded(collect_bond_and_ring_stats,
                     {'mols': all_mols, 'stats': stats.T},
                     {'stat_heads': stat_heads},
                     results=results,
                     n_threads=args.threads)
    stats = results['stats'].T
    # store statistics
    res.update({'n_generated': n_generated,
                'n_valid': n_valid,
                'stats': stats,
                'stat_heads': stat_heads})
    print(f'Number of generated molecules: {n_generated}\n'
          f'Number of duplicate molecules: {n_non_unique}')
    if 'unique' in args.filters:
        print(f'Number of unique and valid molecules: {n_valid}')
    else:
        print(f'Number of valid molecules (including duplicates): {n_valid}')
    # filter molecules which were seen during training
    if args.model_path is not None:
        stats = filter_new(all_mols, stats, stat_heads, args.model_path,
                           args.train_data_path, print_file=print_file,
                           n_threads=args.threads)
        res.update({'stats': stats})
    # shrink results dictionary (remove invalid attempts, known molecules and
    # connectivity matrices if desired)
    if args.store != 'all':
        shrunk_res = {}
        shrunk_stats = np.empty((len(stats), 0))
        i = 0  # running offset of the current size-batch inside stats
        for key in res:
            if isinstance(key, str):
                # keep metadata entries (e.g. 'stats', 'n_valid') unchanged
                shrunk_res[key] = res[key]
                continue
            if key == 0:
                continue
            d = res[key]
            start = i
            end = i + len(d['valid'])
            idcs = np.where(d['valid'])[0]
            if len(idcs) < 1:
                i = end
                continue
            # shrink stats
            idx_id = stat_heads.index('id')
            idx_known = stat_heads.index('known')
            new_stats = stats[:, start:end]
            if 'new' in args.store and args.model_path is not None:
                # additionally drop molecules already known from training data
                idcs = idcs[np.where(new_stats[idx_known, idcs] == 0)[0]]
            new_stats = new_stats[:, idcs]
            new_stats[idx_id] = np.arange(len(new_stats[idx_id]))  # adjust ids
            shrunk_stats = np.hstack((shrunk_stats, new_stats))
            # shrink positions and atomic numbers
            shrunk_res[key] = {Properties.R: d[Properties.R][idcs],
                               Properties.Z: d[Properties.Z][idcs]}
            # store connectivity matrices if desired
            if 'connectivity' in args.store:
                shrunk_res[key].update(
                    {'connectivity': [d['connectivity'][k] for k in idcs]})
            i = end
        shrunk_res['stats'] = shrunk_stats
        res = shrunk_res
    # store results in new database
    # get filename that is not yet taken for db
    if os.path.isfile(target_db):
        file_name, _ = os.path.splitext(target_db)
        expand = 0
        while True:
            expand += 1
            new_file_name = file_name + '_' + str(expand)
            if os.path.isfile(new_file_name + '.db'):
                continue
            else:
                target_db = new_file_name + '.db'
                break
    # open db
    with connect(target_db) as conn:
        # store metadata
        conn.metadata = {'n_generated': int(n_generated),
                         'n_non_unique': int(n_non_unique),
                         'n_valid': int(n_valid),
                         'non_unique_removed_from_valid': 'unique' in args.filters}
        # store molecules
        for n_atoms in res:
            if isinstance(n_atoms, str) or n_atoms == 0:
                continue
            d = res[n_atoms]
            all_pos = d[Properties.R]
            all_numbers = d[Properties.Z]
            all_con_mats = d['connectivity']
            for pos, num, con_mat in zip(all_pos, all_numbers, all_con_mats):
                at = Atoms(num, positions=pos)
                conn.write(at, data={'con_mat': con_mat})
    # store gathered statistics in separate file
    np.savez_compressed(os.path.splitext(target_db)[0] + f'_statistics.npz',
                        stats=res['stats'], stat_heads=res['stat_heads'])
    # print average atom, bond, and ring count statistics of generated molecules
    # stored in the database and of the training molecules
    # NOTE(review): the three bare prints below look like debug leftovers, and
    # args.train_path may not exist (args.train_data_path is used everywhere
    # else) -- verify against get_parser
    print(target_db)
    print(args.model_path)
    print(args.train_path)
    print_atom_bond_ring_stats(target_db, args.model_path, args.train_data_path)
| Python |
3D | rhyan10/G-SchNetOE62 | gschnet_script.py | .py | 36,197 | 772 | import argparse
import logging
import os
import pickle
import time
from shutil import copyfile, rmtree
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data.sampler import RandomSampler
from ase import Atoms
import ase.visualize as asv
import schnetpack as spk
from schnetpack.utils import count_params, to_json, read_from_json
from schnetpack import Properties
from schnetpack.datasets import DownloadableAtomsData
from nn_classes import AtomwiseWithProcessing, EmbeddingMultiplication,\
NormalizeAndAggregate, KLDivergence
from utility_functions import boolean_string, collate_atoms, generate_molecules, \
update_dict, get_dict_count
# add your own dataset classes here:
from qm9_data import QM9gen
from template_data import TemplateData
# maps the value accepted by the --dataset_name CLI argument to the
# corresponding dataset class (extend this dict to register new datasets)
dataset_name_to_class_mapping = {'qm9': QM9gen,
                                 'template_data': TemplateData}
# log level can be overridden via the LOGLEVEL environment variable
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
def get_parser():
    """ Setup parser for command line arguments

    Builds a three-level argparse hierarchy: a mode sub-command
    ('train' / 'eval' / 'generate') followed by a model sub-command
    ('gschnet'). Options shared by all modes live in cmd_parser and are
    inherited by the mode-specific parsers via argparse's `parents`
    mechanism.

    Returns:
        argparse.ArgumentParser: the fully assembled main parser
    """
    main_parser = argparse.ArgumentParser()
    ## command-specific
    # options shared by all three sub-commands (train/eval/generate)
    cmd_parser = argparse.ArgumentParser(add_help=False)
    cmd_parser.add_argument('--cuda', help='Set flag to use GPU(s)',
                            action='store_true')
    cmd_parser.add_argument('--parallel',
                            help='Run data-parallel on all available GPUs '
                                 '(specify with environment variable'
                                 + ' CUDA_VISIBLE_DEVICES)',
                            action='store_true')
    cmd_parser.add_argument('--batch_size', type=int,
                            help='Mini-batch size for training and prediction '
                                 '(default: %(default)s)',
                            default=5)
    cmd_parser.add_argument('--draw_random_samples', type=int, default=0,
                            help='Only draw x generation steps per molecule '
                                 'in each batch (if x=0, all generation '
                                 'steps are included for each molecule,'
                                 'default: %(default)s)')
    cmd_parser.add_argument('--checkpoint', type=int, default=-1,
                            help='The checkpoint of the model that is going '
                                 'to be loaded for evaluation or generation '
                                 '(set to -1 to load the best model '
                                 'according to validation error, '
                                 'default: %(default)s)')
    cmd_parser.add_argument('--precompute_distances', type=boolean_string,
                            default='true',
                            help='Store precomputed distances in the database '
                                 'during pre-processing (caution, has no effect if '
                                 'the dataset has already been downloaded, '
                                 'pre-processed, and stored before, '
                                 'default: %(default)s)')
    ## training
    train_parser = argparse.ArgumentParser(add_help=False,
                                           parents=[cmd_parser])
    train_parser.add_argument('datapath',
                              help='Path / destination of dataset '\
                                   'directory')
    train_parser.add_argument('modelpath',
                              help='Destination for models and logs')
    # NOTE(review): the trailing comma after the next call makes the statement
    # a harmless 1-tuple expression
    train_parser.add_argument('--dataset_name', type=str, default='qm9',
                              help=f'Name of the dataset used (choose from '
                                   f'{list(dataset_name_to_class_mapping.keys())}, '
                                   f'default: %(default)s)'),
    train_parser.add_argument('--subset_path', type=str,
                              help='A path to a npy file containing indices '
                                   'of a subset of the data set at datapath '
                                   '(default: %(default)s)',
                              default=None)
    train_parser.add_argument('--seed', type=int, default=None,
                              help='Set random seed for torch and numpy.')
    train_parser.add_argument('--overwrite',
                              help='Remove previous model directory.',
                              action='store_true')
    train_parser.add_argument('--pretrained_path',
                              help='Start training from the pre-trained model at the '
                                   'provided path (reset optimizer parameters such as '
                                   'best loss and learning rate and create new split)',
                              default=None)
    train_parser.add_argument('--split_path',
                              help='Path/destination of npz with data splits',
                              default=None)
    train_parser.add_argument('--split',
                              help='Split into [train] [validation] and use '
                                   'remaining for testing',
                              type=int, nargs=2, default=[None, None])
    train_parser.add_argument('--max_epochs', type=int,
                              help='Maximum number of training epochs '
                                   '(default: %(default)s)',
                              default=500)
    train_parser.add_argument('--lr', type=float,
                              help='Initial learning rate '
                                   '(default: %(default)s)',
                              default=1e-4)
    train_parser.add_argument('--lr_patience', type=int,
                              help='Epochs without improvement before reducing'
                                   ' the learning rate (default: %(default)s)',
                              default=10)
    train_parser.add_argument('--lr_decay', type=float,
                              help='Learning rate decay '
                                   '(default: %(default)s)',
                              default=0.5)
    train_parser.add_argument('--lr_min', type=float,
                              help='Minimal learning rate '
                                   '(default: %(default)s)',
                              default=1e-6)
    train_parser.add_argument('--logger',
                              help='Choose logger for training process '
                                   '(default: %(default)s)',
                              choices=['csv', 'tensorboard'],
                              default='tensorboard')
    train_parser.add_argument('--log_every_n_epochs', type=int,
                              help='Log metrics every given number of epochs '
                                   '(default: %(default)s)',
                              default=1)
    train_parser.add_argument('--checkpoint_every_n_epochs', type=int,
                              help='Create checkpoint every given number of '
                                   'epochs'
                                   '(default: %(default)s)',
                              default=25)
    train_parser.add_argument('--label_width_factor', type=float,
                              help='A factor that is multiplied with the '
                                   'range between two distance bins in order '
                                   'to determine the width of the Gaussians '
                                   'used to obtain labels from distances '
                                   '(set to 0. to use one-hot '
                                   'encodings of distances as labels, '
                                   'default: %(default)s)',
                              default=0.1)
    ## evaluation
    eval_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])
    eval_parser.add_argument('datapath', help='Path of dataset directory')
    eval_parser.add_argument('modelpath', help='Path of stored model')
    eval_parser.add_argument('--split',
                             help='Evaluate trained model on given split',
                             choices=['train', 'validation', 'test'],
                             default=['test'], nargs='+')
    ## molecule generation
    gen_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])
    gen_parser.add_argument('modelpath', help='Path of stored model')
    gen_parser.add_argument('amount_gen', type=int,
                            help='The amount of generated molecules')
    gen_parser.add_argument('--show_gen',
                            help='Whether to open plots of generated '
                                 'molecules for visual evaluation',
                            action='store_true')
    gen_parser.add_argument('--chunk_size', type=int,
                            help='The size of mini batches during generation '
                                 '(default: %(default)s)',
                            default=1000)
    gen_parser.add_argument('--max_length', type=int,
                            help='The maximum number of atoms per molecule '
                                 '(default: %(default)s)',
                            default=100)
    gen_parser.add_argument('--file_name', type=str,
                            help='The name of the file in which generated '
                                 'molecules are stored (please note that '
                                 'increasing numbers are appended to the file name '
                                 'if it already exists and that the extension '
                                 '.mol_dict is automatically added to the chosen '
                                 'file name, default: %(default)s)',
                            default='generated')
    gen_parser.add_argument('--store_unfinished',
                            help='Store molecules which have not been '
                                 'finished after sampling max_length atoms',
                            action='store_true')
    gen_parser.add_argument('--print_file',
                            help='Use to limit the printing if results are '
                                 'written to a file instead of the console ('
                                 'e.g. if running on a cluster)',
                            action='store_true')
    gen_parser.add_argument('--temperature', type=float,
                            help='The temperature T to use for sampling '
                                 '(default: %(default)s)',
                            default=0.1)
    # model-specific parsers
    model_parser = argparse.ArgumentParser(add_help=False)
    model_parser.add_argument('--aggregation_mode', type=str, default='sum',
                              choices=['sum', 'avg'],
                              help=' (default: %(default)s)')
    ####### G-SchNet #######
    # architecture hyper-parameters of the G-SchNet model
    gschnet_parser = argparse.ArgumentParser(add_help=False,
                                             parents=[model_parser])
    gschnet_parser.add_argument('--features', type=int,
                                help='Size of atom-wise representation '
                                     '(default: %(default)s)',
                                default=128)
    gschnet_parser.add_argument('--interactions', type=int,
                                help='Number of regular SchNet interaction '
                                     'blocks (default: %(default)s)',
                                default=9)
    gschnet_parser.add_argument('--cutoff', type=float, default=10.,
                                help='Cutoff radius of local environment '
                                     '(default: %(default)s)')
    gschnet_parser.add_argument('--num_gaussians', type=int, default=25,
                                help='Number of Gaussians to expand distances '
                                     '(default: %(default)s)')
    gschnet_parser.add_argument('--max_distance', type=float, default=15.,
                                help='Maximum distance covered by the discrete '
                                     'distributions over distances learned by '
                                     'the model '
                                     '(default: %(default)s)')
    gschnet_parser.add_argument('--num_distance_bins', type=int, default=300,
                                help='Number of bins used in the discrete '
                                     'distributions over distances learned by '
                                     'the model(default: %(default)s)')
    gschnet_parser.add_argument('--use_embeddings_for_type_predictions',
                                help='Copy extracted features and multiply them with '
                                     'embeddings of all possible types to obtain '
                                     'scores.',
                                action='store_true')
    gschnet_parser.add_argument('--share_embeddings',
                                help='Share embedding layers in SchNet part and in '
                                     'pre-processing before predicting distances and '
                                     'types.',
                                action='store_true')
    ## setup subparser structure
    # level 1: the mode ('train' / 'eval' / 'generate')
    cmd_subparsers = main_parser.add_subparsers(dest='mode',
                                                help='Command-specific '
                                                     'arguments')
    cmd_subparsers.required = True
    subparser_train = cmd_subparsers.add_parser('train', help='Training help')
    subparser_eval = cmd_subparsers.add_parser('eval', help='Eval help')
    subparser_gen = cmd_subparsers.add_parser('generate', help='Generate help')
    # level 2: the model (currently only 'gschnet')
    train_subparsers = subparser_train.add_subparsers(dest='model',
                                                      help='Model-specific '
                                                           'arguments')
    train_subparsers.required = True
    train_subparsers.add_parser('gschnet', help='G-SchNet help',
                                parents=[train_parser, gschnet_parser])
    eval_subparsers = subparser_eval.add_subparsers(dest='model',
                                                    help='Model-specific '
                                                         'arguments')
    eval_subparsers.required = True
    eval_subparsers.add_parser('gschnet', help='G-SchNet help',
                               parents=[eval_parser, gschnet_parser])
    gen_subparsers = subparser_gen.add_subparsers(dest='model',
                                                  help='Model-specific '
                                                       'arguments')
    gen_subparsers.required = True
    gen_subparsers.add_parser('gschnet', help='G-SchNet help',
                              parents=[gen_parser, gschnet_parser])
    return main_parser
def get_model(args, parallelize=False):
    '''
    Build the G-SchNet model from the provided (training) arguments.

    Args:
        args (argparse.Namespace): arguments defining the architecture
            (features, interactions, cutoff, ...) and the dataset name,
            which determines the available atom types
        parallelize (bool, optional): wrap the model in nn.DataParallel so
            batches are spread over all visible GPUs (default: False)

    Returns:
        torch.nn.Module: the assembled G-SchNet model
    '''
    # atom types available in the chosen dataset
    data_cls = dataset_name_to_class_mapping[args.dataset_name]
    n_types = len(data_cls.available_atom_types)
    z_max = max(data_cls.available_atom_types)
    # SchNet feature extractor (max_z leaves room for the stop/start tokens)
    schnet = spk.representation.SchNet(n_atom_basis=args.features,
                                       n_filters=args.features,
                                       n_interactions=args.interactions,
                                       cutoff=args.cutoff,
                                       n_gaussians=args.num_gaussians,
                                       max_z=z_max + 3)
    # embedding layer used when multiplying features with type embeddings
    if args.share_embeddings:
        embedding = schnet.embedding
    else:
        embedding = nn.Embedding(z_max + 3, args.features, padding_idx=0)
    # output head that predicts the type of the next atom
    if args.use_embeddings_for_type_predictions:
        type_pre = EmbeddingMultiplication(embedding,
                                           in_key_types='_all_types',
                                           in_key_representation='representation',
                                           out_key='preprocessed_representation')
        n_type_out = 1
    else:
        type_pre = None
        n_type_out = n_types + 1  # one score per type + stop token
    type_post = NormalizeAndAggregate(normalize=True,
                                      normalization_axis=-1,
                                      normalization_mode='logsoftmax',
                                      aggregate=True,
                                      aggregation_axis=-2,
                                      aggregation_mode='sum',
                                      keepdim=False,
                                      mask='_type_mask',
                                      squeeze=True)
    type_head = AtomwiseWithProcessing(n_in=args.features,
                                       n_out=n_type_out,
                                       n_layers=5,
                                       preprocess_layers=type_pre,
                                       postprocess_layers=type_post,
                                       out_key='type_predictions')
    # output head that predicts distance distributions for the next atom
    dist_pre = EmbeddingMultiplication(embedding,
                                       in_key_types='_next_types',
                                       in_key_representation='representation',
                                       out_key='preprocessed_representation')
    dist_head = AtomwiseWithProcessing(n_in=args.features,
                                       n_out=args.num_distance_bins,
                                       n_layers=5,
                                       preprocess_layers=dist_pre,
                                       out_key='distance_predictions')
    # combine feature extractor and both output heads into one model
    net = spk.atomistic.AtomisticModel(schnet, [type_head, dist_head])
    if parallelize:
        net = nn.DataParallel(net)
    logging.info("The model you built has: %d parameters" % count_params(net))
    return net
def train(args, model, train_loader, val_loader, device):
    """Train the G-SchNet model.

    Sets up logging hooks, an Adam optimizer with a reduce-on-plateau
    learning rate schedule, and a KL-divergence loss over the predicted
    type and distance distributions, then runs the schnetpack Trainer.

    Args:
        args (argparse.Namespace): training arguments (modelpath, lr, ...)
        model (torch.nn.Module): the G-SchNet model to train
        train_loader: loader yielding training batches
        val_loader: loader yielding validation batches
        device (torch.device): device used for training
    """
    # setup hooks and logging
    hooks = [
        spk.hooks.MaxEpochHook(args.max_epochs)
    ]
    # filter for trainable parameters
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    # setup optimizer
    optimizer = Adam(trainable_params, lr=args.lr)
    # learning rate schedule: reduce on plateau of the validation loss
    schedule = spk.hooks.ReduceLROnPlateauHook(optimizer,
                                               patience=args.lr_patience,
                                               factor=args.lr_decay,
                                               min_lr=args.lr_min,
                                               window_length=1,
                                               stop_after_min=True)
    hooks.append(schedule)
    # set up metrics to log KL divergence on distributions of types and distances
    metrics = [KLDivergence(target='_type_labels',
                            model_output='type_predictions',
                            name='KLD_types'),
               KLDivergence(target='_labels',
                            model_output='distance_predictions',
                            mask='_dist_mask',
                            name='KLD_dists')]
    if args.logger == 'csv':
        logger =\
            spk.hooks.CSVHook(os.path.join(args.modelpath, 'log'),
                              metrics,
                              every_n_epochs=args.log_every_n_epochs)
        hooks.append(logger)
    elif args.logger == 'tensorboard':
        logger =\
            spk.hooks.TensorboardHook(os.path.join(args.modelpath, 'log'),
                                      metrics,
                                      every_n_epochs=args.log_every_n_epochs)
        hooks.append(logger)
    # layers reused inside the loss closure (log-softmax + pointwise KLD)
    norm_layer = nn.LogSoftmax(-1).to(device)
    loss_layer = nn.KLDivLoss(reduction='none').to(device)
    # setup loss function
    def loss(batch, result):
        # loss for type predictions (KLD)
        out_type = norm_layer(result['type_predictions'])
        loss_type = loss_layer(out_type, batch['_type_labels'])
        loss_type = torch.sum(loss_type, -1)
        loss_type = torch.mean(loss_type)
        # loss for distance predictions (KLD)
        mask_dist = batch['_dist_mask']
        N = torch.sum(mask_dist)
        out_dist = norm_layer(result['distance_predictions'])
        loss_dist = loss_layer(out_dist, batch['_labels'])
        loss_dist = torch.sum(loss_dist, -1)
        # average only over unmasked entries (max with 1 avoids division by 0)
        loss_dist = torch.sum(loss_dist * mask_dist) / torch.max(N, torch.ones_like(N))
        return loss_type + loss_dist
    # initialize trainer
    trainer = spk.train.Trainer(args.modelpath,
                                model,
                                loss,
                                optimizer,
                                train_loader,
                                val_loader,
                                hooks=hooks,
                                checkpoint_interval=args.checkpoint_every_n_epochs,
                                keep_n_checkpoints=10)
    # reset optimizer and hooks if starting from pre-trained model (e.g. for
    # fine-tuning)
    if args.pretrained_path is not None:
        logging.info('starting from pre-trained model...')
        # reset epoch and step
        trainer.epoch = 0
        trainer.step = 0
        trainer.best_loss = float('inf')
        # reset optimizer
        trainable_params = filter(lambda p: p.requires_grad, model.parameters())
        optimizer = Adam(trainable_params, lr=args.lr)
        trainer.optimizer = optimizer
        # reset scheduler
        schedule =\
            spk.hooks.ReduceLROnPlateauHook(optimizer,
                                            patience=args.lr_patience,
                                            factor=args.lr_decay,
                                            min_lr=args.lr_min,
                                            window_length=1,
                                            stop_after_min=True)
        # NOTE: relies on the schedule hook sitting at index 1 of the hooks
        # list built above (right after MaxEpochHook) — keep in sync if the
        # hook setup changes
        trainer.hooks[1] = schedule
        # remove checkpoints of pre-trained model
        rmtree(os.path.join(args.modelpath, 'checkpoints'))
        os.makedirs(os.path.join(args.modelpath, 'checkpoints'))
        # store first checkpoint
        trainer.store_checkpoint()
    # start training
    trainer.train(device)
def evaluate(args, model, train_loader, val_loader, test_loader, device):
    '''
    Evaluate the model on the requested data splits and write the KL
    divergence scores to <modelpath>/evaluation.csv (a copy of the raw
    score table is additionally stored in ./prediction.npz).

    Args:
        args (argparse.Namespace): parsed arguments (args.split selects the
            evaluated subsets, args.modelpath the output directory)
        model (torch.nn.Module): the trained G-SchNet model
        train_loader / val_loader / test_loader: loaders for the data splits
        device (torch.device): device used for evaluation
    '''
    metrics = [KLDivergence(target='_labels',
                            model_output='distance_predictions',
                            mask='_dist_mask'),
               KLDivergence(target='_type_labels',
                            model_output='type_predictions')]
    rows = []
    # evaluate the requested subsets in a fixed order (train, validation, test)
    for split_key, row_label, loader in (('train', 'training', train_loader),
                                         ('validation', 'validation', val_loader),
                                         ('test', 'test', test_loader)):
        if split_key not in args.split:
            continue
        scores = evaluate_dataset(metrics, model, loader, device)
        rows.append([row_label] + ['%.5f' % score for score in scores])
    rows = np.array(rows)
    np.savez("prediction.npz", rows)
    np.savetxt(os.path.join(args.modelpath, 'evaluation.csv'), rows,
               header=','.join(['Subset', 'distances KLD', 'types KLD']),
               fmt='%s', delimiter=',')
def evaluate_dataset(metrics, model, loader, device):
    '''
    Run the model over all batches of a loader and aggregate metrics.

    Args:
        metrics (list): metric objects providing reset/add_batch/aggregate
        model (callable): model mapping a batch dict to a result dict
        loader (iterable): yields batch dictionaries of tensors
        device: device the batch tensors are moved to before the forward pass

    Returns:
        list: one aggregated value per metric, in input order
    '''
    for metric in metrics:
        metric.reset()
    for batch in loader:
        # move every tensor of the batch to the evaluation device
        moved = {key: tensor.to(device) for key, tensor in batch.items()}
        prediction = model(moved)
        for metric in metrics:
            metric.add_batch(moved, prediction)
    return [metric.aggregate() for metric in metrics]
def generate(args, train_args, model, device):
    """Sample new molecules with a trained G-SchNet model.

    Molecules are generated in chunks of at most args.chunk_size while
    progress is printed, and are collected in a dictionary keyed by the
    number of atoms per molecule.

    Args:
        args (argparse.Namespace): generation arguments (amount_gen,
            chunk_size, max_length, temperature, ...)
        train_args (argparse.Namespace): arguments the model was trained
            with (provide max_distance, num_distance_bins, dataset_name)
        model (torch.nn.Module): the trained G-SchNet model
        device (torch.device): device used for sampling

    Returns:
        dict: generated molecules sorted by size (number of atoms as keys)
    """
    # generate molecules (in chunks) and print progress
    dataclass = dataset_name_to_class_mapping[train_args.dataset_name]
    types = sorted(dataclass.available_atom_types)  # retrieve available atom types
    all_types = types + [types[-1] + 1]  # add stop token to list (largest type + 1)
    start_token = types[-1] + 2  # define start token (largest type + 2)
    amount = args.amount_gen
    chunk_size = args.chunk_size
    if chunk_size >= amount:
        chunk_size = amount
    # set parameters for printing progress
    # progress is reported roughly every 10% (or once per chunk if chunks
    # are larger than that)
    if int(amount / 10.) < chunk_size:
        step = chunk_size
    else:
        step = int(amount / 10.)
    increase = lambda x, y: y + step if x >= y else y
    thresh = step
    if args.print_file:
        progress = lambda x, y: print(f'Generated {x}.', flush=True) \
            if x >= y else print('', end='', flush=True)
    else:
        progress = lambda x, y: print(f'\x1b[2K\rSuccessfully generated'
                                      f' {x}', end='', flush=True)
    # generate
    generated = {}
    left = args.amount_gen
    done = 0
    start_time = time.time()
    with torch.no_grad():
        while left > 0:
            if left - chunk_size < 0:
                batch = left
            else:
                batch = chunk_size
            update_dict(generated,
                        generate_molecules(
                            batch,
                            model,
                            all_types=all_types,
                            start_token=start_token,
                            max_length=args.max_length,
                            save_unfinished=args.store_unfinished,
                            device=device,
                            max_dist=train_args.max_distance,
                            n_bins=train_args.num_distance_bins,
                            radial_limits=dataclass.radial_limits,
                            t=args.temperature)
                        )
            left -= batch
            done += batch
            # count molecules generated so far across all size classes
            n = np.sum(get_dict_count(generated, args.max_length))
            progress(n, thresh)
            thresh = increase(n, thresh)
    print('')
    end_time = time.time() - start_time
    m, s = divmod(end_time, 60)
    h, m = divmod(m, 60)
    h, m, s = int(h), int(m), int(s)
    print(f'Time consumed: {h:d}:{m:02d}:{s:02d}')
    # sort keys in resulting dictionary
    generated = dict(sorted(generated.items()))
    # show generated molecules and print some statistics if desired
    if args.show_gen:
        ats = []
        n_total_atoms = 0
        n_molecules = 0
        for key in generated:
            n = 0
            # key is the number of atoms; each entry holds all molecules of
            # that size
            for i in range(len(generated[key][Properties.Z])):
                at = Atoms(generated[key][Properties.Z][i],
                           positions=generated[key][Properties.R][i])
                ats += [at]
                n += 1
                n_molecules += 1
            n_total_atoms += n * key
        asv.view(ats)
        print(f'Total number of atoms placed: {n_total_atoms} '
              f'(avg {n_total_atoms / n_molecules:.2f})', flush=True)
    return generated
def main(args):
    """Entry point: dispatch to training, evaluation, or molecule generation.

    Args:
        args (argparse.Namespace): parsed command line arguments (see
            ``get_parser``); ``args.mode`` selects the executed branch.
    """
    # set device (cpu or gpu)
    device = torch.device('cuda' if args.cuda else 'cpu')
    # store (or load) arguments
    argparse_dict = vars(args)
    jsonpath = os.path.join(args.modelpath, 'args.json')
    if args.mode == 'train':
        # overwrite existing model if desired
        if args.overwrite and os.path.exists(args.modelpath):
            rmtree(args.modelpath)
            logging.info('existing model will be overwritten...')
        # create model directory if it does not exist
        if not os.path.exists(args.modelpath):
            os.makedirs(args.modelpath)
        # get latest checkpoint of pre-trained model if a path was provided
        if args.pretrained_path is not None:
            model_chkpt_path = os.path.join(args.modelpath, 'checkpoints')
            pretrained_chkpt_path = os.path.join(args.pretrained_path, 'checkpoints')
            if os.path.exists(model_chkpt_path) \
                    and len(os.listdir(model_chkpt_path)) > 0:
                logging.info(f'found existing checkpoints in model directory '
                             f'({model_chkpt_path}), please use --overwrite or choose '
                             f'empty model directory to start from a pre-trained '
                             f'model...')
                logging.warning(f'will ignore pre-trained model and start from latest '
                                f'checkpoint at {model_chkpt_path}...')
                args.pretrained_path = None
            else:
                logging.info(f'fetching latest checkpoint from pre-trained model at '
                             f'{pretrained_chkpt_path}...')
                if not os.path.exists(pretrained_chkpt_path):
                    logging.warning(f'did not find checkpoints of pre-trained model, '
                                    f'will train from scratch...')
                    args.pretrained_path = None
                else:
                    chkpt_files = [f for f in os.listdir(pretrained_chkpt_path)
                                   if f.startswith("checkpoint")]
                    if len(chkpt_files) == 0:
                        logging.warning(f'did not find checkpoints of pre-trained '
                                        f'model, will train from scratch...')
                        args.pretrained_path = None
                    else:
                        # copy the latest checkpoint (highest epoch number)
                        # into the new model directory
                        epoch = max([int(f.split(".")[0].split("-")[-1])
                                     for f in chkpt_files])
                        chkpt = os.path.join(pretrained_chkpt_path,
                                             "checkpoint-" + str(epoch) + ".pth.tar")
                        if not os.path.exists(model_chkpt_path):
                            os.makedirs(model_chkpt_path)
                        copyfile(chkpt, os.path.join(model_chkpt_path,
                                                     f'checkpoint-{epoch}.pth.tar'))
        # store arguments for training in model directory
        to_json(jsonpath, argparse_dict)
        train_args = args
        # set seed
        spk.utils.set_random_seed(args.seed)
    else:
        # load arguments used for training from model directory
        train_args = read_from_json(jsonpath)
    # load data for training/evaluation
    if args.mode in ['train', 'eval']:
        # find correct data class
        # FIX: the message formerly read `train_args.dataset`, an attribute
        # that does not exist (the argument is --dataset_name); a failing
        # assert would have raised AttributeError instead of this message
        assert train_args.dataset_name in dataset_name_to_class_mapping, \
            f'Could not find data class for dataset {train_args.dataset_name}. ' \
            f'Please specify a correct dataset name!'
        dataclass = dataset_name_to_class_mapping[train_args.dataset_name]
        # load the dataset
        logging.info(f'{train_args.dataset_name} will be loaded...')
        subset = None
        if train_args.subset_path is not None:
            logging.info(f'Using subset from {train_args.subset_path}')
            subset = np.load(train_args.subset_path)
            subset = [int(i) for i in subset]
        if issubclass(dataclass, DownloadableAtomsData):
            data = dataclass(args.datapath, subset=subset,
                             precompute_distances=args.precompute_distances,
                             download=True if args.mode == 'train' else False)
        else:
            data = dataclass(args.datapath, subset=subset,
                             precompute_distances=args.precompute_distances)
        # splits the dataset in test, val, train sets
        split_path = os.path.join(args.modelpath, 'split.npz')
        if args.mode == 'train':
            if args.split_path is not None:
                copyfile(args.split_path, split_path)
        logging.info('create splits...')
        data_train, data_val, data_test = data.create_splits(*train_args.split,
                                                             split_file=split_path)
        logging.info('load data...')
        types = sorted(dataclass.available_atom_types)
        max_type = types[-1]
        # set up collate function according to args
        # (max_type+1 serves as stop token, max_type+2 as start token)
        collate = lambda x: \
            collate_atoms(x,
                          all_types=types + [max_type+1],
                          start_token=max_type+2,
                          draw_samples=args.draw_random_samples,
                          label_width_scaling=train_args.label_width_factor,
                          max_dist=train_args.max_distance,
                          n_bins=train_args.num_distance_bins)
        train_loader = spk.data.AtomsLoader(data_train, batch_size=args.batch_size,
                                            sampler=RandomSampler(data_train),
                                            num_workers=4, pin_memory=True,
                                            collate_fn=collate)
        val_loader = spk.data.AtomsLoader(data_val, batch_size=args.batch_size,
                                          num_workers=2, pin_memory=True,
                                          collate_fn=collate)
    # construct the model
    if args.mode == 'train' or args.checkpoint >= 0:
        model = get_model(train_args, parallelize=args.parallel)
    logging.info(f'running on {device}')
    # load model or checkpoint for evaluation or generation
    if args.mode in ['eval', 'generate']:
        if args.checkpoint < 0:  # load best model
            logging.info('restoring best model')
            model = torch.load(os.path.join(args.modelpath, 'best_model')).to(device)
        else:
            logging.info(f'restoring checkpoint {args.checkpoint}')
            chkpt = os.path.join(args.modelpath, 'checkpoints',
                                 'checkpoint-' + str(args.checkpoint) + '.pth.tar')
            state_dict = torch.load(chkpt)
            model.load_state_dict(state_dict['model'], strict=True)
            # FIX: move the restored model to the target device (previously it
            # stayed on CPU while batches were moved to `device`, which fails
            # under --cuda; a no-op on CPU)
            model = model.to(device)
    # execute training, evaluation, or generation
    if args.mode == 'train':
        logging.info("training...")
        train(args, model, train_loader, val_loader, device)
        logging.info("...training done!")
    elif args.mode == 'eval':
        logging.info("evaluating...")
        test_loader = spk.data.AtomsLoader(data_test,
                                           batch_size=args.batch_size,
                                           num_workers=2,
                                           pin_memory=True,
                                           collate_fn=collate)
        with torch.no_grad():
            evaluate(args, model, train_loader, val_loader, test_loader, device)
        logging.info("... done!")
    elif args.mode == 'generate':
        logging.info(f'generating {args.amount_gen} molecules...')
        generated = generate(args, train_args, model, device)
        gen_path = os.path.join(args.modelpath, 'generated/')
        if not os.path.exists(gen_path):
            os.makedirs(gen_path)
        # get untaken filename and store results
        file_name = os.path.join(gen_path, args.file_name)
        if os.path.isfile(file_name + '.mol_dict'):
            expand = 0
            while True:
                expand += 1
                new_file_name = file_name + '_' + str(expand)
                if os.path.isfile(new_file_name + '.mol_dict'):
                    continue
                else:
                    file_name = new_file_name
                    break
        with open(file_name + '.mol_dict', 'wb') as f:
            pickle.dump(generated, f)
        logging.info('...done!')
    else:
        logging.info(f'Unknown mode: {args.mode}')
if __name__ == '__main__':
    # parse the command line and dispatch to the selected mode
    main(get_parser().parse_args())
| Python |
3D | rhyan10/G-SchNetOE62 | utility_functions.py | .py | 52,621 | 1,143 | import torch
import os
import json
import numpy as np
import torch.nn.functional as F
from multiprocessing import Queue
from scipy.spatial.distance import pdist, squareform
from torch.autograd import Variable
from schnetpack import Properties
from utility_classes import ProcessQ, IndexProvider
def boolean_string(s):
    '''
    Parse a boolean command line string with argparse.

    Args:
        s (str): boolean string, case-insensitive ('true' or 'false')

    Returns:
        bool: True for 'true', False for 'false'

    Raises:
        ValueError: if s is neither 'true' nor 'false' (ignoring case)
    '''
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError('Not a valid boolean string')
def cdists(mols, grid):
    '''
    Compute pairwise Euclidean distances between batched molecule positions
    and a list of grid positions (in-place ops keep memory demands low).

    Args:
        mols (torch.Tensor): batch of molecules with shape
            (batch_size x n_atoms x n_dims)
        grid (torch.Tensor): positions with shape (n_positions x n_dims) or
            (batch_size x n_positions x n_dims)

    Returns:
        torch.Tensor: distance matrices of shape
            (batch_size x n_atoms x n_positions)
    '''
    if grid.dim() + 1 == mols.dim():
        grid = grid.unsqueeze(0)  # add batch dimension
    diff = mols[:, :, None, :] - grid[:, None, :, :]
    sq_dist = torch.sum(diff.pow_(2), -1)
    # clamp to >= 0 before the in-place sqrt to guard against tiny negatives
    return F.relu(sq_dist, inplace=True).sqrt_()
def update_dict(d, d_upd):
    '''
    Merge one dictionary of numpy.ndarray dictionaries into another, in place.

    For keys present in both dictionaries, the arrays of the second dictionary
    are appended (along axis 0) to those of the first; new keys are simply
    adopted.

    Args:
        d (dict of numpy.ndarray): dictionary that is updated in place
        d_upd (dict of numpy.ndarray): dictionary providing the new values
    '''
    for key, new_entry in d_upd.items():
        if key in d:
            for inner_key in new_entry:
                d[key][inner_key] = np.append(d[key][inner_key],
                                              new_entry[inner_key], 0)
        else:
            d[key] = new_entry
def get_dict_count(d, max_length, skip=0):
    '''
    Count molecules per size class in a generated-molecules dictionary.

    The dictionary maps the number of atoms i to a dictionary holding the
    positions and atomic numbers of all molecules with i atoms, i.e.
    {i: {'_positions': numpy.ndarray, '_atomic_numbers': numpy.ndarray}}.

    Args:
        d (dict of dict): molecules sorted by number of atoms per molecule
        max_length (int): the maximum number of atoms per molecule in the
            dictionary (corresponds to the largest key in the dictionary)
        skip (int, optional): a key of the dictionary which is ignored during
            counting (e.g. to ignore molecules with 5 atoms, default: 0)

    Returns:
        numpy.ndarray: array of length max_length+1 where entry i holds the
            number of molecules with i atoms
    '''
    counts = np.zeros(max_length + 1, dtype=int)
    for size, entry in d.items():
        if size == skip:
            continue
        counts[size] = len(entry[Properties.Z])
    return counts
def run_threaded(target, splitable_kwargs, kwargs, results, n_threads=16,
                 exclusive_kwargs={}):
    '''
    Allows to run a target callable object in several processes simultaneously and
    join the results afterwards. This can be used to parallelize independent
    computations on objects in lists (e.g. validity checks on a set of molecules).

    Args:
        target (callable object): the function that is executed by each process
            (its return value has to be a dictionary with keys matching those of the
            dictionary passed as results parameter)
        splitable_kwargs (dict of iterables): keyword arguments for the target function
            that shall be split and equally distributed among processes
            (e.g. a list of molecules as {'molecules': [mol_1, mol_2, ..., mol_n]}).
        kwargs (dict of any): keyword arguments for the target function that are passed
            to each process (not split)
        results (dict of empty list or scaffold): dictionary where the keys correspond
            to the keys in the dictionary returned by the target function and all
            values shall either be empty lists or corresponding scaffolds (i.e. lists
            or arrays of the same length as the splitable input) that will be
            filled with the returned results
        n_threads (int): number of processes used
        exclusive_kwargs (dict of any): keyword arguments for the target function which
            are only passed to the first process

    Returns:
        dict of list: the results dictionary containing lists with the returned values
            of the target function (the order of the elements in the splitable_kwargs
            inputs is preserved in the output lists)
    '''
    # NOTE(review): exclusive_kwargs uses a mutable default ({}); it is only
    # read here, but callers should not rely on mutating the default object.
    # check if the number of threads is higher than the number of data points
    if len(splitable_kwargs) > 0:
        for key in splitable_kwargs:
            # extract first key of splitable keyword arguments
            first_key = key
            break
        n_data = len(splitable_kwargs[first_key])
        if n_data <= n_threads:
            # decrease number of threads
            n_threads = n_data
    else:
        # nothing to split -> return the results scaffold unchanged
        return results
    # create scaffold for results
    for key in results:
        if len(results[key]) != n_data:
            results[key] = [None for _ in range(n_data)]
    # initialize list for threads and a queue for results
    threads = []
    queue = Queue(n_threads)
    # initialize and start threads
    for i in range(0, n_threads):
        thread_kwargs = {}
        for key in splitable_kwargs:
            # interleaved split: process i handles elements i, i+n_threads, ...
            thread_kwargs[key] = splitable_kwargs[key][i::n_threads]
        thread_kwargs.update(kwargs)  # include unsplitable kwargs
        if i == 0:
            thread_kwargs.update(exclusive_kwargs)
        threads += [ProcessQ(queue, target=target, name=str(i),
                             kwargs=thread_kwargs)]
        threads[-1].start()
    # gather returned results
    n_results = 0
    while n_results < n_threads:
        # each process reports (its name, its result dict) via the queue;
        # the name encodes the interleaving offset used above
        id, res = queue.get(True)
        for key in res:
            id = int(id)
            results[key][id::n_threads] = res[key]  # fill results
        n_results += 1
    # join queue
    queue.close()
    queue.join_thread()
    # join threads
    for thread in threads:
        thread.join()
        thread.terminate()
    return results
def print_equally_spaced(head, value, space=13):
    '''
    Print a heading followed by a value, padded so that small tables line up.

    The padding between '<head>:' and the value shrinks with the combined
    length of heading and value but is always at least one space.

    Args:
        head (str): the heading that is printed (a ':' is appended)
        value (str): the value that is printed after the padding
        space (int, optional): target total width used to compute the padding
            (default: 13)
    '''
    gap = space - len(f'{head}:') - len(f'{value}')
    padding = ' ' * max(gap, 1)
    print(f'{head}:{padding}{value}')
def print_accumulated_staticstics(stats, stat_heads, name='generated',
                                  fields=('H', 'C', 'N', 'O', 'F'), set=None,
                                  print_stats=('mean_percentage',),
                                  additive_fields={}):
    '''
    Accumulates and prints statistics of a set of molecules (e.g. the average number
    of carbon atoms per molecule of the set).

    Args:
        stats (numpy.ndarray): statistics of all molecules where columns
            correspond to molecules and rows correspond to available statistics
            (n_statistics x n_molecules)
        stat_heads (numpy.ndarray or list of str): the names of the statistics stored
            in each row in stats (e.g. 'F' for the number of fluorine atoms or 'R5'
            for the number of rings of size 5)
        name (str, optional): name of the set of molecules (e.g. 'generated' or
            'qm9', default: 'generated')
        fields (list of str, optional): the names of statistics for which the average
            per molecule shall be printed (e.g. ['C', 'F'] to print the average number
            of carbon atoms and the average number of fluorine atoms,
            default: ['H', 'C', 'N', 'O', 'F'])
        set (list of int, optional): indices of the molecules that shall be
            considered when accumulating the statistics (set to None to consider all
            molecules, default: None); note that this parameter shadows the
            builtin `set` inside this function
        print_stats (list of str, optional): additional accumulations that can be
            printed, choose from 'mean_percentage' (mean of percent of each entry in
            fields in relation to all fields, e.g. mean percent of carbon atoms of
            all carbon and fluorine atoms for fields=['C', 'F']) and 'molecules_with'
            (e.g. number of molecules in the set which have at least one carbon atom
            for fields=['C']) (default: ['mean_percentage'])
        additive_fields (dict of list, optional): dictionary of lists with names of
            statistics where all statistics in a list will be summed and then
            processed as the entries in the fields parameter
            (e.g. {'triple': ['C3C', 'C3N']} will print the average number of triple
            bonds by summing the triple bonds between two carbon atoms and those
            between carbon and nitrogen atoms)
    '''
    # NOTE(review): the function name contains a typo ('staticstics'); it is
    # kept unchanged for backward compatibility with existing callers.
    stat_heads = list(stat_heads)
    n_mols = len(stats[0])
    if set is None:
        set = np.arange(n_mols)
    idcs = [stat_heads.index(key) for key in fields]
    np_stats = np.zeros((len(fields)+len(additive_fields), len(set)))
    names = [i for i in fields] + [key for key in additive_fields]
    print(f'\nAccumulated statistics of {name}:')
    print(f'\nMean absolute values...')
    # transfer data from fields
    for i, idx in enumerate(idcs):
        np_stats[i] = np.array(stats[idx, set])
        print_equally_spaced(stat_heads[idx], f'{np.mean(np_stats[i]):.2f}')
    # accumulate and transfer data from additive_fields
    print(stat_heads)
    for i, key in enumerate(additive_fields):
        _idcs = [stat_heads.index(val) for val in additive_fields[key]]
        for idx in _idcs:
            np_stats[len(fields)+i] += np.array(stats[idx, set])
        print_equally_spaced(key, f'{np.mean(np_stats[len(fields)+i]):.2f}')
    if len(np_stats) > 1:
        print_equally_spaced('All', f'{np.mean(np_stats.sum(axis=0)):.2f}')
    if 'mean_percentage' in print_stats:
        print(f'\nMean percentage...')
        for i, field_name in enumerate(names):
            # fraction of this field relative to the sum over all fields,
            # averaged over molecules (division by 0 guarded via maximum)
            mean_p = np.mean(np_stats[i]/np.maximum(1, np_stats.sum(axis=0)))
            print_equally_spaced(field_name, f'{mean_p:.2f}')
    if 'molecules_with' in print_stats:
        print(f'\nMolecules with...')
        IP = IndexProvider(stats[:, set], stat_heads)
        for i, idx in enumerate(idcs):
            selected = IP.get_selected(fields[i])
            # FIX: multiply the fraction by 100 so the value printed with the
            # '%' suffix is an actual percentage (previously a raw ratio in
            # [0, 1] was printed as e.g. '0.85%')
            print_equally_spaced(
                stat_heads[idx],
                f'{len(selected)} ({100 * len(selected) / len(set):.2f}%)',
                22)
def print_atom_bond_ring_stats(generated_data_path, model_path, train_data_path):
    '''
    Print average atom, bond, and ring count statistics of generated molecules
    in the provided database and reference training molecules.

    Args:
        generated_data_path (str): path to database with generated molecules
        model_path (str): path to directory containing the model used to generate the
            molecules (it should contain a split.npz file which is used to identify
            training, validation, and test molecules and an args.json file containing
            the arguments of the training procedure)
        train_data_path (str): path to database with training data molecules
    '''
    # statistics of the generated molecules are expected next to their database
    stats_path = os.path.splitext(generated_data_path)[0] + '_statistics.npz'
    if not os.path.isfile(stats_path):
        print(f'Statistics of generated molecules not found (expected it at '
              f'{stats_path}).\nPlease specify the correct path to the database '
              f'holding the generated molecules!')
        return
    gen_npz = np.load(stats_path)
    stats, stat_heads = gen_npz['stats'], gen_npz['stat_heads']
    # try to load statistics of the training molecules (optional reference)
    training_stats_path = os.path.splitext(train_data_path)[0] + '_statistics.npz'
    have_train_stats = os.path.isfile(training_stats_path)
    if not have_train_stats:
        print(f'Statistics of training data not found (expected it at '
              f'{training_stats_path}).\nWill only print statistics of generated '
              f'molecules...')
    else:
        train_stat_dict = np.load(training_stats_path)
        # identify training molecules via the stored data split of the model
        split = np.load(os.path.join(model_path, 'split.npz'))
        train_idx = split['train_idx']
        # if training used a subset, map split indices into the full data set
        with open(os.path.join(model_path, 'args.json')) as handle:
            train_args = json.loads(handle.read())
        if train_args.get('subset_path') is not None:
            train_idx = np.load(train_args['subset_path'])[train_idx]
    # bond statistics are accumulated per bond order (single/double/triple)
    bond_fields = {'Single': ['H1C', 'H1N', 'H1O',
                              'C1C', 'C1N', 'C1O', 'C1F',
                              'N1N', 'N1O', 'N1F',
                              'O1O', 'O1F'],
                   'Double': ['C2C', 'C2N', 'C2O',
                              'N2N', 'N2O'],
                   'Triple': ['C3C', 'C3N']}
    # Atom type statistics
    descr = ' concerning atom types'
    print_accumulated_staticstics(stats, stat_heads,
                                  name='generated molecules' + descr)
    if have_train_stats:
        print_accumulated_staticstics(train_stat_dict['stats'],
                                      train_stat_dict['stat_heads'],
                                      name='training molecules' + descr,
                                      set=train_idx)
    # Atom bond statistics
    descr = ' concerning atom bonds'
    print_accumulated_staticstics(stats, stat_heads, fields=(),
                                  name='generated molecules' + descr,
                                  additive_fields=bond_fields)
    if have_train_stats:
        print_accumulated_staticstics(train_stat_dict['stats'],
                                      train_stat_dict['stat_heads'],
                                      fields=(), set=train_idx,
                                      name='training molecules' + descr,
                                      additive_fields=bond_fields)
    # Ring statistics
    descr = ' concerning ring structures'
    ring_fields = ['R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R>8']
    print_accumulated_staticstics(stats, stat_heads, fields=ring_fields,
                                  name='generated molecules' + descr,
                                  print_stats=['molecules_with'])
    if have_train_stats:
        print_accumulated_staticstics(train_stat_dict['stats'],
                                      train_stat_dict['stat_heads'],
                                      fields=ring_fields, set=train_idx,
                                      name='training molecules' + descr,
                                      print_stats=['molecules_with'])
def get_random_walk(mol_dict, stop_token=10, seed=None):
    '''
    Builds a random generation trace of a training molecule. Assumes that the atoms are
    ordered by distance to the center of mass (close to far) and always starts with
    the first atom (i.e. the one closest to center of mass). At each step, one of the
    already placed atoms is randomly chosen as focus. Then the first unplaced
    neighbor (closest to center of mass) is chosen as the next addition to the
    unfinished structure. If all neighbors have already been placed, the stop token
    is chosen as the next type and the focus is marked as finished (cannot be chosen
    as focus anymore). The trace is complete after all atoms have been placed and
    marked as finished.
    The resulting trace will be saved in the dictionary passed as mol_dict argument.
    The atom positions and atomic numbers will be ordered in the sequence of
    placement in the trace and additionally there will be entries 'pred_types'
    (torch.Tensor containing the next type that shall be predicted at each step
    including stop tokens) and 'current' (torch.Tensor containing the index of the
    currently focused atom at each step).

    Args:
        mol_dict (dict of torch.Tensor): dict containing the atom positions
            ('_positions') ordered by distance to the center of mass of the molecule,
            the atomic numbers ('_atomic_numbers'), the connectivity matrix
            ('_con_mat'), and, optionally, the precomputed distances ('dists')
        stop_token (int, optional): a dummy atom type which is used as the stop token
            (default: 10)
        seed (int, optional): a seed for the random selection of the focus at each
            step (default: None)
    '''
    # set seed (remember the RNG state so it can be restored afterwards)
    if seed is not None:
        old_state = torch.get_rng_state()
        torch.manual_seed(seed)
    # extract positions, atomic numbers, and connectivity matrix
    numbers = mol_dict[Properties.Z]
    n_atoms = len(numbers)
    con_mat = (mol_dict['_con_mat'] > 0).float()
    current = [-1]  # in the first step, none of the atoms is focused
    order = [0]  # the new ordering always starts with the first atom (closest to com)
    pred_types = [numbers[0]]  # the first predicted type is that of the first atom
    # start from first atom and traverse molecular graph (choosing the focus randomly)
    con_mat[:, 0] = 0  # mark first atom as placed by removing its bonds
    avail = torch.zeros(n_atoms).float()  # list with atoms available as focus
    avail[0] = 1.  # first atom is available
    i = 1
    # loop until all bonds are consumed (every atom placed) and no atom is
    # available as focus anymore (stop predicted for every placed atom)
    while torch.sum(con_mat > 0) or (torch.sum(avail) > 0):
        # take random current focus
        cur_i = torch.multinomial(avail, 1)[0]
        current += [cur_i]
        cur = order[cur_i]
        # predict finished if no neighbors are left
        if torch.sum(con_mat[cur]) == 0:
            pred_types += [stop_token]  # predict stop token
            avail[cur_i] = 0
            continue
        # else choose neighbor which is closest to center of mass (first nonzero entry)
        next_atom = torch.nonzero(con_mat[cur] > 0)[0][0]
        pred_types += [numbers[next_atom]]
        order += [next_atom]
        con_mat[:, next_atom] = 0  # mark next atom as placed by removing its bonds
        avail[i] = 1  # mark placed atom as available for being the focus
        i += 1
    # cast to torch.Tensor
    order = torch.tensor(order)
    pred_types = torch.tensor(pred_types)
    current = torch.tensor(current)
    # update dict of molecule with re-ordered positions, numbers, focus, and next types
    mol_dict.update({'pred_types': pred_types,
                     'current': current,
                     Properties.R: mol_dict[Properties.R][order],
                     Properties.Z: mol_dict[Properties.Z][order]})
    # re-order or calculate distances (depending on whether they were precomputed)
    if 'dists' in mol_dict:
        # re-order (stored distances are still in the original atom order)
        mol_dict['dists'] = \
            squareform(squareform(mol_dict['dists'][:, 0])[order][:, order])[:, None]
    else:
        # compute from positions (already re-ordered by the update above; a second
        # indexing with `order` here would permute twice and yield distances that
        # do not match the stored positions)
        mol_dict['dists'] = pdist(mol_dict[Properties.R])[:, None]
    # reset seed
    if seed is not None:
        torch.set_rng_state(old_state)
def get_labels(n_bins, max_size, target, width_scaling):
    '''
    Get labels for distance predictions from ground truth distances. The labels are
    either obtained by 1d Gaussian smearing or by one-hot encoding.
    The bins c_i are found by taking n_bins equally spaced points in 0 <= c_i <=
    max_size (e.g. for n_bins=3 and max_size=1 the bins are [0, 0.5, 1]).
    Let w be the distance between two neighboring bins (e.g. 0.5 in the example
    before). For one-hot encodings, ground truth distances d are sorted into a bin c_i
    if (c_i - 0.5*w) <= d < (c_i + 0.5*w). For Gaussian smearing,
    e^-((d-c_i)**2 / w*width_scaling) is calculated for each bin c_i.
    Distance values larger than max_size are always one-hot encoded into the last bin
    of the distribution.

    Args:
        n_bins (int): number of bins used to discretize (1d) space of distances
        max_size (int): maximum distance covered (i.e. the discretized distribution
            will cover the space 0 <= dist <= max_size)
        target (torch.Tensor): distance matrix (n_atoms x n_atoms) or batch of
            distance matrices (batch_size x n_atoms x n_atoms) holding the ground truth
        width_scaling (float): factor scaling the width in the Gaussian smearing
            (the width is the distance between two neighboring bins if
            width_scaling=1., set to 0 to use one-hot encoding instead)

    Returns:
        torch.Tensor: labels for distance predictions (discretized distributions over
            distances of the shape ((batch_size x )n_atoms x n_atoms x n_bins))
    '''
    expanded = target.unsqueeze(-1)  # append a trailing bin axis for broadcasting
    if width_scaling > 0:
        # smear each ground truth distance with a Gaussian over the bin centers
        bin_shape = [1] * target.dim() + [-1]
        centers = torch.linspace(0, max_size, n_bins).view(*bin_shape)
        gauss_width = max_size / (n_bins - 1) * width_scaling
        labels = torch.exp(-((expanded - centers) ** 2) / gauss_width)
        # distances beyond max_size are one-hot encoded into the last bin
        overflow = torch.zeros(n_bins)
        overflow[-1] = 1.
        labels = torch.where(expanded <= max_size, labels, overflow)
        # normalize each distribution so that it sums to one
        labels = labels / labels.sum(-1, keepdim=True)
    else:
        # one-hot encoding: round each distance to the index of its nearest bin
        bin_width = max_size / (n_bins - 1)
        bin_idx = (((target + (bin_width / 2.)) / max_size) * (n_bins - 1)).long()
        bin_idx = bin_idx.clamp(0, n_bins - 1)
        all_bins = torch.arange(n_bins).reshape(1, 1, -1)
        empty = torch.zeros(*target.size(), n_bins)
        labels = torch.where(all_bins == bin_idx.unsqueeze(-1),
                             torch.ones_like(empty), empty)
    return labels
def get_padded_batch(mol_dicts):
    '''
    Builds a batch of input data and applies padding where necessary.

    Tensors are zero-padded up to the maximum size found per property across
    the molecules in the batch. If neighbor or atom masks are not already
    present, they are created here (padded entries are masked with 0).

    Args:
        mol_dicts (list of dict of torch.Tensor): the input data for each molecule
            (positions, atomic numbers, labels etc.) in a list

    Returns:
        dict of torch.Tensor: the input data as batches in a dictionary
    '''
    properties = mol_dicts[0]
    # initialize maximum sizes from the first molecule
    # (use builtin int here: the alias np.int was deprecated in NumPy 1.20 and
    # removed in NumPy 1.24, where it raises AttributeError)
    max_size = {
        prop: np.array(val.size(), dtype=int)
        for prop, val in properties.items()
    }
    # get maximum sizes (per property and per dimension over all molecules)
    for properties in mol_dicts[1:]:
        for prop, val in properties.items():
            max_size[prop] = np.maximum(max_size[prop], np.array(val.size(),
                                                                 dtype=int))
    # initialize batch tensors filled with zeros (zero = padding)
    batch = {
        p: torch.zeros(len(mol_dicts),
                       *[int(ss) for ss in size]).type(mol_dicts[0][p].type())
        for p, size in max_size.items()
    }
    has_atom_mask = Properties.atom_mask in batch
    has_neighbor_mask = Properties.neighbor_mask in batch
    if not has_neighbor_mask:
        batch[Properties.neighbor_mask] =\
            torch.zeros_like(batch[Properties.neighbors]).float()
    if not has_atom_mask:
        batch[Properties.atom_mask] =\
            torch.zeros_like(batch[Properties.Z]).float()
    # build batch and pad
    for k, properties in enumerate(mol_dicts):
        for prop, val in properties.items():
            shape = val.size()
            s = (k,) + tuple([slice(0, d) for d in shape])
            batch[prop][s] = val
        # add mask (only real, non-padded entries are marked with 1)
        if not has_neighbor_mask:
            nbh = properties[Properties.neighbors]
            shape = nbh.size()
            s = (k,) + tuple([slice(0, d) for d in shape])
            mask = nbh >= 0
            batch[Properties.neighbor_mask][s] = mask
            batch[Properties.neighbors][s] = nbh * mask.long()
        if not has_atom_mask:
            z = properties[Properties.Z]
            shape = z.size()
            s = (k,) + tuple([slice(0, d) for d in shape])
            batch[Properties.atom_mask][s] = z > 0
    # wrap everything in variables (a no-op with modern torch, kept for
    # backwards compatibility with the rest of the file)
    batch = {k: Variable(v) for k, v in batch.items()}
    return batch
def collate_atoms(mol_dicts,
                  all_types=[1, 6, 7, 8, 9, 10],
                  start_token=11,
                  n_bins=300,
                  max_dist=15.,
                  label_width_scaling=0.1,
                  draw_samples=0,
                  seed=None):
    '''
    Split each molecule into a random generation trace and then build batch of input
    data and apply padding.

    Args:
        mol_dicts (list of dict): list of dicts containing properties of a training
            molecule (positions, atomic numbers, connectivity matrix etc.)
        all_types (list of int, optional): list of all atom types in the data set in
            ascending order (including a dummy index as stop token that should be
            larger than all other types and therefore the last entry, default:
            [1, 6, 7, 8, 9, 10], which are all atomic charges in QM9 and 10 as stop
            token)
        start_token (int, optional): a dummy atom type (not included in all_types)
            which is used as the start token (default: 11)
        n_bins (int, optional): number of bins used to discretize (1d) space of
            distances (default: 300)
        max_dist (int, optional): maximum distance covered in the learned
            distributions (i.e. the discretized distribution will cover the
            space 0<=dist<= max_size, default: 15.)
        label_width_scaling (float, optional): factor scaling the width in the Gaussian
            smearing of distance labels (the width is the distance between two
            neighboring bins if width_scaling=1., set to 0 to use one-hot encoding
            instead, default: 0.1)
        draw_samples (int, optional): the number of steps in the generation trace
            that are randomly drawn for training (set to 0 to use the complete trace,
            default: 0)
        seed (int, optional): seed for the random sampling of a generation trace (set
            None to use no seed, default: None)

    Returns:
        dict[str->torch.Tensor]: mini-batch of atomistic systems
    '''
    # store all possible types in a tensor, build one-hot encoded label vectors
    all_types_tensor = torch.tensor(all_types)
    type_labels = torch.eye(len(all_types)) # one-hot encoding of types
    # build array that converts type to correct row index in the type labels
    type_idc_converter = torch.zeros(torch.max(all_types_tensor)+1).long()
    type_idc_converter[all_types_tensor.long()] = torch.arange(len(all_types))
    # extract stop token
    stop_token = all_types[-1]
    # we use the same token as dummy type for the currently focused atom
    focus_token = stop_token
    # initialize lists
    mols_gen_steps = [] # stores partial molecules at single steps of generation traces
    next_list = [] # stores next atom type at each step
    current_list = [] # stores currently focused atom at each step
    n_tokens = 2 # we use a start (origin) token and a focus token
    # divide every molecule into a generation trace (starting from the atom closest to
    # the center of mass) to cover all steps of the generation process and save partial
    # molecules along with labels of distance distributions and next type
    for mol in mol_dicts:
        # update molecule dict with random generation trace (mutates mol in place)
        get_random_walk(mol, stop_token=stop_token, seed=seed)
        # extract information about molecule (and trace)
        pos = mol[Properties.R]
        numbers = mol[Properties.Z]
        neighbors = mol[Properties.neighbors]
        pred_types = mol['pred_types'].long()
        # focus indices are shifted by n_tokens since two tokens are prepended below
        focus = mol['current'].long() + n_tokens
        # add start and focus tokens to data (positions, distances, atomic numbers)
        # use origin as position for both tokens
        pos = torch.cat((torch.zeros(n_tokens, 3), pos), 0)
        n_atoms = len(pos)
        # store (pre-computed) pairwise distances between atoms in a distance matrix
        dists = torch.zeros(n_atoms, n_atoms)
        dists[n_tokens:, n_tokens:] = torch.tensor(squareform(mol['dists'][:, 0]))
        # compute distances of atoms to origin (tokens) and save them in distance matrix
        center_dists = torch.sqrt(F.relu(torch.sum(pos ** 2, dim=1))) # dists to com
        dists[:n_tokens, :] = center_dists.view(1, -1)
        dists[:, :n_tokens] = center_dists.view(-1, 1)
        # add start and focus token to atomic numbers
        numbers = torch.cat((torch.tensor([focus_token, start_token]), numbers), 0)
        # adjust neighbor lists due to tokens (which are basically additional atoms)
        for i in range(n_tokens, 0, -1):
            neighbors = \
                torch.cat((neighbors, torch.ones(n_atoms-i, 1).long()*n_atoms-i), 1)
            neighbors = \
                torch.cat((neighbors, torch.arange(n_atoms-i).view(1, -1)), 0)
        cell_offset = torch.zeros(n_atoms, n_atoms-1, 3)
        # update dictionary with altered data
        mol.update({Properties.R: pos,
                    Properties.Z: numbers,
                    Properties.neighbors: neighbors,
                    Properties.cell_offset: cell_offset})
        # get distance labels (discretized distributions over the distance matrix)
        mol['_labels'] = get_labels(n_bins, max_dist, dists, label_width_scaling)
        # remove unnecessary entries
        mol.pop('dists')
        mol.pop('pred_types')
        mol.pop('current')
        mol.pop('_con_mat')
        # DIVIDE INTO PARTIAL MOLECULAR STRUCTURES #
        # i marks the current step in the trace excluding prediction of stop tokens
        # j marks the number of times the stop token has been predicted at the current i
        if draw_samples <= 0: # place atom by atom for the whole molecule
            j = 0
            index_list = range(n_tokens, n_atoms + 1)
            next_list += list(pred_types)
            current_list += list(focus)
        else: # randomly draw steps of the generation trace for training
            overall_steps = len(pred_types)
            # sample a few random steps
            np.random.seed(seed) # set seed (will have no effect if None)
            random_steps = np.random.choice(overall_steps,
                                            min(draw_samples, overall_steps),
                                            replace=False)
            np.random.seed(None) # reset seed
            index_list = []
            j_list = []
            for random_step in random_steps:
                random_step = int(random_step)
                # i counts only proper placements (non-stop predictions) before step
                i = int(torch.sum(pred_types[:random_step] != stop_token))
                j = random_step - i
                index_list += [i+n_tokens]
                j_list += [j]
                next_list += [pred_types[random_step]]
                current_list += [focus[random_step]]
            if len(j_list) > 0:
                j = j_list.pop(0)
        # iterate over steps in trace
        for i in index_list:
            _i = i-n_tokens # atom index if ignoring tokens
            while True:
                # shallow copy: tensors are shared, only sliced views are stored below
                partial_mol = mol.copy()
                # get the type of the next atom
                next_type = pred_types[_i+j]
                # don't consider distance predictions at stop token prediction steps
                if next_type == stop_token:
                    dist_mask = torch.zeros(i).float()
                else:
                    dist_mask = torch.ones(i).float()
                # always consider the type prediction
                type_mask = torch.ones(i).float()
                # assemble neighborhood mask and neighbor indices
                neighbor_mask = torch.ones(i, i-1)
                neighbors = partial_mol[Properties.neighbors][:i, :i-1]
                # set position and labels of the current (focus) token (first atom)
                cur = focus[_i + j]
                pos = partial_mol[Properties.R][:i]
                # at the final step (i == n_atoms) only stop tokens remain to be
                # predicted and dist_mask is zero, so row 0 serves as a dummy here
                label_idx = i if i < n_atoms else 0
                labels = partial_mol['_labels'][label_idx, :i]
                # move the focused atom's position/labels into the focus token slot
                pos = torch.cat((pos[cur:cur+1], pos[1:]), 0)
                labels = torch.cat((labels[cur:cur+1], labels[1:]), 0)
                partial_mol.update(
                    {Properties.R: pos,
                     Properties.Z: partial_mol[Properties.Z][:i],
                     '_labels': labels,
                     '_dist_mask': dist_mask,
                     '_type_mask': type_mask,
                     Properties.neighbor_mask: neighbor_mask,
                     Properties.neighbors: neighbors,
                     Properties.cell_offset: partial_mol[Properties.cell_offset][:i, :i-1]
                     })
                # store current step in list of trace steps of mini-batch molecules
                mols_gen_steps += [partial_mol]
                if draw_samples > 0:
                    if len(j_list) > 0:
                        j = j_list.pop(0)
                    break
                if pred_types[_i+j] == stop_token:
                    j += 1 # increase stop token counter
                    if j == n_atoms-n_tokens:
                        # stop predicted for every atom -> trace finished
                        break
                    else:
                        # continue with next step in trace (without changing i)
                        continue
                else:
                    # continue with next step in trace (by getting next i)
                    break
    # build nn-input mini-batch from gathered generation steps
    batch = get_padded_batch(mols_gen_steps)
    # update with remaining indicators (the type of the next atom, all available atom
    # types and the one-hot encoded labels for the type predictions)
    next_list = torch.tensor(next_list)
    batch.update({'_next_types': Variable(next_list),
                  '_all_types': Variable(all_types_tensor.view(1, -1)),
                  '_type_labels': Variable(
                      type_labels[torch.gather(type_idc_converter, 0, next_list)])
                  })
    return batch
def get_grid(radial_limits, n_bins, max_dist):
    '''
    Get a grid with candidate atom positions. A lower and upper radial cutoff is used
    to remove positions too close to or too far from the origin of the grid (which
    should manually be centered on the current focus token at each generation step).
    The grid extends in steps of 0.05 Angstrom into x, y, and z directions.
    For the very first step (when predicting the first atom position), a grid with
    positions that extend only into one dimension is returned. There is no need to
    extend the grid into all dimensions in the first step as the predicted distribution
    of positions will be radial by design (since the focus and origin token are
    located at the same position). Furthermore, the minimum and maximum distances
    between atoms that can be taken from the training data as radial limits will
    generally not apply to the first prediction, where the distance between the center
    of mass and the closest atom is determined. Therefore, the special start grid does
    not make use of the provided radial limits.

    Args:
        radial_limits (list of float): list with lower distance limit as first entry
            and upper distance limit as second entry
        n_bins (int): number of bins used for distance predictions (will be used to
            assemble the special grid for the first generation step)
        max_dist float): maximum distance covered in the predicted distance
            distributions (will be used to assemble the special grid for the first
            generation step)

    Returns:
        grid (numpy.ndarray): 2d array of grid positions (n_grid_positions x 3) for all
            generation steps except the first
        start_grid (numpy.ndarray): 2d array of grid positions (n_grid_positions x 3)
            for the first generation step
    '''
    n_dims = 3  # the general grid lives in 3d space
    r_min, r_max = radial_limits[0], radial_limits[1]
    # axis with 0.05 Angstrom resolution covering [-r_max, r_max]
    n_steps = int(r_max * 2 * 20) + 1
    axis = np.linspace(-r_max, r_max, n_steps)
    # cartesian product of the axis with itself -> cubic grid of 3d positions
    mesh = np.meshgrid(*([axis] * n_dims))
    cube = np.stack(mesh, axis=-1)
    n_points = np.prod(cube.shape[:n_dims])
    cube = np.reshape(cube, (n_points, -1))
    # keep only positions inside the spherical shell defined by the radial limits
    radii = np.sqrt(np.sum(cube ** 2, axis=-1))
    shell = np.logical_and(radii >= r_min, radii <= r_max)
    grid = cube[shell]
    # special grid for the first step: extends only along the x-axis since the
    # predicted distribution is radially symmetric at that point
    start_grid = np.zeros((n_bins, n_dims))
    start_grid[:, 0] = np.linspace(0, max_dist, n_bins)
    return grid, start_grid
def get_default_neighbors(n_atoms):
    '''
    Build a fully connected neighborhood index matrix, i.e. for every atom the
    indices of all other atoms excluding itself (e.g. [[1, 2], [0, 2], [0, 1]]
    for three atoms).

    Args:
        n_atoms (int): number of atoms

    Returns:
        list of list of int: the indices of the neighbors of each atom
    '''
    all_idcs = list(range(n_atoms))
    return [[j for j in all_idcs if j != i] for i in all_idcs]
def generate_molecules(amount,
                       model,
                       t=0.1,
                       max_length=35,
                       save_unfinished=False,
                       all_types=[1, 6, 7, 8, 9, 10],
                       start_token=11,
                       n_bins=300,
                       max_dist=15.,
                       radial_limits=[0.9, 1.7],
                       device='cuda'
                       ):
    '''
    Generate molecules using a trained G-SchNet model. The atomic numbers of all
    chemical elements in the training data and the numbers assigned to focus and
    start token need to be specified. A spherical grid that is re-centered on the
    focused atom at every generation step is used. Its minimum and maximum distance
    can be provided and should be close to the minimum and maximum distances of
    neighbored atoms observed in the training data.

    Args:
        amount (int): the amount of molecules that shall be generated
        model (schnetpack.atomistic.AtomisticModel): a trained G-SchNet model
        t (float, optional): the sampling temperature which controls randomness during
            sampling of atom positions (higher values introduce more randomness,
            default: 0.1)
        max_length (int, optional): the maximum number of atoms per molecule (if not
            all atoms have been marked as finished when this number is reached then
            generation is stopped and unfinished molecules are either disregarded or
            stored in the category of unfinished examples depending on the
            save_unfinished argument, default: 35)
        save_unfinished (bool, optional): whether molecules that are still unfinished
            after sampling 'max_length' atoms shall be stored in the returned
            dictionary of generated molecules (the key for unfinished molecules is -1,
            default: False)
        all_types (list of int optional): list of all atom types in the training data
            set in ascending order (including a dummy index as stop token that should
            be larger than all other types and therefore the last entry, default:
            [1, 6, 7, 8, 9, 10], which are all atomic charges in QM9 and 10 as stop
            token)
        start_token (int, optional): a dummy atom type (not included in all_types)
            which is used as the start token (default: 11)
        n_bins (int, optional): number of bins used to discretize (1d) space of
            distances (default: 300)
        max_dist (int, optional): maximum distance covered in the learned
            distributions (default: 15.)
        radial_limits (list of float, optional): list with lower distance limit for
            the radial atom position grid as first entry and upper distance limit as
            second entry (default: [0.9, 1.7])
        device (str, optional): choose whether to run the model on cpu ('cpu') or
            gpu('cuda', default: 'cuda')

    Returns:
        dict[int->str->numpy.ndarray]: positions and atomic numbers of generated
            molecules where the first key is the number of atoms (i.e. all generated
            molecules with 9 atoms can be found using the key 9) and the second key
            is either '_positions' to get a (n_molecules x n_atoms x 3) array of atom
            positions or '_atomic_numbers' to get the corresponding
            (n_molecules x n_atoms) array of atomic numbers
    '''
    failed_counter = 0
    n_dims = 3
    n_tokens = 2 # token for current atom and for center of mass (start)
    start_idx = 1 # index of start_token
    model = model.to(device) # put model on chosen device (gpu/cpu)
    # increase max_length by three to compensate for tokens and last prediction step
    max_length += n_tokens+1
    all_types = torch.tensor(all_types).long().to(device)
    stop_token = all_types[-1]
    focus_token = stop_token
    # initialize tensor that stores the indices of currently focused atoms
    current_atoms = torch.ones(amount).long().to(device) # store current atom
    # initialize tensor for atomic numbers
    atom_numbers = torch.zeros(amount, max_length).long().to(device)
    # set first atom as current (focus) token and second as center of mass (start)
    atom_numbers[:, 0] = focus_token
    atom_numbers[:, 1] = start_token
    # initialize tensor for atom positions
    positions = torch.zeros(amount, max_length, n_dims).to(device)
    # initialize mask for molecules which are not yet finished (all in the beginning)
    unfinished = torch.ones(amount, dtype=torch.bool).to(device)
    # initialize mask to mark single atoms as finished/unfinished
    atoms_unfinished = torch.ones(amount, max_length).float().to(device)
    # molecule generation stops if all regular atoms of a molecule are marked finished
    atoms_unfinished[:, [0]] = 0 # mark focus token as finished
    # create grids (a small, linear one for the very first step and a radial one
    # for all following generation steps)
    general_grid, start_grid = get_grid(radial_limits, n_bins=n_bins, max_dist=max_dist)
    general_grid = torch.tensor(general_grid).float().to(device) # radial grid
    start_grid = torch.tensor(start_grid).float().to(device) # small start grid
    # create default neighborhood list
    neighbors = torch.tensor(get_default_neighbors(max_length-1)).long().to(device)
    # create dictionary in which generated molecules will be stored (where the key
    # will be the number of atoms in the respective generated molecule)
    results = {}
    # create short name for function that pulls results from gpu and removes
    # the start and current tokens (first two entries)
    s = lambda x: x[:, n_tokens:].detach().cpu().numpy()
    # define function that builds a model input batch with current state of molecules
    # (note: this closure reads the enclosing tensors and re-centers `positions`
    # in place on the currently focused atom)
    def build_batch(i):
        amount = torch.sum(unfinished) # only get predictions for unfinished molecules
        # build neighborhood and neighborhood mask
        neighbors_i = neighbors[:i, :i-1].expand(amount, -1, -1).contiguous()
        neighbor_mask = torch.ones_like(neighbors_i).float()
        # set position of focus token (first entry of positions)
        positions[unfinished, 0] = positions[unfinished, current_atoms[unfinished]]
        # center positions on currently focused atom (for localized grid)
        positions[unfinished, :i] -= \
            positions[unfinished, current_atoms[unfinished]][:, None, :]
        # build batch with data of the partial molecules
        # NOTE(review): the atom mask is all zeros here even though all entries are
        # real atoms/tokens -- presumably the model does not rely on it during
        # generation; confirm against the model implementation
        batch = {
            Properties.R: positions[unfinished, :i],
            Properties.Z: atom_numbers[unfinished, :i],
            Properties.atom_mask: torch.zeros(amount, i, dtype=torch.float),
            Properties.neighbors: neighbors_i,
            Properties.neighbor_mask: neighbor_mask,
            Properties.cell_offset: torch.zeros(amount, i, max(i-1, 1), n_dims),
            Properties.cell: torch.zeros(amount, n_dims, n_dims),
            '_next_types': atom_numbers[unfinished, i],
            '_all_types': all_types.view(1, -1),
            '_type_mask': torch.ones(amount, i, dtype=torch.float),
        }
        # put batch into torch variables and on gpu
        batch = {
            k: v.to(device)
            for k, v in batch.items()
        }
        return batch
    for i in range(n_tokens, max_length):
        amount = torch.sum(unfinished)
        # stop if the generation process is finished for all molecules
        if amount == 0:
            break
        # store the global state of molecules (whether they are finished)
        global_unfinished = unfinished.clone()
        ### 1st Part ###
        # predict and sample next atom type until all unfinished molecules either have
        # a proper next type (not stop token) or are completely finished
        while torch.sum(unfinished) > 0:
            # set the marker for the current (focus) atom
            current_atoms[unfinished] = \
                torch.multinomial(atoms_unfinished[unfinished, :i], 1).squeeze()
            # get batch with updated data (changes in each iteration as unfinished and
            # current_atoms are changed)
            batch = build_batch(i)
            # predict distribution over next atom types with model
            type_pred = F.softmax(model(batch)['type_predictions'], dim=-1)
            # sample types from predictions
            next_types = torch.multinomial(type_pred, 1)
            # store sampled type in tensor with atomic numbers
            atom_numbers[unfinished, i] = all_types[next_types].view(-1)
            # get molecules that predicted no proper type but the stop token
            pred_stop = torch.eq(atom_numbers[unfinished, i], stop_token)
            # set current atom of these molecules to finished
            stop_mask = torch.zeros(len(unfinished), dtype=torch.bool).to(device)
            stop_mask[unfinished] = pred_stop
            atoms_unfinished[stop_mask, current_atoms[stop_mask]] = 0
            # get molecules that were finished in this iteration (those which were
            # unfinished before and now have all atoms marked as finished)
            finished = global_unfinished & \
                       (torch.sum(atoms_unfinished[:, :i], dim=1) == 0)
            # store molecules which are not yet completely finished but have
            # predicted the stop type in the local unfinished list in order to repeat
            # the prediction procedure for these molecule (until they predict a
            # proper type for which we can sample a new position)
            unfinished[unfinished] = pred_stop & ~finished[unfinished]
        # store molecules which have been finished in this generation step (i.e. all
        # of their atoms are marked as finished)
        idx = i-n_tokens # number of atoms in the finished molecules
        if idx > 0 and torch.sum(finished) > 0:
            # center generated molecules on origin token
            positions[finished, :i] -= positions[finished, start_idx][:, None, :]
            # store positions and atomic numbers in dictionary
            results[idx] = {Properties.R: s(positions[finished, :i]),
                            Properties.Z: s(atom_numbers[finished, :i])}
        # mark finished molecules in global unfinished mask
        global_unfinished[global_unfinished] = ~finished[global_unfinished]
        # reset local unfinished mask to global state
        unfinished[global_unfinished] = 1
        # stop if max_length of molecules is reached or all are finished
        amount = torch.sum(unfinished)
        if i >= max_length-1 or amount == 0:
            break
        ### 2nd Part ###
        # sample new position given the type of the next atom
        # get batch with updated data
        batch = build_batch(i)
        # run model to get predictions
        logits = model(batch)
        # get normalized log probabilities
        log_p = F.log_softmax(logits['distance_predictions'], -1)
        del logits
        if i == n_tokens:
            grid = start_grid # use grid with positions on a line to sample first atom
        else:
            grid = general_grid # use radial 3d grid for all steps after the first
        # set up storage for log pdf over grid positions
        log_pdf = torch.zeros_like(grid[:, 0].expand(amount, -1))
        step = max_dist / (n_bins-1) # step size between two distance bins
        # iterate over atoms in order to reduce memory demands
        for atom in range(i):
            # calculate distances between grid points and respective atom
            dists = cdists(batch[Properties.R][:, atom:atom+1, :], grid)
            # calculate indices of the corresponding distance bins
            dists += step / 2.
            dists *= (n_bins-1) / max_dist
            dists.clamp_(0., n_bins-1)
            dist_labels = dists.long().squeeze(1)
            del dists
            # look up probabilities of distance bins in output
            log_p_grid = torch.gather(log_p[:, atom], -1, dist_labels)
            # multiply predictions for individual atoms to get probability
            # (sum in log space == product of probabilities)
            log_pdf += log_p_grid
            del log_p_grid
        del log_p
        # normalize distribution over grid
        log_pdf -= torch.logsumexp(log_pdf, -1, keepdim=True)
        # use temperature term on logits and normalize over grid again
        if i > n_tokens: # not for the very first atom with special grid
            log_pdf /= t
            log_pdf -= torch.logsumexp(log_pdf, -1, keepdim=True)
        log_pdf.exp_() # take exponential
        # remove numerically failed attempts (NaN in pdf) by marking them as finished
        # (they are not stored among the properly generated molecules, only disregarded)
        if torch.isnan(log_pdf).any():
            failed_mask = torch.isnan(log_pdf).any(dim=-1)
            unfinished[unfinished] = ~failed_mask
            log_pdf = log_pdf[~failed_mask]
            failed_counter += torch.sum(failed_mask)
        # sample positions of new atoms using the calculated pdfs over grid positions
        new_atom_idcs = torch.multinomial(log_pdf, 1).view(-1)
        del log_pdf
        # store new positions
        positions[unfinished, i, :] = grid[new_atom_idcs]
        # set start token to finished at the end of the first iteration
        if i == n_tokens:
            atoms_unfinished[:, [start_idx]] = 0
    # store unfinished molecules of max_length
    if save_unfinished:
        if torch.sum(unfinished) > 0:
            batch = build_batch(i)
            results[-1] = {Properties.R: s(batch[Properties.R]),
                           Properties.Z: s(batch[Properties.Z])}
    if failed_counter > 0:
        print(f'Failed attempts: {failed_counter}')
    return results
import logging
from pathlib import Path
import numpy as np
import torch
from ase.db import connect
from schnetpack import Properties
from schnetpack.datasets import AtomsData
from utility_classes import ConnectivityCompressor
from template_preprocess_dataset import preprocess_dataset
class TemplateData(AtomsData):
    """ Simple template dataset class. Molecules are assumed to be built from
    the element set listed in ``available_atom_types`` (H, Li, B, C, N, O, F,
    Si, P, S, Cl, As, Se, Br, Te, and I).

    The class basically serves as interface to a database. It initiates
    pre-processing of the data in order to prepare it for usage with G-SchNet.
    To this end, it calls the template_preprocess_dataset script which provides
    very basic pre-processing (e.g. calculation of connectivity matrices) and can
    also be adapted to the data at hand.

    Single (pre-processed) data points are read from the database in the
    get_properties method (which is called in __getitem__). The class builds upon
    the AtomsData class from SchNetPack.

    Args:
        path (str): path to directory containing database
        subset (list, optional): indices of subset, set to None for entire dataset
            (default: None).
        precompute_distances (bool, optional): if True and the pre-processed
            database does not yet exist, the pairwise distances of atoms in the
            dataset's molecules will be computed during pre-processing and stored in
            the database (increases storage demand of the dataset but decreases
            computational cost during training as otherwise the distances will be
            computed once in every epoch, default: True)
        remove_invalid (bool, optional): if True, molecules that do not pass the
            implemented validity checks will be removed from the training data (
            in the simple template_preprocess_dataset script this is only a check
            for disconnectedness, i.e. if all atoms are connected by some path as
            otherwise no proper generation trace can be sampled,
            note: only works if the pre-processed database does not yet exist,
            default: True)
    """

    ##### Adjust the following settings to fit your data: #####
    # name of the database
    db_name = 'train.db'
    # name of the database after pre-processing (if the same as db_name, the original
    # database will be renamed to <db_name>.bak.db)
    preprocessed_db_name = 'train_gschnet.db'
    # all atom types found in molecules of the dataset
    # (H, Li, B, C, N, O, F, Si, P, S, Cl, As, Se, Br, Te, I)
    available_atom_types = [1, 3, 5, 6, 7, 8, 9, 14, 15, 16, 17, 33, 34, 35, 52, 53]
    # valence constraints of the atom types, one entry per entry in
    # available_atom_types (does not need to be provided unless a valence check
    # is implemented, but this is not the case in the template script)
    atom_types_valence = [1, 1, 3, 4, 3, 2, 1, 4, 5, 6, 1, 5, 6, 1, 6, 1]
    # minimum and maximum distance between neighboring atoms in angstrom (this is
    # used to determine which atoms are considered as connected in the connectivity
    # matrix, i.e. for sampling generation traces during training, and also to restrict
    # the grid around the focused atom during generation, as the next atom will always
    # be a neighbor of the focused atom)
    radial_limits = [0.5, 2]
    # used to decompress connectivity matrices
    connectivity_compressor = ConnectivityCompressor()

    def __init__(self, path, subset=None, precompute_distances=True,
                 remove_invalid=True):
        self.path_to_dir = Path(path)
        self.db_path = self.path_to_dir / self.preprocessed_db_name
        self.source_db_path = self.path_to_dir / self.db_name
        self.precompute_distances = precompute_distances
        self.remove_invalid = remove_invalid
        # do pre-processing (only if the database is not already pre-processed,
        # i.e. if it does not yet store connectivity matrices)
        found_connectivity = False
        if self.db_path.is_file():
            with connect(self.db_path) as conn:
                n_mols = conn.count()
                if n_mols > 0:
                    first_row = conn.get(1)
                    found_connectivity = 'con_mat' in first_row.data
        if not found_connectivity:
            self._preprocess_data()
        super().__init__(str(self.db_path), subset=subset)

    def create_subset(self, idx):
        """
        Returns a new dataset that only consists of provided indices.

        Args:
            idx (numpy.ndarray): subset indices

        Returns:
            schnetpack.data.AtomsData: dataset with subset of original data
        """
        idx = np.array(idx)
        # translate relative indices into absolute ones if this dataset is
        # itself already a subset
        subidx = idx if self.subset is None or len(idx) == 0 \
            else np.array(self.subset)[idx]
        return type(self)(self.path_to_dir, subidx)

    def get_properties(self, idx):
        """Load and assemble all properties of the molecule at index *idx*.

        Returns:
            tuple: the ase.Atoms object and a dict of torch tensors
            (types, centered positions, cell, connectivity, neighbors, ...)
        """
        _idx = self._subset_index(idx)
        with connect(self.db_path) as conn:
            row = conn.get(_idx + 1)
        at = row.toatoms()
        # extract/calculate structure (atom positions, types and cell)
        properties = {}
        # np.int64 (not the removed np.int alias) matches torch.LongTensor
        properties[Properties.Z] = torch.LongTensor(at.numbers.astype(np.int64))
        positions = at.positions.astype(np.float32)
        positions -= at.get_center_of_mass()  # center positions
        properties[Properties.R] = torch.FloatTensor(positions)
        properties[Properties.cell] = torch.FloatTensor(at.cell.astype(np.float32))
        # recover connectivity matrix from compressed format
        con_mat = self.connectivity_compressor.decompress(row.data['con_mat'])
        # save in dictionary
        properties['_con_mat'] = torch.FloatTensor(con_mat.astype(np.float32))
        # extract pre-computed distances (if they exist)
        if 'dists' in row.data:
            properties['dists'] = row.data['dists'][:, None]
        # get atom environment
        nbh_idx, offsets = self.environment_provider.get_environment(at)
        # store neighbors, cell, and index
        properties[Properties.neighbors] = torch.LongTensor(nbh_idx.astype(np.int64))
        properties[Properties.cell_offset] = torch.FloatTensor(
            offsets.astype(np.float32))
        properties["_idx"] = torch.LongTensor(np.array([idx], dtype=np.int64))
        return at, properties

    def _preprocess_data(self):
        """Run the one-time pre-processing of the raw database.

        Renames the source db if needed, looks for a pre-computed list of
        invalid molecules, and delegates the actual work (connectivity
        matrices, optional distances, removal of invalid molecules) to
        preprocess_dataset.
        """
        # check if pre-processing source db has different name than target db (if
        # not, rename it)
        source_db = self.path_to_dir / self.db_name
        if self.db_name == self.preprocessed_db_name:
            new_name = self.path_to_dir / (self.db_name + '.bak.db')
            source_db.rename(new_name)
            source_db = new_name
        # look for pre-computed list of invalid molecules
        invalid_list_path = self.source_db_path.parent / \
            (self.source_db_path.stem + '_invalid.txt')
        if invalid_list_path.is_file():
            invalid_list = np.loadtxt(invalid_list_path)
        else:
            invalid_list = None
        # initialize pre-processing (calculation and validation of connectivity
        # matrices as well as computation of pairwise distances between atoms);
        # flatten('F') interleaves the lists as [type1, valence1, type2, ...]
        valence_list = \
            np.array([self.available_atom_types, self.atom_types_valence]).flatten('F')
        preprocess_dataset(datapath=source_db,
                           cutoff=self.radial_limits[-1],
                           valence_list=list(valence_list),
                           logging_print=True,
                           new_db_path=self.db_path,
                           precompute_distances=self.precompute_distances,
                           remove_invalid=self.remove_invalid,
                           invalid_list=invalid_list)
        return True
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-03/model_prediction_no_reg_val_dset.py | .py | 2,734 | 88 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

# Locations of the training-history pickle, the trained model, and the output file.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
hist_path = os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro/vox_fluoro_img_no_l1_l2_loss')
hist_file_name = 'vox_fluoro_hist_objects_1.pkl'
load_model_name = 'vox_fluoro_img_no_l1_l2_loss_1.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-03'))
save_file_name = 'model_prediction_no_reg_val_dset_dist.pkl'
predict_numb = 100  # number of validation samples to predict on


def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label dimension scaled by the training-label std.

    Only needed so tf.keras can deserialize the model, which was compiled
    with this custom loss.
    """
    stats_dir = os.path.expanduser('~/fluoro/data/compilation')
    # context manager guarantees the stats file is closed even if reading fails
    with h5py.File(os.path.join(stats_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))


# The training history stores the train/val/test index split used during training.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

os.makedirs(save_dir, exist_ok=True)

# Sample a random subset of the *validation* indices; h5py fancy indexing
# requires increasing indices, so sort once and reuse.
random_test_values = np.random.choice(hist_data['val_indxs'], size=predict_numb, replace=False)
sorted_idxs = sorted(random_test_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
    image_test_mat = image_file['image_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['cali_len3_rot'][sorted_idxs]

model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name), custom_objects={'cust_mean_squared_error_std': cust_mean_squared_error_std})

# Inputs: voxels, the two fluoro views (split from the image stack), calibration.
predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), np.expand_dims(image_test_mat[:, 0, :, :], axis=-1), np.expand_dims(image_test_mat[:, 1, :, :], axis=-1), cali_test_mat], batch_size=10, verbose=1)

# Persist predictions alongside ground-truth labels for later analysis.
output_dict = {'predictions': predict_1, 'actual': label_test_mat}
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-03/model_prediction_no_reg.py | .py | 2,729 | 88 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

# Locations of the training-history pickle, the trained model, and the output file.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
hist_path = os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro/vox_fluoro_img_no_l1_l2_loss')
hist_file_name = 'vox_fluoro_hist_objects_1.pkl'
load_model_name = 'vox_fluoro_img_no_l1_l2_loss_1.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-03'))
save_file_name = 'vox_fluoro_predict_no_loss_dist.pkl'
predict_numb = 100  # number of test samples to predict on


def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label dimension scaled by the training-label std.

    Only needed so tf.keras can deserialize the model, which was compiled
    with this custom loss.
    """
    stats_dir = os.path.expanduser('~/fluoro/data/compilation')
    # context manager guarantees the stats file is closed even if reading fails
    with h5py.File(os.path.join(stats_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))


# The training history stores the train/val/test index split used during training.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

os.makedirs(save_dir, exist_ok=True)

# Sample a random subset of the *test* indices; h5py fancy indexing
# requires increasing indices, so sort once and reuse.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
sorted_idxs = sorted(random_test_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
    image_test_mat = image_file['image_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['cali_len3_rot'][sorted_idxs]

model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name), custom_objects={'cust_mean_squared_error_std': cust_mean_squared_error_std})

# Inputs: voxels, the two fluoro views (split from the image stack), calibration.
predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), np.expand_dims(image_test_mat[:, 0, :, :], axis=-1), np.expand_dims(image_test_mat[:, 1, :, :], axis=-1), cali_test_mat], batch_size=10, verbose=1)

# Persist predictions alongside ground-truth labels for later analysis.
output_dict = {'predictions': predict_1, 'actual': label_test_mat}
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-03/model_prediction.py | .py | 2,707 | 88 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

# Locations of the training-history pickle, the trained model, and the output file.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
hist_path = os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro/vox_fluoro_img_stnd_loss')
hist_file_name = 'vox_fluoro_hist_objects_2.pkl'
load_model_name = 'vox_fluoro_img_stnd_loss_2.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-03'))
save_file_name = 'vox_fluoro_predict_L1_0-1_L2_0-1.pkl'
predict_numb = 100  # number of test samples to predict on


def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label dimension scaled by the training-label std.

    Only needed so tf.keras can deserialize the model, which was compiled
    with this custom loss.
    """
    stats_dir = os.path.expanduser('~/fluoro/data/compilation')
    # context manager guarantees the stats file is closed even if reading fails
    with h5py.File(os.path.join(stats_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))


# The training history stores the train/val/test index split used during training.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

os.makedirs(save_dir, exist_ok=True)

# Sample a random subset of the *test* indices; h5py fancy indexing
# requires increasing indices, so sort once and reuse.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
sorted_idxs = sorted(random_test_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
    image_test_mat = image_file['image_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['cali_len3_rot'][sorted_idxs]

model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name), custom_objects={'cust_mean_squared_error_std': cust_mean_squared_error_std})

# Inputs: voxels, the two fluoro views (split from the image stack), calibration.
# (This variant intentionally uses Keras' default batch size.)
predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), np.expand_dims(image_test_mat[:, 0, :, :], axis=-1), np.expand_dims(image_test_mat[:, 1, :, :], axis=-1), cali_test_mat], verbose=1)

# Persist predictions alongside ground-truth labels for later analysis.
output_dict = {'predictions': predict_1, 'actual': label_test_mat}
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-03/model_prediction_L1_0-1_L2_0-1.py | .py | 2,727 | 88 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

# Locations of the training-history pickle, the trained model, and the output file.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
hist_path = os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro/vox_fluoro_img_stnd_loss')
hist_file_name = 'vox_fluoro_hist_objects_2.pkl'
load_model_name = 'vox_fluoro_img_stnd_loss_2.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-03'))
save_file_name = 'vox_fluoro_predict_L1_0-1_L2_0-1_dist.pkl'
predict_numb = 100  # number of test samples to predict on


def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label dimension scaled by the training-label std.

    Only needed so tf.keras can deserialize the model, which was compiled
    with this custom loss.
    """
    stats_dir = os.path.expanduser('~/fluoro/data/compilation')
    # context manager guarantees the stats file is closed even if reading fails
    with h5py.File(os.path.join(stats_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))


# The training history stores the train/val/test index split used during training.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

os.makedirs(save_dir, exist_ok=True)

# Sample a random subset of the *test* indices; h5py fancy indexing
# requires increasing indices, so sort once and reuse.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
sorted_idxs = sorted(random_test_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
    image_test_mat = image_file['image_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['cali_len3_rot'][sorted_idxs]

model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name), custom_objects={'cust_mean_squared_error_std': cust_mean_squared_error_std})

# Inputs: voxels, the two fluoro views (split from the image stack), calibration.
predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), np.expand_dims(image_test_mat[:, 0, :, :], axis=-1), np.expand_dims(image_test_mat[:, 1, :, :], axis=-1), cali_test_mat], batch_size=10, verbose=1)

# Persist predictions alongside ground-truth labels for later analysis.
output_dict = {'predictions': predict_1, 'actual': label_test_mat}
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_norm_mse.py | .py | 3,082 | 94 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

base_dir = os.path.expanduser('~/fluoro/data/compilation')

# All artifact names are derived from the experiment's base name.
file_base_name = 'vox_fluoro_norm_mse'
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
predict_numb = 100  # number of test samples to predict on

# -----------------------------------------------------------------


def inv_min_max(data_set, data_min, data_max, axis=0):
    """Rescale *data_set* onto [data_min, data_max] and clip to that range.

    NOTE(review): the input is renormalized with its *own* per-axis min/max,
    so the output always touches data_min/data_max exactly -- confirm this is
    the intended inverse of the training normalization.
    """
    lo = np.min(data_set, axis=axis)
    hi = np.max(data_set, axis=axis)
    data_0_1 = (data_set - lo) / (hi - lo)
    inv_data = data_0_1 * (data_max - data_min) + data_min
    inv_data = np.where(inv_data < data_min, data_min, inv_data)
    inv_data = np.where(inv_data > data_max, data_max, inv_data)
    return inv_data


# -----------------------------------------------------------------

# Training history holds the index split used when the model was trained.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

# Sample test indices once; h5py fancy indexing needs them in increasing order.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
sorted_idxs = sorted(random_test_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_idxs]

# The two fluoro views are stored pre-normalized (per-image min/max scaling).
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r') as image_file:
    image_test_1 = image_file['image_1']['min_max_dset_per_image'][sorted_idxs]
    image_test_2 = image_file['image_2']['min_max_dset_per_image'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_idxs]

# Calibration is also stored pre-normalized for this experiment.
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['min_max_dset'][sorted_idxs]

# -----------------------------------------------------------------
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))

predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_mat], batch_size=10, verbose=2)

# Keys are preserved verbatim (including the historical 'raw_ouput' spelling)
# so downstream readers of the pickle keep working.
output_dict = {}
output_dict['raw_ouput'] = predict_1
output_dict['predictions'] = inv_min_max(predict_1, hist_data['label_train_val_min'], hist_data['label_train_val_max'])
output_dict['actual'] = label_test_mat
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_no_bn.py | .py | 4,670 | 132 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

base_dir = os.path.expanduser('~/fluoro/data/compilation')

# All artifact names are derived from the experiment's base name.
file_base_name = 'vox_fluoro_no_bn'
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
predict_numb = 100  # samples drawn from each of the test and train splits

# -----------------------------------------------------------------


def inv_min_max(data_set, data_min, data_max, axis=0):
    """Rescale *data_set* onto [data_min, data_max] and clip to that range.

    NOTE(review): the input is renormalized with its *own* per-axis min/max,
    so the output always touches data_min/data_max exactly -- confirm this is
    the intended inverse of the training normalization.
    """
    lo = np.min(data_set, axis=axis)
    hi = np.max(data_set, axis=axis)
    data_0_1 = (data_set - lo) / (hi - lo)
    inv_data = data_0_1 * (data_max - data_min) + data_min
    inv_data = np.where(inv_data < data_min, data_min, inv_data)
    inv_data = np.where(inv_data > data_max, data_max, inv_data)
    return inv_data


def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Min-max scale *data_set* into *feature_range*.

    When data_min/data_max are supplied (e.g. the training-set extrema), the
    data is first clipped to that range so the scaling matches training.
    """
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        data_set = np.where(data_set > data_max, data_max, data_set)
    data_in_std_range = (data_set - data_min) / (data_max - data_min)
    data_scaled = data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
    return data_scaled


# -----------------------------------------------------------------

# Training history holds the index split used when the model was trained.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

# Sample indices once; h5py fancy indexing needs them in increasing order.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
random_train_values = np.random.choice(hist_data['train_indxs'], size=predict_numb, replace=False)
sorted_test_idxs = sorted(random_test_values)
sorted_train_idxs = sorted(random_train_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_test_idxs]
    vox_train_mat = vox_file['vox_dset'][sorted_train_idxs]

# The two fluoro views are stored pre-normalized (per-image min/max scaling).
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r') as image_file:
    image_test_1 = image_file['image_1']['min_max_dset_per_image'][sorted_test_idxs]
    image_test_2 = image_file['image_2']['min_max_dset_per_image'][sorted_test_idxs]
    image_train_1 = image_file['image_1']['min_max_dset_per_image'][sorted_train_idxs]
    image_train_2 = image_file['image_2']['min_max_dset_per_image'][sorted_train_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_test_idxs]
    label_train_mat = label_file['labels_dset'][sorted_train_idxs]

# Calibration is stored raw here: normalize it with the *training* extrema
# saved in the history so it matches what the network saw during training.
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['cali_len3_rot'][sorted_test_idxs]
    cali_train_mat = cali_file['cali_len3_rot'][sorted_train_idxs]
cali_test_min_max = min_max_norm(cali_test_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])
cali_train_min_max = min_max_norm(cali_train_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])

# -----------------------------------------------------------------
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))

# BUGFIX: the normalized calibration vectors were computed above but never
# used -- the raw calibration was fed to the network. The stored training
# extrema indicate the model expects min-max-normalized calibration, so pass
# the normalized arrays here.
predict_test = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_min_max], batch_size=6, verbose=2)
predict_train = model.predict([np.expand_dims(vox_train_mat, axis=-1), image_train_1, image_train_2, cali_train_min_max], batch_size=6, verbose=2)

# Keys are preserved verbatim (including the historical 'raw_ouput' spelling)
# so downstream readers of the pickle keep working.
output_dict = {}
output_dict['test_raw_ouput'] = predict_test
output_dict['test_predictions'] = inv_min_max(predict_test, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['test_actual'] = label_test_mat
output_dict['train_raw_ouput'] = predict_train
output_dict['train_predictions'] = inv_min_max(predict_train, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['train_actual'] = label_train_mat
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_norm.py | .py | 3,078 | 94 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

base_dir = os.path.expanduser('~/fluoro/data/compilation')

# All artifact names are derived from the experiment's base name.
file_base_name = 'vox_fluoro_norm'
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
predict_numb = 100  # number of test samples to predict on

# -----------------------------------------------------------------


def inv_min_max(data_set, data_min, data_max, axis=0):
    """Rescale *data_set* onto [data_min, data_max] and clip to that range.

    NOTE(review): the input is renormalized with its *own* per-axis min/max,
    so the output always touches data_min/data_max exactly -- confirm this is
    the intended inverse of the training normalization.
    """
    lo = np.min(data_set, axis=axis)
    hi = np.max(data_set, axis=axis)
    data_0_1 = (data_set - lo) / (hi - lo)
    inv_data = data_0_1 * (data_max - data_min) + data_min
    inv_data = np.where(inv_data < data_min, data_min, inv_data)
    inv_data = np.where(inv_data > data_max, data_max, inv_data)
    return inv_data


# -----------------------------------------------------------------

# Training history holds the index split used when the model was trained.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

# Sample test indices once; h5py fancy indexing needs them in increasing order.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
sorted_idxs = sorted(random_test_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_idxs]

# The two fluoro views are stored pre-normalized (per-image min/max scaling).
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r') as image_file:
    image_test_1 = image_file['image_1']['min_max_dset_per_image'][sorted_idxs]
    image_test_2 = image_file['image_2']['min_max_dset_per_image'][sorted_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_idxs]

# Calibration is also stored pre-normalized for this experiment.
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['min_max_dset'][sorted_idxs]

# -----------------------------------------------------------------
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))

predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_mat], batch_size=10, verbose=2)

# Keys are preserved verbatim (including the historical 'raw_ouput' spelling)
# so downstream readers of the pickle keep working.
output_dict = {}
output_dict['raw_ouput'] = predict_1
output_dict['predictions'] = inv_min_max(predict_1, hist_data['label_train_val_min'], hist_data['label_train_val_max'])
output_dict['actual'] = label_test_mat
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_no_bn_mae.py | .py | 4,674 | 132 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle

base_dir = os.path.expanduser('~/fluoro/data/compilation')

# All artifact names are derived from the experiment's base name.
file_base_name = 'vox_fluoro_no_bn_mae'
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
predict_numb = 100  # samples drawn from each of the test and train splits

# -----------------------------------------------------------------


def inv_min_max(data_set, data_min, data_max, axis=0):
    """Rescale *data_set* onto [data_min, data_max] and clip to that range.

    NOTE(review): the input is renormalized with its *own* per-axis min/max,
    so the output always touches data_min/data_max exactly -- confirm this is
    the intended inverse of the training normalization.
    """
    lo = np.min(data_set, axis=axis)
    hi = np.max(data_set, axis=axis)
    data_0_1 = (data_set - lo) / (hi - lo)
    inv_data = data_0_1 * (data_max - data_min) + data_min
    inv_data = np.where(inv_data < data_min, data_min, inv_data)
    inv_data = np.where(inv_data > data_max, data_max, inv_data)
    return inv_data


def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Min-max scale *data_set* into *feature_range*.

    When data_min/data_max are supplied (e.g. the training-set extrema), the
    data is first clipped to that range so the scaling matches training.
    """
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        data_set = np.where(data_set > data_max, data_max, data_set)
    data_in_std_range = (data_set - data_min) / (data_max - data_min)
    data_scaled = data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
    return data_scaled


# -----------------------------------------------------------------

# Training history holds the index split used when the model was trained.
with open(os.path.join(hist_path, hist_file_name), 'rb') as hist_file:
    hist_data = pickle.load(hist_file)

# Sample indices once; h5py fancy indexing needs them in increasing order.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
random_train_values = np.random.choice(hist_data['train_indxs'], size=predict_numb, replace=False)
sorted_test_idxs = sorted(random_test_values)
sorted_train_idxs = sorted(random_train_values)

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r') as vox_file:
    vox_test_mat = vox_file['vox_dset'][sorted_test_idxs]
    vox_train_mat = vox_file['vox_dset'][sorted_train_idxs]

# The two fluoro views are stored pre-normalized (per-image min/max scaling).
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r') as image_file:
    image_test_1 = image_file['image_1']['min_max_dset_per_image'][sorted_test_idxs]
    image_test_2 = image_file['image_2']['min_max_dset_per_image'][sorted_test_idxs]
    image_train_1 = image_file['image_1']['min_max_dset_per_image'][sorted_train_idxs]
    image_train_2 = image_file['image_2']['min_max_dset_per_image'][sorted_train_idxs]

with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
    label_test_mat = label_file['labels_dset'][sorted_test_idxs]
    label_train_mat = label_file['labels_dset'][sorted_train_idxs]

# Calibration is stored raw here: normalize it with the *training* extrema
# saved in the history so it matches what the network saw during training.
with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
    cali_test_mat = cali_file['cali_len3_rot'][sorted_test_idxs]
    cali_train_mat = cali_file['cali_len3_rot'][sorted_train_idxs]
cali_test_min_max = min_max_norm(cali_test_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])
cali_train_min_max = min_max_norm(cali_train_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])

# -----------------------------------------------------------------
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))

# BUGFIX: the normalized calibration vectors were computed above but never
# used -- the raw calibration was fed to the network. The stored training
# extrema indicate the model expects min-max-normalized calibration, so pass
# the normalized arrays here.
predict_test = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_min_max], batch_size=6, verbose=2)
predict_train = model.predict([np.expand_dims(vox_train_mat, axis=-1), image_train_1, image_train_2, cali_train_min_max], batch_size=6, verbose=2)

# Keys are preserved verbatim (including the historical 'raw_ouput' spelling)
# so downstream readers of the pickle keep working.
output_dict = {}
output_dict['test_raw_ouput'] = predict_test
output_dict['test_predictions'] = inv_min_max(predict_test, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['test_actual'] = label_test_mat
output_dict['train_raw_ouput'] = predict_train
output_dict['train_predictions'] = inv_min_max(predict_train, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['train_actual'] = label_train_mat
with open(os.path.join(save_dir, save_file_name), 'wb') as save_file:
    pickle.dump(output_dict, save_file)
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_norm_nadam_lr_0-01_mse.py | .py | 3,096 | 94 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle
# Root of the compiled dataset.
# NOTE(review): base_dir is not referenced later in this script -- the data
# paths below are spelled out inline; confirm whether it can be removed.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
# Experiment identifier; every artifact name below is derived from it.
file_base_name = 'vox_fluoro_norm_nadam_lr_0-01_mse'
# Directory holding the training-history pickle and the saved Keras model.
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
# Destination for the prediction pickle produced by this script.
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
# Number of examples to sample (without replacement) for prediction.
predict_numb = 100
# -----------------------------------------------------------------
def inv_min_max(data_set, data_min, data_max, axis=0):
    """Map *data_set* onto the range [data_min, data_max].

    The input is first rescaled to [0, 1] using its own extrema along
    *axis*, then stretched to the target range and clamped to it.
    """
    lo = np.min(data_set, axis=axis)
    span = np.max(data_set, axis=axis) - lo
    unit = (data_set - lo) / span
    rescaled = data_min + unit * (data_max - data_min)
    # Clamp into the target range (equivalent to the paired np.where form).
    return np.clip(rescaled, data_min, data_max)
# -----------------------------------------------------------------
# Training bookkeeping pickle: held-out test indices plus the normalization
# bounds recorded when the model was trained.
hist_file = open(os.path.join(hist_path, hist_file_name), 'rb')
hist_data = pickle.load(hist_file)
hist_file.close()
# Sample test examples (without replacement) to run predictions on.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
# Voxelized volumes; sorted() because h5py fancy indexing expects the index
# list in increasing order.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
vox_test_mat = vox_init[sorted(random_test_values)]
vox_file.close()
# Pre-normalized fluoroscopy images (per-image min-max), one dataset per view.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
image_test_1 = image_init_1[sorted(random_test_values)]
image_test_2 = image_init_2[sorted(random_test_values)]
image_file.close()
# Ground-truth pose labels.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_test_mat = label_init[sorted(random_test_values)]
label_file.close()
# Pre-normalized calibration vectors.
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
cali_test_mat = cali_init[sorted(random_test_values)]
cali_file.close()
# -----------------------------------------------------------------
# Load the trained model and predict on the sampled test examples.
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))
predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_mat], batch_size=10, verbose=2)
# Persist raw output, range-restored predictions, and ground truth.
# NOTE(review): key is spelled 'raw_ouput' (sic); left unchanged because
# downstream readers rely on that spelling.
save_file = open(os.path.join(save_dir, save_file_name), 'wb')
output_dict = {}
output_dict['raw_ouput'] = predict_1
output_dict['predictions'] = inv_min_max(predict_1, hist_data['label_train_val_min'], hist_data['label_train_val_max'])
output_dict['actual'] = label_test_mat
pickle.dump(output_dict, save_file)
save_file.close()
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_min_max_1.py | .py | 4,674 | 132 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle
# Root of the compiled dataset.
# NOTE(review): base_dir is not referenced later in this script -- the data
# paths below are spelled out inline; confirm whether it can be removed.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
# Experiment identifier; every artifact name below is derived from it.
file_base_name = 'vox_fluoro_min_max_1'
# Directory holding the training-history pickle and the saved Keras model.
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
# Destination for the prediction pickle produced by this script.
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
# Number of examples to sample (without replacement) per split.
predict_numb = 100
# -----------------------------------------------------------------
def inv_min_max(data_set, data_min, data_max, axis=0):
    """Map *data_set* onto the range [data_min, data_max].

    The input is first rescaled to [0, 1] using its own extrema along
    *axis*, then stretched to the target range and clamped to it.
    """
    lo = np.min(data_set, axis=axis)
    span = np.max(data_set, axis=axis) - lo
    unit = (data_set - lo) / span
    rescaled = data_min + unit * (data_max - data_min)
    # Clamp into the target range (equivalent to the paired np.where form).
    return np.clip(rescaled, data_min, data_max)
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Min-max scale *data_set* into *feature_range*.

    When data_min / data_max are supplied, values outside [data_min,
    data_max] are clamped before scaling; otherwise the extrema are
    computed from the data along *axis*.
    """
    lo, hi = feature_range
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        # Clamp from below against the externally supplied minimum.
        data_set = np.maximum(data_set, data_min)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        # Clamp from above against the externally supplied maximum.
        data_set = np.minimum(data_set, data_max)
    unit = (data_set - data_min) / (data_max - data_min)
    return lo + unit * (hi - lo)
# -----------------------------------------------------------------
# Training bookkeeping pickle: train/test indices plus the normalization
# bounds recorded when the model was trained.
hist_file = open(os.path.join(hist_path, hist_file_name), 'rb')
hist_data = pickle.load(hist_file)
hist_file.close()
# Sample examples (without replacement) from both splits.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
random_train_values = np.random.choice(hist_data['train_indxs'], size=predict_numb, replace=False)
# Voxelized volumes; sorted() because h5py fancy indexing expects increasing
# index order.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
vox_test_mat = vox_init[sorted(random_test_values)]
vox_train_mat = vox_init[sorted(random_train_values)]
vox_file.close()
# Pre-normalized fluoroscopy images (per-image min-max), one dataset per view.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
image_test_1 = image_init_1[sorted(random_test_values)]
image_test_2 = image_init_2[sorted(random_test_values)]
image_train_1 = image_init_1[sorted(random_train_values)]
image_train_2 = image_init_2[sorted(random_train_values)]
image_file.close()
# Ground-truth pose labels.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_test_mat = label_init[sorted(random_test_values)]
label_train_mat = label_init[sorted(random_train_values)]
label_file.close()
# Raw calibration vectors plus min-max-normalized copies built with the
# training-time bounds.
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
cali_test_mat = cali_init[sorted(random_test_values)]
cali_test_min_max = min_max_norm(cali_test_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])
cali_train_mat = cali_init[sorted(random_train_values)]
cali_train_min_max = min_max_norm(cali_train_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])
cali_file.close()
# -----------------------------------------------------------------
# NOTE(review): cali_test_min_max / cali_train_min_max are computed above but
# never used -- model.predict below receives the raw calibration matrices.
# Confirm which representation the model was trained on.
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))
predict_test = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_mat], batch_size=6, verbose=2)
predict_train = model.predict([np.expand_dims(vox_train_mat, axis=-1), image_train_1, image_train_2, cali_train_mat], batch_size=6, verbose=2)
# Persist raw output, range-restored predictions, and ground truth.
# NOTE(review): keys are spelled 'raw_ouput' (sic); left unchanged because
# downstream readers rely on that spelling.
save_file = open(os.path.join(save_dir, save_file_name), 'wb')
output_dict = {}
output_dict['test_raw_ouput'] = predict_test
output_dict['test_predictions'] = inv_min_max(predict_test, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['test_actual'] = label_test_mat
output_dict['train_raw_ouput'] = predict_train
output_dict['train_predictions'] = inv_min_max(predict_train, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['train_actual'] = label_train_mat
pickle.dump(output_dict, save_file)
save_file.close()
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_res_update_mae.py | .py | 3,665 | 101 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle
# Root of the compiled dataset.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
# Experiment identifier; every artifact name below is derived from it.
file_base_name = 'vox_fluoro_res_update_mae'
# Directory holding the training-history pickle and the saved Keras model.
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
# Destination for the prediction pickle produced by this script.
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
# Number of examples to sample (without replacement) per split.
predict_numb = 100
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-scaled mean squared error.

    Each squared residual is divided by the per-component variance of the
    training labels (loaded from labels_stats.h5py), so all pose components
    contribute on a comparable scale regardless of their native units.

    Args:
        y_true: ground-truth label tensor.
        y_pred: model prediction tensor, same shape as y_true.

    Returns:
        Scalar tensor: mean of the variance-scaled squared errors.
    """
    # Keras may invoke the loss repeatedly; cache the variance vector on the
    # function object so the HDF5 stats file is read once instead of being
    # reopened on every call (the original reopened it each time).
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            var_v = stats_file['var'][:]
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Training bookkeeping pickle: train/test indices recorded at training time.
hist_file = open(os.path.join(hist_path, hist_file_name), 'rb')
hist_data = pickle.load(hist_file)
hist_file.close()
# Sample examples (without replacement) from both splits.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
random_train_values = np.random.choice(hist_data['train_indxs'], size=predict_numb, replace=False)
# Voxelized volumes; sorted() because h5py fancy indexing expects increasing
# index order.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
vox_test_mat = vox_init[sorted(random_test_values)]
vox_train_mat = vox_init[sorted(random_train_values)]
vox_file.close()
# Raw fluoroscopy images; both views live in one dataset, indexed on axis 1.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
image_test_mat = image_init[sorted(random_test_values)]
image_train_mat = image_init[sorted(random_train_values)]
image_file.close()
# Ground-truth pose labels.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_test_mat = label_init[sorted(random_test_values)]
label_train_mat = label_init[sorted(random_train_values)]
label_file.close()
# Raw calibration vectors.
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
cali_test_mat = cali_init[sorted(random_test_values)]
cali_train_mat = cali_init[sorted(random_train_values)]
cali_file.close()
# -----------------------------------------------------------------
# The model was saved with a custom loss, so it must be registered via
# custom_objects for deserialization to succeed.
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name), custom_objects={'cust_mean_squared_error_var': cust_mean_squared_error_var})
# Split the two fluoro views off axis 1 and add a trailing channel axis.
predict_test = model.predict([np.expand_dims(vox_test_mat, axis=-1), np.expand_dims(image_test_mat[:, 0, :, :], axis=-1), np.expand_dims(image_test_mat[:, 1, :, :], axis=-1), cali_test_mat], batch_size=10, verbose=2)
predict_train = model.predict([np.expand_dims(vox_train_mat, axis=-1), np.expand_dims(image_train_mat[:, 0, :, :], axis=-1), np.expand_dims(image_train_mat[:, 1, :, :], axis=-1), cali_train_mat], batch_size=10, verbose=2)
# Persist predictions and ground truth for both splits.
save_file = open(os.path.join(save_dir, save_file_name), 'wb')
output_dict = {}
output_dict['test_predictions'] = predict_test
output_dict['test_actual'] = label_test_mat
output_dict['train_predictions'] = predict_train
output_dict['train_actual'] = label_train_mat
pickle.dump(output_dict, save_file)
save_file.close()
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_std.py | .py | 3,077 | 94 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle
# Root of the compiled dataset.
# NOTE(review): base_dir is not referenced later in this script -- the data
# paths below are spelled out inline; confirm whether it can be removed.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
# Experiment identifier; every artifact name below is derived from it.
file_base_name = 'vox_fluoro_std'
# Directory holding the training-history pickle and the saved Keras model.
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
# Destination for the prediction pickle produced by this script.
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
# Number of examples to sample (without replacement) for prediction.
predict_numb = 100
# -----------------------------------------------------------------
def inv_min_max(data_set, data_min, data_max, axis=0):
    """Map *data_set* onto the range [data_min, data_max].

    The input is first rescaled to [0, 1] using its own extrema along
    *axis*, then stretched to the target range and clamped to it.
    """
    lo = np.min(data_set, axis=axis)
    span = np.max(data_set, axis=axis) - lo
    unit = (data_set - lo) / span
    rescaled = data_min + unit * (data_max - data_min)
    # Clamp into the target range (equivalent to the paired np.where form).
    return np.clip(rescaled, data_min, data_max)
# -----------------------------------------------------------------
# Training bookkeeping pickle: held-out test indices plus the normalization
# bounds recorded when the model was trained.
hist_file = open(os.path.join(hist_path, hist_file_name), 'rb')
hist_data = pickle.load(hist_file)
hist_file.close()
# Sample test examples (without replacement) to run predictions on.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
# Voxelized volumes; sorted() because h5py fancy indexing expects increasing
# index order.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
vox_test_mat = vox_init[sorted(random_test_values)]
vox_file.close()
# Pre-normalized fluoroscopy images (per-image min-max), one dataset per view.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
image_test_1 = image_init_1[sorted(random_test_values)]
image_test_2 = image_init_2[sorted(random_test_values)]
image_file.close()
# Ground-truth pose labels.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_test_mat = label_init[sorted(random_test_values)]
label_file.close()
# Pre-normalized calibration vectors.
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
cali_test_mat = cali_init[sorted(random_test_values)]
cali_file.close()
# -----------------------------------------------------------------
# Load the trained model and predict on the sampled test examples.
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))
predict_1 = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_mat], batch_size=10, verbose=2)
# Persist raw output, range-restored predictions, and ground truth.
# NOTE(review): key is spelled 'raw_ouput' (sic); left unchanged because
# downstream readers rely on that spelling.
save_file = open(os.path.join(save_dir, save_file_name), 'wb')
output_dict = {}
output_dict['raw_ouput'] = predict_1
output_dict['predictions'] = inv_min_max(predict_1, hist_data['label_train_val_min'], hist_data['label_train_val_max'])
output_dict['actual'] = label_test_mat
pickle.dump(output_dict, save_file)
save_file.close()
| Python |
3D | john-drago/fluoro | code/jupyt/update_2019-Sep-17/model_prediction_vox_fluoro_res.py | .py | 3,641 | 101 | '''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle
# Root of the compiled dataset.
base_dir = os.path.expanduser('~/fluoro/data/compilation')
# Experiment identifier; every artifact name below is derived from it.
file_base_name = 'vox_fluoro_res'
# Directory holding the training-history pickle and the saved Keras model.
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
# Destination for the prediction pickle produced by this script.
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
# Number of examples to sample (without replacement) per split.
predict_numb = 100
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-scaled mean squared error.

    Each squared residual is divided by the per-component variance of the
    training labels (loaded from labels_stats.h5py), so all pose components
    contribute on a comparable scale regardless of their native units.

    Args:
        y_true: ground-truth label tensor.
        y_pred: model prediction tensor, same shape as y_true.

    Returns:
        Scalar tensor: mean of the variance-scaled squared errors.
    """
    # Keras may invoke the loss repeatedly; cache the variance vector on the
    # function object so the HDF5 stats file is read once instead of being
    # reopened on every call (the original reopened it each time).
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            var_v = stats_file['var'][:]
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Training bookkeeping pickle: train/test indices recorded at training time.
hist_file = open(os.path.join(hist_path, hist_file_name), 'rb')
hist_data = pickle.load(hist_file)
hist_file.close()
# Sample examples (without replacement) from both splits.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
random_train_values = np.random.choice(hist_data['train_indxs'], size=predict_numb, replace=False)
# Padded voxel volumes; sorted() because h5py fancy indexing expects
# increasing index order.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
vox_test_mat = vox_init[sorted(random_test_values)]
vox_train_mat = vox_init[sorted(random_train_values)]
vox_file.close()
# Raw fluoroscopy images; both views live in one dataset, indexed on axis 1.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
image_test_mat = image_init[sorted(random_test_values)]
image_train_mat = image_init[sorted(random_train_values)]
image_file.close()
# Ground-truth pose labels.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_test_mat = label_init[sorted(random_test_values)]
label_train_mat = label_init[sorted(random_train_values)]
label_file.close()
# Raw calibration vectors.
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
cali_test_mat = cali_init[sorted(random_test_values)]
cali_train_mat = cali_init[sorted(random_train_values)]
cali_file.close()
# -----------------------------------------------------------------
# The model was saved with a custom loss, so it must be registered via
# custom_objects for deserialization to succeed.
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name), custom_objects={'cust_mean_squared_error_var': cust_mean_squared_error_var})
# Split the two fluoro views off axis 1 and add a trailing channel axis.
predict_test = model.predict([np.expand_dims(vox_test_mat, axis=-1), np.expand_dims(image_test_mat[:, 0, :, :], axis=-1), np.expand_dims(image_test_mat[:, 1, :, :], axis=-1), cali_test_mat], batch_size=10, verbose=2)
predict_train = model.predict([np.expand_dims(vox_train_mat, axis=-1), np.expand_dims(image_train_mat[:, 0, :, :], axis=-1), np.expand_dims(image_train_mat[:, 1, :, :], axis=-1), cali_train_mat], batch_size=10, verbose=2)
# Persist predictions and ground truth for both splits.
save_file = open(os.path.join(save_dir, save_file_name), 'wb')
output_dict = {}
output_dict['test_predictions'] = predict_test
output_dict['test_actual'] = label_test_mat
output_dict['train_predictions'] = predict_train
output_dict['train_actual'] = label_train_mat
pickle.dump(output_dict, save_file)
save_file.close()
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_conv_2.py | .py | 13,103 | 297 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment name is this script's filename without the '.py' suffix.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# Per-experiment output directory for Talos artifacts and the model diagram.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the fluoro images, pose labels, and calibration vectors, then
    split them for hyperparameter search.

    Two chained 80/20 splits with fixed seeds: first a held-out test set is
    removed (and discarded here), then a validation set is carved out of the
    remaining cumulative training portion.

    Returns:
        Tuple of (image_train_sub, image_val, cali_train_sub, cali_val,
        label_train_sub, label_val).
    """
    # Context managers guarantee the HDF5 handles are closed; the original
    # left all three files open for the life of the process.
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]
    # Hold out 20% as a test set (unused in this experiment). random_state is
    # fixed so every run sees the same partition.
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(image_mat, cali_mat, label_mat, shuffle=True, random_state=42, test_size=0.2)
    # Carve a validation set out of the cumulative training portion.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(image_train_cum, cali_train_cum, label_train_cum, shuffle=True, random_state=42, test_size=0.2)
    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# Talos hyperparameter search space. Only the first pooling stage and the
# second conv/pool stage are swept in this run; the commented-out entries
# document other sweep dimensions that are fixed inside fluoro_model instead.
params = {
    # 'regularizer_l1': (0.0, 1.0, 20),
    # 'regularizer_l2': (0.0, 1.0, 20),
    # 'activation_fn': ['elu', 'relu'],
    # 'kern_init': ['glorot_uniform', 'glorot_normal'],
    # 'conv_1_filters':[10, 20, 40, 50],
    # 'conv_1_kernel':[3,5,10],
    # 'conv_1_strides':[1,2],
    # 'spatial_drop_rate_1':(0.0, 1.0, 10),
    'pool_1_size':[2,3],
    'conv_2_filters':[40, 60, 80,100],
    'conv_2_kernel':[3,5,7],
    'conv_2_strides':[1,2],
    'pool_2_size':[2,3],
    # 'conv_3_filters':[20, 80, 100],
    # 'conv_3_kernel':[2,3],
    # 'conv_3_strides':[1,2],
    # 'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0.0, 1.0, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100.0,10)
}
def fluoro_model(X_talos,y_talos,X_val,y_val,params):
    """Build, compile, and train one trial of the two-branch fluoro CNN.

    Talos calls this once per hyperparameter combination. Two identical (but
    independently weighted) convolutional towers process the two fluoroscopy
    views; their dense embeddings are concatenated with an embedding of the
    6-element calibration vector and regressed to the 6 pose outputs.

    Args:
        X_talos: [images, calibration] training arrays; images are indexed as
            X_talos[0][:, view, :, :] for the two views.
        y_talos: training labels (6 pose components).
        X_val: [images, calibration] validation arrays, same layout.
        y_val: validation labels.
        params: Talos-sampled hyperparameters; only the pool-1 size and the
            conv-2 / pool-2 settings are read here -- everything else is fixed
            below (earlier sweep dimensions remain as commented-out lines).

    Returns:
        (history, model) as required by the Talos model-function contract.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric expressed in Keras backend ops.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128,128,1)
    # Hyperparameters
    # regularizer = keras.regularizers.l1_l2(l1 = params['regularizer_l1'], l2 = params['regularizer_l2'])
    regularizer = keras.regularizers.l1_l2(l1 = 0.05, l2 = 0.2)
    # activation_fn = params['activation_fn']
    # kern_init = params['kern_init']
    activation_fn = 'elu'
    kern_init = 'glorot_uniform'
    # conv_1_filters = params['conv_1_filters']
    # conv_1_kernel = (params['conv_1_kernel'],params['conv_1_kernel'])
    # conv_1_strides = (params['conv_1_strides'],params['conv_1_strides'])
    conv_1_padding = 'valid'
    conv_1_filters = 50
    conv_1_kernel = (10,10)
    conv_1_strides = (2,2)
    # spatial_drop_rate_1 = params['spatial_drop_rate_1']
    spatial_drop_rate_1 = 0.3
    # Swept by Talos in this run:
    pool_1_size = (params['pool_1_size'],params['pool_1_size'])
    pool_1_padding = 'same'
    # pool_1_size = (2,2)
    conv_2_filters = params['conv_2_filters']
    conv_2_kernel = (params['conv_2_kernel'],params['conv_2_kernel'])
    conv_2_strides = (params['conv_2_strides'],params['conv_2_strides'])
    conv_2_padding = 'same'
    # conv_2_filters = 80
    # conv_2_kernel = (3,3)
    # conv_2_strides = (1,1)
    pool_2_size = (params['pool_2_size'],params['pool_2_size'])
    pool_2_padding = 'same'
    # pool_2_size = (2,2)
    # conv_3_filters = params['conv_3_filters']
    # conv_3_kernel = (params['conv_3_kernel'],params['conv_3_kernel'])
    # conv_3_strides = (params['conv_3_strides'],params['conv_3_strides'])
    conv_3_padding = 'valid'
    conv_3_filters = 80
    conv_3_kernel = (2,2)
    conv_3_strides = (1,1)
    pool_3_size = (2, 2)
    pool_3_padding = 'valid'
    # dense_1_f_units = params['dense_1_f_units']
    dense_1_f_bias = True
    dense_1_f_units = 80
    # dense_2_f_units = params['dense_2_f_units']
    dense_2_f_units = 120
    dense_2_f_bias = True
    # dense_3_f_units = params['dense_3_f_units']
    dense_3_f_units = 120
    dense_3_f_bias = True
    # dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_units = 60
    dense_1_ca_bias = True
    # dense_2_co_units = params['dense_2_co_units']
    dense_2_co_units = 80
    dense_2_co_bias = True
    # drop_1_comb_rate = params['drop_1_comb_rate']
    drop_1_comb_rate = 0.1
    # dense_3_co_units = params['dense_3_co_units']
    dense_3_co_units = 80
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    # model_opt = params['model_opt'](lr=params('learning_rate'))
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    # model_epochs = params['model_epochs']
    # model_batchsize = params['model_batchsize']
    model_epochs = 30
    model_batchsize = 10
    # --- model graph: three inputs -------------------------------------------
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype = 'float32', name = 'cali_inpt')
    # --- tower 1 (fluoro view 1): BN -> conv/pool x3 -> dense x3 -------------
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_1')(dense_2_f_1)
    # --- tower 2 (fluoro view 2): same architecture, independent weights -----
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_2')(dense_2_f_2)
    # --- calibration branch and fusion head ----------------------------------
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation = activation_fn, use_bias = dense_1_ca_bias, kernel_initializer=kern_init, name = 'dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name = 'dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation = activation_fn, use_bias = dense_2_co_bias, kernel_initializer=kern_init, name = 'dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,activation =activation_fn,use_bias=dense_3_co_bias,kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation = main_output_act, name = 'main_output')(dense_3_comb)
    model = keras.Model(inputs = [input_fluoro_1,input_fluoro_2,input_cali], outputs = main_output)
    # Save an architecture diagram alongside the Talos artifacts.
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.png')), show_shapes=True)
    model.compile(optimizer=model_opt,loss = model_loss, metrics = [model_metric])
    # X_talos/X_val arrive as [images, calibration]; split the two fluoro views
    # off axis 1 and add a trailing channel axis for each.
    result = model.fit(x=[np.expand_dims(X_talos[0][:,0,:,:],axis=3),np.expand_dims(X_talos[0][:,1,:,:],axis=3), X_talos[1]],y=y_talos,epochs = model_epochs,batch_size = model_batchsize,validation_data=([np.expand_dims(X_val[0][:,0,:,:],axis=3),np.expand_dims(X_val[0][:,1,:,:],axis=3),X_val[1]],y_val), shuffle = True,verbose=1)
    return result, model
# Materialize the train/validation split (the held-out test split is
# discarded inside data_comp).
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()
# fluoro_model([image_train_cum,cali_train_cum],label_train_cum,params)
print('\n'*3)
print('Here we go: ')
print('\n'*3)
# Run the Talos hyperparameter scan: x is [images, calibration] as expected by
# fluoro_model; grid_downsample=0.25 samples a quarter of the parameter grid.
t = talos.Scan(x = [image_train_sub,cali_train_sub], y=label_train_sub, x_val = [image_val,cali_val], y_val = label_val, params=params, model=fluoro_model, grid_downsample = 0.25,random_method = 'uniform_mersenne',clear_tf_session=True, print_params=True,dataset_name=expr_name, experiment_no=expr_no,debug=True)
# Dump the Scan object's diagnostics to stdout for the experiment log.
print('\n\n\n')
print('-------------')
print('\n')
print('t.data')
print('\n')
print(t.data)
print('\n')
print('-------------')
print('\n\n\n')
print('\n\n\n')
print('-------------')
print('\n')
print('t.details')
print('\n')
print(t.details)
print('\n')
print('-------------')
print('\n\n\n')
print('\n\n\n')
print('-------------')
print('\n')
print('t.saved_models')
print('\n')
print(t.saved_models)
print('\n')
print('-------------')
print('\n\n\n')
print('\n\n\n')
print('-------------')
print('\n')
print('t.saved_weights')
print('\n')
print(t.saved_weights)
print('\n')
print('-------------')
print('\n\n\n')
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_dense_1.py | .py | 13,190 | 298 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment name is this script's filename without the '.py' suffix.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# Per-experiment output directory for Talos artifacts and the model diagram.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the fluoro images, pose labels, and calibration vectors from
    disk and produce reproducible train/validation splits.

    The full data set is split 80/20 into a cumulative training set and a
    held-out test set; the cumulative training set is then split 80/20
    again into the sub-training and validation sets that are returned.
    Both splits are shuffled with random_state=42 so the partition is
    identical across runs.

    Returns:
        (image_train_sub, image_val,
         cali_train_sub, cali_val,
         label_train_sub, label_val)
    """
    # Context managers guarantee the HDF5 handles are closed even if a read
    # fails; the originals were left open for the life of the process.
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # Hold out 20% as the final test set (unused here, but carved off so the
    # rest of the pipeline never sees it).
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, random_state=42, test_size=0.2)

    # Carve a validation set out of the remaining training data.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        image_train_cum, cali_train_cum, label_train_cum, shuffle=True, random_state=42, test_size=0.2)

    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# Talos search space for this run.  Only the third pooling size and the
# dense-layer widths are swept; the commented-out entries were explored in
# other runs and are pinned inside fluoro_model for this experiment.
params = {
    # 'regularizer_l1': (0.0, 1.0, 20),
    # 'regularizer_l2': (0.0, 1.0, 20),
    # 'activation_fn': ['elu', 'relu'],
    # 'kern_init': ['glorot_uniform', 'glorot_normal'],
    # 'conv_1_filters':[10, 20, 40, 50],
    # 'conv_1_kernel':[3,5,10],
    # 'conv_1_strides':[1,2],
    # 'spatial_drop_rate_1':(0.0, 1.0, 10),
    # 'pool_1_size':[2,3],
    # 'conv_2_filters':[40, 60, 80,100],
    # 'conv_2_kernel':[3,5,7],
    # 'conv_2_strides':[1,2],
    # 'pool_2_size':[2,3],
    # 'conv_3_filters':[20, 80, 100],
    # 'conv_3_kernel':[2,3],
    # 'conv_3_strides':[1,2],
    'pool_3_size':[2,3],
    'dense_1_f_units':[40, 80, 120],
    'dense_2_f_units':[40, 80, 120],
    'dense_3_f_units':[40, 80, 120],
    'dense_1_ca_units':[6, 20, 60],
    'dense_2_co_units':[20, 40, 80],
    'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0.0, 1.0, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100.0,10)
}
def fluoro_model(X_talos,y_talos,X_val,y_val,params):
    """Talos model function: build, compile, and fit the two-tower fluoro
    CNN for one hyperparameter combination supplied in ``params``.

    Args:
        X_talos: [image stack, calibration matrix] training inputs; the
            image stack is indexed as X_talos[0][:, view, :, :], so it is
            assumed to be (N, 2, 128, 128) -- one 128x128 image per fluoro
            view (TODO confirm against the caller).
        y_talos: pose targets matching the 6-unit linear output.
        X_val, y_val: validation data in the same layout.
        params: talos-supplied dict of the swept hyperparameters.

    Returns:
        (history, model) tuple, as required by talos.Scan.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric; Keras only ships MSE out of the box.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128,128,1)

    # ---- Hyperparameters --------------------------------------------------
    # Only the pool-3 size and the dense-layer widths come from params in
    # this run; the convolutional settings below are pinned.
    regularizer = keras.regularizers.l1_l2(l1 = 0.05, l2 = 0.2)
    activation_fn = 'elu'
    kern_init = 'glorot_uniform'
    conv_1_padding = 'valid'
    conv_1_filters = 50
    conv_1_kernel = (10,10)
    conv_1_strides = (2,2)
    spatial_drop_rate_1 = 0.3
    pool_1_padding = 'same'
    pool_1_size = (3,3)
    conv_2_padding = 'same'
    conv_2_filters = 40
    conv_2_kernel = (7,7)
    conv_2_strides = (1,1)
    pool_2_padding = 'same'
    pool_2_size = (2,2)
    conv_3_padding = 'same'
    conv_3_filters = 100
    conv_3_kernel = (3,3)
    conv_3_strides = (1,1)
    pool_3_size = (params['pool_3_size'],params['pool_3_size'])
    pool_3_padding = 'same'
    dense_1_f_units = params['dense_1_f_units']
    dense_1_f_bias = True
    dense_2_f_units = params['dense_2_f_units']
    dense_2_f_bias = True
    dense_3_f_units = params['dense_3_f_units']
    dense_3_f_bias = True
    dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_bias = True
    dense_2_co_units = params['dense_2_co_units']
    dense_2_co_bias = True
    drop_1_comb_rate = 0.1
    # BUG FIX: 'dense_3_co_units' is part of the active sweep in the params
    # dict but was previously pinned to 80 here, so talos recorded a
    # hyperparameter that never affected the model.  Read it from params.
    dense_3_co_units = params['dense_3_co_units']
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    model_epochs = 30
    model_batchsize = 10

    # ---- Inputs: one 128x128x1 image per fluoro view plus a length-6
    # calibration vector --------------------------------------------------
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype = 'float32', name = 'cali_inpt')

    # ---- Convolutional tower for fluoro view 1 ---------------------------
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_1')(dense_2_f_1)

    # ---- Convolutional tower for fluoro view 2 (same hyperparameters,
    # independent weights) -------------------------------------------------
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_2')(dense_2_f_2)

    # ---- Calibration branch and combination head -------------------------
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation = activation_fn, use_bias = dense_1_ca_bias, kernel_initializer=kern_init, name = 'dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name = 'dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation = activation_fn, use_bias = dense_2_co_bias, kernel_initializer=kern_init, name = 'dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,activation =activation_fn,use_bias=dense_3_co_bias,kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation = main_output_act, name = 'main_output')(dense_3_comb)

    model = keras.Model(inputs = [input_fluoro_1,input_fluoro_2,input_cali], outputs = main_output)
    # Side effect: writes the architecture diagram next to the scan results.
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.png')), show_shapes=True)
    model.compile(optimizer=model_opt,loss = model_loss, metrics = [model_metric])
    # Split the stacked (N, 2, 128, 128) views into two single-channel
    # image inputs before fitting.
    result = model.fit(x=[np.expand_dims(X_talos[0][:,0,:,:],axis=3),np.expand_dims(X_talos[0][:,1,:,:],axis=3), X_talos[1]],y=y_talos,epochs = model_epochs,batch_size = model_batchsize,validation_data=([np.expand_dims(X_val[0][:,0,:,:],axis=3),np.expand_dims(X_val[0][:,1,:,:],axis=3),X_val[1]],y_val), shuffle = True,verbose=1)
    return result, model
# ---------------------------------------------------------------------------
# Driver: split the data, run the talos hyperparameter scan, and dump the
# scan results to stdout.
# ---------------------------------------------------------------------------
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()

print('\n'*3)
print('Here we go: ')
print('\n'*3)

# NOTE(review): grid_downsample / random_method / debug are talos-version-
# specific keyword arguments -- confirm against the installed talos release.
t = talos.Scan(x = [image_train_sub,cali_train_sub], y=label_train_sub, x_val = [image_val,cali_val], y_val = label_val, params=params, model=fluoro_model, grid_downsample = 0.25,random_method = 'uniform_mersenne',clear_tf_session=True, print_params=True,dataset_name=expr_name, experiment_no=expr_no,debug=True)

# Print each Scan result attribute in the same banner format as before,
# without repeating the nine-line print block four times.
for _attr in ('data', 'details', 'saved_models', 'saved_weights'):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print('t.' + _attr)
    print('\n')
    print(getattr(t, _attr))
    print('\n')
    print('-------------')
    print('\n\n\n')
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_trial1.py | .py | 12,674 | 283 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: name the run after this script and create the
# directory that will hold its outputs (architecture diagram, results).
# os.path.splitext replaces the brittle [:-3] slice, so the name is still
# correct if the script path does not end in ".py".
expr_name = os.path.splitext(sys.argv[0])[0]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the fluoro images, pose labels, and calibration vectors from
    disk and produce train/validation splits.

    The full data set is split 80/20 into a cumulative training set and a
    held-out test set; the cumulative training set is then split 80/20
    again into the sub-training and validation sets that are returned.
    Splits are shuffled without a fixed seed, so the partition differs
    between runs.

    Returns:
        (image_train_sub, image_val,
         cali_train_sub, cali_val,
         label_train_sub, label_val)
    """
    # Context managers guarantee the HDF5 handles are closed even if a read
    # fails; the originals were left open for the life of the process.
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # Hold out 20% as the final test set (unused here, but carved off so the
    # rest of the pipeline never sees it).
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2)

    # Carve a validation set out of the remaining training data.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2)

    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# Talos search space for this run: regularization, activations, and all the
# convolutional settings are swept; the dense widths past dense_1_f and the
# training schedule (commented out below) are pinned inside fluoro_model.
params = {
    'regularizer_l1':(0, 1, 10),
    'regularizer_l2':(0, 1, 10),
    'activation_fn':['elu', 'relu'],
    'kern_init':['glorot_uniform', 'glorot_normal'],
    'conv_1_filters':[10, 20, 40, 50],
    'conv_1_kernel':[(10, 10), (5, 5),(3, 3)],
    'conv_1_strides':[(2, 2), (1, 1)],
    'spatial_drop_rate_1':(0, 1, 10),
    'pool_1_size':[(2, 2), (3, 3)],
    'conv_2_filters':[20, 40, 80],
    'conv_2_kernel':[(3, 3), (5, 5)],
    'conv_2_strides':[(2, 2), (1, 1)],
    'pool_2_size':[(2, 2), (3, 3)],
    'conv_3_filters':[20, 80, 100],
    'conv_3_kernel':[(2, 2), (3, 3)],
    'conv_3_strides':[(2, 2), (1, 1)],
    'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0, 1, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100,10)
}
def fluoro_model(X_talos,y_talos,X_val,y_val,params):
    """Talos model function: build, compile, and fit the two-tower fluoro
    CNN for one hyperparameter combination supplied in ``params``.

    X_talos / X_val are [image stack, calibration matrix] pairs; the image
    stack is indexed as X[0][:, view, :, :], so it is assumed to be
    (N, 2, 128, 128) -- one 128x128 image per fluoro view (TODO confirm
    against the caller).  Returns the (history, model) tuple that
    talos.Scan expects.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric; Keras only ships MSE out of the box.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128,128,1)
    # Hyperparameters
    # Swept values come from params; the commented-out lines show the
    # alternative of sweeping the pinned values instead.
    regularizer = keras.regularizers.l1_l2(l1 = params['regularizer_l1'], l2 = params['regularizer_l2'])
    activation_fn = params['activation_fn']
    kern_init = params['kern_init']
    conv_1_filters = params['conv_1_filters']
    conv_1_kernel = params['conv_1_kernel']
    conv_1_strides = params['conv_1_strides']
    conv_1_padding = 'valid'
    spatial_drop_rate_1 = params['spatial_drop_rate_1']
    pool_1_size = params['pool_1_size']
    pool_1_padding = 'same'
    conv_2_filters = params['conv_2_filters']
    conv_2_kernel = params['conv_2_kernel']
    conv_2_strides = params['conv_2_strides']
    conv_2_padding = 'same'
    pool_2_size = params['pool_2_size']
    pool_2_padding = 'same'
    conv_3_filters = params['conv_3_filters']
    conv_3_kernel = params['conv_3_kernel']
    conv_3_strides = params['conv_3_strides']
    conv_3_padding = 'valid'
    pool_3_size = (2, 2)
    pool_3_padding = 'valid'
    dense_1_f_units = params['dense_1_f_units']
    dense_1_f_bias = True
    # dense_2_f_units = params['dense_2_f_units']
    dense_2_f_units = 120
    dense_2_f_bias = True
    # dense_3_f_units = params['dense_3_f_units']
    dense_3_f_units = 120
    dense_3_f_bias = True
    # dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_units = 60
    dense_1_ca_bias = True
    # dense_2_co_units = params['dense_2_co_units']
    dense_2_co_units = 80
    dense_2_co_bias = True
    # drop_1_comb_rate = params['drop_1_comb_rate']
    drop_1_comb_rate = 0.1
    # dense_3_co_units = params['dense_3_co_units']
    dense_3_co_units = 80
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    # model_opt = params['model_opt'](lr=params('learning_rate'))
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    # model_epochs = params['model_epochs']
    # model_batchsize = params['model_batchsize']
    model_epochs = 50
    model_batchsize = 10
    # Model inputs: one 128x128x1 image per fluoro view plus a length-6
    # calibration vector.
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype = 'float32', name = 'cali_inpt')
    # Convolutional tower for fluoro view 1.
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_1')(dense_2_f_1)
    # Convolutional tower for fluoro view 2 (same hyperparameters,
    # independent weights).
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_2')(dense_2_f_2)
    # Calibration branch and combination head.
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation = activation_fn, use_bias = dense_1_ca_bias, kernel_initializer=kern_init, name = 'dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name = 'dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation = activation_fn, use_bias = dense_2_co_bias, kernel_initializer=kern_init, name = 'dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,activation =activation_fn,use_bias=dense_3_co_bias,kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation = main_output_act, name = 'main_output')(dense_3_comb)
    model = keras.Model(inputs = [input_fluoro_1,input_fluoro_2,input_cali], outputs = main_output)
    # Side effect: writes the architecture diagram next to the scan results.
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.png')), show_shapes=True)
    model.compile(optimizer=model_opt,loss = model_loss, metrics = [model_metric])
    # Split the stacked (N, 2, 128, 128) views into two single-channel
    # image inputs before fitting.
    result = model.fit(x=[np.expand_dims(X_talos[0][:,0,:,:],axis=3),np.expand_dims(X_talos[0][:,1,:,:],axis=3), X_talos[1]],y=y_talos,epochs = model_epochs,batch_size = model_batchsize,validation_data=([np.expand_dims(X_val[0][:,0,:,:],axis=3),np.expand_dims(X_val[0][:,1,:,:],axis=3),X_val[1]],y_val), shuffle = True,verbose=1)
    return result, model
# ---------------------------------------------------------------------------
# Driver: split the data, run the talos hyperparameter scan, pickle the Scan
# object, and dump the scan results to stdout.
# ---------------------------------------------------------------------------
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()

print('\n'*3)
print('Here we go: ')
print('\n'*3)

# NOTE(review): grid_downsample / random_method / debug are talos-version-
# specific keyword arguments -- confirm against the installed talos release.
t = talos.Scan(x = [image_train_sub,cali_train_sub], y=label_train_sub, x_val = [image_val,cali_val], y_val = label_val, params=params, model=fluoro_model, grid_downsample = 0.5,random_method = 'uniform_mersenne',clear_tf_session=True, print_params=True,dataset_name=expr_name, experiment_no=expr_no,debug=True)

# Persist the whole Scan object so results survive the process.
with open(os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.pkl')),'wb') as scan_file:
    pickle.dump(t,scan_file,protocol=-1)

# Print each Scan result attribute in the same banner format as before
# ('data' is printed twice, matching the original output exactly).
for _attr in ('data', 'details', 'saved_models', 'saved_weights', 'data'):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print('t.' + _attr)
    print('\n')
    print(getattr(t, _attr))
    print('\n')
    print('-------------')
    print('\n\n\n')
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_hyperparameter_talos.py | .py | 12,867 | 282 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: fixed run name/number and the directory that will
# hold this scan's outputs.
expr_name = 'just_fluoro_talos'
expr_no = '1'
save_dir = os.path.abspath(
    os.path.expanduser('~/fluoro/code/hyperparameter/talos_1')
)
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the fluoro images, pose labels, and calibration vectors from
    disk and produce train/validation splits.

    The full data set is split 80/20 into a cumulative training set and a
    held-out test set; the cumulative training set is then split 80/20
    again into the sub-training and validation sets that are returned.
    Splits are shuffled without a fixed seed, so the partition differs
    between runs.

    Returns:
        (image_train_sub, image_val,
         cali_train_sub, cali_val,
         label_train_sub, label_val)
    """
    # Context managers guarantee the HDF5 handles are closed even if a read
    # fails; the originals were left open for the life of the process.
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # Hold out 20% as the final test set (unused here, but carved off so the
    # rest of the pipeline never sees it).
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2)

    # Carve a validation set out of the remaining training data.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2)

    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# Talos search space for this run: regularization, activations, and all the
# convolutional settings are swept; the dense widths past dense_1_f and the
# training schedule (commented out below) are pinned inside fluoro_model.
params = {
    'regularizer_l1':(0, 1, 10),
    'regularizer_l2':(0, 1, 10),
    'activation_fn':['elu', 'relu'],
    'kern_init':['glorot_uniform', 'glorot_normal'],
    'conv_1_filters':[10, 20, 40, 50],
    'conv_1_kernel':[(10, 10), (5, 5),(3, 3)],
    'conv_1_strides':[(2, 2), (1, 1)],
    'spatial_drop_rate_1':(0, 1, 10),
    'pool_1_size':[(2, 2), (3, 3)],
    'conv_2_filters':[20, 40, 80],
    'conv_2_kernel':[(3, 3), (5, 5)],
    'conv_2_strides':[(2, 2), (1, 1)],
    'pool_2_size':[(2, 2), (3, 3)],
    'conv_3_filters':[20, 80, 100],
    'conv_3_kernel':[(2, 2), (3, 3)],
    'conv_3_strides':[(2, 2), (1, 1)],
    'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0, 1, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100,10)
}
# In[11]:
def fluoro_model(X_talos,y_talos,X_val,y_val,params):
    """Build and train one candidate network for a talos hyperparameter scan.

    Architecture: two identical CNN towers (one per fluoroscopy view) whose
    flattened features are concatenated with a dense encoding of the
    6-element calibration vector, then regressed through dense layers to a
    6-unit linear output.

    Parameters
    ----------
    X_talos : list
        ``[images, calibration]`` training arrays; ``images`` stacks the two
        views on axis 1 and is split per tower at fit time.
    y_talos : array
        Training labels (6 regression targets).
    X_val, y_val :
        Validation data in the same layout.
    params : dict
        One hyperparameter combination drawn from the module-level grid.

    Returns
    -------
    (history, model)
        The pair that ``talos.Scan`` expects from a model function.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric (not built into Keras), reported alongside the MSE loss.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128,128,1)
    # Hyperparameters
    # Swept values come from `params`; the commented-out assignments below
    # mark dimensions frozen for this particular scan.
    regularizer = keras.regularizers.l1_l2(l1 = params['regularizer_l1'], l2 = params['regularizer_l2'])
    activation_fn = params['activation_fn']
    kern_init = params['kern_init']
    conv_1_filters = params['conv_1_filters']
    conv_1_kernel = params['conv_1_kernel']
    conv_1_strides = params['conv_1_strides']
    conv_1_padding = 'valid'
    spatial_drop_rate_1 = params['spatial_drop_rate_1']
    pool_1_size = params['pool_1_size']
    pool_1_padding = 'same'
    conv_2_filters = params['conv_2_filters']
    conv_2_kernel = params['conv_2_kernel']
    conv_2_strides = params['conv_2_strides']
    conv_2_padding = 'same'
    pool_2_size = params['pool_2_size']
    pool_2_padding = 'same'
    conv_3_filters = params['conv_3_filters']
    conv_3_kernel = params['conv_3_kernel']
    conv_3_strides = params['conv_3_strides']
    conv_3_padding = 'valid'
    pool_3_size = (2, 2)
    pool_3_padding = 'valid'
    dense_1_f_units = params['dense_1_f_units']
    dense_1_f_bias = True
    # dense_2_f_units = params['dense_2_f_units']
    dense_2_f_units = 120
    dense_2_f_bias = True
    # dense_3_f_units = params['dense_3_f_units']
    dense_3_f_units = 120
    dense_3_f_bias = True
    # dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_units = 60
    dense_1_ca_bias = True
    # dense_2_co_units = params['dense_2_co_units']
    dense_2_co_units = 80
    dense_2_co_bias = True
    # drop_1_comb_rate = params['drop_1_comb_rate']
    drop_1_comb_rate = 0.1
    # dense_3_co_units = params['dense_3_co_units']
    dense_3_co_units = 80
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    # model_opt = params['model_opt'](lr=params('learning_rate'))
    # NOTE(review): the commented line above would need params['learning_rate'],
    # not params('learning_rate'), if ever re-enabled.
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    # model_epochs = params['model_epochs']
    # model_batchsize = params['model_batchsize']
    model_epochs = 50
    model_batchsize = 10
    # ---- inputs: one 128x128 grayscale image per view + calibration vector ----
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype = 'float32', name = 'cali_inpt')
    # ---- tower 1 (view 1): BN -> conv/pool x3 -> flatten -> dense x3 ----
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order,activity_regularizer=regularizer)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order,activity_regularizer=regularizer)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order,activity_regularizer=regularizer)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_1')(dense_2_f_1)
    # ---- tower 2 (view 2): same layout, separate (unshared) weights ----
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order,activity_regularizer=regularizer)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order,activity_regularizer=regularizer)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order,activity_regularizer=regularizer)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_2')(dense_2_f_2)
    # ---- calibration branch, then merge all three and regress ----
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation = activation_fn, use_bias = dense_1_ca_bias, kernel_initializer=kern_init, name = 'dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name = 'dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation = activation_fn, use_bias = dense_2_co_bias, kernel_initializer=kern_init, name = 'dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,activation =activation_fn,use_bias=dense_3_co_bias,kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation = main_output_act, name = 'main_output')(dense_3_comb)
    model = keras.Model(inputs = [input_fluoro_1,input_fluoro_2,input_cali], outputs = main_output)
    # Architecture diagram per run; requires pydot/graphviz to be installed.
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.png')), show_shapes=True)
    model.compile(optimizer=model_opt,loss = model_loss, metrics = [model_metric])
    # Images are stored as (n, 2, 128, 128); slice out each view and add the
    # trailing channel axis so shapes match the (128, 128, 1) inputs.
    result = model.fit(x=[np.expand_dims(X_talos[0][:,0,:,:],axis=3),np.expand_dims(X_talos[0][:,1,:,:],axis=3), X_talos[1]],y=y_talos,epochs = model_epochs,batch_size = model_batchsize,validation_data=([np.expand_dims(X_val[0][:,0,:,:],axis=3),np.expand_dims(X_val[0][:,1,:,:],axis=3),X_val[1]],y_val), shuffle = True,verbose=1)
    return result, model
# Load the train/validation split produced by data_comp() and run the scan.
# (For a single direct fit instead of a scan:
#  fluoro_model([image_train_cum, cali_train_cum], label_train_cum, params).)
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()

print('\n' * 10)
print('Here we go: ')
print('\n' * 10)

t = talos.Scan(x=[image_train_sub, cali_train_sub], y=label_train_sub, x_val=[image_val, cali_val], y_val=label_val, params=params, model=fluoro_model, grid_downsample=0.5, random_method='uniform_mersenne', clear_tf_session=True, print_params=True, dataset_name=expr_name, experiment_no=expr_no, debug=True)

# Pickle the finished Scan object for offline analysis.
scan_path = os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.pkl'))
with open(scan_path, 'wb') as scan_file:
    pickle.dump(t, scan_file, protocol=-1)

# Dump the interesting Scan attributes in the original banner format.
# ('data' appears twice on purpose — the original printed it twice.)
for attr in ('data', 'details', 'saved_models', 'saved_weights', 'data'):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print('t.' + attr)
    print('\n')
    print(getattr(t, attr))
    print('\n')
    print('-------------')
    print('\n\n\n')
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_hyperparameter_hyperas.py | .py | 9,433 | 174 |
import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import graphviz
import sys
from sklearn.model_selection import train_test_split
import json
import csv
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# Every hyperas artifact for this experiment lands in one directory.
_save_root = os.path.expanduser('~/fluoro/code/hyperparameter/hyperas_1')
save_dir = os.path.abspath(_save_root)
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the compiled fluoro dataset and return the training portion.

    Reads three HDF5 files from ``~/fluoro/data/compilation`` (two-view
    images, labels, and calibration vectors), holds out 20% as a test set
    (discarded here), and returns the remaining 80% for hyperas to sub-split.

    NOTE(review): the split is unseeded, so it differs from run to run.

    Returns
    -------
    (image_train_cum, cali_train_cum, label_train_cum) : numpy arrays
    """
    data_root = os.path.expanduser('~/fluoro/data/compilation')

    # Context managers fix the original's leaked h5py file handles; the [:]
    # reads copy each dataset fully into numpy arrays, so closing is safe.
    with h5py.File(os.path.join(data_root, 'images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.join(data_root, 'labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.join(data_root, 'calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # 20% test hold-out; the test arrays are intentionally unused here.
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2)
    return image_train_cum, cali_train_cum, label_train_cum
def fluoro_model(image_train_cum, cali_train_cum, label_train_cum):
    """Hyperas search target: two-tower fluoro CNN plus calibration input.

    The double-brace expressions ({{choice(...)}}, {{uniform(...)}}) are
    hyperas template fields — optim.minimize rewrites this function's source,
    substituting a sampled hyperopt value for each before execution, so the
    code here must not be restructured without re-testing the hyperas parse.

    Trains on the given arrays with a 20% validation_split and returns the
    dict hyperas/hyperopt expects: the value to minimize, a status flag, and
    the fitted model.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric (not built into Keras), reported alongside the MSE loss.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)
    # ---- sampled hyperparameters (hyperas templates) ----
    regularizer = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    activation_fn = {{choice(['elu', 'relu'])}}
    kern_init = {{choice(['glorot_uniform', 'glorot_normal'])}}
    conv_1_filters = {{choice([10, 20, 40, 50])}}
    conv_1_kernel = {{choice([(10, 10), (5, 5), (3, 3)])}}
    conv_1_strides = {{choice([(2, 2), (1, 1)])}}
    conv_1_padding = 'valid'
    spatial_drop_rate_1 = {{uniform(0, 1)}}
    pool_1_size = {{choice([(2, 2), (3, 3)])}}
    pool_1_padding = 'same'
    conv_2_filters = {{choice([20, 40, 80])}}
    conv_2_kernel = {{choice([(3, 3), (5, 5)])}}
    conv_2_strides = {{choice([(2, 2), (1, 1)])}}
    conv_2_padding = 'same'
    pool_2_size = {{choice([(2, 2), (3, 3)])}}
    pool_2_padding = 'same'
    conv_3_filters = {{choice([20, 80, 100])}}
    conv_3_kernel = {{choice([(2, 2), (3, 3)])}}
    conv_3_strides = {{choice([(2, 2), (1, 1)])}}
    conv_3_padding = 'valid'
    pool_3_size = (2, 2)
    pool_3_padding = 'valid'
    dense_1_f_units = {{choice([40, 80, 120])}}
    dense_1_f_bias = True
    dense_2_f_units = {{choice([40, 80, 120])}}
    dense_2_f_bias = True
    dense_3_f_units = {{choice([40, 80, 120])}}
    dense_3_f_bias = True
    dense_1_ca_units = {{choice([6, 20, 60])}}
    dense_1_ca_bias = True
    dense_2_co_units = {{choice([20, 40, 80])}}
    dense_2_co_bias = True
    drop_1_comb_rate = {{uniform(0, 1)}}
    dense_3_co_units = {{choice([20, 40, 80])}}
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    # NOTE(review): main_output_bias is never passed to the output layer
    # (keras Dense defaults use_bias to True anyway).
    main_output_bias = True
    model_opt = {{choice(['adam', 'nadam', 'adagrad', 'rmsprop'])}}
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    model_epochs = {{choice([30, 40, 50])}}
    model_batchsize = {{choice([5, 10, 30])}}
    # ---- inputs: one 128x128 grayscale image per view + calibration vector ----
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype='float32', name='cali_inpt')
    # ---- tower 1 (view 1): BN -> conv/pool x3 -> flatten -> dense x3 ----
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_1')(dense_2_f_1)
    # ---- tower 2 (view 2): same layout, separate (unshared) weights ----
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_2')(dense_2_f_2)
    # ---- calibration branch, then merge all three and regress ----
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation=activation_fn, use_bias=dense_1_ca_bias, kernel_initializer=kern_init, name='dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name='dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, use_bias=dense_2_co_bias, kernel_initializer=kern_init, name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, use_bias=dense_3_co_bias, kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation=main_output_act, name='main_output')(dense_3_comb)
    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    # Architecture diagram; requires pydot/graphviz to be installed.
    keras.utils.plot_model(model, 'show.png', show_shapes=True)
    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
    # Images are stored as (n, 2, 128, 128); slice out each view and add the
    # trailing channel axis so shapes match the (128, 128, 1) inputs.
    result = model.fit(x=[np.expand_dims(image_train_cum[:, 0, :, :], axis=3), np.expand_dims(image_train_cum[:, 1, :, :], axis=3), cali_train_cum], y=label_train_cum, epochs=model_epochs, batch_size=model_batchsize, validation_split=0.2, shuffle=True, verbose=True)
    # NOTE(review): 'loss' here is the *training* loss; hyperopt minimizes the
    # best epoch's training loss, not validation loss — confirm this is intended.
    return {'loss': np.amin(result.history['loss']), 'status': STATUS_OK, 'model': model}
# Run the hyperas/hyperopt TPE search over fluoro_model (5 evaluations).
best_run, best_model = optim.minimize(model=fluoro_model, data=data_comp, algo=tpe.suggest, max_evals=5, trials=Trials())

# Persist the winning hyperparameter combination as JSON.
# (with-blocks replace the original's unclosed/leaked file handles.)
with open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.json')), 'w') as json_file:
    json.dump(best_run, json_file)

# ...and as key,value rows in CSV. newline='' per the csv module docs
# (prevents blank interleaved rows on Windows); the original also never
# closed this handle.
with open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.csv')), 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    for key, val in best_run.items():
        writer.writerow([key, val])

# Save the best model (architecture + weights) in HDF5 format.
best_model.save(os.path.abspath(os.path.join(save_dir, 'best_model_hyperas.h5')))
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_conv_3.py | .py | 13,191 | 298 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: the run is named after this script file with the
# trailing '.py' stripped; artifacts land in a per-experiment directory.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# NOTE(review): if argv[0] is an absolute path, os.path.join discards the
# base prefix and save_dir lands wherever the interpreter path points.
_base_dir = os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro')
save_dir = os.path.abspath(os.path.join(_base_dir, expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the compiled fluoro dataset and return a train/validation split.

    Reads three HDF5 files from ``~/fluoro/data/compilation`` (two-view
    images, labels, calibration vectors), holds out 20% as a test set
    (discarded here), then splits the remainder 80/20 into train and
    validation. Both splits are seeded (random_state=42) so the partition
    is reproducible across runs.

    Returns
    -------
    (image_train_sub, image_val, cali_train_sub, cali_val,
     label_train_sub, label_val) : numpy arrays
    """
    data_root = os.path.expanduser('~/fluoro/data/compilation')

    # Context managers fix the original's leaked h5py file handles; the [:]
    # reads copy each dataset fully into numpy arrays, so closing is safe.
    with h5py.File(os.path.join(data_root, 'images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.join(data_root, 'labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.join(data_root, 'calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # 20% test hold-out; the test arrays are intentionally unused here.
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, random_state=42, test_size=0.2)

    # 20% of the remaining training data becomes the validation set.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        image_train_cum, cali_train_cum, label_train_cum, shuffle=True, random_state=42, test_size=0.2)

    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# Talos search grid for this round: only the later conv/pool stage and the
# first dense width are swept; everything else is frozen inside fluoro_model.
# Scalar kernel/stride/pool entries are expanded to (v, v) tuples in the
# model function. Previously-swept dimensions, kept for reference:
#   regularizer_l1 / regularizer_l2: (0.0, 1.0, 20)
#   activation_fn: ['elu', 'relu']; kern_init: ['glorot_uniform', 'glorot_normal']
#   conv_1_filters: [10, 20, 40, 50]; conv_1_kernel: [3, 5, 10]; conv_1_strides: [1, 2]
#   spatial_drop_rate_1: (0.0, 1.0, 10); pool_1_size: [2, 3]
#   conv_2_filters: [40, 60, 80, 100]; conv_2_kernel: [3, 5, 7]; conv_2_strides: [1, 2]
params = dict(
    pool_2_size=[2, 3],
    conv_3_filters=[20, 80, 100],
    conv_3_kernel=[2, 3],
    conv_3_strides=[1, 2],
    pool_3_size=[2, 3],
    dense_1_f_units=[40, 80, 120],
)
def fluoro_model(X_talos,y_talos,X_val,y_val,params):
    """Build and train one candidate network for a talos hyperparameter scan.

    Same two-tower fluoro CNN as the sibling scripts, but in this round only
    the third conv stage, the two later pool sizes, and the first dense width
    are swept; earlier stages are frozen to values found in previous scans.
    Scalar hyperparameters from `params` are expanded to (v, v) tuples where
    Keras expects 2-D sizes.

    Parameters
    ----------
    X_talos : list
        ``[images, calibration]`` training arrays; ``images`` stacks the two
        views on axis 1 and is split per tower at fit time.
    y_talos : array
        Training labels (6 regression targets).
    X_val, y_val :
        Validation data in the same layout.
    params : dict
        One hyperparameter combination drawn from the module-level grid.

    Returns
    -------
    (history, model)
        The pair that ``talos.Scan`` expects from a model function.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE metric (not built into Keras), reported alongside the MSE loss.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128,128,1)
    # Hyperparameters
    # Commented-out assignments mark dimensions frozen for this scan round.
    # regularizer = keras.regularizers.l1_l2(l1 = params['regularizer_l1'], l2 = params['regularizer_l2'])
    regularizer = keras.regularizers.l1_l2(l1 = 0.05, l2 = 0.2)
    # activation_fn = params['activation_fn']
    # kern_init = params['kern_init']
    activation_fn = 'elu'
    kern_init = 'glorot_uniform'
    # conv_1_filters = params['conv_1_filters']
    # conv_1_kernel = (params['conv_1_kernel'],params['conv_1_kernel'])
    # conv_1_strides = (params['conv_1_strides'],params['conv_1_strides'])
    conv_1_padding = 'valid'
    conv_1_filters = 50
    conv_1_kernel = (10,10)
    conv_1_strides = (2,2)
    # spatial_drop_rate_1 = params['spatial_drop_rate_1']
    spatial_drop_rate_1 = 0.3
    # pool_1_size = (params['pool_1_size'],params['pool_1_size'])
    pool_1_padding = 'same'
    pool_1_size = (3,3)
    # conv_2_filters = params['conv_2_filters']
    # conv_2_kernel = (params['conv_2_kernel'],params['conv_2_kernel'])
    # conv_2_strides = (params['conv_2_strides'],params['conv_2_strides'])
    conv_2_padding = 'same'
    conv_2_filters = 40
    conv_2_kernel = (7,7)
    conv_2_strides = (1,1)
    pool_2_size = (params['pool_2_size'],params['pool_2_size'])
    pool_2_padding = 'same'
    # pool_2_size = (2,2)
    conv_3_filters = params['conv_3_filters']
    conv_3_kernel = (params['conv_3_kernel'],params['conv_3_kernel'])
    conv_3_strides = (params['conv_3_strides'],params['conv_3_strides'])
    conv_3_padding = 'same'
    # conv_3_filters = 80
    # conv_3_kernel = (2,2)
    # conv_3_strides = (1,1)
    # pool_3_size = (2, 2)
    pool_3_size = (params['pool_3_size'],params['pool_3_size'])
    pool_3_padding = 'same'
    dense_1_f_units = params['dense_1_f_units']
    # dense_1_f_units = 80
    dense_1_f_bias = True
    # dense_2_f_units = params['dense_2_f_units']
    dense_2_f_units = 120
    dense_2_f_bias = True
    # dense_3_f_units = params['dense_3_f_units']
    dense_3_f_units = 120
    dense_3_f_bias = True
    # dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_units = 60
    dense_1_ca_bias = True
    # dense_2_co_units = params['dense_2_co_units']
    dense_2_co_units = 80
    dense_2_co_bias = True
    # drop_1_comb_rate = params['drop_1_comb_rate']
    drop_1_comb_rate = 0.1
    # dense_3_co_units = params['dense_3_co_units']
    dense_3_co_units = 80
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    # model_opt = params['model_opt'](lr=params('learning_rate'))
    # NOTE(review): the commented line above would need params['learning_rate'],
    # not params('learning_rate'), if ever re-enabled.
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    # model_epochs = params['model_epochs']
    # model_batchsize = params['model_batchsize']
    model_epochs = 30
    model_batchsize = 10
    # ---- inputs: one 128x128 grayscale image per view + calibration vector ----
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype = 'float32', name = 'cali_inpt')
    # ---- tower 1 (view 1): BN -> conv/pool x3 -> flatten -> dense x3 ----
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_1')(dense_2_f_1)
    # ---- tower 2 (view 2): same layout, separate (unshared) weights ----
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_2')(dense_2_f_2)
    # ---- calibration branch, then merge all three and regress ----
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation = activation_fn, use_bias = dense_1_ca_bias, kernel_initializer=kern_init, name = 'dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name = 'dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation = activation_fn, use_bias = dense_2_co_bias, kernel_initializer=kern_init, name = 'dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,activation =activation_fn,use_bias=dense_3_co_bias,kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation = main_output_act, name = 'main_output')(dense_3_comb)
    model = keras.Model(inputs = [input_fluoro_1,input_fluoro_2,input_cali], outputs = main_output)
    # Architecture diagram per run; requires pydot/graphviz to be installed.
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.png')), show_shapes=True)
    model.compile(optimizer=model_opt,loss = model_loss, metrics = [model_metric])
    # Images are stored as (n, 2, 128, 128); slice out each view and add the
    # trailing channel axis so shapes match the (128, 128, 1) inputs.
    result = model.fit(x=[np.expand_dims(X_talos[0][:,0,:,:],axis=3),np.expand_dims(X_talos[0][:,1,:,:],axis=3), X_talos[1]],y=y_talos,epochs = model_epochs,batch_size = model_batchsize,validation_data=([np.expand_dims(X_val[0][:,0,:,:],axis=3),np.expand_dims(X_val[0][:,1,:,:],axis=3),X_val[1]],y_val), shuffle = True,verbose=1)
    return result, model
# Load the train/validation split produced by data_comp() and run the scan.
# (For a single direct fit instead of a scan:
#  fluoro_model([image_train_cum, cali_train_cum], label_train_cum, params).)
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()

print('\n' * 3)
print('Here we go: ')
print('\n' * 3)

t = talos.Scan(x=[image_train_sub, cali_train_sub], y=label_train_sub, x_val=[image_val, cali_val], y_val=label_val, params=params, model=fluoro_model, grid_downsample=0.25, random_method='uniform_mersenne', clear_tf_session=True, print_params=True, dataset_name=expr_name, experiment_no=expr_no, debug=True)

# Dump the interesting Scan attributes in the original banner format.
for attr in ('data', 'details', 'saved_models', 'saved_weights'):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print('t.' + attr)
    print('\n')
    print(getattr(t, attr))
    print('\n')
    print('-------------')
    print('\n\n\n')
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_l1_l2_reg.py | .py | 12,997 | 296 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: the run is named after this script file with the
# trailing '.py' stripped; artifacts land in a per-experiment directory.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# NOTE(review): if argv[0] is an absolute path, os.path.join discards the
# base prefix and save_dir lands wherever the interpreter path points.
_base_dir = os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro')
save_dir = os.path.abspath(os.path.join(_base_dir, expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the compiled fluoro images, labels, and calibration vectors,
    then split into train/validation subsets.

    A 20% test split is carved off first and discarded here (held back
    for later evaluation); the remainder is split again 80/20 into the
    training and validation sets that are returned.
    """
    data_root = os.path.expanduser('~/fluoro/data/compilation')

    # Keep the h5py handles alive in locals; arrays are materialized with [:].
    image_h5 = h5py.File(os.path.join(data_root, 'images.h5py'), 'r')
    label_h5 = h5py.File(os.path.join(data_root, 'labels.h5py'), 'r')
    cali_h5 = h5py.File(os.path.join(data_root, 'calibration.h5py'), 'r')

    image_arr = image_h5['image_dset'][:]
    label_arr = label_h5['labels_dset'][:]
    cali_arr = cali_h5['cali_len3_rot'][:]

    # First split: hold out 20% of everything as an (unused here) test set.
    (image_trainval, _image_test,
     cali_trainval, _cali_test,
     label_trainval, _label_test) = train_test_split(
        image_arr, cali_arr, label_arr, shuffle=True, test_size=0.2)

    # Second split: carve a validation set out of the remaining 80%.
    (image_tr, image_vl,
     cali_tr, cali_vl,
     label_tr, label_vl) = train_test_split(
        image_trainval, cali_trainval, label_trainval, shuffle=True, test_size=0.2)

    return image_tr, image_vl, cali_tr, cali_vl, label_tr, label_vl
    # return image_train_cum, cali_train_cum, label_train_cum
# Talos search space for this experiment: only the l1/l2 regularization
# strengths (each a (min, max, steps) range for talos to sample) and the
# kernel initializer are swept.  The commented-out entries record the
# wider space explored by sibling scripts.
params = {
    'regularizer_l1': (0.0, 1.0, 20),
    'regularizer_l2': (0.0, 1.0, 20),
    # 'activation_fn': ['elu', 'relu'],
    'kern_init': ['glorot_uniform', 'glorot_normal'],
    # 'conv_1_filters':[10, 20, 40, 50],
    # 'conv_1_kernel':[3,5,10],
    # 'conv_1_strides':[1,2],
    # 'spatial_drop_rate_1':(0.0, 1.0, 10),
    # 'pool_1_size':[2,3],
    # 'conv_2_filters':[20, 40, 80],
    # 'conv_2_kernel':[3,5],
    # 'conv_2_strides':[1,2],
    # 'pool_2_size':[2,3],
    # 'conv_3_filters':[20, 80, 100],
    # 'conv_3_kernel':[2,3],
    # 'conv_3_strides':[1,2],
    # 'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0.0, 1.0, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100.0,10)
}
def fluoro_model(X_talos, y_talos, X_val, y_val, params):
    """Build and train the dual-fluoroscopy regression network for one Talos trial.

    X_talos / X_val are [images, calibration] pairs — images stack the two
    fluoro views along axis 1 as (N, 2, 128, 128); y_talos / y_val are the
    6-DOF targets.  Only 'regularizer_l1', 'regularizer_l2' and 'kern_init'
    are read from `params` in this experiment; every other hyperparameter
    is fixed.  Returns (history, model) as required by talos.Scan.
    """

    def root_mean_squared_error(y_true, y_pred):
        # Keras metric: batch RMSE.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))

    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)

    # --- swept hyperparameters ---
    regularizer = keras.regularizers.l1_l2(l1=params['regularizer_l1'], l2=params['regularizer_l2'])
    kern_init = params['kern_init']

    # --- fixed hyperparameters (sibling scripts sweep these) ---
    activation_fn = 'elu'
    conv_1_filters, conv_1_kernel, conv_1_strides, conv_1_padding = 20, (5, 5), (2, 2), 'valid'
    spatial_drop_rate_1 = 0.5
    pool_1_size, pool_1_padding = (2, 2), 'same'
    conv_2_filters, conv_2_kernel, conv_2_strides, conv_2_padding = 40, (3, 3), (1, 1), 'same'
    pool_2_size, pool_2_padding = (2, 2), 'same'
    conv_3_filters, conv_3_kernel, conv_3_strides, conv_3_padding = 80, (2, 2), (1, 1), 'valid'
    pool_3_size, pool_3_padding = (2, 2), 'valid'
    dense_1_f_units = 80
    dense_2_f_units = 120
    dense_3_f_units = 120
    dense_f_bias = True
    dense_1_ca_units, dense_1_ca_bias = 60, True
    dense_2_co_units, dense_2_co_bias = 80, True
    drop_1_comb_rate = 0.1
    dense_3_co_units, dense_3_co_bias = 80, True
    main_output_units, main_output_act = 6, 'linear'
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    model_epochs = 30
    model_batchsize = 10

    def _fluoro_tower(image_input, suffix):
        """Conv/pool/dense tower applied to one fluoro view (the two views
        share the same structure but not weights).  `suffix` keeps the
        named dense layers unique per branch ('1' or '2')."""
        x = keras.layers.BatchNormalization()(image_input)
        x = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(x)
        x = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(x)
        x = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(x)
        x = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(x)
        x = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(x)
        # NOTE: conv_3 deliberately has no activation (linear), as in the
        # original architecture.
        x = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(x)
        x = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(x)
        x = keras.layers.Flatten()(x)
        x = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_' + suffix)(x)
        x = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_' + suffix)(x)
        x = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_' + suffix)(x)
        return x

    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype='float32', name='cali_inpt')

    tower_1 = _fluoro_tower(input_fluoro_1, '1')
    tower_2 = _fluoro_tower(input_fluoro_2, '2')

    # Calibration branch: one dense layer on the 6-vector.
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation=activation_fn, use_bias=dense_1_ca_bias, kernel_initializer=kern_init, name='dense_1_cali')(input_cali)

    # Fuse both image towers with the calibration features and regress 6-DOF.
    dense_1_comb = keras.layers.concatenate([tower_1, tower_2, dense_1_cali], name='dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, use_bias=dense_2_co_bias, kernel_initializer=kern_init, name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, use_bias=dense_3_co_bias, kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation=main_output_act, name='main_output')(dense_3_comb)

    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])

    # Images arrive stacked as (N, 2, 128, 128): split per view and append
    # the trailing channel axis expected by the conv towers.
    result = model.fit(
        x=[np.expand_dims(X_talos[0][:, 0, :, :], axis=3), np.expand_dims(X_talos[0][:, 1, :, :], axis=3), X_talos[1]],
        y=y_talos,
        epochs=model_epochs,
        batch_size=model_batchsize,
        validation_data=([np.expand_dims(X_val[0][:, 0, :, :], axis=3), np.expand_dims(X_val[0][:, 1, :, :], axis=3), X_val[1]], y_val),
        shuffle=True,
        verbose=1)
    return result, model
# Build the train/validation split and launch the Talos hyperparameter scan.
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()
# fluoro_model([image_train_cum,cali_train_cum],label_train_cum,params)

print('\n' * 3)
print('Here we go: ')
print('\n' * 3)

t = talos.Scan(
    x=[image_train_sub, cali_train_sub],
    y=label_train_sub,
    x_val=[image_val, cali_val],
    y_val=label_val,
    params=params,
    model=fluoro_model,
    grid_downsample=0.1,
    random_method='uniform_mersenne',
    clear_tf_session=True,
    print_params=True,
    dataset_name=expr_name,
    experiment_no=expr_no,
    debug=True)

# Dump each scan artifact, framed by the same separator banner for every one.
for _attr in ('data', 'details', 'saved_models', 'saved_weights'):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print('t.' + _attr)
    print('\n')
    print(getattr(t, _attr))
    print('\n')
    print('-------------')
    print('\n\n\n')
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/temp_model.py | .py | 10,852 | 240 | #coding=utf-8
try:
import numpy as np
except:
pass
try:
import h5py
except:
pass
try:
import tensorflow as tf
except:
pass
try:
import keras
except:
pass
try:
import os
except:
pass
try:
import graphviz
except:
pass
try:
import sys
except:
pass
try:
import hyperas
except:
pass
try:
import hyperopt
except:
pass
try:
from sklearn.model_selection import train_test_split
except:
pass
try:
import json
except:
pass
try:
import csv
except:
pass
try:
from hyperopt import Trials, STATUS_OK, tpe
except:
pass
try:
from hyperas import optim
except:
pass
try:
from hyperas.distributions import choice, uniform
except:
pass
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
# Load the compiled fluoro dataset into memory: paired fluoroscopy images,
# labels, and per-shot calibration vectors.  Handles stay open for the
# life of the script; [:] materializes each dataset as an array.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
image_mat = image_init[:]
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_mat = label_init[:]
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
cali_mat = cali_init[:]
# Hold out 20% of everything as a test split; the rest is used by the
# hyperas objective below (which does its own validation_split).
image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2)
def keras_fmin_fnct(space):
    """Hyperas/hyperopt objective: build and train the dual-fluoro network
    for one sampled point of `space`, returning the minimum training loss.

    This function was auto-generated by hyperas (hence the machine-made
    space key names like 'l1_1', 'conv_1_strides_2'); keys must stay in
    sync with get_space() below.  Note the aliasing: conv_2_strides comes
    from 'conv_1_strides_1', conv_3_kernel from 'pool_1_size_2', and the
    combiner dense units reuse 'conv_2_filters_*' keys.
    """
    def root_mean_squared_error(y_true, y_pred):
        # Keras metric: batch RMSE.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)
    # --- unpack the sampled hyperparameters ---
    regularizer = keras.regularizers.l1_l2(l1=space['l1'], l2=space['l1_1'])
    activation_fn = space['activation_fn']
    kern_init = space['kern_init']
    conv_1_filters = space['conv_1_filters']
    conv_1_kernel = space['conv_1_kernel']
    conv_1_strides = space['conv_1_strides']
    conv_1_padding = 'valid'
    spatial_drop_rate_1 = space['l1_2']
    pool_1_size = space['pool_1_size']
    pool_1_padding = 'same'
    conv_2_filters = space['conv_2_filters']
    conv_2_kernel = space['conv_2_kernel']
    conv_2_strides = space['conv_1_strides_1']
    conv_2_padding = 'same'
    pool_2_size = space['pool_1_size_1']
    pool_2_padding = 'same'
    conv_3_filters = space['conv_3_filters']
    conv_3_kernel = space['pool_1_size_2']
    conv_3_strides = space['conv_1_strides_2']
    conv_3_padding = 'valid'
    pool_3_size = (2, 2)
    pool_3_padding = 'valid'
    dense_1_f_units = space['dense_1_f_units']
    dense_1_f_bias = True
    dense_2_f_units = space['dense_1_f_units_1']
    dense_2_f_bias = True
    dense_3_f_units = space['dense_1_f_units_2']
    dense_3_f_bias = True
    dense_1_ca_units = space['dense_1_ca_units']
    dense_1_ca_bias = True
    dense_2_co_units = space['conv_2_filters_1']
    dense_2_co_bias = True
    drop_1_comb_rate = space['l1_3']
    dense_3_co_units = space['conv_2_filters_2']
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    main_output_bias = True  # NOTE(review): unused — the main_output Dense below never passes use_bias
    model_opt = space['model_opt']
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    model_epochs = space['model_epochs']
    model_batchsize = space['model_batchsize']
    # --- inputs: two fluoro views plus the 6-element calibration vector ---
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype=np.dtype('float32'), name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype=np.dtype('float32'), name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype=np.dtype('float32'), name='cali_inpt')
    # --- conv/pool/dense tower for fluoro view 1 ---
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_1')(dense_2_f_1)
    # --- identical tower (separate weights) for fluoro view 2 ---
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_2')(dense_2_f_2)
    # --- calibration branch, then fuse everything and regress 6 outputs ---
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation=activation_fn, use_bias=dense_1_ca_bias, kernel_initializer=kern_init, name='dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name='dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, use_bias=dense_2_co_bias, kernel_initializer=kern_init, name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, use_bias=dense_3_co_bias, kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation=main_output_act, name='main_output')(dense_3_comb)
    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    keras.utils.plot_model(model, 'show.png', show_shapes=True)
    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
    # Images are stacked (N, 2, 128, 128): split per view and add the channel axis.
    result = model.fit(x=[np.expand_dims(image_train_cum[:, 0, :, :], axis=3), np.expand_dims(image_train_cum[:, 1, :, :], axis=3), cali_train_cum], y=label_train_cum, epochs=model_epochs, batch_size=model_batchsize, validation_split=0.2, shuffle=True, verbose=True)
    # hyperopt minimizes 'loss'; report the best epoch's training loss.
    return {'loss': np.amin(result.history['loss']), 'status': STATUS_OK, 'model': model}
def get_space():
    """Return the hyperopt search space paired with keras_fmin_fnct.

    The key names (including the auto-generated `_1`/`_2`/`_3` suffixes)
    were produced by hyperas and must stay in sync with the lookups in
    keras_fmin_fnct above.
    """
    space = {}
    space['l1'] = hp.uniform('l1', 0, 1)
    space['l1_1'] = hp.uniform('l1_1', 0, 1)
    space['activation_fn'] = hp.choice('activation_fn', ['elu', 'relu'])
    space['kern_init'] = hp.choice('kern_init', ['glorot_uniform', 'glorot_normal'])
    space['conv_1_filters'] = hp.choice('conv_1_filters', [10, 20, 40, 50])
    space['conv_1_kernel'] = hp.choice('conv_1_kernel', [(10, 10), (5, 5), (3, 3)])
    space['conv_1_strides'] = hp.choice('conv_1_strides', [(2, 2), (1, 1)])
    space['l1_2'] = hp.uniform('l1_2', 0, 1)
    space['pool_1_size'] = hp.choice('pool_1_size', [(2, 2), (3, 3)])
    space['conv_2_filters'] = hp.choice('conv_2_filters', [20, 40, 80])
    space['conv_2_kernel'] = hp.choice('conv_2_kernel', [(3, 3), (5, 5)])
    space['conv_1_strides_1'] = hp.choice('conv_1_strides_1', [(2, 2), (1, 1)])
    space['pool_1_size_1'] = hp.choice('pool_1_size_1', [(2, 2), (3, 3)])
    space['conv_3_filters'] = hp.choice('conv_3_filters', [20, 80, 100])
    space['pool_1_size_2'] = hp.choice('pool_1_size_2', [(2, 2), (3, 3)])
    space['conv_1_strides_2'] = hp.choice('conv_1_strides_2', [(2, 2), (1, 1)])
    space['dense_1_f_units'] = hp.choice('dense_1_f_units', [40, 80, 120])
    space['dense_1_f_units_1'] = hp.choice('dense_1_f_units_1', [40, 80, 120])
    space['dense_1_f_units_2'] = hp.choice('dense_1_f_units_2', [40, 80, 120])
    space['dense_1_ca_units'] = hp.choice('dense_1_ca_units', [6, 20, 60])
    space['conv_2_filters_1'] = hp.choice('conv_2_filters_1', [20, 40, 80])
    space['l1_3'] = hp.uniform('l1_3', 0, 1)
    space['conv_2_filters_2'] = hp.choice('conv_2_filters_2', [20, 40, 80])
    space['model_opt'] = hp.choice('model_opt', ['adam', 'nadam', 'adagrad', 'rmsprop'])
    space['model_epochs'] = hp.choice('model_epochs', [30, 40, 50])
    space['model_batchsize'] = hp.choice('model_batchsize', [5, 10, 30])
    return space
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_reg_act_kern.py | .py | 12,931 | 293 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: name the run after this script and create an
# output directory for scan artifacts (model plot, talos logs).
expr_name = sys.argv[0][:-3]  # NOTE(review): slicing assumes argv[0] ends in ".py" — confirm invocation
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the compiled fluoro images, labels, and calibration vectors,
    then split into train/validation subsets.

    A 20% test split is carved off first and discarded here; the
    remainder is split again 80/20 into the returned training and
    validation sets.  Splits are unseeded, so each run differs.
    """
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    image_mat = image_init[:]
    label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    label_init = label_file['labels_dset']
    label_mat = label_init[:]
    cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    cali_init = cali_file['cali_len3_rot']
    cali_mat = cali_init[:]
    # First split: hold out 20% of everything as an (unused here) test set.
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(image_mat,cali_mat,label_mat,shuffle=True,test_size=0.2)
    # print('Image mat size:',image_mat.shape)
    # print('Label mat size:',label_mat.shape)
    # print('Cali mat size:',cali_mat.shape)
    # print('Image cum size:',image_train_cum.shape)
    # print('Label cum size:',label_train_cum.shape)
    # print('Cali cum size:',cali_train_cum.shape)
    # print('Image test size:',image_test.shape)
    # print('Label test size:',label_test.shape)
    # print('Cali test size:',cali_test.shape)
    # Second split: carve a validation set out of the remaining 80%.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2)
    # print('Image sub size:',image_train_sub.shape)
    # print('Label sub size:',label_train_sub.shape)
    # print('Cali sub size:',cali_train_sub.shape)
    # print('Image val size:',image_val.shape)
    # print('Label val size:',label_val.shape)
    # print('Cali val size:',cali_val.shape)
    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
    # return image_train_cum, cali_train_cum, label_train_cum
# Talos search space for this experiment: l1/l2 regularization strengths
# (each a (min, max, steps) range for talos to sample), the activation
# function, and the kernel initializer.  The commented-out entries record
# the wider space explored by sibling scripts.
params = {
    'regularizer_l1': (0.0, 1.0, 10),
    'regularizer_l2': (0.0, 1.0, 10),
    'activation_fn': ['elu', 'relu'],
    'kern_init': ['glorot_uniform', 'glorot_normal'],
    # 'conv_1_filters':[10, 20, 40, 50],
    # 'conv_1_kernel':[3,5,10],
    # 'conv_1_strides':[1,2],
    # 'spatial_drop_rate_1':(0.0, 1.0, 10),
    # 'pool_1_size':[2,3],
    # 'conv_2_filters':[20, 40, 80],
    # 'conv_2_kernel':[3,5],
    # 'conv_2_strides':[1,2],
    # 'pool_2_size':[2,3],
    # 'conv_3_filters':[20, 80, 100],
    # 'conv_3_kernel':[2,3],
    # 'conv_3_strides':[1,2],
    # 'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0.0, 1.0, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100.0,10)
}
def fluoro_model(X_talos,y_talos,X_val,y_val,params):
    """Build and train the dual-fluoroscopy regression network for one Talos trial.

    X_talos / X_val are [images, calibration] pairs — images stack the two
    fluoro views along axis 1 as (N, 2, H, W); y_talos / y_val are the
    6-element targets.  This experiment sweeps 'regularizer_l1',
    'regularizer_l2', 'activation_fn', and 'kern_init'; every other
    hyperparameter is fixed (the commented-out lines show the swept
    versions used by sibling scripts).  Returns (history, model) as
    required by talos.Scan.
    """
    def root_mean_squared_error(y_true, y_pred):
        # Keras metric: batch RMSE.
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
    channel_order = 'channels_last'
    img_input_shape = (128,128,1)
    # Hyperparameters
    regularizer = keras.regularizers.l1_l2(l1 = params['regularizer_l1'], l2 = params['regularizer_l2'])
    activation_fn = params['activation_fn']
    kern_init = params['kern_init']
    # conv_1_filters = params['conv_1_filters']
    # conv_1_kernel = (params['conv_1_kernel'],params['conv_1_kernel'])
    # conv_1_strides = (params['conv_1_strides'],params['conv_1_strides'])
    conv_1_padding = 'valid'
    conv_1_filters = 20
    conv_1_kernel = (5,5)
    conv_1_strides = (2,2)
    # spatial_drop_rate_1 = params['spatial_drop_rate_1']
    spatial_drop_rate_1 = 0.5
    # pool_1_size = (params['pool_1_size'],params['pool_1_size'])
    pool_1_padding = 'same'
    pool_1_size = (2,2)
    # conv_2_filters = params['conv_2_filters']
    # conv_2_kernel = (params['conv_2_kernel'],params['conv_2_kernel'])
    # conv_2_strides = (params['conv_2_strides'],params['conv_2_strides'])
    conv_2_padding = 'same'
    conv_2_filters = 40
    conv_2_kernel = (3,3)
    conv_2_strides = (1,1)
    # pool_2_size = (params['pool_2_size'],params['pool_2_size'])
    pool_2_padding = 'same'
    pool_2_size = (2,2)
    # conv_3_filters = params['conv_3_filters']
    # conv_3_kernel = (params['conv_3_kernel'],params['conv_3_kernel'])
    # conv_3_strides = (params['conv_3_strides'],params['conv_3_strides'])
    conv_3_padding = 'valid'
    conv_3_filters = 80
    conv_3_kernel = (2,2)
    conv_3_strides = (1,1)
    pool_3_size = (2, 2)
    pool_3_padding = 'valid'
    # dense_1_f_units = params['dense_1_f_units']
    dense_1_f_bias = True
    dense_1_f_units = 80
    # dense_2_f_units = params['dense_2_f_units']
    dense_2_f_units = 120
    dense_2_f_bias = True
    # dense_3_f_units = params['dense_3_f_units']
    dense_3_f_units = 120
    dense_3_f_bias = True
    # dense_1_ca_units = params['dense_1_ca_units']
    dense_1_ca_units = 60
    dense_1_ca_bias = True
    # dense_2_co_units = params['dense_2_co_units']
    dense_2_co_units = 80
    dense_2_co_bias = True
    # drop_1_comb_rate = params['drop_1_comb_rate']
    drop_1_comb_rate = 0.1
    # dense_3_co_units = params['dense_3_co_units']
    dense_3_co_units = 80
    dense_3_co_bias = True
    main_output_units = 6
    main_output_act = 'linear'
    # model_opt = params['model_opt'](lr=params('learning_rate'))
    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    # model_epochs = params['model_epochs']
    # model_batchsize = params['model_batchsize']
    model_epochs = 30
    model_batchsize = 10
    # Inputs: two fluoro views plus the 6-element calibration vector.
    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype = 'float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype = 'float32', name = 'cali_inpt')
    # Conv/pool/dense tower for fluoro view 1.
    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_1)
    # conv_3 has no activation argument, so it is linear.
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_1')(dense_2_f_1)
    # Identical tower (separate weights) for fluoro view 2.
    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,kernel_size=conv_1_kernel,strides=conv_1_strides,padding=conv_1_padding,activation = activation_fn,input_shape = img_input_shape, data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding,data_format = channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,kernel_size=conv_2_kernel,strides=conv_2_strides,padding=conv_2_padding, activation = activation_fn,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding,data_format = channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,kernel_size=conv_3_kernel,strides=conv_3_strides,padding=conv_3_padding,data_format = channel_order,activity_regularizer=regularizer,kernel_initializer = kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,padding=pool_3_padding,data_format = channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=dense_1_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=dense_2_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=dense_3_f_bias, kernel_initializer=kern_init, activity_regularizer=regularizer, name = 'dense_3_f_2')(dense_2_f_2)
    # Calibration branch, then fuse both towers with it and regress 6 outputs.
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation = activation_fn, use_bias = dense_1_ca_bias, kernel_initializer=kern_init, name = 'dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name = 'dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation = activation_fn, use_bias = dense_2_co_bias, kernel_initializer=kern_init, name = 'dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,activation =activation_fn,use_bias=dense_3_co_bias,kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation = main_output_act, name = 'main_output')(dense_3_comb)
    model = keras.Model(inputs = [input_fluoro_1,input_fluoro_2,input_cali], outputs = main_output)
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir,expr_name+'_'+expr_no+'.png')), show_shapes=True)
    model.compile(optimizer=model_opt,loss = model_loss, metrics = [model_metric])
    # Images arrive stacked (N, 2, H, W): split per view and add the channel axis.
    result = model.fit(x=[np.expand_dims(X_talos[0][:,0,:,:],axis=3),np.expand_dims(X_talos[0][:,1,:,:],axis=3), X_talos[1]],y=y_talos,epochs = model_epochs,batch_size = model_batchsize,validation_data=([np.expand_dims(X_val[0][:,0,:,:],axis=3),np.expand_dims(X_val[0][:,1,:,:],axis=3),X_val[1]],y_val), shuffle = True,verbose=1)
    return result, model
# Build the train/validation split and launch the Talos hyperparameter scan.
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()
# fluoro_model([image_train_cum,cali_train_cum],label_train_cum,params)

print('\n' * 3)
print('Here we go: ')
print('\n' * 3)

t = talos.Scan(
    x=[image_train_sub, cali_train_sub],
    y=label_train_sub,
    x_val=[image_val, cali_val],
    y_val=label_val,
    params=params,
    model=fluoro_model,
    grid_downsample=0.1,
    random_method='uniform_mersenne',
    clear_tf_session=True,
    print_params=True,
    dataset_name=expr_name,
    experiment_no=expr_no,
    debug=True)

# Dump each scan artifact, framed by the same separator banner for every one.
for _attr in ('data', 'details', 'saved_models', 'saved_weights'):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print('t.' + _attr)
    print('\n')
    print(getattr(t, _attr))
    print('\n')
    print('-------------')
    print('\n\n\n')
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_conv_1.py | .py | 13,097 | 297 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: the experiment is named after this script file
# (sys.argv[0] with the trailing ".py" sliced off -- NOTE(review): a bare
# [:-3] assumes argv[0] really ends in ".py").
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All run artefacts (model plot, Talos logs) are written under this folder.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the fluoro images, calibration vectors and labels and return
    a reproducible train/validation split.

    20% of the data is first held out as a test set (discarded here);
    20% of the remainder then becomes the validation set.  Both splits
    use random_state=42, so the partition is identical across runs.

    Returns:
        (image_train_sub, image_val, cali_train_sub, cali_val,
         label_train_sub, label_val)
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')

    # Use context managers so the HDF5 handles are closed promptly; the
    # original left all three files open for the life of the process.
    with h5py.File(os.path.join(base_dir, 'images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.join(base_dir, 'labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.join(base_dir, 'calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # Outer split: carve off the (unused here) 20% test set.
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, random_state=42, test_size=0.2)

    # Inner split: 20% of the remainder becomes the validation set.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        image_train_cum, cali_train_cum, label_train_cum, shuffle=True, random_state=42, test_size=0.2)

    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# Talos search space for this round: only the first conv layer's geometry
# (filters / kernel / strides) and its spatial-dropout rate are scanned.
# The commented-out entries are hyperparameters explored in other rounds;
# for this experiment they are pinned to fixed values inside fluoro_model.
params = {
    # 'regularizer_l1': (0.0, 1.0, 20),
    # 'regularizer_l2': (0.0, 1.0, 20),
    # 'activation_fn': ['elu', 'relu'],
    # 'kern_init': ['glorot_uniform', 'glorot_normal'],
    'conv_1_filters':[10, 20, 40, 50],
    'conv_1_kernel':[3,5,10],
    'conv_1_strides':[1,2],
    'spatial_drop_rate_1':(0.0, 1.0, 10),
    # 'pool_1_size':[2,3],
    # 'conv_2_filters':[40, 60, 80],
    # 'conv_2_kernel':[3,5],
    # 'conv_2_strides':[1,2],
    # 'pool_2_size':[2,3],
    # 'conv_3_filters':[20, 80, 100],
    # 'conv_3_kernel':[2,3],
    # 'conv_3_strides':[1,2],
    # 'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0.0, 1.0, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100.0,10)
}
def fluoro_model(X_talos, y_talos, X_val, y_val, params):
    """Talos model function: dual-fluoroscopy 6-component regression.

    Two architecturally identical 2D conv towers (one per fluoro view,
    separate weights) are merged with a dense encoding of the
    calibration vector and regressed onto the 6-component label.

    Args:
        X_talos: [images, calibration] training inputs; the image array
            is indexed as X_talos[0][:, view, :, :] below, so it is
            assumed to be (n, 2, 128, 128) -- TODO confirm vs data_comp.
        y_talos: training labels.
        X_val, y_val: validation data in the same layout.
        params: Talos hyperparameter dict; only conv-1 geometry and its
            spatial-dropout rate are read here.

    Returns:
        (history, model) as required by the Talos model contract.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE reported as a custom metric (Keras ships no built-in RMSE).
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))

    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)

    # ---- Hyperparameters --------------------------------------------
    # Only conv-1 geometry + its dropout come from Talos this round;
    # everything else is pinned.
    regularizer = keras.regularizers.l1_l2(l1=0.05, l2=0.2)
    activation_fn = 'elu'
    kern_init = 'glorot_uniform'

    conv_1_filters = params['conv_1_filters']
    conv_1_kernel = (params['conv_1_kernel'], params['conv_1_kernel'])
    conv_1_strides = (params['conv_1_strides'], params['conv_1_strides'])
    conv_1_padding = 'valid'
    spatial_drop_rate_1 = params['spatial_drop_rate_1']

    pool_1_size, pool_1_padding = (2, 2), 'same'
    conv_2_filters, conv_2_kernel, conv_2_strides, conv_2_padding = 80, (3, 3), (1, 1), 'same'
    pool_2_size, pool_2_padding = (2, 2), 'same'
    conv_3_filters, conv_3_kernel, conv_3_strides, conv_3_padding = 80, (2, 2), (1, 1), 'valid'
    pool_3_size, pool_3_padding = (2, 2), 'valid'

    dense_1_f_units = 80
    dense_2_f_units = 120
    dense_3_f_units = 120
    dense_1_ca_units = 60
    dense_2_co_units = 80
    drop_1_comb_rate = 0.1
    dense_3_co_units = 80
    main_output_units, main_output_act = 6, 'linear'

    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    model_epochs = 30
    model_batchsize = 10

    def conv_tower(inp, suffix):
        # Per-view feature extractor.  Replaces the former copy-pasted
        # twin towers; called once per input, so each view still gets
        # its own (non-shared) weights, in the same creation order.
        bn = keras.layers.BatchNormalization()(inp)
        conv_1 = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(bn)
        spat_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1)
        pool_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(spat_1)
        conv_2 = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_1)
        pool_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(conv_2)
        # NOTE: conv_3 has no activation (linear), as in the original.
        conv_3 = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_2)
        pool_3 = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(conv_3)
        flat = keras.layers.Flatten()(pool_3)
        dense_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_' + suffix)(flat)
        dense_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_' + suffix)(dense_1)
        return keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_' + suffix)(dense_2)

    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype='float32', name='cali_inpt')

    dense_3_f_1 = conv_tower(input_fluoro_1, '1')
    dense_3_f_2 = conv_tower(input_fluoro_2, '2')

    # Calibration branch + merge head.
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, name='dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name='dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation=main_output_act, name='main_output')(dense_3_comb)

    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)

    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
    # Split the stacked image array into the two single-channel views.
    result = model.fit(
        x=[np.expand_dims(X_talos[0][:, 0, :, :], axis=3), np.expand_dims(X_talos[0][:, 1, :, :], axis=3), X_talos[1]],
        y=y_talos,
        epochs=model_epochs,
        batch_size=model_batchsize,
        validation_data=([np.expand_dims(X_val[0][:, 0, :, :], axis=3), np.expand_dims(X_val[0][:, 1, :, :], axis=3), X_val[1]], y_val),
        shuffle=True,
        verbose=1)
    return result, model
# ---------------------------------------------------------------------
# Driver: fetch the train/validation split, run the Talos scan, then
# dump each scan artefact between banner lines.
# ---------------------------------------------------------------------
(image_train_sub, image_val,
 cali_train_sub, cali_val,
 label_train_sub, label_val) = data_comp()

print('\n' * 3)
print('Here we go: ')
print('\n' * 3)

t = talos.Scan(
    x=[image_train_sub, cali_train_sub],
    y=label_train_sub,
    x_val=[image_val, cali_val],
    y_val=label_val,
    params=params,
    model=fluoro_model,
    grid_downsample=0.25,
    random_method='uniform_mersenne',
    clear_tf_session=True,
    print_params=True,
    dataset_name=expr_name,
    experiment_no=expr_no,
    debug=True)

# Report every scan artefact in the same banner format.
for attr_label, attr_value in (
        ('t.data', t.data),
        ('t.details', t.details),
        ('t.saved_models', t.saved_models),
        ('t.saved_weights', t.saved_weights)):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print(attr_label)
    print('\n')
    print(attr_value)
    print('\n')
    print('-------------')
    print('\n\n\n')
| Python |
3D | john-drago/fluoro | code/hyperparameter/just_fluoro/just_fluoro_talos_testdeploy.py | .py | 13,223 | 304 | import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import keras
import talos
from sklearn.model_selection import train_test_split
import pickle
# Experiment bookkeeping: the experiment is named after this script file
# (sys.argv[0] with the trailing ".py" sliced off -- NOTE(review): a bare
# [:-3] assumes argv[0] really ends in ".py").
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All run artefacts (model plot, Talos logs) are written under this folder.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/just_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp():
    """Load the fluoro images, calibration vectors and labels and return
    a fresh random train/validation split.

    Unlike the main experiment scripts, no random_state is fixed, so
    every call yields a different split.  20% is first held out as a
    test set (discarded here); 20% of the remainder then becomes the
    validation set.

    Returns:
        (image_train_sub, image_val, cali_train_sub, cali_val,
         label_train_sub, label_val)
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')

    # Use context managers so the HDF5 handles are closed promptly; the
    # original left all three files open for the life of the process.
    with h5py.File(os.path.join(base_dir, 'images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][:]
    with h5py.File(os.path.join(base_dir, 'labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][:]
    with h5py.File(os.path.join(base_dir, 'calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][:]

    # Outer split: carve off the (unused here) 20% test set.
    image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2)

    # Inner split: 20% of the remainder becomes the validation set.
    image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2)

    return image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# Talos search space for this smoke-test round: only the l1/l2
# regularization strengths are scanned.  The commented-out entries are
# hyperparameters explored in other rounds; for this experiment they are
# pinned to fixed values inside fluoro_model.
params = {
    'regularizer_l1':(0.0, 1.0, 10),
    'regularizer_l2':(0.0, 1.0, 10),
    # 'activation_fn':['elu', 'relu'],
    # 'kern_init':['glorot_uniform', 'glorot_normal'],
    # 'conv_1_filters':[10, 20, 40, 50],
    # 'conv_1_kernel': [3,5,10],
    # 'conv_1_strides':[(2, 2), (1, 1)],
    # 'spatial_drop_rate_1':(0.0, 1.0, 10),
    # 'pool_1_size':[(2, 2), (3, 3)],
    # 'conv_2_filters':[20, 40, 80],
    # 'conv_2_kernel':[(3, 3), (5, 5)],
    # 'conv_2_strides':[(2, 2), (1, 1)],
    # 'pool_2_size':[(2, 2), (3, 3)],
    # 'conv_3_filters':[20, 80, 100],
    # 'conv_3_kernel':[(2, 2), (3, 3)],
    # 'conv_3_strides':[(2, 2), (1, 1)],
    # 'dense_1_f_units':[40, 80, 120],
    # 'dense_2_f_units':[40, 80, 120],
    # 'dense_3_f_units':[40, 80, 120],
    # 'dense_1_ca_units':[6, 20, 60],
    # 'dense_2_co_units':[20, 40, 80],
    # 'dense_3_co_units':[20, 40, 80],
    # 'drop_1_comb_rate':(0, 1, 10),
    # 'model_opt' :[keras.optimizers.Adam,keras.optimizers.Nadam],
    # 'model_epochs' :[30,40,50,100],
    # 'model_batchsize' :[5,10,30],
    # 'learning_rate' :(0.0001,100,10)
}
def fluoro_model(X_talos, y_talos, X_val, y_val, params):
    """Talos model function for the quick deployment smoke test.

    Two architecturally identical 2D conv towers (one per fluoro view,
    separate weights) are merged with a dense encoding of the
    calibration vector and regressed onto the 6-component label.  Only
    the l1/l2 regularization strengths come from `params`; everything
    else is pinned, and training runs for a single epoch so the scan
    finishes quickly.

    Args:
        X_talos: [images, calibration] training inputs; the image array
            is indexed as X_talos[0][:, view, :, :] below, so it is
            assumed to be (n, 2, 128, 128) -- TODO confirm vs data_comp.
        y_talos: training labels.
        X_val, y_val: validation data in the same layout.
        params: Talos hyperparameter dict ('regularizer_l1'/'_l2').

    Returns:
        (history, model) as required by the Talos model contract.
    """
    def root_mean_squared_error(y_true, y_pred):
        # RMSE reported as a custom metric (Keras ships no built-in RMSE).
        return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))

    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)

    # ---- Hyperparameters: only the regularizer is scanned this round. ----
    regularizer = keras.regularizers.l1_l2(l1=params['regularizer_l1'], l2=params['regularizer_l2'])
    activation_fn = 'relu'
    kern_init = 'glorot_uniform'

    conv_1_filters, conv_1_kernel, conv_1_strides, conv_1_padding = 20, (5, 5), (2, 2), 'valid'
    spatial_drop_rate_1 = 0.5
    pool_1_size, pool_1_padding = (2, 2), 'same'
    conv_2_filters, conv_2_kernel, conv_2_strides, conv_2_padding = 40, (3, 3), (1, 1), 'same'
    pool_2_size, pool_2_padding = (2, 2), 'same'
    conv_3_filters, conv_3_kernel, conv_3_strides, conv_3_padding = 60, (2, 2), (1, 1), 'valid'
    pool_3_size, pool_3_padding = (2, 2), 'valid'

    dense_1_f_units = 80
    dense_2_f_units = 120
    dense_3_f_units = 120
    dense_1_ca_units = 60
    dense_2_co_units = 80
    drop_1_comb_rate = 0.1
    dense_3_co_units = 80
    main_output_units, main_output_act = 6, 'linear'

    model_opt = 'adam'
    model_loss = 'mse'
    model_metric = root_mean_squared_error
    model_epochs = 1  # smoke test: a single epoch
    model_batchsize = 10

    def conv_tower(inp, suffix):
        # Per-view feature extractor.  Replaces the former copy-pasted
        # twin towers; called once per input, so each view still gets
        # its own (non-shared) weights, in the same creation order.
        bn = keras.layers.BatchNormalization()(inp)
        conv_1 = keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_padding, activation=activation_fn, input_shape=img_input_shape, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(bn)
        spat_1 = keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1)
        pool_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_padding, data_format=channel_order)(spat_1)
        conv_2 = keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_padding, activation=activation_fn, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_1)
        pool_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_padding, data_format=channel_order)(conv_2)
        # NOTE: conv_3 has no activation (linear), as in the original.
        conv_3 = keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_padding, data_format=channel_order, activity_regularizer=regularizer, kernel_initializer=kern_init)(pool_2)
        pool_3 = keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_padding, data_format=channel_order)(conv_3)
        flat = keras.layers.Flatten()(pool_3)
        dense_1 = keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_1_f_' + suffix)(flat)
        dense_2 = keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_2_f_' + suffix)(dense_1)
        return keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, activity_regularizer=regularizer, name='dense_3_f_' + suffix)(dense_2)

    input_fluoro_1 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape, dtype='float32', name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6,), dtype='float32', name='cali_inpt')

    dense_3_f_1 = conv_tower(input_fluoro_1, '1')
    dense_3_f_2 = conv_tower(input_fluoro_2, '2')

    # Calibration branch + merge head.
    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, name='dense_1_cali')(input_cali)
    dense_1_comb = keras.layers.concatenate([dense_3_f_1, dense_3_f_2, dense_1_cali], name='dense_1_comb')
    dense_2_comb = keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, use_bias=True, kernel_initializer=kern_init, name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units, activation=main_output_act, name='main_output')(dense_3_comb)

    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)

    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
    # Split the stacked image array into the two single-channel views.
    result = model.fit(
        x=[np.expand_dims(X_talos[0][:, 0, :, :], axis=3), np.expand_dims(X_talos[0][:, 1, :, :], axis=3), X_talos[1]],
        y=y_talos,
        epochs=model_epochs,
        batch_size=model_batchsize,
        validation_data=([np.expand_dims(X_val[0][:, 0, :, :], axis=3), np.expand_dims(X_val[0][:, 1, :, :], axis=3), X_val[1]], y_val),
        shuffle=True,
        verbose=1)
    return result, model
# ---------------------------------------------------------------------
# Smoke-test driver: keep only the first 5 samples of every array so the
# whole Talos pipeline runs end-to-end in seconds, then dump the scan
# artefacts.  The original also had a bare `t.saved_models` expression
# statement after the scan; it had no effect and was removed.
# ---------------------------------------------------------------------
image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp()
image_train_sub = image_train_sub[:5]
image_val = image_val[:5]
cali_train_sub = cali_train_sub[:5]
cali_val = cali_val[:5]
label_train_sub = label_train_sub[:5]
label_val = label_val[:5]

print('\n' * 3)
print('Here we go: ')
print('\n' * 3)

t = talos.Scan(
    x=[image_train_sub, cali_train_sub],
    y=label_train_sub,
    x_val=[image_val, cali_val],
    y_val=label_val,
    params=params,
    model=fluoro_model,
    grid_downsample=0.5,
    random_method='uniform_mersenne',
    clear_tf_session=True,
    print_params=True,
    dataset_name=expr_name,
    experiment_no=expr_no,
    debug=True)

# Report every scan artefact in the same banner format.
for attr_label, attr_value in (
        ('t.data', t.data),
        ('t.details', t.details),
        ('t.saved_models', t.saved_models),
        ('t.saved_weights', t.saved_weights)):
    print('\n\n\n')
    print('-------------')
    print('\n')
    print(attr_label)
    print('\n')
    print(attr_value)
    print('\n')
    print('-------------')
    print('\n\n\n')
| Python |
3D | john-drago/fluoro | code/hyperparameter/vox_fluoro/vox_fluoro_res_talos_2/vox_fluoro_res_talos_2.py | .py | 61,219 | 1,147 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
import talos
# Residual-network variant of the voxel+fluoro experiments.
# Experiment bookkeeping: the experiment is named after this script file
# (sys.argv[0] with the trailing ".py" sliced off -- NOTE(review): a bare
# [:-3] assumes argv[0] really ends in ".py").
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/vox_fluoro'), expr_name))
# Echoed so the run log records where artefacts land.
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def data_comp(num_of_samples=None):
    """Load voxels, fluoro images, calibration vectors and labels, and
    split them into train/validation sets by random index draw.

    The chosen index sets (train / val / test) are pickled to
    ``<save_dir>/<expr_name>_hist_objects_<expr_no>.pkl`` so the exact
    split of a run can be reconstructed later.  The test indices are
    recorded but their data is not returned.

    Args:
        num_of_samples: optional cap on how many samples to draw before
            splitting; ``None`` uses the whole dataset.

    Returns:
        (vox_mat_train, vox_mat_val, image_mat_train, image_mat_val,
         cali_mat_train, cali_mat_val, label_mat_train, label_mat_val)
    """
    def split_train_test(shape, num_of_samples=None, ratio=0.2):
        # Draw a random permutation (or random subset when
        # num_of_samples is given) and carve off the first `ratio`
        # fraction.  Fixed: `ratio` was previously ignored in favour of
        # a hard-coded 0.2, so the parameter had no effect.
        draw = shape if num_of_samples is None else num_of_samples
        shuffled_indices = np.random.choice(shape, size=draw, replace=False)
        test_set_size = int(len(shuffled_indices) * ratio)
        test_indx = shuffled_indices[:test_set_size]
        train_indx = shuffled_indices[test_set_size:]
        return test_indx, train_indx

    vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
    vox_init = vox_file['vox_dset']
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    label_init = label_file['labels_dset']
    cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    cali_init = cali_file['cali_len3_rot']

    # Outer split: hold out the test indices (data discarded here).
    test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
    # Inner split returns positions *within* train_sup_indxs; map them
    # back to absolute dataset indices.
    val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
    val_indxs = train_sup_indxs[val_indxs]
    train_indxs = train_sup_indxs[train_indxs]

    # Sort so the pickled record is ordered and slicing is deterministic.
    test_indxs = sorted(list(test_indxs))
    val_indxs = sorted(list(val_indxs))
    train_indxs = sorted(list(train_indxs))

    var_dict = {
        'test_indxs': test_indxs,
        'val_indxs': val_indxs,
        'train_indxs': train_indxs,
    }

    # Load each dataset fully, slice it, then release the file handle.
    vox_mat_train = vox_init[:]
    vox_mat_val = vox_mat_train[val_indxs]
    vox_mat_train = vox_mat_train[train_indxs]
    vox_file.close()

    image_mat_train = image_init[:]
    image_mat_val = image_mat_train[val_indxs]
    image_mat_train = image_mat_train[train_indxs]
    image_file.close()

    cali_mat_train = cali_init[:]
    cali_mat_val = cali_mat_train[val_indxs]
    cali_mat_train = cali_mat_train[train_indxs]
    cali_file.close()

    label_mat_train = label_init[:]
    label_mat_val = label_mat_train[val_indxs]
    label_mat_train = label_mat_train[train_indxs]
    label_file.close()

    # Persist the split for post-hoc reproducibility.
    with open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb') as hist_file:
        pickle.dump(var_dict, hist_file)

    return vox_mat_train, vox_mat_val, image_mat_train, image_mat_val, cali_mat_train, cali_mat_val, label_mat_train, label_mat_val
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-normalized MSE.

    Each squared error is divided by the per-component label variance
    stored in labels_stats.h5py (dataset 'var'), so no single label
    component dominates the loss.  Assumes 'var' broadcasts against the
    prediction tensor -- TODO confirm against the stats file.
    """
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    with h5py.File(stats_path, 'r') as stats_file:
        label_var = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / label_var)
def cust_mae_normalized(y_true, y_pred):
    """Std-normalized MAE.

    Each absolute error is divided by the per-component label standard
    deviation stored in labels_stats.h5py (dataset 'std').  Assumes
    'std' broadcasts against the prediction tensor -- TODO confirm.
    """
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    with h5py.File(stats_path, 'r') as stats_file:
        label_std = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.abs((y_true - y_pred) / label_std))
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': [None],
'v_res_act_fn': ['elu'],
'v_conv_0_filters': [30],
'v_conv_0_kernel': [9],
'v_conv_0_strides_0': [2],
'v_conv_0_strides_1': [2],
'v_conv_0_strides_2': [2],
'v_conv_0_pad': ['same'],
'v_spatial_drop_rate_0': [0.3],
'v_conv_1_filters': [30],
'v_conv_1_kernel': [5],
'v_conv_1_strides_0': [2],
'v_conv_1_strides_1': [2],
'v_conv_1_strides_2': [3],
'v_conv_1_pad': ['same'],
# ---
# Pool After Initial Layers
'v_pool_0_size': [2],
'v_pool_0_pad': ['same'],
# ---
# Second Run of Entry Layers
'v_conv_2_filters': [30],
'v_conv_2_kernel': [5],
'v_conv_2_strides_0': [2],
'v_conv_2_strides_1': [2],
'v_conv_2_strides_2': [2],
'v_conv_2_pad': ['same'],
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': [30],
'v_conv_3_kernel': [3],
'v_conv_3_strides_0': [1],
'v_conv_3_strides_1': [1],
'v_conv_3_strides_2': [1],
'v_conv_3_pad': ['same'],
'v_spatial_drop_rate_2': [0.3],
'v_conv_4_filters': [30],
'v_conv_4_kernel': [3],
'v_conv_4_strides_0': [1],
'v_conv_4_strides_1': [1],
'v_conv_4_strides_2': [1],
'v_conv_4_pad': ['same'],
# 2
'v_conv_5_filters': [30],
'v_conv_5_kernel': [3],
'v_conv_5_strides_0': [1],
'v_conv_5_strides_1': [1],
'v_conv_5_strides_2': [1],
'v_conv_5_pad': ['same'],
'v_spatial_drop_rate_3': [0.3],
'v_conv_6_filters': [30],
'v_conv_6_kernel': [3],
'v_conv_6_strides_0': [1],
'v_conv_6_strides_1': [1],
'v_conv_6_strides_2': [1],
'v_conv_6_pad': ['same'],
# 3
'v_conv_7_filters': [30],
'v_conv_7_kernel': [3],
'v_conv_7_strides_0': [1],
'v_conv_7_strides_1': [1],
'v_conv_7_strides_2': [1],
'v_conv_7_pad': ['same'],
'v_spatial_drop_rate_4': [0.3],
'v_conv_8_filters': [30],
'v_conv_8_kernel': [3],
'v_conv_8_strides_0': [1],
'v_conv_8_strides_1': [1],
'v_conv_8_strides_2': [1],
'v_conv_8_pad': ['same'],
# 4
'v_conv_9_filters': [40],
'v_conv_9_kernel': [3],
'v_conv_9_strides_0': [2],
'v_conv_9_strides_1': [2],
'v_conv_9_strides_2': [2],
'v_conv_9_pad': ['same'],
'v_spatial_drop_rate_5': [0.3],
'v_conv_10_filters': [40],
'v_conv_10_kernel': [3],
'v_conv_10_strides_0': [1],
'v_conv_10_strides_1': [1],
'v_conv_10_strides_2': [1],
'v_conv_10_pad': ['same'],
'v_conv_11_filters': [40],
'v_conv_11_kernel': [3],
'v_conv_11_strides_0': [2],
'v_conv_11_strides_1': [2],
'v_conv_11_strides_2': [2],
'v_conv_11_pad': ['same'],
# 5
'v_conv_12_filters': [50],
'v_conv_12_kernel': [2],
'v_conv_12_strides_0': [2],
'v_conv_12_strides_1': [2],
'v_conv_12_strides_2': [2],
'v_conv_12_pad': ['same'],
'v_spatial_drop_rate_6': [0.3],
'v_conv_13_filters': [50],
'v_conv_13_kernel': [2],
'v_conv_13_strides_0': [1],
'v_conv_13_strides_1': [1],
'v_conv_13_strides_2': [1],
'v_conv_13_pad': ['same'],
'v_conv_14_filters': [50],
'v_conv_14_kernel': [1],
'v_conv_14_strides_0': [2],
'v_conv_14_strides_1': [2],
'v_conv_14_strides_2': [2],
'v_conv_14_pad': ['same'],
# 6
'v_conv_15_filters': [50],
'v_conv_15_kernel': [2],
'v_conv_15_strides_0': [2],
'v_conv_15_strides_1': [2],
'v_conv_15_strides_2': [2],
'v_conv_15_pad': ['same'],
'v_spatial_drop_rate_7': [0.3],
'v_conv_16_filters': [50],
'v_conv_16_kernel': [2],
'v_conv_16_strides_0': [1],
'v_conv_16_strides_1': [1],
'v_conv_16_strides_2': [1],
'v_conv_16_pad': ['same'],
'v_conv_17_filters': [50],
'v_conv_17_kernel': [1],
'v_conv_17_strides_0': [2],
'v_conv_17_strides_1': [2],
'v_conv_17_strides_2': [2],
'v_conv_17_pad': ['same'],
# ---
# Final Convs
'v_spatial_drop_rate_8': [0.5],
'v_conv_18_filters': [50],
'v_conv_18_kernel': [2],
'v_conv_18_strides_0': [1],
'v_conv_18_strides_1': [1],
'v_conv_18_strides_2': [1],
'v_conv_18_pad': ['valid'],
'dense_1_v_units': [75],
'dense_2_v_units': [50],
# ---
# 2D CONV
# ---
'intra_act_fn': [None],
'res_act_fn': ['elu'],
# Entry Fluoro Layers
'conv_0_filters': [30],
'conv_0_kernel': [5],
'conv_0_strides': [2],
'conv_0_pad': ['same'],
'spatial_drop_rate_0': [0.3],
'conv_1_filters': [30],
'conv_1_kernel': [5],
'conv_1_strides': [2],
'conv_1_pad': ['same'],
# ---
# Pool After Initial Layers
'pool_0_size': [2],
'pool_0_pad': ['same'],
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': [30],
'conv_2_kernel': [3],
'conv_2_strides': [1],
'conv_2_pad': ['same'],
'spatial_drop_rate_1': [0.3],
'conv_3_filters': [30],
'conv_3_kernel': [3],
'conv_3_strides': [1],
'conv_3_pad': ['same'],
# 2
'conv_4_filters': [30],
'conv_4_kernel': [3],
'conv_4_strides': [1],
'conv_4_pad': ['same'],
'spatial_drop_rate_2': [0.3],
'conv_5_filters': [30],
'conv_5_kernel': [3],
'conv_5_strides': [1],
'conv_5_pad': ['same'],
# 3
'conv_6_filters': [30],
'conv_6_kernel': [3],
'conv_6_strides': [1],
'conv_6_pad': ['same'],
'spatial_drop_rate_3': [0.3],
'conv_7_filters': [30],
'conv_7_kernel': [3],
'conv_7_strides': [1],
'conv_7_pad': ['same'],
# 4
'conv_8_filters': [30],
'conv_8_kernel': [3],
'conv_8_strides': [1],
'conv_8_pad': ['same'],
'spatial_drop_rate_4': [0.3],
'conv_9_filters': [30],
'conv_9_kernel': [3],
'conv_9_strides': [1],
'conv_9_pad': ['same'],
# 5
'conv_10_filters': [30],
'conv_10_kernel': [3],
'conv_10_strides': [1],
'conv_10_pad': ['same'],
'spatial_drop_rate_5': [0.3],
'conv_11_filters': [30],
'conv_11_kernel': [3],
'conv_11_strides': [1],
'conv_11_pad': ['same'],
# 6
'conv_12_filters': [30],
'conv_12_kernel': [3],
'conv_12_strides': [1],
'conv_12_pad': ['same'],
'spatial_drop_rate_6': [0.3],
'conv_13_filters': [30],
'conv_13_kernel': [3],
'conv_13_strides': [1],
'conv_13_pad': ['same'],
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': [None],
'c_res_act_fn': ['elu'],
# 0
'comb_0_filters': [60],
'comb_0_kernel': [3],
'comb_0_strides': [1],
'comb_0_pad': ['same'],
'comb_spatial_0': [0.3],
'comb_1_filters': [60],
'comb_1_kernel': [3],
'comb_1_strides': [1],
'comb_1_pad': ['same'],
# 1
'comb_2_filters': [60],
'comb_2_kernel': [3],
'comb_2_strides': [1],
'comb_2_pad': ['same'],
'comb_spatial_1': [0.3],
'comb_3_filters': [60],
'comb_3_kernel': [3],
'comb_3_strides': [1],
'comb_3_pad': ['same'],
# 2
'comb_4_filters': [60],
'comb_4_kernel': [3],
'comb_4_strides': [1],
'comb_4_pad': ['same'],
'comb_spatial_2': [0.3],
'comb_5_filters': [60],
'comb_5_kernel': [3],
'comb_5_strides': [1],
'comb_5_pad': ['same'],
# 3
'comb_6_filters': [60],
'comb_6_kernel': [3],
'comb_6_strides': [1],
'comb_6_pad': ['same'],
'comb_spatial_3': [0.3],
'comb_7_filters': [60],
'comb_7_kernel': [3],
'comb_7_strides': [1],
'comb_7_pad': ['same'],
# 4
'comb_8_filters': [60],
'comb_8_kernel': [3],
'comb_8_strides': [1],
'comb_8_pad': ['same'],
'comb_spatial_4': [0.3],
'comb_9_filters': [60],
'comb_9_kernel': [3],
'comb_9_strides': [1],
'comb_9_pad': ['same'],
# 5
'comb_10_filters': [60],
'comb_10_kernel': [2],
'comb_10_strides': [2],
'comb_10_pad': ['same'],
'comb_spatial_5': [0.3],
'comb_11_filters': [60],
'comb_11_kernel': [2],
'comb_11_strides': [1],
'comb_11_pad': ['same'],
'comb_12_filters': [60],
'comb_12_kernel': [1],
'comb_12_strides': [2],
'comb_12_pad': ['same'],
# 6
'comb_13_filters': [60],
'comb_13_kernel': [2],
'comb_13_strides': [2],
'comb_13_pad': ['same'],
'comb_spatial_6': [0.3],
'comb_14_filters': [60],
'comb_14_kernel': [2],
'comb_14_strides': [1],
'comb_14_pad': ['same'],
'comb_15_filters': [60],
'comb_15_kernel': [1],
'comb_15_strides': [2],
'comb_15_pad': ['same'],
# 7
'comb_16_filters': [60],
'comb_16_kernel': [2],
'comb_16_strides': [2],
'comb_16_pad': ['same'],
'comb_spatial_7': [0.3],
'comb_17_filters': [60],
'comb_17_kernel': [2],
'comb_17_strides': [1],
'comb_17_pad': ['same'],
'comb_18_filters': [60],
'comb_18_kernel': [1],
'comb_18_strides': [2],
'comb_18_pad': ['same'],
# ---
# Final Convs After Fluoro
'comb_19_filters': [60],
'comb_19_kernel': [2],
'comb_19_strides': [1],
'comb_19_pad': ['valid'],
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': [50],
'dense_comb_1_units': [50],
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': ['elu'],
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': [60],
'vox_flu_units_1': [50],
'vox_flu_units_2': [30],
'vox_flu_units_3': [15],
'vox_flu_units_4': [6],
# ---
# Cali Units
'cali_0_units': [20],
'cali_1_units': [20],
'cali_2_units': [20],
'cali_3_units': [6],
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': ['elu'],
'top_level_intra': [None],
# ---
# Top Level Dense
'top_dense_0': [6],
'top_dense_1': [6],
'top_dense_2': [6],
'top_dense_3': [6],
'top_dense_4': [6],
'top_dense_5': [6],
'top_dense_6': [6],
# Main Output
'main_output_units': [6],
'main_output_act': ['linear'],
# General Housekeeping
'v_conv_regularizer': [None],
'conv_regularizer': [None],
'dense_regularizer_1': [None],
'dense_regularizer_2': [None],
'activation_fn': ['elu'],
'kern_init': ['he_uniform'],
'model_opt': [tf.keras.optimizers.Adam, tf.keras.optimizers.Nadam, tf.keras.optimizers.Adadelta, tf.keras.optimizers.Adamax, tf.keras.optimizers.SGD],
'amsgrad': [True, False],
'learning_rate': [0.0001, 0.001, 0.01, 0.1],
'model_epochs': [40],
'model_batchsize': [9],
'model_loss': [cust_mean_squared_error_var, cust_mae_normalized, 'mse', 'mae', tf.keras.losses.Huber(delta=0.5)],
'model_metric': ['mae']
}
def fluoro_model(X_train, y_train, X_val, y_val, params):
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
if params['model_opt'] == tf.keras.optimizers.Adam:
model.compile(optimizer=params['model_opt'](lr=params['learning_rate'], amsgrad=params['amsgrad']), loss=params['model_loss'], metrics=[params['model_metric']])
elif params['model_opt'] == tf.keras.optimizers.SGD:
model.compile(optimizer=params['model_opt'](lr=params['learning_rate'], momentum=0.01, nesterov=True), loss=params['model_loss'], metrics=[params['model_metric']])
else:
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# model.summary()
# -----------------------------------------------------------------
result = model.fit(x={'input_vox': np.expand_dims(X_train[0], axis=-1), 'input_fluoro_1': np.expand_dims(X_train[1][:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(X_train[1][:, 1, :, :], axis=-1), 'input_cali': X_train[2]}, y=y_train, validation_data=([np.expand_dims(X_val[0], axis=-1), np.expand_dims(X_val[1][:, 0, :, :], axis=-1), np.expand_dims(X_val[1][:, 1, :, :], axis=-1), X_val[2]], y_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
return result, model
# Pull the pre-split matrices from data_comp() and package them the way the
# model function expects: X is [voxels, fluoro images, calibration], y is the
# label matrix for the corresponding split.
(vox_mat_train, vox_mat_val, image_mat_train, image_mat_val,
 cali_mat_train, cali_mat_val, label_mat_train, label_mat_val) = data_comp()

X_train = [vox_mat_train, image_mat_train, cali_mat_train]
y_train = label_mat_train
X_val = [vox_mat_val, image_mat_val, cali_mat_val]
y_val = label_mat_val

for banner_line in ('\n' * 3, 'Here we go:', '\n' * 3):
    print(banner_line)

# Randomly sample 30% of the hyperparameter grid and train fluoro_model on
# each draw; talos logs every round under "<expr_name>_<expr_no>".
t = talos.Scan(x=X_train, y=y_train, x_val=X_val, y_val=y_val,
               params=params, model=fluoro_model,
               fraction_limit=0.3, random_method='uniform_mersenne',
               clear_session=True, print_params=True,
               experiment_name=expr_name + '_' + expr_no)
| Python |
3D | john-drago/fluoro | code/hyperparameter/vox_fluoro/vox_fluoro_img_stnd_hyperas/vox_fluoro_img_stnd_hyperas.py | .py | 14,888 | 293 | import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import json
import csv
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# Experiment bookkeeping: all artifacts of run '1' land under
# ~/fluoro/code/hyperparameter/vox_fluoro/vox_fluoro_img_stnd_hyperas.
expr_no = '1'
_base_dir = os.path.expanduser('~/fluoro/code/hyperparameter/vox_fluoro')
save_dir = os.path.abspath(os.path.join(_base_dir, 'vox_fluoro_img_stnd_hyperas'))
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None, num_of_samples=5):
    """Load the compiled fluoro datasets and return the training split.

    Randomly draws ``num_of_samples`` examples (all of them when ``None``),
    holds out 20% as a test set, and returns the training rows of each of
    the four index-aligned HDF5 datasets.

    Args:
        first_indx, last_indx: unused; kept for call-site compatibility.
        num_of_samples: number of examples to draw before splitting, or
            ``None`` to use the whole dataset.

    Returns:
        Tuple ``(voxels, images, calibration, labels)`` of training matrices.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')

    def split_train_test(shape, num_of_samples=5, ratio=0.2):
        # Shuffle (and optionally subsample) example indices, then carve the
        # first `ratio` fraction off as the held-out test set.
        if num_of_samples is None:
            shuffled_indices = np.random.choice(shape, size=shape, replace=False)
        else:
            shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
        # Fix: honor the `ratio` parameter (was hard-coded to 0.2, which
        # happened to equal the default).
        test_set_size = int(len(shuffled_indices) * ratio)
        return shuffled_indices[:test_set_size], shuffled_indices[test_set_size:]

    def load_train_rows(file_name, dset_name, indxs):
        # Read one HDF5 dataset fully into memory and keep only the training
        # rows; the `with` block closes the file even on error.
        with h5py.File(os.path.join(base_dir, file_name), 'r') as h5f:
            mat = h5f[dset_name][:]
        return mat[indxs]

    with h5py.File(os.path.join(base_dir, 'labels.h5py'), 'r') as label_file:
        total_examples = len(label_file['labels_dset'])

    # Fix: forward this function's num_of_samples instead of hard-coding 5
    # (the default is unchanged, so existing callers behave identically).
    _, train_indxs = split_train_test(total_examples, num_of_samples=num_of_samples)
    # NumPy fancy indexing into the loaded matrices expects a sorted list.
    train_indxs = sorted(list(train_indxs))

    vox_mat_train = load_train_rows('voxels_pad.h5py', 'vox_dset', train_indxs)
    image_mat_train = load_train_rows('images.h5py', 'image_dset', train_indxs)
    cali_mat_train = load_train_rows('calibration.h5py', 'cali_len3_rot', train_indxs)
    label_mat_train = load_train_rows('labels.h5py', 'labels_dset', train_indxs)
    return vox_mat_train, image_mat_train, cali_mat_train, label_mat_train
def fluoro_model(vox_mat_train, image_mat_train, cali_mat_train, label_mat_train):
    # Hyperas template function: every double-brace expression below is
    # replaced by a sampled hyperparameter value when hyperas re-parses this
    # function's SOURCE TEXT, so the body must stay textually intact.
    # Builds a four-input Keras model -- one 3-D voxel volume, two 2-D fluoro
    # images, and a calibration vector -- regressing a 6-vector output, trains
    # it, and returns the hyperopt result dict.
    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)
    vox_input_shape = (199, 164, 566, 1)
    cali_input_shape = (6,)

    def cust_mean_squared_error_var(y_true, y_pred):
        # MSE with each output dimension divided by its precomputed variance,
        # so all six pose components contribute on a comparable scale.
        # NOTE(review): this reopens the stats file each time the loss is
        # constructed -- confirm the I/O cost is acceptable.
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        var_dset = stats_file['var']
        var_v = var_dset[:]
        stats_file.close()
        return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)

    # ---- searched hyperparameters: voxel (3-D conv) branch ----
    v_conv_1_filters = {{choice([20, 30, 40])}}
    v_conv_1_kernel = {{choice([5, 7, 11, 13, 21])}}
    v_conv_1_strides = {{choice([1, 2])}}
    v_conv_1_pad = 'same'
    v_spatial_drop_rate_1 = {{uniform(0, 1)}}
    v_pool_1_size = {{choice([2, 3])}}
    v_pool_1_pad = 'same'
    v_conv_2_filters = {{choice([40, 50, 60, 80])}}
    v_conv_2_kernel = {{choice([5, 7, 11])}}
    v_conv_2_strides = {{choice([1, 2])}}
    v_conv_2_pad = 'same'
    v_spatial_drop_rate_2 = {{uniform(0, 1)}}
    v_pool_2_size = {{choice([2, 3])}}
    v_pool_2_pad = 'same'
    v_conv_3_filters = {{choice([40, 50, 60, 80])}}
    v_conv_3_kernel = {{choice([3, 5, 7])}}
    v_conv_3_strides = {{choice([1, 2])}}
    v_conv_3_pad = 'same'
    v_spatial_drop_rate_3 = {{uniform(0, 1)}}
    v_pool_3_size = {{choice([2, 3])}}
    v_pool_3_pad = 'same'
    dense_1_v_units = {{choice([750, 1000, 1500])}}
    dense_2_v_units = {{choice([500, 750, 1000])}}
    dense_3_v_units = {{choice([250, 500, 750])}}
    # ---- searched hyperparameters: fluoro image (2-D conv) branches ----
    # The same sampled values configure BOTH image branches, but each branch
    # instantiates its own layers (hyperparameters shared, weights not).
    conv_1_filters = {{choice([20, 30, 40, 50, 60])}}
    conv_1_kernel = {{choice([3, 5, 7])}}
    conv_1_strides = {{choice([1, 2])}}
    conv_1_pad = 'same'
    spatial_drop_rate_1 = {{uniform(0, 1)}}
    pool_1_size = {{choice([2, 3])}}
    pool_1_pad = 'same'
    conv_2_filters = {{choice([40, 50, 60, 80])}}
    conv_2_kernel = {{choice([3, 5, 7])}}
    conv_2_strides = {{choice([1, 2])}}
    conv_2_pad = 'same'
    spatial_drop_rate_2 = {{uniform(0, 1)}}
    pool_2_size = {{choice([2, 3])}}
    pool_2_pad = 'same'
    conv_3_filters = {{choice([40, 50, 60, 80])}}
    conv_3_kernel = {{choice([3, 5, 7])}}
    conv_3_strides = {{choice([1, 2])}}
    conv_3_pad = 'same'
    pool_3_size = {{choice([2, 3])}}
    pool_3_pad = 'same'
    dense_1_f_units = {{choice([40, 60, 80])}}
    dense_2_f_units = {{choice([40, 60, 80])}}
    dense_3_f_units = {{choice([40, 60, 80])}}
    # ---- searched hyperparameters: calibration branch and combined head ----
    dense_1_cali_units = {{choice([10, 20, 30])}}
    dense_2_cali_units = {{choice([10, 20, 30])}}
    dense_1_co_units = {{choice([60, 80, 100, 200])}}
    drop_1_comb_rate = {{uniform(0, 1)}}
    dense_2_co_units = {{choice([20, 40, 60])}}
    dense_3_co_units = {{choice([20, 40, 60])}}
    dense_4_co_units = {{choice([20, 40, 60])}}
    main_output_units = 6
    main_output_act = 'linear'
    # ---- regularization, initialization, optimizer, training schedule ----
    conv_regularizer = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    dense_regularizer_1 = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    dense_regularizer_2 = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    activation_fn = {{choice(['elu', 'relu'])}}
    kern_init = {{choice(['glorot_uniform', 'glorot_normal'])}}
    model_opt = {{choice(['adam', 'nadam', 'adagrad', 'rmsprop'])}}
    model_epochs = {{choice([30, 40, 50])}}
    model_batchsize = 3
    model_loss = cust_mean_squared_error_var
    model_metric = cust_mean_squared_error_var

    # ---- model inputs ----
    input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
    input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
    input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
    input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')

    # ---- voxel branch: three conv/dropout/pool stages, then three denses ----
    v_conv_1 = tf.keras.layers.Conv3D(filters=v_conv_1_filters, kernel_size=v_conv_1_kernel, strides=v_conv_1_strides, padding=v_conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(input_vox)
    v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_1)(v_conv_1)
    v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_1_size, padding=v_pool_1_pad, data_format=channel_order)(v_spat_1)
    # Batch norm only between stage 1 and stage 2 (not after stage 2/3).
    v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
    v_conv_2 = tf.keras.layers.Conv3D(filters=v_conv_2_filters, kernel_size=v_conv_2_kernel, strides=v_conv_2_strides, padding=v_conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(v_bn_2)
    v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_2)(v_conv_2)
    v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_2_size, padding=v_pool_2_pad, data_format=channel_order)(v_spat_2)
    v_conv_3 = tf.keras.layers.Conv3D(filters=v_conv_3_filters, kernel_size=v_conv_3_kernel, strides=v_conv_3_strides, padding=v_conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(v_pool_2)
    v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_3)(v_conv_3)
    v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_3_size, padding=v_pool_3_pad, data_format=channel_order)(v_spat_3)
    v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
    dense_1_v = tf.keras.layers.Dense(units=dense_1_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(v_flatten_1)
    dense_2_v = tf.keras.layers.Dense(units=dense_2_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_v)
    dense_3_v = tf.keras.layers.Dense(units=dense_3_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_v)

    # ---- fluoro image branch 1: per-image standardization, then convs ----
    per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
    conv_1_1 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(per_image_stand_1)
    spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_pad, data_format=channel_order)(spat_1_1)
    conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_1_1)
    spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_2)(conv_2_1)
    pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_pad, data_format=channel_order)(spat_2_1)
    conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_2_1)
    pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_pad, data_format=channel_order)(conv_3_1)
    flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = tf.keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(flatten_1_1)
    dense_2_f_1 = tf.keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_f_1)
    dense_3_f_1 = tf.keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_f_1)

    # ---- fluoro image branch 2: same architecture, separate weights ----
    per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
    conv_1_2 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(per_image_stand_2)
    spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_pad, data_format=channel_order)(spat_1_2)
    conv_2_2 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_1_2)
    spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_2)(conv_2_2)
    pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_pad, data_format=channel_order)(spat_2_2)
    conv_3_2 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_2_2)
    pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_pad, data_format=channel_order)(conv_3_2)
    flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = tf.keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(flatten_1_2)
    dense_2_f_2 = tf.keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_f_2)
    dense_3_f_2 = tf.keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_f_2)

    # ---- calibration branch: two small denses on the 6-vector ----
    dense_1_cali = tf.keras.layers.Dense(units=dense_1_cali_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(input_cali)
    dense_2_cali = tf.keras.layers.Dense(units=dense_2_cali_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_cali)

    # ---- fuse all four branches and regress the 6-vector output ----
    dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
    dense_1_comb = tf.keras.layers.Dense(units=dense_1_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_0_comb)
    dense_drop_1 = tf.keras.layers.Dropout(rate=drop_1_comb_rate)(dense_1_comb)
    dense_2_comb = tf.keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_drop_1)
    dense_3_comb = tf.keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_2_comb)
    dense_4_comb = tf.keras.layers.Dense(units=dense_4_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_3_comb)
    main_output = tf.keras.layers.Dense(units=main_output_units, activation=main_output_act, kernel_initializer=kern_init, name='main_output')(dense_4_comb)

    # ---- compile and train; report the best epoch loss back to hyperopt ----
    model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
    # expand_dims appends the trailing channel axis; the two fluoro views are
    # split out of axis 1 of the stacked image matrix.
    result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_split=0.2, epochs=model_epochs, batch_size=model_batchsize, shuffle=True, verbose=False)
    # Hyperopt minimizes 'loss'; STATUS_OK marks the trial as successful.
    return {'loss': np.amin(result.history['loss']), 'status': STATUS_OK, 'model': model}
# Run the hyperas/hyperopt search (3 trials) and persist the winner.
best_run, best_model = optim.minimize(model=fluoro_model, data=data_comp,
                                      algo=tpe.suggest, max_evals=3, trials=Trials())

# Save the best hyperparameter assignment as JSON and as a key,value CSV.
# Fix: both files are now closed deterministically via `with` (the CSV handle
# was previously opened inline and never closed).
with open(os.path.join(save_dir, 'best_run_hyperas.json'), 'w') as json_file:
    json.dump(best_run, json_file)
with open(os.path.join(save_dir, 'best_run_hyperas.csv'), 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    for key, val in best_run.items():
        writer.writerow([key, val])

# Persist the best model's weights/architecture for later evaluation.
best_model.save(os.path.join(save_dir, 'vox_fluoro_img_stnd_hyperas' + '_' + 'best_model_hyperas.h5'))
| Python |
3D | john-drago/fluoro | code/hyperparameter/vox_fluoro/vox_fluoro_img_stnd_hyperas/vox_fluoro_img_stnd_hyperas_test.py | .py | 15,239 | 304 | import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import sys
import pickle
import json
import csv
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# Experiment bookkeeping: fixed run number plus an output directory that is
# created up front (idempotent thanks to exist_ok=True).
expr_no = '1'
_results_root = os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro')
save_dir = os.path.abspath(os.path.join(_results_root, 'vox_fluoro_img_stnd_hyperas'))
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None, num_of_samples=10):
    """Load the training-side arrays for the hyperas search.

    Reads the voxel, fluoro-image, label and calibration HDF5 datasets from
    ~/fluoro/data/compilation, draws one shuffled train/test split over the
    full dataset, and returns only the training arrays.

    Parameters:
        first_indx, last_indx -- unused; kept so the signature stays
            backward-compatible (hyperas invokes this with no arguments).
        num_of_samples -- unused at this level: the split below is always
            taken over the whole dataset (num_of_samples=None).

    Returns:
        (vox_mat_train, image_mat_train, cali_mat_train, label_mat_train)
    """
    vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
    vox_init = vox_file['vox_dset']
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    label_init = label_file['labels_dset']
    cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    cali_init = cali_file['cali_len3_rot']

    def split_train_test(shape, num_of_samples=10, ratio=0.2):
        # Shuffle [0, shape) (optionally subsampled to num_of_samples) and
        # split off the leading `ratio` fraction as the test set.
        if num_of_samples is None:
            shuffled_indices = np.random.choice(shape, size=shape, replace=False)
        else:
            shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
        # BUG FIX: honor the `ratio` parameter -- it was hard-coded to 0.2,
        # silently ignoring any caller-supplied value.  The default is
        # unchanged, so existing call sites behave identically.
        test_set_size = int(len(shuffled_indices) * ratio)
        test_indx = shuffled_indices[:test_set_size]
        train_indx = shuffled_indices[test_set_size:]
        return test_indx, train_indx

    test_indxs, train_indxs = split_train_test(len(label_init), num_of_samples=None)
    # Indices are sorted before use -- presumably for deterministic sample
    # ordering in the returned arrays (TODO confirm with downstream code).
    test_indxs = sorted(list(test_indxs))
    train_indxs = sorted(list(train_indxs))

    # Load each dataset fully into memory, take the training subset, and
    # close the file as soon as its array has been extracted.
    vox_mat_train = vox_init[:]
    vox_mat_train = vox_mat_train[train_indxs]
    vox_file.close()
    image_mat_train = image_init[:]
    image_mat_train = image_mat_train[train_indxs]
    image_file.close()
    cali_mat_train = cali_init[:]
    cali_mat_train = cali_mat_train[train_indxs]
    cali_file.close()
    label_mat_train = label_init[:]
    label_mat_train = label_mat_train[train_indxs]
    label_file.close()
    return vox_mat_train, image_mat_train, cali_mat_train, label_mat_train
def fluoro_model(vox_mat_train, image_mat_train, cali_mat_train, label_mat_train):
    """Build, train and score one candidate network for the hyperas search.

    Every ``{{choice([...])}}`` / ``{{uniform(0, 1)}}`` site below is a
    hyperas template marker: hyperas re-parses this function's SOURCE and
    substitutes one sampled value per site on each trial, so the body must
    stay parseable by hyperas' source extraction (do not restructure it).

    Inputs (as produced by ``data_comp``):
        vox_mat_train   -- voxel volumes; expanded to (N, 199, 164, 566, 1) at fit time
        image_mat_train -- paired fluoro images; sliced as [:, 0, :, :] / [:, 1, :, :]
        cali_mat_train  -- 6-value calibration vectors
        label_mat_train -- 6-DOF regression targets

    Returns the dict shape hyperas expects:
        {'loss': <min of the custom metric history>, 'status': STATUS_OK, 'model': <trained model>}
    """
    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)
    vox_input_shape = (199, 164, 566, 1)
    cali_input_shape = (6,)
    # Re-declared locally (duplicating module-level values) -- presumably
    # because hyperas executes this function from regenerated source where
    # module globals may not be in scope.  TODO confirm.
    expr_no = '1'
    save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), 'vox_fluoro_img_stnd_hyperas'))

    def cust_mean_squared_error_std(y_true, y_pred):
        # MSE with each residual divided by the per-dimension training-set
        # std loaded from labels_stats.h5py (reopened on every call).
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        std_dset = stats_file['std']
        std_v = std_dset[:]
        stats_file.close()
        return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))

    # ---- hyperas sample sites: voxel (3D conv) branch ----------------------
    v_conv_1_filters = {{choice([20, 30, 40])}}
    v_conv_1_kernel = {{choice([5, 7, 11, 13, 21])}}
    v_conv_1_strides = {{choice([1, 2])}}
    v_conv_1_pad = 'same'
    v_spatial_drop_rate_1 = {{uniform(0, 1)}}
    v_pool_1_size = {{choice([2, 3])}}
    v_pool_1_pad = 'same'
    v_conv_2_filters = {{choice([40, 50, 60, 80])}}
    v_conv_2_kernel = {{choice([5, 7, 11])}}
    v_conv_2_strides = {{choice([1, 2])}}
    v_conv_2_pad = 'same'
    v_spatial_drop_rate_2 = {{uniform(0, 1)}}
    v_pool_2_size = {{choice([2, 3])}}
    v_pool_2_pad = 'same'
    v_conv_3_filters = {{choice([40, 50, 60, 80])}}
    v_conv_3_kernel = {{choice([3, 5, 7])}}
    v_conv_3_strides = {{choice([1, 2])}}
    v_conv_3_pad = 'same'
    v_spatial_drop_rate_3 = {{uniform(0, 1)}}
    v_pool_3_size = {{choice([2, 3])}}
    v_pool_3_pad = 'same'
    dense_1_v_units = {{choice([750, 1000, 1500])}}
    dense_2_v_units = {{choice([500, 750, 1000])}}
    dense_3_v_units = {{choice([250, 500, 750])}}
    # ---- hyperas sample sites: fluoro (2D conv) branch (shared by both
    # fluoro inputs, so the two towers always draw the same architecture) ----
    conv_1_filters = {{choice([20, 30, 40, 50, 60])}}
    conv_1_kernel = {{choice([3, 5, 7])}}
    conv_1_strides = {{choice([1, 2])}}
    conv_1_pad = 'same'
    spatial_drop_rate_1 = {{uniform(0, 1)}}
    pool_1_size = {{choice([2, 3])}}
    pool_1_pad = 'same'
    conv_2_filters = {{choice([40, 50, 60, 80])}}
    conv_2_kernel = {{choice([3, 5, 7])}}
    conv_2_strides = {{choice([1, 2])}}
    conv_2_pad = 'same'
    spatial_drop_rate_2 = {{uniform(0, 1)}}
    pool_2_size = {{choice([2, 3])}}
    pool_2_pad = 'same'
    conv_3_filters = {{choice([40, 50, 60, 80])}}
    conv_3_kernel = {{choice([3, 5, 7])}}
    conv_3_strides = {{choice([1, 2])}}
    conv_3_pad = 'same'
    pool_3_size = {{choice([2, 3])}}
    pool_3_pad = 'same'
    dense_1_f_units = {{choice([40, 60, 80])}}
    dense_2_f_units = {{choice([40, 60, 80])}}
    dense_3_f_units = {{choice([40, 60, 80])}}
    # ---- hyperas sample sites: calibration branch and combined head --------
    dense_1_cali_units = {{choice([10, 20, 30])}}
    dense_2_cali_units = {{choice([10, 20, 30])}}
    dense_1_co_units = {{choice([60, 80, 100, 200])}}
    drop_1_comb_rate = {{uniform(0, 1)}}
    dense_2_co_units = {{choice([20, 40, 60])}}
    dense_3_co_units = {{choice([20, 40, 60])}}
    dense_4_co_units = {{choice([20, 40, 60])}}
    main_output_units = 6
    main_output_act = 'linear'
    # NOTE(review): regularizers come from standalone `keras` while every
    # layer uses `tf.keras` -- mixing the two packages can misbehave
    # depending on versions; confirm this is intentional.
    conv_regularizer = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    dense_regularizer_1 = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    dense_regularizer_2 = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
    activation_fn = {{choice(['elu', 'relu'])}}
    kern_init = {{choice(['glorot_uniform', 'glorot_normal'])}}
    model_opt = {{choice(['adam', 'nadam', 'adagrad', 'rmsprop'])}}
    model_epochs = {{choice([30, 40, 50])}}
    model_batchsize = 3
    model_loss = cust_mean_squared_error_std
    model_metric = cust_mean_squared_error_std

    # ---- graph construction: four named inputs ----------------------------
    input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
    input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
    input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
    input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
    # Voxel tower: three conv/dropout/pool stages, then three dense layers.
    v_conv_1 = tf.keras.layers.Conv3D(filters=v_conv_1_filters, kernel_size=v_conv_1_kernel, strides=v_conv_1_strides, padding=v_conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(input_vox)
    v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_1)(v_conv_1)
    v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_1_size, padding=v_pool_1_pad, data_format=channel_order)(v_spat_1)
    v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
    v_conv_2 = tf.keras.layers.Conv3D(filters=v_conv_2_filters, kernel_size=v_conv_2_kernel, strides=v_conv_2_strides, padding=v_conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(v_bn_2)
    v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_2)(v_conv_2)
    v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_2_size, padding=v_pool_2_pad, data_format=channel_order)(v_spat_2)
    v_conv_3 = tf.keras.layers.Conv3D(filters=v_conv_3_filters, kernel_size=v_conv_3_kernel, strides=v_conv_3_strides, padding=v_conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(v_pool_2)
    v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_3)(v_conv_3)
    v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_3_size, padding=v_pool_3_pad, data_format=channel_order)(v_spat_3)
    v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
    dense_1_v = tf.keras.layers.Dense(units=dense_1_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(v_flatten_1)
    dense_2_v = tf.keras.layers.Dense(units=dense_2_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_v)
    dense_3_v = tf.keras.layers.Dense(units=dense_3_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_v)
    # Fluoro tower 1: each image is first normalized to zero mean / unit
    # variance via tf.image.per_image_standardization.
    per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
    conv_1_1 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(per_image_stand_1)
    spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_pad, data_format=channel_order)(spat_1_1)
    conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_1_1)
    spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_2)(conv_2_1)
    pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_pad, data_format=channel_order)(spat_2_1)
    conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_2_1)
    pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_pad, data_format=channel_order)(conv_3_1)
    flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = tf.keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(flatten_1_1)
    dense_2_f_1 = tf.keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_f_1)
    dense_3_f_1 = tf.keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_f_1)
    # Fluoro tower 2: same architecture sampled with the same hyperparameters
    # (weights are NOT shared -- each tower builds its own layers).
    per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
    conv_1_2 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(per_image_stand_2)
    spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_pad, data_format=channel_order)(spat_1_2)
    conv_2_2 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_1_2)
    spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_2)(conv_2_2)
    pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_pad, data_format=channel_order)(spat_2_2)
    conv_3_2 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_2_2)
    pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_pad, data_format=channel_order)(conv_3_2)
    flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = tf.keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(flatten_1_2)
    dense_2_f_2 = tf.keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_f_2)
    dense_3_f_2 = tf.keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_f_2)
    # Calibration branch: two small dense layers on the 6-value vector.
    dense_1_cali = tf.keras.layers.Dense(units=dense_1_cali_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(input_cali)
    dense_2_cali = tf.keras.layers.Dense(units=dense_2_cali_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_cali)
    # Head: concatenate all four branches, then dense stack down to the
    # 6-unit linear regression output.
    dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
    dense_1_comb = tf.keras.layers.Dense(units=dense_1_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_0_comb)
    dense_drop_1 = tf.keras.layers.Dropout(rate=drop_1_comb_rate)(dense_1_comb)
    dense_2_comb = tf.keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_drop_1)
    dense_3_comb = tf.keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_2_comb)
    dense_4_comb = tf.keras.layers.Dense(units=dense_4_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_3_comb)
    main_output = tf.keras.layers.Dense(units=main_output_units, activation=main_output_act, kernel_initializer=kern_init, name='main_output')(dense_4_comb)
    model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
    # Write an architecture diagram for this trial into save_dir.
    tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, 'vox_fluoro_img_stnd_hyperas' + '_' + expr_no + '.png')), show_shapes=True)
    # Channel axes are added at fit time; the two fluoro frames are split
    # along axis 1 of image_mat_train into the two image inputs.
    result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_split=0.2, epochs=model_epochs, batch_size=model_batchsize, shuffle=True, verbose=True)
    # hyperas minimizes 'loss': report the best (lowest) value of the custom
    # metric over all training epochs.
    return {'loss': np.amin(result.history['cust_mean_squared_error_std']), 'status': STATUS_OK, 'model': model}
# Run the hyperas search: `data_comp` supplies the training arrays and
# `fluoro_model` is the template whose {{...}} sites are optimized.
best_run, best_model = optim.minimize(model=fluoro_model, data=data_comp, algo=tpe.suggest, max_evals=5, trials=Trials())

# Persist the winning hyperparameter assignment as JSON.  Context managers
# replace the manual open()/close() pair.
with open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.json')), 'w') as json_f:
    json_f.write(json.dumps(best_run))

# Mirror the same parameters as key/value rows in a CSV.  The original
# leaked this handle (the open() result was never closed); per the csv
# module docs the file should also be opened with newline=''.
with open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.csv')), 'w', newline='') as csv_f:
    w = csv.writer(csv_f)
    for key, val in best_run.items():
        w.writerow([key, val])

# Save the best-performing Keras model in HDF5 format.
best_model.save(os.path.abspath(os.path.join(save_dir, 'vox_fluoro_img_stnd_hyperas' + '_' + 'best_model_hyperas.h5')))
| Python |
3D | john-drago/fluoro | code/hyperparameter/vox_fluoro/vox_fluoro_res_talos_test/vox_fluoro_res_talos_test.py | .py | 60,564 | 1,137 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
import talos
# Experiment with residual (skip-connection) network blocks.
# The run is named after this script file: sys.argv[0] minus the ".py" suffix.
expr_name = sys.argv[0][:-3]
expr_no = '1'
_param_root = os.path.expanduser('~/fluoro/code/hyperparameter/vox_fluoro')
save_dir = os.path.abspath(os.path.join(_param_root, expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def data_comp(num_of_samples=None):
    """Load train/validation arrays for the talos search and record the split.

    Draws a test/train split over the full dataset (optionally subsampled to
    `num_of_samples`), then splits the training side again into train and
    validation.  The three index lists are pickled to
    <save_dir>/<expr_name>_hist_objects_<expr_no>.pkl for reproducibility.

    Parameters:
        num_of_samples -- if not None, restrict the initial shuffle to this
            many samples; otherwise use every sample.

    Returns:
        (vox_mat_train, vox_mat_val, image_mat_train, image_mat_val,
         cali_mat_train, cali_mat_val, label_mat_train, label_mat_val)
    """
    def split_train_test(shape, num_of_samples=None, ratio=0.2):
        # Shuffle [0, shape) (optionally subsampled) and split off the
        # leading `ratio` fraction.
        if num_of_samples is None:
            shuffled_indices = np.random.choice(shape, size=shape, replace=False)
        else:
            shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
        # BUG FIX: honor the `ratio` parameter -- it was hard-coded to 0.2,
        # silently ignoring any caller-supplied value.  The default is
        # unchanged, so existing calls behave identically.
        test_set_size = int(len(shuffled_indices) * ratio)
        test_indx = shuffled_indices[:test_set_size]
        train_indx = shuffled_indices[test_set_size:]
        return test_indx, train_indx

    vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
    vox_init = vox_file['vox_dset']
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    label_init = label_file['labels_dset']
    cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    cali_init = cali_file['cali_len3_rot']

    # First split: test vs. (train + val).  Second split: val vs. train,
    # expressed as positions into train_sup_indxs and mapped back.
    test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
    val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
    val_indxs = train_sup_indxs[val_indxs]
    train_indxs = train_sup_indxs[train_indxs]
    test_indxs = sorted(list(test_indxs))
    val_indxs = sorted(list(val_indxs))
    train_indxs = sorted(list(train_indxs))

    var_dict = {}
    var_dict['test_indxs'] = test_indxs
    var_dict['val_indxs'] = val_indxs
    var_dict['train_indxs'] = train_indxs

    # Load each dataset fully, carve out val/train subsets, and close the
    # file as soon as its arrays have been extracted.
    vox_mat_train = vox_init[:]
    vox_mat_val = vox_mat_train[val_indxs]
    vox_mat_train = vox_mat_train[train_indxs]
    vox_file.close()
    image_mat_train = image_init[:]
    image_mat_val = image_mat_train[val_indxs]
    image_mat_train = image_mat_train[train_indxs]
    image_file.close()
    cali_mat_train = cali_init[:]
    cali_mat_val = cali_mat_train[val_indxs]
    cali_mat_train = cali_mat_train[train_indxs]
    cali_file.close()
    label_mat_train = label_init[:]
    label_mat_val = label_mat_train[val_indxs]
    label_mat_train = label_mat_train[train_indxs]
    label_file.close()

    # Persist the index split; `with` guarantees the handle is closed even
    # if pickling fails (the original held the file open across all loads).
    with open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb') as hist_file:
        pickle.dump(var_dict, hist_file)

    return vox_mat_train, vox_mat_val, image_mat_train, image_mat_val, cali_mat_train, cali_mat_val, label_mat_train, label_mat_val
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each label dimension scaled by its training-set variance.

    Reads the per-dimension variance vector from
    ~/fluoro/data/compilation/labels_stats.h5py and computes
    mean(square(y_pred - y_true) / var).

    The variance is loaded once and cached on the function object: this loss
    runs for every batch, and the original reopened the HDF5 file on every
    call.  (Assumes the stats file does not change during a run.)
    """
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        var_v = stats_file['var'][:]
        stats_file.close()
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
def cust_mae_normalized(y_true, y_pred):
    """Mean absolute error with each label dimension scaled by its std.

    Reads the per-dimension standard deviation from
    ~/fluoro/data/compilation/labels_stats.h5py and computes
    mean(abs((y_true - y_pred) / std)).

    The std vector is loaded once and cached on the function object
    (matching cust_mean_squared_error_var): this metric runs per batch, and
    the original reopened the HDF5 file on every call.
    """
    std_v = getattr(cust_mae_normalized, '_std_v', None)
    if std_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        std_v = stats_file['std'][:]
        stats_file.close()
        cust_mae_normalized._std_v = std_v
    return tf.keras.backend.mean(tf.keras.backend.abs((y_true - y_pred) / std_v))
# -----------------------------------------------------------------
# Talos hyperparameter search space.  Every value is a LIST of candidate
# settings (talos samples one element per key per round); most lists here
# hold a single element, pinning that setting while keeping the dict in the
# shape talos expects.  Keys mirror the layer names used in fluoro_model:
# 'v_*' = voxel 3D-conv branch, unprefixed conv = fluoro 2D-conv towers,
# 'comb_*' = post-concatenation head, 'cali_*' = calibration branch.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': [None],
    'v_res_act_fn': ['elu'],
    'v_conv_0_filters': [30],
    'v_conv_0_kernel': [9],
    'v_conv_0_strides_0': [2],
    'v_conv_0_strides_1': [2],
    'v_conv_0_strides_2': [2],
    'v_conv_0_pad': ['same'],
    'v_spatial_drop_rate_0': [0.3],
    'v_conv_1_filters': [30],
    'v_conv_1_kernel': [5],
    'v_conv_1_strides_0': [2],
    'v_conv_1_strides_1': [2],
    'v_conv_1_strides_2': [3],
    'v_conv_1_pad': ['same'],
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': [2],
    'v_pool_0_pad': ['same'],
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': [30],
    'v_conv_2_kernel': [5],
    'v_conv_2_strides_0': [2],
    'v_conv_2_strides_1': [2],
    'v_conv_2_strides_2': [2],
    'v_conv_2_pad': ['same'],
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': [30],
    'v_conv_3_kernel': [3],
    'v_conv_3_strides_0': [1],
    'v_conv_3_strides_1': [1],
    'v_conv_3_strides_2': [1],
    'v_conv_3_pad': ['same'],
    'v_spatial_drop_rate_2': [0.3],
    'v_conv_4_filters': [30],
    'v_conv_4_kernel': [3],
    'v_conv_4_strides_0': [1],
    'v_conv_4_strides_1': [1],
    'v_conv_4_strides_2': [1],
    'v_conv_4_pad': ['same'],
    # 2
    'v_conv_5_filters': [30],
    'v_conv_5_kernel': [3],
    'v_conv_5_strides_0': [1],
    'v_conv_5_strides_1': [1],
    'v_conv_5_strides_2': [1],
    'v_conv_5_pad': ['same'],
    'v_spatial_drop_rate_3': [0.3],
    'v_conv_6_filters': [30],
    'v_conv_6_kernel': [3],
    'v_conv_6_strides_0': [1],
    'v_conv_6_strides_1': [1],
    'v_conv_6_strides_2': [1],
    'v_conv_6_pad': ['same'],
    # 3
    'v_conv_7_filters': [30],
    'v_conv_7_kernel': [3],
    'v_conv_7_strides_0': [1],
    'v_conv_7_strides_1': [1],
    'v_conv_7_strides_2': [1],
    'v_conv_7_pad': ['same'],
    'v_spatial_drop_rate_4': [0.3],
    'v_conv_8_filters': [30],
    'v_conv_8_kernel': [3],
    'v_conv_8_strides_0': [1],
    'v_conv_8_strides_1': [1],
    'v_conv_8_strides_2': [1],
    'v_conv_8_pad': ['same'],
    # 4
    'v_conv_9_filters': [40],
    'v_conv_9_kernel': [3],
    'v_conv_9_strides_0': [2],
    'v_conv_9_strides_1': [2],
    'v_conv_9_strides_2': [2],
    'v_conv_9_pad': ['same'],
    'v_spatial_drop_rate_5': [0.3],
    'v_conv_10_filters': [40],
    'v_conv_10_kernel': [3],
    'v_conv_10_strides_0': [1],
    'v_conv_10_strides_1': [1],
    'v_conv_10_strides_2': [1],
    'v_conv_10_pad': ['same'],
    'v_conv_11_filters': [40],
    'v_conv_11_kernel': [3],
    'v_conv_11_strides_0': [2],
    'v_conv_11_strides_1': [2],
    'v_conv_11_strides_2': [2],
    'v_conv_11_pad': ['same'],
    # 5
    'v_conv_12_filters': [50],
    'v_conv_12_kernel': [2],
    'v_conv_12_strides_0': [2],
    'v_conv_12_strides_1': [2],
    'v_conv_12_strides_2': [2],
    'v_conv_12_pad': ['same'],
    'v_spatial_drop_rate_6': [0.3],
    'v_conv_13_filters': [50],
    'v_conv_13_kernel': [2],
    'v_conv_13_strides_0': [1],
    'v_conv_13_strides_1': [1],
    'v_conv_13_strides_2': [1],
    'v_conv_13_pad': ['same'],
    'v_conv_14_filters': [50],
    'v_conv_14_kernel': [1],
    'v_conv_14_strides_0': [2],
    'v_conv_14_strides_1': [2],
    'v_conv_14_strides_2': [2],
    'v_conv_14_pad': ['same'],
    # 6
    'v_conv_15_filters': [50],
    'v_conv_15_kernel': [2],
    'v_conv_15_strides_0': [2],
    'v_conv_15_strides_1': [2],
    'v_conv_15_strides_2': [2],
    'v_conv_15_pad': ['same'],
    'v_spatial_drop_rate_7': [0.3],
    'v_conv_16_filters': [50],
    'v_conv_16_kernel': [2],
    'v_conv_16_strides_0': [1],
    'v_conv_16_strides_1': [1],
    'v_conv_16_strides_2': [1],
    'v_conv_16_pad': ['same'],
    'v_conv_17_filters': [50],
    'v_conv_17_kernel': [1],
    'v_conv_17_strides_0': [2],
    'v_conv_17_strides_1': [2],
    'v_conv_17_strides_2': [2],
    'v_conv_17_pad': ['same'],
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': [0.5],
    'v_conv_18_filters': [50],
    'v_conv_18_kernel': [2],
    'v_conv_18_strides_0': [1],
    'v_conv_18_strides_1': [1],
    'v_conv_18_strides_2': [1],
    'v_conv_18_pad': ['valid'],
    'dense_1_v_units': [75],
    'dense_2_v_units': [50],
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': [None],
    'res_act_fn': ['elu'],
    # Entry Fluoro Layers
    'conv_0_filters': [30],
    'conv_0_kernel': [5],
    'conv_0_strides': [2],
    'conv_0_pad': ['same'],
    'spatial_drop_rate_0': [0.3],
    'conv_1_filters': [30],
    'conv_1_kernel': [5],
    'conv_1_strides': [2],
    'conv_1_pad': ['same'],
    # ---
    # Pool After Initial Layers
    'pool_0_size': [2],
    'pool_0_pad': ['same'],
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': [30],
    'conv_2_kernel': [3],
    'conv_2_strides': [1],
    'conv_2_pad': ['same'],
    'spatial_drop_rate_1': [0.3],
    'conv_3_filters': [30],
    'conv_3_kernel': [3],
    'conv_3_strides': [1],
    'conv_3_pad': ['same'],
    # 2
    'conv_4_filters': [30],
    'conv_4_kernel': [3],
    'conv_4_strides': [1],
    'conv_4_pad': ['same'],
    'spatial_drop_rate_2': [0.3],
    'conv_5_filters': [30],
    'conv_5_kernel': [3],
    'conv_5_strides': [1],
    'conv_5_pad': ['same'],
    # 3
    'conv_6_filters': [30],
    'conv_6_kernel': [3],
    'conv_6_strides': [1],
    'conv_6_pad': ['same'],
    'spatial_drop_rate_3': [0.3],
    'conv_7_filters': [30],
    'conv_7_kernel': [3],
    'conv_7_strides': [1],
    'conv_7_pad': ['same'],
    # 4
    'conv_8_filters': [30],
    'conv_8_kernel': [3],
    'conv_8_strides': [1],
    'conv_8_pad': ['same'],
    'spatial_drop_rate_4': [0.3],
    'conv_9_filters': [30],
    'conv_9_kernel': [3],
    'conv_9_strides': [1],
    'conv_9_pad': ['same'],
    # 5
    'conv_10_filters': [30],
    'conv_10_kernel': [3],
    'conv_10_strides': [1],
    'conv_10_pad': ['same'],
    'spatial_drop_rate_5': [0.3],
    'conv_11_filters': [30],
    'conv_11_kernel': [3],
    'conv_11_strides': [1],
    'conv_11_pad': ['same'],
    # 6
    'conv_12_filters': [30],
    'conv_12_kernel': [3],
    'conv_12_strides': [1],
    'conv_12_pad': ['same'],
    'spatial_drop_rate_6': [0.3],
    'conv_13_filters': [30],
    'conv_13_kernel': [3],
    'conv_13_strides': [1],
    'conv_13_pad': ['same'],
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': [None],
    'c_res_act_fn': ['elu'],
    # 0
    'comb_0_filters': [60],
    'comb_0_kernel': [3],
    'comb_0_strides': [1],
    'comb_0_pad': ['same'],
    'comb_spatial_0': [0.3],
    'comb_1_filters': [60],
    'comb_1_kernel': [3],
    'comb_1_strides': [1],
    'comb_1_pad': ['same'],
    # 1
    'comb_2_filters': [60],
    'comb_2_kernel': [3],
    'comb_2_strides': [1],
    'comb_2_pad': ['same'],
    'comb_spatial_1': [0.3],
    'comb_3_filters': [60],
    'comb_3_kernel': [3],
    'comb_3_strides': [1],
    'comb_3_pad': ['same'],
    # 2
    'comb_4_filters': [60],
    'comb_4_kernel': [3],
    'comb_4_strides': [1],
    'comb_4_pad': ['same'],
    'comb_spatial_2': [0.3],
    'comb_5_filters': [60],
    'comb_5_kernel': [3],
    'comb_5_strides': [1],
    'comb_5_pad': ['same'],
    # 3
    'comb_6_filters': [60],
    'comb_6_kernel': [3],
    'comb_6_strides': [1],
    'comb_6_pad': ['same'],
    'comb_spatial_3': [0.3],
    'comb_7_filters': [60],
    'comb_7_kernel': [3],
    'comb_7_strides': [1],
    'comb_7_pad': ['same'],
    # 4
    'comb_8_filters': [60],
    'comb_8_kernel': [3],
    'comb_8_strides': [1],
    'comb_8_pad': ['same'],
    'comb_spatial_4': [0.3],
    'comb_9_filters': [60],
    'comb_9_kernel': [3],
    'comb_9_strides': [1],
    'comb_9_pad': ['same'],
    # 5
    'comb_10_filters': [60],
    'comb_10_kernel': [2],
    'comb_10_strides': [2],
    'comb_10_pad': ['same'],
    'comb_spatial_5': [0.3],
    'comb_11_filters': [60],
    'comb_11_kernel': [2],
    'comb_11_strides': [1],
    'comb_11_pad': ['same'],
    'comb_12_filters': [60],
    'comb_12_kernel': [1],
    'comb_12_strides': [2],
    'comb_12_pad': ['same'],
    # 6
    'comb_13_filters': [60],
    'comb_13_kernel': [2],
    'comb_13_strides': [2],
    'comb_13_pad': ['same'],
    'comb_spatial_6': [0.3],
    'comb_14_filters': [60],
    'comb_14_kernel': [2],
    'comb_14_strides': [1],
    'comb_14_pad': ['same'],
    'comb_15_filters': [60],
    'comb_15_kernel': [1],
    'comb_15_strides': [2],
    'comb_15_pad': ['same'],
    # 7
    'comb_16_filters': [60],
    'comb_16_kernel': [2],
    'comb_16_strides': [2],
    'comb_16_pad': ['same'],
    'comb_spatial_7': [0.3],
    'comb_17_filters': [60],
    'comb_17_kernel': [2],
    'comb_17_strides': [1],
    'comb_17_pad': ['same'],
    'comb_18_filters': [60],
    'comb_18_kernel': [1],
    'comb_18_strides': [2],
    'comb_18_pad': ['same'],
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': [60],
    'comb_19_kernel': [2],
    'comb_19_strides': [1],
    'comb_19_pad': ['valid'],
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': [50],
    'dense_comb_1_units': [50],
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': ['elu'],
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': [60],
    'vox_flu_units_1': [50],
    'vox_flu_units_2': [30],
    'vox_flu_units_3': [15],
    'vox_flu_units_4': [6],
    # ---
    # Cali Units
    'cali_0_units': [20],
    'cali_1_units': [20],
    'cali_2_units': [20],
    'cali_3_units': [6],
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': ['elu'],
    'top_level_intra': [None],
    # ---
    # Top Level Dense
    'top_dense_0': [6],
    'top_dense_1': [6],
    'top_dense_2': [6],
    'top_dense_3': [6],
    'top_dense_4': [6],
    'top_dense_5': [6],
    'top_dense_6': [6],
    # Main Output
    'main_output_units': [6],
    'main_output_act': ['linear'],
    # General Housekeeping
    'v_conv_regularizer': [None],
    'conv_regularizer': [None],
    'dense_regularizer_1': [None],
    'dense_regularizer_2': [None],
    'activation_fn': ['elu'],
    'kern_init': ['glorot_uniform'],
    # Optimizer is passed as a CLASS (instantiated with `learning_rate`
    # inside the model function -- TODO confirm against fluoro_model).
    'model_opt': [tf.keras.optimizers.Adam],
    'learning_rate': [0.1],
    'model_epochs': [1],
    'model_batchsize': [3],
    # Loss candidates: the custom variance-normalized MSE defined above,
    # plain MAE, and Huber -- the one key with multiple options to search.
    'model_loss': [cust_mean_squared_error_var, 'mae', tf.keras.losses.Huber(delta=0.775)],
    'model_metric': ['mse']
}
def fluoro_model(X_train, y_train, X_val, y_val, params):
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# model.summary()
# -----------------------------------------------------------------
result = model.fit(x={'input_vox': np.expand_dims(X_train[0], axis=-1), 'input_fluoro_1': np.expand_dims(X_train[1][:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(X_train[1][:, 1, :, :], axis=-1), 'input_cali': X_train[2]}, y=y_train, validation_data=([np.expand_dims(X_val[0], axis=-1), np.expand_dims(X_val[1][:, 0, :, :], axis=-1), np.expand_dims(X_val[1][:, 1, :, :], axis=-1), X_val[2]], y_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
return result, model
# Build train/validation splits from disk. NOTE(review): the argument 3 caps
# the sample count (num_of_samples=3) — looks like a smoke-test-sized run;
# confirm before launching a full experiment.
vox_mat_train, vox_mat_val, image_mat_train, image_mat_val, cali_mat_train, cali_mat_val, label_mat_train, label_mat_val = data_comp(3)
# talos passes x/y through to the model function; fluoro_model expects the
# three input arrays (voxels, fluoro images, calibration) in this list order.
X_train = [vox_mat_train, image_mat_train, cali_mat_train]
y_train = label_mat_train
X_val = [vox_mat_val, image_mat_val, cali_mat_val]
y_val = label_mat_val
print('\n' * 3)
print('Here we go:')
print('\n' * 3)
# Random hyperparameter search over `params`: talos builds and fits
# fluoro_model once per sampled configuration (fraction_limit=0.5 draws half
# of the parameter grid; clear_session frees graph memory between fits).
t = talos.Scan(x=X_train, y=y_train, x_val=X_val, y_val=y_val, params=params, model=fluoro_model, fraction_limit=0.5, random_method='uniform_mersenne', clear_session=True, print_params=True, experiment_name=expr_name + '_' + expr_no)
| Python |
3D | john-drago/fluoro | code/hyperparameter/vox_fluoro/vox_fluoro_res_talos/vox_fluoro_res_talos.py | .py | 60,792 | 1,137 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
import talos
# We are going to try to do some residual networks.
# Experiment bookkeeping: name the run after this script file (strip ".py").
# NOTE(review): sys.argv[0] can include a directory prefix depending on how
# the script is invoked; expr_name (and save_dir) inherit it — confirm the
# intended invocation is by bare filename.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All run artifacts (pickled split indices, saved model, talos logs) go here.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def data_comp(num_of_samples=None):
    """Load the voxel, fluoro-image, calibration, and label datasets and
    split them into training and validation arrays.

    The randomly chosen validation/test/train index lists are pickled into
    ``save_dir`` so the held-out test indices can be recovered after the run.
    All four HDF5 datasets are loaded fully into memory before indexing.

    Args:
        num_of_samples: Optional cap on how many samples to draw from the
            datasets; ``None`` uses every available sample.

    Returns:
        Tuple of ``(vox_mat_train, vox_mat_val, image_mat_train,
        image_mat_val, cali_mat_train, cali_mat_val, label_mat_train,
        label_mat_val)``.
    """
    def split_train_test(shape, num_of_samples=None, ratio=0.2):
        """Shuffle ``shape`` indices (or a ``num_of_samples`` subset) and
        split off the first ``ratio`` fraction as the held-out partition."""
        if num_of_samples is None:
            shuffled_indices = np.random.choice(shape, size=shape, replace=False)
        else:
            shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
        # BUG FIX: previously hard-coded 0.2 here, silently ignoring `ratio`.
        test_set_size = int(len(shuffled_indices) * ratio)
        test_indx = shuffled_indices[:test_set_size]
        train_indx = shuffled_indices[test_set_size:]
        return test_indx, train_indx

    vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
    vox_init = vox_file['vox_dset']
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    label_init = label_file['labels_dset']
    cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    cali_init = cali_file['cali_len3_rot']

    # First split carves out the test set; second split carves validation out
    # of the remaining training indices (indices into train_sup_indxs, so map
    # them back through train_sup_indxs below).
    test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
    val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
    val_indxs = train_sup_indxs[val_indxs]
    train_indxs = train_sup_indxs[train_indxs]

    # h5py fancy indexing requires sorted index lists.
    test_indxs = sorted(list(test_indxs))
    val_indxs = sorted(list(val_indxs))
    train_indxs = sorted(list(train_indxs))

    hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
    var_dict = {}
    var_dict['test_indxs'] = test_indxs
    var_dict['val_indxs'] = val_indxs
    var_dict['train_indxs'] = train_indxs

    # Load each dataset fully, then index in memory; close files promptly.
    vox_mat_train = vox_init[:]
    vox_mat_val = vox_mat_train[val_indxs]
    vox_mat_train = vox_mat_train[train_indxs]
    vox_file.close()

    image_mat_train = image_init[:]
    image_mat_val = image_mat_train[val_indxs]
    image_mat_train = image_mat_train[train_indxs]
    image_file.close()

    cali_mat_train = cali_init[:]
    cali_mat_val = cali_mat_train[val_indxs]
    cali_mat_train = cali_mat_train[train_indxs]
    cali_file.close()

    label_mat_train = label_init[:]
    label_mat_val = label_mat_train[val_indxs]
    label_mat_train = label_mat_train[train_indxs]
    label_file.close()

    # Persist the split so the untouched test set can be reconstructed later.
    pickle.dump(var_dict, hist_file)
    hist_file.close()

    return vox_mat_train, vox_mat_val, image_mat_train, image_mat_val, cali_mat_train, cali_mat_val, label_mat_train, label_mat_val
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-normalized mean squared error.

    Each output dimension's squared error is divided by that dimension's
    precomputed variance (read from the labels stats file), presumably so the
    six pose components — which live on different scales — contribute
    comparably to the loss. TODO confirm the 'var' dataset is per-dimension
    training-set variance.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
    # Only the variance is needed here (mean/std live in the same file).
    var_v = stats_file['var'][:]
    stats_file.close()
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
def cust_mae_normalized(y_true, y_pred):
    """Mean absolute error with each label dimension divided by its
    precomputed standard deviation (read from the labels stats file)."""
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    stats_file = h5py.File(stats_path, 'r')
    std_v = stats_file['std'][:]
    stats_file.close()
    normalized_err = (y_true - y_pred) / std_v
    return tf.keras.backend.mean(tf.keras.backend.abs(normalized_err))
# -----------------------------------------------------------------
# Hyperparameter search space consumed by talos.Scan: every value is a LIST
# of candidates; talos samples one element per key to build a configuration
# passed to fluoro_model as `params`. Most lists are singletons here, so the
# effective search is over optimizer, learning rate, and loss.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': [None],
    'v_res_act_fn': ['elu'],
    'v_conv_0_filters': [30],
    'v_conv_0_kernel': [9],
    'v_conv_0_strides_0': [2],
    'v_conv_0_strides_1': [2],
    'v_conv_0_strides_2': [2],
    'v_conv_0_pad': ['same'],
    'v_spatial_drop_rate_0': [0.3],
    'v_conv_1_filters': [30],
    'v_conv_1_kernel': [5],
    'v_conv_1_strides_0': [2],
    'v_conv_1_strides_1': [2],
    'v_conv_1_strides_2': [3],
    'v_conv_1_pad': ['same'],
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': [2],
    'v_pool_0_pad': ['same'],
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': [30],
    'v_conv_2_kernel': [5],
    'v_conv_2_strides_0': [2],
    'v_conv_2_strides_1': [2],
    'v_conv_2_strides_2': [2],
    'v_conv_2_pad': ['same'],
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': [30],
    'v_conv_3_kernel': [3],
    'v_conv_3_strides_0': [1],
    'v_conv_3_strides_1': [1],
    'v_conv_3_strides_2': [1],
    'v_conv_3_pad': ['same'],
    'v_spatial_drop_rate_2': [0.3],
    'v_conv_4_filters': [30],
    'v_conv_4_kernel': [3],
    'v_conv_4_strides_0': [1],
    'v_conv_4_strides_1': [1],
    'v_conv_4_strides_2': [1],
    'v_conv_4_pad': ['same'],
    # 2
    'v_conv_5_filters': [30],
    'v_conv_5_kernel': [3],
    'v_conv_5_strides_0': [1],
    'v_conv_5_strides_1': [1],
    'v_conv_5_strides_2': [1],
    'v_conv_5_pad': ['same'],
    'v_spatial_drop_rate_3': [0.3],
    'v_conv_6_filters': [30],
    'v_conv_6_kernel': [3],
    'v_conv_6_strides_0': [1],
    'v_conv_6_strides_1': [1],
    'v_conv_6_strides_2': [1],
    'v_conv_6_pad': ['same'],
    # 3
    'v_conv_7_filters': [30],
    'v_conv_7_kernel': [3],
    'v_conv_7_strides_0': [1],
    'v_conv_7_strides_1': [1],
    'v_conv_7_strides_2': [1],
    'v_conv_7_pad': ['same'],
    'v_spatial_drop_rate_4': [0.3],
    'v_conv_8_filters': [30],
    'v_conv_8_kernel': [3],
    'v_conv_8_strides_0': [1],
    'v_conv_8_strides_1': [1],
    'v_conv_8_strides_2': [1],
    'v_conv_8_pad': ['same'],
    # 4
    'v_conv_9_filters': [40],
    'v_conv_9_kernel': [3],
    'v_conv_9_strides_0': [2],
    'v_conv_9_strides_1': [2],
    'v_conv_9_strides_2': [2],
    'v_conv_9_pad': ['same'],
    'v_spatial_drop_rate_5': [0.3],
    'v_conv_10_filters': [40],
    'v_conv_10_kernel': [3],
    'v_conv_10_strides_0': [1],
    'v_conv_10_strides_1': [1],
    'v_conv_10_strides_2': [1],
    'v_conv_10_pad': ['same'],
    'v_conv_11_filters': [40],
    'v_conv_11_kernel': [3],
    'v_conv_11_strides_0': [2],
    'v_conv_11_strides_1': [2],
    'v_conv_11_strides_2': [2],
    'v_conv_11_pad': ['same'],
    # 5
    'v_conv_12_filters': [50],
    'v_conv_12_kernel': [2],
    'v_conv_12_strides_0': [2],
    'v_conv_12_strides_1': [2],
    'v_conv_12_strides_2': [2],
    'v_conv_12_pad': ['same'],
    'v_spatial_drop_rate_6': [0.3],
    'v_conv_13_filters': [50],
    'v_conv_13_kernel': [2],
    'v_conv_13_strides_0': [1],
    'v_conv_13_strides_1': [1],
    'v_conv_13_strides_2': [1],
    'v_conv_13_pad': ['same'],
    'v_conv_14_filters': [50],
    'v_conv_14_kernel': [1],
    'v_conv_14_strides_0': [2],
    'v_conv_14_strides_1': [2],
    'v_conv_14_strides_2': [2],
    'v_conv_14_pad': ['same'],
    # 6
    'v_conv_15_filters': [50],
    'v_conv_15_kernel': [2],
    'v_conv_15_strides_0': [2],
    'v_conv_15_strides_1': [2],
    'v_conv_15_strides_2': [2],
    'v_conv_15_pad': ['same'],
    'v_spatial_drop_rate_7': [0.3],
    'v_conv_16_filters': [50],
    'v_conv_16_kernel': [2],
    'v_conv_16_strides_0': [1],
    'v_conv_16_strides_1': [1],
    'v_conv_16_strides_2': [1],
    'v_conv_16_pad': ['same'],
    'v_conv_17_filters': [50],
    'v_conv_17_kernel': [1],
    'v_conv_17_strides_0': [2],
    'v_conv_17_strides_1': [2],
    'v_conv_17_strides_2': [2],
    'v_conv_17_pad': ['same'],
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': [0.5],
    'v_conv_18_filters': [50],
    'v_conv_18_kernel': [2],
    'v_conv_18_strides_0': [1],
    'v_conv_18_strides_1': [1],
    'v_conv_18_strides_2': [1],
    'v_conv_18_pad': ['valid'],
    'dense_1_v_units': [75],
    'dense_2_v_units': [50],
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': [None],
    'res_act_fn': ['elu'],
    # Entry Fluoro Layers
    'conv_0_filters': [30],
    'conv_0_kernel': [5],
    'conv_0_strides': [2],
    'conv_0_pad': ['same'],
    'spatial_drop_rate_0': [0.3],
    'conv_1_filters': [30],
    'conv_1_kernel': [5],
    'conv_1_strides': [2],
    'conv_1_pad': ['same'],
    # ---
    # Pool After Initial Layers
    'pool_0_size': [2],
    'pool_0_pad': ['same'],
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': [30],
    'conv_2_kernel': [3],
    'conv_2_strides': [1],
    'conv_2_pad': ['same'],
    'spatial_drop_rate_1': [0.3],
    'conv_3_filters': [30],
    'conv_3_kernel': [3],
    'conv_3_strides': [1],
    'conv_3_pad': ['same'],
    # 2
    'conv_4_filters': [30],
    'conv_4_kernel': [3],
    'conv_4_strides': [1],
    'conv_4_pad': ['same'],
    'spatial_drop_rate_2': [0.3],
    'conv_5_filters': [30],
    'conv_5_kernel': [3],
    'conv_5_strides': [1],
    'conv_5_pad': ['same'],
    # 3
    'conv_6_filters': [30],
    'conv_6_kernel': [3],
    'conv_6_strides': [1],
    'conv_6_pad': ['same'],
    'spatial_drop_rate_3': [0.3],
    'conv_7_filters': [30],
    'conv_7_kernel': [3],
    'conv_7_strides': [1],
    'conv_7_pad': ['same'],
    # 4
    'conv_8_filters': [30],
    'conv_8_kernel': [3],
    'conv_8_strides': [1],
    'conv_8_pad': ['same'],
    'spatial_drop_rate_4': [0.3],
    'conv_9_filters': [30],
    'conv_9_kernel': [3],
    'conv_9_strides': [1],
    'conv_9_pad': ['same'],
    # 5
    'conv_10_filters': [30],
    'conv_10_kernel': [3],
    'conv_10_strides': [1],
    'conv_10_pad': ['same'],
    'spatial_drop_rate_5': [0.3],
    'conv_11_filters': [30],
    'conv_11_kernel': [3],
    'conv_11_strides': [1],
    'conv_11_pad': ['same'],
    # 6
    'conv_12_filters': [30],
    'conv_12_kernel': [3],
    'conv_12_strides': [1],
    'conv_12_pad': ['same'],
    'spatial_drop_rate_6': [0.3],
    'conv_13_filters': [30],
    'conv_13_kernel': [3],
    'conv_13_strides': [1],
    'conv_13_pad': ['same'],
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': [None],
    'c_res_act_fn': ['elu'],
    # 0
    'comb_0_filters': [60],
    'comb_0_kernel': [3],
    'comb_0_strides': [1],
    'comb_0_pad': ['same'],
    'comb_spatial_0': [0.3],
    'comb_1_filters': [60],
    'comb_1_kernel': [3],
    'comb_1_strides': [1],
    'comb_1_pad': ['same'],
    # 1
    'comb_2_filters': [60],
    'comb_2_kernel': [3],
    'comb_2_strides': [1],
    'comb_2_pad': ['same'],
    'comb_spatial_1': [0.3],
    'comb_3_filters': [60],
    'comb_3_kernel': [3],
    'comb_3_strides': [1],
    'comb_3_pad': ['same'],
    # 2
    'comb_4_filters': [60],
    'comb_4_kernel': [3],
    'comb_4_strides': [1],
    'comb_4_pad': ['same'],
    'comb_spatial_2': [0.3],
    'comb_5_filters': [60],
    'comb_5_kernel': [3],
    'comb_5_strides': [1],
    'comb_5_pad': ['same'],
    # 3
    'comb_6_filters': [60],
    'comb_6_kernel': [3],
    'comb_6_strides': [1],
    'comb_6_pad': ['same'],
    'comb_spatial_3': [0.3],
    'comb_7_filters': [60],
    'comb_7_kernel': [3],
    'comb_7_strides': [1],
    'comb_7_pad': ['same'],
    # 4
    'comb_8_filters': [60],
    'comb_8_kernel': [3],
    'comb_8_strides': [1],
    'comb_8_pad': ['same'],
    'comb_spatial_4': [0.3],
    'comb_9_filters': [60],
    'comb_9_kernel': [3],
    'comb_9_strides': [1],
    'comb_9_pad': ['same'],
    # 5
    'comb_10_filters': [60],
    'comb_10_kernel': [2],
    'comb_10_strides': [2],
    'comb_10_pad': ['same'],
    'comb_spatial_5': [0.3],
    'comb_11_filters': [60],
    'comb_11_kernel': [2],
    'comb_11_strides': [1],
    'comb_11_pad': ['same'],
    'comb_12_filters': [60],
    'comb_12_kernel': [1],
    'comb_12_strides': [2],
    'comb_12_pad': ['same'],
    # 6
    'comb_13_filters': [60],
    'comb_13_kernel': [2],
    'comb_13_strides': [2],
    'comb_13_pad': ['same'],
    'comb_spatial_6': [0.3],
    'comb_14_filters': [60],
    'comb_14_kernel': [2],
    'comb_14_strides': [1],
    'comb_14_pad': ['same'],
    'comb_15_filters': [60],
    'comb_15_kernel': [1],
    'comb_15_strides': [2],
    'comb_15_pad': ['same'],
    # 7
    'comb_16_filters': [60],
    'comb_16_kernel': [2],
    'comb_16_strides': [2],
    'comb_16_pad': ['same'],
    'comb_spatial_7': [0.3],
    'comb_17_filters': [60],
    'comb_17_kernel': [2],
    'comb_17_strides': [1],
    'comb_17_pad': ['same'],
    'comb_18_filters': [60],
    'comb_18_kernel': [1],
    'comb_18_strides': [2],
    'comb_18_pad': ['same'],
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': [60],
    'comb_19_kernel': [2],
    'comb_19_strides': [1],
    'comb_19_pad': ['valid'],
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': [50],
    'dense_comb_1_units': [50],
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': ['elu'],
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': [60],
    'vox_flu_units_1': [50],
    'vox_flu_units_2': [30],
    'vox_flu_units_3': [15],
    'vox_flu_units_4': [6],
    # ---
    # Cali Units
    'cali_0_units': [20],
    'cali_1_units': [20],
    'cali_2_units': [20],
    'cali_3_units': [6],
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': ['elu'],
    'top_level_intra': [None],
    # ---
    # Top Level Dense
    'top_dense_0': [6],
    'top_dense_1': [6],
    'top_dense_2': [6],
    'top_dense_3': [6],
    'top_dense_4': [6],
    'top_dense_5': [6],
    'top_dense_6': [6],
    # Main Output
    'main_output_units': [6],
    'main_output_act': ['linear'],
    # General Housekeeping
    'v_conv_regularizer': [None],
    'conv_regularizer': [None],
    'dense_regularizer_1': [None],
    'dense_regularizer_2': [None],
    'activation_fn': ['elu'],
    'kern_init': ['he_uniform'],
    # The real search dimensions: optimizer class, learning rate, and loss
    # (custom normalized losses plus standard Keras losses).
    'model_opt': [tf.keras.optimizers.Adam, tf.keras.optimizers.Nadam, tf.keras.optimizers.Adadelta, tf.keras.optimizers.Adagrad, tf.keras.optimizers.Adamax],
    'learning_rate': [0.0001, 0.001, 0.01, 0.1],
    'model_epochs': [40],
    'model_batchsize': [9],
    'model_loss': [cust_mean_squared_error_var, cust_mae_normalized, 'mse', 'mae', 'mean_absolute_percentage_error', 'mean_squared_logarithmic_error', tf.keras.losses.Huber(delta=0.775)],
    'model_metric': ['mae']
}
def fluoro_model(X_train, y_train, X_val, y_val, params):
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# model.summary()
# -----------------------------------------------------------------
result = model.fit(x={'input_vox': np.expand_dims(X_train[0], axis=-1), 'input_fluoro_1': np.expand_dims(X_train[1][:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(X_train[1][:, 1, :, :], axis=-1), 'input_cali': X_train[2]}, y=y_train, validation_data=([np.expand_dims(X_val[0], axis=-1), np.expand_dims(X_val[1][:, 0, :, :], axis=-1), np.expand_dims(X_val[1][:, 1, :, :], axis=-1), X_val[2]], y_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
return result, model
# ---------------------------------------------------------------------------
# Top-level driver: load the data split, then launch the talos hyperparameter
# scan over `fluoro_model`.
# NOTE: X_train / y_train / X_val / y_val are intentionally module-level —
# fluoro_model reads them as globals (hyperas/talos convention), so these
# names must not change.
# ---------------------------------------------------------------------------
# data_comp() returns paired train/validation arrays for the four modalities:
# voxel volumes, fluoro images, calibration vectors, and pose labels.
vox_mat_train, vox_mat_val, image_mat_train, image_mat_val, cali_mat_train, cali_mat_val, label_mat_train, label_mat_val = data_comp()
# Inputs are grouped as [voxels, images, calibration]; labels are the targets.
X_train = [vox_mat_train, image_mat_train, cali_mat_train]
y_train = label_mat_train
X_val = [vox_mat_val, image_mat_val, cali_mat_val]
y_val = label_mat_val
print('\n' * 3)
print('Here we go:')
print('\n' * 3)
# Random search over `params`: sample 35% of the grid with the Mersenne
# uniform sampler, clearing the Keras session between builds to free memory.
# `experiment_name` tags the talos log files for this run.
t = talos.Scan(x=X_train, y=y_train, x_val=X_val, y_val=y_val, params=params, model=fluoro_model, fraction_limit=0.35, random_method='uniform_mersenne', clear_session=True, print_params=True, experiment_name=expr_name + '_' + expr_no)
| Python |
3D | john-drago/fluoro | code/hyperparameter/vox_fluoro/vox_fluoro_res_hyperas/vox_fluoro_res_hyperas.py | .py | 42,378 | 704 | import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import json
import csv
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# Experiment bookkeeping: run number plus the directory that will hold the
# saved models for this hyperparameter run.
expr_no = '1'

# Resolve ~/fluoro/code/hyperparameter/vox_fluoro/vox_fluoro_res_hyperas to an
# absolute path and make sure it exists (no error if it already does).
_hyper_base = os.path.expanduser('~/fluoro/code/hyperparameter/vox_fluoro')
save_dir = os.path.abspath(os.path.join(_hyper_base, 'vox_fluoro_res_hyperas'))
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None, num_of_samples=5):
    """Load voxel, fluoro-image, calibration, and label datasets and return
    a randomly sampled training subset of each.

    Parameters
    ----------
    first_indx, last_indx :
        Unused; kept only for backward compatibility with existing callers.
        # NOTE(review): consider deleting once all call sites are audited.
    num_of_samples : int or None
        Number of examples to draw without replacement. ``None`` shuffles
        and uses the entire dataset.

    Returns
    -------
    tuple of numpy.ndarray
        ``(vox_mat_train, image_mat_train, cali_mat_train, label_mat_train)``,
        all subset by the same sorted training indices.
    """
    vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
    vox_init = vox_file['vox_dset']
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    label_init = label_file['labels_dset']
    cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    cali_init = cali_file['cali_len3_rot']

    def split_train_test(shape, num_of_samples=5, ratio=0.2):
        # Shuffle all indices; optionally subsample `num_of_samples` of them.
        if num_of_samples is None:
            shuffled_indices = np.random.choice(shape, size=shape, replace=False)
        else:
            shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
        # BUG FIX: honor the ``ratio`` argument — the original hard-coded 0.2
        # here, making the parameter dead.
        test_set_size = int(len(shuffled_indices) * ratio)
        test_indx = shuffled_indices[:test_set_size]
        train_indx = shuffled_indices[test_set_size:]
        return test_indx, train_indx

    # BUG FIX: forward the caller's ``num_of_samples`` — the original passed a
    # hard-coded 5, silently ignoring the outer argument.
    test_indxs, train_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
    # Indices are sorted because h5py-style fancy indexing expects increasing
    # order; here the datasets are first read fully into memory, subset, and
    # the file handles closed as soon as each array is extracted.
    test_indxs = sorted(list(test_indxs))
    train_indxs = sorted(list(train_indxs))
    vox_mat_train = vox_init[:]
    vox_mat_train = vox_mat_train[train_indxs]
    vox_file.close()
    image_mat_train = image_init[:]
    image_mat_train = image_mat_train[train_indxs]
    image_file.close()
    cali_mat_train = cali_init[:]
    cali_mat_train = cali_mat_train[train_indxs]
    cali_file.close()
    label_mat_train = label_init[:]
    label_mat_train = label_mat_train[train_indxs]
    label_file.close()
    return vox_mat_train, image_mat_train, cali_mat_train, label_mat_train
def fluoro_model(vox_mat_train, image_mat_train, cali_mat_train, label_mat_train):
v_intra_act_fn = None
v_res_act_fn = 'elu'
v_conv_0_filters = 30
v_conv_0_kernel = 9
v_conv_0_strides_0 = 2
v_conv_0_strides_1 = 2
v_conv_0_strides_2 = 2
v_conv_0_pad = 'same'
v_spatial_drop_rate_0 = 0.3
v_conv_1_filters = 30
v_conv_1_kernel = 5
v_conv_1_strides_0 = 2
v_conv_1_strides_1 = 2
v_conv_1_strides_2 = 3
v_conv_1_pad = 'same'
v_pool_0_size = 2
v_pool_0_pad = 'same'
v_conv_2_filters = 30
v_conv_2_kernel = 5
v_conv_2_strides_0 = 2
v_conv_2_strides_1 = 2
v_conv_2_strides_2 = 2
v_conv_2_pad = 'same'
v_conv_5_filters = 30
v_conv_5_kernel = 3
v_conv_5_strides_0 = 1
v_conv_5_strides_1 = 1
v_conv_5_strides_2 = 1
v_conv_5_pad = 'same'
v_spatial_drop_rate_3 = 0.3
v_conv_6_filters = 30
v_conv_6_kernel = 3
v_conv_6_strides_0 = 1
v_conv_6_strides_1 = 1
v_conv_6_strides_2 = 1
v_conv_6_pad = 'same'
v_conv_9_filters = 40
v_conv_9_kernel = 3
v_conv_9_strides_0 = 2
v_conv_9_strides_1 = 2
v_conv_9_strides_2 = 2
v_conv_9_pad = 'same'
v_spatial_drop_rate_5 = 0.3
v_conv_10_filters = 40
v_conv_10_kernel = 3
v_conv_10_strides_0 = 1
v_conv_10_strides_1 = 1
v_conv_10_strides_2 = 1
v_conv_10_pad = 'same'
v_conv_11_filters = 40
v_conv_11_kernel = 3
v_conv_11_strides_0 = 2
v_conv_11_strides_1 = 2
v_conv_11_strides_2 = 2
v_conv_11_pad = 'same'
v_spatial_drop_rate_8 = 0.5
v_conv_18_filters = 50
v_conv_18_kernel = 2
v_conv_18_strides_0 = 1
v_conv_18_strides_1 = 1
v_conv_18_strides_2 = 1
v_conv_18_pad = 'valid'
dense_1_v_units = 75
dense_2_v_units = 50
intra_act_fn = None
res_act_fn = 'elu'
conv_0_filters = 30
conv_0_kernel = 5
conv_0_strides = 2
conv_0_pad = 'same'
spatial_drop_rate_0 = 0.3
conv_1_filters = 30
conv_1_kernel = 5
conv_1_strides = 2
conv_1_pad = 'same'
pool_0_size = 2
pool_0_pad = 'same'
conv_2_filters = 30
conv_2_kernel = 3
conv_2_strides = 1
conv_2_pad = 'same'
spatial_drop_rate_1 = 0.3
conv_3_filters = 30
conv_3_kernel = 3
conv_3_strides = 1
conv_3_pad = 'same'
c_intra_act_fn = None
c_res_act_fn = 'elu'
comb_0_filters = 60
comb_0_kernel = 3
comb_0_strides = 1
comb_0_pad = 'same'
comb_spatial_0 = 0.3
comb_1_filters = 60
comb_1_kernel = 3
comb_1_strides = 1
comb_1_pad = 'same'
comb_10_filters = 60
comb_10_kernel = 2
comb_10_strides = 2
comb_10_pad = 'same'
comb_spatial_5 = 0.3
comb_11_filters = 60
comb_11_kernel = 2
comb_11_strides = 1
comb_11_pad = 'same'
comb_12_filters = 60
comb_12_kernel = 1
comb_12_strides = 2
comb_12_pad = 'same'
comb_19_filters = 60
comb_19_kernel = 2
comb_19_strides = 1
comb_19_pad = 'valid'
dense_comb_0_units = 50
dense_comb_1_units = 50
flu_vox_act_fn = 'elu'
vox_flu_units_0 = 60
vox_flu_units_1 = 50
vox_flu_units_2 = 30
vox_flu_units_3 = 15
vox_flu_units_4 = 6
cali_0_units = 20
cali_1_units = 20
cali_2_units = 20
cali_3_units = 6
top_level_act_fn = 'elu'
top_level_intra = None
top_dense_0 = 6
top_dense_1 = 6
top_dense_2 = 6
top_dense_3 = 6
top_dense_4 = 6
top_dense_5 = 6
top_dense_6 = 6
main_output_units = 6
main_output_act = 'linear'
v_conv_regularizer = None
conv_regularizer = None
dense_regularizer_1 = None
dense_regularizer_2 = None
activation_fn = 'elu'
kern_init = 'glorot_uniform'
model_opt = 'adam'
learning_rate = 0.001
model_epochs = {{choice([1, 2])}}
model_batchsize = 3
model_loss = 'mse'
model_metric = 'mse'
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
v_conv_0 = tf.keras.layers.Conv3D(filters=v_conv_0_filters, kernel_size=v_conv_0_kernel, strides=(v_conv_0_strides_0, v_conv_0_strides_1, v_conv_0_strides_2), padding=v_conv_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_0)(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=v_conv_1_filters, kernel_size=v_conv_1_kernel, strides=(v_conv_1_strides_0, v_conv_1_strides_1, v_conv_1_strides_2), padding=v_conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_spat_0)
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_0_size, padding=v_pool_0_pad, data_format=channel_order)(v_conv_1)
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=v_conv_2_filters, kernel_size=v_conv_2_kernel, strides=(v_conv_2_strides_0, v_conv_2_strides_1, v_conv_2_strides_2), padding=v_conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
v_conv_5 = tf.keras.layers.Conv3D(filters=v_conv_5_filters, kernel_size=v_conv_5_kernel, strides=(v_conv_5_strides_0, v_conv_5_strides_1, v_conv_5_strides_2), padding=v_conv_5_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(bn_2)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_3)(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=v_conv_6_filters, kernel_size=v_conv_6_kernel, strides=(v_conv_6_strides_0, v_conv_6_strides_1, v_conv_6_strides_2), padding=v_conv_6_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, bn_2])
v_act_1 = tf.keras.layers.Activation(activation=v_res_act_fn)(v_add_1)
v_conv_5 = tf.keras.layers.Conv3D(filters=v_conv_5_filters, kernel_size=v_conv_5_kernel, strides=(v_conv_5_strides_0, v_conv_5_strides_1, v_conv_5_strides_2), padding=v_conv_5_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_act_1)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_3)(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=v_conv_6_filters, kernel_size=v_conv_6_kernel, strides=(v_conv_6_strides_0, v_conv_6_strides_1, v_conv_6_strides_2), padding=v_conv_6_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_1])
v_act_1 = tf.keras.layers.Activation(activation=v_res_act_fn)(v_add_1)
v_conv_5 = tf.keras.layers.Conv3D(filters=v_conv_5_filters, kernel_size=v_conv_5_kernel, strides=(v_conv_5_strides_0, v_conv_5_strides_1, v_conv_5_strides_2), padding=v_conv_5_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_act_1)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_3)(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=v_conv_6_filters, kernel_size=v_conv_6_kernel, strides=(v_conv_6_strides_0, v_conv_6_strides_1, v_conv_6_strides_2), padding=v_conv_6_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_1])
v_act_1 = tf.keras.layers.Activation(activation=v_res_act_fn)(v_add_1)
v_conv_9 = tf.keras.layers.Conv3D(filters=v_conv_9_filters, kernel_size=v_conv_9_kernel, strides=(v_conv_9_strides_0, v_conv_9_strides_1, v_conv_9_strides_2), padding=v_conv_9_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_act_1)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_5)(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=v_conv_10_filters, kernel_size=v_conv_10_kernel, strides=(v_conv_10_strides_0, v_conv_10_strides_1, v_conv_10_strides_2), padding=v_conv_10_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=v_conv_11_filters, kernel_size=v_conv_11_kernel, strides=(v_conv_11_strides_0, v_conv_11_strides_1, v_conv_11_strides_2), padding=v_conv_11_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_act_1)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=v_res_act_fn)(v_add_3)
v_conv_9 = tf.keras.layers.Conv3D(filters=v_conv_9_filters, kernel_size=v_conv_9_kernel, strides=(v_conv_9_strides_0, v_conv_9_strides_1, v_conv_9_strides_2), padding=v_conv_9_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_act_3)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_5)(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=v_conv_10_filters, kernel_size=v_conv_10_kernel, strides=(v_conv_10_strides_0, v_conv_10_strides_1, v_conv_10_strides_2), padding=v_conv_10_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=v_conv_11_filters, kernel_size=v_conv_11_kernel, strides=(v_conv_11_strides_0, v_conv_11_strides_1, v_conv_11_strides_2), padding=v_conv_11_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_act_3)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=v_res_act_fn)(v_add_3)
v_conv_9 = tf.keras.layers.Conv3D(filters=v_conv_9_filters, kernel_size=v_conv_9_kernel, strides=(v_conv_9_strides_0, v_conv_9_strides_1, v_conv_9_strides_2), padding=v_conv_9_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_act_3)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_5)(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=v_conv_10_filters, kernel_size=v_conv_10_kernel, strides=(v_conv_10_strides_0, v_conv_10_strides_1, v_conv_10_strides_2), padding=v_conv_10_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=v_conv_11_filters, kernel_size=v_conv_11_kernel, strides=(v_conv_11_strides_0, v_conv_11_strides_1, v_conv_11_strides_2), padding=v_conv_11_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_intra_act_fn)(v_act_3)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=v_res_act_fn)(v_add_3)
bn_18 = tf.keras.layers.BatchNormalization()(v_act_3)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_8)(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=v_conv_18_filters, kernel_size=v_conv_18_kernel, strides=(v_conv_18_strides_0, v_conv_18_strides_1, v_conv_18_strides_2), padding=v_conv_18_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=v_conv_regularizer)(v_spat_8)
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=dense_1_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=dense_2_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=conv_0_filters, kernel_size=conv_0_kernel, strides=conv_0_strides, padding=conv_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_0)(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0_1)
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=pool_0_size, padding=pool_0_pad)(conv_1_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0_1 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=conv_0_filters, kernel_size=conv_0_kernel, strides=conv_0_strides, padding=conv_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_0)(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0_1)
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=pool_0_size, padding=pool_0_pad)(conv_1_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0_1 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, act_0])
act_0_2 = tf.keras.layers.Activation(activation=res_act_fn)(add_0)
comb_fluoro_0 = tf.keras.layers.concatenate([act_0_1, act_0_2])
comb_0 = tf.keras.layers.Conv2D(filters=comb_0_filters, kernel_size=comb_0_kernel, strides=comb_0_strides, padding=comb_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_0)(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=comb_1_filters, kernel_size=comb_1_kernel, strides=comb_1_strides, padding=comb_1_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_0)
comb_0 = tf.keras.layers.Conv2D(filters=comb_0_filters, kernel_size=comb_0_kernel, strides=comb_0_strides, padding=comb_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_0)(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=comb_1_filters, kernel_size=comb_1_kernel, strides=comb_1_strides, padding=comb_1_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([act_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_0)
comb_0 = tf.keras.layers.Conv2D(filters=comb_0_filters, kernel_size=comb_0_kernel, strides=comb_0_strides, padding=comb_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_0)(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=comb_1_filters, kernel_size=comb_1_kernel, strides=comb_1_strides, padding=comb_1_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([act_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_0)
comb_0 = tf.keras.layers.Conv2D(filters=comb_0_filters, kernel_size=comb_0_kernel, strides=comb_0_strides, padding=comb_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_0)(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=comb_1_filters, kernel_size=comb_1_kernel, strides=comb_1_strides, padding=comb_1_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([act_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_0)
comb_0 = tf.keras.layers.Conv2D(filters=comb_0_filters, kernel_size=comb_0_kernel, strides=comb_0_strides, padding=comb_0_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_0)(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=comb_1_filters, kernel_size=comb_1_kernel, strides=comb_1_strides, padding=comb_1_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([act_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_0)
comb_10 = tf.keras.layers.Conv2D(filters=comb_10_filters, kernel_size=comb_10_kernel, strides=comb_10_strides, padding=comb_10_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_5)(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=comb_11_filters, kernel_size=comb_11_kernel, strides=comb_11_strides, padding=comb_11_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=comb_12_filters, kernel_size=comb_12_kernel, strides=comb_12_strides, padding=comb_12_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_0)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_5)
comb_10 = tf.keras.layers.Conv2D(filters=comb_10_filters, kernel_size=comb_10_kernel, strides=comb_10_strides, padding=comb_10_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_5)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_5)(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=comb_11_filters, kernel_size=comb_11_kernel, strides=comb_11_strides, padding=comb_11_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=comb_12_filters, kernel_size=comb_12_kernel, strides=comb_12_strides, padding=comb_12_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_5)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_5)
comb_10 = tf.keras.layers.Conv2D(filters=comb_10_filters, kernel_size=comb_10_kernel, strides=comb_10_strides, padding=comb_10_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_5)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=comb_spatial_5)(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=comb_11_filters, kernel_size=comb_11_kernel, strides=comb_11_strides, padding=comb_11_pad, data_format=channel_order, activation=c_intra_act_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=comb_12_filters, kernel_size=comb_12_kernel, strides=comb_12_strides, padding=comb_12_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_5)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=c_res_act_fn)(add_5)
comb_19 = tf.keras.layers.Conv2D(filters=comb_19_filters, kernel_size=comb_19_kernel, strides=comb_19_strides, padding=comb_19_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(act_5)
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=dense_comb_0_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=dense_comb_1_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=flu_vox_act_fn)(fluoro_vox_comb)
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=vox_flu_units_0, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=vox_flu_units_1, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=vox_flu_units_2, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=vox_flu_units_3, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=vox_flu_units_4, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=cali_0_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=cali_1_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=cali_2_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=cali_3_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=top_level_act_fn)(top_level_comb)
top_dense_0 = tf.keras.layers.Dense(units=top_dense_0, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=top_dense_1, activation=top_level_intra, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=top_level_act_fn)(add_0)
top_dense_2 = tf.keras.layers.Dense(units=top_dense_2, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=top_dense_3, activation=top_level_intra, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=top_level_act_fn)(add_1)
top_dense_4 = tf.keras.layers.Dense(units=top_dense_2, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=top_dense_3, activation=top_level_intra, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=top_level_act_fn)(add_2)
top_dense_6 = tf.keras.layers.Dense(units=top_dense_4, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=None)(act_2)
main_output = tf.keras.layers.Dense(units=main_output_units, activation=main_output_act, kernel_initializer=kern_init, name='main_output')(top_dense_6)
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_split=0.2, epochs=model_epochs, batch_size=model_batchsize, shuffle=True, verbose=False)
return {'loss': np.amin(result.history['loss']), 'status': STATUS_OK, 'model': model}
# Run the hyperas/hyperopt search over fluoro_model, then persist the best
# hyperparameter assignment (JSON + CSV) and the best model (HDF5) to save_dir.
best_run, best_model = optim.minimize(model=fluoro_model, data=data_comp, algo=tpe.suggest, max_evals=3, trials=Trials())

# Save the winning hyperparameter assignment as JSON.
# FIX: the original opened this file without a context manager; `with`
# guarantees the handle is flushed and closed even if serialization fails.
with open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.json')), 'w') as json_file:
    json.dump(best_run, json_file)

# Also dump the same assignment as one "key,value" row per hyperparameter.
# FIX: the original built the csv.writer around an anonymous open() whose
# handle was never closed, so the CSV could be left unflushed/truncated.
with open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.csv')), 'w') as csv_file:
    w = csv.writer(csv_file)
    for key, val in best_run.items():
        w.writerow([key, val])

# Persist the best-performing Keras model itself.
best_model.save(os.path.abspath(os.path.join(save_dir, 'vox_fluoro_img_stnd_hyperas' + '_' + 'best_model_hyperas.h5')))
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_no_bn/vox_fluoro_no_bn.py | .py | 64,042 | 1,255 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-20
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to continue normalizing the calibration inputs between -1 and 1, but we will only run the min-max scaling on the training data set.
# We likewise are going to normalize the label data set, but we will only run the function over the training data set. Moreover, we scale the label data from between -1 and 1.
# We have also removed all dropout from this model, and we will see if the model can overfit the data. We also are going to remove all batch normalization.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to use 'mse' for the loss, and Nadam for the optimizer.
# Name this experiment after the running script (extension stripped) and make
# a dedicated output directory for it under ~/fluoro/code/jupyt/vox_fluoro.
# FIX: use os.path.splitext instead of slicing off a fixed 3 characters,
# which silently mangles the name if the file is not a bare '*.py'.
expr_name = os.path.splitext(sys.argv[0])[0]
expr_no = '1'

save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)  # idempotent: re-runs reuse the directory
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension divided by the label
    variance loaded from ~/fluoro/data/compilation/labels_stats.h5py.

    Scaling the squared error by the per-dimension variance puts the output
    components on a comparable scale so no single label dominates the loss.
    """
    # PERF FIX: this loss runs for every batch, but the original reopened and
    # re-read the HDF5 stats file on every call.  Load the variance vector
    # once and cache it on the function object.
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            var_v = stats_file['var'][:]  # materialize as a NumPy array before closing
        finally:
            stats_file.close()
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary for the whole network, grouped by sub-network:
#   'v_*'      -- the 3D conv tower over the input voxel volume
#   'conv_*'   -- the 2D conv tower applied to each fluoro image
#   'comb_*'   -- conv/dense layers after the two fluoro towers are concatenated
#   'vox_flu_*'-- dense layers combining the voxel and fluoro branches
#   'cali_*'   -- dense stack over the calibration input
#   'top_*'    -- final dense regression head
# All dropout rates are 0 and all regularizers None -- dropout/regularization
# were deliberately removed for this experiment (see the file-header comments).
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'v_conv_0_filters': 30,
'v_conv_0_kernel': 11,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 7,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'drop_1_v_rate': 0,
'dense_1_v_units': 75,
'drop_2_v_rate': 0,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
'intra_act_fn': None,
'res_act_fn': 'elu',
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 30,
'conv_10_kernel': 3,
'conv_10_strides': 1,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0,
'conv_11_filters': 30,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
# 6
'conv_12_filters': 30,
'conv_12_kernel': 3,
'conv_12_strides': 1,
'conv_12_pad': 'same',
'spatial_drop_rate_6': 0,
'conv_13_filters': 30,
'conv_13_kernel': 3,
'conv_13_strides': 1,
'conv_13_pad': 'same',
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
# 0
'comb_0_filters': 60,
'comb_0_kernel': 3,
'comb_0_strides': 1,
'comb_0_pad': 'same',
'comb_spatial_0': 0,
'comb_1_filters': 60,
'comb_1_kernel': 3,
'comb_1_strides': 1,
'comb_1_pad': 'same',
# 1
'comb_2_filters': 60,
'comb_2_kernel': 3,
'comb_2_strides': 1,
'comb_2_pad': 'same',
'comb_spatial_1': 0,
'comb_3_filters': 60,
'comb_3_kernel': 3,
'comb_3_strides': 1,
'comb_3_pad': 'same',
# 2
'comb_4_filters': 60,
'comb_4_kernel': 3,
'comb_4_strides': 1,
'comb_4_pad': 'same',
'comb_spatial_2': 0,
'comb_5_filters': 60,
'comb_5_kernel': 3,
'comb_5_strides': 1,
'comb_5_pad': 'same',
# 3
'comb_6_filters': 60,
'comb_6_kernel': 3,
'comb_6_strides': 1,
'comb_6_pad': 'same',
'comb_spatial_3': 0,
'comb_7_filters': 60,
'comb_7_kernel': 3,
'comb_7_strides': 1,
'comb_7_pad': 'same',
# 4
'comb_8_filters': 60,
'comb_8_kernel': 3,
'comb_8_strides': 1,
'comb_8_pad': 'same',
'comb_spatial_4': 0,
'comb_9_filters': 60,
'comb_9_kernel': 3,
'comb_9_strides': 1,
'comb_9_pad': 'same',
# 5
'comb_10_filters': 60,
'comb_10_kernel': 2,
'comb_10_strides': 2,
'comb_10_pad': 'same',
'comb_spatial_5': 0,
'comb_11_filters': 60,
'comb_11_kernel': 2,
'comb_11_strides': 1,
'comb_11_pad': 'same',
'comb_12_filters': 60,
'comb_12_kernel': 1,
'comb_12_strides': 2,
'comb_12_pad': 'same',
# 6
'comb_13_filters': 60,
'comb_13_kernel': 2,
'comb_13_strides': 2,
'comb_13_pad': 'same',
'comb_spatial_6': 0,
'comb_14_filters': 60,
'comb_14_kernel': 2,
'comb_14_strides': 1,
'comb_14_pad': 'same',
'comb_15_filters': 60,
'comb_15_kernel': 1,
'comb_15_strides': 2,
'comb_15_pad': 'same',
# 7
'comb_16_filters': 60,
'comb_16_kernel': 2,
'comb_16_strides': 2,
'comb_16_pad': 'same',
'comb_spatial_7': 0,
'comb_17_filters': 60,
'comb_17_kernel': 2,
'comb_17_strides': 1,
'comb_17_pad': 'same',
'comb_18_filters': 60,
'comb_18_kernel': 1,
'comb_18_strides': 2,
'comb_18_pad': 'same',
# ---
# Final Convs After Fluoro
'comb_19_filters': 60,
'comb_19_kernel': 2,
'comb_19_strides': 1,
'comb_19_pad': 'valid',
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': 50,
'drop_1_comb': 0,
'dense_comb_1_units': 50,
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': 'elu',
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': 60,
'vox_flu_drop_1': 0,
'vox_flu_units_1': 50,
'vox_flu_drop_2': 0,
'vox_flu_units_2': 30,
'vox_flu_drop_3': 0,
'vox_flu_units_3': 15,
'vox_flu_units_4': 10,
# ---
# Cali Units
'cali_0_units': 20,
'drop_1_cali': 0,
'cali_1_units': 20,
'drop_2_cali': 0,
'cali_2_units': 20,
'cali_3_units': 10,
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': 'elu',
'top_level_intra': None,
# ---
# Top Level Dense
'top_drop_0': 0,
'top_dense_0': 10,
'top_dense_1': 10,
'top_dense_2': 10,
'top_drop_1': 0,
'top_dense_3': 10,
'top_dense_4': 10,
'top_drop_2': 0,
'top_dense_5': 10,
'top_dense_6': 10,
'top_drop_3': 0,
'top_dense_7': 10,
'top_dense_8': 10,
'top_drop_4': 0,
'top_dense_9': 10,
'top_dense_10': 10,
'top_drop_5': 0,
'top_dense_11': 10,
'top_dense_12': 6,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
# 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
'activation_fn': 'elu',
'kern_init': 'he_uniform',
# NOTE(review): 'model_opt' holds the optimizer *class*, not an instance --
# presumably instantiated later with 'learning_rate'; confirm at the call site.
'model_opt': tf.keras.optimizers.Nadam,
'learning_rate': 0.001,
'model_epochs': 30,
'model_batchsize': 6,
'model_loss': 'mse',
'model_metric': 'mae'
}
# -----------------------------------------------------------------
channel_order = 'channels_last'  # channel axis last for every conv layer below
img_input_shape = (128, 128, 1)  # one 128x128 single-channel fluoro image per input
vox_input_shape = (197, 162, 564, 1)  # single-channel voxel volume -- TODO confirm axis order against the data pipeline
cali_input_shape = (6,)  # six calibration scalars
# Input Layers: one voxel volume, two fluoro views, one calibration vector
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D-conv stream over input_vox: two entry convs + max-pool, one more
# conv, six residual blocks (Conv3D -> SpatialDropout3D -> Conv3D -> Add
# -> Activation), a final conv, then flatten into two dense layers.
# All hyperparameters come from the `params` dict defined above.
# Commented-out BatchNormalization lines are kept as an experiment trail.
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
# bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(v_conv_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
# bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_pool_0)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_2)
# bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
# NOTE(review): activity_regularizer=params['v_intra_act_fn'] below (and on
# every second conv of these residual blocks) passes a key that by its name
# looks like an activation function, not a regularizer. Keras accepts any
# callable/None here, so this only "works" if that params entry is None or a
# genuine regularizer -- confirm against the params dict.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
# bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([v_conv_4, v_conv_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
# bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([v_conv_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
# bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([v_conv_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
# Blocks 4-6 add a second conv branch (v_conv_11 / v_conv_14 / v_conv_17)
# off the block input instead of an identity skip -- a projection-style
# shortcut, presumably because the main branch changes shape.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(v_conv_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
# bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
# bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([v_conv_10, v_conv_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
# bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(v_conv_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
# bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
# bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([v_conv_13, v_conv_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): block 6 reuses the v_conv_12/13/14 and
# v_spatial_drop_rate_6 hyperparameter keys from block 5 (new layers,
# shared settings). Looks like a deliberate repeat of block 5's geometry,
# but could also be a copy-paste slip -- confirm intent.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
# bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(v_conv_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
# bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
# bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([v_conv_16, v_conv_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(v_act_5)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten the voxel features and reduce through two dropout+dense pairs;
# dense_2_v is this stream's output tensor, consumed further below.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
# bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
# bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D-conv stream over the first fluoroscopy frame: two entry convs +
# average-pool, then six residual blocks (Conv2D -> SpatialDropout2D ->
# Conv2D -> Add -> Activation). Output tensor is act_5_1, merged with
# the second fluoro stream below.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): block 2's second conv reuses the conv_3_* keys rather
# than having its own conv_5_* entries -- shared settings with block 1;
# confirm this is intentional.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Identical architecture applied to the second fluoroscopy frame, with
# separate (non-shared) layer weights built from the same params keys.
# NOTE(review): this section rebinds the same Python names as stream 1
# (conv_0_1, spat_0_1, act_0, ...). That is safe under the functional
# API -- the graph edges from stream 1 are already recorded -- but after
# this section only act_5_1 / act_5_2 remain addressable; consider
# distinct names (e.g. conv_0_2) for clarity.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Concatenate the two fluoro feature maps along the default (last,
# channel) axis; both streams must share spatial dimensions here.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual 2D-conv stack over the concatenated fluoro features.
# Blocks 0-4 use identity skip connections; blocks 5 and 6 switch to a
# projection-style pattern where both the main branch and a second conv
# branch (comb_12, comb_15) feed the Add -- presumably because the main
# branch changes shape there. Section continues past this point.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
# bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(comb_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
# bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, comb_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(comb_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
# bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, comb_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(comb_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
# bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, comb_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(comb_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
# bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, comb_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(comb_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
# bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, comb_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(comb_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
# bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
# comb_12 is the projection branch, taken directly from act_4.
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([comb_11, comb_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(comb_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
# bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
# comb_15 is the projection branch, taken directly from act_5.
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([comb_14, comb_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(comb_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
# bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([comb_17, comb_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
# bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(comb_flatten_1)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
# bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([dense_1_comb, dense_2_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_act)
# bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
# bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
# bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
# bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_3)
# bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
# bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
# bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
# bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(cali_2)
# bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([cali_3, vox_flu_4])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Dense residual stack: pairs of Dense layers with dropout, each pair
# Add()-merged with the previous activation (the first pair skips from
# cali_3), followed by the shared top-level activation.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
# bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
# bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_1, cali_3])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
# bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([top_dense_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
# bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([top_dense_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(top_dense_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
# bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([top_dense_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(top_dense_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
# bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([top_dense_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_dense_10)
# bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([top_dense_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): top_dense_12 reuses params['top_dense_4'] for its units --
# likely a copy-paste slip for a dedicated 'top_dense_12' key; confirm intent
# before changing, since the saved weights depend on this width.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
# Build, compile, render the architecture diagram, and print the summary.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Linearly rescale *data_set* into *feature_range*.

    When *data_min* / *data_max* are omitted they are computed from the data
    along *axis*; when supplied, the data is first clipped to those bounds
    (so out-of-range values map exactly to the range endpoints).
    Returns the rescaled array.
    """
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        # Clip below: values under the supplied floor are raised to it.
        data_set = np.maximum(data_set, data_min)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        # Clip above: values over the supplied ceiling are lowered to it.
        data_set = np.minimum(data_set, data_max)
    lo, hi = feature_range
    unit_scaled = (data_set - data_min) / (data_max - data_min)
    return unit_scaled * (hi - lo) + lo
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range ``[0, shape)`` into test/train indices.

    Args:
        shape: total number of samples available.
        num_of_samples: if given, only this many indices are drawn (without
            replacement) before splitting; otherwise all ``shape`` indices
            are used.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        ``(test_indx, train_indx)`` -- two disjoint numpy index arrays.
    """
    if num_of_samples is None:
        num_of_samples = shape
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the test-set size previously hard-coded 0.2, silently
    # ignoring the `ratio` parameter. Default ratio=0.2 preserves the
    # behavior of all existing callers.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# -----------------------------------------------------------------
# Load the compiled HDF5 datasets (voxels, two normalized fluoro image sets,
# calibration vectors, labels), split indices into test/val/train, normalize
# calibration and labels with training-set statistics, train the model, and
# persist the model plus a pickle of indices/stats/history.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
num_of_samples = None
# First split holds out the test set; second split carves validation out of
# the remaining training pool. The second call returns POSITIONS into
# train_sup_indxs, which are mapped back to absolute indices just below.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
# var_dict accumulates everything needed to reproduce/evaluate this run:
# the index split, normalization statistics, and (later) training history.
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Each dataset is slurped fully into memory, sliced into val/train, and the
# file closed. Test rows are intentionally never loaded here -- they are
# reserved for a separate evaluation run via the pickled test_indxs.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
# Calibration stats are computed on the TRAIN split only; validation data is
# normalized with the train min/max to avoid leakage.
cali_train_min = np.min(cali_mat_train, axis=0)
cali_train_max = np.max(cali_mat_train, axis=0)
cali_train_std = np.std(cali_mat_train, axis=0)
cali_train_avg = np.mean(cali_mat_train, axis=0)
var_dict['cali_train_avg'] = cali_train_avg
var_dict['cali_train_std'] = cali_train_std
var_dict['cali_train_min'] = cali_train_min
var_dict['cali_train_max'] = cali_train_max
cali_train_min_max = min_max_norm(cali_mat_train)
cali_val_min_max = min_max_norm(cali_mat_val, data_min=cali_train_min, data_max=cali_train_max)
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
# Same train-only-statistics discipline for the regression labels.
label_train_avg = np.mean(label_mat_train, axis=0)
label_train_std = np.std(label_mat_train, axis=0)
label_train_min = np.min(label_mat_train, axis=0)
label_train_max = np.max(label_mat_train, axis=0)
label_train_min_max = min_max_norm(label_mat_train, feature_range=(-1, 1))
label_val_min_max = min_max_norm(label_mat_val, feature_range=(-1, 1), data_min=label_train_min, data_max=label_train_max)
var_dict['label_train_avg'] = label_train_avg
var_dict['label_train_std'] = label_train_std
var_dict['label_train_min'] = label_train_min
var_dict['label_train_max'] = label_train_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Voxel arrays get a trailing channel axis added to match the model's
# (..., 1) input shape; inputs are passed by the Input layer names.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_train_min_max}, y=label_train_min_max, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_val_min_max], label_val_min_max), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_deeper_bn/vox_fluoro_deeper_bn.py | .py | 32,332 | 606 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating how a deeper conv net, which paradoxically has fewer parameters would fair
# No regularization
# Experiment name is derived from the script filename (sys.argv[0] minus
# the trailing '.py'); all artifacts for this run go under save_dir.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load voxel/image/calibration/label slices and split them for training.

    Reads rows ``[first_indx:last_indx]`` from each compilation HDF5 file,
    performs an 80/20 train/test split followed by an 80/20 train/val split
    (both seeded with random_state=42, matching the original behavior), and
    returns only the train-sub and validation arrays; the held-out test
    arrays are discarded here.

    Returns:
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)
    """
    def _load_slice(file_name, dset_name):
        # Load one dataset slice from the compilation directory, ensuring
        # the file handle is closed even if the read fails.
        path = os.path.expanduser('~/fluoro/data/compilation/' + file_name)
        data_file = h5py.File(path, 'r')
        try:
            return data_file[dset_name][first_indx:last_indx]
        finally:
            data_file.close()

    vox_mat = _load_slice('voxels_pad.h5py', 'vox_dset')
    image_mat = _load_slice('images.h5py', 'image_dset')
    label_mat = _load_slice('labels.h5py', 'labels_dset')
    cali_mat = _load_slice('calibration.h5py', 'cali_len3_rot')
    # First split: hold out 20% as the (unused here) test set.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Second split: carve 20% of the remaining pool off as validation.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Fixed input geometry for this experiment: channels-last layout, 128x128
# single-channel fluoro images, a padded voxel volume, and a 6-element
# calibration vector (presumably 3 translation + 3 rotation -- the dataset
# key is 'cali_len3_rot'; confirm against the data pipeline).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with the per-dimension error divided by the training-label std.

    NOTE(review): this opens and reads the stats HDF5 file on every call.
    Presumably Keras only traces the loss once at graph-build time, but if
    it is ever called eagerly this is per-step file I/O -- consider hoisting
    the std load to module level.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
    # mean_dset = stats_file['mean']
    std_dset = stats_file['std']
    # var_dset = stats_file['var']
    # mean_v = mean_dset[:]
    std_v = std_dset[:]  # materialize before closing the file
    # var_v = var_dset[:]
    stats_file.close()
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with the per-dimension squared error divided by the label variance.

    Mathematically equivalent to cust_mean_squared_error_std (dividing the
    squared error by var equals dividing the error by std before squaring).
    NOTE(review): same per-call HDF5 read as the std variant -- consider
    loading the variance once at module level.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
    # mean_dset = stats_file['mean']
    # std_dset = stats_file['std']
    var_dset = stats_file['var']
    # mean_v = mean_dset[:]
    # std_v = std_dset[:]
    var_v = var_dset[:]  # materialize before closing the file
    stats_file.close()
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Single hyperparameter dictionary for the whole experiment. Keys are
# grouped by network branch: the 3D voxel conv tower ('v_*'), the 2D fluoro
# conv tower ('conv_*' / 'dense_*_f'), the calibration dense stack, the
# top-level combiner, and general training settings. All regularizers are
# None for this run (see the "No regularization" note at the top of file).
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.3,
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 3,
    'v_conv_2_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'valid',
    'v_conv_3_filters': 40,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 2,
    'v_conv_3_strides_1': 2,
    'v_conv_3_strides_2': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 50,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_5_filters': 50,
    'v_conv_5_kernel': 2,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 50,
    'v_conv_6_kernel': 2,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 50,
    'v_conv_7_kernel': 2,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 40,
    'v_conv_8_kernel': 1,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    'dense_1_v_units': 350,
    'dense_2_v_units': 250,
    'dense_3_v_units': 250,
    'dense_4_v_units': 200,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_4_filters': 60,
    'conv_4_kernel': 3,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 3,
    'conv_5_strides': 2,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'dense_1_f_units': 120,
    'dense_2_f_units': 120,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 20,
    'dense_3_cali_units': 20,
    # Top Level Dense Units
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.RMSprop,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Input Layers
# Named inputs so model.fit can feed data by dict key.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_1 = tf.keras.layers.BatchNormalization()(v_conv_1)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
bn_4 = tf.keras.layers.BatchNormalization()(v_pool_2)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_4)
v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
bn_8 = tf.keras.layers.BatchNormalization()(v_flatten_1)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_9)
bn_10 = tf.keras.layers.BatchNormalization()(dense_2_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(dense_3_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_11)
# -----------------------------------------------------------------
# 2D conv branch over fluoro image 1: per-image standardization, then three
# conv/dropout/pool stages, a flatten, and a Dense stack ending in dense_3_f_1.
# NOTE(review): the bn_0..bn_8 names are reused across branches; the Keras
# functional API captures the tensors at call time so this is harmless, but
# the rebinding makes the names misleading to read.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_1)
bn_4 = tf.keras.layers.BatchNormalization()(pool_2_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(conv_6_1)
# Dense Layers After Flattened 2D Conv
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_1)
# dense_3_f_1 is the image-1 branch's final embedding fed into the merge layer.
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# 2D conv branch over fluoro image 2. Structurally identical to the image-1
# branch above, but with its OWN weights: these are separate layer instances
# constructed with the same hyperparameters, not shared layers.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_2)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
bn_4 = tf.keras.layers.BatchNormalization()(pool_2_2)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(conv_6_2)
# Dense Layers After Flattened 2D Conv
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_2)
# dense_3_f_2 is the image-2 branch's final embedding fed into the merge layer.
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
# Small MLP over the 6-element calibration input, ending in dense_3_cali.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# Concatenate the four branch embeddings into one feature vector.
dense_0_comb = tf.keras.layers.concatenate([dense_4_v, dense_3_f_1, dense_3_f_2, dense_3_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model: BN/Dense/Dropout stack over the merged
# features, finishing in the linear(?) main regression output.
bn_1 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_1_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(bn_2)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(dense_2_comb)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(dense_3_comb)
dense_drop_2 = tf.keras.layers.Dropout(rate=params['drop_2_comb_rate'])(bn_4)
# Last hidden layer deliberately has no activity regularizer.
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(dense_drop_2)
# -----------------------------------------------------------------
# Main Output
# Output activation/units come from params ('main_output_units'/'main_output_act').
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
# Build the 4-input functional model, compile it with the optimizer/loss/metric
# from params, render an architecture diagram next to the run artifacts, and
# print the layer summary.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# NOTE(review): plot_model requires pydot/graphviz to be installed — confirm.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the compiled HDF5 datasets read-only. The *_init handles are h5py
# dataset objects; they are sliced into memory further below and each file is
# closed immediately after its slice. Assumes the files exist under
# ~/fluoro/data/compilation — TODO confirm on the training host.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split sample indices into disjoint test and train sets.

    Args:
        shape: total number of samples; indices are drawn from range(shape).
        num_of_samples: if given, subsample this many indices (without
            replacement) before splitting; otherwise use all `shape` indices.
        ratio: fraction of the (sub)sampled indices assigned to the test set.

    Returns:
        (test_indx, train_indx): two numpy arrays of shuffled indices whose
        union is the drawn sample and whose intersection is empty.
    """
    if num_of_samples is None:
        num_of_samples = shape
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the original hard-coded 0.2 here, silently ignoring `ratio`.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Build the train/val/test index partition. First split all labels into a
# test set and a "supervised" remainder, then split that remainder again into
# validation and train. Note the second split returns POSITIONS into
# train_sup_indxs, which is why they are re-indexed through it below.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists, and sorted lists also make
# the pickled partition easier to inspect.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file collecting the index partition and (later) the training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each HDF5 dataset fully into memory ([:]), slice out the
# validation and train subsets, then close the file. The test rows are left
# unloaded — only their indices are kept (pickled above).
# NOTE(review): `vox_init[:]` loads the entire voxel dataset into RAM before
# subsetting; this assumes the host has enough memory — confirm.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: each image stack contributes two fluoro views (index 0 and 1 on
# axis 1); a trailing channel axis is added for the conv layers. Validation
# data is passed explicitly so Keras reports val metrics per epoch.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the per-epoch history alongside the index partition, then the model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
# -----------------------------------------------------------------
# v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
# v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
# v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
# v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
# # v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
# v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_spat_3)
# v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_4)
# v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_5)
# -----------------------------------------------------------------
# # Second run of 2D Conv Layers for Image 1
# conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
# spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
# pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# # Third run of 2D Conv Layers for Image 1
# conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
# pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
# # Fourth run of 2D Conv Layers for Image 1
# conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_3_1)
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-19
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to normalize the calibration inputs from -1 to 1.
# We likewise are going to normalize the label dataset based on the training and validation datasets. We are going to normalize for each instance over all of the instances.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to perform similar res_net style architecture. Going to also use Nadam as the optimizer and increase the LR to 0.01.
# This is similar to previous experiments, but we are going to use MSE instead for the loss function.
# Derive the experiment name from the script's own filename (strip ".py") and
# create a per-experiment output directory for plots, history, and weights.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-normalized mean squared error.

    Divides the per-dimension squared error by the label dataset's
    per-dimension variance (read from labels_stats.h5py) before averaging,
    so each output dimension contributes on a comparable scale.

    Args:
        y_true: ground-truth label tensor.
        y_pred: model prediction tensor of the same shape.

    Returns:
        Scalar tensor: mean of (y_pred - y_true)^2 / var over all elements.
    """
    # Cache the variance vector on the function object: the original reopened
    # and re-read the HDF5 file on every call, which is wasteful when the loss
    # is evaluated per batch (eager mode).
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Single hyperparameter dictionary for the whole experiment: 3D (voxel) conv
# branch, 2D fluoro conv branches, post-merge residual stack, dense heads,
# calibration MLP, top-level combination layers, and training housekeeping
# (optimizer, LR, epochs, batch size, loss, metric). Layer-construction code
# below reads every architectural choice from here.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.4,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.4,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.4,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.4,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    # NOTE(review): 0. disables this dropout — confirm it is intentional given
    # every sibling rate is 0.4.
    'v_spatial_drop_rate_5': 0.,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.4,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.4,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.4,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0.3,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0.3,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.4,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.4,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.4,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.4,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.4,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.4,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.4,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0.3,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 100,
    'vox_flu_drop_1': 0.3,
    'vox_flu_units_1': 75,
    'vox_flu_drop_2': 0.3,
    'vox_flu_units_2': 50,
    'vox_flu_drop_3': 0.3,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0.3,
    'cali_1_units': 20,
    'drop_2_cali': 0.3,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0.2,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0.2,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0.2,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0.2,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0.2,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0.2,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Nadam,
    'learning_rate': 0.01,
    'model_epochs': 30,
    'model_batchsize': 6,
    'model_loss': 'mse',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Input shapes: 128x128 single-channel fluoro images, a padded voxel volume of
# 197x162x564 with one channel, and a 6-element calibration vector.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
# The four named Inputs of the functional model; the names must match the
# keys used in model.fit's input dict.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
# Downsampling 3D convs over the voxel volume, each followed by BatchNorm and
# spatial dropout, then residual blocks in the ResNet pattern: two convs with
# a BatchNorm each, an Add skip connection, and a post-add activation
# ('v_res_act_fn'). The branch continues beyond this section of the file.
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
# NOTE(review): the second conv of each residual pair passes
# params['v_intra_act_fn'] (None) as activity_regularizer — the key name
# suggests an activation was intended; with None it is a no-op either way,
# but confirm the intent.
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 (this residual block continues past the end of this file section)
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D residual stack over the first fluoroscopy input; output is `act_5_1`.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
# NOTE(review): bn_1/bn_2/... from the voxel branch are intentionally
# overwritten from here on; only bn_21_v is still referenced later.
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers (all six use identity shortcuts)
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): this block reuses spatial_drop_rate_1 and the conv_3_* keys
# (no conv_4_*/conv_5_*-specific second conv or dropout rate) — looks like a
# copy-paste remnant from block 1; confirm whether intentional.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch-1 output; must be captured under a unique name because the
# fluoro-2 section below rebuilds the stack reusing the same local names.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Structurally identical to FLUORO ANALYSIS 1 but applied to
# `input_fluoro_2`, producing `act_5_2`. The two branches share
# hyperparameter keys (same params[...] entries) but have independent
# layer instances, i.e. no weight sharing between branches.
# NOTE(review): every intermediate name below (conv_0_1, bn_1, act_0, ...)
# SHADOWS the fluoro-1 variable of the same name. This is safe only because
# act_5_1 was bound before this section — keep that invariant if editing.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers (all six use identity shortcuts)
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): same copy-paste suspects as fluoro branch 1 —
# spatial_drop_rate_1 and conv_3_* keys reused here; confirm intentional.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch-2 output embedding.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Channel-wise concatenation of the two fluoro branch outputs
# (tf.keras.layers.concatenate defaults to the last axis).
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Blocks 0-4 use identity shortcuts; blocks 5-7 use conv projections
# on the shortcut path (comb_12 / comb_15 / comb_18).
# 0: identity shortcut from comb_fluoro_0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5: projection shortcut — comb_12 convolves act_4 before the Add
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6: projection shortcut — comb_15 convolves act_5 before the Add
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7: projection shortcut — comb_18 convolves act_6 before the Add
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs — reduce combined fluoro features to the branch
# embedding `bn_21_f`, concatenated with the voxel embedding below.
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
# Fluoro-head output embedding.
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Channel-wise concatenation of the fluoroscopy features (bn_21_f) and the
# voxel features (bn_21_v, defined above).
fluoro_vox_comb = tf.keras.layers.concatenate([bn_21_f, bn_21_v])
# fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
# fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# NOTE(review): bn_0 is computed but never consumed — vox_flu_0 takes
# fluoro_vox_comb directly. Possibly bn_0 was meant to feed vox_flu_0; confirm.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_comb)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_comb)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
# Output of the merged vox/fluoro dense stack; combined with the calibration
# branch further down.
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small dense stack over the 6-element calibration input. The bn_1/bn_2/bn_3
# names shadow earlier tensors; this is safe because those tensors have
# already been wired into the graph.
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
# Final calibration-branch features (suffix '_c').
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
# Element-wise Add of calibration features with vox/fluoro features; this
# requires params['cali_3_units'] == params['vox_flu_units_4'].
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Six residual dense blocks: each pair of Dense layers is merged (Add) with
# the activation of the previous block, then re-activated.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): this layer reuses params['top_dense_4'] for its unit count —
# likely a copy-paste slip for a 'top_dense_12' key; confirm intended key.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
# Regression head: params['main_output_units'] outputs with
# params['main_output_act'] activation.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
    """Linearly rescale `data_set` so each slice along `axis` spans `feature_range`.

    Standard min-max normalization: per-axis minima map to feature_range[0]
    and per-axis maxima map to feature_range[1].
    """
    lo, hi = feature_range
    col_min = np.min(data_set, axis=axis)
    col_max = np.max(data_set, axis=axis)
    # Scale into [0, 1] first, then stretch/shift into the target range.
    unit_scaled = (data_set - col_min) / (col_max - col_min)
    return unit_scaled * (hi - lo) + lo
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test/train index arrays.

    Args:
        shape: total number of samples to draw indices from.
        num_of_samples: optionally subsample this many indices (without
            replacement) before splitting; None uses all `shape` indices.
        ratio: fraction of the (sub)sampled indices assigned to the test
            split (default 0.2, matching the previous hard-coded behavior).

    Returns:
        (test_indx, train_indx): tuple of disjoint 1-D numpy index arrays.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently
    # ignored the `ratio` parameter.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Load voxel volumes, paired fluoro images, calibration vectors and labels
# from the precomputed HDF5 compilations. Each dataset is read fully into
# memory (dset[:]) before indexing — assumes the arrays fit in RAM; confirm.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
num_of_samples = None
# Two-stage split: first carve off the test set, then split the remainder
# into validation and training indices.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted lists so h5py-style fancy indexing below is monotonically increasing.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the split indices and training history for later evaluation.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
# NOTE(review): labels are min-max normalized over train+val jointly, so
# validation statistics leak into the normalization; confirm intentional.
label_mat_sup = label_init[:]
label_mat_sup = label_mat_sup[list(list(train_indxs) + list(val_indxs))]
label_mat_sup_norm = min_max_norm(label_mat_sup)
# train indices occupy the front of the concatenated array, val the back.
label_mat_train = label_mat_sup_norm[:len(train_indxs)]
label_mat_val = label_mat_sup_norm[-len(val_indxs):]
# Store denormalization statistics so predictions can be mapped back.
label_t_mean = np.mean(label_mat_sup, axis=0)
label_t_std = np.std(label_mat_sup, axis=0)
label_t_min = np.min(label_mat_sup, axis=0)
label_t_max = np.max(label_mat_sup, axis=0)
var_dict['label_train_val_mean'] = label_t_mean
var_dict['label_train_val_std'] = label_t_std
var_dict['label_train_val_min'] = label_t_min
var_dict['label_train_val_max'] = label_t_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Voxel input gets an explicit trailing channel axis; images/calibration are
# fed as stored. Trains, then saves the model and the history pickle.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_test/vox_fluoro_res_v1.py | .py | 20,486 | 433 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual networks
expr_name = sys.argv[0][:-3]  # script filename with the trailing '.py' stripped
expr_no = '1'
# Directory where the model plot, saved weights, and history pickle go.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension scaled by the
    per-dimension variance of the training labels.

    The variance vector is loaded from labels_stats.h5py once and cached on
    the function object: the original implementation re-opened and re-read
    the HDF5 file on every invocation of the loss.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        # mean_dset = stats_file['mean']
        # std_dset = stats_file['std']
        var_dset = stats_file['var']
        cust_mean_squared_error_var._var_v = var_dset[:]
        stats_file.close()
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary for the whole experiment: layer shapes for the
# voxel (3D conv), fluoro (2D conv) and calibration (dense) branches, plus
# optimizer/training settings.
params = {
    # 3D CONV
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.3,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 1,
    'v_conv_2_strides_1': 1,
    'v_conv_2_strides_2': 1,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_1': 0.3,
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 5,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'same',
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 5,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 5,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 1,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'dense_1_v_units': 300,
    'dense_2_v_units': 250,
    'dense_3_v_units': 250,
    'dense_4_v_units': 200,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 2,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_4_filters': 60,
    'conv_4_kernel': 3,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'dense_1_f_units': 120,
    'dense_2_f_units': 120,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 20,
    'dense_3_cali_units': 20,
    # Top Level Dense Units
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    # Loss/metric: variance-scaled MSE defined above.
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Keras Input definitions. Voxel volumes are 199x164x566 single-channel;
# fluoro images are 128x128 single-channel; calibration is a 6-vector.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# Voxel branch: strided 3D conv stem, then three residual conv groups, then
# a dense stack. The names bn_1/bn_2/v_conv_2/v_conv_3/v_spat_1/v_add_0 are
# deliberately rebound for each residual group; this is safe because the
# earlier tensors are already wired into the graph when rebinding happens.
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
# Residual group 1: conv -> dropout -> conv, merged with the first conv output.
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_add_0 = tf.keras.layers.Add()([v_conv_3, v_conv_2])
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_add_0)
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_1)
# Residual group 2: identity shortcut from bn_1 into the Add.
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_add_0 = tf.keras.layers.Add()([v_conv_3, bn_1])
bn_3 = tf.keras.layers.BatchNormalization()(v_add_0)
# Downsampling residual group: main path v_conv_4 -> v_conv_5, shortcut via
# the 1x1x1 strided v_conv_6 taken from bn_3.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_4)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_3)
v_add_0 = tf.keras.layers.Add()([v_conv_5, v_conv_6])
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_add_0)
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_2)
# Residual group 3. NOTE(review): this v_conv_3 reuses the v_conv_2 hyper-
# parameter keys (filters/kernel/strides/pad) instead of v_conv_3 — confirm
# whether intentional.
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_add_0 = tf.keras.layers.Add()([v_conv_3, bn_1])
v_flatten_0 = tf.keras.layers.Flatten()(v_add_0)
# v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
# v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
# v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
# v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_pool_1)
# v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
# v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
# v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_4)
# v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
# v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
# v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_6)
# v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
# v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_7)
# v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
# Dense stack over the flattened voxel features.
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_0)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_3_v)
# -----------------------------------------------------------------
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# -----------------------------------------------------------------
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
# pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Main Output
# NOTE(review): the output head consumes dense_1_v, so dense_2_v..dense_4_v
# are built but never used — possibly intended to feed dense_4_v; confirm.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_1_v)
# -----------------------------------------------------------------
# Model Housekeeping
# Voxel-only ablation: the fluoro/cali inputs are commented out here.
# model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model = tf.keras.Model(inputs=[input_vox], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
# vox_init = vox_file['vox_dset']
# image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
# image_init = image_file['image_dset']
# label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
# label_init = label_file['labels_dset']
# cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
# cali_init = cali_file['cali_len3_rot']
# def split_train_test(shape, num_of_samples=None, ratio=0.2):
# if num_of_samples is None:
# shuffled_indices = np.random.choice(shape, size=shape, replace=False)
# else:
# shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
# test_set_size = int(len(shuffled_indices) * 0.2)
# test_indx = shuffled_indices[:test_set_size]
# train_indx = shuffled_indices[test_set_size:]
# return test_indx, train_indx
# num_of_samples = None
# test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# val_indxs = train_sup_indxs[val_indxs]
# train_indxs = train_sup_indxs[train_indxs]
# test_indxs = sorted(list(test_indxs))
# val_indxs = sorted(list(val_indxs))
# train_indxs = sorted(list(train_indxs))
# hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
# var_dict = {}
# var_dict['test_indxs'] = test_indxs
# var_dict['val_indxs'] = val_indxs
# var_dict['train_indxs'] = train_indxs
# vox_mat_train = vox_init[:]
# vox_mat_val = vox_mat_train[val_indxs]
# vox_mat_train = vox_mat_train[train_indxs]
# vox_file.close()
# image_mat_train = image_init[:]
# image_mat_val = image_mat_train[val_indxs]
# image_mat_train = image_mat_train[train_indxs]
# image_file.close()
# cali_mat_train = cali_init[:]
# cali_mat_val = cali_mat_train[val_indxs]
# cali_mat_train = cali_mat_train[train_indxs]
# cali_file.close()
# label_mat_train = label_init[:]
# label_mat_val = label_mat_train[val_indxs]
# label_mat_train = label_mat_train[train_indxs]
# label_file.close()
# # -----------------------------------------------------------------
# print('\n\ncompletely loaded...\n\n')
# result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# var_dict['result'] = result.history
# pickle.dump(var_dict, hist_file)
# hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_test/vox_fluoro_res_v2.py | .py | 43,363 | 828 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual netowrks
# Derive the experiment name from the running script's filename with the
# trailing '.py' stripped.
# NOTE(review): sys.argv[0][:-3] keeps any directory prefix and assumes a
# '.py' suffix; os.path.splitext(os.path.basename(...)) would be sturdier.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# Per-experiment output directory (created if absent) for saved artifacts.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def _label_variance():
    # Load the per-component variance of the labels once and cache it on the
    # function object.  The original implementation re-opened and re-read the
    # HDF5 file on every invocation of the loss and leaked the file handle if
    # the read raised; the `with` block plus the cache fixes both.
    if not hasattr(_label_variance, '_cache'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            _label_variance._cache = stats_file['var'][:]
    return _label_variance._cache


def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each squared residual divided by the
    precomputed variance of the corresponding label component.

    Configured below as both the training loss and the reported metric
    (see ``params['model_loss']`` / ``params['model_metric']``).
    """
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / _label_variance())
# -----------------------------------------------------------------
# Hyperparameter table for the whole model.  Keys are grouped by branch:
# 'v_*' entries configure the 3D (voxel) convolution branch, bare 'conv_*'
# entries the two identically-shaped 2D fluoro branches, 'dense_*_cali' the
# calibration input, and 'dense_*_co' the combined head.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.3,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.3,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.3,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.3,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.5,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'dense_1_v_units': 75,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.3,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.3,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 40,
    'conv_10_kernel': 3,
    'conv_10_strides': 2,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.3,
    'conv_11_filters': 40,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    'conv_12_filters': 40,
    'conv_12_kernel': 1,
    'conv_12_strides': 2,
    'conv_12_pad': 'same',
    # 6
    'conv_13_filters': 40,
    'conv_13_kernel': 3,
    'conv_13_strides': 2,
    'conv_13_pad': 'same',
    'spatial_drop_rate_6': 0.3,
    'conv_14_filters': 40,
    'conv_14_kernel': 3,
    'conv_14_strides': 1,
    'conv_14_pad': 'same',
    'conv_15_filters': 40,
    'conv_15_kernel': 1,
    'conv_15_strides': 2,
    'conv_15_pad': 'same',
    # 7
    'conv_16_filters': 40,
    'conv_16_kernel': 3,
    'conv_16_strides': 2,
    'conv_16_pad': 'same',
    'spatial_drop_rate_7': 0.3,
    'conv_17_filters': 40,
    'conv_17_kernel': 3,
    'conv_17_strides': 1,
    'conv_17_pad': 'same',
    'conv_18_filters': 40,
    'conv_18_kernel': 1,
    'conv_18_strides': 2,
    'conv_18_pad': 'same',
    # ---
    # Final Conv Layers
    'spatial_drop_rate_8': 0.3,
    'conv_19_filters': 50,
    'conv_19_kernel': 2,
    'conv_19_strides': 1,
    'conv_19_pad': 'valid',
    # ---
    # Dense Layers
    'dense_0_f_units': 50,
    'dense_1_f_units': 50,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 20,
    'dense_3_cali_units': 20,
    # Top Level Dense Units
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output
    # Six linear outputs — presumably the rigid-body pose parameters; confirm
    # against the label file's schema.
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # Optimizer CLASS (not an instance); presumably instantiated with
    # 'learning_rate' when the model is compiled — not visible in this chunk.
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    # Variance-normalized MSE defined above, used as both loss and metric.
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Data layout shared by every conv/pool layer below.
channel_order = 'channels_last'
# Single-channel inputs: 128x128 fluoro frames and a 199x164x566 voxel volume.
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
# Six calibration scalars per example.
cali_input_shape = (6,)
# Input Layers
# Four named graph inputs; the names presumably match the feed-dict keys used
# by the training driver — confirm against the fit() call.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# ---
# Entry Layers
# 3D (voxel) branch stem: two strided Conv3D layers, with batch-norm and
# spatial dropout between them, downsample the input volume before pooling.
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Blocks 1-3 are identity-shortcut residual units (stride 1, filter counts
# match, so the block input is added back unchanged).  Blocks 4-5 downsample:
# the shortcut is a strided conv (v_conv_11 / v_conv_14) so its output shape
# matches the strided main path before the Add.
# 1
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
v_add_0 = tf.keras.layers.Add()([v_conv_4, bn_2])
# 2
bn_4 = tf.keras.layers.BatchNormalization()(v_add_0)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
v_add_1 = tf.keras.layers.Add()([v_conv_6, bn_4])
# 3
bn_6 = tf.keras.layers.BatchNormalization()(v_add_1)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_4)
v_add_2 = tf.keras.layers.Add()([v_conv_8, bn_6])
# 4
bn_8 = tf.keras.layers.BatchNormalization()(v_add_2)
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_5)
# Projection shortcut for the downsampling block (applied to bn_8).
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_8)
v_add_3 = tf.keras.layers.Add()([v_conv_10, v_conv_11])
# 5
bn_10 = tf.keras.layers.BatchNormalization()(v_add_3)
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_11)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_6)
# Projection shortcut for the downsampling block (applied to bn_10).
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_10)
v_add_4 = tf.keras.layers.Add()([v_conv_13, v_conv_14])
# 6
# Residual block 6: strided main path (v_conv_15 -> v_conv_16) plus a strided
# 1x1x1 projection shortcut (v_conv_17), summed.
bn_12 = tf.keras.layers.BatchNormalization()(v_add_4)
# Index the v_conv_15/16/17 parameter keys declared in `params`.  The original
# indexed the v_conv_12/13/14 keys here — the values are identical today, so
# the network is unchanged, but the dedicated 15/16/17 entries were silently
# ignored and tuning them had no effect.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_12)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_13)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_7)
# Projection shortcut for the downsampling block (applied to bn_12).
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_12)
v_add_5 = tf.keras.layers.Add()([v_conv_16, v_conv_17])
# ---
# Final Conv Layers
bn_14 = tf.keras.layers.BatchNormalization()(v_add_5)
# Heavier dropout (0.5) before the head than in the residual stack (0.3).
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_14)
# 'valid' padding with kernel 2: trims the remaining spatial extent before flattening.
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten -> 75 -> 50 units: the voxel branch's feature vector, presumably
# merged with the fluoro/calibration features further down the file.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_15 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_15)
bn_16 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_16)
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# 2D branch for the first fluoro frame.  Structure mirrors the voxel branch:
# stem convs, average pool, residual blocks 1-4 (identity shortcut), blocks
# 5-7 (strided 1x1 projection shortcut), final conv, then two dense layers.
# NOTE(review): the bn_0..bn_18 names re-bind the voxel branch's temporaries;
# each is consumed immediately so the graph is correct, but it is confusing.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# 1
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
add_0 = tf.keras.layers.Add()([conv_3_1, bn_2])
# 2
bn_4 = tf.keras.layers.BatchNormalization()(add_0)
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
add_1 = tf.keras.layers.Add()([conv_5_1, bn_4])
# 3
bn_6 = tf.keras.layers.BatchNormalization()(add_1)
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
add_2 = tf.keras.layers.Add()([conv_7_1, bn_6])
# 4
bn_8 = tf.keras.layers.BatchNormalization()(add_2)
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
add_3 = tf.keras.layers.Add()([conv_9_1, bn_8])
# 5
bn_10 = tf.keras.layers.BatchNormalization()(add_3)
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# Strided 1x1 projection shortcut (applied to bn_10).
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_10)
add_4 = tf.keras.layers.Add()([conv_11_1, conv_12_1])
# 6
bn_12 = tf.keras.layers.BatchNormalization()(add_4)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_12)
bn_13 = tf.keras.layers.BatchNormalization()(conv_13_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_14_1 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
conv_15_1 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_12)
add_5 = tf.keras.layers.Add()([conv_14_1, conv_15_1])
# 7
bn_14 = tf.keras.layers.BatchNormalization()(add_5)
conv_16_1 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_16_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_14)
bn_15 = tf.keras.layers.BatchNormalization()(conv_16_1)
spat_7_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_15)
conv_17_1 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_1)
conv_18_1 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_14)
add_6 = tf.keras.layers.Add()([conv_17_1, conv_18_1])
# ---
# Final Conv Layers
bn_16 = tf.keras.layers.BatchNormalization()(add_6)
spat_8_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_16)
conv_19_1 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_1)
# ---
# Dense Layers
# Flatten -> 50 -> 50 units: feature vector for fluoro frame 1.
flatten_0 = tf.keras.layers.Flatten()(conv_19_1)
bn_17 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_1 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_17)
bn_18 = tf.keras.layers.BatchNormalization()(dense_0_f_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_18)
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# 2D branch for the second fluoro frame.  Same architecture as the frame-1
# branch above, but every layer object is constructed anew, so the two
# branches have separate (unshared) weights.  The branch continues past the
# end of this chunk.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_2 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_2)
spat_0_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_2)
# ---
# Pool After Initial Layers
pool_0_2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_2)
# ---
# Run of Residual Layers
# 1
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_2)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
add_0 = tf.keras.layers.Add()([conv_3_2, bn_2])
# 2
bn_4 = tf.keras.layers.BatchNormalization()(add_0)
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
add_1 = tf.keras.layers.Add()([conv_5_2, bn_4])
# 3
bn_6 = tf.keras.layers.BatchNormalization()(add_1)
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_2 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
add_2 = tf.keras.layers.Add()([conv_7_2, bn_6])
# 4
bn_8 = tf.keras.layers.BatchNormalization()(add_2)
conv_8_2 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_2)
spat_4_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_2 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_2)
add_3 = tf.keras.layers.Add()([conv_9_2, bn_8])
# 5
bn_10 = tf.keras.layers.BatchNormalization()(add_3)
conv_10_2 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_2)
spat_5_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_2 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_2)
# Strided 1x1 projection shortcut (applied to bn_10).
conv_12_2 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_10)
add_4 = tf.keras.layers.Add()([conv_11_2, conv_12_2])
# 6
bn_12 = tf.keras.layers.BatchNormalization()(add_4)
conv_13_2 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_12)
bn_13 = tf.keras.layers.BatchNormalization()(conv_13_2)
spat_6_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_14_2 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_2)
conv_15_2 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_12)
add_5 = tf.keras.layers.Add()([conv_14_2, conv_15_2])
# 7
bn_14 = tf.keras.layers.BatchNormalization()(add_5)
conv_16_2 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_16_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_14)
bn_15 = tf.keras.layers.BatchNormalization()(conv_16_2)
spat_7_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_15)
conv_17_2 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_2)
conv_18_2 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_14)
add_6 = tf.keras.layers.Add()([conv_17_2, conv_18_2])
# ---
# Final Conv Layers
bn_16 = tf.keras.layers.BatchNormalization()(add_6)
spat_8_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_16)
conv_19_2 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_2)
# ---
# Dense Layers
flatten_0 = tf.keras.layers.Flatten()(conv_19_2)
bn_17 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_2 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_17)
bn_18 = tf.keras.layers.BatchNormalization()(dense_0_f_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_18)
# -----------------------------------------------------------------
# ---
# Combine the fluoro inputs
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_0_comb)
# -----------------------------------------------------------------
# Model Housekeeping
# model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
# vox_init = vox_file['vox_dset']
# image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
# image_init = image_file['image_dset']
# label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
# label_init = label_file['labels_dset']
# cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
# cali_init = cali_file['cali_len3_rot']
# def split_train_test(shape, num_of_samples=None, ratio=0.2):
# if num_of_samples is None:
# shuffled_indices = np.random.choice(shape, size=shape, replace=False)
# else:
# shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
# test_set_size = int(len(shuffled_indices) * 0.2)
# test_indx = shuffled_indices[:test_set_size]
# train_indx = shuffled_indices[test_set_size:]
# return test_indx, train_indx
# num_of_samples = None
# test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# val_indxs = train_sup_indxs[val_indxs]
# train_indxs = train_sup_indxs[train_indxs]
# test_indxs = sorted(list(test_indxs))
# val_indxs = sorted(list(val_indxs))
# train_indxs = sorted(list(train_indxs))
# hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
# var_dict = {}
# var_dict['test_indxs'] = test_indxs
# var_dict['val_indxs'] = val_indxs
# var_dict['train_indxs'] = train_indxs
# vox_mat_train = vox_init[:]
# vox_mat_val = vox_mat_train[val_indxs]
# vox_mat_train = vox_mat_train[train_indxs]
# vox_file.close()
# image_mat_train = image_init[:]
# image_mat_val = image_mat_train[val_indxs]
# image_mat_train = image_mat_train[train_indxs]
# image_file.close()
# cali_mat_train = cali_init[:]
# cali_mat_val = cali_mat_train[val_indxs]
# cali_mat_train = cali_mat_train[train_indxs]
# cali_file.close()
# label_mat_train = label_init[:]
# label_mat_val = label_mat_train[val_indxs]
# label_mat_train = label_mat_train[train_indxs]
# label_file.close()
# # -----------------------------------------------------------------
# print('\n\ncompletely loaded...\n\n')
# result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# var_dict['result'] = result.history
# pickle.dump(var_dict, hist_file)
# hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual networks.
# Derive run identifiers from the script name and create the directory that
# all experiment artifacts (model plot, weights, history pickle) go into.
expr_name = sys.argv[0][:-3]  # script filename with the trailing '.py' stripped
expr_no = '1'  # manual experiment counter; bump when re-running a variant
# NOTE(review): artifacts are saved under ~/fluoro/code/jupyt/vox_fluoro even
# though this script appears to live elsewhere -- confirm that is intentional.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)  # idempotent: safe on repeat runs
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each component scaled by the label variance.

    The squared residual is divided elementwise by the per-label variance
    vector precomputed in ``labels_stats.h5py``, so every output dimension
    contributes to the loss on a comparable scale.

    Args:
        y_true: ground-truth label tensor.
        y_pred: predicted label tensor, same shape as ``y_true``.

    Returns:
        Scalar tensor: mean of the variance-normalized squared errors.
    """
    # Load the variance vector once and memoize it on the function object.
    # The original implementation re-opened and re-read the HDF5 file on
    # every loss evaluation (i.e. every training step), which is pure disk
    # overhead; the statistics file does not change during a run.
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even if the read fails.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameters for the whole model in one flat dict. Keys prefixed with
# 'v_' configure the 3-D (voxel) branch; unprefixed conv/dense keys configure
# the 2-D fluoroscopy branches; the remaining keys cover the combiner head,
# calibration sub-net, and training setup.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.3,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4 -- first downsampling residual stage (stride 2)
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.3,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.3,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6 (currently identical values to stage 5)
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.3,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.5,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'dense_1_v_units': 75,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.3,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.3,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5 -- downsampling stage (stride 2) with 1x1 projection shortcut
    'conv_10_filters': 40,
    'conv_10_kernel': 3,
    'conv_10_strides': 2,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.3,
    'conv_11_filters': 40,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    'conv_12_filters': 40,
    'conv_12_kernel': 1,
    'conv_12_strides': 2,
    'conv_12_pad': 'same',
    # 6
    'conv_13_filters': 40,
    'conv_13_kernel': 3,
    'conv_13_strides': 2,
    'conv_13_pad': 'same',
    'spatial_drop_rate_6': 0.3,
    'conv_14_filters': 40,
    'conv_14_kernel': 3,
    'conv_14_strides': 1,
    'conv_14_pad': 'same',
    'conv_15_filters': 40,
    'conv_15_kernel': 1,
    'conv_15_strides': 2,
    'conv_15_pad': 'same',
    # 7
    'conv_16_filters': 40,
    'conv_16_kernel': 3,
    'conv_16_strides': 2,
    'conv_16_pad': 'same',
    'spatial_drop_rate_7': 0.3,
    'conv_17_filters': 40,
    'conv_17_kernel': 3,
    'conv_17_strides': 1,
    'conv_17_pad': 'same',
    'conv_18_filters': 40,
    'conv_18_kernel': 1,
    'conv_18_strides': 2,
    'conv_18_pad': 'same',
    # ---
    # Final Conv Layers
    'spatial_drop_rate_8': 0.3,
    'conv_19_filters': 50,
    'conv_19_kernel': 2,
    'conv_19_strides': 1,
    'conv_19_pad': 'valid',
    # ---
    # Dense Layers
    'dense_0_f_units': 50,
    'dense_1_f_units': 50,
    'dense_comb_1_units': 50,
    'dense_comb_2_units': 50,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 6,
    'dense_comb_v_1_units': 20,
    'dense_comb_v_2_units': 6,
    # Top Level Dense Units
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    # NOTE(review): all regularizers and the '*intra_act_fn' entries are None,
    # so they are currently no-ops wherever they are passed below.
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    'res_act_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,  # optimizer class, instantiated at compile time
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_var,  # variance-normalized MSE (custom)
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# All conv layers run channels-last: (batch, spatial dims..., channels).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)  # one single-channel 128x128 fluoroscopy frame
vox_input_shape = (199, 164, 566, 1)  # single-channel voxel volume
cali_input_shape = (6,)  # calibration vector; presumably 3 lengths + 3 rotations -- TODO confirm
# Input Layers -- one voxel volume, two fluoro frames, one calibration vector.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# ---
# Voxel branch (3-D residual conv stack).
# Entry Layers: two strided convs with spatial dropout between them.
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers. Each identity stage is conv -> BN -> dropout ->
# conv -> BN, added to the stage input, then a post-add activation.
# NOTE(review): the second conv of each stage passes params['v_intra_act_fn']
# (named like an activation, value None) as activity_regularizer -- a no-op
# today, but confirm the intent before giving either param a non-None value.
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 -- downsampling residual stage: the main path strides by 2, and the
# shortcut is a strided projection conv taken from the stage input (v_act_2)
# so both addends have matching shape.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5 -- same downsampling pattern; the shortcut conv uses kernel size 1.
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6 -- sixth residual stage of the voxel branch (projection shortcut, as in
# stages 4/5). Fixed a copy-paste slip: this stage previously indexed the
# stage-5 hyperparameter keys (v_conv_12..14, v_spatial_drop_rate_6) instead
# of its own v_conv_15..17 / v_spatial_drop_rate_7 entries. The configured
# values are currently identical, so the built graph is unchanged, but the
# stage's own params now actually take effect when tuned.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers: heavy dropout (0.5) then a 'valid' conv to shrink the
# feature map before flattening.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers: project the flattened voxel features to a fixed embedding.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
# NOTE(review): bn_15 / bn_16 below rebind names already used by residual
# stage 6 above. The earlier tensors were already consumed, so behavior is
# unaffected, but the shadowing hurts readability.
bn_15 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_15)
bn_16 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_16)
# -----------------------------------------------------------------
# ---
# First fluoroscopy-image branch (2-D residual conv stack).
# Entry Fluoro Layers: per-image standardization, then two strided convs.
# NOTE(review): bn_0, bn_1, bn_2, ... below rebind names used by the voxel
# branch above; the earlier tensors were already consumed, so only
# readability suffers.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers: conv -> BN -> dropout -> conv (linear, since
# 'c_intra_act_fn' is None) -> BN, added to the stage input, then a
# post-add activation ('c_res_act_fn').
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 2 -- second residual stage of the first-fluoro branch. Fixed a copy-paste
# slip: the dropout previously reused 'spatial_drop_rate_1' and the second
# conv reused the conv_3_* keys; the stage now indexes its own
# 'spatial_drop_rate_2' / conv_5_* entries. Configured values are identical
# today, so the built graph is unchanged, but this stage's params now
# actually take effect when tuned.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
add_4 = tf.keras.layers.Add()([bn_12, bn_13])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 6
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_14)
conv_14_1 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_15 = tf.keras.layers.BatchNormalization()(conv_14_1)
conv_15_1 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_16 = tf.keras.layers.BatchNormalization()(conv_15_1)
add_5 = tf.keras.layers.Add()([bn_15, bn_16])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 7
conv_16_1 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_17 = tf.keras.layers.BatchNormalization()(conv_16_1)
spat_7_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_17)
conv_17_1 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_1)
bn_18 = tf.keras.layers.BatchNormalization()(conv_17_1)
conv_18_1 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_19 = tf.keras.layers.BatchNormalization()(conv_18_1)
add_6 = tf.keras.layers.Add()([bn_18, bn_19])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# ---
# Final Conv Layers
bn_20 = tf.keras.layers.BatchNormalization()(act_6)
spat_8_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_20)
conv_19_1 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_1)
# ---
# Dense Layers
flatten_0 = tf.keras.layers.Flatten()(conv_19_1)
bn_21 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_1 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_21)
bn_22 = tf.keras.layers.BatchNormalization()(dense_0_f_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_22)
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# Second fluoroscopy-image tower — structurally mirrors the fluoro-1 tower
# above (shared hyperparameter keys, but separate layer instances, so no
# weight sharing). The bn_*/spat_*/add_*/act_* temporaries are deliberately
# rebound here; each is consumed immediately after assignment.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_2 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_2)
spat_0_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_2)
# ---
# Pool After Initial Layers
pool_0_2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_2)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_2)
# 1
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_2)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 2
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_2)
# NOTE(review): uses spatial_drop_rate_2 here, unlike the fluoro-1 branch
# which uses spatial_drop_rate_1 at this position — confirm intent.
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_2)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 3
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_2 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_2)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 4
conv_8_2 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_2)
spat_4_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_2 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_2)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_2)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 5
# Units 5-7 use a convolutional projection shortcut (conv_12/15/18 on the
# unit input) rather than an identity skip.
conv_10_2 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_2)
spat_5_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_2 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_2)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_2)
conv_12_2 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_2)
add_4 = tf.keras.layers.Add()([bn_12, bn_13])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 6
conv_13_2 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_2)
spat_6_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_14)
conv_14_2 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_2)
bn_15 = tf.keras.layers.BatchNormalization()(conv_14_2)
conv_15_2 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_16 = tf.keras.layers.BatchNormalization()(conv_15_2)
add_5 = tf.keras.layers.Add()([bn_15, bn_16])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 7
# NOTE(review): kernel_size comes from params['conv_15_kernel'] — likely a
# copy-paste typo for 'conv_16_kernel' (same pattern as the fluoro-1 branch).
conv_16_2 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_17 = tf.keras.layers.BatchNormalization()(conv_16_2)
spat_7_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_17)
conv_17_2 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_2)
bn_18 = tf.keras.layers.BatchNormalization()(conv_17_2)
conv_18_2 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_19 = tf.keras.layers.BatchNormalization()(conv_18_2)
add_6 = tf.keras.layers.Add()([bn_18, bn_19])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# ---
# Final Conv Layers
bn_20 = tf.keras.layers.BatchNormalization()(act_6)
spat_8_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_20)
conv_19_2 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_2)
# ---
# Dense Layers
# dense_1_f_2 is this branch's output, fused with dense_1_f_1 below.
flatten_0 = tf.keras.layers.Flatten()(conv_19_2)
bn_21 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_2 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_21)
bn_22 = tf.keras.layers.BatchNormalization()(dense_0_f_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_22)
# -----------------------------------------------------------------
# Calibration branch: small dense stack over input_cali. Its final BN
# output (bn_2) is reused twice below as a residual term for the top head.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
# -----------------------------------------------------------------
# ---
# Combine the fluoro inputs together
dense_comb_f_0 = tf.keras.layers.Add()([dense_1_f_1, dense_1_f_2])
dense_comb_act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(dense_comb_f_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_comb_act_0)
dense_comb_f_1 = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_comb_f_1)
dense_comb_f_2 = tf.keras.layers.Dense(units=params['dense_comb_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
# -----------------------------------------------------------------
# ---
# Combine the fluoro with the vox
# NOTE(review): dense_2_v is the voxel-branch output defined earlier in the file.
dense_comb_v_0 = tf.keras.layers.Add()([dense_comb_f_2, dense_2_v])
dense_comb_v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(dense_comb_v_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_comb_v_act_0)
dense_comb_v_1 = tf.keras.layers.Dense(units=params['dense_comb_v_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_comb_v_1)
dense_comb_v_2 = tf.keras.layers.Dense(units=params['dense_comb_v_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
# -----------------------------------------------------------------
# Top head: fold in the calibration features (bn_2 from the cali branch
# above), then two residual dense blocks of width 6.
top_comb = tf.keras.layers.Add()([dense_comb_v_2, bn_2])
top_comb_act = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(top_comb)
top_dense_1 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_comb_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_1)
# NOTE(review): bn_0 computed above is never consumed — top_dense_2 is applied
# to top_dense_1 directly. Possibly intended to be (bn_0); confirm.
top_dense_2 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_2, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(add_0)
top_dense_1 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_1)
# NOTE(review): same pattern — this bn_0 is also unused.
top_dense_2 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_2, act_0])
act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(add_0)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(act_0)
# -----------------------------------------------------------------
# Model Housekeeping
# Build, compile, render the architecture diagram, and print a summary.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the four HDF5 datasets (voxels, fluoro image pairs, pose labels,
# calibration vectors) read-only; the *_init handles are lazy h5py datasets
# until sliced below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train arrays.

    Args:
        shape (int): Total number of samples available to index.
        num_of_samples (int | None): If given, only this many randomly chosen
            samples are used; otherwise all `shape` samples are shuffled.
        ratio (float): Fraction of the shuffled indices assigned to the test
            split. Defaults to 0.2.

    Returns:
        tuple[np.ndarray, np.ndarray]: (test_indx, train_indx) — disjoint
        index arrays whose union is the shuffled sample set.
    """
    if num_of_samples is None:
        num_of_samples = shape
    # Sampling without replacement yields a shuffle with no duplicate indices.
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the original hard-coded 0.2 here, silently ignoring `ratio`.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Build test/val/train index sets, materialize the splits in memory, train,
# and persist the model plus the fit history.
num_of_samples = None
# First split carves out the held-out test indices; the remainder is split
# again below into validation and training.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# The second split returns positions *into* train_sup_indxs, so map them
# back to absolute dataset indices on the next two lines.
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted lists give monotonically increasing (h5py-friendly) fancy indices.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
# Record the exact splits so the test set can be evaluated later;
# test_indxs is saved but not used further in this script.
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): each [:] below loads the ENTIRE dataset into RAM before
# slicing out the val/train rows — fine only if the compilations fit in memory.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: images are split into the two fluoro inputs by their second axis
# (index 0 / 1), and a trailing channel axis is added for the conv layers.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# Persist the training history alongside the index splits.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-18
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to normalize the calibration inputs from -1 to 1.
# We likewise are going to normalize the label dataset based on the training and validation datasets. We are going to normalize for each instance over all of the instances.
# We are going to also do per image normalization between -1 and 1.
# Similar to previous file, but we are going to try the final later with an 'elu' acfitvation function, instead of the linear activation function.
# Also going to use MAE for the loss.
# Derive the experiment name from this script's filename (strip the '.py'
# suffix) and create a per-experiment output directory.
expr_name = sys.argv[0][:-3]
expr_no = '1'  # run number, used in every output filename
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-normalized mean squared error loss.

    Each output dimension's squared error is divided by the per-dimension
    variance of the label dataset, read from the 'var' dataset of
    ~/fluoro/data/compilation/labels_stats.h5py.

    Args:
        y_true: Ground-truth label tensor.
        y_pred: Predicted label tensor.

    Returns:
        Scalar tensor: mean of the variance-scaled squared errors.
    """
    # Load the variance vector once and memoize it on the function object so
    # repeated invocations (e.g. loss re-tracing) do not reopen the HDF5 file.
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
        finally:
            # Close even if the read fails, so the file handle never leaks.
            stats_file.close()
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'v_conv_0_filters': 30,
'v_conv_0_kernel': 9,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0.4,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 7,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0.4,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0.4,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0.4,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0.,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0.4,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0.4,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0.4,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'drop_1_v_rate': 0.3,
'dense_1_v_units': 75,
'drop_2_v_rate': 0.3,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
'intra_act_fn': None,
'res_act_fn': 'elu',
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0.4,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0.4,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0.4,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0.4,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0.4,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 30,
'conv_10_kernel': 3,
'conv_10_strides': 1,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0.4,
'conv_11_filters': 30,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
# 6
'conv_12_filters': 30,
'conv_12_kernel': 3,
'conv_12_strides': 1,
'conv_12_pad': 'same',
'spatial_drop_rate_6': 0.4,
'conv_13_filters': 30,
'conv_13_kernel': 3,
'conv_13_strides': 1,
'conv_13_pad': 'same',
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
# 0
'comb_0_filters': 60,
'comb_0_kernel': 3,
'comb_0_strides': 1,
'comb_0_pad': 'same',
'comb_spatial_0': 0.4,
'comb_1_filters': 60,
'comb_1_kernel': 3,
'comb_1_strides': 1,
'comb_1_pad': 'same',
# 1
'comb_2_filters': 60,
'comb_2_kernel': 3,
'comb_2_strides': 1,
'comb_2_pad': 'same',
'comb_spatial_1': 0.4,
'comb_3_filters': 60,
'comb_3_kernel': 3,
'comb_3_strides': 1,
'comb_3_pad': 'same',
# 2
'comb_4_filters': 60,
'comb_4_kernel': 3,
'comb_4_strides': 1,
'comb_4_pad': 'same',
'comb_spatial_2': 0.4,
'comb_5_filters': 60,
'comb_5_kernel': 3,
'comb_5_strides': 1,
'comb_5_pad': 'same',
# 3
'comb_6_filters': 60,
'comb_6_kernel': 3,
'comb_6_strides': 1,
'comb_6_pad': 'same',
'comb_spatial_3': 0.4,
'comb_7_filters': 60,
'comb_7_kernel': 3,
'comb_7_strides': 1,
'comb_7_pad': 'same',
# 4
'comb_8_filters': 60,
'comb_8_kernel': 3,
'comb_8_strides': 1,
'comb_8_pad': 'same',
'comb_spatial_4': 0.4,
'comb_9_filters': 60,
'comb_9_kernel': 3,
'comb_9_strides': 1,
'comb_9_pad': 'same',
# 5
'comb_10_filters': 60,
'comb_10_kernel': 2,
'comb_10_strides': 2,
'comb_10_pad': 'same',
'comb_spatial_5': 0.4,
'comb_11_filters': 60,
'comb_11_kernel': 2,
'comb_11_strides': 1,
'comb_11_pad': 'same',
'comb_12_filters': 60,
'comb_12_kernel': 1,
'comb_12_strides': 2,
'comb_12_pad': 'same',
# 6
'comb_13_filters': 60,
'comb_13_kernel': 2,
'comb_13_strides': 2,
'comb_13_pad': 'same',
'comb_spatial_6': 0.4,
'comb_14_filters': 60,
'comb_14_kernel': 2,
'comb_14_strides': 1,
'comb_14_pad': 'same',
'comb_15_filters': 60,
'comb_15_kernel': 1,
'comb_15_strides': 2,
'comb_15_pad': 'same',
# 7
'comb_16_filters': 60,
'comb_16_kernel': 2,
'comb_16_strides': 2,
'comb_16_pad': 'same',
'comb_spatial_7': 0.4,
'comb_17_filters': 60,
'comb_17_kernel': 2,
'comb_17_strides': 1,
'comb_17_pad': 'same',
'comb_18_filters': 60,
'comb_18_kernel': 1,
'comb_18_strides': 2,
'comb_18_pad': 'same',
# ---
# Final Convs After Fluoro
'comb_19_filters': 60,
'comb_19_kernel': 2,
'comb_19_strides': 1,
'comb_19_pad': 'valid',
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': 50,
'drop_1_comb': 0.3,
'dense_comb_1_units': 50,
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': 'elu',
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': 100,
'vox_flu_drop_1': 0.3,
'vox_flu_units_1': 75,
'vox_flu_drop_2': 0.3,
'vox_flu_units_2': 50,
'vox_flu_drop_3': 0.3,
'vox_flu_units_3': 15,
'vox_flu_units_4': 6,
# ---
# Cali Units
'cali_0_units': 20,
'drop_1_cali': 0.3,
'cali_1_units': 20,
'drop_2_cali': 0.3,
'cali_2_units': 20,
'cali_3_units': 6,
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': 'elu',
'top_level_intra': None,
# ---
# Top Level Dense
'top_drop_0': 0.2,
'top_dense_0': 6,
'top_dense_1': 6,
'top_dense_2': 6,
'top_drop_1': 0.2,
'top_dense_3': 6,
'top_dense_4': 6,
'top_drop_2': 0.2,
'top_dense_5': 6,
'top_dense_6': 6,
'top_drop_3': 0.2,
'top_dense_7': 6,
'top_dense_8': 6,
'top_drop_4': 0.2,
'top_dense_9': 6,
'top_dense_10': 6,
'top_drop_5': 0.2,
'top_dense_11': 6,
'top_dense_12': 6,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
# 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
'activation_fn': 'elu',
'kern_init': 'he_uniform',
'model_opt': tf.keras.optimizers.Nadam,
'learning_rate': 0.01,
'model_epochs': 30,
'model_batchsize': 6,
'model_loss': 'mae',
'model_metric': 'mae'
}
# -----------------------------------------------------------------
# INPUT DEFINITIONS
# -----------------------------------------------------------------
channel_order = 'channels_last'  # data_format passed to every conv/pool layer below
img_input_shape = (128, 128, 1)  # single-channel 2D fluoroscopy frame
vox_input_shape = (197, 162, 564, 1)  # single-channel 3D voxel volume (large; dominates memory use)
cali_input_shape = (6,)  # calibration vector of 6 scalars
# Input Layers: four model entry points, named so they can be fed by dict.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D residual conv tower over the voxel input, ending in two dense layers
# whose batch-normalized output (bn_21_v) is consumed further below.
# NOTE(review): on the second conv of each residual pair,
# activity_regularizer is fed params['v_intra_act_fn'] -- by its name an
# activation function, not a regularizer.  Presumably its value is None so
# the argument is a no-op, but confirm the intent (it may have been meant
# as activation=..., matching the 2D towers' use of 'intra_act_fn').
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers: each block is conv -> BN -> dropout -> conv -> BN,
# then Add() with the block input and a shared post-add activation.
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 -- projection-style block: v_conv_11 is a parallel path from v_act_2
# that is added to the main path (instead of an identity shortcut).
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5 -- projection-style block, same layout as block 4.
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): this block builds v_conv_15/16/17 but re-reads the
# v_conv_12/13/14 and v_spatial_drop_rate_6 param keys.  The params dict
# defines 'v_spatial_drop_rate_7', 'v_conv_16_*' and 'v_conv_17_*' keys
# that are consequently never used -- looks like a copy-paste slip from
# block 5.  Left as-is because switching keys would change the layer
# geometry (the unused v_conv_17 strides are (2,2,2)); confirm the
# intended wiring before retuning those dead params.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers: 'valid' padding, no shortcut.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers: flatten the 3D features, then two dropout+dense+BN stages.
# bn_21_v is the voxel tower's final output tensor.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D residual conv tower over the first fluoro frame; its output is
# act_5_1.  NOTE(review): the intermediate names (conv_0_1, bn_1, ...)
# are deliberately re-used by the FLUORO ANALYSIS 2 section, which
# shadows them -- only act_5_1 survives as this section's result.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers: conv -> BN -> dropout -> conv(linear) -> BN,
# Add() with the block input, then the shared residual activation.
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# FIX(review): this block previously re-read the 'spatial_drop_rate_1' and
# 'conv_3_*' keys, leaving the dict's 'spatial_drop_rate_2' and 'conv_5_*'
# entries dead.  Wired to the intended keys; their current values are
# identical, so the built graph is unchanged -- but the params are now
# actually tunable.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Second 2D residual tower, mirror image of FLUORO ANALYSIS 1 applied to
# input_fluoro_2 with the same param keys (weights are NOT shared -- each
# tower builds its own layers).  Re-assigns the same intermediate names
# as tower 1; only act_5_2 is this section's result.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# FIX(review): same copy-paste slip as tower 1 -- previously re-read
# 'spatial_drop_rate_1' and 'conv_3_*' instead of the dict's
# 'spatial_drop_rate_2' / 'conv_5_*' keys.  Values are currently
# identical, so the built graph is unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Stack the two fluoro feature maps along the channel axis (the default
# axis=-1), handing both views to the shared residual trunk below.
comb_fluoro_0 = tf.keras.layers.Concatenate(axis=-1)([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# 2D residual trunk over the concatenated fluoro features.  Note that
# comb_0_filters / comb_1_filters (60) match the channel count of
# comb_fluoro_0 (2 x conv_13_filters = 60), which keeps the Add() in
# block 0 shape-compatible.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3 (residual block continues after this line)
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.concatenate([bn_21_f, bn_21_v])
# fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
# fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Dense stack that mixes the fused fluoro/voxel features down to 6 units.
# NOTE(review): bn_0 is computed here but never used — vox_flu_0 below is
# wired to fluoro_vox_comb directly, so this BatchNormalization is a dead
# layer in the graph. Confirm whether vox_flu_0 was meant to consume bn_0.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_comb)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_comb)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
# Final projection down to 6 units so it can later be Add()-ed with the
# 6-unit calibration branch (bn_4_c).
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
# Final residual dense pair (10/11) plus the projection to the main output.
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# BUG FIX: this layer previously read params['top_dense_4'] (copy-paste
# slip), so tuning params['top_dense_12'] silently had no effect. Both keys
# are currently 6, so the built model is unchanged for this configuration.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_12'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
# NOTE(review): activation is hard-coded to 'elu' even though
# params['main_output_act'] is 'linear' — confirm which is intended before
# changing; altering it would change model behavior.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation='elu', kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
    """Linearly rescale ``data_set`` into ``feature_range`` along ``axis``.

    Args:
        data_set: array-like of numeric data.
        feature_range: (low, high) target interval for the scaled values.
        axis: axis along which per-feature min/max are computed.

    Returns:
        Array of the same shape, scaled so each feature spans feature_range.
    """
    data_min = np.min(data_set, axis=axis)
    data_max = np.max(data_set, axis=axis)
    span = data_max - data_min
    # BUG FIX: a constant feature made (data_max - data_min) zero, so the
    # division produced NaN (0/0). Substitute 1 for the zero span; the
    # numerator is also 0 there, so constant features map to the lower bound.
    span = np.where(span == 0, 1, span)
    data_in_std_range = (data_set - data_min) / span
    return data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Shuffle indices ``[0, shape)`` and split them into test/train sets.

    Args:
        shape: total number of samples to draw indices from.
        num_of_samples: if given, subsample only this many indices
            (without replacement) before splitting.
        ratio: fraction of the shuffled indices assigned to the test split.

    Returns:
        Tuple ``(test_indx, train_indx)`` of disjoint index arrays.
    """
    size = shape if num_of_samples is None else num_of_samples
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    # BUG FIX: the test-set fraction was hard-coded to 0.2, silently
    # ignoring the ``ratio`` parameter (default callers are unaffected).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_sup = label_init[:]
label_mat_sup = label_mat_sup[list(list(train_indxs) + list(val_indxs))]
label_mat_sup_norm = min_max_norm(label_mat_sup)
label_mat_train = label_mat_sup_norm[:len(train_indxs)]
label_mat_val = label_mat_sup_norm[-len(val_indxs):]
label_t_mean = np.mean(label_mat_sup, axis=0)
label_t_std = np.std(label_mat_sup, axis=0)
label_t_min = np.min(label_mat_sup, axis=0)
label_t_max = np.max(label_mat_sup, axis=0)
var_dict['label_train_val_mean'] = label_t_mean
var_dict['label_train_val_std'] = label_t_std
var_dict['label_train_val_min'] = label_t_min
var_dict['label_train_val_max'] = label_t_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_min_max_1/vox_fluoro_min_max_1.py | .py | 63,401 | 1,256 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-20
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are goign to continue normalizing the calibration inputs between -1 and 1, but we will only run the min max on the training data set.
# We likewise are going to normalize the label data set, but we will only run the function over the training data set. Moreover, we scale the label data from between -2 and 2.
# We have also removed all dropout from this model, and we will see if the model can overfit the data.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to use 'mse' for the loss, and Nadam for the optimizer.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension weighted by the
    inverse variance of the training labels.

    The per-dimension variances are read from the precomputed label-stats
    HDF5 file, so low-variance output dimensions are penalized more.

    Args:
        y_true: ground-truth label tensor.
        y_pred: predicted label tensor, same shape as ``y_true``.

    Returns:
        Scalar tensor: mean of squared errors divided by the label variance.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 file is closed even if the read
    # raises (the original explicit close() could be skipped on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'v_conv_0_filters': 30,
'v_conv_0_kernel': 11,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 7,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'drop_1_v_rate': 0,
'dense_1_v_units': 75,
'drop_2_v_rate': 0,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
'intra_act_fn': None,
'res_act_fn': 'elu',
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 30,
'conv_10_kernel': 3,
'conv_10_strides': 1,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0,
'conv_11_filters': 30,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
# 6
'conv_12_filters': 30,
'conv_12_kernel': 3,
'conv_12_strides': 1,
'conv_12_pad': 'same',
'spatial_drop_rate_6': 0,
'conv_13_filters': 30,
'conv_13_kernel': 3,
'conv_13_strides': 1,
'conv_13_pad': 'same',
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
# 0
'comb_0_filters': 60,
'comb_0_kernel': 3,
'comb_0_strides': 1,
'comb_0_pad': 'same',
'comb_spatial_0': 0,
'comb_1_filters': 60,
'comb_1_kernel': 3,
'comb_1_strides': 1,
'comb_1_pad': 'same',
# 1
'comb_2_filters': 60,
'comb_2_kernel': 3,
'comb_2_strides': 1,
'comb_2_pad': 'same',
'comb_spatial_1': 0,
'comb_3_filters': 60,
'comb_3_kernel': 3,
'comb_3_strides': 1,
'comb_3_pad': 'same',
# 2
'comb_4_filters': 60,
'comb_4_kernel': 3,
'comb_4_strides': 1,
'comb_4_pad': 'same',
'comb_spatial_2': 0,
'comb_5_filters': 60,
'comb_5_kernel': 3,
'comb_5_strides': 1,
'comb_5_pad': 'same',
# 3
'comb_6_filters': 60,
'comb_6_kernel': 3,
'comb_6_strides': 1,
'comb_6_pad': 'same',
'comb_spatial_3': 0,
'comb_7_filters': 60,
'comb_7_kernel': 3,
'comb_7_strides': 1,
'comb_7_pad': 'same',
# 4
'comb_8_filters': 60,
'comb_8_kernel': 3,
'comb_8_strides': 1,
'comb_8_pad': 'same',
'comb_spatial_4': 0,
'comb_9_filters': 60,
'comb_9_kernel': 3,
'comb_9_strides': 1,
'comb_9_pad': 'same',
# 5
'comb_10_filters': 60,
'comb_10_kernel': 2,
'comb_10_strides': 2,
'comb_10_pad': 'same',
'comb_spatial_5': 0,
'comb_11_filters': 60,
'comb_11_kernel': 2,
'comb_11_strides': 1,
'comb_11_pad': 'same',
'comb_12_filters': 60,
'comb_12_kernel': 1,
'comb_12_strides': 2,
'comb_12_pad': 'same',
# 6
'comb_13_filters': 60,
'comb_13_kernel': 2,
'comb_13_strides': 2,
'comb_13_pad': 'same',
'comb_spatial_6': 0,
'comb_14_filters': 60,
'comb_14_kernel': 2,
'comb_14_strides': 1,
'comb_14_pad': 'same',
'comb_15_filters': 60,
'comb_15_kernel': 1,
'comb_15_strides': 2,
'comb_15_pad': 'same',
# 7
'comb_16_filters': 60,
'comb_16_kernel': 2,
'comb_16_strides': 2,
'comb_16_pad': 'same',
'comb_spatial_7': 0,
'comb_17_filters': 60,
'comb_17_kernel': 2,
'comb_17_strides': 1,
'comb_17_pad': 'same',
'comb_18_filters': 60,
'comb_18_kernel': 1,
'comb_18_strides': 2,
'comb_18_pad': 'same',
# ---
# Final Convs After Fluoro
'comb_19_filters': 60,
'comb_19_kernel': 2,
'comb_19_strides': 1,
'comb_19_pad': 'valid',
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': 50,
'drop_1_comb': 0,
'dense_comb_1_units': 50,
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': 'elu',
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': 60,
'vox_flu_drop_1': 0,
'vox_flu_units_1': 50,
'vox_flu_drop_2': 0,
'vox_flu_units_2': 30,
'vox_flu_drop_3': 0,
'vox_flu_units_3': 15,
'vox_flu_units_4': 6,
# ---
# Cali Units
'cali_0_units': 20,
'drop_1_cali': 0,
'cali_1_units': 20,
'drop_2_cali': 0,
'cali_2_units': 20,
'cali_3_units': 6,
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': 'elu',
'top_level_intra': None,
# ---
# Top Level Dense
'top_drop_0': 0,
'top_dense_0': 6,
'top_dense_1': 6,
'top_dense_2': 6,
'top_drop_1': 0,
'top_dense_3': 6,
'top_dense_4': 6,
'top_drop_2': 0,
'top_dense_5': 6,
'top_dense_6': 6,
'top_drop_3': 0,
'top_dense_7': 6,
'top_dense_8': 6,
'top_drop_4': 0,
'top_dense_9': 6,
'top_dense_10': 6,
'top_drop_5': 0,
'top_dense_11': 6,
'top_dense_12': 6,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
# 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
'activation_fn': 'elu',
'kern_init': 'he_uniform',
'model_opt': tf.keras.optimizers.Nadam,
'learning_rate': 0.001,
'model_epochs': 30,
'model_batchsize': 6,
'model_loss': 'mse',
'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Input geometry: fluoro images are 128x128 single-channel, the voxel
# volume is 197x162x564 single-channel, and the calibration vector holds
# 6 scalars. Every layer in this model uses channels-last data layout.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
# Keras functional-API entry points. input_cali is not used in this section;
# it feeds the cali_* dense stack further down the file.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D-conv branch over the voxel volume: two entry convs, a max-pool, one more
# conv, six residual blocks, a final conv, then two dense layers. The branch
# output (bn_21_v) is consumed by the vox/fluoro combination stage below.
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Each block below is conv -> BN -> spatial dropout -> conv -> BN, summed with
# a shortcut and passed through a shared residual activation.
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
# NOTE(review): params['v_intra_act_fn'] is passed as activity_regularizer on
# this and the other residual inner convs below; the key name suggests an
# activation-function entry, which would not be a valid regularizer unless its
# value is None — confirm against the params definition above.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
# Blocks 4-6 are projection-style: the shortcut path also runs through a conv
# (v_conv_11 / v_conv_14 / v_conv_17) rather than being added directly.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): this block reuses the 'v_conv_12'/'v_conv_13'/'v_conv_14'
# parameter entries and 'v_spatial_drop_rate_6' from block 5 — there are no
# dedicated block-6 entries. Looks like copy-paste; confirm it is intentional.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten the voxel feature map and compress it through two dropout-guarded
# dense layers; bn_21_v is this branch's final output.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# First fluoroscopy branch over input_fluoro_1: an entry conv pair, average
# pooling, then six residual blocks. (An earlier per-image standardization
# step was disabled and is intentionally omitted.)

def _fluoro1_conv(prefix, act_key, tensor):
    # One Conv2D configured from the params entries named `prefix + '_*'`,
    # applied to `tensor`; `act_key` selects the activation entry.
    return tf.keras.layers.Conv2D(filters=params[prefix + '_filters'], kernel_size=params[prefix + '_kernel'], strides=params[prefix + '_strides'], padding=params[prefix + '_pad'], data_format=channel_order, activation=params[act_key], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(tensor)

# Entry layers, pooling, and the BatchNorm that feeds the residual stack.
_f1 = _fluoro1_conv('conv_0', 'activation_fn', input_fluoro_1)
_f1 = tf.keras.layers.BatchNormalization()(_f1)
_f1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(_f1)
_f1 = _fluoro1_conv('conv_1', 'activation_fn', _f1)
_f1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(_f1)
_f1_skip = tf.keras.layers.BatchNormalization()(_f1)
# Six residual blocks: conv -> BN -> spatial dropout -> conv -> BN, summed
# with the shortcut, then the shared residual activation.
# NOTE(review): the second block reuses the 'conv_3' parameter entries for its
# second conv (there are no 'conv_5' entries) — preserved as-is.
_f1_res_plan = [
    ('conv_2', 'spatial_drop_rate_1', 'conv_3'),
    ('conv_4', 'spatial_drop_rate_1', 'conv_3'),
    ('conv_6', 'spatial_drop_rate_3', 'conv_7'),
    ('conv_8', 'spatial_drop_rate_4', 'conv_9'),
    ('conv_10', 'spatial_drop_rate_5', 'conv_11'),
    ('conv_12', 'spatial_drop_rate_6', 'conv_13'),
]
for _f1_head, _f1_rate, _f1_tail in _f1_res_plan:
    _f1 = _fluoro1_conv(_f1_head, 'activation_fn', _f1_skip)
    _f1 = tf.keras.layers.BatchNormalization()(_f1)
    _f1 = tf.keras.layers.SpatialDropout2D(rate=params[_f1_rate])(_f1)
    _f1 = _fluoro1_conv(_f1_tail, 'intra_act_fn', _f1)
    _f1 = tf.keras.layers.BatchNormalization()(_f1)
    _f1 = tf.keras.layers.Add()([_f1_skip, _f1])
    _f1_skip = tf.keras.layers.Activation(activation=params['res_act_fn'])(_f1)
# Branch output; consumed by the fluoro-combination stage below.
act_5_1 = _f1_skip
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Second fluoroscopy branch: same topology as FLUORO ANALYSIS 1, applied to
# input_fluoro_2. Each layer call creates fresh weights, so the branches are
# architecturally identical but do NOT share parameters. Intermediates use
# f2-prefixed names so they do not shadow the first branch's. (The disabled
# per-image standardization step is intentionally omitted here too.)

def _fluoro2_conv(prefix, act_key, tensor):
    # One Conv2D configured from the params entries named `prefix + '_*'`,
    # applied to `tensor`; `act_key` selects the activation entry.
    return tf.keras.layers.Conv2D(filters=params[prefix + '_filters'], kernel_size=params[prefix + '_kernel'], strides=params[prefix + '_strides'], padding=params[prefix + '_pad'], data_format=channel_order, activation=params[act_key], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(tensor)

# Entry layers, pooling, and the BatchNorm that feeds the residual stack.
_f2 = _fluoro2_conv('conv_0', 'activation_fn', input_fluoro_2)
_f2 = tf.keras.layers.BatchNormalization()(_f2)
_f2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(_f2)
_f2 = _fluoro2_conv('conv_1', 'activation_fn', _f2)
_f2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(_f2)
_f2_skip = tf.keras.layers.BatchNormalization()(_f2)
# Six residual blocks mirroring branch 1 exactly, including the reuse of the
# 'conv_3' parameter entries in the second block's second conv.
_f2_res_plan = [
    ('conv_2', 'spatial_drop_rate_1', 'conv_3'),
    ('conv_4', 'spatial_drop_rate_1', 'conv_3'),
    ('conv_6', 'spatial_drop_rate_3', 'conv_7'),
    ('conv_8', 'spatial_drop_rate_4', 'conv_9'),
    ('conv_10', 'spatial_drop_rate_5', 'conv_11'),
    ('conv_12', 'spatial_drop_rate_6', 'conv_13'),
]
for _f2_head, _f2_rate, _f2_tail in _f2_res_plan:
    _f2 = _fluoro2_conv(_f2_head, 'activation_fn', _f2_skip)
    _f2 = tf.keras.layers.BatchNormalization()(_f2)
    _f2 = tf.keras.layers.SpatialDropout2D(rate=params[_f2_rate])(_f2)
    _f2 = _fluoro2_conv(_f2_tail, 'intra_act_fn', _f2)
    _f2 = tf.keras.layers.BatchNormalization()(_f2)
    _f2 = tf.keras.layers.Add()([_f2_skip, _f2])
    _f2_skip = tf.keras.layers.Activation(activation=params['res_act_fn'])(_f2)
# Branch output; consumed by the fluoro-combination stage below.
act_5_2 = _f2_skip
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Join the two fluoro branch outputs along the last (channel) axis — the
# concatenate default — for the shared residual stack that follows.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual stack over the concatenated fluoro features. Block 0 adds
# comb_fluoro_0 itself as the shortcut, so comb_0/comb_1 must preserve both
# the spatial size and the concatenated channel count.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
# Projection-style residual: the strided comb_10/comb_11 path is summed with
# a strided comb_12 projection of act_4 rather than with act_4 directly.
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
# This block continues past the end of the visible region (comb_14 onward).
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# bn_21_v is the voxel-branch output (defined earlier in this script).
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE STACK AFTER FLUORO/VOXEL MERGE
# NOTE: bn_0..bn_5 rebind names used earlier; harmless with the Keras
# functional API since the earlier tensors were already consumed.
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE BRANCH (takes input_cali directly)
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALIBRATION BRANCH WITH VOX/FLUORO BRANCH
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP-LEVEL DENSE RESIDUAL STACK TO OUTPUT: pairs of Dense layers with an
# Add shortcut back to the previous activation (dense res-blocks).
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
# Shortcut taps the calibration branch output bn_4_c here.
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): this layer reads params['top_dense_4'] although it is the
# 12th top dense layer — likely meant params['top_dense_12']; confirm
# against the params dict before changing (same unit count would make it
# a no-op today).
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output: 6 regression targets (linear activation per params).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping: build, compile, plot the architecture to disk, summarize.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Min-max scale *data_set* into *feature_range*.

    When data_min/data_max are supplied (e.g. statistics computed on the
    training split), values outside [data_min, data_max] are first clipped
    to that range so validation/test data cannot leave feature_range.

    Args:
        data_set: array-like numeric data.
        feature_range: (low, high) target interval.
        axis: axis along which min/max are computed when not supplied.
        data_min: precomputed per-feature minimum, or None to compute it.
        data_max: precomputed per-feature maximum, or None to compute it.

    Returns:
        ndarray scaled into feature_range.
    """
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        data_set = np.where(data_set > data_max, data_max, data_set)
    # Bug fix: a constant feature (data_max == data_min) previously divided
    # by zero, producing NaN/inf; such features now map to feature_range[0].
    span = np.asarray(data_max - data_min, dtype=float)
    span = np.where(span == 0, 1.0, span)
    data_in_std_range = (data_set - data_min) / span
    data_scaled = data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
    return data_scaled
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split indices [0, shape) into test and train index arrays.

    Args:
        shape: total number of samples to index.
        num_of_samples: optional number of indices to draw (without
            replacement) before splitting; defaults to all of them.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): disjoint arrays of shuffled indices.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently ignored
    # the `ratio` parameter; default behavior is unchanged.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# -----------------------------------------------------------------
# Open the HDF5 datasets: voxel volumes, two fluoro image sets, calibration
# vectors, and the 6-DOF labels. Handles are closed after slicing below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# Two-stage split: first carve out the test set, then split the remainder
# into validation and training; indices are sorted for h5py fancy indexing.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# History/metadata pickle; kept open until after training and written last.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): each dataset is read fully into memory ([:]) before
# index-slicing — confirm the voxel dataset fits in RAM.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
# Calibration vectors: normalize with statistics of the TRAINING split only;
# the validation set is clipped/scaled with the training min/max.
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_train_min = np.min(cali_mat_train, axis=0)
cali_train_max = np.max(cali_mat_train, axis=0)
cali_train_std = np.std(cali_mat_train, axis=0)
cali_train_avg = np.mean(cali_mat_train, axis=0)
var_dict['cali_train_avg'] = cali_train_avg
var_dict['cali_train_std'] = cali_train_std
var_dict['cali_train_min'] = cali_train_min
var_dict['cali_train_max'] = cali_train_max
cali_train_min_max = min_max_norm(cali_mat_train)
cali_val_min_max = min_max_norm(cali_mat_val, data_min=cali_train_min, data_max=cali_train_max)
cali_file.close()
# Labels are scaled into (-2, 2), again with training-split statistics.
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_train_avg = np.mean(label_mat_train, axis=0)
label_train_std = np.std(label_mat_train, axis=0)
label_train_min = np.min(label_mat_train, axis=0)
label_train_max = np.max(label_mat_train, axis=0)
label_train_min_max = min_max_norm(label_mat_train, feature_range=(-2, 2))
label_val_min_max = min_max_norm(label_mat_val, feature_range=(-2, 2), data_min=label_train_min, data_max=label_train_max)
var_dict['label_train_avg'] = label_train_avg
var_dict['label_train_std'] = label_train_std
var_dict['label_train_min'] = label_train_min
var_dict['label_train_max'] = label_train_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train (voxels get a trailing channel axis), then persist model weights and
# the training history plus the split/normalization metadata collected above.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_train_min_max}, y=label_train_min_max, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_val_min_max], label_val_min_max), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_update/vox_fluoro_res_update.py | .py | 60,680 | 1,171 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# Residual-network experiment: voxel volume + two fluoro images + calibration.
# Experiment name is this script's filename without its '.py' suffix.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (model plot, weights, history pickle) are saved here.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension divided by the
    per-dimension variance of the training labels.

    The variance vector is loaded from
    ~/fluoro/data/compilation/labels_stats.h5py once and cached on the
    function object: this loss is evaluated for every batch, and the
    original implementation reopened the HDF5 file on each call. The file
    handle is now also closed even if the read fails.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            # Only the 'var' dataset is used; 'mean' and 'std' are ignored.
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
        finally:
            stats_file.close()
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary for the whole model; keys are read by name in the
# layer-construction code below, so renaming a key requires updating its uses.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    # NOTE(review): the '*_intra_act_fn' values (None) are passed as
    # activity_regularizer at some Conv3D call sites below — looks like a
    # copy/paste slip for '*_conv_regularizer'; confirm before changing.
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.4,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.4,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.4,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.4,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    # NOTE(review): dropout disabled (0.) here, unlike sibling rates (0.4) —
    # confirm this is intentional.
    'v_spatial_drop_rate_5': 0.,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.4,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.4,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.4,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0.3,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0.3,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV (per-fluoro-image towers)
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.4,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.4,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.4,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.4,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.4,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.4,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.4,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5 (downsampling block; comb_12 is the 1x1/stride-2 projection shortcut)
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0.3,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0.3,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0.3,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0.3,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0.3,
    'cali_1_units': 20,
    'drop_2_cali': 0.3,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    # NOTE(review): verify 'top_drop_5' and 'top_dense_12' are actually
    # consumed by the layer-construction code.
    'top_drop_0': 0.2,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0.2,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0.2,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0.2,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0.2,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0.2,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output: 6 regression targets, linear output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 40,
    'model_batchsize': 9,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Channel layout and input tensor shapes: one voxel volume, two fluoro
# images, and a 6-element calibration vector.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS: 3-D conv tower over the voxel volume
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers (identity shortcuts back to the block input)
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# Residual block 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
# NOTE(review): activity_regularizer is given params['v_intra_act_fn'] (an
# activation setting, None) — likely meant params['v_conv_regularizer'],
# with 'v_intra_act_fn' as the activation; as written no regularizer is
# applied here. Confirm before changing.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# Residual block 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
# NOTE(review): same activity_regularizer / activation suspicion as v_conv_4.
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# Residual block 3 (continues beyond this chunk)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D residual CNN over the first fluoroscopy image (`input_fluoro_1`, bound
# earlier in the file). CAUTION: this section rebinds the generic tensor
# names (bn_0..bn_14, conv_*_1, spat_*_1, add_*, act_0..act_4) that the voxel
# section above also used — only `act_5_1` carries this branch's output
# forward.
# ---
# Entry Fluoro Layers
# Per-image standardization (zero mean / unit variance per frame) before the
# first conv.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# All six blocks are identity-skip residual blocks: Add() combines the conv
# path with the block's unmodified input, so the conv path must preserve
# shape — TODO confirm the configured filters/strides guarantee that.
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): block 2 reuses spatial_drop_rate_1 and the conv_3_* keys for
# conv_5_1 (no conv_5_* keys exist in this section) — presumably intentional
# hyperparameter sharing across blocks, but confirm against the params dict.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch output — the only name from this section consumed downstream.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Structurally identical to FLUORO ANALYSIS 1, applied to `input_fluoro_2`.
# Each tf.keras.layers.* call constructs a NEW layer, so the two fluoro
# branches share hyperparameters (same `params` keys) but NOT weights —
# presumably intentional; if weight sharing was intended, the layer objects
# from branch 1 would have to be reused instead. This section again rebinds
# the generic names (bn_0..bn_14, conv_*_1, spat_*_1, add_*, act_0..act_4);
# only `act_5_2` carries this branch forward.
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): same conv_3_*/spatial_drop_rate_1 key reuse as in branch 1 —
# confirm intentional.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch output — the only name from this section consumed downstream.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Concatenate the two fluoro branch outputs (default concat axis = last;
# with `channel_order` controlling the conv data_format, confirm the default
# axis matches the channel axis).
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual stack over the combined features. Generic names (bn_*, add_*,
# act_*, spat_*) are rebound yet again here; only `act_7` and the dense-head
# tensors below flow onward.
# 0
# Identity skip straight from the raw concatenation: requires comb_1's
# filter count to equal the concatenated channel count — TODO confirm
# against the params definition.
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
# Blocks 5-7 switch to a two-path (projection-style) add: both Add() inputs
# are freshly computed convs of the previous activation, mirroring the
# voxel branch's blocks 4-6.
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
# Final fluoro feature vector, branch-unique name (bn_21_f) parallel to the
# voxel branch's bn_21_v — presumably merged by code past this chunk.
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Element-wise sum of the fluoro-path output (bn_21_f) and the voxel-path
# output (bn_21_v, defined earlier in the file), followed by an activation.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Funnel of dense layers (BN -> Dense -> Dropout repeated) that narrows the
# merged representation down to 6 units (params['vox_flu_units_4']); bn_5_comb
# is later added to the calibration path.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small dense stack over the 6-element calibration input, narrowed to 6 units
# so it can be summed with the fluoro/voxel branch (bn_5_comb).
# NOTE: bn_0..bn_3 are re-bound here; the earlier tensors of the same names
# are already wired into the graph, so only the Python names are reused.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Repeating residual pattern on 6-unit dense layers:
#   Dense -> BN -> Dropout -> Dense -> BN, Add(shortcut), Activation.
# The first shortcut (add_0) taps the calibration output bn_4_c directly;
# subsequent shortcuts use the previous block's activation.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
# NOTE(review): unlike the previous blocks, there is no dropout between bn_10
# and top_dense_11 even though a 'top_drop_5' hyperparameter exists in params —
# possibly an intentional design choice, possibly an omission; confirm.
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# Bug fix: this layer previously read its width from params['top_dense_4'] —
# an apparent copy-paste slip. Use the layer's own 'top_dense_12' entry,
# falling back to 'top_dense_4' so behavior is unchanged if the dedicated key
# is absent from this script's params dict (both are 6 in the sibling config).
top_dense_12 = tf.keras.layers.Dense(units=params.get('top_dense_12', params['top_dense_4']), activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
# Final 6-unit linear layer: the predicted pose/registration vector.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
# Build and compile the 4-input model, write an architecture diagram PNG into
# save_dir, and print the layer summary to stdout.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the four HDF5 data files read-only. Each file stays open until its
# contents are materialized into memory further below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train arrays.

    Args:
        shape: Total number of samples to draw indices from.
        num_of_samples: If given, only this many indices are drawn (without
            replacement) before splitting; otherwise all ``shape`` indices
            are used.
        ratio: Fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): two disjoint 1-D numpy index arrays.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split fraction was previously hard-coded to 0.2, silently
    # ignoring the ``ratio`` argument. All existing call sites use the
    # default ratio=0.2, so their behavior is unchanged.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
num_of_samples = None
# Two-stage split: first carve out the test set, then split the remainder into
# validation and training. The second call returns POSITIONS into
# train_sup_indxs, so they are mapped back to dataset indices below.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted for deterministic, increasing order (h5py fancy indexing requires
# increasing indices; here the arrays are sliced in memory, so this mainly
# keeps ordering reproducible).
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# The pickle file stays open for the whole run; the index splits are recorded
# alongside the training history so the experiment can be reproduced.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each HDF5 dataset fully into memory ([:]) before slicing out the
# validation and training rows, then close the file.
# NOTE(review): test_indxs are recorded above but the test rows are never
# sliced here — presumably evaluation happens in a separate script; confirm.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: inputs are passed as a dict keyed by the Input layer names; a channel
# axis is appended to the voxel volume and to each of the two fluoro images
# (image_mat[:, 0] and image_mat[:, 1] are the two views per sample).
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained model, then the training history plus the index splits.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_norm_nadam_lr_0-01_mae/vox_fluoro_norm_nadam_lr_0-01_mae.py | .py | 62,546 | 1,229 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-18
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to normalize the calibration inputs from -1 to 1.
# We likewise normalize the label dataset using statistics computed from the training and validation datasets; each label component is normalized across all instances.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to perform similar res_net style architecture. Going to also use Nadam as the optimizer and increase the LR to 0.01.
# Also going to use MAE for the loss.
# Experiment bookkeeping: the experiment name is the script filename with the
# trailing '.py' stripped (assumes this script is invoked directly).
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All run artifacts (architecture PNG, saved model, history pickle) are
# written into a per-experiment directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each label component scaled by its variance.

    The per-component variance is loaded from the precomputed label-statistics
    HDF5 file. It is loaded once and cached on the function object, so
    repeated calls (e.g. while tracing the training graph) do not reopen and
    re-read the file, which the previous version did on every invocation.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            # The 'var' dataset holds the per-component variance of the labels.
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
        finally:
            stats_file.close()
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary for the whole model. Naming scheme:
#   'v_*'    -> 3-D conv voxel path
#   'conv_*' -> per-fluoro 2-D conv path ("Entry Fluoro Layers" below)
#   'comb_*' -> combined 2-D path after the two fluoro streams merge
#   'cali_*' -> calibration dense path
#   'vox_flu_*' / 'top_*' -> dense stacks after the paths are merged
# The '*_intra_act_fn': None entries keep pre-addition residual branches
# linear; '*_res_act_fn' is the activation applied after each residual Add.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.4,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.4,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.4,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.4,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    # NOTE(review): rate 0. disables this spatial dropout (other blocks use 0.4)
    # — possibly deliberate for the downsampling block; confirm.
    'v_spatial_drop_rate_5': 0.,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.4,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.4,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.4,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0.3,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0.3,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.4,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.4,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.4,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.4,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.4,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.4,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.4,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0.3,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 100,
    'vox_flu_drop_1': 0.3,
    'vox_flu_units_1': 75,
    'vox_flu_drop_2': 0.3,
    'vox_flu_units_2': 50,
    'vox_flu_drop_3': 0.3,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0.3,
    'cali_1_units': 20,
    'drop_2_cali': 0.3,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0.2,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0.2,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0.2,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0.2,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0.2,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0.2,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Nadam,
    'learning_rate': 0.01,
    'model_epochs': 30,
    'model_batchsize': 6,
    'model_loss': 'mae',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
channel_order = 'channels_last'
# Input shapes: each fluoro view is a 128x128 single-channel image, the voxel
# volume is 197x162x564 single-channel, and calibration is a 6-vector.
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers (names must match the keys passed to model.fit)
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Identity residual blocks 1-3: conv -> BN -> spatial dropout -> conv -> BN,
# then Add with the block input and activate.
# NOTE(review): the second conv of each block passes params['v_intra_act_fn']
# (None) as activity_regularizer — this looks like it was meant to be the
# activation (compare the 2-D comb path, which uses 'c_intra_act_fn' as the
# activation); harmless today since the value is None, but confirm intent.
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# Downsampling residual blocks 4-5: strided main branch plus a strided
# projection conv (v_conv_11 / v_conv_14) on the shortcut branch.
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Merge the two per-image fluoro towers (act_5_1 and act_5_2).
# NOTE(review): concatenate defaults to axis=-1; if channel_order is
# 'channels_first' the channel axis would be 1, so this would join along
# the wrong axis — confirm channel_order matches the default here.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# 0 — identity-shortcut residual block on the merged tensor.
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
# Add with the un-projected input: params['comb_1_filters'] must equal the
# concatenated channel count, and the comb_0/comb_1 pads/strides must
# preserve the spatial dims, for shapes to match — TODO confirm.
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net — one plain conv closes the combined-fluoro
# residual run (act_7).
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs — flatten and reduce the fluoro pathway to the
# feature vector bn_21_f (merged with the voxel branch's bn_21_v below).
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Concatenate the fluoro feature vector (bn_21_f) with the voxel feature
# vector (bn_21_v). (Generic names like bn_0/bn_1 are reused per-section
# throughout this file; only suffixed names survive across sections.)
fluoro_vox_comb = tf.keras.layers.concatenate([bn_21_f, bn_21_v])
# fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
# fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# NOTE(review): bn_0's output is never consumed — vox_flu_0 is applied to
# fluoro_vox_comb directly, so this BatchNormalization is a dead layer.
# Likely vox_flu_0 was meant to take bn_0; left unchanged because fixing
# it alters the computation graph — confirm intent first.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_comb)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_comb)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
# Final layer of this stack takes bn_4 with no preceding Dropout (unlike
# layers 1-3) — same pattern as cali_3 in the calibration stack.
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small MLP over the calibration input (input_cali is defined above this
# view): Dense -> BN -> Dropout, twice, then two more Dense/BN pairs.
# Output bn_4_c is both added to the voxel/fluoro pathway below and used
# as a residual shortcut in the top-level head.
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Six residual-style blocks, each of the shape:
#   Dense -> BN -> Dropout -> Dense -> BN -> Add(shortcut) -> Activation
# The first shortcut is the calibration branch output (bn_4_c); after that
# each block's activation feeds the next block's shortcut.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): units here come from params['top_dense_4'], not a
# 'top_dense_12' key -- looks like a copy/paste reuse; confirm intentional.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# model_opt is a class here; it is instantiated with the configured lr.
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Architecture diagram is saved next to the other experiment artifacts.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
data_min = np.min(data_set, axis=axis)
data_max = np.max(data_set, axis=axis)
data_in_std_range = (data_set - data_min) / (data_max - data_min)
data_scaled = data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
return data_scaled
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train indices.

    Args:
        shape: total number of samples to draw indices from.
        num_of_samples: if given, only this many indices are drawn (without
            replacement); otherwise all *shape* indices are used.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): two disjoint numpy arrays of indices.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # BUG FIX: honor the `ratio` argument (the original hard-coded 0.2 here,
    # silently ignoring the parameter). Default ratio=0.2 preserves behavior.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Open the pre-normalized HDF5 datasets lazily (arrays are materialized later).
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# None => use every sample when drawing the first split.
num_of_samples = None
# Two-stage split: first carve off the test indices, then split the
# remainder into validation and training index sets.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# Map positions within train_sup_indxs back to absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file that will collect the split indices and training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each dataset fully into memory, then slice out the
# validation rows before overwriting the same variable with the training
# rows (keeps only two copies alive at a time).
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
# Labels: concatenate train+val rows, min-max normalize them JOINTLY
# (so both splits share one scale), then split back by position.
label_mat_sup = label_init[:]
label_mat_sup = label_mat_sup[list(list(train_indxs) + list(val_indxs))]
label_mat_sup_norm = min_max_norm(label_mat_sup)
label_mat_train = label_mat_sup_norm[:len(train_indxs)]
label_mat_val = label_mat_sup_norm[-len(val_indxs):]
# Save the un-normalized label statistics so predictions can be
# de-normalized later.
label_t_mean = np.mean(label_mat_sup, axis=0)
label_t_std = np.std(label_mat_sup, axis=0)
label_t_min = np.min(label_mat_sup, axis=0)
label_t_max = np.max(label_mat_sup, axis=0)
var_dict['label_train_val_mean'] = label_t_mean
var_dict['label_train_val_std'] = label_t_std
var_dict['label_train_val_min'] = label_t_min
var_dict['label_train_val_max'] = label_t_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Single fit over the whole in-memory training set; voxel arrays get an
# explicit trailing channel axis to match the Conv3D input shape.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained model plus split indices / label stats / history.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_base/vox_fluoro_base.py | .py | 18,787 | 373 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
# import sys
import pickle
from sklearn.model_selection import train_test_split
# sys.path.append(os.path.abspath(os.path.expanduser('~/fluoro/code')))
# import datacomp.h5py_multidimensional_array as h5py_multidimensional_array
# from datacomp.h5py_multidimensional_array import variable_matrix_loader
# expr_name = sys.argv[0][:-3]
# Experiment bookkeeping: run number and output directory for artifacts.
expr_no = '1'
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'))
# os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load voxel/image/label/calibration slabs over [first_indx:last_indx]
    and return shuffled train / validation splits.

    A 20% test split is carved off first (discarded by this function); 20%
    of the remainder becomes the validation set. Both splits use a fixed
    random_state so the partition is reproducible across calls.
    """
    base = os.path.expanduser('~/fluoro/data/compilation')

    def _slab(fname, dset):
        # Read one dataset slice; the file handle is released immediately.
        with h5py.File(os.path.join(base, fname), 'r') as fh:
            return fh[dset][first_indx:last_indx]

    vox_mat = _slab('voxels_pad.h5py', 'vox_dset')
    image_mat = _slab('images.h5py', 'image_dset')
    label_mat = _slab('labels.h5py', 'labels_dset')
    cali_mat = _slab('calibration.h5py', 'cali_len3_rot')

    # Stage 1: hold out 20% as the (unused here) test set.
    (vox_train_cum, vox_test,
     image_train_cum, image_test,
     cali_train_cum, cali_test,
     label_train_cum, label_test) = train_test_split(
        vox_mat, image_mat, cali_mat, label_mat,
        shuffle=True, test_size=0.2, random_state=42)

    # Stage 2: 20% of the remaining data becomes the validation set.
    (vox_train_sub, vox_val,
     image_train_sub, image_val,
     cali_train_sub, cali_val,
     label_train_sub, label_val) = train_test_split(
        vox_train_cum, image_train_cum, cali_train_cum, label_train_cum,
        shuffle=True, test_size=0.2, random_state=42)

    print('Image sub size:', image_train_sub.shape)
    print('Label sub size:', label_train_sub.shape)
    print('Cali sub size:', cali_train_sub.shape)
    print('Image val size:', image_val.shape)
    print('Label val size:', label_val.shape)
    print('Cali val size:', cali_val.shape)
    print(vox_mat.shape, image_mat.shape, cali_mat.shape)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# Input tensor layouts: channels-last everywhere.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (198, 162, 564, 1)
cali_input_shape = (6,)
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error(y_true, y_pred):
    """Custom Keras loss: mean of the variance-scaled residual, times 1000.

    NOTE(review): despite the name, the residual is NOT squared -- this
    returns mean(((y_pred - y_true) / var) * 1000), so positive and
    negative errors can cancel. Confirm this is intentional before reuse.
    """
    # Per-dimension label statistics are re-read from disk on EVERY call.
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
    mean_dset = stats_file['mean']
    std_dset = stats_file['std']
    var_dset = stats_file['var']
    mean_v = mean_dset[:]  # loaded but unused below
    std_v = std_dset[:]  # loaded but unused below
    var_v = var_dset[:]
    stats_file.close()
    return tf.keras.backend.mean(((y_pred - y_true) / var_v)*1000)
# Hyperparameter dictionary consumed by the layer definitions below.
# Naming: v_* = 3D voxel branch, plain conv_* = 2D fluoro branches,
# *_cali = calibration branch, *_co = combined top-level dense stack.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 10,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    # NOTE(review): the conv_3_* keys appear unused -- the third 2D conv
    # layers below reuse the conv_2_* values; verify before tuning these.
    'conv_3_filters': 80,
    'conv_3_kernel': 2,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    # NOTE(review): regularizer_l1/l2 are not referenced by the visible
    # layer definitions -- confirm whether they are dead config.
    'regularizer_l1': 0.1,
    'regularizer_l2': 0.25,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': 'adam',  # optimizer name string, passed straight to compile()
    'learning_rate': 0.001,
    'model_epochs': 30,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error,
    'model_metric': cust_mean_squared_error
}
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers: one 3D voxel volume, two fluoro images, one 6-vector of
# calibration values (shapes defined above).
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers
# Voxel branch: three Conv3D/SpatialDropout3D/MaxPool3D stages, then
# three Dense layers on the flattened features.
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattended 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1
# Fluoro image 1 branch: BatchNorm, then three Conv2D stages, then
# three Dense layers on the flattened features.
bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(bn_1_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1
# NOTE(review): this layer reuses the conv_2_* / pool_2_* params rather
# than the conv_3_* keys defined in `params` -- confirm intentional.
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattended 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_f_1)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 2
# Fluoro image 2 branch: mirrors the image 1 branch layer-for-layer,
# but with independent (unshared) weights.
bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(bn_1_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 1
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 1
# NOTE(review): reuses conv_2_* / pool_2_* params (see image 1 branch).
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattended 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# All four branch outputs are concatenated along the feature axis.
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output
# Linear 6-unit head: the pose regression target.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# Optimizer is the plain name string here ('adam'), unlike the sibling
# script that instantiates an optimizer class with a learning rate.
model.compile(optimizer=params['model_opt'], loss=params['model_loss'], metrics=[params['model_metric']])
# tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# Count samples up front so the chunked loop below knows how many
# subsets of `subset_size` rows to stream through memory.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_mat = label_init[:]
total_num_of_samples = label_mat.shape[0]
label_file.close()
print('num of train samples:', total_num_of_samples)
subset_size = 2000
hist_file = open('vox_fluoro_hist_objects', 'wb')
hist_dict = {}
# Train incrementally: load one 2000-sample slab at a time via data_comp
# and continue fitting the same model on each slab.
for subset_iter in range(int(np.ceil(total_num_of_samples / subset_size))):
    print('First_index:\t', subset_iter * subset_size)
    print('Last_index:\t', subset_iter * subset_size + subset_size)
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = data_comp(first_indx=subset_iter * subset_size, last_indx=subset_iter * subset_size + subset_size)
    print('\n\n\n\nGot past the load:\n\n\n\n')
    result = model.fit(x={'input_vox': np.expand_dims(vox_train_sub[:], axis=-1), 'input_fluoro_1': np.expand_dims(image_train_sub[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_train_sub[:, 1, :, :], axis=-1), 'input_cali': cali_train_sub[:]}, y=label_train_sub[:], validation_data=([np.expand_dims(vox_val, axis=-1), np.expand_dims(image_val[:, 0, :, :], axis=-1), np.expand_dims(image_val[:, 1, :, :], axis=-1), cali_val], label_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
    # NOTE(review): this pickles the whole keras History object (which
    # holds a model reference) rather than result.history -- verify.
    hist_dict['result' + '_' + str(subset_iter)] = result
# NOTE(review): original indentation was lost; dump/close are placed after
# the loop (single pickle of all accumulated histories) -- confirm.
pickle.dump(hist_dict, hist_file)
hist_file.close()
# result = model.fit(x={'input_vox': np.expand_dims(vox_train_sub, axis=-1), 'input_fluoro_1': np.expand_dims(image_train_sub[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_train_sub[:, 1, :, :], axis=-1), 'input_cali': cali_train_sub}, y=label_train_sub, validation_data=([np.expand_dims(vox_val, axis=-1), np.expand_dims(image_val[:, 0, :, :], axis=-1), np.expand_dims(image_val[:, 1, :, :], axis=-1), cali_val], label_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.join(os.getcwd(), 'vox_fluoro_model_save' + '_' + expr_no + '.h5'))
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_img_stnd_loss/vox_fluoro_img_stnd_loss.py | .py | 22,149 | 457 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating changing the l1 and l2 regularization
# Experiment bookkeeping: derive the experiment name from the script
# filename (strip '.py'), then create a per-experiment output directory.
expr_name = sys.argv[0][:-3]
expr_no = '2'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load voxel/image/label/calibration slabs over [first_indx:last_indx]
    and return shuffled train / validation splits.

    A 20% test split is carved off first (discarded by this function); 20%
    of the remainder becomes the validation set. Both splits use a fixed
    random_state so the partition is reproducible across calls.
    """
    base = os.path.expanduser('~/fluoro/data/compilation')

    def _slab(fname, dset):
        # Read one dataset slice; the file handle is released immediately.
        with h5py.File(os.path.join(base, fname), 'r') as fh:
            return fh[dset][first_indx:last_indx]

    vox_mat = _slab('voxels_pad.h5py', 'vox_dset')
    image_mat = _slab('images.h5py', 'image_dset')
    label_mat = _slab('labels.h5py', 'labels_dset')
    cali_mat = _slab('calibration.h5py', 'cali_len3_rot')

    # Stage 1: hold out 20% as the (unused here) test set.
    (vox_train_cum, vox_test,
     image_train_cum, image_test,
     cali_train_cum, cali_test,
     label_train_cum, label_test) = train_test_split(
        vox_mat, image_mat, cali_mat, label_mat,
        shuffle=True, test_size=0.2, random_state=42)

    # Stage 2: 20% of the remaining data becomes the validation set.
    (vox_train_sub, vox_val,
     image_train_sub, image_val,
     cali_train_sub, cali_val,
     label_train_sub, label_val) = train_test_split(
        vox_train_cum, image_train_cum, cali_train_cum, label_train_cum,
        shuffle=True, test_size=0.2, random_state=42)

    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Input tensor layouts: channels-last everywhere (note the voxel shape
# differs slightly from the sibling script: 199x164x566 here).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """Mean squared error with each output dimension divided by its label
    standard deviation, so the six pose components contribute comparably.

    The std vector comes from the labels-stats HDF5 file. It is loaded once
    and cached on the function object: the original reopened the file on
    every invocation and leaked the handle if the read raised.
    """
    if not hasattr(cust_mean_squared_error_std, '_std_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even if the read fails.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_std._std_v = stats_file['std'][:]
    std_v = cust_mean_squared_error_std._std_v
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension scaled by 1/variance of
    the training labels (equivalent in expectation to the std-scaled loss).

    The variance vector is loaded from the labels-stats HDF5 file once and
    cached on the function object; the original reopened the file on every
    call and leaked the handle if the read raised.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even if the read fails.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyperparameters for the three input branches (3-D conv over voxels, 2-D conv
# over each fluoro image, dense over calibration) and the combined head.
# Every entry is consumed by name in the layer definitions below.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    # Voxel-branch dense funnel widths (1000 -> 500 -> 250).
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV (shared by both fluoro-image branches; weights are not shared)
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output: 6 linear units (one per pose/label component).
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'dense_regularizer_1': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'dense_regularizer_2': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # Optimizer is a class here; it is instantiated with 'learning_rate' at compile time.
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 30,
    'model_batchsize': 5,
    # Loss and metric are the same std-normalized MSE defined above.
    'model_loss': cust_mean_squared_error_std,
    'model_metric': cust_mean_squared_error_std
}
# -----------------------------------------------------------------
# NOTE(review): abandoned placeholder-backed-input experiment, kept for reference.
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
# The four named inputs are matched by name in the feed dict passed to model.fit below.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# Voxel branch: three (Conv3D -> SpatialDropout3D -> MaxPool3D) stages,
# then a flatten and a 1000 -> 500 -> 250 dense funnel.
# First run of 3D Conv Layers
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers
# Only this stage is preceded by batch normalization.
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattended 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# Fluoro image 1 branch. Structurally identical to the image-2 branch below,
# but weights are NOT shared (separate layer instances are constructed).
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
# Per-image standardization wrapped in a Lambda so it is part of the Keras graph.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1 (no spatial dropout at this depth)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattended 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# Fluoro image 2 branch. Mirrors the image-1 branch above with its own
# (unshared) layer instances.
# First run of 2D Conv Layers for Image 2
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2 (no spatial dropout at this depth)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattended 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# Concatenate the three branch embeddings (250 + 60 + 60) with the 10
# calibration features into one vector for the head.
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model (single dropout after the first head layer)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output: 6 linear regression units (the pose/label vector).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# Optimizer class from params is instantiated here with the configured learning rate.
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# NOTE(review): plot_model presumably requires pydot/graphviz to be installed — confirm on the training host.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the four compilation HDF5 files read-only. The handles stay open
# until the arrays are materialized and sliced further below, where each
# file is explicitly closed.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices [0, shape) into test and train arrays.

    Parameters
    ----------
    shape : int
        Total number of samples to draw indices from.
    num_of_samples : int or None
        If given, only this many indices are drawn (a random subset of the
        dataset); otherwise all ``shape`` indices are used.
    ratio : float
        Fraction of the drawn indices assigned to the test split.

    Returns
    -------
    (test_indx, train_indx) : tuple of np.ndarray
        Disjoint index arrays; together they cover every drawn index.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the original hard-coded 0.2 here, silently ignoring ``ratio``.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Use every available sample (set to an int to train on a random subset).
num_of_samples = None
# First carve off a test set, then split the remaining indices into
# validation and training. The second call returns POSITIONS into
# train_sup_indxs, which are mapped back to dataset indices just below.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted for deterministic, increasing-order selection into the arrays.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the split and the training history so the held-out test set can
# be evaluated later; the pickle is written and closed after training.
hist_file = open(os.path.join(save_dir, 'vox_fluoro_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): [:] loads the ENTIRE dataset into memory before slicing —
# simple, but requires RAM for the full voxel matrix.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Images are stored as (sample, view, H, W); each of the two views is fed
# to its own branch with a trailing channel axis added.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_img_stnd_100_loss/vox_fluoro_img_stnd_100_loss.py | .py | 20,842 | 457 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# sys.path.append(os.path.abspath(os.path.expanduser('~/fluoro/code')))
# import datacomp.h5py_multidimensional_array as h5py_multidimensional_array
# from datacomp.h5py_multidimensional_array import variable_matrix_loader
# Derive the experiment name from the script filename.
# NOTE(review): [:-3] assumes the script is invoked with a '.py' suffix — confirm.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# Per-experiment output directory for the model, plot, and history pickle.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load the voxel/image/label/calibration matrices and split them.

    Reads the four compilation HDF5 files under ~/fluoro/data/compilation,
    optionally slicing rows [first_indx:last_indx], then performs a fixed
    (random_state=42) 80/20 train/test split followed by a further 80/20
    train/validation split of the training portion.

    Returns
    -------
    tuple of np.ndarray
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)

    Note: the held-out test arrays are computed but not returned; only the
    train/validation splits are exposed to the caller.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context managers close each file even if a read raises (the original
    # used explicit .close() calls, which leak the handle on error).
    with h5py.File(os.path.join(base_dir, 'voxels_pad.h5py'), 'r') as vox_file:
        vox_mat = vox_file['vox_dset'][first_indx:last_indx]
    with h5py.File(os.path.join(base_dir, 'images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][first_indx:last_indx]
    with h5py.File(os.path.join(base_dir, 'labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][first_indx:last_indx]
    with h5py.File(os.path.join(base_dir, 'calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][first_indx:last_indx]
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Global architecture constants shared by every branch of the model below.
channel_order = 'channels_last'
# Single-channel 128x128 fluoroscopy image input.
img_input_shape = (128, 128, 1)
# Single-channel padded voxel volume input (spatial dims x channels).
vox_input_shape = (199, 164, 566, 1)
# Six calibration scalars per example.
cali_input_shape = (6,)
# NOTE(review): earlier loss-function drafts, kept commented for reference.
# def root_mean_squared_error(y_true, y_pred):
#     return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
#     base_dir = os.path.expanduser('~/fluoro/data/compilation')
#     stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
#     mean_dset = stats_file['mean']
#     std_dset = stats_file['std']
#     var_dset = stats_file['var']
#     mean_v = mean_dset[:]
#     std_v = std_dset[:]
#     var_v = var_dset[:]
#     stats_file.close()
#     return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """Mean squared error with each output dimension divided by its label
    standard deviation, scaled by 100 so loss magnitudes are easier to read.

    The std vector comes from the labels-stats HDF5 file. It is loaded once
    and cached on the function object: the original reopened the file on
    every invocation and leaked the handle if the read raised.
    """
    if not hasattr(cust_mean_squared_error_std, '_std_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even if the read fails.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_std._std_v = stats_file['std'][:]
    std_v = cust_mean_squared_error_std._std_v
    return tf.keras.backend.mean(100 * tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension scaled by 1/variance of
    the training labels, scaled by 100 for readability.

    The variance vector is loaded from the labels-stats HDF5 file once and
    cached on the function object; the original reopened the file on every
    call and leaked the handle if the read raised.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even if the read fails.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(100 * tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyperparameters for the three input branches (3-D conv over voxels, 2-D conv
# over each fluoro image, dense over calibration) and the combined head.
# NOTE(review): 'regularizer_l1'/'regularizer_l2' are defined here but no layer
# below references them; likewise the 'conv_3_*' entries are unused because the
# third 2-D conv stage below reuses the 'conv_2_*' values — possibly a
# copy-paste slip that should be confirmed before reuse.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    # Voxel-branch dense funnel widths (1000 -> 500 -> 250).
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV (shared by both fluoro-image branches; weights are not shared)
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 2,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output: 6 linear units (one per pose/label component).
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'regularizer_l1': 0.1,
    'regularizer_l2': 0.25,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # Optimizer given as a Keras identifier string (default Adam settings).
    'model_opt': 'adam',
    'learning_rate': 0.001,
    'model_epochs': 30,
    'model_batchsize': 5,
    # Loss and metric are the same 100x std-normalized MSE defined above.
    'model_loss': cust_mean_squared_error_std,
    'model_metric': cust_mean_squared_error_std
}
# -----------------------------------------------------------------
# NOTE(review): abandoned placeholder-backed-input experiment, kept for reference.
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
# The four named inputs are matched by name in the feed dict passed to model.fit below.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# Voxel branch: three (Conv3D -> SpatialDropout3D -> MaxPool3D) stages,
# then a flatten and a 1000 -> 500 -> 250 dense funnel. Unlike the
# regularized variant of this script, no activity regularizers are applied.
# First run of 3D Conv Layers
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers
# Only this stage is preceded by batch normalization.
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattended 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_v)
# -----------------------------------------------------------------
# Fluoro image 1 branch. Structurally identical to the image-2 branch below,
# but weights are NOT shared (separate layer instances are constructed).
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
# Per-image standardization wrapped in a Lambda so it is part of the Keras graph.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1
# NOTE(review): this stage reuses the conv_2_*/pool_2_* params rather than the
# conv_3_* entries defined above — looks like a copy-paste slip, but changing
# it would alter the trained architecture, so it is left as-is.
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattended 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_f_1)
# -----------------------------------------------------------------
# Fluoro image 2 branch. Mirrors the image-1 branch above with its own
# (unshared) layer instances.
# First run of 2D Conv Layers for Image 2
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2
# NOTE(review): reuses conv_2_*/pool_2_* params (not conv_3_*) — same apparent
# copy-paste slip as the image-1 branch; left unchanged to preserve the model.
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattended 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# Concatenate the three branch embeddings (250 + 60 + 60) with the 10
# calibration features into one vector for the head.
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model (single dropout after the first head layer)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output: 6 linear regression units (the pose/label vector).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# NOTE(review): the optimizer is the string 'adam' (default settings), so the
# 'learning_rate' entry in params is never applied here.
model.compile(optimizer=params['model_opt'], loss=params['model_loss'], metrics=[params['model_metric']])
# NOTE(review): plot_model presumably requires pydot/graphviz on the training host — confirm.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the four compilation HDF5 files read-only. The handles stay open
# until the arrays are materialized and sliced further below, where each
# file is explicitly closed.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range ``[0, shape)`` into test and train indices.

    Parameters
    ----------
    shape : int
        Total number of samples available.
    num_of_samples : int, optional
        If given, only this many indices are drawn (without replacement)
        before splitting; otherwise all ``shape`` indices are used.
    ratio : float, optional
        Fraction of the drawn indices assigned to the test set (default 0.2).

    Returns
    -------
    (test_indx, train_indx) : tuple of 1-D ndarrays
        Disjoint, unsorted index arrays that together cover every drawn index.
    """
    if num_of_samples is None:
        num_of_samples = shape
    # Draw without replacement so every index appears at most once.
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` parameter; the original hard-coded 0.2 here,
    # silently ignoring any caller-supplied ratio.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# None -> use every labeled sample when drawing the test/train split.
num_of_samples = None
# First split: held-out test set vs. the remaining "supervised" pool.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# Second split is over positions 0..len(train_sup_indxs)-1, so the returned
# values are indices INTO train_sup_indxs, not dataset indices ...
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# ... hence this two-step mapping back to dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Opened early; written once after training completes (see pickle.dump below).
hist_file = open(os.path.join(save_dir, 'vox_fluoro_hist_objects_' + expr_no + '.pkl'), 'wb')
# Persist the exact split so the test set (never touched here) can be evaluated later.
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): `[:]` loads the ENTIRE dataset into RAM before sub-indexing —
# memory-heavy for the voxel data; confirm the host has room for the full array.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# expand_dims adds the trailing channel axis expected by the conv inputs;
# images.h5py stores the two fluoro views along axis 1.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Save the per-epoch history dict (picklable), not the History object itself.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual networks.
# Experiment naming: strip the trailing '.py' from the invoked script path.
# NOTE(review): assumes the script is run as 'python .../vox_fluoro_res.py' —
# sys.argv[0][:-3] blindly drops the last three characters.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (history pickle, model .h5, architecture .png) go under this dir.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each label component scaled by the
    per-component variance of the training labels.

    Used as both the loss and the metric for this model.  The variance
    vector is read from ``~/fluoro/data/compilation/labels_stats.h5py``
    (dataset ``'var'``) exactly once and cached on the function object;
    the original implementation reopened and re-read the HDF5 file on
    every single loss evaluation.

    Parameters
    ----------
    y_true, y_pred : tensors of matching shape
        Ground-truth and predicted label batches (last axis = label components).

    Returns
    -------
    Scalar tensor: mean of squared errors divided element-wise by the
    label variances.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file handle is closed even on error.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Single hyper-parameter dictionary for the whole model graph below.
# Naming scheme: 'v_*' = 3-D voxel branch, bare 'conv_*' = 2-D fluoro branches,
# 'comb_*' / 'c_*' = layers after the two fluoro branches are combined.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    # NOTE(review): 'v_intra_act_fn' is passed as activity_regularizer in the
    # voxel residual blocks below despite its activation-like name (the 2-D
    # branch uses its counterpart 'intra_act_fn' as an activation) — confirm
    # which was intended; with None the distinction is currently moot.
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.3,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.3,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.3,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    # NOTE(review): as written, the residual block "6" in the graph below reuses
    # the v_conv_12/13/14 keys, so the v_conv_15/16/17 entries (identical values)
    # are currently dead — confirm intent.
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.3,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.5,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'dense_1_v_units': 75,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.3,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.3,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.3,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.3,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.3,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.3,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.3,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.3,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.3,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.3,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.3,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.3,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_units_1': 50,
    'vox_flu_units_2': 30,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'cali_1_units': 20,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_dense_5': 6,
    'top_dense_6': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # NOTE(review): this is the Adam *class*, not an instance, and
    # 'learning_rate' is not obviously consumed anywhere in this chunk —
    # confirm the optimizer is instantiated with it before compile.
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 100,
    'model_batchsize': 5,
    # Custom variance-normalized MSE serves as both loss and metric.
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Input tensor shapes (per sample, channel-last): one padded CT voxel volume,
# two 128x128 single-channel fluoro views, and a 6-vector of calibration values.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers — names must match the keys of the x-dict passed to model.fit.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
# Aggressively strided 3-D convs to shrink the (199, 164, 566) volume early.
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Each numbered block below is conv -> BN -> dropout -> conv -> BN, with an
# identity shortcut added before the post-activation ('v_res_act_fn').
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
# NOTE(review): 'v_intra_act_fn' (None) is passed as activity_regularizer here
# while activation stays 'elu'; the 2-D branch instead uses its intra key as the
# activation — confirm which pattern was intended.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
# Blocks 4 and 5 downsample (stride 2), so the shortcut is a strided 1x1x1
# projection conv (v_conv_11 / v_conv_14) instead of an identity.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
# Projection shortcut branch (applied to the block input v_act_2).
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# Consistency fix: this residual block previously re-used the block-5 param
# keys ('v_conv_12/13/14', 'v_spatial_drop_rate_6'), leaving the dedicated
# 'v_conv_15/16/17' and 'v_spatial_drop_rate_7' entries dead.  The dedicated
# keys hold identical values, so the network built here is unchanged; the
# block now simply reads its own hyper-parameters.  Structure: strided
# conv -> BN -> dropout -> conv -> BN, plus a strided 1x1x1 projection
# shortcut (v_conv_17) on the block input v_act_4.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# Heavier dropout (0.5) before the valid-padded conv that collapses the volume.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten to a feature vector; bn_21_v is the voxel branch's final output
# consumed by the combination layers later in the script.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# Per-image standardization (zero mean, unit variance) before the 2-D stack.
# NOTE: Python names bn_0, bn_1, ... from the voxel branch are re-bound here;
# the voxel graph is already wired, so only v_* / bn_21_v names still matter.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# Each block: conv (elu) -> BN -> dropout -> conv (linear, 'intra_act_fn'=None)
# -> BN, identity shortcut, then post-activation 'res_act_fn'.
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# Consistency fix: the second conv and the dropout of this block previously
# re-used the block-1 keys ('conv_3_*', 'spatial_drop_rate_1'), leaving the
# dedicated 'conv_5_*' / 'spatial_drop_rate_2' params dead.  The dedicated
# keys hold identical values, so the constructed layers are unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
# Identity-shortcut residual blocks 3-6; act_5_1 (uniquely named) is the
# fluoro-1 branch output consumed after the two fluoro branches are combined.
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# Residual block 2: conv -> BN -> spatial dropout -> conv -> BN, identity
# shortcut from the block input act_0, then the shared residual activation.
# Fixed copy-paste indexing: this block previously read 'conv_3_*' and
# 'spatial_drop_rate_1' hyperparameters; it now uses its own 'conv_5_*' and
# 'spatial_drop_rate_2' entries (which were defined in params but unused).
# The current params assign identical values to both sets, so the built
# graph is unchanged — but the block now responds to its own knobs.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# Residual blocks 3-6 of the second fluoro branch. All four share the same
# shape: conv -> BN -> spatial dropout -> conv -> BN, identity shortcut from
# the block input, then the shared residual activation. act_5_2 is this
# branch's output.
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch output of the second fluoro path.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Channel-wise concatenation of the two fluoro branch outputs.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual blocks 0-4 over the concatenated features; identity shortcuts
# throughout. NOTE(review): block 0's shortcut adds comb_fluoro_0 directly,
# so comb_1_filters must equal the concatenated channel depth.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# Residual blocks 5-7 of the combined-fluoro stack. Unlike blocks 0-4, these
# use projection shortcuts: a separate 1x1-style conv (comb_12 / comb_15 /
# comb_18) is applied to the block input so the shortcut can match the main
# path's downsampled spatial size and channel depth.
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
# Projection shortcut on the block input act_4.
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
# Projection shortcut on the block input act_5.
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
# Projection shortcut on the block input act_6.
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
# Flatten the conv features and run two dense layers; bn_21_f is the fluoro
# side's final feature vector, merged with the voxel side below.
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Element-wise sum of the fluoro feature vector (bn_21_f) and the voxel
# branch's feature vector (bn_21_v, built earlier in the file) — the two
# dense outputs must therefore have the same width.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Five BN+Dense stages narrowing the merged features; bn_5_comb is this
# stack's output, merged with the calibration branch below.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small dense stack over the calibration input; bn_4_c is its output and must
# match bn_5_comb's width for the Add below (so cali_3_units ==
# vox_flu_units_4).
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Three dense residual blocks (Dense -> BN -> Dense -> BN + shortcut) feeding
# the regression output. The first block's shortcut reuses the calibration
# features bn_4_c, so top_dense_1 must match cali_3_units in width.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
# NOTE(review): top_dense_4/top_dense_5 reuse the 'top_dense_2'/'top_dense_3'
# size params — possibly intentional (same-width residual block), possibly a
# copy-paste carry-over; confirm against the params dict naming.
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
# Penultimate dense layer; no activity regularizer before the output.
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
# Build, compile, render an architecture diagram to save_dir, and print a
# layer summary.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the four HDF5 data sources (voxels, fluoro images, labels,
# calibration) and keep handles to their datasets; the files are closed after
# the arrays are materialized below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index space [0, shape) into test and train indices.

    Args:
        shape: size of the index space to draw from.
        num_of_samples: if given, subsample only this many indices (without
            replacement) before splitting; defaults to all ``shape`` indices.
        ratio: fraction of the sampled indices assigned to the test split.
            The original implementation accepted this argument but ignored
            it, always splitting at a hard-coded 0.2; it is now honored
            (default behavior is unchanged).

    Returns:
        (test_indx, train_indx): disjoint numpy arrays of shuffled indices.
    """
    if num_of_samples is None:
        num_of_samples = shape
    # One draw covers both the "all indices" and "subsample" cases.
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Build test/val/train index sets. The second split is over positions into
# train_sup_indxs, which are then mapped back to absolute dataset indices.
# The index lists are sorted because h5py fancy indexing requires increasing
# order; test_indxs is recorded (in var_dict) but no test data is loaded here.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file receiving the index sets and training history at the end of the
# run.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each dataset fully into memory ([:] reads the whole HDF5
# dataset), slice out the validation and training subsets, then close the
# file. NOTE(review): val is sliced from the full array before train
# overwrites it, so the two subsets are disjoint as intended; this does load
# the entire dataset (including unused test rows) into RAM.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: the two fluoro frames are split along axis 1 of the image array and
# each given a trailing channel dim; voxels likewise get a channel dim.
# Input dict keys must match the Input layer names defined earlier.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained weights and the run's indices + history.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-18
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to normalize the calibration inputs from -1 to 1.
# We likewise are going to normalize the label dataset based on the training and validation datasets. We are going to normalize for each instance over all of the instances.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to use 'mse' for the loss as opposed to the 'mae' in order to penalize bigger losses.
# We are also going to change the optimizer to Nadam as opposed to Adam.
# Lastly, we are going to increase the epochs to 50.
# Experiment bookkeeping: name the run after this script file (strip the
# '.py' suffix from argv[0]) and create its output directory.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-normalized mean squared error.

    Standard Keras loss signature: ``y_true`` / ``y_pred`` are tensors;
    returns a scalar tensor. Each squared residual component is divided by
    the corresponding per-component label variance read from the ``'var'``
    dataset of ``~/fluoro/data/compilation/labels_stats.h5py``.

    The original implementation reopened and re-read the HDF5 file on every
    invocation and closed it manually; the variance vector is now loaded
    once, with a ``with`` block guaranteeing the file handle is closed, and
    cached on the function object for subsequent calls.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'v_conv_0_filters': 30,
'v_conv_0_kernel': 9,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0.4,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 7,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0.4,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0.4,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0.4,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0.,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0.4,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0.4,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0.4,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'drop_1_v_rate': 0.3,
'dense_1_v_units': 75,
'drop_2_v_rate': 0.3,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
'intra_act_fn': None,
'res_act_fn': 'elu',
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0.4,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0.4,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0.4,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0.4,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0.4,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 30,
'conv_10_kernel': 3,
'conv_10_strides': 1,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0.4,
'conv_11_filters': 30,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
# 6
'conv_12_filters': 30,
'conv_12_kernel': 3,
'conv_12_strides': 1,
'conv_12_pad': 'same',
'spatial_drop_rate_6': 0.4,
'conv_13_filters': 30,
'conv_13_kernel': 3,
'conv_13_strides': 1,
'conv_13_pad': 'same',
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
# 0
'comb_0_filters': 60,
'comb_0_kernel': 3,
'comb_0_strides': 1,
'comb_0_pad': 'same',
'comb_spatial_0': 0.4,
'comb_1_filters': 60,
'comb_1_kernel': 3,
'comb_1_strides': 1,
'comb_1_pad': 'same',
# 1
'comb_2_filters': 60,
'comb_2_kernel': 3,
'comb_2_strides': 1,
'comb_2_pad': 'same',
'comb_spatial_1': 0.4,
'comb_3_filters': 60,
'comb_3_kernel': 3,
'comb_3_strides': 1,
'comb_3_pad': 'same',
# 2
'comb_4_filters': 60,
'comb_4_kernel': 3,
'comb_4_strides': 1,
'comb_4_pad': 'same',
'comb_spatial_2': 0.4,
'comb_5_filters': 60,
'comb_5_kernel': 3,
'comb_5_strides': 1,
'comb_5_pad': 'same',
# 3
'comb_6_filters': 60,
'comb_6_kernel': 3,
'comb_6_strides': 1,
'comb_6_pad': 'same',
'comb_spatial_3': 0.4,
'comb_7_filters': 60,
'comb_7_kernel': 3,
'comb_7_strides': 1,
'comb_7_pad': 'same',
# 4
'comb_8_filters': 60,
'comb_8_kernel': 3,
'comb_8_strides': 1,
'comb_8_pad': 'same',
'comb_spatial_4': 0.4,
'comb_9_filters': 60,
'comb_9_kernel': 3,
'comb_9_strides': 1,
'comb_9_pad': 'same',
# 5
'comb_10_filters': 60,
'comb_10_kernel': 2,
'comb_10_strides': 2,
'comb_10_pad': 'same',
'comb_spatial_5': 0.4,
'comb_11_filters': 60,
'comb_11_kernel': 2,
'comb_11_strides': 1,
'comb_11_pad': 'same',
'comb_12_filters': 60,
'comb_12_kernel': 1,
'comb_12_strides': 2,
'comb_12_pad': 'same',
# 6
'comb_13_filters': 60,
'comb_13_kernel': 2,
'comb_13_strides': 2,
'comb_13_pad': 'same',
'comb_spatial_6': 0.4,
'comb_14_filters': 60,
'comb_14_kernel': 2,
'comb_14_strides': 1,
'comb_14_pad': 'same',
'comb_15_filters': 60,
'comb_15_kernel': 1,
'comb_15_strides': 2,
'comb_15_pad': 'same',
# 7
'comb_16_filters': 60,
'comb_16_kernel': 2,
'comb_16_strides': 2,
'comb_16_pad': 'same',
'comb_spatial_7': 0.4,
'comb_17_filters': 60,
'comb_17_kernel': 2,
'comb_17_strides': 1,
'comb_17_pad': 'same',
'comb_18_filters': 60,
'comb_18_kernel': 1,
'comb_18_strides': 2,
'comb_18_pad': 'same',
# ---
# Final Convs After Fluoro
'comb_19_filters': 60,
'comb_19_kernel': 2,
'comb_19_strides': 1,
'comb_19_pad': 'valid',
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': 50,
'drop_1_comb': 0.3,
'dense_comb_1_units': 50,
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': 'elu',
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': 60,
'vox_flu_drop_1': 0.3,
'vox_flu_units_1': 50,
'vox_flu_drop_2': 0.3,
'vox_flu_units_2': 30,
'vox_flu_drop_3': 0.3,
'vox_flu_units_3': 15,
'vox_flu_units_4': 6,
# ---
# Cali Units
'cali_0_units': 20,
'drop_1_cali': 0.3,
'cali_1_units': 20,
'drop_2_cali': 0.3,
'cali_2_units': 20,
'cali_3_units': 6,
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': 'elu',
'top_level_intra': None,
# ---
# Top Level Dense
'top_drop_0': 0.2,
'top_dense_0': 6,
'top_dense_1': 6,
'top_dense_2': 6,
'top_drop_1': 0.2,
'top_dense_3': 6,
'top_dense_4': 6,
'top_drop_2': 0.2,
'top_dense_5': 6,
'top_dense_6': 6,
'top_drop_3': 0.2,
'top_dense_7': 6,
'top_dense_8': 6,
'top_drop_4': 0.2,
'top_dense_9': 6,
'top_dense_10': 6,
'top_drop_5': 0.2,
'top_dense_11': 6,
'top_dense_12': 6,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
# 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
'activation_fn': 'elu',
'kern_init': 'he_uniform',
'model_opt': tf.keras.optimizers.Nadam,
'learning_rate': 0.001,
'model_epochs': 50,
'model_batchsize': 6,
'model_loss': 'mse',
'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Model input geometry: two 128x128 single-channel fluoro frames, one
# 197x162x564 single-channel voxel volume, and a 6-vector (presumably
# calibration values, per the input name — confirm with the data loader).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Symbolic (functional-API) input layers; everything runs in float32.
input_vox = tf.keras.Input(shape=vox_input_shape, dtype='float32', name='input_vox')
input_cali = tf.keras.Input(shape=cali_input_shape, dtype='float32', name='input_cali')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, dtype='float32', name='input_fluoro_1')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, dtype='float32', name='input_fluoro_2')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D convolutional stem over the voxel volume. Every hyperparameter is
# read from the `params` dict; activation and initializer are shared via
# params['activation_fn'] / params['kern_init'].
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
# SpatialDropout3D drops whole feature maps rather than single voxels.
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Blocks 1-3 are identity residuals (stride 1, Add with the block input);
# blocks 4-5 are two-branch residuals (Add of two conv branches off the
# same input).
# NOTE(review): in the second conv of each block below,
# activity_regularizer=params['v_intra_act_fn'] passes what is by name an
# activation function as a regularizer; the 2D blocks pass
# activation=params['intra_act_fn'] instead. This looks like a swapped
# kwarg — harmless only if 'v_intra_act_fn' is None. Confirm before tuning.
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
# Skip connection back to the block input (bn_2).
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 — two-branch residual: main path v_conv_9 -> dropout -> v_conv_10,
# shortcut path v_conv_11, both taken from v_act_2.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5 — same two-branch layout as block 4.
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6 — final voxel residual block (downsampling): main path
# v_conv_15 (stride 2) -> dropout -> v_conv_16 (stride 1), shortcut
# v_conv_17 (kernel 1, stride 2); both branches reach the Add at the
# same spatial scale and channel count.
# Fixed: this block previously re-read the 'v_conv_12'/'v_conv_13'/
# 'v_conv_14' and 'v_spatial_drop_rate_6' entries, leaving the
# 'v_conv_15'..'v_conv_17' and 'v_spatial_drop_rate_7' hyperparameters
# declared in `params` silently unused.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# NOTE(review): activity_regularizer=params['v_intra_act_fn'] is kept
# as-is here, but it looks like a swapped kwarg (compare the 2D blocks,
# which pass activation=params['intra_act_fn']). Confirm intent.
# ---
# Final Conv Layers
# Valid-padding conv to shrink the volume before flattening.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten -> BN -> (dropout + dense) x 2; bn_21_v is the voxel branch's
# final embedding, consumed further down when branches are merged.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D convolutional tower over the first fluoro frame. Intermediate names
# (bn_*, spat_*, conv_*) are reused/rebound by the second tower below;
# only act_5_1 survives as this tower's output.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1 — identity residual: conv (activation) -> dropout -> conv (linear,
# 'intra_act_fn' is None), then Add with the block input and activate.
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2 — identity residual block on the first fluoro tower.
# Fixed: conv_5_1 previously re-read the 'conv_3_*' keys and spat_2_1
# read 'spatial_drop_rate_1', leaving the declared 'conv_5_*' /
# 'spatial_drop_rate_2' entries in `params` unused. With the current
# values this is numerically identical; it just makes the declared
# hyperparameters actually take effect.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3 — identity residual blocks 3-6 follow the same shape as block 1:
# conv (activation) -> dropout -> conv (linear) -> Add -> activation.
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# act_5_1 is the fluoro-1 tower output consumed by the concat below.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Second fluoro tower: structurally identical to FLUORO ANALYSIS 1 but
# fed from input_fluoro_2. Layers are NOT shared with tower 1 — new
# layer objects (new weights) are created here. The intermediate names
# deliberately shadow tower 1's; only act_5_2 is used afterwards.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2 — identity residual block on the second fluoro tower.
# Fixed: as in tower 1, conv_5_1 previously re-read the 'conv_3_*' keys
# and spat_2_1 read 'spatial_drop_rate_1'; the declared 'conv_5_*' /
# 'spatial_drop_rate_2' entries were unused. Numerically identical with
# the current parameter values.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3 — identity residual blocks 3-6, mirroring tower 1's layout.
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# act_5_2 is the fluoro-2 tower output consumed by the concat below.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Concatenate the two tower outputs along the channel axis
# (default axis=-1 with channels_last).
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Identity residual blocks over the fused feature map; the second conv
# of each block is linear ('c_intra_act_fn' is None) before the Add.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
# Skip straight from the concatenated input (shapes match: comb filters
# equal the concatenated channel count, stride 1, 'same' padding).
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.concatenate([bn_21_f, bn_21_v])
# fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_comb)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
    """Linearly rescale *data_set* so each slice along *axis* spans *feature_range*.

    Args:
        data_set: array-like of numeric values.
        feature_range: (low, high) target interval, default (-1, 1).
        axis: axis along which per-feature min/max are computed, default 0.

    Returns:
        Array of the same shape as *data_set*, rescaled per feature.
    """
    lo, hi = feature_range
    feat_min = np.min(data_set, axis=axis)
    feat_max = np.max(data_set, axis=axis)
    # Map each feature onto [0, 1] first, then stretch/shift into [lo, hi].
    unit_scaled = (data_set - feat_min) / (feat_max - feat_min)
    return unit_scaled * (hi - lo) + lo
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split indices ``0..shape-1`` into test and train sets.

    Args:
        shape: total number of samples available to index.
        num_of_samples: if given, subsample this many indices (without
            replacement) before splitting; ``None`` shuffles all of them.
        ratio: fraction of the shuffled indices assigned to the test set.

    Returns:
        (test_indx, train_indx): two disjoint 1-D integer arrays.
    """
    size = shape if num_of_samples is None else num_of_samples
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    # Bug fix: ``ratio`` was previously accepted but ignored — the test
    # fraction was hard-coded to 0.2 regardless of the argument.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_sup = label_init[:]
label_mat_sup = label_mat_sup[list(list(train_indxs) + list(val_indxs))]
label_mat_sup_norm = min_max_norm(label_mat_sup)
label_mat_train = label_mat_sup_norm[:len(train_indxs)]
label_mat_val = label_mat_sup_norm[-len(val_indxs):]
label_t_mean = np.mean(label_mat_sup, axis=0)
label_t_std = np.std(label_mat_sup, axis=0)
label_t_min = np.min(label_mat_sup, axis=0)
label_t_max = np.max(label_mat_sup, axis=0)
var_dict['label_train_val_mean'] = label_t_mean
var_dict['label_train_val_std'] = label_t_std
var_dict['label_train_val_min'] = label_t_min
var_dict['label_train_val_max'] = label_t_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_mae/vox_fluoro_res_mae.py | .py | 57,754 | 1,132 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual netowrks
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mae_normalized(y_true, y_pred):
    """MAE with each output dimension divided by its training-set std.

    The per-dimension std vector is read from ``labels_stats.h5py`` once and
    cached on the function object. The original re-opened the HDF5 file on
    every call, which is prohibitively slow for a loss/metric evaluated every
    batch, and leaked the file handle if the read raised.
    """
    std_v = getattr(cust_mae_normalized, '_std_v', None)
    if std_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even on error.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            std_v = stats_file['std'][:]
        cust_mae_normalized._std_v = std_v
    return tf.keras.backend.mean(tf.keras.backend.abs((y_true - y_pred) / std_v))
def cust_mae(y_true, y_pred):
    """Mean absolute error between targets and predictions."""
    abs_err = tf.keras.backend.abs(y_true - y_pred)
    return tf.keras.backend.mean(abs_err)
def cust_mean_squared_error(y_true, y_pred):
    """Mean squared error between targets and predictions."""
    sq_err = tf.keras.backend.square(y_true - y_pred)
    return tf.keras.backend.mean(sq_err)
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each output dimension divided by its training-set variance.

    The variance vector is read from ``labels_stats.h5py`` once and cached on
    the function object. The original re-opened the HDF5 file on every call —
    far too slow for a loss evaluated per batch — and carried dead
    commented-out reads of the mean/std datasets, removed here.
    """
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the file is closed even on error.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            var_v = stats_file['var'][:]
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'v_conv_0_filters': 30,
'v_conv_0_kernel': 9,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0.3,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 5,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0.3,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0.3,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0.3,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0.3,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0.3,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0.3,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0.5,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'dense_1_v_units': 75,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
'intra_act_fn': None,
'res_act_fn': 'elu',
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0.3,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0.3,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0.3,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0.3,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0.3,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 30,
'conv_10_kernel': 3,
'conv_10_strides': 1,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0.3,
'conv_11_filters': 30,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
# 6
'conv_12_filters': 30,
'conv_12_kernel': 3,
'conv_12_strides': 1,
'conv_12_pad': 'same',
'spatial_drop_rate_6': 0.3,
'conv_13_filters': 30,
'conv_13_kernel': 3,
'conv_13_strides': 1,
'conv_13_pad': 'same',
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
# 0
'comb_0_filters': 60,
'comb_0_kernel': 3,
'comb_0_strides': 1,
'comb_0_pad': 'same',
'comb_spatial_0': 0.3,
'comb_1_filters': 60,
'comb_1_kernel': 3,
'comb_1_strides': 1,
'comb_1_pad': 'same',
# 1
'comb_2_filters': 60,
'comb_2_kernel': 3,
'comb_2_strides': 1,
'comb_2_pad': 'same',
'comb_spatial_1': 0.3,
'comb_3_filters': 60,
'comb_3_kernel': 3,
'comb_3_strides': 1,
'comb_3_pad': 'same',
# 2
'comb_4_filters': 60,
'comb_4_kernel': 3,
'comb_4_strides': 1,
'comb_4_pad': 'same',
'comb_spatial_2': 0.3,
'comb_5_filters': 60,
'comb_5_kernel': 3,
'comb_5_strides': 1,
'comb_5_pad': 'same',
# 3
'comb_6_filters': 60,
'comb_6_kernel': 3,
'comb_6_strides': 1,
'comb_6_pad': 'same',
'comb_spatial_3': 0.3,
'comb_7_filters': 60,
'comb_7_kernel': 3,
'comb_7_strides': 1,
'comb_7_pad': 'same',
# 4
'comb_8_filters': 60,
'comb_8_kernel': 3,
'comb_8_strides': 1,
'comb_8_pad': 'same',
'comb_spatial_4': 0.3,
'comb_9_filters': 60,
'comb_9_kernel': 3,
'comb_9_strides': 1,
'comb_9_pad': 'same',
# 5
'comb_10_filters': 60,
'comb_10_kernel': 2,
'comb_10_strides': 2,
'comb_10_pad': 'same',
'comb_spatial_5': 0.3,
'comb_11_filters': 60,
'comb_11_kernel': 2,
'comb_11_strides': 1,
'comb_11_pad': 'same',
'comb_12_filters': 60,
'comb_12_kernel': 1,
'comb_12_strides': 2,
'comb_12_pad': 'same',
# 6
'comb_13_filters': 60,
'comb_13_kernel': 2,
'comb_13_strides': 2,
'comb_13_pad': 'same',
'comb_spatial_6': 0.3,
'comb_14_filters': 60,
'comb_14_kernel': 2,
'comb_14_strides': 1,
'comb_14_pad': 'same',
'comb_15_filters': 60,
'comb_15_kernel': 1,
'comb_15_strides': 2,
'comb_15_pad': 'same',
# 7
'comb_16_filters': 60,
'comb_16_kernel': 2,
'comb_16_strides': 2,
'comb_16_pad': 'same',
'comb_spatial_7': 0.3,
'comb_17_filters': 60,
'comb_17_kernel': 2,
'comb_17_strides': 1,
'comb_17_pad': 'same',
'comb_18_filters': 60,
'comb_18_kernel': 1,
'comb_18_strides': 2,
'comb_18_pad': 'same',
# ---
# Final Convs After Fluoro
'comb_19_filters': 60,
'comb_19_kernel': 2,
'comb_19_strides': 1,
'comb_19_pad': 'valid',
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': 50,
'dense_comb_1_units': 50,
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': 'elu',
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': 60,
'vox_flu_units_1': 50,
'vox_flu_units_2': 30,
'vox_flu_units_3': 15,
'vox_flu_units_4': 6,
# ---
# Cali Units
'cali_0_units': 20,
'cali_1_units': 20,
'cali_2_units': 20,
'cali_3_units': 6,
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': 'elu',
'top_level_intra': None,
# ---
# Top Level Dense
'top_dense_0': 6,
'top_dense_1': 6,
'top_dense_2': 6,
'top_dense_3': 6,
'top_dense_4': 6,
'top_dense_5': 6,
'top_dense_6': 6,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
'activation_fn': 'elu',
'kern_init': 'glorot_uniform',
'model_opt': tf.keras.optimizers.Adam,
'learning_rate': 0.001,
'model_epochs': 100,
'model_batchsize': 6,
'model_loss': cust_mae_normalized,
'model_metric': cust_mae_normalized
}
# -----------------------------------------------------------------
# Model entry points: one voxel volume, two fluoroscopy frames, and a
# 6-element calibration vector, all declared as float32 inputs.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)


# Input Layers
def _model_input(shape, name):
    # Tiny factory so every entry tensor is declared identically.
    return tf.keras.Input(shape=shape, name=name, dtype='float32')


input_vox = _model_input(vox_input_shape, 'input_vox')
input_fluoro_1 = _model_input(img_input_shape, 'input_fluoro_1')
input_fluoro_2 = _model_input(img_input_shape, 'input_fluoro_2')
input_cali = _model_input(cali_input_shape, 'input_cali')
# -----------------------------------------------------------------
# VOXEL CONVS
# 3D-conv track over input_vox: two entry convs, a max pool, a second
# conv, six residual blocks, a final conv, then flatten + two dense
# layers. The track's output tensor is bn_21_v.
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Blocks 1-3 use identity shortcuts (main path added back onto its own
# input); blocks 4-6 downsample and therefore add a strided projection
# conv on the shortcut path as well.
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
# NOTE(review): 'v_intra_act_fn' (an activation choice, by analogy with
# 'intra_act_fn' in the fluoro tracks below) is passed as
# activity_regularizer here; activation=params['v_intra_act_fn'] with
# activity_regularizer=params['v_conv_regularizer'] may have been
# intended. The same pattern repeats on every second conv of blocks
# 2-6 below — confirm before changing.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 — first block with a projection shortcut (v_conv_11 on the side path)
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): this block reuses block 5's hyperparameter keys
# (v_conv_12/13/14_* and v_spatial_drop_rate_6) instead of distinct
# v_conv_15..17_* / v_spatial_drop_rate_7 keys — looks like copy-paste;
# confirm whether the sharing is intentional. The layer WEIGHTS are
# still independent; only the hyperparameters are shared.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers — flatten the voxel features and project down; bn_21_v
# is the voxel track's output consumed further down the file.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# 2D-conv track over fluoro frame 1: per-image standardization, two
# entry convs, an average pool, then six identity-shortcut residual
# blocks. The track's output tensor is act_5_1.
# NOTE(review): the local names bn_0..bn_14 / act_0..act_4 / spat_*
# shadow those used by the voxel track above. The built graph is
# unaffected (tensors are already wired), but unique names would make
# debugging easier.
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers — each block: conv -> BN -> spatial dropout ->
# conv, added back onto the block input, then a shared activation.
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
# NOTE(review): conv_5_1 is built from the conv_3_* hyperparameter keys
# (not conv_5_*), and reuses spatial_drop_rate_1 above — presumably
# deliberate sharing, but confirm it is not a copy-paste slip.
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# act_5_1 is the output of fluoro track 1.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# Structural duplicate of FLUORO ANALYSIS 1 applied to input_fluoro_2.
# The layers share hyperparameters (same params keys) but NOT weights —
# each tf.keras.layers.* call creates a fresh layer. If weight sharing
# between the two frames were desired, a single shared sub-model should
# be applied to both inputs instead.
# NOTE(review): every intermediate name here (bn_*, conv_*_1, spat_*_1,
# add_*, act_*) rebinds the identically-named variables from track 1;
# only act_5_2 below is new. The graph is unaffected, but the reuse is
# fragile if later code expects track-1 intermediates.
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
# Same conv_3_* key reuse as in track 1 — see the review note there.
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# act_5_2 is the output of fluoro track 2.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# Concatenate the two fluoro feature maps (default axis=-1, i.e. the
# channel axis under channels_last) into one tensor.
# -----------------------------------------------------------------
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# Residual stack over the combined features. Blocks 0-4 use identity
# shortcuts; from block 5 on, the main path downsamples (stride 2), so
# the shortcut path also gets a strided projection conv (comb_12,
# comb_15) before the Add.
# -----------------------------------------------------------------
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5 — projection shortcut: comb_12 convolves act_4 on the side path
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6 — same shape: comb_15 is the strided projection conv on the
# shortcut path of the block that starts at comb_13.
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
# Final plain (non-residual) conv, then flatten into the combined dense head.
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
# bn_21_f: final fluoro-branch embedding, merged with the voxel branch below.
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Element-wise sum of the two branch embeddings (bn_21_v is defined earlier
# in the file); Add requires both to have the same width.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Stack of BN -> Dense pairs over the merged embedding.  NOTE: names like
# bn_0/bn_1 are rebound repeatedly in this script; only the dataflow matters.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small dense tower over the raw calibration input.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
# Add requires cali_3's width to equal vox_flu_4's width.
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Three dense residual blocks (Dense -> BN -> Dense -> BN, Add, activation)
# followed by a narrowing dense layer and the linear regression output.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
# Shortcut from the calibration embedding; Add requires matching widths.
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
# NOTE(review): top_dense_4/5 deliberately reuse the 'top_dense_2'/'top_dense_3'
# sizes so the residual Add below has matching widths — confirm this is intended.
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
# Last hidden layer carries no activity regularizer.
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
# Build, compile, and document the 4-input / 1-output regression model.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# params['model_opt'] is an optimizer class, instantiated here with the
# learning rate (legacy `lr` keyword).
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Writes an architecture diagram PNG alongside the other experiment artifacts.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the four HDF5 data sources read-only; the dataset handles are lazy —
# actual data is only read when sliced below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split index range [0, shape) into test and train index arrays.

    Args:
        shape: int, total number of samples to draw indices from.
        num_of_samples: optional int; if given, only this many indices are
            drawn (a random subset), otherwise all ``shape`` indices are
            shuffled.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): two 1-D numpy arrays of disjoint indices.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # BUGFIX: the test fraction was hard-coded to 0.2, silently ignoring the
    # `ratio` parameter; use the parameter (default preserves old behavior).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Build test/val/train index sets.  The second split returns *positions into*
# train_sup_indxs, which are then mapped back to absolute dataset indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file that will hold the index sets and the training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each HDF5 dataset fully into memory ([:]), then slice out the
# validation and training subsets and close the file.  NOTE(review): the test
# indices are saved above but no test matrices are extracted here — presumably
# evaluated in a separate script; confirm.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train.  expand_dims(..., axis=-1) appends the channel axis expected by the
# model's channels-last inputs; the two fluoro views are split along axis 1.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained model and the index sets + per-epoch history dict.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-23
# We are going to go back to earlier architecture to see if we can overfit the training set.
# We are going to also do per image normalization between -1 and 1.
# We are not going to normalize the label data set. Instead, we will attempt to create a loss function that is better scaled than just normalization of the outputs.
# Experiment identity derived from the script's own filename ([:-3] strips
# the '.py' suffix); all artifacts go under save_dir.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def mean_scaled_error_abs(y_true, y_pred):
    """Sum of absolute errors, each component scaled by |mean| of that label.

    The per-component label means are read from the precomputed statistics
    HDF5 file every time this function body executes (under tf.keras graph
    mode that is once, at trace time).
    """
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    with h5py.File(stats_path, 'r') as stats_file:
        label_means = stats_file['mean'][:]
    scale = tf.keras.backend.abs(tf.cast(label_means, tf.float32))
    return tf.keras.backend.sum(tf.keras.backend.abs(y_pred - y_true) / scale)
def mean_scaled_error_sq(y_true, y_pred):
    """Sum of squared errors, each component scaled by mean^2 of that label.

    Same statistics source as mean_scaled_error_abs: the per-component label
    means are loaded from the precomputed HDF5 file when the body runs.
    """
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    with h5py.File(stats_path, 'r') as stats_file:
        label_means = stats_file['mean'][:]
    scale = tf.keras.backend.square(tf.cast(label_means, tf.float32))
    return tf.keras.backend.sum(tf.keras.backend.square(y_pred - y_true) / scale)
# -----------------------------------------------------------------
# Global data-layout and input-shape constants for the model below.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)     # single-channel 128x128 fluoro image
vox_input_shape = (197, 162, 564, 1)  # single-channel padded voxel volume
cali_input_shape = (6,)             # 6-element calibration vector
# All architecture and training hyperparameters in one dict, keyed by the
# layer names used when the graph is wired up below.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 9,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0,
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 3,
    'v_conv_2_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'valid',
    'v_conv_3_filters': 40,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 2,
    'v_conv_3_strides_1': 2,
    'v_conv_3_strides_2': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0,
    'v_conv_4_filters': 50,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_5_filters': 50,
    'v_conv_5_kernel': 2,
    'v_conv_5_strides_0': 2,
    'v_conv_5_strides_1': 2,
    'v_conv_5_strides_2': 2,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0,
    'v_conv_6_filters': 50,
    'v_conv_6_kernel': 2,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 60,
    'v_conv_7_kernel': 2,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0,
    'v_conv_8_filters': 40,
    'v_conv_8_kernel': 1,
    'v_conv_8_strides_0': 2,
    'v_conv_8_strides_1': 2,
    'v_conv_8_strides_2': 2,
    'v_conv_8_pad': 'same',
    'dense_1_v_units': 200,
    'dense_2_v_units': 200,
    'dense_3_v_units': 150,
    'dense_4_v_units': 125,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # Dropout rates are all 0 here — this run deliberately disables
    # regularization (overfitting experiment per the file header).
    'spatial_drop_rate_1': 0.,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 2,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.,
    'conv_4_filters': 60,
    'conv_4_kernel': 2,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 2,
    'conv_5_strides': 2,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.,
    'conv_6_filters': 80,
    'conv_6_kernel': 2,
    'conv_6_strides': 2,
    'conv_6_pad': 'same',
    'dense_1_f_units': 100,
    'dense_2_f_units': 80,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 40,
    'dense_3_cali_units': 30,
    # Top Level Dense Units
    'dense_1_co_units': 300,
    'drop_1_comb_rate': 0.,
    'dense_2_co_units': 250,
    'dense_3_co_units': 200,
    'drop_2_comb_rate': 0.,
    'dense_4_co_units': 150,
    'dense_5_co_units': 100,
    'dense_6_co_units': 80,
    'dense_7_co_units': 60,
    'dense_8_co_units': 40,
    'dense_9_co_units': 20,
    'dense_10_co_units': 10,
    'dense_11_co_units': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping — regularizers disabled for this run.
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.SGD,  # optimizer *class*, instantiated at compile time
    'learning_rate': 0.01,
    'nest_bool': False,
    'SGD_momentum': 0,
    'model_epochs': 1000,
    'model_batchsize': 6,
    'model_loss': 'mse',
    'model_metric': 'mse'
}
# -----------------------------------------------------------------
# Input Layers
# Four model inputs, shapes taken from the constants defined above.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# Voxel branch: 3D conv stack (Conv3D/BN/SpatialDropout3D, one max-pool),
# flattened into four dense layers.  The bn_* names used here are rebound by
# the fluoro branches below; only the layer dataflow matters.
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_1 = tf.keras.layers.BatchNormalization()(v_conv_1)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_4)
v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
# Dense head of the voxel branch; dense_4_v feeds the concatenation below.
bn_8 = tf.keras.layers.BatchNormalization()(v_flatten_1)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_9)
bn_10 = tf.keras.layers.BatchNormalization()(dense_2_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(dense_3_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_11)
# -----------------------------------------------------------------
# Fluoro view 1 branch: 2D conv stack sharing the same hyperparameter keys as
# view 2 (weights are NOT shared — each view builds its own layers).
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
pool_1_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(conv_6_1)
# Dense Layers After Flattended 2D Conv
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# Fluoro view 2 branch: structurally identical to view 1 (same hyperparameter
# keys, independent layer instances / weights), applied to input_fluoro_2.
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
pool_1_2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_2)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
# pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
bn_4 = tf.keras.layers.BatchNormalization()(conv_4_2)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(conv_6_2)
# Dense Layers After Flattended 2D Conv
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
dense_0_comb = tf.keras.layers.concatenate([dense_4_v, dense_3_f_1, dense_3_f_2, dense_3_cali])
# -----------------------------------------------------------------# Dense Layers at Top of Model
bn_1 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_1_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(bn_2)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(dense_2_comb)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(dense_3_comb)
dense_drop_2 = tf.keras.layers.Dropout(rate=params['drop_2_comb_rate'])(bn_4)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_2)
dense_5_comb = tf.keras.layers.Dense(units=params['dense_5_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_4_comb)
dense_6_comb = tf.keras.layers.Dense(units=params['dense_6_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_5_comb)
dense_7_comb = tf.keras.layers.Dense(units=params['dense_7_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_6_comb)
dense_8_comb = tf.keras.layers.Dense(units=params['dense_8_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_7_comb)
dense_9_comb = tf.keras.layers.Dense(units=params['dense_9_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_8_comb)
dense_10_comb = tf.keras.layers.Dense(units=params['dense_10_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_9_comb)
dense_11_comb = tf.keras.layers.Dense(units=params['dense_11_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_10_comb)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], activity_regularizer=None, name='main_output')(dense_11_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate'], momentum=params['SGD_momentum'], nesterov=params['nest_bool']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range ``[0, shape)`` into test/train indices.

    Args:
        shape: total number of samples available to index.
        num_of_samples: if given, only this many indices (drawn without
            replacement from ``range(shape)``) are split; otherwise all
            ``shape`` indices are shuffled and split.
        ratio: fraction of the (possibly subsampled) indices assigned to
            the test set.

    Returns:
        Tuple ``(test_indx, train_indx)`` of numpy index arrays that
        together partition the drawn indices.
    """
    size = shape if num_of_samples is None else num_of_samples
    # Sampling without replacement of the full size is an in-place shuffle.
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    # Bug fix: the split previously hard-coded 0.2 and silently ignored
    # the `ratio` parameter. Default behavior is unchanged (ratio=0.2).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Open the precompiled HDF5 datasets (voxels, both fluoro views, calibration,
# labels). Handles stay open only until the needed rows are sliced out below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_mat_base = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_mat_base_1 = image_grp_1['min_max_dset_per_image']
image_mat_base_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_mat_base = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_mat_base = label_file['labels_dset']
# NOTE(review): num_of_samples = 1 restricts the whole run to a single drawn
# sample — this looks like a debugging leftover; confirm before a real run.
num_of_samples = 1
test_indxs, train_sup_indxs = split_train_test(len(label_mat_base), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# The second split returned positions into train_sup_indxs; map back to
# absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires index lists in increasing order.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
# NOTE(review): this overrides the computed validation split with the single
# fixed index [1] — almost certainly a debugging leftover; confirm intent.
val_indxs = list([1])
train_indxs = sorted(list(train_indxs))
# Pickle file that will receive the index lists and training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Slice needed rows into memory, then close each HDF5 file promptly.
# vox_mat_base = vox_init[:]
vox_mat_val = vox_mat_base[val_indxs]
vox_mat_train = vox_mat_base[train_indxs]
vox_file.close()
# image_mat_base_1 = image_init_1[:]
image_mat_val_1 = image_mat_base_1[val_indxs]
image_mat_train_1 = image_mat_base_1[train_indxs]
# image_mat_base_2 = image_init_2[:]
image_mat_val_2 = image_mat_base_2[val_indxs]
image_mat_train_2 = image_mat_base_2[train_indxs]
image_file.close()
# cali_mat_base = cali_init[:]
cali_mat_val = cali_mat_base[val_indxs]
cali_mat_train = cali_mat_base[train_indxs]
cali_file.close()
# label_mat_base = label_init[:]
label_mat_val = label_mat_base[val_indxs]
label_mat_train = label_mat_base[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Voxel arrays get a trailing channel axis to match the (..., 1) input shape.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained model plus the index lists and history for later review.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
# === end of previous script ===
# --- file: code/vox_fluoro/history/vox_fluoro_mean_scale_abs/vox_fluoro_mean_scale_abs.py (john-drago/fluoro, Python) ---
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-23
# We are continuing the usage of the architecture based on the residual nets.
# In this file, we are going to normalize the calibration inputs from -1 to 1.
# We are going to also do per image normalization between -1 and 1.
# We are not going to normalize the label data set. Instead, we will attempt to create a loss function that is better scaled than just normalization of the outputs.
# Experiment bookkeeping: name derived from the invoking script filename.
# NOTE(review): sys.argv[0][:-3] strips a trailing '.py' but keeps any leading
# path components — confirm the script is always launched by bare filename.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# Directory receiving the model plot, saved weights, and history pickle.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def mean_scaled_error(y_true, y_pred):
    """Sum of absolute errors, scaled elementwise by |per-component label mean|.

    The per-component means are read from labels_stats.h5py on the first
    call and cached on the function object, so repeated invocations do not
    re-open and re-read the file (the original re-read it on every call and
    leaked the handle if the read raised).

    Args:
        y_true: ground-truth label tensor.
        y_pred: predicted label tensor.

    Returns:
        Scalar tensor: sum(|y_pred - y_true| / |mean|).
    """
    if not hasattr(mean_scaled_error, '_mean_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            # Materialize the dataset before closing the file.
            mean_scaled_error._mean_v = stats_file['mean'][:]
        finally:
            stats_file.close()
    mean_v = mean_scaled_error._mean_v
    return tf.keras.backend.sum(tf.keras.backend.abs(y_pred - y_true) / tf.keras.backend.abs(tf.cast(mean_v, tf.float32)))
# -----------------------------------------------------------------
# Hyperparameter dictionary for the whole model: 3-D voxel conv tower,
# two shared-style 2-D fluoro towers, a post-concatenation residual stack,
# calibration dense layers, and top-level dense layers, plus optimizer /
# loss / training settings at the bottom.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    # NOTE(review): the v_conv_15..17 keys below (and v_spatial_drop_rate_7)
    # are defined but the model-construction code for residual block 6 reuses
    # the v_conv_12..14 / v_spatial_drop_rate_6 keys instead (values are
    # currently identical, so no behavioral difference today).
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0,
    'cali_1_units': 20,
    'drop_2_cali': 0,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    # Regularizers are currently disabled; the commented l1 variants below
    # preserve the values used in earlier experiments.
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Nadam,
    'learning_rate': 0.001,
    'model_epochs': 40,
    'model_batchsize': 6,
    'model_loss': mean_scaled_error,
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Input shapes: single-channel 128x128 fluoro images, a (197, 162, 564, 1)
# voxel volume, and a 6-vector of calibration values.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers — names must match the keys passed to model.fit().
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS — 3-D conv tower over the voxel volume: two strided entry
# convs, a pool, then six residual blocks (Add + activation), a final valid
# conv, and two dense layers producing the voxel feature vector (bn_21_v).
# NOTE(review): several Conv3D calls below pass params['v_intra_act_fn']
# (None) as activity_regularizer; given the res-net pattern this was
# presumably meant to be the pre-Add activation — harmless today since both
# values are None, but confirm before enabling either setting.
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1 — identity shortcut: bn_2 added back after two convs
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2 — identity shortcut from v_act_0
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3 — identity shortcut from v_act_1
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 — downsampling block: projection shortcut via strided v_conv_11
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5 — downsampling block with 1x1x1 projection shortcut (v_conv_14)
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): this block reuses the v_conv_12..14 and v_spatial_drop_rate_6
# params although v_conv_15..17 / v_spatial_drop_rate_7 exist in `params` —
# presumably a copy-paste slip (current values are identical, so behavior is
# unaffected today).
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers — flatten the conv output into the voxel feature vector.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# First fluoroscopy-image branch: a 2-conv stem on input_fluoro_1, one
# average pool, then six residual blocks with identity shortcuts
# (conv -> BN -> spatial dropout -> conv -> BN -> Add -> activation).
# Only the final activation, act_5_1, is used after this section.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# NOTE(review): block 2 reuses spatial_drop_rate_1 (no *_2 key is used) and
# conv_5_1 below reuses the conv_3_* hyperparameters rather than conv_5_* —
# possibly deliberate parameter sharing, possibly a copy-paste slip; confirm
# against the params dict.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# act_5_1 is branch 1's output; every other name above is re-bound by the
# FLUORO ANALYSIS 2 section that follows.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Second fluoro branch: line-for-line the same architecture as
# FLUORO ANALYSIS 1 but applied to input_fluoro_2. Each layer constructor is
# called again, so this branch gets its own weights (nothing is shared with
# branch 1). CAUTION: it re-binds the same intermediate names
# (conv_0_1, bn_1, spat_0_1, ...) used by branch 1 — safe only because
# branch 1's sole surviving output is act_5_1; keep that in mind before
# reordering or extracting code here.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# NOTE(review): same parameter reuse as branch 1 — spatial_drop_rate_1 and
# the conv_3_* keys in conv_5_1; confirm intent against the params dict.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# act_5_2 is branch 2's output, concatenated with act_5_1 in the next section.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# NOTE(review): concatenate() uses its default axis=-1. If channel_order is
# 'channels_first' this concatenates along the last *spatial* axis rather
# than channels — verify that channel_order is 'channels_last' or pass an
# explicit axis.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Eight residual blocks on the concatenated fluoro features. Blocks 0-4 use
# identity shortcuts; blocks 5-7 instead run a 1-conv projection
# (comb_12 / comb_15 / comb_18) on the shortcut path before the Add.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5 (projection shortcut: comb_12 convolves act_4 on the skip path)
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6 (projection shortcut via comb_15)
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7 (projection shortcut via comb_18)
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
# bn_21_f: fluoro embedding, merged with the voxel embedding bn_21_v below.
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Elementwise Add of the two embeddings — requires dense_comb_1_units ==
# dense_2_v_units. TODO(review): confirm the params dict enforces this.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Five-layer dense stack (BN + dropout between layers); output bn_5_comb is
# later added to the calibration head bn_4_c, so vox_flu_units_4 must match
# cali_3_units — TODO confirm.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Four-layer dense head on the calibration vector input_cali. Its output
# bn_4_c is (a) added to the imaging pathway here and (b) reused as a
# residual shortcut in the top-level dense stack below.
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
# Elementwise Add — requires cali_3_units == vox_flu_units_4 (see above).
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Six residual dense blocks (Dense -> BN -> Dropout -> Dense -> BN -> Add
# with the previous activation) feeding the final regression output. The
# first block's shortcut is bn_4_c from the calibration head.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): top_dense_12 reuses params['top_dense_4'] for its width —
# there is no 'top_dense_12' key in use, so this looks like a copy-paste
# slip (harmless only if the widths were meant to match); confirm.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output', activity_regularizer=None)(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
# Four-input, one-output functional model. The optimizer is constructed via
# the legacy `lr=` keyword — fine on TF 1.x/early 2.x, deprecated in favor
# of `learning_rate=` in later TF 2 releases.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
    """Linearly rescale *data_set* so that, along *axis*, its minimum and
    maximum map onto feature_range[0] and feature_range[1] respectively
    (same contract as sklearn's MinMaxScaler applied to one array)."""
    lo, hi = feature_range
    col_min = np.min(data_set, axis=axis)
    col_max = np.max(data_set, axis=axis)
    # First squash into [0, 1], then stretch/shift into the requested range.
    unit_scaled = (data_set - col_min) / (col_max - col_min)
    return unit_scaled * (hi - lo) + lo
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices 0..shape-1 into (test, train) splits.

    Args:
        shape: total number of samples to draw indices from.
        num_of_samples: optional subsample size; when given, only this many
            indices are drawn (without replacement) before splitting.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): disjoint numpy index arrays.
    """
    draw = shape if num_of_samples is None else num_of_samples
    shuffled_indices = np.random.choice(shape, size=draw, replace=False)
    # Bug fix: the test-split size previously hard-coded 0.2 and silently
    # ignored the `ratio` argument.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# --- Load the HDF5 datasets, derive the train/val/test index split, train, and persist results.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
# Pre-normalized (per-image min/max) fluoro frames for the two views.
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
num_of_samples = None  # None => use every sample in the dataset
# First split off the held-out test set, then split the remainder into val/train.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# split_train_test returned *positions into* train_sup_indxs; map back to dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Opened now, written only after training completes (stays open throughout the fit).
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE: `[:]` materializes each full dataset in RAM before slicing; the test
# rows are loaded but never used beyond saving their indices above.
vox_mat_base = vox_init[:]
vox_mat_val = vox_mat_base[val_indxs]
vox_mat_train = vox_mat_base[train_indxs]
vox_file.close()
image_mat_base_1 = image_init_1[:]
image_mat_val_1 = image_mat_base_1[val_indxs]
image_mat_train_1 = image_mat_base_1[train_indxs]
image_mat_base_2 = image_init_2[:]
image_mat_val_2 = image_mat_base_2[val_indxs]
image_mat_train_2 = image_mat_base_2[train_indxs]
image_file.close()
cali_mat_base = cali_init[:]
cali_mat_val = cali_mat_base[val_indxs]
cali_mat_train = cali_mat_base[train_indxs]
cali_file.close()
label_mat_base = label_init[:]
label_mat_val = label_mat_base[val_indxs]
label_mat_train = label_mat_base[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Voxel arrays get an explicit trailing channel axis to match the (…, 1) input shape.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# Persist the per-epoch history alongside the index split for later analysis.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_update_mae/vox_fluoro_res_update_mae.py | .py | 60,658 | 1,171 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual netowrks
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-scaled mean squared error.

    Divides each label dimension's squared error by that dimension's dataset
    variance (loaded from labels_stats.h5py), so all six regression targets
    contribute on a comparable scale.

    Fix: the variance vector was previously re-read from the HDF5 file on
    every loss evaluation; it is now loaded once, via a context manager, and
    cached on the function object for all subsequent calls.
    """
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            # 'mean' and 'std' datasets also exist in this file but are unused here.
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary driving every layer construction below.
# NOTE(review): the graph-building code reuses some earlier keys where the
# dedicated ones were clearly intended (e.g. v_conv_15/16/17 and conv_5_* are
# defined here but v_conv_12/13/14 / conv_3_* are read in their place); the
# values are currently identical, so behavior is unaffected — TODO confirm intent.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.4,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.4,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.4,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.4,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    # NOTE(review): 0. here (no dropout) vs 0.4 for the other voxel drop rates — TODO confirm deliberate.
    'v_spatial_drop_rate_5': 0.,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.4,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.4,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.4,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0.3,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0.3,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.4,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.4,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.4,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.4,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.4,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.4,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.4,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0.3,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0.3,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0.3,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0.3,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0.3,
    'cali_1_units': 20,
    'drop_2_cali': 0.3,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0.2,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0.2,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0.2,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0.2,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0.2,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0.2,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 40,
    'model_batchsize': 5,
    # 'mae' here, despite the custom variance-scaled MSE defined above being available.
    'model_loss': 'mae',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Fixed input geometry: two 128x128 single-channel fluoro views, one
# 197x162x564 voxel volume, and a 6-vector of calibration values.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# NOTE(review): several Conv3D layers below pass params['v_intra_act_fn'] (None) as
# activity_regularizer — reads like it was meant for the `activation` argument; as
# written it simply disables the activity regularizer on those layers. TODO confirm.
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
# Units 1-3 are identity-shortcut residual blocks (conv -> BN -> dropout ->
# conv -> BN -> add input -> activation); units 4-5 downsample, so the
# shortcut is itself a strided conv (projection shortcut).
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
# Projection shortcut: strided 3x3 conv applied directly to the unit input.
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6 — downsampling residual unit (main path conv15->conv16, projection shortcut conv17).
# Fix (consistency): this unit previously re-read the v_conv_12/13/14 and
# v_spatial_drop_rate_6 params by copy-paste; it now reads its own
# v_conv_15/16/17 and v_spatial_drop_rate_7 keys, which were defined in
# `params` but never used. All corresponding values are currently identical,
# so the built graph is unchanged.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# Collapse the residual stack's output with a 'valid' conv before flattening.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten -> two dropout+dense blocks; bn_21_v is the voxel branch's output embedding.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# Fluoro view 1: per-image standardization, two strided entry convs, average
# pool, then a run of identity-shortcut residual units.
# NOTE: bn_0..bn_14 and act_0..act_4 rebind names already used by the voxel
# branch above — safe only because those tensors were already consumed.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2 — identity-shortcut residual unit on the fluoro-1 path.
# Fix (consistency): the dropout previously re-read 'spatial_drop_rate_1' and
# the second conv re-read the 'conv_3_*' keys by copy-paste; they now use
# their own 'spatial_drop_rate_2' and 'conv_5_*' keys, which were defined in
# `params` but never used. All corresponding values are currently identical,
# so the built graph is unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# Residual units 3-6 on the fluoro-1 path; all identity-shortcut
# (conv -> BN -> dropout -> conv -> BN -> add input -> activation).
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Final activation of the fluoro-1 branch; kept under a unique name so the
# fluoro-2 section below can safely rebind the intermediate names.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Second fluoroscopic image branch. It mirrors the fluoro-1 branch above and
# *shares hyperparameters* (the same conv_*/spatial_drop_rate_* params keys)
# but builds independent layers, so no weights are shared between branches.
# Local names bn_0..bn_14, act_0..act_4 shadow those of the first branch.
# ---
# Entry Fluoro Layers
# Per-image standardization (zero mean, unit variance per frame) before BN.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# NOTE(review): reuses 'spatial_drop_rate_1' rather than a dedicated rate_2
# key -- possibly a copy-paste slip; confirm against the params dict.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
# NOTE(review): this conv is named conv_5_1 but is configured from the
# 'conv_3_*' hyperparameter keys (not 'conv_5_*') -- verify this is intended.
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Final feature map of the fluoro-2 branch.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Channel-wise concatenation of the two fluoro branch outputs.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual stack over the merged fluoro features. Blocks 0-4 use identity
# shortcuts; blocks 5-7 instead run a parallel 1-conv projection branch
# (comb_12/comb_15/comb_18) off the block input and add the two conv paths.
# Local names bn_*/spat_*/add_*/act_* shadow the earlier branch-local ones.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
# Projection-style shortcut: a separate conv over the block input (act_4).
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
# Flatten the combined-fluoro feature map and project through two Dense
# layers, yielding bn_21_f (fluoro feature vector) to merge with the voxel
# branch below.
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
# NOTE(review): despite the name, dense_comb_1 is a Dropout layer, not Dense.
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# bn_21_v is the voxel-branch feature vector defined earlier in the file
# (outside this excerpt); element-wise sum requires matching widths --
# presumably dense_comb_1_units equals the voxel branch's final units.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Plain Dense/BN/Dropout stack over the merged representation, ending in
# bn_5_comb (merged with the calibration branch below). The last Dense
# (vox_flu_4) has no preceding dropout.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small Dense stack over the calibration input vector, ending in bn_4_c.
# bn_4_c is used twice: here as one operand of the top-level Add, and again
# further down as a residual shortcut into the top-level dense stack.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
# Element-wise sum requires cali_3_units to match vox_flu_units_4.
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Residual dense stack: pairs of Dense layers (Dropout between) whose BN
# output is added back to the previous activation. The very first residual
# add uses the calibration features (bn_4_c) as the shortcut.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): top_dense_12 reuses params['top_dense_4'] for its units and
# disables the activity regularizer -- possibly intended to be a dedicated
# 'top_dense_12' key; confirm against the params dict.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
# Final regression head; width/activation come from main_output_* params.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
# Assemble the four-input model, compile it with the configured optimizer /
# loss / metric, save an architecture diagram next to the run outputs, and
# print a layer summary.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the compiled HDF5 datasets read-only: voxel grids, fluoro image pairs,
# pose labels, and calibration vectors. The files stay open until the
# corresponding arrays are sliced into memory below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split indices ``[0, shape)`` into test and train arrays.

    Args:
        shape (int): Total number of samples to draw indices from.
        num_of_samples (int | None): If given, only this many indices are
            sampled (without replacement) before splitting; otherwise all
            ``shape`` indices are shuffled.
        ratio (float): Fraction of the sampled indices assigned to the test
            split (default 0.2, preserving the previous behavior).

    Returns:
        tuple[np.ndarray, np.ndarray]: ``(test_indx, train_indx)`` — disjoint
        index arrays whose union is the sampled index set.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` parameter; the original hard-coded 0.2 here,
    # silently ignoring any caller-supplied split fraction.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Carve the dataset into test / validation / train index sets. The second
# split operates on positions within train_sup_indxs and is then mapped back
# to absolute dataset indices. Sorted lists are required because h5py fancy
# indexing needs increasing indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# History/bookkeeping pickle; the handle stays open until after training.
# The test indices are only recorded here -- no test evaluation happens in
# this script.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Each dataset is read fully into memory ([:]) before fancy-index slicing,
# then its file is closed. Note this loads the *entire* dataset into RAM,
# including the unused test rows.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train on the four named inputs; images are split into the two fluoro views
# along axis 1 and given a trailing channel dim, as is the voxel volume.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained weights and the run bookkeeping (indices + history).
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-18
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to normalize the calibration inputs from -1 to 1.
# We likewise are going to normalize the label dataset based on the training and validation datasets. We are going to normalize for each instance over all of the instances.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to use 'mae' for the loss.
# Experiment bookkeeping: the run name is this script's filename without the
# trailing '.py'; all outputs for this run go into a matching directory under
# ~/fluoro/code/jupyt/vox_fluoro.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Keras loss: squared error scaled by the per-component label variance.

    Each squared error component is divided by the corresponding variance
    loaded from the precomputed ``labels_stats.h5py`` file, so components
    with a large natural spread do not dominate the mean.

    NOTE(review): params['model_loss'] below is 'mae' — confirm whether this
    custom loss is actually wired into model.compile elsewhere in the file.

    Args:
        y_true: ground-truth label tensor.
        y_pred: predicted label tensor of the same shape.

    Returns:
        Scalar tensor: mean of the variance-normalized squared errors.
    """
    # Cache the variance vector on the function object: a Keras loss can be
    # invoked once per batch, and re-opening / re-reading the HDF5 file on
    # every call is pure overhead.  The context manager guarantees the file
    # handle is closed even if the read raises.
    if getattr(cust_mean_squared_error_var, '_var_v', None) is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary for the whole network.  Naming convention:
#   v_*     -> 3D convolutional branch over the voxel volume
#   (bare)  -> 2D convolutional branches over the two fluoro images
#   comb_*  -> layers after the two fluoro branches are merged
#   cali_*  -> dense branch over the 6-element calibration vector
#   top_*   -> final dense head after all branches are combined
# NOTE(review): there is no 'v_spatial_drop_rate_1' key (numbering jumps
# from 0 to 2) — presumably a historical renumbering; verify nothing reads it.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.4,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.4,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.4,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.4,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.,  # NOTE(review): 0. while siblings are 0.4 — confirm intended
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.4,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.4,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.4,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0.3,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0.3,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.4,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.4,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.4,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.4,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.4,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.4,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.4,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0.3,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0.3,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0.3,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0.3,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0.3,
    'cali_1_units': 20,
    'drop_2_cali': 0.3,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0.2,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0.2,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0.2,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0.2,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0.2,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0.2,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 40,
    'model_batchsize': 6,
    'model_loss': 'mae',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Model inputs.  The fixed shapes below are dictated by the preprocessed
# dataset: one single-channel voxel volume (197 x 162 x 564), two
# single-channel 128x128 fluoro views, and a 6-element calibration vector.
# These names (channel_order, *_input_shape, input_*) are read by every
# branch-building section below.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D residual branch over the voxel volume.  Pattern per residual unit:
# Conv3D -> BN -> SpatialDropout3D -> Conv3D (linear, v_intra_act_fn=None)
# -> BN -> Add(skip) -> Activation.  Units 4-6 also project the skip path
# with a strided 1x1x1 conv so the shapes match.
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
# NOTE(review): activity_regularizer receives params['v_intra_act_fn'] here and
# in the second conv of each unit below -- both are None so it is a no-op, but
# an activation function is not a regularizer; confirm which key was intended.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# FIX: this unit previously re-read the v_conv_12/13/14 and v_spatial_drop_rate_6
# keys (copy-paste from unit 5) instead of its own v_conv_15/16/17 and
# v_spatial_drop_rate_7 entries defined in `params`.  The dict values are
# identical, so the built network is unchanged; the keys now match the config.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers -- flatten and project the voxel features; bn_21_v is this
# branch's output tensor (the distinct name keeps it alive once the fluoro
# sections below rebind the generic bn_*/act_* names).
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D residual branch over the first fluoro view.  Each residual unit is
# Conv2D -> BN -> SpatialDropout2D -> Conv2D (linear, intra_act_fn=None)
# -> BN -> Add(skip) -> Activation.  Output of this branch is act_5_1.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# FIX: unit 2 previously re-read 'spatial_drop_rate_1' and the 'conv_3_*' keys
# (copy-paste from unit 1) instead of its own 'spatial_drop_rate_2' /
# 'conv_5_*' entries.  The dict values are identical, so the built network is
# unchanged; the keys now match the config.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Second fluoro view, same architecture as branch 1.  NOTE(review): this
# section rebinds the exact variable names used by branch 1 (conv_0_1,
# bn_*, act_*, ...).  That is safe only because branch 1's final tensor was
# already captured as act_5_1 above -- confirm the remainder of the file
# (past this chunk) captures this branch's output under a distinct name too.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# NOTE(review): the next two layers read 'spatial_drop_rate_1' and the
# 'conv_3_*' keys rather than 'spatial_drop_rate_2' / 'conv_5_*'; the dict
# values are currently identical, so behavior is the same -- confirm which
# keys are intended before tuning those entries.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5 (this residual unit continues past the end of the visible chunk)
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
    """Linearly rescale *data_set* so values span *feature_range* along *axis*.

    Each slice along *axis* is first mapped to [0, 1] using its min and max,
    then stretched and shifted to [feature_range[0], feature_range[1]].
    """
    lo = np.min(data_set, axis=axis)
    hi = np.max(data_set, axis=axis)
    span = feature_range[1] - feature_range[0]
    unit_scaled = (data_set - lo) / (hi - lo)
    return unit_scaled * span + feature_range[0]
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices [0, shape) into test and train index arrays.

    Parameters
    ----------
    shape : int
        Size of the index range to sample from.
    num_of_samples : int or None
        If given, only this many unique indices are drawn before splitting;
        otherwise all ``shape`` indices are used.
    ratio : float
        Fraction of the drawn indices assigned to the test split.

    Returns
    -------
    (test_indx, train_indx) : tuple of np.ndarray
        Disjoint index arrays; together they cover every drawn index.
    """
    if num_of_samples is None:
        num_of_samples = shape
    # replace=False yields a shuffled sample of *unique* indices.
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently
    # ignored the `ratio` argument.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# ---------------------------------------------------------------------
# Data loading: open the compiled HDF5 datasets (voxels, two fluoro
# image views, calibration vectors, labels). Files stay open until the
# corresponding arrays have been sliced into memory below.
# ---------------------------------------------------------------------
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# Index bookkeeping: first carve a held-out test set from all samples,
# then split the remaining pool into validation and training indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# The second split returns positions *into* train_sup_indxs, so the two
# lines after it map those positions back to absolute sample indices.
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle sidecar recording the split indices, label stats, and (later)
# the Keras training history for this experiment run.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each dataset fully into memory, then index out the val and
# train partitions and close the file. NOTE(review): loads the entire
# dataset into RAM before slicing — assumes it fits; confirm for the
# voxel volumes in particular.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
# Labels: normalize train+val jointly with min_max_norm, then split by
# position (train indices come first in the concatenated list).
# NOTE(review): normalization statistics therefore include the
# validation labels — verify this leakage is intended.
label_mat_sup = label_init[:]
label_mat_sup = label_mat_sup[list(list(train_indxs) + list(val_indxs))]
label_mat_sup_norm = min_max_norm(label_mat_sup)
label_mat_train = label_mat_sup_norm[:len(train_indxs)]
label_mat_val = label_mat_sup_norm[-len(val_indxs):]
# Record raw (pre-normalization) per-dimension label statistics so
# predictions can be mapped back to physical units later.
label_t_mean = np.mean(label_mat_sup, axis=0)
label_t_std = np.std(label_mat_sup, axis=0)
label_t_min = np.min(label_mat_sup, axis=0)
label_t_max = np.max(label_mat_sup, axis=0)
var_dict['label_train_val_mean'] = label_t_mean
var_dict['label_train_val_std'] = label_t_std
var_dict['label_train_val_min'] = label_t_min
var_dict['label_train_val_max'] = label_t_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: voxels gain a trailing channel axis via expand_dims; the other
# three inputs are fed as-is under their named input layers.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained model and the history/bookkeeping pickle.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_img_stnd/vox_fluoro_img_stnd.py | .py | 22,149 | 457 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating changing the l1 and l2 regularization
# Derive the experiment name from the script's own filename, minus '.py'.
expr_name = sys.argv[0][:-3]
expr_no = '2'  # run number; used to suffix every artifact written below
# All artifacts (model, plots, history pickle) go under this directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load voxel, image, label, and calibration arrays and split them.

    Reads a slice ``[first_indx:last_indx]`` of each compiled HDF5 dataset,
    then performs two deterministic 80/20 splits (random_state=42): first
    holding out a test set, then carving a validation set from the
    remaining training pool.

    Returns the training-sub and validation partitions for voxels, images,
    calibration, and labels (the held-out test partitions are discarded).
    """
    def _load(path, dset_name):
        # Open one HDF5 file, slice out the requested rows, close it.
        h5 = h5py.File(os.path.expanduser(path), 'r')
        arr = h5[dset_name][first_indx:last_indx]
        h5.close()
        return arr

    vox_mat = _load('~/fluoro/data/compilation/voxels_pad.h5py', 'vox_dset')
    image_mat = _load('~/fluoro/data/compilation/images.h5py', 'image_dset')
    label_mat = _load('~/fluoro/data/compilation/labels.h5py', 'labels_dset')
    cali_mat = _load('~/fluoro/data/compilation/calibration.h5py', 'cali_len3_rot')

    # Split 1: 80% cumulative training pool / 20% held-out test set.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Split 2: carve a validation set out of the training pool.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Keras data_format: channels are the trailing axis of every input tensor.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)  # 2-D image branch input: (H, W, channels)
vox_input_shape = (199, 164, 566, 1)  # 3-D voxel branch input: (D1, D2, D3, channels)
cali_input_shape = (6,)  # calibration branch input: six scalars per sample
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each output dimension divided by its training-label std dev.

    Computes ``mean(((y_pred - y_true) / std) ** 2)`` where ``std`` is the
    per-dimension standard deviation stored in the 'std' dataset of
    ~/fluoro/data/compilation/labels_stats.h5py.

    Fix: the std vector is now read once and cached on the function object,
    instead of reopening and re-reading the HDF5 file on every invocation
    of the loss.
    """
    if not hasattr(cust_mean_squared_error_std, '_std_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        cust_mean_squared_error_std._std_v = stats_file['std'][:]
        stats_file.close()
    std_v = cust_mean_squared_error_std._std_v
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared error divided by the training-label
    variance (note: division happens OUTSIDE the square, unlike the
    std-scaled variant above — numerically equivalent weighting).

    Parameters are the usual Keras loss tensors; returns a scalar tensor.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 handle is closed even if the
    # dataset read raises (the original leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyper-parameter dictionary consumed by the model-construction code
# below; every layer reads its settings via params['...'] lookups.
params = {
    # 3D CONV — voxel branch: three conv/dropout/pool stages.
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV — shared settings for both fluoroscopy-image branches.
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units — after concatenating all branches.
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output — 6 linear values (pose parameters).
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping — regularizers, init, optimizer, training.
    'conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'dense_regularizer_1': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'dense_regularizer_2': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 30,
    'model_batchsize': 5,
    # Loss and metric are the std-scaled MSE defined above.
    'model_loss': cust_mean_squared_error_std,
    'model_metric': cust_mean_squared_error_std
}
# -----------------------------------------------------------------
# (Dead code: an earlier attempt fed the network through
# tf.placeholder-backed Inputs instead of plain Keras Inputs.)
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
# The four names given here are matched by the dict keys passed to
# model.fit() at the bottom of the script.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers (voxel branch)
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers (BatchNorm only between stages 1 and 2)
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattened 3D Conv (1000 -> 500 -> 250 units)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
# Lambda wraps per-image standardization so it runs as a layer.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1 (no dropout in this stage)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattened 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 2 (mirrors the Image 1 branch;
# layers are parallel but weights are NOT shared).
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattened 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data into one feature vector
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model (dropout only after the first layer)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output — linear 6-unit regression head (pose parameters)
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# NOTE(review): `lr=` is the legacy Keras optimizer argument name
# (newer releases use `learning_rate=`) — fine for the TF version
# this was written against; confirm before upgrading.
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Writes an architecture diagram; requires pydot + graphviz installed.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the HDF5 datasets. The file handles stay open here; the full
# datasets are materialized into memory further below, after which
# each file is closed.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
# 'cali_len3_rot' appears to be one specific calibration encoding —
# see the companion data_comp() which reads the same dataset name.
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test/train arrays.

    Args:
        shape: total number of available samples.
        num_of_samples: if given, only this many indices are drawn (a
            random subset of the range); otherwise all `shape` indices
            are used, i.e. a full permutation.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): disjoint numpy index arrays.
    """
    size = shape if num_of_samples is None else num_of_samples
    # replace=False => sampling without replacement (no duplicates).
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently
    # ignored the `ratio` parameter. Default behavior is unchanged.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Build the test / validation / train index sets. None => use every
# sample in the label dataset.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# Second split carves validation indices out of the training pool;
# the returned positions index INTO train_sup_indxs, hence the two
# re-mapping lines below.
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorting gives deterministic ordering for the saved index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# History pickle: the split indices and training history are written
# at the end of the run (file stays open for the whole training).
hist_file = open(os.path.join(save_dir, 'vox_fluoro_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): `[:]` materializes each ENTIRE dataset in memory
# before slicing out train/val rows — the test rows are loaded and
# discarded; fine if the data fits in RAM, otherwise fancy-index the
# h5py dataset directly.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: images come as (N, 2, H, W); axis 1 selects the two fluoro
# views, expand_dims adds the trailing channel axis expected by the
# network inputs. Input dict keys match the Input layer names.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist history + splits, then the trained model itself.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_l1_0-1_l2_0-1_var_loss/vox_fluoro_l1_0-1_l2_0-1_var_loss.py | .py | 22,262 | 458 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating changing the l1 and l2 regularization to 0.1 and 0.1 respectively
# This experiment is also going to evaluate the var loss as opposed to the std one
# Experiment name is derived from this script's filename (".py" stripped).
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All run artifacts (history pickle, architecture plot, saved model)
# are written under this per-experiment directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load the voxel / image / calibration / label slices and return an
    80/20/20 train / validation split.

    Args:
        first_indx, last_indx: optional slice bounds applied to every
            dataset (None/None loads everything).

    Returns:
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)

    The held-out test portion produced by the first split is
    intentionally discarded here (only train/val are returned).
    """
    def _load(fname, dset_name):
        # Read rows [first_indx:last_indx] from one HDF5 dataset.
        # The context manager closes the file even if the read raises
        # (the original open/close boilerplate leaked handles on error).
        path = os.path.expanduser(os.path.join('~/fluoro/data/compilation', fname))
        with h5py.File(path, 'r') as fh:
            return fh[dset_name][first_indx:last_indx]

    vox_mat = _load('voxels_pad.h5py', 'vox_dset')
    image_mat = _load('images.h5py', 'image_dset')
    label_mat = _load('labels.h5py', 'labels_dset')
    cali_mat = _load('calibration.h5py', 'cali_len3_rot')

    # First split: 80% cumulative training pool / 20% held-out test.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Second split: carve a validation set (20%) out of the training pool.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Network input geometry. 'channels_last' means the trailing axis of
# every input tensor is the channel axis, matching the shapes below.
cali_input_shape = (6,)                # six calibration scalars per sample
img_input_shape = (128, 128, 1)        # single-channel 128x128 fluoro frame
vox_input_shape = (199, 164, 566, 1)   # single-channel padded voxel volume
channel_order = 'channels_last'
# Earlier RMSE-style loss drafts that used to live here were left
# commented out; they are superseded by the cust_mean_squared_error_*
# functions defined below.
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each output dimension scaled by the training-label
    standard deviation, so the pose components (presumably 6 of them,
    matching 'main_output_units' — confirm) contribute comparably.

    Parameters are the usual Keras loss tensors; returns a scalar tensor.

    NOTE(review): the stats file is re-read on every call; Keras only
    invokes the loss while building the graph, but the values could be
    hoisted to module level.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 handle is closed even if the
    # dataset read raises (the original leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared error divided by the training-label
    variance (note: division happens OUTSIDE the square, unlike the
    std-scaled variant above — numerically equivalent weighting).
    This is the loss actually used by this experiment's params dict.

    Parameters are the usual Keras loss tensors; returns a scalar tensor.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 handle is closed even if the
    # dataset read raises (the original leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyper-parameter dictionary consumed by the model-construction code
# below. Identical architecture to the companion std-loss experiment,
# but trained for 50 epochs with the variance-scaled loss.
params = {
    # 3D CONV — voxel branch: three conv/dropout/pool stages.
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV — shared settings for both fluoroscopy-image branches.
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units — after concatenating all branches.
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output — 6 linear values (pose parameters).
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping — regularizers, init, optimizer, training.
    'conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'dense_regularizer_1': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'dense_regularizer_2': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.1),
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    # Loss and metric are the variance-scaled MSE defined above.
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# (Dead code: an earlier attempt fed the network through
# tf.placeholder-backed Inputs instead of plain Keras Inputs.)
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
# The four named inputs are matched by name when feeding the model.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers (voxel branch)
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers (BatchNorm only between stages 1 and 2)
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattened 3D Conv (1000 -> 500 -> 250 units)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
# Lambda wraps per-image standardization so it runs as a layer.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1 (no dropout in this stage)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattened 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 2 (mirrors the Image 1 branch;
# layers are parallel but weights are NOT shared).
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattened 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data (model construction continues
# below this chunk: branch concatenation, top dense stack, output).
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train indices.

    Args:
        shape: total number of samples available (indices run 0..shape-1).
        num_of_samples: if given, only this many randomly chosen indices are
            used (a random subsample of the full range); otherwise all
            `shape` indices are shuffled and split.
        ratio: fraction of the (possibly subsampled) indices assigned to the
            test portion.

    Returns:
        (test_indx, train_indx): two disjoint numpy arrays of indices whose
        union is the shuffled selection.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` argument -- the original hard-coded 0.2 here,
    # silently ignoring any caller-supplied ratio.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# None -> use every available sample when building the splits.
num_of_samples = None
# First split off the held-out test set, then split the remainder into
# train/val.  The second call returns positions *into* train_sup_indxs, so
# they are mapped back to absolute dataset indices just below.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted index lists: deterministic ordering (and compatible with h5py
# fancy indexing, which requires increasing indices).
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the split (and, after training, the history) for reproducibility.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each full dataset in memory, slice out val/train views, then
# close the backing HDF5 file.  The test rows stay untouched on disk.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# The image array's second axis indexes the two fluoro views ([:, 0] and
# [:, 1]); each view gets a trailing channel axis added, as does the voxel
# volume, to match the channels-last Input shapes.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Save history + splits to the pickle, and the trained model to HDF5.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_base_2/vox_fluoro_base_2.py | .py | 26,684 | 484 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-23
# We are going to go back to earlier architecture to see if we can overfit the training set.
# We are going to also do per image normalization between -1 and 1.
# We are not going to normalize the label data set. Instead, we will attempt to create a loss function that is better scaled than just normalization of the outputs.
# Experiment bookkeeping: artifacts are saved under a directory named after
# this script (sys.argv[0] with the trailing '.py' stripped).
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def mean_scaled_error(y_true, y_pred):
    """Custom Keras loss: sum of absolute errors, each label component scaled
    by the absolute per-component mean of the training labels.

    The means are read from ~/fluoro/data/compilation/labels_stats.h5py the
    first time the loss is evaluated and cached on the function object, so
    the HDF5 file is not reopened on every call (the original reopened it
    each time).  The file also holds 'std' and 'var' datasets, which are
    unused here.
    """
    if not hasattr(mean_scaled_error, '_mean_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        # Context manager guarantees the stats file is closed even on error.
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            mean_scaled_error._mean_v = stats_file['mean'][:]
    mean_v = mean_scaled_error._mean_v
    return tf.keras.backend.sum(tf.keras.backend.abs(y_pred - y_true) / tf.keras.backend.abs(tf.cast(mean_v, tf.float32)))
# -----------------------------------------------------------------
# Static configuration: channel ordering, input shapes, and one flat dict of
# every architecture / training hyper-parameter used below.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 9,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0,
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 3,
    'v_conv_2_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'valid',
    'v_conv_3_filters': 40,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 2,
    'v_conv_3_strides_1': 2,
    'v_conv_3_strides_2': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0,
    'v_conv_4_filters': 50,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    # NOTE(review): v_pool_2_* are referenced only by a commented-out
    # pooling layer below, so they are currently unused.
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_5_filters': 50,
    'v_conv_5_kernel': 2,
    'v_conv_5_strides_0': 2,
    'v_conv_5_strides_1': 2,
    'v_conv_5_strides_2': 2,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0,
    'v_conv_6_filters': 50,
    'v_conv_6_kernel': 2,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 60,
    'v_conv_7_kernel': 2,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0,
    'v_conv_8_filters': 40,
    'v_conv_8_kernel': 1,
    'v_conv_8_strides_0': 2,
    'v_conv_8_strides_1': 2,
    'v_conv_8_strides_2': 2,
    'v_conv_8_pad': 'same',
    'dense_1_v_units': 200,
    'dense_2_v_units': 200,
    'dense_3_v_units': 150,
    'dense_4_v_units': 125,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 2,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.,
    'conv_4_filters': 60,
    'conv_4_kernel': 2,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    # NOTE(review): pool_2_* are referenced only by commented-out pooling
    # layers below, so they are currently unused.
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 2,
    'conv_5_strides': 2,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.,
    'conv_6_filters': 80,
    'conv_6_kernel': 2,
    'conv_6_strides': 2,
    'conv_6_pad': 'same',
    'dense_1_f_units': 100,
    'dense_2_f_units': 80,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 40,
    'dense_3_cali_units': 30,
    # Top Level Dense Units
    'dense_1_co_units': 300,
    'drop_1_comb_rate': 0.,
    'dense_2_co_units': 250,
    'dense_3_co_units': 200,
    'drop_2_comb_rate': 0.,
    'dense_4_co_units': 150,
    'dense_5_co_units': 100,
    'dense_6_co_units': 80,
    'dense_7_co_units': 60,
    'dense_8_co_units': 40,
    'dense_9_co_units': 20,
    'dense_10_co_units': 10,
    'dense_11_co_units': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping.  All regularizers are disabled (None) and all
    # dropout rates above are 0, so those layers are currently no-ops --
    # consistent with the stated goal of overfitting the training set.
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 6,
    'model_loss': mean_scaled_error,
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Input Layers: voxel volume, the two fluoro views, and the 6-vector of
# calibration values (shapes defined at the top of the file).
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# 3D conv tower over the voxel volume: eight strided Conv3D stages
# interleaved with BatchNorm and (zero-rate) spatial dropout, flattened into
# four dense layers.
# NOTE(review): the names bn_1..bn_8 are re-bound again in the 2D/cali/top
# sections below.  Each binding is consumed immediately after assignment, so
# the shadowing is functionally harmless, but the numbering does not
# identify a unique layer.
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_1 = tf.keras.layers.BatchNormalization()(v_conv_1)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_4)
v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
bn_8 = tf.keras.layers.BatchNormalization()(v_flatten_1)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_9)
bn_10 = tf.keras.layers.BatchNormalization()(dense_2_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(dense_3_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_11)
# -----------------------------------------------------------------
# 2D conv branch for fluoro image 1 (per-image standardization disabled;
# BatchNorm after each conv instead).  The two image branches do NOT share
# weights -- each gets its own layer instances.
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
pool_1_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(conv_6_1)
# Dense Layers After Flattened 2D Conv (image 1)
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# 2D conv branch for fluoro image 2 -- mirrors the image-1 branch with its
# own (unshared) layer instances.
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
pool_1_2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_2)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
# pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
bn_4 = tf.keras.layers.BatchNormalization()(conv_4_2)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(conv_6_2)
# Dense Layers After Flattened 2D Conv (image 2)
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data into one feature vector.
dense_0_comb = tf.keras.layers.concatenate([dense_4_v, dense_3_f_1, dense_3_f_2, dense_3_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model: tapering stack (300 -> 6 units) down to the
# regression head.  Dropout rates are 0, so the Dropout layers are no-ops.
bn_1 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_1_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(bn_2)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(dense_2_comb)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(dense_3_comb)
dense_drop_2 = tf.keras.layers.Dropout(rate=params['drop_2_comb_rate'])(bn_4)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_2)
dense_5_comb = tf.keras.layers.Dense(units=params['dense_5_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_4_comb)
dense_6_comb = tf.keras.layers.Dense(units=params['dense_6_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_5_comb)
dense_7_comb = tf.keras.layers.Dense(units=params['dense_7_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_6_comb)
dense_8_comb = tf.keras.layers.Dense(units=params['dense_8_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_7_comb)
dense_9_comb = tf.keras.layers.Dense(units=params['dense_9_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_8_comb)
dense_10_comb = tf.keras.layers.Dense(units=params['dense_10_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_9_comb)
dense_11_comb = tf.keras.layers.Dense(units=params['dense_11_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_10_comb)
# -----------------------------------------------------------------
# Main Output: 6-unit linear regression head.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], activity_regularizer=None, name='main_output')(dense_11_comb)
# -----------------------------------------------------------------
# Model Housekeeping: build, compile, plot the architecture, and print a
# summary.  NOTE(review): `lr` is the legacy Keras optimizer kwarg; newer TF
# versions prefer `learning_rate`.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train indices.

    Args:
        shape: total number of samples available (indices run 0..shape-1).
        num_of_samples: if given, only this many randomly chosen indices are
            used (a random subsample of the full range); otherwise all
            `shape` indices are shuffled and split.
        ratio: fraction of the (possibly subsampled) indices assigned to the
            test portion.

    Returns:
        (test_indx, train_indx): two disjoint numpy arrays of indices whose
        union is the shuffled selection.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` argument -- the original hard-coded 0.2 here,
    # silently ignoring any caller-supplied ratio.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Open the (pre-normalized) HDF5 datasets; this experiment reads the
# min/max-normalized image and calibration datasets rather than the raw ones.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['min_max_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# None -> use every available sample when building the splits.
num_of_samples = None
# Split off the test set, then split the remainder into train/val.  The
# second call returns positions *into* train_sup_indxs, so they are mapped
# back to absolute dataset indices just below.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted index lists: deterministic ordering (and compatible with h5py
# fancy indexing, which requires increasing indices).
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the split (and, after training, the history) for reproducibility.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each full dataset in memory, slice out val/train views, then
# close the backing HDF5 file.  The test rows stay untouched on disk.
vox_mat_base = vox_init[:]
vox_mat_val = vox_mat_base[val_indxs]
vox_mat_train = vox_mat_base[train_indxs]
vox_file.close()
image_mat_base_1 = image_init_1[:]
image_mat_val_1 = image_mat_base_1[val_indxs]
image_mat_train_1 = image_mat_base_1[train_indxs]
image_mat_base_2 = image_init_2[:]
image_mat_val_2 = image_mat_base_2[val_indxs]
image_mat_train_2 = image_mat_base_2[train_indxs]
image_file.close()
cali_mat_base = cali_init[:]
cali_mat_val = cali_mat_base[val_indxs]
cali_mat_train = cali_mat_base[train_indxs]
cali_file.close()
label_mat_base = label_init[:]
label_mat_val = label_mat_base[val_indxs]
label_mat_train = label_mat_base[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Only the voxel volume gets a trailing channel axis here; the image arrays
# are passed as stored (presumably already channel-last -- TODO confirm
# against the dataset writer).
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Save the trained model, then history + splits to the pickle.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_deeper_reg/vox_fluoro_deeper_reg.py | .py | 31,567 | 590 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating how a deeper conv net, which paradoxically has fewer parameters would fair
# No regularization
# NOTE(review): the comment above says "No regularization", but the `params`
# dict below wires l1_l2 activity regularizers into every conv/dense layer —
# confirm which was intended.
expr_name = sys.argv[0][:-3]  # script filename without the trailing '.py'
expr_no = '1'
# All artifacts (plots, saved model, pickled history) go under this directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load the voxel/image/label/calibration datasets and split them.

    Reads the four compilation HDF5 files, slices each dataset to
    ``[first_indx:last_indx]``, then performs two successive 80/20
    ``train_test_split`` calls (both seeded with ``random_state=42``):
    first train-vs-test, then train-vs-validation on the training portion.

    Returns:
        Tuple of eight arrays:
        (vox_train, vox_val, image_train, image_val,
         cali_train, cali_val, label_train, label_val)
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')

    def _load(filename, dset_name):
        # Open read-only, slice into memory, and close via the context manager.
        with h5py.File(os.path.join(base_dir, filename), 'r') as h5:
            return h5[dset_name][first_indx:last_indx]

    vox_mat = _load('voxels_pad.h5py', 'vox_dset')
    image_mat = _load('images.h5py', 'image_dset')
    label_mat = _load('labels.h5py', 'labels_dset')
    cali_mat = _load('calibration.h5py', 'cali_len3_rot')

    # First split: hold out 20% of all samples as the test set.
    (vox_train_cum, vox_test,
     image_train_cum, image_test,
     cali_train_cum, cali_test,
     label_train_cum, label_test) = train_test_split(
        vox_mat, image_mat, cali_mat, label_mat,
        shuffle=True, test_size=0.2, random_state=42)

    # Second split: hold out 20% of the remaining training data for validation.
    (vox_train_sub, vox_val,
     image_train_sub, image_val,
     cali_train_sub, cali_val,
     label_train_sub, label_val) = train_test_split(
        vox_train_cum, image_train_cum, cali_train_cum, label_train_cum,
        shuffle=True, test_size=0.2, random_state=42)

    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """Keras loss: MSE with the residual scaled per-dimension by label std.

    The per-dimension standard deviations are read from the precomputed
    ``labels_stats.h5py`` file; each residual is divided by its dimension's
    std before squaring, so all six label dimensions contribute comparably.
    """
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    # Context manager guarantees the stats file is closed after the read.
    with h5py.File(stats_path, 'r') as stats_file:
        std_v = stats_file['std'][:]
    scaled_residual = (y_pred - y_true) / std_v
    return tf.keras.backend.mean(tf.keras.backend.square(scaled_residual))
def cust_mean_squared_error_var(y_true, y_pred):
    """Keras loss: MSE with each squared residual scaled by label variance.

    Loads the per-dimension variances from ``labels_stats.h5py`` and divides
    the squared error by them, normalizing each of the six label dimensions.
    Mathematically equivalent to ``cust_mean_squared_error_std``.
    """
    stats_path = os.path.join(os.path.expanduser('~/fluoro/data/compilation'), 'labels_stats.h5py')
    # Context manager guarantees the stats file is closed after the read.
    with h5py.File(stats_path, 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Single hyperparameter dictionary for the whole model: layer sizes, strides,
# dropout rates, regularizers, optimizer, and training settings.
# NOTE(review): despite the "No regularization" comment near the top of this
# file, l1_l2 activity regularizers are configured here — confirm intent.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.3,
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 3,
    'v_conv_2_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'valid',
    'v_conv_3_filters': 40,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 2,
    'v_conv_3_strides_1': 2,
    'v_conv_3_strides_2': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 50,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_5_filters': 50,
    'v_conv_5_kernel': 2,
    'v_conv_5_strides_0': 2,
    'v_conv_5_strides_1': 2,
    'v_conv_5_strides_2': 2,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 50,
    'v_conv_6_kernel': 2,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 50,
    'v_conv_7_kernel': 2,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 1,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # Dense head of the voxel branch
    'dense_1_v_units': 300,
    'dense_2_v_units': 250,
    'dense_3_v_units': 250,
    'dense_4_v_units': 200,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 2,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_4_filters': 60,
    'conv_4_kernel': 3,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    # Dense head shared (by configuration) between the two fluoro branches
    'dense_1_f_units': 120,
    'dense_2_f_units': 120,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 20,
    'dense_3_cali_units': 20,
    # Top Level Dense Units
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.0001, l2=0.0001),
    'conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.0001, l2=0.0001),
    'dense_regularizer_1': tf.keras.regularizers.l1_l2(l1=0.0001, l2=0.0001),
    'dense_regularizer_2': tf.keras.regularizers.l1_l2(l1=0.0001, l2=0.0001),
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Input Layers
# Four named inputs: the voxel volume, the two fluoroscopy views, and the
# calibration vector. The names must match the keys passed to model.fit().
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# --- 3D conv branch over the voxel volume ---
# Pattern: Conv3D -> (SpatialDropout3D | MaxPooling3D) -> BatchNormalization,
# repeated, then flatten into a 4-layer dense head.
# FIX(review): two layers were silently dropped from the graph in the original:
#   * v_conv_5 consumed v_conv_4 directly, skipping v_pool_2 and bn_4
#     (both were computed but never used);
#   * v_conv_8 consumed v_conv_7 directly, bypassing the v_spat_4 dropout.
# Both are now wired through, matching the pattern of the rest of the branch.
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
bn_1 = tf.keras.layers.BatchNormalization()(v_spat_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
bn_3 = tf.keras.layers.BatchNormalization()(v_spat_2)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_3)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
bn_4 = tf.keras.layers.BatchNormalization()(v_pool_2)
# was (v_conv_4): pooling and batch-norm were computed but bypassed
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_4)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
bn_5 = tf.keras.layers.BatchNormalization()(v_spat_3)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_5)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_6)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
# was (v_conv_7): the v_spat_4 dropout was computed but bypassed
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_4)
v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
# Dense head of the voxel branch; dense_4_v feeds the combined head below.
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_3_v)
# -----------------------------------------------------------------
# --- 2D conv branch over fluoro image 1 ---
# Per-image standardization, then Conv2D/SeparableConv2D stacks with
# spatial dropout, max-pooling, and batch norm, ending in a 3-layer dense head.
# NOTE(review): bn_1..bn_4 rebind names also used by the 3D branch above;
# harmless in the functional API (graph edges are captured at call time),
# but distinct names would be clearer.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
bn_1 = tf.keras.layers.BatchNormalization()(spat_1_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_3_1)
bn_3 = tf.keras.layers.BatchNormalization()(spat_2_1)
# Later convs are depthwise-separable to cut parameter count.
conv_4_1 = tf.keras.layers.SeparableConv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_3)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_1)
bn_4 = tf.keras.layers.BatchNormalization()(pool_2_1)
conv_5_1 = tf.keras.layers.SeparableConv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_5_1)
conv_6_1 = tf.keras.layers.SeparableConv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(conv_6_1)
# Dense Layers After Flattended 2D Conv; dense_3_f_1 feeds the combined head.
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# --- 2D conv branch over fluoro image 2 ---
# Structurally identical to the image-1 branch, but with its own (unshared)
# layer instances, so the two views learn independent weights.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
bn_1 = tf.keras.layers.BatchNormalization()(spat_1_2)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_1)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_2)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_3_2)
bn_3 = tf.keras.layers.BatchNormalization()(spat_2_2)
conv_4_2 = tf.keras.layers.SeparableConv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_3)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
bn_4 = tf.keras.layers.BatchNormalization()(pool_2_2)
conv_5_2 = tf.keras.layers.SeparableConv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_5_2)
conv_6_2 = tf.keras.layers.SeparableConv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(conv_6_2)
# Dense Layers After Flattended 2D Conv; dense_3_f_2 feeds the combined head.
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
# Small 3-layer MLP over the 6-element calibration vector.
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# Concatenate the four branch embeddings into one feature vector.
dense_0_comb = tf.keras.layers.concatenate([dense_4_v, dense_3_f_1, dense_3_f_2, dense_3_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
# NOTE(review): bn_1/bn_2 rebind names used by earlier branches; harmless in
# the functional API since graph edges were captured at call time.
bn_1 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_1)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
bn_2 = tf.keras.layers.BatchNormalization()(dense_drop_1)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_2)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_drop_2 = tf.keras.layers.Dropout(rate=params['drop_2_comb_rate'])(dense_3_comb)
# Last hidden layer is unregularized before the linear output.
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(dense_drop_2)
# -----------------------------------------------------------------
# Main Output
# 6-unit linear regression head (the pose/label vector).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
# Functional model over the four inputs; custom variance-scaled MSE as both
# loss and metric (see cust_mean_squared_error_var above).
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# NOTE(review): the `lr` keyword is the TF1.x-era spelling; newer tf.keras
# optimizers use `learning_rate` — confirm against the pinned TF version.
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Requires pydot/graphviz to be installed for the architecture plot.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the four compilation HDF5 files read-only. The handles are deliberately
# kept open here; each is closed below once its dataset has been sliced into
# memory (see the vox/image/cali/label loading block further down).
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices ``[0, shape)`` into test and train sets.

    Args:
        shape: Total number of samples to draw indices from.
        num_of_samples: If given, only this many indices are drawn (without
            replacement) before splitting; otherwise all ``shape`` indices
            are used.
        ratio: Fraction of the drawn indices assigned to the test set.

    Returns:
        (test_indx, train_indx): two disjoint numpy index arrays.

    Note:
        Uses the global numpy RNG; seed ``np.random`` for reproducibility.
    """
    # FIX(review): `ratio` was previously accepted but ignored — the test
    # fraction was hard-coded to 0.2. Default behavior is unchanged.
    size = shape if num_of_samples is None else num_of_samples
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
num_of_samples = None  # None => use every sample when drawing split indices
# --- Train / validation / test index selection ---
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# Second split carves a validation set out of the training superset; the
# returned values index into train_sup_indxs, not into the raw datasets.
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted Python lists: keeps the saved index lists reproducible to inspect.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file that will receive the index lists and the Keras history dict.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# `[:]` loads the entire HDF5 dataset into memory before fancy-indexing it
# down to the val/train subsets; each file is closed right after its load.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Axis 1 of the image array holds the two fluoro views (index 0 and 1);
# expand_dims adds the trailing channel dimension expected by the model.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the per-epoch history alongside the split indices, then the model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
# -----------------------------------------------------------------
# v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
# v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
# v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
# v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
# # v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
# v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_spat_3)
# v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_4)
# v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_5)
# -----------------------------------------------------------------
# # Second run of 2D Conv Layers for Image 1
# conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
# spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
# pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# # Third run of 2D Conv Layers for Image 1
# conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
# pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
# # Fourth run of 2D Conv Layers for Image 1
# conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_3_1)
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_rms_prop/vox_fluoro_res_rms_prop.py | .py | 57,195 | 1,112 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual netowrks
expr_name = sys.argv[0][:-3]  # script filename without the trailing '.py'
expr_no = '1'
# All artifacts (plots, saved model, pickled history) go under this directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension scaled by its variance.

    The per-dimension variance vector is read from the project's
    ``labels_stats.h5py`` file, so errors in differently-scaled pose
    components (rotations vs. translations) contribute comparably.

    The original implementation reopened and re-read the HDF5 file on every
    invocation — i.e. once per training batch. The variance vector is now
    loaded once, under a context manager (so the file handle is closed even
    if the read raises), and cached on the function object.

    Args:
        y_true: ground-truth label tensor.
        y_pred: predicted label tensor (same shape as ``y_true``).

    Returns:
        Scalar tensor: mean of squared errors divided elementwise by the
        cached variance vector.
    """
    var_v = getattr(cust_mean_squared_error_var, '_var_cache', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            var_v = stats_file['var'][:]
        cust_mean_squared_error_var._var_cache = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyper-parameter dictionary for the whole model. Keys are grouped by
# sub-network: ``v_*`` drive the 3D voxel branch, bare ``conv_*`` drive the
# (shared-architecture) 2D fluoro branches, ``comb_*`` the merged-fluoro
# residual stack, ``cali_*`` the calibration MLP, and ``top_*`` the final
# head. Strides for 3D convs are split into three ``_strides_{0,1,2}``
# entries; 2D convs use a single scalar stride.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.3,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4 (downsampling stage: strided conv on both branch and shortcut)
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.3,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.3,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.3,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.5,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'dense_1_v_units': 75,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.3,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.3,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.3,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.3,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.3,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.3,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.3,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.3,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.3,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.3,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.3,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.3,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_units_1': 50,
    'vox_flu_units_2': 30,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'cali_1_units': 20,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_dense_5': 6,
    'top_dense_6': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # Optimizer is stored as a class (not an instance); presumably
    # instantiated later with 'learning_rate' — confirm at the compile site.
    'model_opt': tf.keras.optimizers.RMSprop,
    'learning_rate': 0.001,
    'model_epochs': 100,
    'model_batchsize': 6,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Fixed input geometry: single-channel 128x128 fluoro frames, a
# single-channel (199, 164, 566) voxel volume, and a 6-vector calibration.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
# Two strided 3D convs aggressively downsample the large voxel volume before
# the residual stack; each conv is followed by batch norm, with spatial
# dropout (drops whole feature maps) between them.
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# Voxel residual stages. Stages 1-3 use identity shortcuts (conv -> BN ->
# dropout -> conv -> BN, added back onto the stage input, then a shared
# post-add activation). Stages 4-5 downsample, so the shortcut is itself a
# strided 1x1x1-ish conv projection rather than an identity.
# NOTE(review): several second convs pass params['v_intra_act_fn'] (None) as
# activity_regularizer — likely meant for the activation slot; harmless while
# the value is None, but confirm before changing that param.
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
# Projection shortcut: strided conv on v_act_2 so both Add() inputs match.
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
# Projection shortcut for the second downsampling stage.
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# Third downsampling residual stage (projection shortcut), mirroring stage 5.
# NOTE(review): this stage previously reused the v_conv_12/13/14 and
# v_spatial_drop_rate_6 params, leaving the dedicated v_conv_15/16/17 and
# v_spatial_drop_rate_7 keys dead. Switched to the intended keys — current
# values are identical, so the built graph is unchanged, but tuning those
# keys now actually takes effect.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
# Projection shortcut so both Add() inputs share the downsampled shape.
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# Heavy (0.5) spatial dropout, one 'valid' conv to shrink the map, then the
# voxel branch is flattened into two dense layers; bn_21_v is this branch's
# output embedding.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# 2D branch for the first fluoro image: per-image standardization, two
# strided convs with BN/spatial dropout, average pooling, then residual
# stages. NOTE(review): graph-node names bn_0..bn_2 rebind the voxel
# section's Python variables — the already-built voxel graph is unaffected,
# but the shadowing is easy to trip over when editing.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# Identity-shortcut residual stage.
# NOTE(review): this stage previously read the conv_3_* and
# spatial_drop_rate_1 params, leaving the dedicated conv_5_* /
# spatial_drop_rate_2 keys dead. Switched to the intended keys — current
# values are identical, so the built graph is unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# Stages 3-6: identical identity-shortcut residual stages; the second conv of
# each pair uses 'intra_act_fn' (None, i.e. linear) so the nonlinearity is
# applied only after the Add(). act_5_1 is the fluoro-1 branch output.
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# Second fluoro branch: same architecture as fluoro 1 applied to
# input_fluoro_2 (separate weights — layers are constructed anew, not
# shared). NOTE(review): this section reuses the fluoro-1 Python variable
# names (bn_*, conv_*_1, spat_*_1, act_*); the fluoro-1 graph nodes survive,
# but the old handles are rebound here.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# Identity-shortcut residual stage.
# NOTE(review): as in the fluoro-1 branch, this stage previously read the
# conv_3_* and spatial_drop_rate_1 params; switched to the dedicated
# conv_5_* / spatial_drop_rate_2 keys — current values are identical, so
# the built graph is unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# Stages 3-5 of the fluoro-2 branch (same identity-shortcut pattern as
# fluoro 1; the section continues below this point).
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Concatenate the two fluoro branches along the default (channel) axis.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Blocks 0-4 use identity shortcuts; blocks 5-7 downsample (stride 2) and use
# a 1x1 strided projection conv on the shortcut so shapes match at the Add.
# NOTE: bn_0/bn_1/..., spat_0/..., add_0/act_0 names shadow earlier bindings in
# this flat script; references are strictly sequential, so this is harmless
# but easy to misread.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5 (downsampling block: comb_12 is the 1x1 strided projection shortcut on act_4)
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6 (downsampling block: comb_15 projects act_5 on the shortcut)
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7 (downsampling block: comb_18 projects act_6 on the shortcut)
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
# Flatten the conv features and run two dense layers; bn_21_f is the
# fluoro-side embedding that is later summed with the voxel-side embedding.
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# bn_21_v is the voxel-branch embedding produced earlier in the file
# (outside this section); element-wise sum requires matching widths.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Funnel the fused embedding down (60 -> ... -> 10 units per params);
# bn_0..bn_4 shadow earlier names — references are sequential, so harmless.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small MLP over the 6-value calibration input; bn_4_c is its embedding.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
# Sum of the calibration embedding and the fused vox/fluoro embedding
# (both 10 units wide per params).
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Three dense residual blocks (Dense -> BN -> Dense -> BN, add, activate),
# then a final dense layer into the 6-unit linear output head.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
# Shortcut here taps the calibration embedding (bn_4_c), not top_level_act.
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
# NOTE(review): top_dense_4/top_dense_5 reuse params['top_dense_2'] and
# params['top_dense_3'] (and top_dense_6 uses params['top_dense_4']) — likely
# copy-paste of the param keys; confirm intended (all these params may be equal).
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
# 6 linear units — presumably a 6-DOF pose (3 translation + 3 rotation);
# TODO confirm against the label dataset.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# `lr` is the legacy keras optimizer kwarg (newer TF versions use `learning_rate`).
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# NOTE(review): plot_model requires pydot/graphviz to be installed — verify
# the environment, otherwise this raises before training starts.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the HDF5 datasets (kept open; handles are closed after the data is
# sliced into memory further below).
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split sample indices into disjoint test and train arrays.

    Args:
        shape: Total number of samples; indices 0..shape-1 are candidates.
        num_of_samples: If given, only this many indices are drawn (without
            replacement) before splitting; otherwise all `shape` indices are
            shuffled.
        ratio: Fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): numpy arrays of indices; disjoint, and their
        union is exactly the set of drawn indices.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2, silently ignoring
    # the `ratio` parameter.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# None => use every sample in the label dataset for the first split.
num_of_samples = None
# First split: held-out test indices vs. the remaining "supervised" pool.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# Second split returns POSITIONS into train_sup_indxs ...
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# ... which are mapped back to actual dataset indices here.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted ascending — h5py fancy indexing requires increasing indices, and it
# keeps the saved index lists readable.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# History/indices pickle; closed at the very end after training completes.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
# Persist the exact splits so the run is reproducible/auditable.
# Note: test_indxs are recorded but test data is never loaded in this script.
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# [:] materializes the ENTIRE dataset into RAM before slicing —
# memory-heavy for the voxel volumes; the file handle is closed right after.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# expand_dims adds the trailing channel axis; images are split into the two
# fluoro views along axis 1. Inputs are matched to the named Input layers.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# Save only the history dict (the History object itself is not picklable).
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_overfit/vox_fluoro_overfit.py | .py | 64,037 | 1,257 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-21
# We are continuing the usage of the architecture based on the residual nets
# The main purpose of this test is to see if we can overfit the data. We are only going to try to train on 12 samples to try to overfit the training data.
# In this file, we are goign to continue normalizing the calibration inputs between -1 and 1, but we will only run the min max on the training data set.
# We are going to keep the label data set normalized from -1 to 1.
# We have also removed all dropout from this model, and we will see if the model can overfit the data.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to use 'mse' for the loss, and Nadam for the optimizer.
# Experiment name is derived from this script's filename (strip the ".py").
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (saved model, architecture plot, history pickle) go here.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each output dimension scaled by the variance
    of the training labels (read from labels_stats.h5py).

    The variance vector is loaded once and cached on the function object, so
    repeated invocations do not reopen and re-read the HDF5 file (the
    original re-read it on every call and leaked the handle on exception).

    Args:
        y_true: Ground-truth label tensor.
        y_pred: Predicted label tensor.

    Returns:
        Scalar tensor: mean of squared errors divided elementwise by the
        per-dimension label variances.
    """
    var_v = getattr(cust_mean_squared_error_var, '_var_cache', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            # Only the 'var' dataset is needed; 'mean'/'std' are unused here.
            var_v = stats_file['var'][:]
        finally:
            stats_file.close()
        cust_mean_squared_error_var._var_cache = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
    # Overfitting experiment: every dropout rate is 0 and every regularizer is
    # None (deliberate — see the header comments of this script).
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 10,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0,
    'cali_1_units': 20,
    'drop_2_cali': 0,
    'cali_2_units': 20,
    'cali_3_units': 10,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    # NOTE(review): keys top_dense_5..top_dense_12 and the top_drop_* keys may
    # not all be consumed by the model-building code (defined later in the
    # file) — confirm which are live before tuning them.
    'top_drop_0': 0,
    'top_dense_0': 10,
    'top_dense_1': 10,
    'top_dense_2': 10,
    'top_drop_1': 0,
    'top_dense_3': 10,
    'top_dense_4': 10,
    'top_drop_2': 0,
    'top_dense_5': 10,
    'top_dense_6': 10,
    'top_drop_3': 0,
    'top_dense_7': 10,
    'top_dense_8': 10,
    'top_drop_4': 0,
    'top_dense_9': 10,
    'top_dense_10': 10,
    'top_drop_5': 0,
    'top_dense_11': 10,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Nadam,
    'learning_rate': 0.001,
    'model_epochs': 2000,
    'model_batchsize': 6,
    'model_loss': 'mse',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Network-wide data layout and the fixed static shapes of the inputs.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)       # single-channel 128x128 fluoro frame
vox_input_shape = (197, 162, 564, 1)  # single-channel voxel volume
cali_input_shape = (6,)               # 6-element calibration vector


def _model_input(name, shape):
    """Return a named float32 Keras symbolic input of the given shape."""
    return tf.keras.Input(shape=shape, name=name, dtype='float32')


# Input Layers
input_vox = _model_input('input_vox', vox_input_shape)
input_fluoro_1 = _model_input('input_fluoro_1', img_input_shape)
input_fluoro_2 = _model_input('input_fluoro_2', img_input_shape)
input_cali = _model_input('input_cali', cali_input_shape)
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3-D convolutional branch over the voxel volume (input_vox): two entry
# convs with a max-pool, a transition conv, six residual units, a final
# conv, then flatten + two dense layers.  The branch's output feature
# vector is dense_2_v.  All hyperparameters come from the module-level
# `params` dict; commented-out BatchNormalization lines are kept as-is.
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
# bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(v_conv_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
# bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_pool_0)
# ---
# Run of Residual Layers
# Each unit: conv -> spatial dropout -> conv -> Add(skip) -> activation.
# NOTE(review): the second conv of each unit passes params['v_intra_act_fn']
# as activity_regularizer (the *_intra_act_fn keys look like activations
# elsewhere) — confirm this is intentional and not a copy/paste slip.
# bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_2)
# bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
# bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([v_conv_4, v_conv_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
# bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([v_conv_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
# bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([v_conv_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
# Units 4-6 also run a conv on the skip path before the Add (projection-style
# shortcut), presumably to match dimensions changed by the strides — confirm
# against the stride values defined earlier in `params`.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(v_conv_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
# bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
# bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([v_conv_10, v_conv_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
# bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(v_conv_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
# bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
# bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([v_conv_13, v_conv_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): unit 6 reuses the v_conv_12/13/14 hyperparameter keys and
# v_spatial_drop_rate_6 from unit 5 (layers v_conv_15..17 have no dedicated
# keys here) — confirm this sharing is intentional.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
# bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(v_conv_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
# bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
# bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([v_conv_16, v_conv_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(v_act_5)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten the final 3-D feature map and reduce it to the voxel feature
# vector consumed further down the model.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
# bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
# bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2-D residual branch over the first fluoro image: two entry convs with an
# average-pool, then six residual units (conv -> spatial dropout -> conv ->
# Add(identity skip) -> activation).  The branch's output is act_5_1.
# In this branch 'intra_act_fn' is used as the *activation* of each unit's
# second conv.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): unit 2 reuses spatial_drop_rate_1 and the conv_3_* keys
# (no dedicated conv_5_* lookups here) — confirm this sharing is intentional.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Same architecture as FLUORO ANALYSIS 1 applied to input_fluoro_2.  New
# layer objects are constructed here, so the two branches learn separate
# weights (no layer sharing).  The branch's output is act_5_2.
# NOTE(review): the intermediate names (conv_0_1 ... add_5, act_0..act_4)
# shadow those of branch 1; only act_5_1 from branch 1 is read after this
# point, so the reuse is harmless as written — confirm nothing else depends
# on branch-1 intermediates.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): as in branch 1, unit 2 reuses spatial_drop_rate_1 and the
# conv_3_* keys — confirm intentional.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Stack the two fluoro feature maps along the channel axis (the default
# axis=-1 matches 'channels_last') so they feed one shared trunk below.
_fluoro_features = [act_5_1, act_5_2]
comb_fluoro_0 = tf.keras.layers.Concatenate()(_fluoro_features)
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual trunk over the concatenated two-fluoro feature map, using the
# comb_* hyperparameter keys (units visible here: 0-4, plus the start of 5).
# The names add_0/act_0 ... act_4 shadow the per-branch intermediates above,
# which are no longer needed at this point.
# 0
# Unit 0's skip adds comb_fluoro_0 itself, so Add() requires comb_1's output
# shape to match it — i.e. comb_1_filters must equal the concatenated
# channel count.
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
# bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(comb_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
# bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, comb_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(comb_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
# bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, comb_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(comb_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
# bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, comb_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(comb_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
# bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, comb_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(comb_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
# bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, comb_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
# Unit 5 uses kernel/stride settings that downsample (see comb_10/11/12
# params above); its remaining layers continue past this section.
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(comb_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
# bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([comb_11, comb_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(comb_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
# bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([comb_14, comb_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(comb_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
# bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([comb_17, comb_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
# bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(comb_flatten_1)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
# bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([dense_1_comb, dense_2_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_act)
# bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
# bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
# bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
# bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_3)
# bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
# bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
# bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
# bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(cali_2)
# bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([cali_3, vox_flu_4])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
# bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
# bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_1, cali_3])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
# bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([top_dense_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
# bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([top_dense_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(top_dense_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
# bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([top_dense_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(top_dense_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
# bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([top_dense_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_dense_10)
# bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([top_dense_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Linearly rescale ``data_set`` into ``feature_range`` (min-max norm).

    If ``data_min``/``data_max`` are supplied (e.g. training-set statistics
    reused for validation data), out-of-range values are clipped to those
    bounds first; otherwise the bounds are computed from ``data_set`` along
    ``axis``.  Returns a new array; the input is not modified in place.
    """
    lo, hi = feature_range
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        # Clip below the supplied floor so the scaled output never drops under lo.
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        # Clip above the supplied ceiling so the scaled output never exceeds hi.
        data_set = np.where(data_set > data_max, data_max, data_set)
    # Map to [0, 1] first, then stretch/shift into [lo, hi].
    unit_scaled = (data_set - data_min) / (data_max - data_min)
    return unit_scaled * (hi - lo) + lo
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices ``[0, shape)`` into test and train arrays.

    Args:
        shape: total number of available samples to draw indices from.
        num_of_samples: if given, draw only this many indices (subsample)
            before splitting; otherwise all ``shape`` indices are used.
        ratio: fraction of the drawn indices assigned to the test set.

    Returns:
        ``(test_indx, train_indx)`` — two disjoint numpy arrays of indices.
    """
    # replace=False guarantees every index appears at most once, so the two
    # returned slices are disjoint by construction.
    size = shape if num_of_samples is None else num_of_samples
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    # Bug fix: the test fraction was previously hard-coded to 0.2, silently
    # ignoring the `ratio` parameter.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# -----------------------------------------------------------------
# Data loading: open the compiled HDF5 datasets (voxels, per-image
# normalized fluoro images, calibration vectors, regression labels).
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# Hold out `num_of_samples` indices as the test set, then split the
# remainder into train/validation.  The second split returns positions
# into train_sup_indxs, which are mapped back to dataset indices below.
num_of_samples = 120
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file collecting the split indices, normalization stats, and
# training history for later analysis / de-normalization.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE: each dataset is materialized fully into RAM ([:]) before slicing;
# test rows are never materialized here (only their indices are saved).
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
# Calibration stats are computed on the TRAIN split only, then reused to
# scale the validation split (no leakage).
cali_train_min = np.min(cali_mat_train, axis=0)
cali_train_max = np.max(cali_mat_train, axis=0)
cali_train_std = np.std(cali_mat_train, axis=0)
cali_train_avg = np.mean(cali_mat_train, axis=0)
var_dict['cali_train_avg'] = cali_train_avg
var_dict['cali_train_std'] = cali_train_std
var_dict['cali_train_min'] = cali_train_min
var_dict['cali_train_max'] = cali_train_max
cali_train_min_max = min_max_norm(cali_mat_train)
cali_val_min_max = min_max_norm(cali_mat_val, data_min=cali_train_min, data_max=cali_train_max)
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_train_avg = np.mean(label_mat_train, axis=0)
label_train_std = np.std(label_mat_train, axis=0)
label_train_min = np.min(label_mat_train, axis=0)
label_train_max = np.max(label_mat_train, axis=0)
# Labels are scaled to (-2, 2) rather than (-1, 1); the saved stats allow
# inverting this scaling at evaluation time.
label_train_min_max = min_max_norm(label_mat_train, feature_range=(-2, 2))
label_val_min_max = min_max_norm(label_mat_val, feature_range=(-2, 2), data_min=label_train_min, data_max=label_train_max)
var_dict['label_train_avg'] = label_train_avg
var_dict['label_train_std'] = label_train_std
var_dict['label_train_min'] = label_train_min
var_dict['label_train_max'] = label_train_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train; voxel inputs get a trailing channel axis added to match the
# Conv3D input shape.  History is pickled alongside the split indices.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_train_min_max}, y=label_train_min_max, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_val_min_max], label_val_min_max), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_l1_l2/vox_fluoro_img_stnd.py | .py | 22,260 | 460 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# sys.path.append(os.path.abspath(os.path.expanduser('~/fluoro/code')))
# import datacomp.h5py_multidimensional_array as h5py_multidimensional_array
# from datacomp.h5py_multidimensional_array import variable_matrix_loader
# Experiment identifier: the running script's filename without '.py'.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (model .h5, architecture .png, history pickle) go here.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load voxel, fluoro-image, calibration, and label matrices and split them.

    Applies the optional ``[first_indx:last_indx]`` slice to every dataset,
    then performs two chained 80/20 splits (test held out first, then
    validation carved from the remaining training data), both seeded with
    ``random_state=42`` for reproducibility.

    Returns:
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)
    """
    def _load(h5_path, dset_name):
        # Open read-only, materialize the requested slice, close the handle.
        h5_file = h5py.File(h5_path, 'r')
        mat = h5_file[dset_name][first_indx:last_indx]
        h5_file.close()
        return mat

    vox_mat = _load(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'vox_dset')
    image_mat = _load(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'image_dset')
    label_mat = _load(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'labels_dset')
    cali_mat = _load(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'cali_len3_rot')

    # First split: hold out 20% of everything as the (unreturned) test set.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Second split: carve 20% of the remaining training data off as validation.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)

    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Fixed input geometry: channels-last layout, 128x128 single-channel
# fluoro images, a (199, 164, 566) voxel grid with one channel, and a
# 6-element calibration vector.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Earlier loss-function prototypes kept for reference:
# def root_mean_squared_error(y_true, y_pred):
#     return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
#     base_dir = os.path.expanduser('~/fluoro/data/compilation')
#     stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
#     mean_dset = stats_file['mean']
#     std_dset = stats_file['std']
#     var_dset = stats_file['var']
#     mean_v = mean_dset[:]
#     std_v = std_dset[:]
#     var_v = var_dset[:]
#     stats_file.close()
#     return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """Mean squared error with each residual scaled by the per-component
    standard deviation of the training labels.

    The std vector is read from labels_stats.h5py every time the function
    is invoked (normally once, at graph-construction time).
    """
    stats_dir = os.path.expanduser('~/fluoro/data/compilation')
    with h5py.File(os.path.join(stats_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    scaled_residual = (y_pred - y_true) / std_v
    return tf.keras.backend.mean(tf.keras.backend.square(scaled_residual))
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each squared residual divided by the
    per-component variance of the training labels.

    The variance vector is read from labels_stats.h5py every time the
    function is invoked (normally once, at graph-construction time).
    """
    stats_dir = os.path.expanduser('~/fluoro/data/compilation')
    with h5py.File(os.path.join(stats_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    squared_residual = tf.keras.backend.square(y_pred - y_true)
    return tf.keras.backend.mean(squared_residual / var_v)
# Hyperparameter dictionary consumed throughout the model-building code below.
params = {
    # 3D CONV branch (voxel input): three conv/dropout/pool stages.
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV branch (each fluoro image): three conv stages plus dense head.
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 2,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units (after branch fusion)
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output: 6 linear units (pose regression target).
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping: regularizers, activations, optimizer, training.
    'conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.25),
    'dense_regularizer_1': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.25),
    'dense_regularizer_2': tf.keras.regularizers.l1_l2(l1=0.1, l2=0.25),
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # NOTE(review): 'model_opt' is the string 'adam' here; if compile later
    # calls params['model_opt'](lr=...) (as elsewhere in this file's history),
    # a string would fail — and passing 'adam' directly to compile would
    # silently ignore 'learning_rate'. Verify against the compile call.
    'model_opt': 'adam',
    'learning_rate': 0.001,
    'model_epochs': 30,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_std,
    'model_metric': cust_mean_squared_error_std
}
# -----------------------------------------------------------------
# Earlier placeholder-backed Input experiment, kept for reference:
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers: one voxel volume, two fluoro images, one calibration vector.
# Input names match the keys used in model.fit's x= dict.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers (voxel branch)
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers (the only stage with BatchNormalization enabled)
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattened 3D Conv (1000 -> 500 -> 250 units)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1.
# The Lambda wraps tf.image.per_image_standardization so each image is
# standardized (zero mean, unit variance) inside the graph itself.
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattened 2D Conv (60 -> 60 -> 60 units)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# 2D conv branch for fluoro image 2: identical architecture to the image-1
# branch above, but with its own (unshared) layer weights.
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D conv layers for image 2.
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D conv layers for image 2 (no dropout at this depth).
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense layers after the flattened 2D conv features (fluoro-2 head).
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense layers over the calibration vector input.
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Fuse the voxel, both fluoro, and calibration heads into one feature vector.
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense layers at the top of the model (only the first is followed by dropout).
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main regression output (units/activation come from params).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Build, compile, and render an architecture diagram of the model.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'], loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the four HDF5 data sources read-only; each file is closed further
# below, right after its dataset is materialized into memory.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition the index range [0, shape) into test and train sets.

    Args:
        shape: Total number of samples available to index.
        num_of_samples: If given, only this many indices are drawn (without
            replacement) before splitting; otherwise all ``shape`` indices
            are used.
        ratio: Fraction of the drawn indices assigned to the test set.

    Returns:
        Tuple ``(test_indx, train_indx)`` of two disjoint 1-D numpy index
        arrays in shuffled order.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` argument (it was previously ignored in
    # favor of a hard-coded 0.2; the default keeps the old behavior).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Draw a test split over all labels, then carve a validation split out of
# the remaining (training-superset) indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
# split_train_test here returns POSITIONS into train_sup_indxs, not sample
# indices; the next two lines map them back to actual sample indices.
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sort so HDF5-style fancy indexing below is monotonically increasing.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file recording the split indices and (later) the training history.
hist_file = open(os.path.join(save_dir, 'vox_fluoro_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each dataset fully into RAM ([:] reads the whole HDF5 dset),
# slice out the validation/training rows, then close the file. Test rows
# are not loaded here; only their indices were saved above.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train. Channel dims are added with expand_dims; images are split into the
# two fluoro views along axis 1 (index 0 -> view 1, index 1 -> view 2).
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the history + split indices, then the trained model itself.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
# --- vox_fluoro_small_norm.py (code/vox_fluoro/history/vox_fluoro_small_norm/, john-drago/fluoro) ---
import numpy as np
import h5py
import tensorflow as tf
import os
import sys
import pickle
import datetime
# 2019-09-30
# In this file we are going to complete unit testing to see where the current model goes wrong.
# We are not initially going to use batch normalization or dropout.
# Experiment name = script filename without its '.py' suffix.
expr_name = sys.argv[0][:-3]
save_dir = os.path.abspath(os.getcwd())
os.makedirs(save_dir, exist_ok=True)
save_image = True
# -----------------------------------------------------------------
# Initialize TensorBoard / Keras callback directories, one per timestamped run.
root_logdir = os.path.join(save_dir, 'tf_logs')
root_pydir = os.path.join(save_dir, 'py_hist')
run_id = datetime.datetime.now().strftime("%Y_%m_%d__%H%M%S")
log_dir = os.path.join(root_logdir, 'run_' + run_id)
py_dir = os.path.join(root_pydir, 'run_' + run_id)
os.makedirs(py_dir, exist_ok=True)
# Log which device each op is placed on (verbose; useful for GPU debugging).
tf.debugging.set_log_device_placement(True)
# -----------------------------------------------------------------
# Archive a copy of this script into the run directory so every run keeps
# a record of the exact code that produced it.
with open(expr_name + '.py') as f:
    with open(os.path.join(py_dir, 'run_' + run_id + '.py'), 'w') as f1:
        for line in f:
            f1.write(line)
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition the index range [0, shape) into test and train sets.

    Args:
        shape: Total number of samples available to index.
        num_of_samples: If given, only this many indices are drawn (without
            replacement) before splitting; otherwise all ``shape`` indices
            are used.
        ratio: Fraction of the drawn indices assigned to the test set.

    Returns:
        Tuple ``(test_indx, train_indx)`` of two disjoint 1-D numpy index
        arrays in shuffled order.
    """
    # Re-seed from a random draw in [0, 2**16) — keeps successive calls
    # nondeterministic relative to any earlier fixed seed.
    np.random.seed(np.random.choice(2**16))
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` argument (it was previously ignored in
    # favor of a hard-coded 0.2; the default keeps the old behavior).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# -----------------------------------------------------------------
# This is the file, which will help us normalize our input data to common ranges.
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Min-max scale `data_set` into `feature_range`.

    When `data_min`/`data_max` are supplied, out-of-range values are first
    clipped to those bounds and the supplied bounds drive the scaling;
    otherwise the bounds are computed from the data along `axis`.
    NOTE(review): if max == min the division yields inf/nan — callers are
    expected to pass non-constant data.
    """
    low, high = feature_range
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        # Clip from below to the caller-supplied minimum.
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        # Clip from above to the caller-supplied maximum.
        data_set = np.where(data_set > data_max, data_max, data_set)
    unit_scaled = (data_set - data_min) / (data_max - data_min)
    return unit_scaled * (high - low) + low
def min_max_norm_per_image(data_set, feature_range=(-1, 1), axis=(1, 2)):
    """Min-max scale each image of a batch independently into `feature_range`.

    Per-image bounds are reduced over `axis` (the spatial axes), then the
    reduced axes are re-inserted so the bounds broadcast element-wise against
    the full batch.
    """
    low, high = feature_range
    per_img_min = np.min(data_set, axis=axis)
    per_img_max = np.max(data_set, axis=axis)
    # Restore the reduced dimensions (as size-1 axes) until the bound arrays
    # have the same rank as the data, enabling broadcasting.
    while (len(per_img_min.shape) < len(data_set.shape)) and (len(per_img_max.shape) < len(data_set.shape)):
        per_img_min = np.expand_dims(per_img_min, axis=1)
        per_img_max = np.expand_dims(per_img_max, axis=1)
    unit_scaled = (data_set - per_img_min) / (per_img_max - per_img_min)
    return unit_scaled * (high - low) + low
# -----------------------------------------------------------------
# Single hyperparameter dictionary driving the whole architecture below.
# Keys are referenced by string throughout the layer definitions, so they
# must not be renamed without updating every usage.
params = {
    # 3D CONV Layers
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 7,
    'v_conv_0_strides': 2,
    'v_conv_0_pad': 'same',
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 3,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 1,
    'v_conv_3_pad': 'same',
    'v_conv_4_filters': 40,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides': 2,
    'v_conv_4_pad': 'same',
    'v_conv_5_filters': 40,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides': 1,
    'v_conv_5_pad': 'same',
    'v_conv_6_filters': 40,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides': 1,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 50,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides': 1,
    'v_conv_7_pad': 'same',
    'v_conv_8_filters': 50,
    'v_conv_8_kernel': 2,
    'v_conv_8_strides': 2,
    'v_conv_8_pad': 'same',
    'v_conv_9_filters': 50,
    'v_conv_9_kernel': 2,
    'v_conv_9_strides': 2,
    'v_conv_9_pad': 'same',
    'v_conv_10_filters': 60,
    'v_conv_10_kernel': 2,
    'v_conv_10_strides': 1,
    'v_conv_10_pad': 'valid',
    'dense_0_vox_units': 100,
    'dense_1_vox_units': 80,
    'dense_2_vox_units': 80,
    # 2D CONV Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'conv_1_filters': 40,
    'conv_1_kernel': 3,
    'conv_1_strides': 1,
    'conv_1_pad': 'same',
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'conv_3_filters': 40,
    'conv_3_kernel': 3,
    'conv_3_strides': 2,
    'conv_3_pad': 'same',
    'conv_4_filters': 40,
    'conv_4_kernel': 3,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'conv_5_filters': 40,
    'conv_5_kernel': 2,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    'conv_6_filters': 40,
    'conv_6_kernel': 2,
    'conv_6_strides': 2,
    'conv_6_pad': 'valid',
    'conv_7_filters': 40,
    'conv_7_kernel': 2,
    'conv_7_strides': 2,
    'conv_7_pad': 'valid',
    'conv_8_filters': 40,
    'conv_8_kernel': 2,
    'conv_8_strides': 1,
    'conv_8_pad': 'valid',
    'dense_0_flu_units': 80,
    'dense_1_flu_units': 80,
    'dense_2_flu_units': 80,
    # Cali Dense Units
    'dense_0_cali_units': 100,
    'dense_1_cali_units': 100,
    'dense_2_cali_units': 100,
    'dense_3_cali_units': 100,
    'dense_4_cali_units': 50,
    # Top Level Dense Units
    'dense_1_co_units': 300,
    'dense_2_co_units': 250,
    'dense_3_co_units': 200,
    'dense_4_co_units': 150,
    'dense_5_co_units': 100,
    'dense_6_co_units': 100,
    'dense_7_co_units': 100,
    'dense_8_co_units': 6,
    'drop_1_comb_rate': 0.3,
    'drop_2_comb_rate': 0.3,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    'act_reg': None,
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    # NOTE(review): 'model_opt' stores the Adam CLASS, not an instance, and
    # 'learning_rate' is not visibly consumed in this chunk — confirm where
    # the optimizer is actually constructed/compiled.
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 100,
    'model_batchsize': 5,
    'model_loss': 'mse',
    'model_metric': 'mse'
}
# Per-sample input shapes (channels-last): voxel volume, 6-element
# calibration vector, and two single-channel 128x128 fluoro images.
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
img_input_shape = (128, 128, 1)
channel_order = 'channels_last'
# -----------------------------------------------------------------
# Named Input layers — the names must match the keys used when feeding data.
input_vox = tf.keras.layers.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_cali = tf.keras.layers.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
input_fluoro_1 = tf.keras.layers.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.layers.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
# input_cali_1 = tf.keras.backend.print_tensor(input_cali)
# input_cali_1 = tf.keras.backend.print_tensor(input_cali)
# -----------------------------------------------------------------
# Voxel Analysis: eleven 3D conv stages (each followed by batch norm, with a
# single max-pool after stage 1), flattened into a three-layer dense head.
# Note: bn_0/bn_1 are deliberately reused as scratch names for the dense-stage
# batch norms; only bn_2_v escapes this section (consumed at the concat).
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=params['v_conv_0_strides'], padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(v_conv_1)
pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'])(bn_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(pool_0)
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=params['v_conv_4_strides'], padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=params['v_conv_5_strides'], padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=params['v_conv_6_strides'], padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_5)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=params['v_conv_7_strides'], padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=params['v_conv_8_strides'], padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=params['v_conv_9_strides'], padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=params['v_conv_10_strides'], padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_9)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_flatten_0 = tf.keras.layers.Flatten()(bn_10)
dense_0_vox = tf.keras.layers.Dense(units=params['dense_0_vox_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(v_flatten_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_0_vox)
dense_1_vox = tf.keras.layers.Dense(units=params['dense_1_vox_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_vox)
# Fixed copy-paste key: previously reused params['dense_1_vox_units'] here.
# Both keys are currently 80, so the built model is unchanged, but the
# dedicated 'dense_2_vox_units' knob now actually controls this layer.
dense_2_vox = tf.keras.layers.Dense(units=params['dense_2_vox_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_1)
bn_2_v = tf.keras.layers.BatchNormalization()(dense_2_vox)
# -----------------------------------------------------------------
# Fluoro Analysis 1: nine 2D conv stages with batch norm (one average-pool
# after stage 1), then a three-layer dense head. The bn_* names are scratch
# variables reused per section; only bn_2_1 escapes (used at the concat).
conv_0 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(conv_0)
conv_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1)
pool_0 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'], data_format=channel_order)(bn_1)
conv_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(pool_0)
bn_2 = tf.keras.layers.BatchNormalization()(conv_2)
conv_3 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3)
conv_4 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(conv_4)
conv_5 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5)
conv_6 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_5)
bn_6 = tf.keras.layers.BatchNormalization()(conv_6)
conv_7 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(conv_7)
conv_8 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(conv_8)
flatten_1 = tf.keras.layers.Flatten()(bn_8)
dense_0_flu = tf.keras.layers.Dense(units=params['dense_0_flu_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(flatten_1)
bn_0 = tf.keras.layers.BatchNormalization()(dense_0_flu)
dense_1_flu = tf.keras.layers.Dense(units=params['dense_1_flu_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_flu)
dense_2_flu = tf.keras.layers.Dense(units=params['dense_2_flu_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_1)
bn_2_1 = tf.keras.layers.BatchNormalization()(dense_2_flu)
# -----------------------------------------------------------------
# Fluoro Analysis 2: same architecture as the fluoro-1 section, with its own
# (unshared) weights. Scratch bn_*/conv_* names from section 1 are
# intentionally overwritten here; only bn_2_2 escapes (used at the concat).
conv_0 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(conv_0)
conv_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1)
pool_0 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'], data_format=channel_order)(bn_1)
conv_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(pool_0)
bn_2 = tf.keras.layers.BatchNormalization()(conv_2)
conv_3 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3)
conv_4 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(conv_4)
conv_5 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5)
conv_6 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_5)
bn_6 = tf.keras.layers.BatchNormalization()(conv_6)
conv_7 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(conv_7)
conv_8 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(conv_8)
flatten_1 = tf.keras.layers.Flatten()(bn_8)
dense_0_flu = tf.keras.layers.Dense(units=params['dense_0_flu_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(flatten_1)
bn_0 = tf.keras.layers.BatchNormalization()(dense_0_flu)
dense_1_flu = tf.keras.layers.Dense(units=params['dense_1_flu_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_flu)
dense_2_flu = tf.keras.layers.Dense(units=params['dense_2_flu_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_1)
bn_2_2 = tf.keras.layers.BatchNormalization()(dense_2_flu)
# -----------------------------------------------------------------
# Calibration branch: five dense layers (each followed by batch norm) over
# the 6-element calibration input; bn_4_cali feeds the concat below.
dense_0_cali = tf.keras.layers.Dense(units=params['dense_0_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(input_cali)
bn_0 = tf.keras.layers.BatchNormalization()(dense_0_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(dense_3_cali)
dense_4_cali = tf.keras.layers.Dense(units=params['dense_4_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_3)
bn_4_cali = tf.keras.layers.BatchNormalization()(dense_4_cali)
# -----------------------------------------------------------------
# Combine Cali, Vox, Fluoro
# concat_1 = tf.keras.layers.concatenate([bn_4_cali])
# concat_1 = tf.keras.layers.concatenate([bn_4_cali, bn_2_1, bn_2_2])
# Merge all four branch embeddings: calibration (bn_4_cali), fluoro image 1
# (bn_2_1, produced above this excerpt), fluoro image 2 (bn_2_2), and the
# voxel branch (bn_2_v, also produced above this excerpt).
concat_1 = tf.keras.layers.concatenate([bn_4_cali, bn_2_1, bn_2_2, bn_2_v])
# -----------------------------------------------------------------
# Dense Layers at Top
# bn_1 = tf.keras.layers.BatchNormalization()(input_cali)
# bn_1 = tf.keras.layers.BatchNormalization()(input_cali_1)
# dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_4_cali)
# Eight Dense/BatchNorm stages over the merged embedding.
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(concat_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(dense_2_comb)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(dense_3_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(dense_4_comb)
dense_5_comb = tf.keras.layers.Dense(units=params['dense_5_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_5)
bn_6 = tf.keras.layers.BatchNormalization()(dense_5_comb)
dense_6_comb = tf.keras.layers.Dense(units=params['dense_6_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_6_comb)
dense_7_comb = tf.keras.layers.Dense(units=params['dense_7_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_7_comb)
dense_8_comb = tf.keras.layers.Dense(units=params['dense_8_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['act_reg'])(bn_8)
# -----------------------------------------------------------------
# Output Layer
# Linear regression head (activation=None); width set by 'main_output_units'.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=None, kernel_initializer=params['kern_init'], activity_regularizer=None, name='main_output')(dense_8_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# model = tf.keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# Report live tensor allocations if a run hits OOM (TF1-compat run option,
# forwarded to the underlying session via compile's options= kwarg).
run_opts = tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom=True)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']], options=run_opts)
if save_image:
    # save_image, py_dir and run_id are defined above this excerpt.
    tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(py_dir, 'run_' + run_id + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Next we are going to ensure that we can accurately load sample data
# None => use every available sample (see split_train_test, defined above).
num_of_samples = None
# HDF5 data sources: voxel volumes, the two per-image-normalized fluoro
# images, calibration vectors, and the regression labels.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_mat_base = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_mat_base = cali_file['cali_len3_rot']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_mat_base = label_file['labels_dset']
# -----------------------------------------------------------------
# Train/val/test split; split_train_test is defined above this excerpt.
test_indxs, train_sup_indxs = split_train_test(len(label_mat_base), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# The second split returns positions into train_sup_indxs; map them back to
# absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted (increasing) index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file that will hold the split indices, normalization statistics and
# (at the bottom of the script) the training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects' + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Normalization statistics are computed on the TRAINING rows only and then
# applied unchanged to the validation data below (no leakage).
var_dict['cali_train_avg'] = np.mean(cali_mat_base[sorted(train_indxs)], axis=0)
var_dict['cali_train_std'] = np.std(cali_mat_base[sorted(train_indxs)], axis=0)
var_dict['cali_train_min'] = np.min(cali_mat_base[sorted(train_indxs)], axis=0)
var_dict['cali_train_max'] = np.max(cali_mat_base[sorted(train_indxs)], axis=0)
var_dict['label_train_avg'] = np.mean(label_mat_base[sorted(train_indxs)], axis=0)
var_dict['label_train_std'] = np.std(label_mat_base[sorted(train_indxs)], axis=0)
var_dict['label_train_min'] = np.min(label_mat_base[sorted(train_indxs)], axis=0)
var_dict['label_train_max'] = np.max(label_mat_base[sorted(train_indxs)], axis=0)
# Materialize the full datasets in memory, then slice out train/val subsets.
vox_mat_sup = vox_mat_base[:]
vox_mat_train = vox_mat_sup[train_indxs]
vox_mat_val = vox_mat_sup[val_indxs]
image_mat_sup_1 = image_init_1[:]
image_mat_sup_2 = image_init_2[:]
image_mat_train_1 = image_mat_sup_1[train_indxs]
image_mat_train_2 = image_mat_sup_2[train_indxs]
image_mat_val_1 = image_mat_sup_1[val_indxs]
image_mat_val_2 = image_mat_sup_2[val_indxs]
cali_mat_sup = cali_mat_base[:]
# min_max_norm is defined above this excerpt; validation data is scaled with
# the training min/max so validation statistics never influence training.
cali_train_min_max = min_max_norm(cali_mat_sup[train_indxs])
cali_val_min_max = min_max_norm(cali_mat_sup[val_indxs], data_min=var_dict['cali_train_min'], data_max=var_dict['cali_train_max'])
label_mat_sup = label_mat_base[:]
label_train_min_max = min_max_norm(label_mat_sup[train_indxs])
label_val_min_max = min_max_norm(label_mat_sup[val_indxs], data_min=var_dict['label_train_min'], data_max=var_dict['label_train_max'])
# All arrays are now in memory; release the HDF5 handles.
vox_file.close()
image_file.close()
cali_file.close()
label_file.close()
# -----------------------------------------------------------------
# NOTE(review): both callbacks below are constructed but never passed to
# model.fit(callbacks=...), so as written they have no effect -- confirm
# whether that is intentional.
tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir, histogram_freq=1, batch_size=params['model_batchsize'], write_grads=True, write_images=True)
terminate_if_nan = tf.keras.callbacks.TerminateOnNaN()
# Train; the x= dict keys match the tf.keras.Input layer names. The voxel
# arrays get a trailing channel axis added to match the model's input shape.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1),
                   'input_fluoro_1': image_mat_train_1,
                   'input_fluoro_2': image_mat_train_2,
                   'input_cali': cali_train_min_max},
                   y=label_train_min_max,
                   validation_data=([np.expand_dims(vox_mat_val, axis=-1),
                                     image_mat_val_1,
                                     image_mat_val_2,
                                     cali_val_min_max],
                                    label_val_min_max),
                   epochs=params['model_epochs'],
                   batch_size=params['model_batchsize'],
                   shuffle=True,
                   verbose=2)
# Persist the trained model and the history/stats dictionary opened above.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-20
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to continue normalizing the calibration inputs between -1 and 1, but we will only run the min max on the training data set.
# We likewise are going to normalize the label data set, but we will only run the function over the training data set. Moreover, we scale the label data from between -1 and 1.
# We have also removed all dropout from this model, and we will see if the model can overfit the data. We also are going to remove all batch normalization.
# We are going to also do per image normalization between -1 and 1.
# In this file, we are going to use 'mae' for the loss, and Nadam for the optimizer.
# Derive the experiment name from the script filename (e.g. "foo.py" -> "foo").
# os.path.splitext is safer than the original slice sys.argv[0][:-3], which
# silently corrupts the name whenever the script is invoked without a ".py"
# suffix; for any ".py" invocation the two are identical.
expr_name = os.path.splitext(sys.argv[0])[0]
expr_no = '1'
# All artifacts for this run (saved model, history pickle) live in a
# dedicated directory named after the experiment.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Variance-normalized mean squared error.

    Each output dimension's squared error is divided by that dimension's
    precomputed training-label variance (read from the stats HDF5 file),
    then averaged. Not referenced by the current params ('model_loss' is
    'mae'), but kept available for experiments.

    NOTE(review): the stats file is re-read on every call; fine when Keras
    traces the loss once while building the graph, wasteful if called
    eagerly per batch.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 handle is closed even if the
    # dataset read raises (the original leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyper-parameter dictionary consumed by the model-building code below.
# Key groups: 'v_*' -> 3-D voxel conv branch; plain 'conv_*' -> per-fluoro
# 2-D conv branch; 'comb_*' -> layers after the two fluoro branches merge;
# 'cali_*' -> calibration dense branch; 'vox_flu_*' / 'top_*' -> the dense
# head. Per the file header, all dropout rates are 0 and all regularizers
# are None in this experiment (deliberately disabled).
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    # NOTE(review): the layer code below builds v_conv_15..17 from the
    # v_conv_12..14 entries (and v_spatial_drop_rate_6), so the v_conv_15..17
    # and v_spatial_drop_rate_7 keys appear unused; the values are identical,
    # so it makes no numerical difference -- confirm intent.
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 10,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0,
    'cali_1_units': 20,
    'drop_2_cali': 0,
    'cali_2_units': 20,
    'cali_3_units': 10,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0,
    'top_dense_0': 10,
    'top_dense_1': 10,
    'top_dense_2': 10,
    'top_drop_1': 0,
    'top_dense_3': 10,
    'top_dense_4': 10,
    'top_drop_2': 0,
    'top_dense_5': 10,
    'top_dense_6': 10,
    'top_drop_3': 0,
    'top_dense_7': 10,
    'top_dense_8': 10,
    'top_drop_4': 0,
    'top_dense_9': 10,
    'top_dense_10': 10,
    'top_drop_5': 0,
    'top_dense_11': 10,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Nadam,
    'learning_rate': 0.001,
    'model_epochs': 30,
    'model_batchsize': 6,
    # NOTE: 'mae' here -- cust_mean_squared_error_var above is unused.
    'model_loss': 'mae',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
channel_order = 'channels_last'
# Fixed input geometries: 128x128 single-channel fluoro images, a
# (197, 162, 564) single-channel voxel volume, and a length-6 calibration
# vector.
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
# Named inputs -- presumably referenced by name when the model is fit
# further down the file (outside this excerpt); verify against the fit call.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
# Two strided entry convs progressively downsample the voxel volume.
# (All SpatialDropout3D rates are 0 in this experiment, so they are no-ops.)
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
# bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(v_conv_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
# bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_pool_0)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
# Identity residual block: conv -> dropout -> conv, summed with the block
# input, then activated.
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_2)
# bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
# NOTE(review): activity_regularizer is fed params['v_intra_act_fn'] here
# (an activation-fn entry, value None) -- looks like a copy/paste slip;
# harmless while the value is None, but confirm.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
# bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([v_conv_4, v_conv_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
# Identity residual blocks 2 and 3 -- same pattern as block 1.
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
# bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([v_conv_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
# bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([v_conv_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
# Downsampling residual blocks: the main path is conv(stride 2) -> dropout ->
# conv, and the shortcut is a separate strided conv applied to the block
# input (a projection shortcut), so the two Add operands match in shape.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(v_conv_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
# bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
# bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([v_conv_10, v_conv_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
# bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(v_conv_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
# bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
# bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([v_conv_13, v_conv_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# Downsampling residual block 6 -- same projection-shortcut pattern as
# block 5.
# FIX: the original indexed params['v_conv_12..14_*'] and
# ['v_spatial_drop_rate_6'] here, leaving the v_conv_15..17 /
# v_spatial_drop_rate_7 entries unused. The corresponding values in the
# params dict are identical, so this correction does not change behavior.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
# bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(v_conv_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
# bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
# bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([v_conv_16, v_conv_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# A 'valid'-padded conv to shrink the remaining volume, then flatten and
# two Dense stages; dense_2_v is the voxel branch's output embedding.
# (All Dropout rates are 0 in this experiment, so they are no-ops.)
# bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(v_act_5)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
# bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
# bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
# Two strided entry convs over the first fluoro image, then average pooling
# and identity residual blocks (conv -> dropout -> linear conv -> Add ->
# activation). All SpatialDropout2D rates are 0 in this experiment.
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# FIX: the original reused params['spatial_drop_rate_1'] and the
# params['conv_3_*'] keys in block 2, leaving 'spatial_drop_rate_2' and the
# 'conv_5_*' entries unused. The corresponding values in the params dict are
# identical, so this correction does not change behavior.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Mirror of FLUORO ANALYSIS 1 applied to the second fluoroscopy image
# (input_fluoro_2), using the SAME params keys (shared hyperparameters, but
# separate layer instances — weights are NOT shared between the two branches).
# NOTE(review): every intermediate name here (conv_0_1, spat_0_1, pool_0_1,
# act_0 ... add_5) shadows the identically-named variables from branch 1.
# Only act_5_2 (the branch output) is preserved under a unique name.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
# NOTE(review): reuses the conv_3_* hyperparameters, matching branch 1 — confirm intentional.
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
# Branch output kept under a unique name (act_5_2), analogous to act_5_1 above.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Channel-wise concatenation of the two fluoro branch outputs (default
# concatenate axis is the last one; with channels_last this is the channel axis
# — hedge: depends on channel_order set earlier in the file).
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Blocks 0-4 use identity skip connections (Add of the block input);
# blocks 5-7 instead use a conv shortcut (two conv paths both fed from the
# block input, summed) — see comb_12/comb_15/comb_18 below.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
# bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(comb_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
# bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, comb_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(comb_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
# bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, comb_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(comb_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
# bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, comb_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(comb_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
# bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, comb_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(comb_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
# bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, comb_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(comb_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
# bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
# Conv shortcut path: comb_12 branches directly from act_4 (the block input).
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([comb_11, comb_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(comb_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
# bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
# Conv shortcut path from act_5.
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([comb_14, comb_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(comb_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
# bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
# Conv shortcut path from act_6.
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([comb_17, comb_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
# bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(comb_flatten_1)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
# bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# Element-wise fusion of the fluoro embedding and the voxel embedding.
# NOTE(review): Add() requires params['dense_comb_1_units'] ==
# params['dense_2_v_units'] — verify the hyperparameter dict enforces this.
fluoro_vox_comb = tf.keras.layers.Add()([dense_1_comb, dense_2_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Plain dense stack (with dropout) over the fused fluoro+voxel embedding.
# bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_act)
# bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
# bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
# bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
# bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_3)
# bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small dense branch over the calibration input vector (input_cali).
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
# bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
# bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
# bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(cali_2)
# bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
# NOTE(review): Add() requires params['cali_3_units'] == params['vox_flu_units_4'].
top_level_comb = tf.keras.layers.Add()([cali_3, vox_flu_4])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Dense residual stack: pairs of (dense -> dropout -> dense) summed with the
# previous activation, repeated, then projected to the main output.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
# bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
# bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
# First residual sum skips back to the calibration embedding (cali_3),
# not to top_level_act — shapes must therefore all match cali_3's units.
add_0 = tf.keras.layers.Add()([top_dense_1, cali_3])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
# bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([top_dense_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
# bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([top_dense_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(top_dense_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
# bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([top_dense_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(top_dense_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
# bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([top_dense_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_dense_10)
# bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([top_dense_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): this layer reads params['top_dense_4'] (not a 'top_dense_12'
# key) and disables the activity regularizer — looks like a copy-paste; confirm
# intentional before changing, since it alters the final hidden width.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
# Four-input single-output functional model; inputs are defined earlier in the file.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Writes a diagram of the graph next to the experiment artifacts; requires pydot/graphviz.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Scale `data_set` into `feature_range` using min/max statistics.

    When `data_min`/`data_max` are supplied (e.g. statistics computed on the
    training set), values are first clipped to that range so out-of-range
    samples in a validation/test set cannot map outside `feature_range`.
    Otherwise the statistics are computed from `data_set` itself along `axis`.
    """
    lo, hi = feature_range
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        # Clip below the supplied floor before scaling.
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        # Clip above the supplied ceiling before scaling.
        data_set = np.where(data_set > data_max, data_max, data_set)
    unit_scaled = (data_set - data_min) / (data_max - data_min)
    return unit_scaled * (hi - lo) + lo
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices [0, shape) into test and train index arrays.

    Args:
        shape: total number of samples to draw indices from.
        num_of_samples: optionally subsample this many indices (without
            replacement) before splitting; ``None`` uses all ``shape`` indices.
        ratio: fraction of the (sub)sampled indices assigned to the test set.

    Returns:
        (test_indx, train_indx): disjoint arrays of shuffled indices.
    """
    size = shape if num_of_samples is None else num_of_samples
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    # Bug fix: the split fraction was previously hard-coded to 0.2,
    # silently ignoring the `ratio` argument.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# -----------------------------------------------------------------
# Open the precomputed HDF5 datasets: voxel volumes, the two per-image
# min-max-normalized fluoro images, calibration vectors, and labels.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# Two-stage split: first carve out the held-out test set, then split the
# remainder into validation and training indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# The second split returns positions *into* train_sup_indxs; map them back
# to absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sort for reproducibility of the saved index lists (h5py fancy indexing
# also requires increasing order if used directly on a dataset).
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle of run metadata (indices, normalization stats, training history),
# written at the very end of the script.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each HDF5 dataset fully into RAM, then slice out the
# validation and training subsets (test data is left untouched here).
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
# Calibration statistics are computed on the *training* subset only and
# saved so that inference can reproduce the same scaling.
cali_train_min = np.min(cali_mat_train, axis=0)
cali_train_max = np.max(cali_mat_train, axis=0)
cali_train_std = np.std(cali_mat_train, axis=0)
cali_train_avg = np.mean(cali_mat_train, axis=0)
var_dict['cali_train_avg'] = cali_train_avg
var_dict['cali_train_std'] = cali_train_std
var_dict['cali_train_min'] = cali_train_min
var_dict['cali_train_max'] = cali_train_max
# Validation data is scaled with the training min/max (clipped inside
# min_max_norm) to avoid leaking validation statistics.
cali_train_min_max = min_max_norm(cali_mat_train)
cali_val_min_max = min_max_norm(cali_mat_val, data_min=cali_train_min, data_max=cali_train_max)
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
# Same scheme for the regression labels: train-set statistics, applied to
# both splits, and persisted for de-normalizing predictions later.
label_train_avg = np.mean(label_mat_train, axis=0)
label_train_std = np.std(label_mat_train, axis=0)
label_train_min = np.min(label_mat_train, axis=0)
label_train_max = np.max(label_mat_train, axis=0)
label_train_min_max = min_max_norm(label_mat_train, feature_range=(-1, 1))
label_val_min_max = min_max_norm(label_mat_val, feature_range=(-1, 1), data_min=label_train_min, data_max=label_train_max)
var_dict['label_train_avg'] = label_train_avg
var_dict['label_train_std'] = label_train_std
var_dict['label_train_min'] = label_train_min
var_dict['label_train_max'] = label_train_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: input dict keys match the tf.keras.Input names; the voxel arrays
# get an explicit trailing channel axis via expand_dims.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_train_min_max}, y=label_train_min_max, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_val_min_max], label_val_min_max), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# Persist the training history together with the indices/statistics
# collected above.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-18
# We are continuing the usage of the architecture based on the residual nets
# In this file, we are going to standardize the calibration inputs with mean 0 and std 1
# We likewise are going to standardize the label dataset based on the training and validation datasets. We are going to standardize for each label over all of the instances, with mean 0 and std 1.
# We are going to also do per image normalization with mean 0 and std 1.
# In this file, we are going to use 'mse' for the loss as opposed to the 'mae' in order to penalize bigger losses.
# Derive the experiment name from this script's filename by stripping the
# trailing ".py" (NOTE(review): assumes the script is invoked with a ".py"
# path in sys.argv[0] — confirm for non-standard launchers).
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All run artifacts (architecture plot, saved model, history pickle) are
# written under this per-experiment directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each label's squared error scaled by its variance.

    The per-label variance is read from the precomputed label-statistics HDF5
    file so that labels with a large natural spread do not dominate the loss.

    Args:
        y_true: ground-truth label tensor.
        y_pred: predicted label tensor of the same shape.

    Returns:
        Scalar tensor: mean over all elements of (y_pred - y_true)^2 / var.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager ensures the file handle is released even if the
    # dataset read raises (the previous version leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Single flat hyperparameter dictionary for the whole model. Keys are grouped
# by subnetwork: 'v_*' for the 3D voxel branch, bare 'conv_*' for each 2D
# fluoro branch, 'comb_*' for the merged fluoro tower, 'cali_*' for the
# calibration MLP, and 'top_*' for the final dense head.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 11,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.4,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 7,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.4,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.4,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.4,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.4,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.4,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.4,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'drop_1_v_rate': 0.3,
    'dense_1_v_units': 75,
    'drop_2_v_rate': 0.3,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.4,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.4,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.4,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.4,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.4,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.4,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.4,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.4,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.4,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.4,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.4,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.4,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.4,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.4,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.4,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'drop_1_comb': 0.3,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_drop_1': 0.3,
    'vox_flu_units_1': 50,
    'vox_flu_drop_2': 0.3,
    'vox_flu_units_2': 30,
    'vox_flu_drop_3': 0.3,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'drop_1_cali': 0.3,
    'cali_1_units': 20,
    'drop_2_cali': 0.3,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_drop_0': 0.2,
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_drop_1': 0.2,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_drop_2': 0.2,
    'top_dense_5': 6,
    'top_dense_6': 6,
    'top_drop_3': 0.2,
    'top_dense_7': 6,
    'top_dense_8': 6,
    'top_drop_4': 0.2,
    'top_dense_9': 6,
    'top_dense_10': 6,
    'top_drop_5': 0.2,
    'top_dense_11': 6,
    'top_dense_12': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    # 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
    # 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
    'activation_fn': 'elu',
    'kern_init': 'he_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 40,
    'model_batchsize': 6,
    # NOTE(review): the file header comment says this run uses 'mse', but
    # 'mae' is configured here — confirm which is intended.
    'model_loss': 'mae',
    'model_metric': 'mae'
}
# -----------------------------------------------------------------
# Keras expects channels-last tensors throughout this model.
channel_order = 'channels_last'
# Fixed input geometries: two 128x128 single-channel fluoro images, one
# (197, 162, 564) single-channel voxel volume, and a 6-vector of
# calibration values.
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers — names must match the keys passed to model.fit().
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D convolutional branch over the voxel volume: two strided entry convs,
# a max-pool, then a chain of residual blocks (conv -> BN -> dropout ->
# conv -> BN, summed with the block input before the activation).
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
# NOTE(review): here (and in the matching convs below) 'v_intra_act_fn' is
# passed as activity_regularizer while activation stays 'activation_fn' —
# the 2D branch instead passes 'intra_act_fn' as the activation. Both values
# are None today, so the built model is unaffected, but confirm the intent.
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4 — downsampling block: the skip path (v_conv_11) is itself a strided
# conv so its shape matches the main path before the Add.
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5 — another downsampling block with a strided-conv skip (v_conv_14).
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6 — downsampling block with a strided-conv skip (v_conv_17).
# Fix: this block previously re-used the v_conv_12/13/14 and
# v_spatial_drop_rate_6 hyperparameters, leaving the v_conv_15..17 and
# v_spatial_drop_rate_7 entries in `params` unused. The values are identical
# today, so wiring in the intended keys does not change the built model.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# Collapse the remaining spatial extent with a 'valid' conv, then flatten
# into the voxel branch's dense head.
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Dropout -> Dense -> BN twice; bn_21_v is the voxel branch's final
# feature vector, merged with the fluoro branch further down the file.
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(bn_19)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(bn_20)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D conv branch over the first fluoro image: two strided entry convs, an
# average pool, then residual blocks analogous to the voxel branch.
# NOTE(review): the bn_N / add_N / act_N names below shadow the voxel
# branch's Python variables — harmless because each tensor is consumed
# before it is reassigned, but beware when editing.
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# Fix: this block previously re-used the conv_3_* and spatial_drop_rate_1
# hyperparameters; wire in the conv_5_* / spatial_drop_rate_2 entries that
# `params` defines for it. The values are identical today, so the built
# model is unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3 — identity residual block (conv -> BN -> dropout -> conv -> BN + skip).
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6 (continues past this point in the file)
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Second fluoro-image branch.  Mirrors the first branch exactly (same
# hyperparameter keys, so the two branches share *settings* but NOT
# weights -- each tf.keras layer object here is a fresh instance).
# Variable names (bn_1, act_0, ...) are deliberately rebound, shadowing
# the branch-1 tensors, which are no longer needed past `act_5_1`.
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# Residual block 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# Residual block 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
# NOTE(review): spatial_drop_rate_1 and the conv_3_* params are reused here
# (matching branch 1) rather than conv_4/conv_5-specific keys -- confirm this
# mirrors the intended architecture.
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# Residual block 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# Residual block 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# Residual block 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# Residual block 6 (final block of this branch; output is `act_5_2`)
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Channel-wise concatenation of the two fluoro branch outputs.
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Residual blocks 0-4 use an identity skip (previous activation added
# directly); blocks 5-7 use a second conv path from the previous
# activation instead (projection-style shortcut).
# Block 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# Block 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# Block 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# Block 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# Block 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# Block 5 -- skip path is a conv (comb_12) of act_4, not an identity add.
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# Block 6 -- projection shortcut via comb_15(act_5).
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# Block 7 -- projection shortcut via comb_18(act_6).
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs: flatten the combined-fluoro feature map and pass
# it through two regularized dense layers; `bn_21_f` is this sub-network's
# output, later added to the voxel-branch output `bn_21_v`.
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_comb_1 = tf.keras.layers.Dropout(params['drop_1_comb'])(bn_20)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_comb_1)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
# `bn_21_v` (voxel-branch dense output) is defined above this chunk; the
# two branch embeddings are merged by element-wise addition, so they must
# share the same dense width.
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# Stack of dense -> BN -> dropout layers; final output is `bn_5_comb`.
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(bn_1)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(bn_2)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(bn_3)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
# Small dense stack over the calibration input vector; output `bn_4_c`
# must match `bn_5_comb` in width because they are added below.
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(bn_1)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(bn_2)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
# Residual dense stack: each pair of Dense layers (with BN/dropout) is
# added to the previous activation; note the first skip adds `bn_4_c`
# (the calibration embedding) rather than `top_level_act`.
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(bn_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(bn_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(bn_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(bn_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([bn_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(bn_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([bn_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([bn_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
# NOTE(review): top_dense_12 takes its width from params['top_dense_4'] --
# there is no 'top_dense_12' key in use; presumably a copy-paste carry-over.
# Also the only layer in this stack with no activity regularizer.
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output: regression head producing the pose/label vector.
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping: assemble the four-input graph, compile, and dump an
# architecture diagram plus a textual summary for the experiment record.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0):
    """Linearly rescale *data_set* so values along *axis* span *feature_range*.

    Args:
        data_set: array-like numeric data.
        feature_range: (low, high) target interval; defaults to (-1, 1).
        axis: axis along which the per-feature min/max are computed.

    Returns:
        Array of the same shape scaled into *feature_range*.
    """
    low, high = feature_range
    col_min = np.min(data_set, axis=axis)
    col_max = np.max(data_set, axis=axis)
    # Map to [0, 1] first, then stretch/shift into the requested interval.
    unit_scaled = (data_set - col_min) / (col_max - col_min)
    return unit_scaled * (high - low) + low
def feature_scaler(data_set, mean=0, std=1, axis=0):
    """Standardize *data_set* along *axis* to the given *mean* and *std*.

    Args:
        data_set: array-like numeric data.
        mean: target mean of the output.
        std: target standard deviation of the output.
        axis: axis along which the sample mean/std are computed.

    Returns:
        Array of the same shape with the requested first two moments.
    """
    centered = data_set - np.mean(data_set, axis=axis)
    return centered / (np.std(data_set, axis=axis) / std) + mean
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Shuffle the index range [0, shape) and split it into test/train indices.

    Args:
        shape: total number of samples to draw indices from.
        num_of_samples: optional cap on how many shuffled indices to draw;
            ``None`` means use all ``shape`` indices.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        Tuple ``(test_indx, train_indx)`` of disjoint index arrays.
    """
    if num_of_samples is None:
        num_of_samples = shape
    # Sampling without replacement yields a random permutation (or subset).
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently ignored
    # the `ratio` parameter; default behavior (ratio=0.2) is unchanged.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# ---------------------------------------------------------------
# Data loading, index splitting, label normalization, and training.
# NOTE: every dataset is read fully into memory (`dset[:]`) before
# slicing by index -- simple but memory-heavy for large HDF5 files.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['std_scale_dset_per_image']
image_init_2 = image_grp_2['std_scale_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration_norm_std.h5py'), 'r')
cali_init = cali_file['std_scale_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
# Two-level split: first carve out a held-out test set, then split the
# remainder into validation and training indices.  The test set is only
# recorded (saved to the pickle below), never loaded here.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted index lists allow efficient fancy indexing into the arrays.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
# Labels are standardized over train+val jointly; train rows come first in
# the concatenated order, so the slices below recover the two subsets.
label_mat_sup = label_init[:]
label_mat_sup = label_mat_sup[list(list(train_indxs) + list(val_indxs))]
label_mat_sup_norm = feature_scaler(label_mat_sup)
label_mat_train = label_mat_sup_norm[:len(train_indxs)]
label_mat_val = label_mat_sup_norm[-len(val_indxs):]
# Stats saved so predictions can be un-standardized later -- presumably by
# downstream evaluation scripts (not shown here).
label_t_mean = np.mean(label_mat_sup, axis=0)
label_t_std = np.std(label_mat_sup, axis=0)
label_t_min = np.min(label_mat_sup, axis=0)
label_t_max = np.max(label_mat_sup, axis=0)
var_dict['label_train_val_mean'] = label_t_mean
var_dict['label_train_val_std'] = label_t_std
var_dict['label_train_val_min'] = label_t_min
var_dict['label_train_val_max'] = label_t_max
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Voxel arrays get an explicit trailing channel dim; fluoro/cali inputs are
# fed as stored.  History + indices + label stats are pickled together.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_two_lin_no_reg/vox_fluoro_two_lin_no_reg.py | .py | 22,143 | 458 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating changing the l1 and l2 regularization to 0 and 0 respectively
# This experiment is also going to evaluate the var loss as opposed to the std one
# Experiment bookkeeping: name the run after this script file (strip '.py').
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (saved model, history pickle, architecture png) go under save_dir.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def _load_h5_dset(file_name, dset_name, first_indx, last_indx):
    """Read rows [first_indx:last_indx] of one dataset under ~/fluoro/data/compilation.

    Uses a context manager so the file is closed even if the read raises
    (the original open/slice/close boilerplate leaked the handle on error).
    """
    path = os.path.expanduser(os.path.join('~/fluoro/data/compilation', file_name))
    with h5py.File(path, 'r') as h5_file:
        return h5_file[dset_name][first_indx:last_indx]


def data_comp(first_indx=None, last_indx=None):
    """Load voxel, fluoro-image, calibration and label data and split it.

    Args:
        first_indx, last_indx: optional row bounds applied to every dataset
            (both None loads everything).

    Returns:
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)
        -- the training and validation subsets for each modality.
    """
    vox_mat = _load_h5_dset('voxels_pad.h5py', 'vox_dset', first_indx, last_indx)
    image_mat = _load_h5_dset('images.h5py', 'image_dset', first_indx, last_indx)
    label_mat = _load_h5_dset('labels.h5py', 'labels_dset', first_indx, last_indx)
    cali_mat = _load_h5_dset('calibration.h5py', 'cali_len3_rot', first_indx, last_indx)

    # First split: hold out 20% as the final test set (fixed seed => reproducible).
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)

    # Second split: 20% of the remaining pool becomes the validation set.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)

    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Tensor layout and input geometry shared by every branch of the model.
channel_order = 'channels_last'
# Single fluoroscopy view: 128x128, one channel.
img_input_shape = (128, 128, 1)
# Padded voxel volume with a trailing channel axis -- matches voxels_pad.h5py.
vox_input_shape = (199, 164, 566, 1)
# Six calibration values (dataset 'cali_len3_rot').
cali_input_shape = (6,)
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label component divided by its training-set std.

    Scaling by the per-component std puts the pose components on a
    comparable footing so no single component dominates the loss.
    """
    # Cache the std vector on the function object: the original re-opened the
    # HDF5 stats file on every invocation, which is needless I/O each time the
    # loss is (re)traced or evaluated.
    std_v = getattr(cust_mean_squared_error_std, '_std_v', None)
    if std_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        std_v = stats_file['std'][:]
        stats_file.close()
        cust_mean_squared_error_std._std_v = std_v
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared error divided by the training-set label variance.

    Mathematically equivalent to scaling each residual by the std; used as
    both loss and metric in this experiment.
    """
    # Cache the variance vector on the function object: the original re-opened
    # the HDF5 stats file on every invocation (pure overhead per call).
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        var_v = stats_file['var'][:]
        stats_file.close()
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyperparameters for the whole model. Regularizers are disabled (None) in
# this run, and the loss/metric is the variance-scaled MSE defined above
# (cust_mean_squared_error_var).
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
# One sample = one voxel volume, two fluoroscopy views, one calibration vector.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers
# Conv -> spatial dropout -> max-pool, repeated three times to shrink the volume.
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattened 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
# In-graph per-image standardization: each image is scaled to zero mean / unit variance.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1 (no dropout on this stage)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattened 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 2
# Same architecture as the Image 1 branch (weights are NOT shared -- separate layers).
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattened 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# Feature-wise concatenation of the four branch embeddings.
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
# NOTE(review): this hidden layer uses 'main_output_act' (linear) rather than
# 'activation_fn' like its siblings -- confirm this was intentional.
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output
# Six linear outputs (see params['main_output_units']).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Writes an architecture diagram next to the other run artifacts (needs pydot/graphviz).
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Re-open the data files for the manual loading path below (data_comp above
# is not used by this script's training loop).
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition the index range [0, shape) into test and train arrays.

    Args:
        shape: total number of available samples.
        num_of_samples: if given, only this many indices are drawn (without
            replacement) before splitting; otherwise all `shape` indices are used.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): two disjoint numpy index arrays.
    """
    if num_of_samples is None:
        num_of_samples = shape
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently ignored
    # the `ratio` parameter. Default behavior (ratio=0.2) is unchanged.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Draw the test split from the full dataset, then carve a validation split out
# of the remaining training indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# The second split returned positions *into* train_sup_indxs; map them back to
# dataset-level indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted index lists: deterministic order, and monotonically increasing indices
# are required if used directly for h5py fancy indexing.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the split (and later the training history) so the run is reproducible.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Load each dataset fully into memory, take the train/val rows, then close the file.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train. The voxel volume gets a trailing channel axis via expand_dims; the
# image matrix is assumed to hold the two fluoro views along axis 1
# (shape (N, 2, 128, 128)) -- each view is sliced out and given its own
# channel axis. TODO confirm against images.h5py layout.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Save the per-epoch history (plus the index split recorded earlier) and the model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_res_batch_10/vox_fluoro_res_batch_10.py | .py | 57,193 | 1,112 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual netowrks
# Experiment bookkeeping: name the run after this script file (strip '.py').
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (saved model, history pickle, architecture png) go under save_dir.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared error divided by the training-set label variance.

    Used as both loss and metric for this residual-network experiment.
    """
    # Cache the variance vector on the function object: the original re-opened
    # the HDF5 stats file on every invocation (pure overhead per call).
    var_v = getattr(cust_mean_squared_error_var, '_var_v', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        var_v = stats_file['var'][:]
        stats_file.close()
        cust_mean_squared_error_var._var_v = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
# Hyperparameter dictionary for the residual-network experiment. Keys are
# grouped by branch: the 3D voxel conv stack, the 2D fluoro conv stacks, the
# combined-fluoro residual stack, the dense heads, and general training
# settings. '*_intra_act_fn' entries hold the activation used inside a
# residual block (None here) and '*_res_act_fn' the activation applied
# after the residual Add.
params = {
    # ---
    # 3D CONV
    # ---
    # Entry Layers
    'v_intra_act_fn': None,
    'v_res_act_fn': 'elu',
    'v_conv_0_filters': 30,
    'v_conv_0_kernel': 9,
    'v_conv_0_strides_0': 2,
    'v_conv_0_strides_1': 2,
    'v_conv_0_strides_2': 2,
    'v_conv_0_pad': 'same',
    'v_spatial_drop_rate_0': 0.3,
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 5,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 3,
    'v_conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'v_pool_0_size': 2,
    'v_pool_0_pad': 'same',
    # ---
    # Second Run of Entry Layers
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 2,
    'v_conv_2_pad': 'same',
    # ---
    # Run of Residual Layers
    # 1
    'v_conv_3_filters': 30,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 1,
    'v_conv_3_strides_1': 1,
    'v_conv_3_strides_2': 1,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 30,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 1,
    'v_conv_4_strides_1': 1,
    'v_conv_4_strides_2': 1,
    'v_conv_4_pad': 'same',
    # 2
    'v_conv_5_filters': 30,
    'v_conv_5_kernel': 3,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 30,
    'v_conv_6_kernel': 3,
    'v_conv_6_strides_0': 1,
    'v_conv_6_strides_1': 1,
    'v_conv_6_strides_2': 1,
    'v_conv_6_pad': 'same',
    # 3
    'v_conv_7_filters': 30,
    'v_conv_7_kernel': 3,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 3,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    # 4
    'v_conv_9_filters': 40,
    'v_conv_9_kernel': 3,
    'v_conv_9_strides_0': 2,
    'v_conv_9_strides_1': 2,
    'v_conv_9_strides_2': 2,
    'v_conv_9_pad': 'same',
    'v_spatial_drop_rate_5': 0.3,
    'v_conv_10_filters': 40,
    'v_conv_10_kernel': 3,
    'v_conv_10_strides_0': 1,
    'v_conv_10_strides_1': 1,
    'v_conv_10_strides_2': 1,
    'v_conv_10_pad': 'same',
    'v_conv_11_filters': 40,
    'v_conv_11_kernel': 3,
    'v_conv_11_strides_0': 2,
    'v_conv_11_strides_1': 2,
    'v_conv_11_strides_2': 2,
    'v_conv_11_pad': 'same',
    # 5
    'v_conv_12_filters': 50,
    'v_conv_12_kernel': 2,
    'v_conv_12_strides_0': 2,
    'v_conv_12_strides_1': 2,
    'v_conv_12_strides_2': 2,
    'v_conv_12_pad': 'same',
    'v_spatial_drop_rate_6': 0.3,
    'v_conv_13_filters': 50,
    'v_conv_13_kernel': 2,
    'v_conv_13_strides_0': 1,
    'v_conv_13_strides_1': 1,
    'v_conv_13_strides_2': 1,
    'v_conv_13_pad': 'same',
    'v_conv_14_filters': 50,
    'v_conv_14_kernel': 1,
    'v_conv_14_strides_0': 2,
    'v_conv_14_strides_1': 2,
    'v_conv_14_strides_2': 2,
    'v_conv_14_pad': 'same',
    # 6
    'v_conv_15_filters': 50,
    'v_conv_15_kernel': 2,
    'v_conv_15_strides_0': 2,
    'v_conv_15_strides_1': 2,
    'v_conv_15_strides_2': 2,
    'v_conv_15_pad': 'same',
    'v_spatial_drop_rate_7': 0.3,
    'v_conv_16_filters': 50,
    'v_conv_16_kernel': 2,
    'v_conv_16_strides_0': 1,
    'v_conv_16_strides_1': 1,
    'v_conv_16_strides_2': 1,
    'v_conv_16_pad': 'same',
    'v_conv_17_filters': 50,
    'v_conv_17_kernel': 1,
    'v_conv_17_strides_0': 2,
    'v_conv_17_strides_1': 2,
    'v_conv_17_strides_2': 2,
    'v_conv_17_pad': 'same',
    # ---
    # Final Convs
    'v_spatial_drop_rate_8': 0.5,
    'v_conv_18_filters': 50,
    'v_conv_18_kernel': 2,
    'v_conv_18_strides_0': 1,
    'v_conv_18_strides_1': 1,
    'v_conv_18_strides_2': 1,
    'v_conv_18_pad': 'valid',
    'dense_1_v_units': 75,
    'dense_2_v_units': 50,
    # ---
    # 2D CONV
    # ---
    'intra_act_fn': None,
    'res_act_fn': 'elu',
    # Entry Fluoro Layers
    'conv_0_filters': 30,
    'conv_0_kernel': 5,
    'conv_0_strides': 2,
    'conv_0_pad': 'same',
    'spatial_drop_rate_0': 0.3,
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    # ---
    # Pool After Initial Layers
    'pool_0_size': 2,
    'pool_0_pad': 'same',
    # ---
    # Run Of Residual Layers
    # 1
    'conv_2_filters': 30,
    'conv_2_kernel': 3,
    'conv_2_strides': 1,
    'conv_2_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_3_filters': 30,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    # 2
    'conv_4_filters': 30,
    'conv_4_kernel': 3,
    'conv_4_strides': 1,
    'conv_4_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_5_filters': 30,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    # 3
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_7_filters': 30,
    'conv_7_kernel': 3,
    'conv_7_strides': 1,
    'conv_7_pad': 'same',
    # 4
    'conv_8_filters': 30,
    'conv_8_kernel': 3,
    'conv_8_strides': 1,
    'conv_8_pad': 'same',
    'spatial_drop_rate_4': 0.3,
    'conv_9_filters': 30,
    'conv_9_kernel': 3,
    'conv_9_strides': 1,
    'conv_9_pad': 'same',
    # 5
    'conv_10_filters': 30,
    'conv_10_kernel': 3,
    'conv_10_strides': 1,
    'conv_10_pad': 'same',
    'spatial_drop_rate_5': 0.3,
    'conv_11_filters': 30,
    'conv_11_kernel': 3,
    'conv_11_strides': 1,
    'conv_11_pad': 'same',
    # 6
    'conv_12_filters': 30,
    'conv_12_kernel': 3,
    'conv_12_strides': 1,
    'conv_12_pad': 'same',
    'spatial_drop_rate_6': 0.3,
    'conv_13_filters': 30,
    'conv_13_kernel': 3,
    'conv_13_strides': 1,
    'conv_13_pad': 'same',
    # ---
    # COMB FLUOROS
    # ---
    # ---
    # RES NET AFTER COMB FLUORO
    # ---
    'c_intra_act_fn': None,
    'c_res_act_fn': 'elu',
    # 0
    'comb_0_filters': 60,
    'comb_0_kernel': 3,
    'comb_0_strides': 1,
    'comb_0_pad': 'same',
    'comb_spatial_0': 0.3,
    'comb_1_filters': 60,
    'comb_1_kernel': 3,
    'comb_1_strides': 1,
    'comb_1_pad': 'same',
    # 1
    'comb_2_filters': 60,
    'comb_2_kernel': 3,
    'comb_2_strides': 1,
    'comb_2_pad': 'same',
    'comb_spatial_1': 0.3,
    'comb_3_filters': 60,
    'comb_3_kernel': 3,
    'comb_3_strides': 1,
    'comb_3_pad': 'same',
    # 2
    'comb_4_filters': 60,
    'comb_4_kernel': 3,
    'comb_4_strides': 1,
    'comb_4_pad': 'same',
    'comb_spatial_2': 0.3,
    'comb_5_filters': 60,
    'comb_5_kernel': 3,
    'comb_5_strides': 1,
    'comb_5_pad': 'same',
    # 3
    'comb_6_filters': 60,
    'comb_6_kernel': 3,
    'comb_6_strides': 1,
    'comb_6_pad': 'same',
    'comb_spatial_3': 0.3,
    'comb_7_filters': 60,
    'comb_7_kernel': 3,
    'comb_7_strides': 1,
    'comb_7_pad': 'same',
    # 4
    'comb_8_filters': 60,
    'comb_8_kernel': 3,
    'comb_8_strides': 1,
    'comb_8_pad': 'same',
    'comb_spatial_4': 0.3,
    'comb_9_filters': 60,
    'comb_9_kernel': 3,
    'comb_9_strides': 1,
    'comb_9_pad': 'same',
    # 5
    'comb_10_filters': 60,
    'comb_10_kernel': 2,
    'comb_10_strides': 2,
    'comb_10_pad': 'same',
    'comb_spatial_5': 0.3,
    'comb_11_filters': 60,
    'comb_11_kernel': 2,
    'comb_11_strides': 1,
    'comb_11_pad': 'same',
    'comb_12_filters': 60,
    'comb_12_kernel': 1,
    'comb_12_strides': 2,
    'comb_12_pad': 'same',
    # 6
    'comb_13_filters': 60,
    'comb_13_kernel': 2,
    'comb_13_strides': 2,
    'comb_13_pad': 'same',
    'comb_spatial_6': 0.3,
    'comb_14_filters': 60,
    'comb_14_kernel': 2,
    'comb_14_strides': 1,
    'comb_14_pad': 'same',
    'comb_15_filters': 60,
    'comb_15_kernel': 1,
    'comb_15_strides': 2,
    'comb_15_pad': 'same',
    # 7
    'comb_16_filters': 60,
    'comb_16_kernel': 2,
    'comb_16_strides': 2,
    'comb_16_pad': 'same',
    'comb_spatial_7': 0.3,
    'comb_17_filters': 60,
    'comb_17_kernel': 2,
    'comb_17_strides': 1,
    'comb_17_pad': 'same',
    'comb_18_filters': 60,
    'comb_18_kernel': 1,
    'comb_18_strides': 2,
    'comb_18_pad': 'same',
    # ---
    # Final Convs After Fluoro
    'comb_19_filters': 60,
    'comb_19_kernel': 2,
    'comb_19_strides': 1,
    'comb_19_pad': 'valid',
    # ---
    # Dense After Fluoro Convs
    'dense_comb_0_units': 50,
    'dense_comb_1_units': 50,
    # ---
    # Activation Function for Fluoro Vox Comb
    'flu_vox_act_fn': 'elu',
    # ---
    # Combine Fluoro and Vox
    'vox_flu_units_0': 60,
    'vox_flu_units_1': 50,
    'vox_flu_units_2': 30,
    'vox_flu_units_3': 15,
    'vox_flu_units_4': 6,
    # ---
    # Cali Units
    'cali_0_units': 20,
    'cali_1_units': 20,
    'cali_2_units': 20,
    'cali_3_units': 6,
    # ---
    # Activation Function for Top Level Comb
    'top_level_act_fn': 'elu',
    'top_level_intra': None,
    # ---
    # Top Level Dense
    'top_dense_0': 6,
    'top_dense_1': 6,
    'top_dense_2': 6,
    'top_dense_3': 6,
    'top_dense_4': 6,
    'top_dense_5': 6,
    'top_dense_6': 6,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 100,
    'model_batchsize': 10,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Tensor layout and input geometry shared by every branch of the model.
channel_order = 'channels_last'
# Single fluoroscopy view: 128x128, one channel.
img_input_shape = (128, 128, 1)
# Padded voxel volume with a trailing channel axis -- matches voxels_pad.h5py.
vox_input_shape = (199, 164, 566, 1)
# Six calibration values (dataset 'cali_len3_rot').
cali_input_shape = (6,)
# Input Layers
# One sample = one voxel volume, two fluoroscopy views, one calibration vector.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# 3D-conv branch over input_vox: two entry convs, a max-pool, six
# residual blocks, a final conv, then flatten + two dense layers.
# Blocks 1-3 use identity shortcuts (Add with the block input); blocks
# 4-6 use projection-style shortcuts (a parallel conv on the block
# input is added instead of the raw input).
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1  (identity shortcut: bn_2 is both block input and the Add operand)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4  (projection shortcut: v_conv_11 transforms v_act_2 for the Add)
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5  (projection shortcut via v_conv_14)
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# NOTE(review): v_conv_15/16/17 reuse the v_conv_12/13/14 and
# v_spatial_drop_rate_6 param keys instead of having their own —
# possibly intentional hyperparameter sharing, but it looks like a
# copy-paste of block 5; TODO confirm against the params dict.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
# Flatten the 3D feature map and compress through two dense layers;
# bn_21_v is this branch's output tensor (presumably merged with the
# fluoro/cali branches further down — TODO confirm).
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
# 2D-conv branch over input_fluoro_1: per-image standardization, two
# entry convs, average-pool, then six identity-shortcut residual
# blocks. Only act_5_1 (the branch output) survives this section:
# every other local name (bn_*, conv_*_1, spat_*_1, add_*, act_0..4)
# is rebound by the FLUORO ANALYSIS 2 section below, which is a
# structural duplicate of this one on input_fluoro_2.
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): conv_5_1 reuses the conv_3_* param keys (there are no
# conv_5_* keys used) — possibly deliberate sharing; TODO confirm.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch output — the only binding from this section that is read later.
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
# Structural duplicate of FLUORO ANALYSIS 1 applied to input_fluoro_2.
# NOTE(review): the layers here are *new* layer instances, so the two
# fluoro branches do NOT share weights — they only share params keys;
# confirm that non-siamese behavior is intended. All intermediate
# names (bn_*, conv_*_1, spat_*_1, add_*, act_0..4) rebind the
# identically-named variables from the section above; only act_5_2
# is consumed downstream.
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
# NOTE(review): conv_5_1 reuses the conv_3_* param keys, mirroring the
# same reuse in FLUORO ANALYSIS 1 — TODO confirm intent.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, bn_12])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_13)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, bn_14])
# Branch output — the only binding from this section that is read later.
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
# Concatenate the two fluoro branch outputs (default axis=-1, i.e.
# along channels under channels_last).
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
# Eight residual blocks on the merged feature map: blocks 0-4 use
# identity shortcuts; blocks 5-7 use projection-style shortcuts (a
# parallel conv on the block input feeds the Add). comb_19 at the end
# is consumed by code past this section. The bn_*/spat_*/add_*/act_*
# names here rebind the same names used by the earlier sections.
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(bn_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, bn_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(bn_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, bn_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(bn_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, bn_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(bn_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, bn_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(bn_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, bn_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5  (projection shortcut: comb_12 transforms act_4 for the Add)
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(bn_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([bn_11, bn_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6  (projection shortcut via comb_15)
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(bn_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([bn_14, bn_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7  (projection shortcut via comb_18)
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(bn_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([bn_17, bn_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
# comb_19 feeds the remainder of the model (defined past this section).
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_19)
bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_20)
bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
fluoro_vox_comb = tf.keras.layers.Add()([bn_21_f, bn_21_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_3)
bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([bn_4_c, bn_5_comb])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_level_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([bn_1, bn_4_c])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([bn_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_1)
bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([bn_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_6)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index space [0, shape) into test and train indices.

    Args:
        shape: Total number of samples (size of the index space).
        num_of_samples: If given, only this many indices are drawn (random
            subsample without replacement); otherwise all indices are used.
        ratio: Fraction of the drawn indices assigned to the test set.

    Returns:
        Tuple (test_indx, train_indx) of disjoint 1-D numpy index arrays.
    """
    # Random permutation (or subsample) of the index space, without replacement.
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: honor the `ratio` parameter instead of a hard-coded 0.2
    # (behavior is unchanged for the default ratio=0.2).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Draw test / validation / training index sets. The second split is performed
# on positions within train_sup_indxs and then mapped back to dataset indices.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted index lists.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle file that will receive the index sets and the training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each dataset fully in memory, slice out val/train, then close.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train; channel axes are added with expand_dims and the two fluoro views are
# split out of axis 1 of the image array.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
# Persist the index sets and Keras history for later analysis.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_deeper/vox_fluoro_deeper.py | .py | 30,584 | 575 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating how a deeper conv net, which paradoxically has fewer parameters would fair
# No regularization
# Experiment name is derived from the script filename (strip the '.py' suffix).
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (plot, saved model, history pickle) go under this directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load the voxel/image/label/calibration compilations and split them.

    Reads the slice [first_indx:last_indx] of each HDF5 compilation file,
    then performs two successive shuffled 80/20 splits (random_state=42):
    the first holds out a test set, the second carves a validation set out
    of the remaining training data.

    Returns:
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)
    """
    def _load(path, dset_name):
        # Open one HDF5 file, materialize the requested slice, close the file.
        with h5py.File(os.path.expanduser(path), 'r') as h5:
            return h5[dset_name][first_indx:last_indx]

    vox_mat = _load('~/fluoro/data/compilation/voxels_pad.h5py', 'vox_dset')
    image_mat = _load('~/fluoro/data/compilation/images.h5py', 'image_dset')
    label_mat = _load('~/fluoro/data/compilation/labels.h5py', 'labels_dset')
    cali_mat = _load('~/fluoro/data/compilation/calibration.h5py', 'cali_len3_rot')

    # First split: hold out 20% of everything as the test set (unused here).
    (vox_train_cum, vox_test,
     image_train_cum, image_test,
     cali_train_cum, cali_test,
     label_train_cum, label_test) = train_test_split(
        vox_mat, image_mat, cali_mat, label_mat,
        shuffle=True, test_size=0.2, random_state=42)

    # Second split: carve a validation set out of the cumulative training set.
    (vox_train_sub, vox_val,
     image_train_sub, image_val,
     cali_train_sub, cali_val,
     label_train_sub, label_val) = train_test_split(
        vox_train_cum, image_train_cum, cali_train_cum, label_train_cum,
        shuffle=True, test_size=0.2, random_state=42)

    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Keras data-format and fixed input shapes (trailing 1 = single channel).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """Mean squared error with each label dimension scaled by its std.

    Per-dimension standard deviations are read from the precomputed
    label-statistics HDF5 file so that every output component contributes
    on a comparable scale.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    scaled_err = (y_pred - y_true) / std_v
    return tf.keras.backend.mean(tf.keras.backend.square(scaled_err))
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each label dimension divided by its variance.

    Numerically equivalent to `cust_mean_squared_error_std` (var == std**2);
    the variances come from the same precomputed label-statistics HDF5 file.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    sq_err = tf.keras.backend.square(y_pred - y_true)
    return tf.keras.backend.mean(sq_err / var_v)
# Hyperparameter dictionary consumed by the model-building statements below.
# Keys are grouped by sub-network: 3D voxel convs ('v_*'), 2D fluoro convs
# ('conv_*'), calibration denses ('*_cali_*'), combined top denses ('*_co_*').
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.3,
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 3,
    'v_conv_2_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'valid',
    'v_conv_3_filters': 40,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 2,
    'v_conv_3_strides_1': 2,
    'v_conv_3_strides_2': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 50,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_5_filters': 50,
    'v_conv_5_kernel': 2,
    'v_conv_5_strides_0': 2,
    'v_conv_5_strides_1': 2,
    'v_conv_5_strides_2': 2,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 50,
    'v_conv_6_kernel': 2,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 50,
    'v_conv_7_kernel': 2,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 30,
    'v_conv_8_kernel': 1,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    'dense_1_v_units': 300,
    'dense_2_v_units': 250,
    'dense_3_v_units': 250,
    'dense_4_v_units': 200,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 2,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_4_filters': 60,
    'conv_4_kernel': 3,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 3,
    'conv_5_strides': 1,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'dense_1_f_units': 120,
    'dense_2_f_units': 120,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 20,
    'dense_3_cali_units': 20,
    # Top Level Dense Units
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# 3D conv branch over the voxel volume.
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_pool_1)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
# NOTE(review): v_pool_2 is computed but never consumed — v_conv_5 takes
# v_conv_4 directly. Confirm whether the pooling layer was meant to feed it.
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_4)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_6)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
# NOTE(review): v_spat_4 is likewise unused — v_conv_8 consumes v_conv_7,
# so this dropout has no effect on the graph.
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_7)
v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_3_v)
# -----------------------------------------------------------------
# 2D conv branch for the first fluoro view (per-image standardization first).
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_3_1)
conv_4_1 = tf.keras.layers.SeparableConv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_1)
conv_5_1 = tf.keras.layers.SeparableConv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_5_1)
conv_6_1 = tf.keras.layers.SeparableConv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(conv_6_1)
# Dense Layers After Flattended 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# 2D conv branch for the second fluoro view (mirror of the first; separate
# layer instances, so weights are NOT shared between the two views).
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_3_2)
conv_4_2 = tf.keras.layers.SeparableConv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
conv_5_2 = tf.keras.layers.SeparableConv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_5_2)
conv_6_2 = tf.keras.layers.SeparableConv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(conv_6_2)
# Dense Layers After Flattended 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
dense_0_comb = tf.keras.layers.concatenate([dense_4_v, dense_3_f_1, dense_3_f_2, dense_3_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_drop_2 = tf.keras.layers.Dropout(rate=params['drop_2_comb_rate'])(dense_3_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(dense_drop_2)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the four HDF5 compilation files; data is materialized and the files
# are closed further below.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition sample indices into test and train index arrays.

    Args:
        shape: Total number of samples to draw indices from (0..shape-1).
        num_of_samples: If given, only this many randomly chosen samples are
            used; otherwise all `shape` indices are shuffled.
        ratio: Fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): Disjoint numpy index arrays.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently ignored
    # the `ratio` argument; honor it (the default preserves old behavior).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Choose train / validation / test index sets. The test set is carved out
# first; validation indices are then drawn from the remaining training pool.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# Map positions-within-the-pool back to absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted lists allow efficient (monotonic) fancy indexing below and make
# the saved index sets human-readable.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the index sets (and later the fit history) for reproducibility.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Each dataset is read fully into memory, then sliced into val/train parts
# and the file handle is released. Test rows are intentionally left unread.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: images gain a trailing channel axis; image_mat_*[:, 0/1] selects
# the two fluoroscopy views per sample.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist history dict alongside the index sets, then save the full model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
# -----------------------------------------------------------------
# v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
# v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
# v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
# v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
# # v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
# v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_spat_3)
# v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_4)
# v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_5)
# -----------------------------------------------------------------
# # Second run of 2D Conv Layers for Image 1
# conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
# spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
# pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# # Third run of 2D Conv Layers for Image 1
# conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
# pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
# # Fourth run of 2D Conv Layers for Image 1
# conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_3_1)
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_no_reg_var_loss/vox_fluoro_no_reg_var_loss.py | .py | 22,141 | 458 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment evaluates changing the l1 and l2 regularization to 0 and 0
# respectively, and uses the variance-normalized loss instead of the
# std-normalized one.
# Experiment name = this script's filename minus the trailing '.py'.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (plots, history pickle, saved model) go under this directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load the voxel, fluoro-image, label, and calibration datasets and
    return the nested train/validation split used by these experiments.

    Only rows [first_indx:last_indx] of each HDF5 dataset are read. Both
    splits use a fixed random_state so the partition is reproducible.
    """
    dset_specs = (
        ('~/fluoro/data/compilation/voxels_pad.h5py', 'vox_dset'),
        ('~/fluoro/data/compilation/images.h5py', 'image_dset'),
        ('~/fluoro/data/compilation/labels.h5py', 'labels_dset'),
        ('~/fluoro/data/compilation/calibration.h5py', 'cali_len3_rot'),
    )
    loaded = []
    for h5_path, dset_name in dset_specs:
        with h5py.File(os.path.expanduser(h5_path), 'r') as h5_fh:
            loaded.append(h5_fh[dset_name][first_indx:last_indx])
    vox_mat, image_mat, label_mat, cali_mat = loaded
    # First split: carve off a 20% hold-out test set.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Second split: 20% of the remaining training pool becomes validation.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Tensor layout and per-input shapes (all channels-last, single channel).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)  # one 128x128 fluoroscopy image
vox_input_shape = (199, 164, 566, 1)  # padded voxel volume
cali_input_shape = (6,)  # 6-element calibration vector
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """Mean squared error with each label dimension scaled by its std dev.

    Equivalent to MSE on z-scored residuals, so no single pose component
    dominates the loss. The per-dimension std vector comes from the
    precomputed label-statistics HDF5 file.
    """
    # Cache the statistics on the function object so the HDF5 file is only
    # opened once rather than on every invocation of the loss.
    if not hasattr(cust_mean_squared_error_std, '_std_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_std._std_v = stats_file['std'][:]
    std_v = cust_mean_squared_error_std._std_v
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each label dimension divided by its variance.

    Mathematically equivalent to `cust_mean_squared_error_std` (dividing the
    squared residual by var equals squaring the std-scaled residual). The
    per-dimension variance vector comes from the precomputed
    label-statistics HDF5 file.
    """
    # Cache the statistics on the function object so the HDF5 file is only
    # opened once rather than on every invocation of the loss.
    if not hasattr(cust_mean_squared_error_var, '_var_v'):
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
            cust_mean_squared_error_var._var_v = stats_file['var'][:]
    var_v = cust_mean_squared_error_var._var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyper-parameter bundle for the whole experiment: every architectural size,
# regularizer, and optimizer/training setting is looked up from this dict,
# so one literal captures the run configuration.
params = {
    # 3D CONV
    # Three conv/dropout/pool stages over the voxel volume, followed by
    # three dense layers on the flattened features.
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV
    # Shared sizing for both fluoro-image branches (weights are NOT shared;
    # each branch builds its own layers from these numbers).
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping
    # Regularizers are deliberately None in this experiment (l1/l2 set to 0).
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    # Optimizer class (instantiated at compile time with 'learning_rate').
    'model_opt': tf.keras.optimizers.Adam,
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    # Variance-normalized MSE is both the loss and the reported metric.
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
# Four named inputs: padded voxel volume, the two fluoroscopy images, and
# the 6-element calibration vector. Names must match the dict keys used in
# model.fit below.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers (voxel branch)
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers (batch-norm only between stages 1 and 2)
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattened 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1
# Per-image standardization (zero mean / unit variance) wrapped in a Lambda
# so it participates in the Keras graph.
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1 (no spatial dropout here)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattened 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 2
# Mirrors the Image 1 branch (same params, independent weights).
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2 (no spatial dropout here)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattened 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
# Concatenate the four branch embeddings along the feature axis.
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output
# Final linear regression head; 6 units (presumably a 6-DOF pose -- TODO
# confirm against the label format).
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# NOTE(review): `lr` is the legacy Keras optimizer argument name (newer TF
# releases use `learning_rate`).
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
# Save an architecture diagram next to the other experiment artifacts.
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the compiled HDF5 datasets read-only. Only handles are taken here;
# the actual arrays are sliced into memory further below, after the
# train/val/test indices have been chosen.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition sample indices into test and train index arrays.

    Args:
        shape: Total number of samples to draw indices from (0..shape-1).
        num_of_samples: If given, only this many randomly chosen samples are
            used; otherwise all `shape` indices are shuffled.
        ratio: Fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): Disjoint numpy index arrays.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently ignored
    # the `ratio` argument; honor it (the default preserves old behavior).
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# Choose train / validation / test index sets. The test set is carved out
# first; validation indices are then drawn from the remaining training pool.
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# Map positions-within-the-pool back to absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted lists allow efficient (monotonic) fancy indexing below and make
# the saved index sets human-readable.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the index sets (and later the fit history) for reproducibility.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Each dataset is read fully into memory, then sliced into val/train parts
# and the file handle is released. Test rows are intentionally left unread.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: images gain a trailing channel axis; image_mat_*[:, 0/1] selects
# the two fluoroscopy views per sample.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist history dict alongside the index sets, then save the full model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_two_lin_l1_0-005_l2_0-005/vox_fluoro_two_lin_l1_0-005_l2_0-005.py | .py | 22,253 | 458 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment evaluates changing the l1 and l2 regularization to 0 and 0
# respectively, and uses the variance-normalized loss instead of the
# std-normalized one.
# Experiment name = this script's filename minus the trailing '.py'.
expr_name = sys.argv[0][:-3]
expr_no = '1'
# All artifacts (plots, history pickle, saved model) go under this directory.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Read the voxel, fluoro-image, label, and calibration HDF5 datasets
    and return the nested train/validation split used by this experiment.

    Rows [first_indx:last_indx] of every dataset are loaded. The two
    successive 80/20 splits share a fixed random_state, so repeated calls
    yield the same partition.
    """
    sources = (
        ('~/fluoro/data/compilation/voxels_pad.h5py', 'vox_dset'),
        ('~/fluoro/data/compilation/images.h5py', 'image_dset'),
        ('~/fluoro/data/compilation/labels.h5py', 'labels_dset'),
        ('~/fluoro/data/compilation/calibration.h5py', 'cali_len3_rot'),
    )
    arrays = []
    for path_tmpl, key in sources:
        with h5py.File(os.path.expanduser(path_tmpl), 'r') as handle:
            arrays.append(handle[key][first_indx:last_indx])
    vox_mat, image_mat, label_mat, cali_mat = arrays
    # Hold out 20% of all samples as the test set.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # From what remains, hold out another 20% as the validation set.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Tensor layout and per-input shapes (all channels-last, single channel).
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)  # one 128x128 fluoroscopy image
vox_input_shape = (199, 164, 566, 1)  # padded voxel volume
cali_input_shape = (6,)  # 6-element calibration vector
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label component scaled by its training-set std.

    Loads the per-component standard deviation of the labels from
    ``labels_stats.h5py`` and returns ``mean(((y_pred - y_true) / std) ** 2)``
    so that all output components contribute on a comparable scale.

    Note: the file read happens when Keras traces the loss graph, not
    once per batch.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 file is closed even if the
    # read raises (the original leaked the handle in that case).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared error divided by the label variance.

    Loads the per-component variance of the training labels from
    ``labels_stats.h5py`` and returns
    ``mean((y_pred - y_true) ** 2 / var)`` — numerically the same
    normalization as :func:`cust_mean_squared_error_std`.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 file is closed even if the
    # read raises (the original leaked the handle in that case).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyper-parameters for every sub-network, in one dict.
# Naming convention: 'v_*'  -> voxel (Conv3D) branch,
#                    'conv/pool/spatial_*' -> the two fluoro-image (Conv2D)
#                    branches (shared hyper-parameters, separate weights),
#                    '*_cali' -> calibration MLP,
#                    '*_co'   -> combined head after concatenation.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,  # width of the regression target vector
    'main_output_act': 'linear',
    # General Housekeeping
    'conv_regularizer': tf.keras.regularizers.l1_l2(l1=0.005, l2=0.005),
    'dense_regularizer_1': tf.keras.regularizers.l1_l2(l1=0.005, l2=0.005),
    'dense_regularizer_2': tf.keras.regularizers.l1_l2(l1=0.005, l2=0.005),
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,  # optimizer class; instantiated at compile time
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    # Variance-normalized MSE (defined above) used as both loss and metric.
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers — one Keras input per data modality (batch axis implicit).
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# Voxel branch: three Conv3D -> SpatialDropout3D -> MaxPool3D stages,
# then flatten and three dense layers.
# First run of 3D Conv Layers
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers (only this stage is batch-normalized)
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattened 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# Fluoro image 1 branch: per-image standardization, then three
# Conv2D -> (SpatialDropout2D) -> MaxPool2D stages, flatten, three dense.
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1 (no spatial dropout at this depth)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattened 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# Fluoro image 2 branch: mirrors the image-1 branch (same hyper-parameters,
# separate weights).
# First run of 2D Conv Layers for Image 2
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 2
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 2
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattened 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
# Final dense layer of the image-2 branch. The original passed activation=None
# here while the parallel image-1 branch (dense_3_f_1) and the companion
# script both use params['activation_fn'] — made consistent with those.
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data (small MLP on the 6 calibration values)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data into one feature vector
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
# NOTE(review): this hidden layer uses 'main_output_act' (linear) rather than
# 'activation_fn' like its siblings — possibly intentional, confirm.
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output: linear regression head of width params['main_output_units']
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping: build, compile, and write an architecture diagram
# (plot_model requires pydot/graphviz to be installed).
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the four HDF5 datasets lazily; each handle stays open until the
# corresponding slice is materialized below, then is closed explicitly.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train indices.

    Args:
        shape: total number of samples indices are drawn from.
        num_of_samples: if given, only this many indices are drawn (a random
            subsample without replacement); otherwise all ``shape`` indices
            are used.
        ratio: fraction of the drawn indices assigned to the test split.

    Returns:
        (test_indx, train_indx): two disjoint 1-D numpy index arrays.
    """
    if num_of_samples is None:
        num_of_samples = shape
    # Draw without replacement so every index is unique, in shuffled order.
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the original hard-coded 0.2 here, silently ignoring `ratio`.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
num_of_samples = None  # None -> use every labeled sample
# Two-stage split: first carve off a test set, then split the remainder into
# validation and training. The second call returns positions WITHIN
# train_sup_indxs, so its results must be mapped back through that array.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sorted plain lists: increasing order is required for h5py-style fancy
# indexing and makes the persisted splits easier to inspect.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Persist the exact split alongside the training history for reproducibility.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): each `dset[:]` below loads the ENTIRE dataset into memory
# before slicing; the test rows are loaded but never used in this script.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train. The image array holds two views per sample (indexed [:, 0] and
# [:, 1]); each view gets a trailing channel axis to match (128, 128, 1).
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Save the per-epoch history plus split indices, then the trained model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
| Python |
3D | john-drago/fluoro | code/vox_fluoro/history/vox_fluoro_img_no_l1_l2_loss/vox_fluoro_img_no_l1_l2_loss.py | .py | 22,032 | 457 | import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment is evaluating changing the l1 and l2 regularization
# Experiment bookkeeping: name the run after this script file, minus its
# extension. os.path.splitext is more robust than the original "[:-3]"
# slice, which assumed a three-character ".py" suffix.
expr_name = os.path.splitext(sys.argv[0])[0]
expr_no = '1'
# All artifacts (architecture plot, history pickle, saved model) go into one
# per-experiment directory; created up front, idempotently.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load voxel, image, calibration and label arrays and split them.

    Reads the slice ``[first_indx:last_indx]`` of each compilation dataset,
    holds out 20% as a test set, then splits the remainder 80/20 into
    training and validation (both splits seeded with random_state=42 for
    reproducibility).

    Args:
        first_indx: first row to read from each dataset (None = start).
        last_indx: one past the last row to read (None = end).

    Returns:
        (vox_train_sub, vox_val, image_train_sub, image_val,
         cali_train_sub, cali_val, label_train_sub, label_val)
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context managers guarantee the HDF5 files are closed even if a read
    # raises (the original leaked every handle opened before the failure).
    with h5py.File(os.path.join(base_dir, 'voxels_pad.h5py'), 'r') as vox_file:
        vox_mat = vox_file['vox_dset'][first_indx:last_indx]
    with h5py.File(os.path.join(base_dir, 'images.h5py'), 'r') as image_file:
        image_mat = image_file['image_dset'][first_indx:last_indx]
    with h5py.File(os.path.join(base_dir, 'labels.h5py'), 'r') as label_file:
        label_mat = label_file['labels_dset'][first_indx:last_indx]
    with h5py.File(os.path.join(base_dir, 'calibration.h5py'), 'r') as cali_file:
        cali_mat = cali_file['cali_len3_rot'][first_indx:last_indx]
    # First split: hold out 20% as a test set (unused by this function's
    # callers here, but kept for parity with the original behavior).
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(
        vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Second split: carve a validation set (20% of the remainder) off train.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(
        vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Input geometry shared by the model definition below.
channel_order = 'channels_last'        # channel axis is last in every tensor
img_input_shape = (128, 128, 1)        # one 128x128 single-channel fluoro image
vox_input_shape = (199, 164, 566, 1)   # padded voxel volume plus channel axis
cali_input_shape = (6,)                # 6 calibration scalars per sample
# def root_mean_squared_error(y_true, y_pred):
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
# base_dir = os.path.expanduser('~/fluoro/data/compilation')
# stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
# var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
# var_v = var_dset[:]
# stats_file.close()
# return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each label component scaled by its training-set std.

    Loads the per-component standard deviation of the labels from
    ``labels_stats.h5py`` and returns ``mean(((y_pred - y_true) / std) ** 2)``
    so that all output components contribute on a comparable scale.

    Note: the file read happens when Keras traces the loss graph, not
    once per batch.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 file is closed even if the
    # read raises (the original leaked the handle in that case).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared error divided by the label variance.

    Loads the per-component variance of the training labels from
    ``labels_stats.h5py`` and returns
    ``mean((y_pred - y_true) ** 2 / var)`` — numerically the same
    normalization as :func:`cust_mean_squared_error_std`.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 file is closed even if the
    # read raises (the original leaked the handle in that case).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyper-parameter dictionary; same layout as the companion experiment but
# with all L1/L2 regularizers disabled (None) and the std-normalized loss —
# consistent with this script's "no_l1_l2" experiment name.
# Naming convention: 'v_*'  -> voxel (Conv3D) branch,
#                    'conv/pool/spatial_*' -> the two fluoro-image (Conv2D)
#                    branches, '*_cali' -> calibration MLP,
#                    '*_co'   -> combined head after concatenation.
params = {
    # 3D CONV
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.5,
    'v_pool_1_size': 3,
    'v_pool_1_pad': 'valid',
    'v_conv_2_filters': 40,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides': 2,
    'v_conv_2_pad': 'same',
    'v_spatial_drop_rate_2': 0.5,
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_3_filters': 80,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_3': 0.2,
    'v_pool_3_size': 2,
    'v_pool_3_pad': 'same',
    'dense_1_v_units': 1000,
    'dense_2_v_units': 500,
    'dense_3_v_units': 250,
    # 2D CONV
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.5,
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'spatial_drop_rate_2': 0.5,
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_3_filters': 80,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'pool_3_size': 2,
    'pool_3_pad': 'same',
    'dense_1_f_units': 60,
    'dense_2_f_units': 60,
    'dense_3_f_units': 60,
    # Calibration Dense Layers
    'dense_1_cali_units': 10,
    'dense_2_cali_units': 10,
    # Top Level Dense Units
    'dense_1_co_units': 80,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 20,
    'dense_3_co_units': 20,
    'dense_4_co_units': 20,
    # Main Output
    'main_output_units': 6,  # width of the regression target vector
    'main_output_act': 'linear',
    # General Housekeeping — regularization deliberately off in this run.
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Adam,  # optimizer class; instantiated at compile time
    'learning_rate': 0.001,
    'model_epochs': 50,
    'model_batchsize': 5,
    # Std-normalized MSE (defined above) used as both loss and metric.
    'model_loss': cust_mean_squared_error_std,
    'model_metric': cust_mean_squared_error_std
}
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# First run of 3D Conv Layers
# v_bn_1 = tf.keras.layers.BatchNormalization(input_shape=vox_input_shape)(input_vox)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=params['v_conv_1_strides'], padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_spat_1)
# Second run of 3D Conv Layers
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=params['v_conv_2_strides'], padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# Third run of 3D Conv Layers
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=params['v_conv_3_strides'], padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
# Dense Layers After Flattended 3D Conv
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 1
# per_image_stand_1 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_1)
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_1_1 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_1)
# Second run of 2D Conv Layers for Image 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# Third run of 2D Conv Layers for Image 1
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
# Dense Layers After Flattended 2D Conv
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_1)
# -----------------------------------------------------------------
# First run of 2D Conv Layers for Image 2
# per_image_stand_2 = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), input_fluoro_2)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_1_2 = tf.keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
# Second run of 2D Conv Layers for Image 1
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
# Third run of 2D Conv Layers for Image 1
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
# Dense Layers After Flattended 2D Conv
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_f_2)
# -----------------------------------------------------------------
# Dense Layers Over Calibration Data
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_cali)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_3_comb)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
# -----------------------------------------------------------------
# Open the compiled HDF5 datasets read-only. The handles stay open until the
# arrays are materialized into memory further below, then each is closed.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition dataset indices into test and train index arrays.

    Args:
        shape: Total number of samples available (indices run 0..shape-1).
        num_of_samples: If given, only this many randomly chosen indices are
            used (a random subsample); otherwise all ``shape`` indices are
            shuffled and split.
        ratio: Fraction of the selected indices assigned to the test split.
            Bug fix: this parameter was previously ignored and 0.2 was
            hard-coded, so the default behavior is unchanged.

    Returns:
        Tuple ``(test_indx, train_indx)`` of disjoint 1-D numpy index arrays.
    """
    size = shape if num_of_samples is None else num_of_samples
    # Sampling without replacement of `size` indices doubles as a shuffle.
    shuffled_indices = np.random.choice(shape, size=size, replace=False)
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# ---- Train / validation / test index selection ----
num_of_samples = None  # None -> use every sample in the dataset
# First call holds out a test set; second call carves a validation set out of
# the remaining ("sup") training indices.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# The second call returned positions *into* train_sup_indxs, so map them back
# to dataset-level indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# Sort for deterministic, ordered slicing of the loaded arrays.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, 'vox_fluoro_hist_objects_' + expr_no + '.pkl'), 'wb')
# Persist the index split alongside the training history so the held-out test
# set can be reconstructed later (test data itself is never loaded here).
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# NOTE(review): each dataset is materialized fully into memory ([:]) before
# slicing into train/val subsets; this assumes the arrays fit in RAM.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# The image array is indexed as [:, view, :, :], i.e. two fluoro views per
# sample; each view feeds its own input branch, with a trailing channel axis
# added for the conv nets.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
# This experiment evaluates how a deeper conv net, which paradoxically has
# fewer parameters, would fare. No regularization.
expr_name = sys.argv[0][:-3]  # script filename with the trailing '.py' stripped
expr_no = '1'
# All artifacts (plots, history pickle, saved model) go in a per-experiment dir.
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None):
    """Load the compiled datasets and return train/validation splits.

    Reads the voxel, image, label and calibration arrays (optionally only
    rows ``first_indx:last_indx``) from ``~/fluoro/data/compilation`` and
    performs a reproducible two-stage split: 20% is held out as a test set
    (discarded here -- the fixed ``random_state`` lets it be regenerated),
    then 20% of the remainder becomes the validation set.

    Args:
        first_indx: First row of each dataset to load (None = from start).
        last_indx: One past the last row to load (None = to end).

    Returns:
        Tuple of ``(vox_train_sub, vox_val, image_train_sub, image_val,
        cali_train_sub, cali_val, label_train_sub, label_val)``.
    """
    def _load(fname, dset_name):
        # Read one dataset slice; `with` guarantees the handle is closed
        # even if the read raises.
        fpath = os.path.expanduser(os.path.join('~/fluoro/data/compilation', fname))
        with h5py.File(fpath, 'r') as hf:
            return hf[dset_name][first_indx:last_indx]

    vox_mat = _load('voxels_pad.h5py', 'vox_dset')
    image_mat = _load('images.h5py', 'image_dset')
    label_mat = _load('labels.h5py', 'labels_dset')
    cali_mat = _load('calibration.h5py', 'cali_len3_rot')

    # Stage 1: hold out 20% as the (unused here) test set.
    vox_train_cum, vox_test, image_train_cum, image_test, cali_train_cum, cali_test, label_train_cum, label_test = train_test_split(vox_mat, image_mat, cali_mat, label_mat, shuffle=True, test_size=0.2, random_state=42)
    # Stage 2: carve 20% of the remaining data out as the validation set.
    vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val = train_test_split(vox_train_cum, image_train_cum, cali_train_cum, label_train_cum, shuffle=True, test_size=0.2, random_state=42)
    return vox_train_sub, vox_val, image_train_sub, image_val, cali_train_sub, cali_val, label_train_sub, label_val
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Fixed data layout for the model inputs.
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)  # single-channel 128x128 fluoro image
vox_input_shape = (199, 164, 566, 1)  # padded voxel volume -- TODO confirm axis order
cali_input_shape = (6,)  # presumably 3 lengths + 3 rotations (dset 'cali_len3_rot') -- verify
# Earlier loss-function drafts, kept for reference:
# def root_mean_squared_error(y_true, y_pred):
#     return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true)))
# def root_mean_squared_error(y_true, y_pred):
#     base_dir = os.path.expanduser('~/fluoro/data/compilation')
#     stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
#     mean_dset = stats_file['mean']
#     std_dset = stats_file['std']
#     var_dset = stats_file['var']
#     mean_v = mean_dset[:]
#     std_v = std_dset[:]
#     var_v = var_dset[:]
#     stats_file.close()
#     return tf.keras.backend.sqrt(tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v))
def cust_mean_squared_error_std(y_true, y_pred):
    """MSE with each residual component scaled by that component's std.

    Dividing ``(y_pred - y_true)`` by the per-component standard deviation
    (precomputed over the label set and stored in labels_stats.h5py) puts the
    differently-scaled pose components on a comparable footing.

    NOTE(review): the stats file is re-read on every call; the values are
    constant, so they could be loaded once at module level instead.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 handle is closed even if the read
    # raises (the original leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        std_v = stats_file['std'][:]
    return tf.keras.backend.mean(tf.keras.backend.square((y_pred - y_true) / std_v))
def cust_mean_squared_error_var(y_true, y_pred):
    """MSE with each squared residual component divided by its variance.

    Equivalent in intent to :func:`cust_mean_squared_error_std` but divides
    the *squared* error by the per-component variance (stored in
    labels_stats.h5py). Used as both loss and metric in ``params``.

    NOTE(review): the stats file is re-read on every call; the values are
    constant, so they could be loaded once at module level instead.
    """
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    # Context manager guarantees the HDF5 handle is closed even if the read
    # raises (the original leaked the handle on error).
    with h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r') as stats_file:
        var_v = stats_file['var'][:]
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# Hyperparameters for every layer of the four-branch network (3D voxel
# branch, two identically-configured 2D fluoro branches, calibration branch)
# plus training settings. Keeping them in one dict makes experiments diffable.
params = {
    # 3D CONV (voxel branch): 8 conv layers; per-axis strides are split into
    # _0/_1/_2 entries.
    'v_conv_1_filters': 30,
    'v_conv_1_kernel': 11,
    'v_conv_1_strides_0': 2,
    'v_conv_1_strides_1': 2,
    'v_conv_1_strides_2': 2,
    'v_conv_1_pad': 'same',
    'v_spatial_drop_rate_1': 0.3,
    'v_conv_2_filters': 30,
    'v_conv_2_kernel': 5,
    'v_conv_2_strides_0': 2,
    'v_conv_2_strides_1': 2,
    'v_conv_2_strides_2': 3,
    'v_conv_2_pad': 'same',
    'v_pool_1_size': 2,
    'v_pool_1_pad': 'valid',
    'v_conv_3_filters': 40,
    'v_conv_3_kernel': 3,
    'v_conv_3_strides_0': 2,
    'v_conv_3_strides_1': 2,
    'v_conv_3_strides_2': 2,
    'v_conv_3_pad': 'same',
    'v_spatial_drop_rate_2': 0.3,
    'v_conv_4_filters': 50,
    'v_conv_4_kernel': 3,
    'v_conv_4_strides_0': 2,
    'v_conv_4_strides_1': 2,
    'v_conv_4_strides_2': 2,
    'v_conv_4_pad': 'same',
    'v_pool_2_size': 2,
    'v_pool_2_pad': 'same',
    'v_conv_5_filters': 50,
    'v_conv_5_kernel': 2,
    'v_conv_5_strides_0': 1,
    'v_conv_5_strides_1': 1,
    'v_conv_5_strides_2': 1,
    'v_conv_5_pad': 'same',
    'v_spatial_drop_rate_3': 0.3,
    'v_conv_6_filters': 50,
    'v_conv_6_kernel': 2,
    'v_conv_6_strides_0': 2,
    'v_conv_6_strides_1': 2,
    'v_conv_6_strides_2': 2,
    'v_conv_6_pad': 'same',
    'v_conv_7_filters': 50,
    'v_conv_7_kernel': 2,
    'v_conv_7_strides_0': 1,
    'v_conv_7_strides_1': 1,
    'v_conv_7_strides_2': 1,
    'v_conv_7_pad': 'same',
    'v_spatial_drop_rate_4': 0.3,
    'v_conv_8_filters': 40,
    'v_conv_8_kernel': 1,
    'v_conv_8_strides_0': 1,
    'v_conv_8_strides_1': 1,
    'v_conv_8_strides_2': 1,
    'v_conv_8_pad': 'same',
    'dense_1_v_units': 350,
    'dense_2_v_units': 250,
    'dense_3_v_units': 250,
    'dense_4_v_units': 200,
    # 2D CONV (shared by both fluoro-image branches)
    'conv_1_filters': 30,
    'conv_1_kernel': 5,
    'conv_1_strides': 2,
    'conv_1_pad': 'same',
    'spatial_drop_rate_1': 0.3,
    'conv_2_filters': 40,
    'conv_2_kernel': 3,
    'conv_2_strides': 2,
    'conv_2_pad': 'same',
    'pool_1_size': 2,
    'pool_1_pad': 'same',
    'conv_3_filters': 50,
    'conv_3_kernel': 3,
    'conv_3_strides': 1,
    'conv_3_pad': 'same',
    'spatial_drop_rate_2': 0.3,
    'conv_4_filters': 60,
    'conv_4_kernel': 3,
    'conv_4_strides': 2,
    'conv_4_pad': 'same',
    'pool_2_size': 2,
    'pool_2_pad': 'same',
    'conv_5_filters': 60,
    'conv_5_kernel': 3,
    'conv_5_strides': 2,
    'conv_5_pad': 'same',
    'spatial_drop_rate_3': 0.3,
    'conv_6_filters': 30,
    'conv_6_kernel': 3,
    'conv_6_strides': 1,
    'conv_6_pad': 'same',
    'dense_1_f_units': 120,
    'dense_2_f_units': 120,
    'dense_3_f_units': 80,
    # Calibration Dense Layers
    'dense_1_cali_units': 20,
    'dense_2_cali_units': 20,
    'dense_3_cali_units': 20,
    # Top Level Dense Units (after branch concatenation)
    'dense_1_co_units': 250,
    'drop_1_comb_rate': 0.2,
    'dense_2_co_units': 150,
    'dense_3_co_units': 100,
    'drop_2_comb_rate': 0.2,
    'dense_4_co_units': 20,
    # Main Output: 6 linear units (one per pose component)
    'main_output_units': 6,
    'main_output_act': 'linear',
    # General Housekeeping; regularizers are all disabled for this experiment.
    'v_conv_regularizer': None,
    'conv_regularizer': None,
    'dense_regularizer_1': None,
    'dense_regularizer_2': None,
    'activation_fn': 'elu',
    'kern_init': 'glorot_uniform',
    'model_opt': tf.keras.optimizers.Nadam,
    'learning_rate': 0.002,
    'model_epochs': 50,
    'model_batchsize': 5,
    'model_loss': cust_mean_squared_error_var,
    'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
# Input Layers: one per data modality. The `name` values must match the keys
# used in the model.fit() feed dict below.
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# 3D convolution branch over the voxel volume: eight Conv3D layers with
# BatchNorm and SpatialDropout3D interleaved and two max-pools, then a
# flatten and four dense layers ending in dense_4_v.
# NOTE(review): the bn_* names below are rebound later by the 2D branches;
# harmless (each tensor is already wired into the graph) but confusing.
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_1 = tf.keras.layers.BatchNormalization()(v_conv_1)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_1'])(bn_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_1_size'], padding=params['v_pool_1_pad'], data_format=channel_order)(v_conv_2)
bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_conv_4)
bn_4 = tf.keras.layers.BatchNormalization()(v_pool_2)
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_4)
v_flatten_1 = tf.keras.layers.Flatten()(v_conv_8)
bn_8 = tf.keras.layers.BatchNormalization()(v_flatten_1)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
bn_9 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_9)
bn_10 = tf.keras.layers.BatchNormalization()(dense_2_v)
dense_3_v = tf.keras.layers.Dense(units=params['dense_3_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_10)
bn_11 = tf.keras.layers.BatchNormalization()(dense_3_v)
dense_4_v = tf.keras.layers.Dense(units=params['dense_4_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_11)
# -----------------------------------------------------------------
# 2D convolution branch for fluoro image 1: per-image standardization is done
# inside the graph via a Lambda layer, then six Conv2D layers with BatchNorm,
# SpatialDropout2D and two max-pools, ending in dense_3_f_1.
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_1)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_1)
bn_4 = tf.keras.layers.BatchNormalization()(pool_2_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(conv_6_1)
# Dense Layers After Flattened 2D Conv
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_1)
dense_2_f_1 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# 2D convolution branch for fluoro image 2 -- an exact structural duplicate of
# the image-1 branch above (same params, separate weights).
# NOTE(review): this duplication could be factored into a tower-builder
# function applied to each input.
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_1_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_1)
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(conv_2_2)
bn_2 = tf.keras.layers.BatchNormalization()(pool_1_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_3_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_3)
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(conv_4_2)
bn_4 = tf.keras.layers.BatchNormalization()(pool_2_2)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_4)
bn_5 = tf.keras.layers.BatchNormalization()(conv_5_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_5)
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(conv_6_2)
# Dense Layers After Flattened 2D Conv
bn_6 = tf.keras.layers.BatchNormalization()(flatten_1_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_6)
bn_7 = tf.keras.layers.BatchNormalization()(dense_1_f_2)
dense_2_f_2 = tf.keras.layers.Dense(units=params['dense_2_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_7)
bn_8 = tf.keras.layers.BatchNormalization()(dense_2_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=params['dense_3_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_8)
# -----------------------------------------------------------------
# Dense branch over the 6-element calibration vector, ending in dense_3_cali.
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
dense_3_cali = tf.keras.layers.Dense(units=params['dense_3_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_2)
# -----------------------------------------------------------------
# Combine Vox Data, Fluoro Data, and Cali Data: concatenate the final tensor
# of each branch into one feature vector.
dense_0_comb = tf.keras.layers.concatenate([dense_4_v, dense_3_f_1, dense_3_f_2, dense_3_cali])
# -----------------------------------------------------------------
# Dense Layers at Top of Model (BatchNorm before each dense, two Dropouts)
bn_1 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_1_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_1_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=params['drop_1_comb_rate'])(bn_2)
dense_2_comb = tf.keras.layers.Dense(units=params['dense_2_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(dense_drop_1)
bn_3 = tf.keras.layers.BatchNormalization()(dense_2_comb)
dense_3_comb = tf.keras.layers.Dense(units=params['dense_3_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(bn_3)
bn_4 = tf.keras.layers.BatchNormalization()(dense_3_comb)
dense_drop_2 = tf.keras.layers.Dropout(rate=params['drop_2_comb_rate'])(bn_4)
dense_4_comb = tf.keras.layers.Dense(units=params['dense_4_co_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(dense_drop_2)
# -----------------------------------------------------------------
# Main Output: linear, 6 units (one per regression target)
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(dense_4_comb)
# -----------------------------------------------------------------
# Model Housekeeping: assemble, compile, and record the architecture diagram.
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
# NOTE(review): `lr` is the legacy optimizer argument name; newer Keras
# versions expect `learning_rate` -- confirm against the pinned TF version.
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
# Open the compiled HDF5 datasets read-only. The handles stay open until the
# arrays are materialized into memory further below, then each is closed.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly split the index range [0, shape) into test and train indices.

    Args:
        shape: Total number of samples available to index.
        num_of_samples: If given, only this many indices are drawn (without
            replacement) before splitting; otherwise all `shape` indices
            are used.
        ratio: Fraction of the drawn indices assigned to the test set.

    Returns:
        (test_indx, train_indx): two disjoint 1-D integer arrays whose
        concatenation is the full set of drawn indices.
    """
    if num_of_samples is None:
        num_of_samples = shape
    shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the original hard-coded 0.2 here, silently ignoring the
    # `ratio` parameter. The default of 0.2 preserves prior behavior.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
num_of_samples = None
# Hold out a test split, then carve a validation split out of the
# remaining training indices.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
# split_train_test returned positions *into* train_sup_indxs; map them
# back to absolute dataset indices.
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires index lists in increasing order; sorting
# also makes the saved splits reproducible to inspect.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Record the index splits so the experiment can be re-analyzed later.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Each HDF5 dataset is read fully into memory with [:] before being
# split, then its backing file is closed immediately.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# expand_dims(..., axis=-1) adds the trailing channel axis the conv
# layers expect; image axis 1 separates the two fluoroscopy views.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the training history with the index splits, then the model.
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
hist_file.close()
# -----------------------------------------------------------------
# v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
# v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_bn_2)
# v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_2)
# v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_2_size'], padding=params['v_pool_2_pad'], data_format=channel_order)(v_spat_2)
# v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_pool_2)
# v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_3)
# # v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_3_size'], padding=params['v_pool_3_pad'], data_format=channel_order)(v_spat_3)
# v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_spat_3)
# v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_4)
# v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(v_conv_5)
# -----------------------------------------------------------------
# # Second run of 2D Conv Layers for Image 1
# conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_1_1)
# spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(conv_2_1)
# pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_1)
# # Third run of 2D Conv Layers for Image 1
# conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_2_1)
# pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(conv_3_1)
# # Fourth run of 2D Conv Layers for Image 1
# conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_3_1)
| Python |
3D | john-drago/fluoro | code/datacomp/norm_and_std_dsets.py | .py | 9,248 | 238 | '''
This module will generate the necessary data points for all of the datasets (for training fluoroscopic neural net) in order to perform normalization and standardization.
'''
import numpy as np
import h5py
import os
import time
# -----------------------------------------------------------------
load_dir = '/Volumes/Seagate/fluoro'
save_dir = '/Volumes/Seagate/fluoro'
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
    """Linearly rescale *data_set* into *feature_range*.

    Extrema are computed along *axis* unless *data_min*/*data_max* are
    supplied, in which case the data is first clamped to those bounds and
    the supplied values drive the scaling.
    """
    lo, hi = feature_range
    if data_min is None:
        data_min = np.min(data_set, axis=axis)
    else:
        data_set = np.where(data_set < data_min, data_min, data_set)
    if data_max is None:
        data_max = np.max(data_set, axis=axis)
    else:
        data_set = np.where(data_set > data_max, data_max, data_set)
    unit_scaled = (data_set - data_min) / (data_max - data_min)
    return unit_scaled * (hi - lo) + lo
def min_max_norm_per_image(data_set, feature_range=(-1, 1), axis=(1, 2)):
    """Rescale each leading-axis entry (image) of *data_set* into
    *feature_range* independently, using that entry's own min/max."""
    lo, hi = feature_range
    per_img_min = np.min(data_set, axis=axis)
    per_img_max = np.max(data_set, axis=axis)
    # Re-insert the reduced axes one at a time so the per-image extrema
    # broadcast back against the full data set.
    while per_img_min.ndim < data_set.ndim and per_img_max.ndim < data_set.ndim:
        per_img_min = np.expand_dims(per_img_min, axis=1)
        per_img_max = np.expand_dims(per_img_max, axis=1)
    unit_scaled = (data_set - per_img_min) / (per_img_max - per_img_min)
    return unit_scaled * (hi - lo) + lo
def inv_min_max(data_set, data_min, data_max, axis=0):
    """Map normalized *data_set* back onto [data_min, data_max].

    The data is first rescaled to [0, 1] using its own extrema along
    *axis*, stretched to the target range, then clamped to the bounds.
    """
    span_lo = np.min(data_set, axis=axis)
    span_hi = np.max(data_set, axis=axis)
    unit = (data_set - span_lo) / (span_hi - span_lo)
    restored = unit * (data_max - data_min) + data_min
    restored = np.where(restored < data_min, data_min, restored)
    return np.where(restored > data_max, data_max, restored)
def feature_scaler(data_set, mean=0, std=1, axis=0):
    """Standardize *data_set* along *axis* to the requested mean/std."""
    centered = data_set - np.mean(data_set, axis=axis)
    return centered / (np.std(data_set, axis=axis) / std) + mean
def feature_scaler_per_image(data_set, mean=0, std=1, axis=(1, 2)):
    """Standardize each leading-axis entry (image) of *data_set*
    independently to the requested mean/std."""
    per_img_mean = np.mean(data_set, axis=axis)
    per_img_std = np.std(data_set, axis=axis)
    # Re-insert the reduced axes one at a time so the per-image
    # statistics broadcast back against the full data set.
    while per_img_mean.ndim < data_set.ndim and per_img_std.ndim < data_set.ndim:
        per_img_mean = np.expand_dims(per_img_mean, axis=1)
        per_img_std = np.expand_dims(per_img_std, axis=1)
    return (data_set - per_img_mean) / (per_img_std / std) + mean
def inv_feature_scaler(data_set, data_mean, data_std, axis=0):
    """Undo feature scaling: restore the given mean/std along *axis*."""
    centered = data_set - np.mean(data_set, axis=axis)
    return centered * (data_std / np.std(data_set, axis=axis)) + data_mean
# -----------------------------------------------------------------
# labels_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
# label_norm_std_file = h5py.File(os.path.join(save_dir, 'labels_norm_std.h5py'), 'w')
# labels_mat = labels_file['labels_dset']
# labels_dset = labels_mat[:]
# labels_std = np.std(labels_dset, axis=0)
# labels_mean = np.mean(labels_dset, axis=0)
# labels_var = np.var(labels_dset, axis=0)
# labels_min = np.min(labels_dset, axis=0)
# labels_max = np.max(labels_dset, axis=0)
# labels_min_max_norm = min_max_norm(labels_dset)
# labels_std_scale = feature_scaler(labels_dset)
# std_dset = label_norm_std_file.create_dataset('std', data=labels_std)
# mean_dset = label_norm_std_file.create_dataset('mean', data=labels_mean)
# var_dset = label_norm_std_file.create_dataset('var', data=labels_var)
# min_dset = label_norm_std_file.create_dataset('min', data=labels_min)
# max_dset = label_norm_std_file.create_dataset('max', data=labels_max)
# min_max_norm_dset = label_norm_std_file.create_dataset('min_max_dset', data=labels_min_max_norm)
# std_scale_dset = label_norm_std_file.create_dataset('std_scale_dset', data=labels_std_scale)
# label_norm_std_file.close()
# labels_file.close()
# -----------------------------------------------------------------
# cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
# cali_norm_std_file = h5py.File(os.path.join(save_dir, 'calibration_norm_std.h5py'), 'w')
# cali_mat = cali_file['cali_len3_rot']
# cali_dset = cali_mat[:]
# cali_std = np.std(cali_dset, axis=0)
# cali_mean = np.mean(cali_dset, axis=0)
# cali_var = np.var(cali_dset, axis=0)
# cali_min = np.min(cali_dset, axis=0)
# cali_max = np.max(cali_dset, axis=0)
# cali_min_max_norm = min_max_norm(cali_dset)
# cali_std_scale = feature_scaler(cali_dset)
# std_dset = cali_norm_std_file.create_dataset('std', data=cali_std)
# mean_dset = cali_norm_std_file.create_dataset('mean', data=cali_mean)
# var_dset = cali_norm_std_file.create_dataset('var', data=cali_var)
# min_dset = cali_norm_std_file.create_dataset('min', data=cali_min)
# max_dset = cali_norm_std_file.create_dataset('max', data=cali_max)
# min_max_norm_dset = cali_norm_std_file.create_dataset('min_max_dset', data=cali_min_max_norm)
# std_scale_dset = cali_norm_std_file.create_dataset('std_scale_dset', data=cali_std_scale)
# cali_norm_std_file.close()
# cali_file.close()
# -----------------------------------------------------------------
# image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
# image_norm_std_file = h5py.File(os.path.join(save_dir, 'images_norm_std.h5py'), 'w')
# image_mat = image_file['image_dset']
# image_dset_1 = np.expand_dims(image_mat[:, 0, :, :], axis=-1)
# image_std_1 = np.std(image_dset_1, axis=(1, 2))
# image_mean_1 = np.mean(image_dset_1, axis=(1, 2))
# image_var_1 = np.var(image_dset_1, axis=(1, 2))
# image_min_1 = np.min(image_dset_1, axis=(1, 2))
# image_max_1 = np.max(image_dset_1, axis=(1, 2))
# image_min_max_norm_1 = min_max_norm_per_image(image_dset_1)
# image_std_scale_1 = feature_scaler_per_image(image_dset_1)
# fluoro_1 = image_norm_std_file.create_group('image_1')
# std_dset = fluoro_1.create_dataset('std_per_image', data=image_std_1)
# mean_dset = fluoro_1.create_dataset('mean_per_image', data=image_mean_1)
# var_dset = fluoro_1.create_dataset('var_per_image', data=image_var_1)
# min_dset = fluoro_1.create_dataset('min_per_image', data=image_min_1)
# max_dset = fluoro_1.create_dataset('max_per_image', data=image_max_1)
# min_max_norm_dset = fluoro_1.create_dataset('min_max_dset_per_image', data=image_min_max_norm_1)
# std_scale_dset = fluoro_1.create_dataset('std_scale_dset_per_image', data=image_std_scale_1)
# image_dset_2 = np.expand_dims(image_mat[:, 1, :, :], axis=-1)
# image_std_2 = np.std(image_dset_2, axis=(1, 2))
# image_mean_2 = np.mean(image_dset_2, axis=(1, 2))
# image_var_2 = np.var(image_dset_2, axis=(1, 2))
# image_min_2 = np.min(image_dset_2, axis=(1, 2))
# image_max_2 = np.max(image_dset_2, axis=(1, 2))
# image_min_max_norm_2 = min_max_norm_per_image(image_dset_2)
# image_std_scale_2 = feature_scaler_per_image(image_dset_2)
# fluoro_2 = image_norm_std_file.create_group('image_2')
# std_dset = fluoro_2.create_dataset('std_per_image', data=image_std_2)
# mean_dset = fluoro_2.create_dataset('mean_per_image', data=image_mean_2)
# var_dset = fluoro_2.create_dataset('var_per_image', data=image_var_2)
# min_dset = fluoro_2.create_dataset('min_per_image', data=image_min_2)
# max_dset = fluoro_2.create_dataset('max_per_image', data=image_max_2)
# min_max_norm_dset = fluoro_2.create_dataset('min_max_dset_per_image', data=image_min_max_norm_2)
# std_scale_dset = fluoro_2.create_dataset('std_scale_dset_per_image', data=image_std_scale_2)
# image_norm_std_file.close()
# image_file.close()
# -----------------------------------------------------------------
# vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
# vox_norm_std_file = h5py.File(os.path.join(save_dir, 'voxels_norm_std.h5py'), 'w')
# vox_mat = vox_file['vox_dset']
# vox_dset = np.expand_dims(vox_mat, axis=-1)
# vox_std = np.std(vox_dset, axis=(1, 2, 3))
# vox_mean = np.mean(vox_dset, axis=(1, 2, 3))
# vox_var = np.var(vox_dset, axis=(1, 2, 3))
# vox_min = np.min(vox_dset, axis=(1, 2, 3))
# vox_max = np.max(vox_dset, axis=(1, 2, 3))
# vox_min_max_norm = min_max_norm_per_image(vox_dset, axis=(1, 2, 3))
# vox_std_scale = feature_scaler_per_image(vox_dset, axis=(1, 2, 3))
# std_dset = vox_norm_std_file.create_dataset('std', data=vox_std)
# mean_dset = vox_norm_std_file.create_dataset('mean', data=vox_mean)
# var_dset = vox_norm_std_file.create_dataset('var', data=vox_var)
# min_dset = vox_norm_std_file.create_dataset('min', data=vox_min)
# max_dset = vox_norm_std_file.create_dataset('max', data=vox_max)
# min_max_norm_dset = vox_norm_std_file.create_dataset('min_max_dset', data=vox_min_max_norm, compression='lzf')
# std_scale_dset = vox_norm_std_file.create_dataset('std_scale_dset', data=vox_std_scale, compression='lzf')
# vox_norm_std_file.close()
# vox_file.close()
| Python |
3D | john-drago/fluoro | code/datacomp/save_to_mat.py | .py | 5,151 | 168 | '''
This module will devise some functions that can create a .mat file in the expected format of the .mat files that will determine the position and rotation of the bone for each frame.
'''
import numpy as np
import scipy.io as sio
import os
from coord_change import Angles2Basis
def generate_save_mat(dict_data, filename):
    '''
    Write a results .mat file containing Cup_RV and Stem_RV struct arrays.

    *dict_data* must provide four entries:
        'Femur_R', 'Femur_V'  -> saved as the cup (Cup_RV)
        'Tibia_R', 'Tibia_V'  -> saved as the stem (Stem_RV)
    i.e. femur maps to the cup implant and tibia to the stem implant.
    '''
    struct_dtype = [('R', 'O'), ('V', 'O')]
    cup = np.zeros((1,), dtype=struct_dtype)
    stem = np.zeros((1,), dtype=struct_dtype)
    cup[0]['R'] = dict_data['Femur_R']
    cup[0]['V'] = dict_data['Femur_V']
    stem[0]['R'] = dict_data['Tibia_R']
    stem[0]['V'] = dict_data['Tibia_V']
    sio.savemat(filename, {'Cup_RV': cup, 'Stem_RV': stem})
    return None
def extract_base_filename(path_to_frame):
    '''
    Return the base portion of a '*results.mat' filename found under one of
    the frame directories inside *path_to_frame*.

    All frame directories are assumed to share the same base filename, so
    scanning them in sorted order yields the same answer the original
    random sampling did. Bug fix: the original drew random directories in a
    `while True` loop and hung forever when no results file existed; this
    version scans deterministically and raises instead.

    Raises:
        FileNotFoundError: if no '*results.mat' file exists under any
            frame directory.
    '''
    for frame_dir in sorted(os.listdir(path_to_frame)):
        # Skip macOS metadata and the calibration directory, as before.
        if frame_dir in ('.DS_Store', 'cali'):
            continue
        for save_file in os.listdir(os.path.join(path_to_frame, frame_dir)):
            if save_file.endswith('results.mat'):
                # Strip the trailing 16 characters ('<frame>_results.mat',
                # e.g. '0001_results.mat') to recover the base filename.
                return save_file[:-16]
    raise FileNotFoundError('no *results.mat file found under {}'.format(path_to_frame))
def create_output_vars_from_predicted(output_mat):
    '''
    Split an (N, 6) prediction matrix into rotations and translations.

    Columns 0-2 hold rotation angles, converted row-by-row to 3x3 rotation
    matrices via Angles2Basis; columns 3-5 are returned unchanged as
    translation vectors.

    Returns:
        (rot_output, trans_output): an (N, 3, 3) array of rotation
        matrices and the (N, 3) translation slice.
    '''
    angle_rows = output_mat[:, 0:3]
    trans_ouput = output_mat[:, 3:6]
    rot_output = np.zeros((angle_rows.shape[0], 3, 3))
    for row_idx, angles in enumerate(angle_rows):
        rot_output[row_idx, :, :] = Angles2Basis(angles)
    return rot_output, trans_ouput
def save_new_mats_from_training(model_predict_data, list_of_frames_matched, prediction_save_dir_path=os.path.expanduser('~/fluoro/data/prediction')):
    '''
    Write one predicted .mat results file per frame.

    *model_predict_data* holds two rows per frame (cup/femur first, then
    stem/tibia), aligned with *list_of_frames_matched*, whose entries are
    the full paths of the frames evaluated by the model. For every frame a
    mirrored activity/patient/laterality directory is created under
    *prediction_save_dir_path* and a '<base><frame>_results.mat' file is
    written there via generate_save_mat.

    Raises:
        ValueError: if the prediction matrix does not hold exactly two
            rows per matched frame.
    '''
    # Two rows (cup + stem) per frame. Idiom fix: the original computed
    # len(list_of_frames_matched * 2), building a throwaway duplicated
    # list just to measure its length.
    if len(model_predict_data) != 2 * len(list_of_frames_matched):
        raise ValueError('The length of model_predict_data and list_of_frames_matched need to be equivalent.')
    for ticker, frame_path in enumerate(list_of_frames_matched):
        path_parts = frame_path.split(os.sep)
        # Mirror the activity/patient/laterality portion of the source path.
        save_sub_path = os.path.join(*path_parts[-4:-1])
        frame = path_parts[-1]
        save_path = os.path.join(prediction_save_dir_path, save_sub_path)
        os.makedirs(save_path, exist_ok=True)
        # Rows [2*ticker, 2*ticker+2): femur/cup first, tibia/stem second.
        rot_output, trans_output = create_output_vars_from_predicted(model_predict_data[2 * ticker: 2 * ticker + 2])
        output_dict = {
            'Femur_R': rot_output[0],
            'Femur_V': trans_output[0],
            'Tibia_R': rot_output[1],
            'Tibia_V': trans_output[1],
        }
        # NOTE(review): frame_path[:-4] assumes the frame component is four
        # characters long -- confirm against the on-disk naming scheme.
        base_file_name = extract_base_filename(os.path.abspath(frame_path[:-4]))
        mat_filename = base_file_name + frame + '_results.mat'
        print(os.path.join(save_path, mat_filename))
        generate_save_mat(dict_data=output_dict, filename=os.path.join(save_path, mat_filename))
    return None
if __name__ == '__main__':
import tensorflow as tf
import pickle
# -----------------------------------------------------------------
# This code will test whether or not the above code can create new .mat files from a simulated data set containing simulated predictions of rotation and translation
# dir_file = open(os.path.join(os.getcwd(), 'vox_fluoro_hist_objects.pkl'), 'rb')
# dict_of_paths = pickle.load(dir_file)
# dict_of_frames = dict_of_paths['dict_of_frames']
# list_of_path_to_frames = dict_of_paths['list_of_path_to_frames']
# random_activity_and_patient = np.random.choice(list_of_path_to_frames)
# int_list_of_test_frames = dict_of_frames[random_activity_and_patient]
# list_of_predict_frames = []
# for frame in int_list_of_test_frames:
# list_of_predict_frames.append(os.path.abspath(os.path.join(random_activity_and_patient, frame)))
# list_of_predict_frames.sort()
# test_dataset = np.random.rand(2 * len(list_of_predict_frames), 6)
# save_new_mats_from_training(test_dataset, list_of_predict_frames)
# dir_file.close()
# -----------------------------------------------------------------
# tf.keras.models.load_model()
# -----------------------------------------------------------------
# test_dict = {}
# test_dict['Femur_R'] = np.random.rand(3, 3)
# test_dict['Femur_V'] = np.random.rand(3)
# test_dict['Tibia_R'] = np.random.rand(3, 3)
# test_dict['Tibia_V'] = np.random.rand(3)
# save_path = os.path.join(os.getcwd(), 'test_1.mat')
| Python |
3D | john-drago/fluoro | code/datacomp/data_augmentation_test.py | .py | 53,534 | 1,376 | '''
This function will perform data augmentation on our current data set. Basically, we will do small translations and rotations on our voxel dataset to increase the number of instances we are currently training with.
'''
import os
import scipy.io as sio
import skimage
import numpy as np
import trimesh
import pandas as pd
import h5py
# ---------------------------------------------------------------
# Before we get started, need to first define some top-level variables, which future functions will make reference to.
top_level_dir = os.path.expanduser('~/fluoro/data')
save_dir = os.path.abspath('/Volumes/Seagate/fluoro')
# ---------------------------------------------------------------
# These are some functions, which will be useful for changing the data for the augmentation.
def Global2Local_Coord(rot_mat, trans_vector, points_in_global):
    '''
    Express globally-defined points in a local coordinate frame.

    rot_mat: square (3x3) invertible matrix whose columns [e_1 e_2 e_3]
        are the local axes written in global coordinates.
    trans_vector: length-3 translation from the global origin to the
        local origin (global origin ----> local origin).
    points_in_global: (N, 3) points defined in the global frame.

    Returns the same points expressed in the local frame.
    '''
    if rot_mat.shape[0] != rot_mat.shape[1]:
        raise ValueError('Rotation Matrix should be square')
    shifted = points_in_global - trans_vector
    # Un-rotate: solve through the inverse basis (rot_mat need not be
    # orthonormal, so a general inverse is used rather than a transpose).
    return (np.linalg.inv(rot_mat) @ shifted.T).T
def Local2Global_Coord(rot_mat, trans_vector, points_in_local):
    '''
    Express locally-defined points in the global coordinate frame.

    rot_mat: square (3x3) invertible matrix whose columns [e_1 e_2 e_3]
        are the local axes written in global coordinates.
    trans_vector: translation of shape (3,) or (1, 3) from the global
        origin to the local origin (global origin ----> local origin).
    points_in_local: (N, 3) points defined in the local frame.

    Returns the same points expressed in the global frame.
    '''
    if rot_mat.shape[0] != rot_mat.shape[1]:
        raise ValueError('Rotation Matrix should be square')
    elif trans_vector.shape != (3,) and trans_vector.shape != (1, 3):
        raise ValueError('Translation Matrix should be an array of size 3 or 1x3 matrix')
    # Rotate into the global basis, then shift by the local origin.
    return (rot_mat @ points_in_local.T).T + trans_vector
def Basis2Angles(rot_mat):
    '''
    Recover the rotation angles [theta, phi, psi] from a rotation matrix.

    rot_mat is a 3x3 orthonormal basis [ e_1 e_2 e_3 ] describing the
    local axes in global coordinates, assumed to factor as

        R_rot = R_z(psi) * R_y(phi) * R_x(theta)   (order matters)

    with the standard right-handed rotations about the x, y and z axes.

    Returns:
        np.array([theta, phi, psi])

    Raises:
        AssertionError: if neither arcsin branch for phi reproduces
            rot_mat within tolerance.
    '''
    epsilon = 0.000009  # tolerance for the round-trip reconstruction check
    phi = np.arcsin(-rot_mat[2, 0])
    psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
    if rot_mat[0, 0] / np.cos(phi) < 0:
        # arcsin cannot see the cosine's sign; use U_x to pick the quadrant.
        psi = np.pi - psi
    theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
    if rot_mat[2, 2] / np.cos(phi) < 0:
        theta = np.pi - theta
    # Bug fix: the original tested `error < epsilon`, which any large
    # *negative* error also satisfies; the error magnitude must be compared.
    error = Angles2Basis([theta, phi, psi]) - rot_mat
    error_binary = np.abs(error) < epsilon
    if not error_binary.all():
        # First guess failed: try the second arcsin branch for phi.
        phi = np.pi - phi
        psi = np.arcsin((rot_mat[1, 0]) / (np.cos(phi)))
        if rot_mat[0, 0] / np.cos(phi) < 0:
            psi = np.pi - psi
        theta = np.arcsin((rot_mat[2, 1]) / (np.cos(phi)))
        # NOTE(review): the theta quadrant flip is deliberately absent in
        # this branch in the original code (it was commented out) --
        # confirm whether that is intended before adding it back.
        # Bug fix: the original called Angles2Basis(np.array(theta, phi, psi)),
        # which raises TypeError (np.array's second positional argument is
        # the dtype); the angles must be passed as a sequence.
        error = Angles2Basis(np.array([theta, phi, psi])) - rot_mat
        error_binary = np.abs(error) < epsilon
    assert error_binary.all()
    return np.array([theta, phi, psi])
# The nine helpers below return the individual entries of the rotation
# matrix
#
#             [ U_x  V_x  W_x ]
#     R_rot = [ U_y  V_y  W_y ] = R_z(psi) * R_y(phi) * R_x(theta)
#             [ U_z  V_z  W_z ]
#
# (order matters), with angle_array = [theta, phi, psi] holding the
# rotation angles about the x, y and z axes respectively.
# NOTE(review): the original per-function docstrings wrote out R_y and R_z
# with typos (R_y's centre entry should be 1, and R_z uses sin(psi), not
# sin(phi)); the implementations match the standard Z*Y*X composition.
def u_x(angle_array):
    '''Entry U_x = R_rot[0, 0] = cos(phi) * cos(psi).'''
    return np.cos(angle_array[1]) * np.cos(angle_array[2])
def u_y(angle_array):
    '''Entry U_y = R_rot[1, 0] = cos(phi) * sin(psi).'''
    return np.cos(angle_array[1]) * np.sin(angle_array[2])
def u_z(angle_array):
    '''Entry U_z = R_rot[2, 0] = -sin(phi).'''
    return -np.sin(angle_array[1])
def v_x(angle_array):
    '''Entry V_x = R_rot[0, 1] = cos(psi)*sin(theta)*sin(phi) - cos(theta)*sin(psi).'''
    return np.cos(angle_array[2]) * np.sin(angle_array[0]) * np.sin(angle_array[1]) - np.cos(angle_array[0]) * np.sin(angle_array[2])
def v_y(angle_array):
    '''Entry V_y = R_rot[1, 1] = cos(theta)*cos(psi) + sin(theta)*sin(phi)*sin(psi).'''
    return np.cos(angle_array[0]) * np.cos(angle_array[2]) + np.sin(angle_array[0]) * np.sin(angle_array[1]) * np.sin(angle_array[2])
def v_z(angle_array):
    '''Entry V_z = R_rot[2, 1] = cos(phi) * sin(theta).'''
    return np.cos(angle_array[1]) * np.sin(angle_array[0])
def w_x(angle_array):
    '''Entry W_x = R_rot[0, 2] = cos(theta)*cos(psi)*sin(phi) + sin(theta)*sin(psi).'''
    return np.cos(angle_array[0]) * np.cos(angle_array[2]) * np.sin(angle_array[1]) + np.sin(angle_array[0]) * np.sin(angle_array[2])
def w_y(angle_array):
    '''Entry W_y = R_rot[1, 2] = cos(theta)*sin(phi)*sin(psi) - cos(psi)*sin(theta).'''
    return np.cos(angle_array[0]) * np.sin(angle_array[1]) * np.sin(angle_array[2]) - np.cos(angle_array[2]) * np.sin(angle_array[0])
def w_z(angle_array):
    '''Entry W_z = R_rot[2, 2] = cos(theta) * cos(phi).'''
    return np.cos(angle_array[0]) * np.cos(angle_array[1])
def Angles2Basis(rot_ang_array):
    '''
    Build the rotation matrix for angles [theta, phi, psi].

    Composes the standard right-handed rotations about x (theta),
    y (phi) and z (psi):

              [ 1      0           0        ]
        R_x = [ 0   cos(theta) -sin(theta)  ]
              [ 0   sin(theta)  cos(theta)  ]

              [  cos(phi)  0  sin(phi) ]
        R_y = [  0         1  0        ]
              [ -sin(phi)  0  cos(phi) ]

              [ cos(psi) -sin(psi)  0 ]
        R_z = [ sin(psi)  cos(psi)  0 ]
              [ 0         0         1 ]

        R_rot = R_z * R_y * R_x   (order matters)

    Returns the resulting orthonormal basis

                [ U_x  V_x  W_x ]
        R_rot = [ U_y  V_y  W_y ]
                [ U_z  V_z  W_z ]
    '''
    theta = rot_ang_array[0]
    phi = rot_ang_array[1]
    psi = rot_ang_array[2]
    # Closed-form entries of R_z(psi) @ R_y(phi) @ R_x(theta).
    u_x = np.cos(phi) * np.cos(psi)
    u_y = np.cos(phi) * np.sin(psi)
    u_z = -np.sin(phi)
    v_x = np.cos(psi) * np.sin(theta) * np.sin(phi) - np.cos(theta) * np.sin(psi)
    v_y = np.cos(theta) * np.cos(psi) + np.sin(theta) * np.sin(phi) * np.sin(psi)
    v_z = np.cos(phi) * np.sin(theta)
    w_x = np.cos(theta) * np.cos(psi) * np.sin(phi) + np.sin(theta) * np.sin(psi)
    w_y = np.cos(theta) * np.sin(phi) * np.sin(psi) - np.cos(psi) * np.sin(theta)
    w_z = np.cos(theta) * np.cos(phi)
    rot_mat = np.array([
        [u_x, v_x, w_x],
        [u_y, v_y, w_y],
        [u_z, v_z, w_z]
    ])
    # squeeze is a no-op for scalar angles; presumably it collapses
    # singleton dims when array-valued angles are passed -- TODO confirm.
    return np.squeeze(rot_mat)
# ---------------------------------------------------------------
# We are first going to copy some old useful functions into this file to make them available for later calls. These functions will primarily focus on identifying where the data is located.
def generate_dict_of_acts_with_patients():
    '''
    Build a mapping from activity name to the patients who performed it.

    Scans the top-level data directory (one sub-directory per activity,
    each containing one sub-directory per patient), skipping the
    bookkeeping entries '.DS_Store', 'compilation' and 'prediction'.
    Assuming that we are in path: */fluoro/data
    '''
    pt_dict = {}
    skipped = ('.DS_Store', 'compilation', 'prediction')
    for activity in os.listdir(os.path.abspath(top_level_dir)):
        if activity in skipped:
            continue
        patients = [
            entry
            for entry in os.listdir(os.path.join(top_level_dir, activity))
            if entry != '.DS_Store'
        ]
        pt_dict[activity] = patients
    return pt_dict
def generate_dict_path_to_frames(dict_of_act_pts):
    '''
    Map every laterality directory (e.g. ``.../activity/patient/lt``) to the
    list of frame directory names it contains.

    Parameters
    ----------
    dict_of_act_pts : dict
        Keys are activities, values are the lists of patients who performed
        that activity (as produced by generate_dict_of_acts_with_patients).

    Returns
    -------
    dict
        Keys are paths to the directories that hold frames; values are the
        frame directory names found there ('.DS_Store' and 'cali' excluded).
    '''
    frames_by_path = {}
    for activity, patients in dict_of_act_pts.items():
        for patient in patients:
            patient_dir = os.path.join(top_level_dir, activity, patient)
            for laterality in os.listdir(patient_dir):
                # 'stl' holds meshes, not frames; '.DS_Store' is Finder junk.
                if laterality in ('.DS_Store', 'stl'):
                    continue
                frame_dir = os.path.join(top_level_dir, activity, patient, laterality)
                frames_by_path[frame_dir] = [
                    frame for frame in os.listdir(frame_dir)
                    if frame != '.DS_Store' and frame != 'cali'
                ]
    return frames_by_path
def generate_comprehensive_list_of_frames(dict_path_to_frames):
    '''
    Flatten a {frame-directory: [frame names]} mapping into a single sorted
    list of full paths to every frame stored under "~/fluoro/data".

    Parameters
    ----------
    dict_path_to_frames : dict
        Keys are directories holding frames, values are frame names there.

    Returns
    -------
    list of str
        Sorted full paths to every frame.
    '''
    all_frame_paths = [
        os.path.join(base_dir, frame)
        for base_dir, frames in dict_path_to_frames.items()
        for frame in frames
    ]
    return sorted(all_frame_paths)
# ---------------------------------------------------------------
def extract_calibration_data(path_to_cali):
    '''
    Return the R12 and V12 calibration variables from the first reg2fl***.mat
    file found in ``<path_to_cali>/cali``.

    Parameters
    ----------
    path_to_cali : str
        Path to the directory containing the 'cali' folder (typically
        data/activity/patient/laterality).

    Returns
    -------
    list
        [R12, V12] as loaded from the .mat file.

    Raises
    ------
    FileNotFoundError
        If the cali folder contains no file whose name starts with 'reg2fl'.
        (Previously this case fell through to an opaque NameError because
        ``fluoro_file`` was never bound.)
    '''
    cali_dir = os.path.join(path_to_cali, 'cali')
    for fle in os.listdir(cali_dir):
        # Calibration files are named reg2fl*.mat (case-insensitive prefix).
        if fle[0:6].lower() == 'reg2fl':
            fluoro_file = sio.loadmat(os.path.join(cali_dir, fle))
            break
    else:
        raise FileNotFoundError(
            "no 'reg2fl*' calibration file found in %s" % cali_dir)
    return [fluoro_file['R12'], fluoro_file['V12']]
def extract_image_data(path_to_frame, resize_shape=(128, 128)):
    '''
    Load the pair of fluoroscope images for a frame, resize them, and convert
    to grayscale.

    Images are identified by filename prefix: 'F1*.png' is fluoroscope 1 and
    'F2*.png' is fluoroscope 2.

    Parameters
    ----------
    path_to_frame : str
        Directory containing the frame's image files.
    resize_shape : tuple
        Target (rows, cols) of the resized images (originals are 1024x1024).

    Returns
    -------
    np.ndarray
        Array of the form [image1, image2].
    '''
    images = [0, 0]
    for entry in os.listdir(os.path.normpath(path_to_frame)):
        if entry[-4:] != '.png':
            continue
        prefix = entry[0:2]
        if prefix not in ('F1', 'F2'):
            continue
        raw = skimage.io.imread(os.path.join(path_to_frame, entry))
        resized = skimage.transform.resize(raw, resize_shape, anti_aliasing=True)
        # Slot 0 holds the F1 image, slot 1 the F2 image.
        images[0 if prefix == 'F1' else 1] = skimage.color.rgb2gray(resized)
    return np.array(images)
def extract_labels_rot_trans_femur_tib_data(path_to_frame):
    '''
    Read the registration .mat file for a frame and return the rotation
    angles and translation vectors for the FEMUR and the TIBIA.

    In the .mat file the femur is stored under 'Cup_RV' and the tibia under
    'Stem_RV'; each entry contains a rotation matrix (converted to three
    angles via Basis2Angles) and a translation vector.  Only the first .mat
    file found is read.

    Parameters
    ----------
    path_to_frame : str
        Directory containing the frame's registration results.

    Returns
    -------
    np.ndarray, shape (2, 6)
        Row 0: femur [angles (3), translation (3)].
        Row 1: tibia [angles (3), translation (3)].
        All zeros if no .mat file is present.
    '''
    labels = np.zeros((2, 6))
    for fle in os.listdir(os.path.normpath(path_to_frame)):
        if fle[-4:] != '.mat':
            continue
        results = sio.loadmat(os.path.join(path_to_frame, fle))
        for row, keyword in enumerate(('Cup_RV', 'Stem_RV')):
            bone = results[keyword]
            angles = np.array(Basis2Angles(bone[0][0][0]))
            labels[row, 0:3] = angles.reshape(3)
            labels[row, 3:6] = bone[0][0][1].reshape(3)
        break
    return labels
def voxel_from_array(mesh_vertices, spacing=0.5, mark_origin=False, location_of_origin=np.array([0, 0, 0]), origin_value=2):
    '''
    Voxelize a cloud of mesh vertices into a binary occupancy grid.

    The grid spans from min(coords, 0) to max(coords, 0) along each axis so
    the coordinate origin always lies inside it.  Each vertex marks the
    voxel(s) between the floor and the ceil of its scaled coordinates.

    Parameters
    ----------
    mesh_vertices : np.ndarray, shape (n, 3)
        Vertex locations.
    spacing : float
        Voxel edge length in mm.
    mark_origin : bool
        If True, write ``origin_value`` into the voxel that contains
        ``location_of_origin``.
    location_of_origin : np.ndarray, shape (3,)
        Point to mark when ``mark_origin`` is True.
    origin_value : int
        Value written into the origin voxel.

    Returns
    -------
    np.ndarray of int8
        The voxelized occupancy grid.
    '''
    lower = np.min(mesh_vertices, axis=0)
    lower = np.where(lower > 0, 0, lower)
    upper = np.max(mesh_vertices, axis=0)
    upper = np.where(upper < 0, 0, upper)
    n_bins = np.ceil((upper - lower) / spacing)
    grid = np.zeros(n_bins.astype('int32'))
    # Shift so all coordinates are non-negative, then express in voxel units.
    scaled = (mesh_vertices - lower) / spacing
    for row in range(mesh_vertices.shape[0]):
        lo = np.floor(scaled[row]).astype(int)
        hi = np.ceil(scaled[row]).astype(int)
        grid[lo[0]:hi[0] + 1, lo[1]:hi[1] + 1, lo[2]:hi[2] + 1] = 1
    if mark_origin:
        origin_scaled = (location_of_origin - lower) / spacing
        idx = np.floor(origin_scaled).astype(int)
        grid[idx[0], idx[1], idx[2]] = origin_value
    return grid.astype('int8')
def extract_stl_to_meshpoints(mesh_obj, PTS_file, voxelize_dim=0.5, random_disp=False, random_seed=None):
    '''
    Express the vertices of an STL mesh in the local coordinate frame defined
    by its PTS landmark file, optionally applying a random rotation.

    The PTS file supplies two landmark pairs: rows 0-1 define the local X
    axis and rows 2-3 a provisional Z axis.  Y is obtained by a cross
    product, and Z is re-derived from X and Y so the basis is orthonormal.

    Parameters
    ----------
    mesh_obj : trimesh mesh
        Mesh whose ``vertices`` are transformed.
    PTS_file : array-like, shape (4, 3)
        Landmark points defining the local frame.
    voxelize_dim : float
        Accepted for interface compatibility; not used in this function.
    random_disp : bool
        If True, apply a random rotation to the local-frame vertices.
    random_seed : int or None
        Seed for NumPy's global RNG.  If None (default), the RNG is left
        untouched.  BUGFIX: the old default, ``np.random.randint(...)``, was
        evaluated once at import time, so every unseeded call re-seeded the
        RNG with the same value and produced identical "random" rotations.

    Returns
    -------
    np.ndarray
        Vertices in the local coordinate frame (randomly rotated when
        ``random_disp`` is True).
    '''
    if random_seed is not None:
        np.random.seed(random_seed)
    PTS_file = np.array(PTS_file)
    X_vec = np.array(PTS_file[0, :] - PTS_file[1, :])
    Z_vec_pre = np.array(PTS_file[2, :] - PTS_file[3, :])
    Y_vec = np.cross(Z_vec_pre, X_vec)
    # Re-derive Z: X and the provisional Z are not guaranteed orthogonal, but
    # X, Y (= Z_pre x X) and Z (= X x Y) form a true orthonormal basis.
    Z_unit = np.cross(X_vec, Y_vec)
    x_unit = X_vec / np.linalg.norm(X_vec)
    y_unit = Y_vec / np.linalg.norm(Y_vec)
    z_unit = Z_unit / np.linalg.norm(Z_unit)
    rot_mat = np.array([
        [x_unit[0], y_unit[0], z_unit[0]],
        [x_unit[1], y_unit[1], z_unit[1]],
        [x_unit[2], y_unit[2], z_unit[2]]])
    # The local origin is the midpoint of the first landmark pair.
    origin_mesh_local = np.array(PTS_file[0:2, :].mean(axis=0))
    verts_local_coord = Global2Local_Coord(rot_mat, origin_mesh_local, mesh_obj.vertices)
    if random_disp:
        return random_rotation_translation(verts_local_coord, rotation=True)
    else:
        return verts_local_coord
# ---------------------------------------------------------------
# These are the two main functions, which will be used for data augmentation.
def random_rotation_translation(mesh_vertices, rotation=True, translation=False):
    '''
    Apply a random rotation and/or translation to an array of mesh vertices.

    Parameters
    ----------
    mesh_vertices : np.ndarray
        Vertices in their local coordinate frame (origin at [0, 0, 0]).
    rotation : bool
        If True, rotate by three independent uniform angles in [0, 2*pi).
    translation : bool
        If True, translate by a uniform random vector in [0, 3) per axis.

    Returns
    -------
    np.ndarray
        The randomly transformed vertex array.
    '''
    if rotation:
        # Three separate draws to keep the RNG consumption order unchanged.
        angles = np.array([np.random.rand(1) * 2 * np.pi for _ in range(3)])
    else:
        angles = np.array([0, 0, 0])
    if translation:
        shifts = np.array([np.random.rand(1) * 3 for _ in range(3)])
    else:
        shifts = np.array([0, 0, 0])
    rand_basis = Angles2Basis(angles)
    # print('random_rot_mat', rand_basis)
    return Local2Global_Coord(rand_basis, shifts, mesh_vertices)
def random_samples_selector(list_of_paths_incl_frames, numb_of_new_instances):
    '''
    Extend a list of frame paths with randomly re-sampled duplicates for data
    augmentation.

    Parameters
    ----------
    list_of_paths_incl_frames : list of str
        Paths to the directories holding each frame's data.
    numb_of_new_instances : int
        How many extra (randomly drawn, with replacement) paths to append.

    Returns
    -------
    list of str
        The original paths followed by the randomly drawn extras.
    '''
    extras = np.random.choice(list_of_paths_incl_frames, numb_of_new_instances, replace=True)
    return list_of_paths_incl_frames + list(extras)
def determine_voxel_max_shape_from_mesh_vertices(list_of_mesh_vertices, spacing=0.5):
    '''
    Compute the elementwise-maximum voxel-grid shape needed to hold any of
    the given vertex sets when voxelized at ``spacing``.

    Each vertex set is expected to be in its local coordinate system (origin
    at [0, 0, 0]); the per-axis extent runs from min(coords, 0) to
    max(coords, 0), mirroring ``voxel_from_array``.

    Parameters
    ----------
    list_of_mesh_vertices : iterable of np.ndarray
        Vertex arrays, each of shape (n, 3).
    spacing : float
        Voxel edge length in mm.

    Returns
    -------
    np.ndarray, shape (3,)
        Maximum number of bins required along each axis.
    '''
    max_shape_vector = np.zeros(3)
    for mesh_verts in list_of_mesh_vertices:
        lower = np.minimum(np.min(mesh_verts, axis=0), 0)
        upper = np.maximum(np.max(mesh_verts, axis=0), 0)
        bins_vec = np.ceil((upper - lower) / spacing)
        # Elementwise maximum replaces the previous per-axis if-chain.
        max_shape_vector = np.maximum(max_shape_vector, bins_vec)
    return max_shape_vector
def matrix_padder_to_size(vox_mat, max_shape_vector):
    '''
    Zero-pad a 3-D voxel matrix so it sits centered inside a grid of shape
    ``max_shape_vector``.

    Padding along each axis is split evenly between the two sides; when the
    deficit is odd, the extra voxel goes on the leading side.

    Parameters
    ----------
    vox_mat : np.ndarray
        3-D array to pad.
    max_shape_vector : sequence of 3 numbers
        Target shape; axes already at (or above) the target get no padding.

    Returns
    -------
    np.ndarray
        ``vox_mat`` zero-padded to the target shape.
    '''
    pad_mat = np.zeros((3, 2))
    # Identical computation per axis; previously written out three times.
    for axis in range(3):
        deficit = max_shape_vector[axis] - vox_mat.shape[axis]
        if deficit <= 0:
            continue
        if deficit % 2 == 1:
            pad_mat[axis, 0] = deficit // 2 + 1
            pad_mat[axis, 1] = deficit // 2
        else:
            pad_mat[axis, :] = deficit / 2
    return np.pad(vox_mat, pad_width=pad_mat.astype(int), mode='constant')
# ---------------------------------------------------------------
# This part of the file will deal with creating functions that will store the massive amounts of data
def generate_cali_storage_mat(list_of_path_to_frames, path_to_save_dir, save_file_name='cali_aug', compression=None):
    '''
    Write the calibration (rotation angles + translation) for every frame to
    an HDF5 file, duplicating each row once per bone so the rows align with
    the femur/tibia label dataset.

    The dataset 'cali_dset' has shape (2 * n_frames, 6): columns 0-2 hold the
    rotation angles derived from R12, columns 3-5 the translation from V12.
    '''
    os.makedirs(path_to_save_dir, exist_ok=True)
    n_rows = len(list_of_path_to_frames) * 2  # femur + tibia per frame
    cali_seed = np.zeros((n_rows, 6))
    print('Calibration data shape: ', cali_seed.shape)
    with h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w') as cali_file:
        cali_dset = cali_file.create_dataset('cali_dset', data=cali_seed, compression=compression)
        for indx, frame in enumerate(list_of_path_to_frames):
            # The calibration lives one directory up (the laterality folder).
            lat_dir = os.path.abspath(os.sep.join(os.path.normpath(frame).split(os.sep)[:-1]))
            rot, trans = extract_calibration_data(lat_dir)
            row = np.hstack((Basis2Angles(rot), np.reshape(trans, 3)))
            cali_dset[2 * indx: 2 * indx + 2] = np.array([row, row])
    return None
def generate_label_storage_mat(list_of_path_to_frames, path_to_save_dir, save_file_name='label_aug', compression=None):
    '''
    Write the femur/tibia pose labels for every frame to an HDF5 file.

    The dataset 'label_dset' has shape (2 * n_frames, 6): for each frame, row
    2k is the femur [angles (3), translation (3)] and row 2k+1 the tibia.
    '''
    os.makedirs(path_to_save_dir, exist_ok=True)
    n_rows = len(list_of_path_to_frames) * 2  # femur + tibia per frame
    label_seed = np.zeros((n_rows, 6))
    print('Label data shape: ', label_seed.shape)
    with h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w') as label_file:
        label_dset = label_file.create_dataset('label_dset', data=label_seed, compression=compression)
        for indx, frame in enumerate(list_of_path_to_frames):
            label_dset[2 * indx: 2 * indx + 2] = extract_labels_rot_trans_femur_tib_data(os.path.abspath(frame))
    return None
def generate_image_storage_mat(list_of_path_to_frames, path_to_save_dir, save_file_name='images_aug', compression=None):
    '''
    Write the pair of 128x128 grayscale fluoroscope images for every frame to
    an HDF5 file.

    The dataset 'image_dset' has shape (2 * n_frames, 2, 128, 128); each
    frame's image pair is broadcast into two consecutive rows (one per bone)
    so the rows align with the label and calibration datasets.
    '''
    os.makedirs(path_to_save_dir, exist_ok=True)
    n_rows = len(list_of_path_to_frames) * 2  # femur + tibia per frame
    image_seed = np.zeros((n_rows, 2, 128, 128))
    print('Image data shape: ', image_seed.shape)
    with h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w') as image_file:
        image_dset = image_file.create_dataset('image_dset', data=image_seed, compression=compression)
        for indx, frame in enumerate(list_of_path_to_frames):
            image_dset[2 * indx: 2 * indx + 2] = extract_image_data(os.path.abspath(frame))
            if indx % 500 == 0:
                print("Image: ", indx)
    return None
def generate_voxel_storage_mat(list_of_path_to_frames, path_to_save_dir, augmented_frames_number, save_file_name='voxels_aug', upload_set=50, compression='lzf', save_as_type='int8'):
    '''
    Voxelize the femur and tibia meshes for every frame and store them in one
    HDF5 dataset ('vox_dset').

    Two passes are made over ``list_of_path_to_frames``:

    1. A sizing pass: every mesh (with its random rotation, if augmented) is
       binned to find the maximum grid shape needed along each axis.
    2. A writing pass: the same meshes are regenerated (``random_seed_array``
       is shared between passes, so the random rotations are reproduced
       exactly), voxelized, padded to the common shape, and uploaded to the
       HDF5 file in batches of ``upload_set`` frames.

    The last ``augmented_frames_number`` entries of the path list are treated
    as augmented duplicates and receive random rotations; earlier entries are
    stored unrotated.  Each frame contributes two rows (femur then tibia), so
    the dataset shape is (2 * n_frames, X, Y, Z).

    Parameters
    ----------
    list_of_path_to_frames : list of str
        Paths to frame directories; augmented duplicates must come last.
    path_to_save_dir : str
        Output directory.  NOTE(review): unlike the other
        generate_*_storage_mat helpers, this function never calls
        os.makedirs, so the directory must already exist -- confirm.
    augmented_frames_number : int
        How many trailing entries are augmented (randomly rotated) copies.
    save_file_name : str
        Output file name (without the '.h5py' suffix).
    upload_set : int
        Number of frames voxelized in memory per HDF5 write.
    compression : str or None
        Compression passed to h5py's create_dataset.
    save_as_type : str
        NumPy dtype name used for in-memory buffers and storage.

    Returns
    -------
    None
    '''
    import time
    spacing = 0.5  # voxel edge length in mm
    total_number_of_frames = len(list_of_path_to_frames)
    original_number_frames = total_number_of_frames - augmented_frames_number
    # Two seeds per augmented frame (femur + tibia); shared by both passes so
    # the sizing pass and the writing pass see identical random rotations.
    random_seed_array = np.random.randint(low=0, high=2**32, size=augmented_frames_number * 2)
    max_shape_vector = np.zeros(3)
    ticker = -1
    # ---- Pass 1: find the maximum voxel-grid shape over all meshes --------
    for path in list_of_path_to_frames:
        load_mesh_path_time = time.time()
        ticker += 1
        # Parent of the frame directory is the laterality folder ('lt'/'rt').
        lat = os.path.abspath(os.sep.join(os.path.normpath(path).split(os.sep)[:-1]))
        lat_split = lat.split(os.sep)[-1]
        # NOTE(review): if the laterality is neither 'lt' nor 'rt' this stays
        # [0, 0] and the np.min below would fail -- confirm inputs.
        return_vox_tib_fib = [0, 0]
        if lat_split.lower() == 'lt':
            # Meshes live in a sibling 'stl' directory of the laterality folder.
            new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
            # Left Femur
            LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
            LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
            # Left Tibia
            LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
            LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
            if ticker >= original_number_frames:
                # Augmented copy: apply a reproducible random rotation.
                seed_indexer = ticker - original_number_frames
                # print('Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
            else:
                # print('Not Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
        if lat_split.lower() == 'rt':
            new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
            # Right Femur
            RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
            RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
            # Right Tibia
            RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
            RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
            if ticker >= original_number_frames:
                seed_indexer = ticker - original_number_frames
                # print('Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
            else:
                # print('Not Random')
                return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
                return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
        # Grow max_shape_vector with each bone's bin counts (same binning
        # convention as voxel_from_array: extent from min(coords,0) to
        # max(coords,0), divided by `spacing`).
        for bone in return_vox_tib_fib:
            mesh_min_vec = np.min(bone, axis=0)
            mesh_min_vec = np.where(mesh_min_vec > 0, 0, mesh_min_vec)
            mesh_max_vec = np.max(bone, axis=0)
            mesh_max_vec = np.where(mesh_max_vec < 0, 0, mesh_max_vec)
            range_vec = mesh_max_vec - mesh_min_vec
            bins_vec = np.ceil(range_vec / spacing)
            if bins_vec[0] > max_shape_vector[0]:
                max_shape_vector[0] = int(bins_vec[0])
            if bins_vec[1] > max_shape_vector[1]:
                max_shape_vector[1] = int(bins_vec[1])
            if bins_vec[2] > max_shape_vector[2]:
                max_shape_vector[2] = int(bins_vec[2])
        time_to_load_mesh_path = time.time() - load_mesh_path_time
        if ticker % 200 == 0:
            print('\n')
            print("Voxel mesh load: ", ticker)
            print('Time to load mesh path: ', time_to_load_mesh_path)
        # if ticker >= original_number_frames:
        #     print('Random')
        # else:
        #     print('Not Random')
    # ---- Pass 2: voxelize, pad to the common shape, and upload in batches --
    vox_mat_shape = np.array([total_number_of_frames * 2, int(max_shape_vector[0]), int(max_shape_vector[1]), int(max_shape_vector[2])]).astype('int16')
    print('Voxel data shape: ', vox_mat_shape)
    vox_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
    vox_dset = vox_file.create_dataset('vox_dset', shape=vox_mat_shape, dtype=save_as_type, compression=compression)
    ticker1 = -1  # batch counter (incremented but otherwise unused)
    ticker3 = -1  # global frame counter across all batches
    for path_indx in range(int(np.ceil(len(list_of_path_to_frames) / upload_set))):
        ticker1 += 1
        num_sub_frames = len(list_of_path_to_frames[path_indx * upload_set: path_indx * upload_set + upload_set])
        # In-memory buffer for this batch: two rows (femur, tibia) per frame.
        vox_mat_sub = np.zeros((2 * num_sub_frames, vox_mat_shape[1], vox_mat_shape[2], vox_mat_shape[3])).astype(save_as_type)
        ticker2 = -1  # index of the frame within the current batch
        for path in list_of_path_to_frames[path_indx * upload_set: path_indx * upload_set + upload_set]:
            ticker2 += 1
            ticker3 += 1
            path_upload_time = time.time()
            lat = os.path.abspath(os.sep.join(os.path.normpath(path).split(os.sep)[:-1]))
            lat_split = lat.split(os.sep)[-1]
            return_vox_tib_fib = [0, 0]
            if lat_split.lower() == 'lt':
                new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
                # Left Femur
                LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
                LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
                # Left Tibia
                LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
                LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
                if ticker3 >= original_number_frames:
                    # Same seeds as pass 1, so the rotated meshes fit the
                    # grid shape computed there.
                    seed_indexer = ticker3 - original_number_frames
                    print('Random:', ticker3, 'Seed: ', seed_indexer)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
                else:
                    print('Not Random: ', ticker3)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
            if lat_split.lower() == 'rt':
                new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
                # Right Femur
                RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
                RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
                # Right Tibia
                RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
                RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
                if ticker3 >= original_number_frames:
                    seed_indexer = ticker3 - original_number_frames
                    # print('Random:', ticker3, 'Seed: ', seed_indexer)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
                else:
                    # print('Not Random: ', ticker3)
                    return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
                    return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
            # Femur at even row, tibia at odd row; both padded to the common
            # grid shape before going into the batch buffer.
            vox_dset_mat_1 = voxel_from_array(return_vox_tib_fib[0], spacing=spacing, mark_origin=True)
            vox_mat_sub[2 * ticker2] = matrix_padder_to_size(vox_dset_mat_1.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
            vox_dset_mat_2 = voxel_from_array(return_vox_tib_fib[1], spacing=spacing, mark_origin=True)
            vox_mat_sub[2 * ticker2 + 1] = matrix_padder_to_size(vox_dset_mat_2.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
            # Release the mesh points before the next (large) allocation.
            return_vox_tib_fib = None
            if ticker3 % 50 == 0:
                print('Voxel: ', ticker3)
                print('Voxel pad creation time: ', time.time() - path_upload_time)
        vox_mat_sub_time = time.time()
        # One HDF5 write per batch (writing whole batches is much cheaper
        # than writing per-frame).
        vox_dset[2 * path_indx * upload_set: 2 * path_indx * upload_set + 2 * num_sub_frames] = vox_mat_sub.astype(save_as_type)
        print('Time to upload vox_mat_sub per instance: ', (time.time() - vox_mat_sub_time) / (num_sub_frames * 2))
        vox_mat_sub = None
    vox_file.close()
    return None
# ---------------------------------------------------------------
# ---------------------------------------------------------------
if __name__ == '__main__':
    # Build the full, sorted list of frame paths under the data directory.
    dict_of_acts = generate_dict_of_acts_with_patients()
    dict_of_paths = generate_dict_path_to_frames(dict_of_acts)
    list_of_frames = generate_comprehensive_list_of_frames(dict_of_paths)
    # generate_comprehensive_list_of_frames already sorts; this is a no-op.
    list_of_frames = sorted(list_of_frames)
    # Augment the first 30 frames with 100 randomly re-sampled duplicates.
    augmented_frames_number = 100
    data_aug_list_of_frames = random_samples_selector(list_of_frames[:30], augmented_frames_number)
    # generate_cali_storage_mat(data_aug_list_of_frames, save_dir)
    # generate_label_storage_mat(data_aug_list_of_frames, save_dir)
    # generate_image_storage_mat(data_aug_list_of_frames, save_dir)
    # generate_voxel_storage_mat(data_aug_list_of_frames, save_dir, augmented_frames_number)
    # NOTE(review): `save_dir` is not defined anywhere in this section --
    # confirm it is set elsewhere in the file, otherwise this raises
    # NameError at runtime.
    generate_voxel_storage_mat(data_aug_list_of_frames, save_dir, augmented_frames_number, save_file_name='voxels_test')
# ---------------------------------------------------------------
# def generate_voxel_storage_mat(list_of_path_to_frames, path_to_save_dir, augmented_frames_number, save_file_name='voxels_aug', compression='lzf', save_as_type='int8'):
# import time
# spacing = 0.5
# total_number_of_frames = len(list_of_path_to_frames)
# original_number_frames = total_number_of_frames - augmented_frames_number
# random_seed_array = np.random.randint(low=0, high=2**32, size=augmented_frames_number * 2)
# max_shape_vector = np.zeros(3)
# ticker = -1
# for path in list_of_path_to_frames:
# load_mesh_path_time = time.time()
# ticker += 1
# lat = os.path.abspath(os.sep.join(os.path.normpath(path).split(os.sep)[:-1]))
# lat_split = lat.split(os.sep)[-1]
# return_vox_tib_fib = [0, 0]
# if lat_split.lower() == 'lt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Left Femur
# LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
# LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
# # Left Tibia
# LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
# LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
# if ticker >= original_number_frames:
# seed_indexer = ticker - original_number_frames
# # print('Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# # print('Not Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
# if lat_split.lower() == 'rt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Right Femur
# RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
# RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
# # Right Tibia
# RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
# RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
# if ticker >= original_number_frames:
# seed_indexer = ticker - original_number_frames
# # print('Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# # print('Not Random')
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
# for bone in return_vox_tib_fib:
# mesh_min_vec = np.min(bone, axis=0)
# mesh_min_vec = np.where(mesh_min_vec > 0, 0, mesh_min_vec)
# mesh_max_vec = np.max(bone, axis=0)
# mesh_max_vec = np.where(mesh_max_vec < 0, 0, mesh_max_vec)
# range_vec = mesh_max_vec - mesh_min_vec
# bins_vec = np.ceil(range_vec / spacing)
# if bins_vec[0] > max_shape_vector[0]:
# max_shape_vector[0] = int(bins_vec[0])
# if bins_vec[1] > max_shape_vector[1]:
# max_shape_vector[1] = int(bins_vec[1])
# if bins_vec[2] > max_shape_vector[2]:
# max_shape_vector[2] = int(bins_vec[2])
# time_to_load_mesh_path = time.time() - load_mesh_path_time
# if ticker % 200 == 0:
# print('\n')
# print("Voxel mesh load: ", ticker)
# print('Time to load mesh path: ', time_to_load_mesh_path)
# if ticker >= original_number_frames:
# print('Random')
# else:
# print('Not Random')
# vox_mat_shape = np.array([total_number_of_frames * 2, int(max_shape_vector[0]), int(max_shape_vector[1]), int(max_shape_vector[2])]).astype('int16')
# print('Voxel data shape: ', vox_mat_shape)
# vox_file = h5py.File(os.path.join(path_to_save_dir, save_file_name + '.h5py'), 'w')
# vox_dset = vox_file.create_dataset('vox_dset', shape=vox_mat_shape, dtype=save_as_type, compression=None)
# ticker2 = -1
# for path in list_of_path_to_frames:
# time_to_vox_pad = time.time()
# ticker2 += 1
# lat = os.path.abspath(os.sep.join(os.path.normpath(path).split(os.sep)[:-1]))
# lat_split = lat.split(os.sep)[-1]
# return_vox_tib_fib = [0, 0]
# if lat_split.lower() == 'lt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Left Femur
# LFemur = trimesh.load(os.path.join(new_path, 'LFemur.stl'))
# LFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LFemur_PTS.txt'), header=None))
# # Left Tibia
# LTibia = trimesh.load(os.path.join(new_path, 'LTibia.stl'))
# LTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'LTibia_PTS.txt'), header=None))
# if ticker2 >= original_number_frames:
# seed_indexer = ticker2 - original_number_frames
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(LFemur, LFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(LTibia, LTibia_PTS, voxelize_dim=0.5, random_disp=False)
# if lat_split.lower() == 'rt':
# new_path = os.path.normpath(os.path.join(os.path.abspath(os.sep.join(os.path.normpath(lat).split(os.sep)[:-1])), 'stl'))
# # Right Femur
# RFemur = trimesh.load(os.path.join(new_path, 'RFemur.stl'))
# RFemur_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RFemur_PTS.txt'), header=None))
# # Right Tibia
# RTibia = trimesh.load(os.path.join(new_path, 'RTibia.stl'))
# RTibia_PTS = np.array(pd.read_csv(os.path.join(new_path, 'RTibia_PTS.txt'), header=None))
# if ticker2 >= original_number_frames:
# seed_indexer = ticker2 - original_number_frames
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer])
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=True, random_seed=random_seed_array[2 * seed_indexer + 1])
# else:
# return_vox_tib_fib[0] = extract_stl_to_meshpoints(RFemur, RFemur_PTS, voxelize_dim=0.5, random_disp=False)
# return_vox_tib_fib[1] = extract_stl_to_meshpoints(RTibia, RTibia_PTS, voxelize_dim=0.5, random_disp=False)
# time_vox_mat = time.time()
# vox_dset_mat_1 = voxel_from_array(return_vox_tib_fib[0], spacing=spacing, mark_origin=True)
# print('Make voxel array 1: ', time.time() - time_vox_mat)
# vox_dset[2 * ticker2] = matrix_padder_to_size(vox_dset_mat_1.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
# print('Upload voxel array 1: ', time.time() - time_vox_mat)
# time_vox_mat = time.time()
# vox_dset_mat_2 = voxel_from_array(return_vox_tib_fib[1], spacing=spacing, mark_origin=True)
# print('Make voxel array 2: ', time.time() - time_vox_mat)
# vox_dset[2 * ticker2 + 1] = matrix_padder_to_size(vox_dset_mat_2.astype(save_as_type), vox_mat_shape[1:]).astype(save_as_type)
# print('Upload voxel array 2: ', time.time() - time_vox_mat)
# print('\n')
# return_vox_tib_fib = [0, 0]
# time_to_vox_pad_load = time.time() - time_to_vox_pad
# if ticker2 % 20 == 0:
# print("Voxel: ", ticker2)
# print('Time to vox pad load: ', time_to_vox_pad_load)
# vox_file.close()
# return None
| Python |
3D | john-drago/fluoro | code/datacomp/data_organization.py | .py | 31,986 | 875 | '''
This file will organize the data in the 'fluoro/data' folder. It will organize the stl files and the photos into large matrices to allow for training.
'''
import os
from coord_change import Global2Local_Coord, Basis2Angles
import scipy.io as sio
import skimage
import numpy as np
import trimesh
import pandas as pd
import sys
from scipy import ndimage
import h5py
import pickle
from h5py_multidimensional_array import variable_matrix_storer, variable_matrix_padder
# from voxel_graph import simple_voxel_graph
print('\n', '\n', '\n')
# this should be located within the */fluoro/data folder
top_level_dir = '/Users/johndrago/fluoro/data'
def create_dir_path(*name):
    '''
    Join any number of path components from left to right with '/' and
    normalize the result, e.g. create_dir_path('foo', 'bar', 'egg', 'fyegg')
    -> os.path.normpath('foo/bar/egg/fyegg').
    '''
    # '/'.join reproduces the historical behavior of appending '/' after
    # every component except the last, before normalization.
    return os.path.normpath('/'.join(name))
def generate_dict_of_acts_with_patients():
    '''
    Scan the data directory and map each activity to the patients who did it.

    Walks the module-level ``top_level_dir`` (assumed layout
    */fluoro/data/<activity>/<patient>) and, for every activity directory,
    lists its patient sub-directories.

    Returns:
        dict: activity directory name -> list of patient directory names.
    '''
    pt_dict = {}
    for activity in os.listdir(create_dir_path(top_level_dir)):
        # Skip macOS Finder droppings and the pipeline's own output folders.
        if activity in ('.DS_Store', 'compilation', 'prediction'):
            continue
        pt_dict[activity] = [
            pt for pt in os.listdir(create_dir_path(top_level_dir, activity))
            if pt != '.DS_Store'
        ]
    return pt_dict
def generate_dict_path_to_frames(dict_of_act_pts):
    '''
    Map every laterality directory to the list of frame directories inside it.

    input:
        dict_of_act_pts --> dict of activity -> list of patients for that
        activity (as produced by generate_dict_of_acts_with_patients)
    output:
        dict whose keys are paths of the form
        */fluoro/data/activity/patient/laterality and whose values are the
        frame directory names found there ('.DS_Store' and 'cali' excluded).
    '''
    path_to_frames_dict = {}
    for act, pt_list in dict_of_act_pts.items():
        for pt in pt_list:
            pt_dir = create_dir_path(top_level_dir, act, pt)
            for lat in os.listdir(pt_dir):
                # 'stl' holds the bone meshes, not frames; skip it.
                if lat in ('.DS_Store', 'stl'):
                    continue
                frames_dir = create_dir_path(top_level_dir, act, pt, lat)
                frames = [
                    frame for frame in os.listdir(frames_dir)
                    if frame != '.DS_Store' and frame != 'cali'
                ]
                path_to_frames_dict[frames_dir] = frames
    return path_to_frames_dict
def extract_calibration_data(path_to_cali):
    '''
    Return the R12 and V12 variables from the reg2fl***.mat file in the
    "data/activity/patient/laterality/cali" folder.

    input:
        path_to_cali --> path to the directory that contains the 'cali' folder
    output:
        [R12, V12] --> rotation matrix and translation vector relating the
        two fluoroscopes
    raises:
        FileNotFoundError: if no 'reg2fl*' file exists in the cali folder.
    '''
    cali_str = 'cali'
    cali_dir = create_dir_path(path_to_cali, cali_str)
    for fle in os.listdir(cali_dir):
        # Calibration files are named 'reg2fl...' (case-insensitive prefix).
        if fle[0:6].lower() == 'reg2fl':
            fluoro_file = sio.loadmat(create_dir_path(path_to_cali, cali_str, fle))
            break
    else:
        # BUGFIX: previously fell through to a NameError on 'fluoro_file';
        # fail with an explicit, catchable error instead.
        raise FileNotFoundError('No reg2fl*.mat calibration file found in ' + cali_dir)
    return [fluoro_file['R12'], fluoro_file['V12']]
def extract_image_data(path_to_frame, resize_shape=(128, 128)):
    '''
    Load the two fluoroscope PNGs for one frame, resize them, and convert
    them to grayscale.

    input:
        path_to_frame: directory containing the frame's 'F1*.png' and
            'F2*.png' images
        resize_shape: target (rows, cols) of each resized image
    output:
        np.ndarray of shape (2, rows, cols): [image from fluoroscope 1,
        image from fluoroscope 2]
    '''
    image_array = np.zeros((2, resize_shape[0], resize_shape[1]))
    # Filename prefix decides which slot the image lands in.
    slot_by_prefix = {'F1': 0, 'F2': 1}
    for fle in os.listdir(os.path.normpath(path_to_frame)):
        if fle[-4:] != '.png':
            continue
        slot = slot_by_prefix.get(fle[0:2])
        if slot is None:
            continue
        loaded = skimage.io.imread(create_dir_path(path_to_frame, fle))
        resized = skimage.transform.resize(loaded, resize_shape, anti_aliasing=True)
        image_array[slot] = skimage.color.rgb2gray(resized)
    return np.array(image_array)
def extract_labels_rot_trans_femur_tib_data(path_to_frame):
    '''
    Read the registration results for one frame and return the rotation
    (as three angles) and translation for both the FEMUR and the TIBIA.

    input:
        path_to_frame --> directory holding the frame's results .mat file
    output:
        np.ndarray of shape (2, 6):
            row 0 --> [femur angles (3), femur translation (3)]
            row 1 --> [tibia angles (3), tibia translation (3)]
    '''
    femur_tib_data = np.zeros((2, 6))
    for fle in os.listdir(os.path.normpath(path_to_frame)):
        if fle[-4:] != '.mat':
            continue
        results_file = sio.loadmat(create_dir_path(path_to_frame, fle))
        # .mat variable names: femur data lives under 'Cup_RV', tibia under
        # 'Stem_RV' (legacy naming from the registration tool).
        for row, keyword in enumerate(('Cup_RV', 'Stem_RV')):
            bone_data = results_file[keyword]
            angles = np.array(Basis2Angles(bone_data[0][0][0]))
            translation = bone_data[0][0][1]
            femur_tib_data[row, 0:3] = angles.reshape(3)
            femur_tib_data[row, 3:6] = translation.reshape(3)
        break  # only the first .mat file in the directory is used
    return femur_tib_data
def extract_stl_to_voxel_trimesh(mesh_obj, PTS_file, voxelize_dim=0.5):
    '''
    Voxelize an STL mesh with trimesh after shifting it into the local
    anatomical coordinate frame defined by the PTS landmarks.

    input:
        mesh_obj --> loaded trimesh mesh object (stl file)
        PTS_file --> landmark points (array-like, at least 4 rows x 3 cols)
        voxelize_dim --> voxel edge length used by trimesh's voxelizer
    output:
        3D integer voxel volume (trimesh occupancy matrix cast to int)

    Frame construction (from the PTS rows):
        x axis: from row 1 to row 0
        provisional z axis: from row 3 to row 2
        y = cross(z, x); z is then re-derived as cross(x, y) because x and
        the provisional z are not guaranteed to be orthogonal.
    The origin is the midpoint of the two x-axis landmarks.
    '''
    PTS_file = np.array(PTS_file)
    x_axis = PTS_file[0, :] - PTS_file[1, :]
    z_axis_raw = PTS_file[2, :] - PTS_file[3, :]
    y_axis = np.cross(z_axis_raw, x_axis)
    # Rebuild z from x and y so the basis is truly orthonormal.
    z_axis = np.cross(x_axis, y_axis)
    x_unit = x_axis / np.linalg.norm(x_axis)
    y_unit = y_axis / np.linalg.norm(y_axis)
    z_unit = z_axis / np.linalg.norm(z_axis)
    # Rotation matrix with the unit axes as columns.
    rot_mat = np.column_stack((x_unit, y_unit, z_unit))
    origin_local = PTS_file[0:2, :].mean(axis=0)
    local_vertices = Global2Local_Coord(rot_mat, origin_local, mesh_obj.vertices)
    local_mesh = trimesh.Trimesh(vertices=local_vertices, faces=mesh_obj.faces)
    voxelized = local_mesh.voxelized(voxelize_dim)
    return voxelized.matrix.astype(int)
def voxel_from_array(mesh_vertices, spacing=0.5, mark_origin=False, location_of_origin=np.array([0, 0, 0]), origin_value=2):
    '''
    Bin mesh vertices into a binary voxel volume.

    A voxel is set to 1 if at least one vertex falls in (or on the boundary
    of) that bin. The bounding box is expanded so it always contains the
    global origin (min clamped to <= 0, max clamped to >= 0).

    input:
        mesh_vertices --> np.array (n, 3) of vertex locations
        spacing --> voxel edge length in mm
        mark_origin --> if True, write ``origin_value`` into the voxel that
            contains ``location_of_origin``
        location_of_origin --> point to mark (read-only; never mutated)
        origin_value --> value stored at the origin voxel (default 2)
    output:
        int8 voxel volume of shape ceil(range / spacing) per axis
    '''
    mesh_min_vec = np.min(mesh_vertices, axis=0)
    mesh_min_vec = np.where(mesh_min_vec > 0, 0, mesh_min_vec)
    mesh_max_vec = np.max(mesh_vertices, axis=0)
    mesh_max_vec = np.where(mesh_max_vec < 0, 0, mesh_max_vec)
    # Shift so every coordinate is >= 0 before binning.
    shifted = mesh_vertices - mesh_min_vec
    range_vec = mesh_max_vec - mesh_min_vec
    bins_vec = np.ceil(range_vec / spacing).astype('int32')
    bin_mat = np.zeros(bins_vec)
    for indx in range(mesh_vertices.shape[0]):
        lo = np.floor(shifted[indx] / spacing).astype(int)
        hi = np.ceil(shifted[indx] / spacing).astype(int) + 1
        # BUGFIX: a vertex lying exactly on the upper bounding face used to
        # produce lo == bins (an empty slice), silently dropping the point;
        # clamp indices into the valid bin range instead.
        lo = np.minimum(lo, bins_vec - 1)
        hi = np.minimum(hi, bins_vec)
        bin_mat[lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2]] = 1
    if mark_origin:
        org = location_of_origin - mesh_min_vec
        # BUGFIX: clamp for the same boundary case, which used to IndexError.
        org_idx = np.minimum(np.floor(org / spacing).astype(int), bins_vec - 1)
        bin_mat[org_idx[0], org_idx[1], org_idx[2]] = origin_value
    return bin_mat.astype('int8')
def extract_stl_to_voxel(mesh_obj, PTS_file, voxelize_dim=0.5):
    '''
    Voxelize an STL mesh with the in-house voxelizer (voxel_from_array)
    after shifting its vertices into the local anatomical coordinate frame
    defined by the PTS landmarks.

    input:
        mesh_obj --> loaded trimesh mesh object (stl file)
        PTS_file --> landmark points (array-like, at least 4 rows x 3 cols)
        voxelize_dim --> voxel edge length in mm
    output:
        int8 voxel volume with 1 where a vertex falls; the local origin is
        marked as well (voxel_from_array is called with mark_origin=True)

    Frame construction (from the PTS rows):
        x axis: from row 1 to row 0
        provisional z axis: from row 3 to row 2
        y = cross(z, x); z is then re-derived as cross(x, y) because x and
        the provisional z are not guaranteed to be orthogonal.
    The origin is the midpoint of the two x-axis landmarks.
    '''
    PTS_file = np.array(PTS_file)
    x_axis = PTS_file[0, :] - PTS_file[1, :]
    z_axis_raw = PTS_file[2, :] - PTS_file[3, :]
    y_axis = np.cross(z_axis_raw, x_axis)
    # Rebuild z from x and y so the basis is truly orthonormal.
    z_axis = np.cross(x_axis, y_axis)
    x_unit = x_axis / np.linalg.norm(x_axis)
    y_unit = y_axis / np.linalg.norm(y_axis)
    z_unit = z_axis / np.linalg.norm(z_axis)
    # Rotation matrix with the unit axes as columns.
    rot_mat = np.column_stack((x_unit, y_unit, z_unit))
    origin_local = PTS_file[0:2, :].mean(axis=0)
    local_vertices = Global2Local_Coord(rot_mat, origin_local, mesh_obj.vertices)
    bin_mat = voxel_from_array(local_vertices, spacing=voxelize_dim, mark_origin=True)
    return bin_mat.astype('int8')
def stl_located(path_to_frames, voxelized_dim=0.5):
    '''
    Voxelize the femur and tibia STL meshes for one laterality directory.

    input:
        path_to_frames --> */fluoro/data/activity/patient/laterality, where
            the final component is 'Lt' or 'Rt' (case-insensitive)
        voxelized_dim --> voxel edge length forwarded to extract_stl_to_voxel
    output:
        [femur 3D voxel model, tibia 3D voxel model]; if the final path
        component is neither 'lt' nor 'rt', returns [0, 0] unchanged.
    '''
    lat = os.path.normpath(path_to_frames)
    lat_split = lat.split(os.sep)[-1]
    return_vox_tib_fib = [0, 0]
    # Laterality directory name -> STL filename prefix ('LFemur.stl', ...).
    prefix_by_side = {'lt': 'L', 'rt': 'R'}
    prefix = prefix_by_side.get(lat_split.lower())
    if prefix is not None:
        # '.../<patient>/<Lt|Rt>' -> '.../<patient>/stl' (strip the 3-char
        # laterality suffix, i.e. separator + two letters).
        new_path = create_dir_path(os.path.normpath(lat[:-3]), 'stl')
        for slot, bone in enumerate(('Femur', 'Tibia')):
            mesh = trimesh.load(create_dir_path(new_path, prefix + bone + '.stl'))
            pts = np.array(pd.read_csv(create_dir_path(new_path, prefix + bone + '_PTS.txt'), header=None))
            # BUGFIX: voxelized_dim was accepted but never forwarded, so the
            # caller's voxel size was silently ignored (0.5 was always used).
            return_vox_tib_fib[slot] = extract_stl_to_voxel(mesh, pts, voxelize_dim=voxelized_dim)
    return return_vox_tib_fib
def voxel_binary_to_distance_transform(voxel_binary):
    '''
    Convert a binary voxel data set to its Euclidean distance transform
    (https://en.wikipedia.org/wiki/Distance_transform): every nonzero voxel
    is replaced by its distance to the nearest zero voxel.
    '''
    distance_map = ndimage.distance_transform_edt(voxel_binary)
    return distance_map
def directory_path_to_frames(path_to_frame):
    '''
    Return the sorted full paths of every frame directory under a
    laterality directory.

    input:
        path_to_frame --> */fluoro/data/act/patient/laterality
    output:
        sorted list of */fluoro/data/act/patient/laterality/frame paths
        ('.DS_Store' and the 'cali' folder are excluded)
    '''
    frame_paths = [
        create_dir_path(path_to_frame, frme)
        for frme in os.listdir(path_to_frame)
        if frme != '.DS_Store' and frme != 'cali'
    ]
    return sorted(frame_paths)
def generate_and_save_cali_compilation_matrix(list_of_path_to_frames, dict_of_path_to_frames, path_to_save_compilation):
    '''
    Compile the fluoroscope calibration (rotation + translation) for every
    frame into two matrices and save them to 'calibration.h5py'.

    This function will take a sorted list of the paths to where the frames are held, i.e.:
        */fluoro/data/activity/patient/laterality
    It will also take a dictionary, where the list of paths serve as the keys, and the value is a list corresponding to the frames for a given path to frames.
        e.g. */fluoro/data/activity/patient/laterality/frame
    It will then generate a matrix of the following dimensions and characteristics:
    Rotation matrix and translation vector describing the relative positioning of the two fluoroscopes.
        shape = 9 + 3, frames*2.

    Datasets written: 'cali_len9_rot' (frames*2, 12): flattened 3x3 rotation
    + translation; 'cali_len3_rot' (frames*2, 6): rotation angles + translation.
    Every frame contributes two identical rows, one per bone (femur, tibia),
    to stay aligned with the per-bone label/voxel matrices.
    '''
    os.makedirs(path_to_save_compilation, exist_ok=True)
    list_of_path_to_frames.sort()
    list_of_bones = ['Femur', 'Tibia']
    # Count frames first so the output matrices can be pre-allocated.
    total_number_of_frames = 0
    for frame_dir1 in sorted(list_of_path_to_frames):
        for frme1 in sorted(dict_of_path_to_frames[frame_dir1]):
            total_number_of_frames += 1
    number_of_frames_to_match = total_number_of_frames * len(list_of_bones)
    print('Number of frames to match:\t', number_of_frames_to_match)
    calibration_data_len9_mat = np.zeros((number_of_frames_to_match, 12))
    calibration_data_len3_mat = np.zeros((number_of_frames_to_match, 6))
    ticker = -1
    calibration_file = h5py.File(create_dir_path(path_to_save_compilation, 'calibration.h5py'), 'w')
    for frame_dir2 in sorted(list_of_path_to_frames):
        for frme2 in sorted(dict_of_path_to_frames[frame_dir2]):
            print('\n\n', create_dir_path(frame_dir2, frme2))
            ticker += 1
            # Calibration is per laterality directory, so it is re-read (and
            # identical) for every frame in that directory.
            temp_cali_data = extract_calibration_data(frame_dir2)
            temp_cali_len9_rot = np.reshape(temp_cali_data[0], 9)
            temp_cali_len3_rot = np.reshape(Basis2Angles(temp_cali_data[0]), 3)
            temp_cali_trans = np.reshape(temp_cali_data[1], 3)
            interim_array_holder_len9 = np.hstack((temp_cali_len9_rot, temp_cali_trans))
            interim_array_holder_len3 = np.hstack((temp_cali_len3_rot, temp_cali_trans))
            # Duplicate the row: slot 2k is the femur, slot 2k+1 the tibia.
            calibration_data_len9_mat[2 * ticker:2 * ticker + 2] = np.array([interim_array_holder_len9, interim_array_holder_len9])
            calibration_data_len3_mat[2 * ticker:2 * ticker + 2] = np.array([interim_array_holder_len3, interim_array_holder_len3])
    cali_9m = calibration_file.create_dataset('cali_len9_rot', data=calibration_data_len9_mat)
    cali_3m = calibration_file.create_dataset('cali_len3_rot', data=calibration_data_len3_mat)
    calibration_file.close()
    # NOTE(review): these h5py datasets belong to a file that was just
    # closed, so reading them after return will fail — confirm callers only
    # rely on the side effect of writing 'calibration.h5py'.
    return cali_9m, cali_3m
def generate_and_save_image_compilation_matrix(list_of_path_to_frames, dict_of_path_to_frames, path_to_save_compilation):
    '''
    Compile the grayscale fluoroscope image pairs for every frame into one
    matrix and save it to 'images.h5py' (dataset 'image_dset').

    This function will take a sorted list of the paths to where the frames are held, i.e.:
        */fluoro/data/activity/patient/laterality
    It will also take a dictionary, where the list of paths serve as the keys, and the value is a list corresponding to the frames for a given path to frames.
        e.g. */fluoro/data/activity/patient/laterality/frame
    It will then generate a matrix of the following dimensions and characteristics:
    - Matrix of two png images, converted to gray scale and downsized.
        shape = frames * 2, 2, 128, 128

    Each frame's image pair appears twice (rows 2k and 2k+1), once per bone
    (femur, tibia), to stay aligned with the label/voxel matrices.
    '''
    os.makedirs(path_to_save_compilation, exist_ok=True)
    list_of_path_to_frames.sort()
    list_of_bones = ['Femur', 'Tibia']
    # Count frames first so the output matrix can be pre-allocated.
    total_number_of_frames = 0
    for frame_dir1 in sorted(list_of_path_to_frames):
        for frme1 in sorted(dict_of_path_to_frames[frame_dir1]):
            total_number_of_frames += 1
    number_of_frames_to_match = total_number_of_frames * len(list_of_bones)
    print('Number of frames to match:\t', number_of_frames_to_match)
    # NOTE(review): initialized with random values rather than zeros; every
    # row is overwritten below, so this only matters if a frame were skipped.
    image_data_mat = np.random.rand(number_of_frames_to_match, 2, 128, 128)
    ticker = -1
    image_file = h5py.File(create_dir_path(path_to_save_compilation, 'images.h5py'), 'w')
    for frame_dir2 in sorted(list_of_path_to_frames):
        for frme2 in sorted(dict_of_path_to_frames[frame_dir2]):
            print('\n\n', create_dir_path(frame_dir2, frme2))
            ticker += 1
            temp_image_data = extract_image_data(create_dir_path(frame_dir2, frme2))
            # Duplicate the pair: slot 2k is the femur, slot 2k+1 the tibia.
            image_data_mat[2 * ticker:2 * ticker + 2, :, :, :] = temp_image_data
    image_dset = image_file.create_dataset('image_dset', data=image_data_mat)
    image_file.close()
    # NOTE(review): this dataset belongs to a file that was just closed, so
    # reading it after return will fail — confirm callers only rely on the
    # side effect of writing 'images.h5py'.
    return image_dset
def generate_and_save_label_compilation_matrix(list_of_path_to_frames, dict_of_path_to_frames, path_to_save_compilation):
    '''
    Compile the per-frame pose labels into one matrix and save it to
    'labels.h5py' (dataset 'labels_dset').

    This function will take a sorted list of the paths to where the frames are held, i.e.:
        */fluoro/data/activity/patient/laterality
    It will also take a dictionary, where the list of paths serve as the keys, and the value is a list corresponding to the frames for a given path to frames.
        e.g. */fluoro/data/activity/patient/laterality/frame
    It will then generate a matrix of the following dimensions and characteristics:
    Labels for each frame; comprised of rotation vector (theta, phi, psi) and translation vector (x,y,z) describing where the local coordinate system of the bone of interest has shifted to.
    shape:
        total_number_of_frames * 2, 6

    Row 2k is the femur pose and row 2k+1 the tibia pose for frame k.
    '''
    os.makedirs(path_to_save_compilation, exist_ok=True)
    list_of_path_to_frames.sort()
    list_of_bones = ['Femur', 'Tibia']
    # Count frames first so the output matrix can be pre-allocated.
    total_number_of_frames = 0
    for frame_dir1 in sorted(list_of_path_to_frames):
        for frme1 in sorted(dict_of_path_to_frames[frame_dir1]):
            total_number_of_frames += 1
    print('Total number of frames:\t', total_number_of_frames)
    number_of_frames_to_match = total_number_of_frames * len(list_of_bones)
    print('Number of frames to match:\t', number_of_frames_to_match)
    # NOTE(review): initialized with random values rather than zeros; every
    # row is overwritten below, so this only matters if a frame were skipped.
    label_data_mat = np.random.rand(number_of_frames_to_match, 6)
    ticker = -1
    label_file = h5py.File(create_dir_path(path_to_save_compilation, 'labels.h5py'), 'w')
    for frame_dir2 in sorted(list_of_path_to_frames):
        for frme2 in sorted(dict_of_path_to_frames[frame_dir2]):
            print(create_dir_path(frame_dir2, frme2))
            ticker += 1
            # (2, 6) block: femur row then tibia row for this frame.
            temp_label_data = extract_labels_rot_trans_femur_tib_data(create_dir_path(frame_dir2, frme2))
            label_data_mat[2 * ticker: 2 * ticker + 2, :] = temp_label_data
    labels_dset = label_file.create_dataset('labels_dset', data=label_data_mat)
    label_file.close()
    # NOTE(review): this dataset belongs to a file that was just closed, so
    # reading it after return will fail — confirm callers only rely on the
    # side effect of writing 'labels.h5py'.
    return labels_dset
def generate_and_save_mesh_voxel_compilation_matrix(list_of_path_to_frames, dict_of_path_to_frames, path_to_save_compilation, storage_file_name=None, save_as_type='uint8'):
    '''
    Voxelize the femur/tibia STL pair for every laterality directory and
    store the (variable-shape) voxel volumes to '<storage_file_name>.h5py'.

    input:
        list_of_path_to_frames --> sorted list of
            */fluoro/data/activity/patient/laterality paths
        dict_of_path_to_frames --> maps each such path to its frame names
        path_to_save_compilation --> output directory (created if missing)
        storage_file_name --> basename of the output file (default 'voxels')
        save_as_type --> numpy dtype string used for each stored volume
    output:
        the value returned by variable_matrix_storer on success, or the
        string 'Well, we tried' if storage raised.

    Slot 2k holds the femur volume and slot 2k+1 the tibia volume for frame
    k, matching the row layout of the image/label/calibration matrices.
    '''
    if not storage_file_name:
        storage_file_name = 'voxels'
    os.makedirs(path_to_save_compilation, exist_ok=True)
    list_of_path_to_frames.sort()
    list_of_bones = ['Femur', 'Tibia']
    # Count frames first so the output list can be pre-allocated.
    total_number_of_frames = 0
    for frame_dir1 in sorted(list_of_path_to_frames):
        for frme1 in sorted(dict_of_path_to_frames[frame_dir1]):
            total_number_of_frames += 1
    print('Total number of frames:\t', total_number_of_frames)
    number_of_frames_to_match = total_number_of_frames * len(list_of_bones)
    print('Number of frames to match:\t', number_of_frames_to_match)
    vox_data_mat = [0] * number_of_frames_to_match
    ticker = -1
    for frame_dir2 in sorted(list_of_path_to_frames):
        # The STL pair is shared by every frame of this laterality directory,
        # so voxelize it once per directory rather than once per frame.
        temp_vox_data = stl_located(create_dir_path(frame_dir2))
        for frme2 in sorted(dict_of_path_to_frames[frame_dir2]):
            print(create_dir_path(frame_dir2, frme2))
            ticker += 1
            # FEMUR
            vox_data_mat[2 * ticker] = temp_vox_data[0].astype(save_as_type)
            # TIBIA
            vox_data_mat[2 * ticker + 1] = temp_vox_data[1].astype(save_as_type)
    vox_data_mat = np.array(vox_data_mat)
    try:
        clsd_dset = variable_matrix_storer(vox_data_mat, create_dir_path(path_to_save_compilation, storage_file_name + '.h5py'))
        we_tried = clsd_dset
    # BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt
    # and SystemExit; catch Exception and report the error class as before.
    except Exception as exc:
        print(type(exc))
        we_tried = 'Well, we tried'
    return we_tried
if __name__ == '__main__':
    # Script driver: build the directory bookkeeping structures and (when the
    # relevant lines below are uncommented) run one of the compilation steps.
    # NOTE(review): paths are hard-coded to this developer's machine /
    # external drive — confirm before running elsewhere.
    top_level_dir = '/Users/johndrago/fluoro/data'
    sys.path.append(os.getcwd())
    # First generate the dictionary of acts, with the corresponding patients who completed those acts
    dict_of_act_pts = generate_dict_of_acts_with_patients()
    # Next create dict of path to frames and then compile a list of frames in that path
    dict_of_frames = generate_dict_path_to_frames(dict_of_act_pts)
    # Generate a list of the paths
    list_of_path_to_frames = list(dict_of_frames.keys())
    # Sort the paths in alphabetical order
    list_of_path_to_frames.sort()
    # path_to_save_compilation = '/Users/johndrago/fluoro/data/compilation'
    path_to_save_compilation = '/Volumes/Seagate/fluoro'
    # Generate variable size voxel and save it to disk. First dimension should be total number of instances we will train on later
    # attempt1 = generate_and_save_mesh_voxel_compilation_matrix(list_of_path_to_frames=list_of_path_to_frames, dict_of_path_to_frames=dict_of_frames, path_to_save_compilation=path_to_save_compilation, storage_file_name='voxels_mark_origin')
    # attempt1 = generate_and_save_mesh_voxel_compilation_matrix(list_of_path_to_frames=list_of_path_to_frames, dict_of_path_to_frames=dict_of_frames, path_to_save_compilation=path_to_save_compilation)
    # -----------------------------------------------------------------
    # dir_file = open(os.path.join(os.getcwd(), 'vox_fluoro_hist_objects.pkl'), 'wb')
    # dir_dict = {}
    # dir_dict['top_level_dir'] = top_level_dir
    # dir_dict['dict_of_frames'] = dict_of_frames
    # dir_dict['list_of_path_to_frames'] = sorted(list_of_path_to_frames)
    # dir_dict['path_to_save_compilation'] = path_to_save_compilation
    # pickle.dump(dir_dict, dir_file)
    # dir_file.close()
    # -----------------------------------------------------------------
    # frames_per_stl_pair_dict = {}
    # indx_tracker = []
    # list_ticker = 0
    # for stl_loc in sorted(list_of_path_to_frames)[:30]:
    #     indx_tracker = []
    #     indx_tracker.append(list_ticker)
    #     indx_tracker.append(list_ticker + 1)
    #     # frames_per_stl_pair_dict[stl_loc[-8:-3] + ' - ' +stl_loc[-2:]] = len(dict_of_frames[stl_loc]) * 2
    #     frames_per_stl_pair_dict[stl_loc[-8:-3] + ' - ' + stl_loc[-2:]] = indx_tracker
    #     list_ticker = list_ticker + len(dict_of_frames[stl_loc]) * 2
    # vox_file = h5py.File('/Users/johndrago/fluoro/data/compilation/voxels_pad.h5py', 'r')
    # vox_init = vox_file['vox_dset']
    # simple_voxel_graph(vox_init[frames_per_stl_pair_dict['CR 08 - Rt'][0]])
    # vox_file.close()
# def voxel_from_array(mesh_vertices, spacing=0.5):
# '''
# This function will take in a matrix of the location of mesh vertices. It will then take the vertices and transform them into a binary voxel data set with a 1 located in the bin if a corresponding point is to be found. It will return the voxelized matrix.
# input:
# mesh_vertices --> expects np.array of locations of mesh vertices
# spacing --> the spacing of the voxels in mm
# output:
# bin_mat --> a binary voxelized matrix wtih 1's corresponding to points with a corresponding vertex
# '''
# mesh_min_vec = np.min(mesh_vertices, axis=0)
# mesh_min_mat = mesh_vertices - mesh_min_vec
# range_vec = mesh_vertices.max(axis=0) - mesh_vertices.min(axis=0)
# bins_vec = np.ceil(range_vec / spacing)
# bin_mat = np.zeros(bins_vec.astype('int32') + 2)
# for indx in range(mesh_vertices.shape[0]):
# # print(int(np.floor(mesh_min_mat[indx, 0] / spacing)))
# # print(int(np.floor(mesh_min_mat[indx, 1] / spacing)))
# # print(int(np.floor(mesh_min_mat[indx, 2] / spacing)))
# # print(type(int(np.floor(mesh_min_mat[indx, 0] / spacing))))
# # print(type(int(np.floor(mesh_min_mat[indx, 1] / spacing))))
# # print(type(int(np.floor(mesh_min_mat[indx, 2] / spacing))))
# bin_mat[int(np.floor(mesh_min_mat[indx, 0] / spacing)):int(np.ceil(mesh_min_mat[indx, 0] / spacing)) + 1, int(np.floor(mesh_min_mat[indx, 1] / spacing)):int(np.ceil(mesh_min_mat[indx, 1] / spacing)) + 1, int(np.floor(mesh_min_mat[indx, 2] / spacing)):int(np.ceil(mesh_min_mat[indx, 2] / spacing)) + 1] = 1
# return bin_mat.astype('int8')
| Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.