repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
brainiak | brainiak-master/tests/matnormal/test_matnormal_logp_conditional.py | import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import wishart, multivariate_normal
import tensorflow as tf
from brainiak.matnormal.utils import rmn
from brainiak.matnormal.matnormal_likelihoods import (
matnorm_logp_conditional_col,
matnorm_logp_conditional_row,
)
from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky
# X is m x n, so A should be m x p
m = 5  # rows of X (size of the row covariance)
n = 4  # columns of X (size of the column covariance)
p = 3  # size of the conditioned-on block (A is m x p, Q is p x p)
rtol = 1e-7  # relative tolerance when comparing against scipy
def test_against_scipy_mvn_row_conditional(seeded_rng):
    """Conditional row log-density should match scipy's MVN logpdf.

    The joint covariance is drawn from a Wishart so that every submatrix
    used below is positive definite (and hence invertible).
    """
    # have to be careful for constructing everything as a submatrix of a big
    # PSD matrix, else no guarantee that anything's invertible.
    cov_np = wishart.rvs(df=m + p + 2, scale=np.eye(m + p))
    # rowcov = CovConstant(cov_np[0:m, 0:m])
    rowcov = CovUnconstrainedCholesky(Sigma=cov_np[0:m, 0:m])
    A = cov_np[0:m, m:]  # cross-covariance block with the conditioned variable
    colcov = CovIdentity(size=n)
    Q = CovUnconstrainedCholesky(Sigma=cov_np[m:, m:])
    X = rmn(np.eye(m), np.eye(n))
    A_tf = tf.constant(A, "float64")
    X_tf = tf.constant(X, "float64")
    Q_np = Q._cov
    # Schur complement: conditional row covariance = Sigma - A Q^{-1} A'
    rowcov_np = rowcov._cov - A.dot(np.linalg.inv(Q_np)).dot((A.T))
    scipy_answer = np.sum(multivariate_normal.logpdf(
        X.T, np.zeros([m]), rowcov_np))
    tf_answer = matnorm_logp_conditional_row(X_tf, rowcov, colcov, A_tf, Q)
    assert_allclose(scipy_answer, tf_answer, rtol=rtol)
def test_against_scipy_mvn_col_conditional(seeded_rng):
    """Conditional column log-density should match scipy's MVN logpdf."""
    # have to be careful for constructing everything as a submatrix of a big
    # PSD matrix, else no guarantee that anything's invertible.
    cov_np = wishart.rvs(df=m + p + 2, scale=np.eye(m + p))
    rowcov = CovIdentity(size=m)
    colcov = CovUnconstrainedCholesky(Sigma=cov_np[0:n, 0:n])
    A = cov_np[n:, 0:n]  # cross-covariance block with the conditioned variable
    Q = CovUnconstrainedCholesky(Sigma=cov_np[n:, n:])
    X = rmn(np.eye(m), np.eye(n))
    A_tf = tf.constant(A, "float64")
    X_tf = tf.constant(X, "float64")
    Q_np = Q._cov
    # Schur complement: conditional column covariance = Sigma - A' Q^{-1} A
    colcov_np = colcov._cov - A.T.dot(np.linalg.inv(Q_np)).dot((A))
    scipy_answer = np.sum(multivariate_normal.logpdf(
        X, np.zeros([n]), colcov_np))
    tf_answer = matnorm_logp_conditional_col(X_tf, rowcov, colcov, A_tf, Q)
    assert_allclose(scipy_answer, tf_answer, rtol=rtol)
| 2,215 | 27.050633 | 76 | py |
brainiak | brainiak-master/tests/matnormal/test_matnormal_regression.py | import pytest
import numpy as np
from scipy.stats import norm, wishart, pearsonr
from brainiak.matnormal.covs import (
CovIdentity,
CovUnconstrainedCholesky,
CovUnconstrainedInvCholesky,
CovDiagonal,
)
from brainiak.matnormal.regression import MatnormalRegression
from brainiak.matnormal.utils import rmn
m = 100  # number of time points (rows of X and Y)
n = 4  # number of regressors (columns of X)
p = 5  # number of outputs/voxels (columns of B and Y)
corrtol = 0.8  # at least this much correlation between true and est to pass
def test_matnorm_regression_unconstrained(seeded_rng):
    """Recover B and predictions with an unconstrained (Cholesky) space cov.

    Model: Y = XB + eps, with Y (m x p), B (n x p), eps (m x p).
    The fit is exercised twice, once per initialization strategy, reusing
    the same covariance objects both times.
    """
    design = norm.rvs(size=(m, n))
    coefs = norm.rvs(size=(n, p))
    signal = design.dot(coefs)
    observed = signal + rmn(np.eye(m), wishart.rvs(p + 2, np.eye(p)))
    row_cov = CovIdentity(size=m)
    col_cov = CovUnconstrainedCholesky(size=p)
    for use_naive_init in (False, True):
        model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov)
        model.fit(design, observed, naive_init=use_naive_init)
        assert pearsonr(coefs.flatten(), model.beta_.flatten())[0] >= corrtol
        predicted = model.predict(design)
        assert pearsonr(predicted.flatten(), signal.flatten())[0] >= corrtol
def test_matnorm_regression_unconstrainedprec(seeded_rng):
    """Same recovery test, parameterizing the inverse Cholesky factor.

    Model: Y = XB + eps, with Y (m x p), B (n x p), eps (m x p).
    """
    design = norm.rvs(size=(m, n))
    coefs = norm.rvs(size=(n, p))
    signal = design.dot(coefs)
    noise_row_cov = np.eye(m)
    noise_col_cov = wishart.rvs(p + 2, np.eye(p))
    observed = signal + rmn(noise_row_cov, noise_col_cov)
    model = MatnormalRegression(time_cov=CovIdentity(size=m),
                                space_cov=CovUnconstrainedInvCholesky(size=p))
    model.fit(design, observed, naive_init=False)
    assert pearsonr(coefs.flatten(), model.beta_.flatten())[0] >= corrtol
    predicted = model.predict(design)
    assert pearsonr(predicted.flatten(), signal.flatten())[0] >= corrtol
def test_matnorm_regression_optimizerChoice(seeded_rng):
    """Regression fit should also work with a non-default optimizer ("CG")."""
    # Y = XB + eps
    # Y is m x p, B is n x p, eps is m x p
    X = norm.rvs(size=(m, n))
    B = norm.rvs(size=(n, p))
    Y_hat = X.dot(B)
    rowcov_true = np.eye(m)
    colcov_true = wishart.rvs(p + 2, np.eye(p))
    Y = Y_hat + rmn(rowcov_true, colcov_true)
    row_cov = CovIdentity(size=m)
    col_cov = CovUnconstrainedInvCholesky(size=p)
    # Same setup as the other tests, but with an explicit optimizer choice
    model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov,
                                optimizer="CG")
    model.fit(X, Y, naive_init=False)
    assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol
    pred_y = model.predict(X)
    assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol
def test_matnorm_regression_scaledDiag(seeded_rng):
    """Recovery with a diagonal space covariance, plus a calibration check."""
    # Y = XB + eps
    # Y is m x p, B is n x p, eps is m x p
    X = norm.rvs(size=(m, n))
    B = norm.rvs(size=(n, p))
    Y_hat = X.dot(B)
    rowcov_true = np.eye(m)
    colcov_true = np.diag(np.abs(norm.rvs(size=p)))
    Y = Y_hat + rmn(rowcov_true, colcov_true)
    row_cov = CovIdentity(size=m)
    col_cov = CovDiagonal(size=p)
    model = MatnormalRegression(time_cov=row_cov, space_cov=col_cov)
    model.fit(X, Y, naive_init=False)
    assert pearsonr(B.flatten(), model.beta_.flatten())[0] >= corrtol
    pred_y = model.predict(X)
    assert pearsonr(pred_y.flatten(), Y_hat.flatten())[0] >= corrtol
    # we only do calibration test on the scaled diag
    # model because to hit corrtol on unconstrainedCov
    # we'd need a lot more data, which would make the test slow
    X_hat = model.calibrate(Y)
    assert pearsonr(X_hat.flatten(), X.flatten())[0] >= corrtol
def test_matnorm_calibration_raises(seeded_rng):
    """calibrate() should raise RuntimeError in this tiny setup.

    Y = XB + eps with only 2 time points, 5 regressors and 3 voxels.
    """
    n_time, n_reg, n_vox = 2, 5, 3
    design = norm.rvs(size=(n_time, n_reg))
    coefs = norm.rvs(size=(n_reg, n_vox))
    noise = rmn(np.eye(n_time), np.diag(np.abs(norm.rvs(size=n_vox))))
    observed = design.dot(coefs) + noise
    model = MatnormalRegression(time_cov=CovIdentity(size=n_time),
                                space_cov=CovDiagonal(size=n_vox))
    model.fit(design, observed, naive_init=False)
    with pytest.raises(RuntimeError):
        model.calibrate(observed)
| 4,328 | 26.05625 | 76 | py |
brainiak | brainiak-master/tests/matnormal/test_matnormal_utils.py | from brainiak.matnormal.utils import (pack_trainable_vars,
unpack_trainable_vars,
flatten_cholesky_unique,
unflatten_cholesky_unique)
import tensorflow as tf
import numpy as np
import numpy.testing as npt
def test_pack_unpack(seeded_rng):
    """Packing trainable variables and unpacking them is a round trip."""
    tensor_shapes = ([2, 3], [3], [3, 4, 2], [1, 5])
    originals = []
    for shp in tensor_shapes:
        originals.append(tf.random.stateless_normal(shape=shp, seed=[0, 0]))
    packed = pack_trainable_vars(originals)
    restored = unpack_trainable_vars(packed, originals)
    for before, after in zip(originals, restored):
        assert tf.math.reduce_all(tf.equal(before, after))
def test_cholesky_uncholesky(seeded_rng):
    """Flattening a Cholesky factor and unflattening it is a round trip."""
    dim = 3
    # number of unique entries in a triangular factor
    n_free = (dim * (dim + 1)) // 2
    flat = np.random.normal(size=[n_free])
    chol = unflatten_cholesky_unique(flat)
    npt.assert_equal(chol.shape, [3, 3])
    restored_flat = flatten_cholesky_unique(chol)
    npt.assert_allclose(flat, restored_flat)
| 1,059 | 35.551724 | 64 | py |
brainiak | brainiak-master/tests/matnormal/test_matnormal_rsa.py | from brainiak.matnormal.mnrsa import MNRSA
from brainiak.utils.utils import cov2corr
from brainiak.matnormal.covs import CovIdentity, CovDiagonal
from scipy.stats import norm
from brainiak.matnormal.utils import rmn
import numpy as np
def gen_U_nips2016_example():
    """Build the 16x16 ground-truth RSA covariance from the NIPS 2016 example.

    Returns
    -------
    U : (16, 16) ndarray with 0.6 on the diagonal, 0.8 in the off-diagonal
        entries of the block for conditions 8-11, and 1 on that block's
        diagonal.
    """
    n_C = 16
    # The original also pre-allocated U = np.zeros([n_C, n_C]) and then
    # immediately overwrote it — dead assignment, removed.
    U = np.eye(n_C) * 0.6
    U[8:12, 8:12] = 0.8  # correlated block of conditions
    for cond in range(8, 12):
        U[cond, cond] = 1  # unit variance within the correlated block
    return U
def gen_brsa_data_matnorm_model(U, n_T, n_V, space_cov, time_cov, n_nureg):
    """Simulate data from the matrix-normal BRSA generative model.

    Parameters
    ----------
    U : (n_C, n_C) condition covariance (the RSA matrix to recover).
    n_T, n_V : number of time points and voxels.
    space_cov, time_cov : noise covariances across voxels / time.
    n_nureg : number of nuisance regressors.

    Returns
    -------
    train : dict with the simulated beta, X, Y, U and X_0.
    sizes : dict with n_C, n_T and n_V.
    """
    n_C = U.shape[0]
    beta = rmn(U, space_cov)  # condition-by-voxel response patterns
    X = rmn(np.eye(n_T), np.eye(n_C))  # task design matrix
    beta_0 = rmn(np.eye(n_nureg), space_cov)  # nuisance patterns
    X_0 = rmn(np.eye(n_T), np.eye(n_nureg))  # nuisance design
    Y_hat = X.dot(beta) + X_0.dot(beta_0)
    Y = Y_hat + rmn(time_cov, space_cov)  # add structured noise
    sizes = {"n_C": n_C, "n_T": n_T, "n_V": n_V}
    train = {"beta": beta, "X": X, "Y": Y, "U": U, "X_0": X_0}
    return train, sizes
def test_brsa_rudimentary(seeded_rng):
    """this test is super loose"""
    # this is Mingbo's synth example from the paper
    U = gen_U_nips2016_example()
    n_T = 150
    n_V = 250
    n_nureg = 5
    spacecov_true = np.eye(n_V)
    timecov_true = np.diag(np.abs(norm.rvs(size=(n_T))))
    tr, sz = gen_brsa_data_matnorm_model(
        U,
        n_T=n_T,
        n_V=n_V,
        n_nureg=n_nureg,
        space_cov=spacecov_true,
        time_cov=timecov_true,
    )
    spacecov_model = CovIdentity(size=n_V)
    timecov_model = CovDiagonal(size=n_T)
    model_matnorm = MNRSA(time_cov=timecov_model, space_cov=spacecov_model)
    model_matnorm.fit(tr["Y"], tr["X"], naive_init=False)
    # Compare the recovered correlation matrix against the true U (as corr)
    RMSE = np.mean((model_matnorm.C_ - cov2corr(tr["U"])) ** 2) ** 0.5
    assert RMSE < 0.1
    # Same check again, this time with naive initialization
    model_matnorm = MNRSA(time_cov=timecov_model, space_cov=spacecov_model)
    model_matnorm.fit(tr["Y"], tr["X"], naive_init=True)
    RMSE = np.mean((model_matnorm.C_ - cov2corr(tr["U"])) ** 2) ** 0.5
    assert RMSE < 0.1
| 1,934 | 24.12987 | 75 | py |
brainiak | brainiak-master/tests/factoranalysis/test_htfa.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_R():
    """Constructing HTFA with no arguments must fail with a TypeError."""
    from brainiak.factoranalysis.htfa import HTFA
    with pytest.raises(TypeError) as err:
        HTFA()
    assert "missing 2 required positional arguments" in str(err.value)
def test_X():
    """HTFA.fit input validation: bad X and R must raise informative errors.

    Each asserted substring must match the library's error text verbatim.
    """
    from brainiak.factoranalysis.htfa import HTFA
    import numpy as np
    n_voxel = 100
    n_tr = 20
    K = 5
    max_global_iter = 3
    max_local_iter = 3
    max_voxel = n_voxel
    max_tr = n_tr
    R = []
    n_subj = 2
    for s in np.arange(n_subj):
        R.append(np.random.randint(2, high=102, size=(n_voxel, 3)))
    htfa = HTFA(
        K,
        n_subj=n_subj,
        max_global_iter=max_global_iter,
        max_local_iter=max_local_iter,
        max_voxel=max_voxel,
        max_tr=max_tr)
    X = np.random.rand(n_voxel, n_tr)
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Input data should be a list" in str(excinfo.value)
    X = []
    # Check that does NOT run with wrong array dimension
    with pytest.raises(ValueError) as excinfo:
        htfa.fit(X, R=R)
    # NOTE(review): "leat" appears to mirror the library's error string
    # verbatim — confirm against the library before "fixing" it here.
    assert "Need at leat one subject to train the model" in str(excinfo.value)
    X = []
    X.append([1, 2, 3])
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "data should be an array" in str(excinfo.value)
    X = []
    X.append(np.random.rand(n_voxel))
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "subject data should be 2D array" in str(excinfo.value)
    X = []
    for s in np.arange(n_subj):
        X.append(np.random.rand(n_voxel, n_tr))
    R = np.random.randint(2, high=102, size=(n_voxel, 3))
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Coordinates should be a list" in str(excinfo.value)
    R = []
    R.append([1, 2, 3])
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert ("Each scanner coordinate matrix should be an array"
            in str(excinfo.value))
    R = []
    R.append(np.random.rand(n_voxel))
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert ("Each scanner coordinate matrix should be 2D array"
            in str(excinfo.value))
    R = []
    for s in np.arange(n_subj):
        R.append(np.random.rand(n_voxel - 1, 3))
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert ("n_voxel should be the same in X[idx] and R[idx]"
            in str(excinfo.value))
def test_can_run():
    """End-to-end HTFA run under MPI, subjects distributed round-robin.

    Rank 0 checks the global prior/posterior sizes; all other ranks check
    their local weights and posterior sizes.
    """
    import numpy as np
    from brainiak.factoranalysis.htfa import HTFA
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    n_voxel = 100
    n_tr = 20
    K = 5
    max_global_iter = 3
    max_local_iter = 3
    max_voxel = n_voxel
    max_tr = n_tr
    R = []
    n_subj = 2
    for s in np.arange(n_subj):
        R.append(np.random.randint(2, high=102, size=(n_voxel, 3)))
    # Round-robin assignment of subjects' coordinates to MPI ranks
    my_R = []
    for idx in np.arange(n_subj):
        if idx % size == rank:
            my_R.append(R[idx])
    htfa = HTFA(
        K,
        n_subj=n_subj,
        max_global_iter=max_global_iter,
        max_local_iter=max_local_iter,
        max_voxel=max_voxel,
        max_tr=max_tr,
        verbose=True)
    assert htfa, "Invalid HTFA instance!"
    X = []
    for s in np.arange(n_subj):
        X.append(np.random.rand(n_voxel, n_tr))
    # Same round-robin assignment for the data itself
    my_data = []
    for idx in np.arange(n_subj):
        if idx % size == rank:
            my_data.append(X[idx])
    if rank == 0:
        htfa.fit(my_data, R=my_R)
        assert True, "Root successfully running HTFA"
        assert htfa.global_prior_.shape[0] == htfa.prior_bcast_size,\
            "Invalid result of HTFA! (wrong # element in global_prior)"
        assert htfa.global_posterior_.shape[0] == htfa.prior_bcast_size,\
            "Invalid result of HTFA! (wrong # element in global_posterior)"
    else:
        htfa.fit(my_data, R=my_R)
        assert True, "worker successfully running HTFA"
        # NOTE(review): leftover debug print
        print(htfa.local_weights_.shape)
        assert htfa.local_weights_.shape[0] == n_tr * K,\
            "Invalid result of HTFA! (wrong # element in local_weights)"
        assert htfa.local_posterior_.shape[0] == htfa.prior_size,\
            "Invalid result of HTFA! (wrong # element in local_posterior)"
| 5,283 | 30.266272 | 78 | py |
brainiak | brainiak-master/tests/factoranalysis/test_tfa.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_tfa():
    """TFA input validation, fitting, and weight_method handling.

    Exercises: bad X/R types and shapes, a plain fit, a fit seeded with a
    template prior, and rejection of an unknown weight_method.
    """
    from brainiak.factoranalysis.tfa import TFA
    import numpy as np
    n_voxel = 100
    n_tr = 20
    K = 5
    max_iter = 5
    max_num_voxel = n_voxel
    max_num_tr = n_tr
    tfa = TFA(
        K=K,
        max_iter=max_iter,
        verbose=True,
        max_num_voxel=max_num_voxel,
        max_num_tr=max_num_tr)
    assert tfa, "Invalid TFA instance!"
    R = np.random.randint(2, high=102, size=(n_voxel, 3))
    X = [1, 2, 3]
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "Input data should be an array" in str(excinfo.value)
    X = np.random.rand(n_voxel)
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "Input data should be 2D array" in str(excinfo.value)
    X = np.random.rand(n_voxel, n_tr)
    R = [1, 2, 3]
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "coordinate matrix should be an array" in str(excinfo.value)
    R = np.random.rand(n_voxel)
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "coordinate matrix should be 2D array" in str(excinfo.value)
    R = np.random.randint(2, high=102, size=(n_voxel - 1, 3))
    # Check that does NOT run if n_voxel in X and R does not match
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "The number of voxels should be the same in X and R" in str(
        excinfo.value)
    R = np.random.randint(2, high=102, size=(n_voxel, 3))
    tfa.fit(X, R=R)
    assert True, "Success running TFA with one subject!"
    # Posterior stores K centers (n_dim coords) plus K widths
    posterior_size = K * (tfa.n_dim + 1)
    assert tfa.local_posterior_.shape[
        0] == posterior_size,\
        "Invalid result of TFA! (wrong # element in local_posterior)"
    weight_method = 'ols'
    tfa = TFA(
        weight_method=weight_method,
        K=K,
        max_iter=max_iter,
        verbose=True,
        max_num_voxel=max_num_voxel,
        max_num_tr=max_num_tr)
    assert tfa, "Invalid TFA instance!"
    X = np.random.rand(n_voxel, n_tr)
    tfa.fit(X, R=R)
    assert True, "Success running TFA with one subject!"
    # Refit seeded with a template prior derived from the coordinates
    template_prior, _, _ = tfa.get_template(R)
    tfa.set_K(K)
    tfa.set_seed(200)
    tfa.fit(X, R=R, template_prior=template_prior)
    assert True, "Success running TFA with one subject and template prior!"
    assert tfa.local_posterior_.shape[
        0] == posterior_size,\
        "Invalid result of TFA! (wrong # element in local_posterior)"
    # Unknown weight_method must be rejected
    weight_method = 'odd'
    tfa = TFA(
        weight_method=weight_method,
        K=K,
        max_iter=max_iter,
        verbose=True,
        max_num_voxel=max_num_voxel,
        max_num_tr=max_num_tr)
    with pytest.raises(ValueError) as excinfo:
        tfa.fit(X, R=R)
    assert "'rr' and 'ols' are accepted as weight_method!" in str(
        excinfo.value)
| 3,661 | 31.990991 | 75 | py |
brainiak | brainiak-master/tests/eventseg/test_event.py | from brainiak.eventseg.event import EventSegment
from scipy.special import comb
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
def test_create_event_segmentation():
    """A 5-event EventSegment should construct successfully."""
    model = EventSegment(5)
    assert model, "Invalid EventSegment instance"
def test_fit_shapes():
    """fit/find_events outputs are (T, K) with rows summing to one; and
    model_prior / set_event_patterns reject inconsistent sizes."""
    K = 5
    V = 3
    T = 10
    es = EventSegment(K, n_iter=2)
    sample_data = np.random.rand(V, T)
    es.fit(sample_data.T)
    assert es.segments_[0].shape == (T, K), "Segmentation from fit " \
                                            "has incorrect shape"
    assert np.isclose(np.sum(es.segments_[0], axis=1), np.ones(T)).all(), \
        "Segmentation from learn_events not correctly normalized"
    # Transfer to data with a different number of time points
    T2 = 15
    sample_data2 = np.random.rand(V, T2)
    test_segments, test_ll = es.find_events(sample_data2.T)
    assert test_segments.shape == (T2, K), "Segmentation from find_events " \
                                           "has incorrect shape"
    assert np.isclose(np.sum(test_segments, axis=1), np.ones(T2)).all(), \
        "Segmentation from find_events not correctly normalized"
    es_invalid = EventSegment(K)
    with pytest.raises(ValueError):
        es_invalid.model_prior(K-1)
        # ``with`` block is about to end with no error.
        pytest.fail("T < K should cause error")
    with pytest.raises(ValueError):
        es_invalid.set_event_patterns(np.zeros((V, K-1)))
        pytest.fail("#Events < K should cause error")
def test_simple_boundary():
    """Two noisy step patterns should segment at the obvious boundary."""
    model = EventSegment(2)
    rng = np.random.RandomState(0)
    pattern = np.array([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])
    noisy = pattern + rng.rand(2, 7) * 10
    model.fit(noisy.T)
    hard_labels = np.argmax(model.segments_[0], axis=1)
    assert np.array_equal(hard_labels, [0, 0, 0, 1, 1, 1, 1]),\
        "Failed to correctly segment two events"
    predicted = model.predict(noisy.T)
    assert np.array_equal(predicted, [0, 0, 0, 1, 1, 1, 1]), \
        "Error in predict interface"
def test_event_transfer():
    """find_events requires variance and patterns to be set first, then
    transfers the two events onto new data."""
    es = EventSegment(2)
    sample_data = np.asarray([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])
    with pytest.raises(NotFittedError):
        seg = es.find_events(sample_data.T)[0]
        pytest.fail("Should need to set variance")
    with pytest.raises(NotFittedError):
        seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
        pytest.fail("Should need to set patterns")
    es.set_event_patterns(np.asarray([[1, 0], [0, 1]]))
    seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
    events = np.argmax(seg, axis=1)
    assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
        "Failed to correctly transfer two events to new data"
def test_weighted_var():
    """calc_weighted_event_var matches hand-computed variances."""
    es = EventSegment(2)
    D = np.zeros((8, 4))
    # Two opposite normalized patterns, four timepoints each
    for t in range(4):
        D[t, :] = (1/np.sqrt(4/3)) * np.array([-1, -1, 1, 1])
    for t in range(4, 8):
        D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
    mean_pat = D[[0, 4], :].T
    weights = np.zeros((8, 2))
    weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
    weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
    # Hard assignment: each event exactly equals its mean, so variance is 0
    assert np.array_equal(
        es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]),\
        "Failed to compute variance with 0/1 weights"
    weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
    weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
    # Analytic value for the fractional-weight case
    true_var = (4 * 0.5 * 12)/(6 - 5/6) * np.ones(2) / 4
    assert np.allclose(
        es.calc_weighted_event_var(D, weights, mean_pat), true_var),\
        "Failed to compute variance with fractional weights"
def test_sym():
    """With constant data and identical patterns, segmentation is symmetric."""
    model = EventSegment(4)
    # Four identical event patterns over 10 voxels
    patterns = np.repeat(np.arange(10).reshape(-1, 1), 4, axis=1)
    model.set_event_patterns(patterns)
    constant_data = np.repeat(np.arange(10).reshape(1, -1), 20, axis=0)
    seg = model.find_events(constant_data, var=1)[0]
    # Check that events 1-4 and 2-3 are symmetric
    mirrored = np.fliplr(np.flipud(seg[:, 2:]))
    assert np.all(np.isclose(seg[:, :2], mirrored)),\
        "Fit with constant data is not symmetric"
def test_chains():
    """Event-chain models only support find_events, not fit."""
    es = EventSegment(5, event_chains=np.array(['A', 'A', 'B', 'B', 'B']))
    sample_data = np.array([[0, 0, 0], [1, 1, 1]])
    with pytest.raises(RuntimeError):
        seg = es.fit(sample_data.T)[0]
        pytest.fail("Can't use fit() with event chains")
    es.set_event_patterns(np.array([[1, 1, 0, 0, 0],
                                    [0, 0, 1, 1, 1]]))
    seg = es.find_events(sample_data.T, 0.1)[0]
    # The data matches the 'B' chain's patterns (events 2-4)
    ev = np.nonzero(seg > 0.99)[1]
    assert np.array_equal(ev, [2, 3, 4]),\
        "Failed to fit with multiple chains"
def test_prior():
    """model_prior matches the analytic prior over event assignments."""
    K = 10
    T = 100
    es = EventSegment(K)
    mp = es.model_prior(T)[0]
    # Build the analytic boundary probabilities and compare
    p_bound = np.zeros((T, K-1))
    norm = comb(T-1, K-1)
    for t in range(T-1):
        for k in range(K-1):
            # See supplementary material of Neuron paper
            # https://doi.org/10.1016/j.neuron.2017.06.041
            p_bound[t+1, k] = comb(t, k) * comb(T-t-2, K-k-2) / norm
    p_bound = np.cumsum(p_bound, axis=0)
    # Convert cumulative boundary probabilities into per-event probabilities
    mp_gt = np.zeros((T, K))
    for k in range(K):
        if k == 0:
            mp_gt[:, k] = 1 - p_bound[:, 0]
        elif k == K - 1:
            mp_gt[:, k] = p_bound[:, k-1]
        else:
            mp_gt[:, k] = p_bound[:, k-1] - p_bound[:, k]
    assert np.all(np.isclose(mp, mp_gt)),\
        "Prior does not match analytic solution"
def test_split_merge():
    """split_merge should recover events with highly uneven durations."""
    # Ground-truth labels: events 3 and 4 are much longer than 0-2
    ev = np.array(
        [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
         3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4])
    random_state = np.random.RandomState(0)
    ev_pat = random_state.rand(5, 10)
    D = np.zeros((len(ev), 10))
    for t in range(len(ev)):
        D[t, :] = ev_pat[ev[t], :] + 0.1*random_state.rand(10)
    hmm_sm = EventSegment(5, split_merge=True, split_merge_proposals=2)
    hmm_sm.fit(D)
    hmm_events = np.argmax(hmm_sm.segments_[0], axis=1)
    assert np.all(ev == hmm_events),\
        "Merge/split fails to find highly uneven events"
def test_sym_ll():
    """Log-likelihood is identical for time-reversed data and patterns."""
    ev = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2])
    random_state = np.random.RandomState(0)
    ev_pat = random_state.rand(3, 10)
    D_forward = np.zeros((len(ev), 10))
    for t in range(len(ev)):
        D_forward[t, :] = ev_pat[ev[t], :] + 0.1 * random_state.rand(10)
    D_backward = np.flip(D_forward, axis=0)
    hmm_forward = EventSegment(3)
    hmm_forward.set_event_patterns(ev_pat.T)
    _, ll_forward = hmm_forward.find_events(D_forward, var=1)
    # Reverse both the data (in time) and the patterns (in event order)
    hmm_backward = EventSegment(3)
    hmm_backward.set_event_patterns(np.flip(ev_pat.T, axis=1))
    _, ll_backward = hmm_backward.find_events(D_backward, var=1)
    assert (ll_forward == ll_backward),\
        "Log-likelihood not symmetric forward/backward"
| 6,744 | 32.063725 | 78 | py |
brainiak | brainiak-master/tests/fcma/test_mvpa_voxel_selection.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import conftest
from brainiak.fcma.mvpa_voxelselector import MVPAVoxelSelector
from brainiak.searchlight.searchlight import Searchlight
from sklearn import svm
import numpy as np
from mpi4py import MPI
from numpy.random import RandomState
# specify the random state to fix the random numbers
prng = RandomState(1234567890)  # fixed seed keeps expected outputs stable
@conftest.skip_non_fork
def test_mvpa_voxel_selection():
    """End-to-end MVPA voxel selection on a tiny synthetic 5x5x5 volume.

    The module-level fixed-seed ``prng`` makes the per-voxel accuracy counts
    deterministic, so they can be compared against hard-coded expectations.
    """
    data = prng.rand(5, 5, 5, 8).astype(np.float32)
    # all MPI processes read the mask; the mask file is small
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the supported spelling.
    mask = np.ones([5, 5, 5], dtype=bool)
    mask[0, 0, :] = False
    labels = [0, 1, 0, 1, 0, 1, 0, 1]
    # 2 subjects, 4 epochs per subject
    sl = Searchlight(sl_rad=1)
    mvs = MVPAVoxelSelector(data, mask, labels, 2, sl)
    # for cross validation, use SVM with an RBF kernel
    clf = svm.SVC(kernel='rbf', C=10, gamma='auto')
    result_volume, results = mvs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        # Convert per-voxel accuracies back into correct-epoch counts
        # (renamed the loop variable: the original shadowed builtin `tuple`)
        output = []
        for result in results:
            if result[1] > 0:
                output.append(int(8*result[1]))
        expected_output = [6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4,
                           4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 1]
        assert np.allclose(output, expected_output, atol=1), \
            'voxel selection via SVM does not provide correct results'
# Allow running this test directly (e.g. under mpirun), outside of pytest.
if __name__ == '__main__':
    test_mvpa_voxel_selection()
| 1,972 | 34.872727 | 75 | py |
brainiak | brainiak-master/tests/fcma/test_util.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.random import RandomState
from brainiak.fcma.util import compute_correlation
# specify the random state to fix the random numbers
prng = RandomState(1234567890)  # fixed seed keeps the comparisons stable
def test_correlation_computation():
    """compute_correlation should match np.corrcoef within and across sets."""
    row1 = 5
    col = 10
    row2 = 6
    mat1 = prng.rand(row1, col).astype(np.float32)
    mat2 = prng.rand(row2, col).astype(np.float32)
    corr = compute_correlation(mat1, mat1)
    expected_corr = np.corrcoef(mat1)
    assert np.allclose(corr, expected_corr, atol=1e-5), (
        "high performance correlation computation does not provide correct "
        "correlation results within the same set")
    corr = compute_correlation(mat1, mat2)
    # Cross-set reference: corrcoef of the stacked matrix, upper-right block
    mat = np.concatenate((mat1, mat2), axis=0)
    expected_corr = np.corrcoef(mat)[0:row1, row1:]
    assert np.allclose(corr, expected_corr, atol=1e-5), (
        "high performance correlation computation does not provide correct "
        "correlation results between two sets")
def test_correlation_nans():
    """A NaN input row yields zeros by default, NaNs with return_nans=True."""
    row1 = 5
    col = 10
    row2 = 6
    mat1 = prng.rand(row1, col).astype(np.float32)
    mat2 = prng.rand(row2, col).astype(np.float32)
    mat1[0, 0] = np.nan
    corr = compute_correlation(mat1, mat2, return_nans=False)
    # Only the first row (the one containing the NaN) is zeroed out
    assert np.all(corr == 0, axis=1)[0]
    assert np.sum(corr == 0) == row2
    corr = compute_correlation(mat1, mat2, return_nans=True)
    # With return_nans=True the same row comes back as NaNs instead
    assert np.all(np.isnan(corr), axis=1)[0]
    assert np.sum(np.isnan(corr)) == row2
# Allow running these tests directly, outside of pytest.
if __name__ == '__main__':
    test_correlation_computation()
    test_correlation_nans()
| 2,131 | 34.533333 | 76 | py |
brainiak | brainiak-master/tests/fcma/test_classification.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.classifier import Classifier
from scipy.stats.mstats import zscore
from sklearn import svm
from sklearn.linear_model import LogisticRegression
import numpy as np
import math
from numpy.random import RandomState
from scipy.spatial.distance import hamming
# specify the random state to fix the random numbers
prng = RandomState(1234567890)  # fixed seed keeps expected confidences stable
def create_epoch(idx, num_voxels):
    """Create one synthetic 12-timepoint epoch with ``num_voxels`` voxels.

    Even-indexed epochs get a monotone pattern (values sorted along time).
    The result is z-scored per voxel over time, cleaned of NaNs, and scaled
    by 1/sqrt(n_timepoints).
    """
    n_tr = 12
    epoch = prng.rand(n_tr, num_voxels).astype(np.float32)
    # impose a pattern to even epochs
    if idx % 2 == 0:
        epoch = np.sort(epoch, axis=0)
    epoch = zscore(epoch, axis=0, ddof=0)
    # if zscore fails (standard deviation is zero), set all values to zero
    epoch = np.nan_to_num(epoch)
    return epoch / math.sqrt(epoch.shape[0])
def test_classification():
    """FCMA Classifier end-to-end with SVM (full and partial-voxel similarity)
    and logistic regression.

    The hard-coded expected confidences depend on the module-level fixed-seed
    ``prng`` used by create_epoch; a Hamming distance of at most 1 sign flip
    is tolerated in each comparison.
    """
    fake_raw_data = [create_epoch(i, 5) for i in range(20)]
    labels = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # 5 subjects, 4 epochs per subject
    epochs_per_subj = 4
    # svm
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
    training_data = fake_raw_data[0:12]
    clf = Classifier(svm_clf, epochs_per_subj=epochs_per_subj)
    clf.fit(list(zip(training_data, training_data)), labels[0:12])
    expected_confidence = np.array([-1.18234421, 0.97403604, -1.04005679,
                                    0.92403019, -0.95567738, 1.11746593,
                                    -0.83275891, 0.9486868])
    recomputed_confidence = clf.decision_function(list(zip(
        fake_raw_data[12:], fake_raw_data[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(recomputed_confidence)
                               ) * expected_confidence.size
    assert hamming_distance <= 1, \
        'decision function of SVM with recomputation ' \
        'does not provide correct results'
    y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data[12:])))
    expected_output = [0, 0, 0, 1, 0, 1, 0, 1]
    hamming_distance = hamming(y_pred, expected_output) * len(y_pred)
    assert hamming_distance <= 1, \
        'classification via SVM does not provide correct results'
    confidence = clf.decision_function(list(zip(fake_raw_data[12:],
                                                fake_raw_data[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(confidence)
                               ) * confidence.size
    assert hamming_distance <= 1, \
        'decision function of SVM without recomputation ' \
        'does not provide correct results'
    y = [0, 1, 0, 1, 0, 1, 0, 1]
    # score should equal 1 - (normalized Hamming distance to the labels)
    score = clf.score(list(zip(fake_raw_data[12:], fake_raw_data[12:])), y)
    assert np.isclose([hamming(y_pred, y)], [1-score])[0], \
        'the prediction score is incorrect'
    # svm with partial similarity matrix computation
    clf = Classifier(svm_clf, num_processed_voxels=2,
                     epochs_per_subj=epochs_per_subj)
    clf.fit(list(zip(fake_raw_data, fake_raw_data)),
            labels,
            num_training_samples=12)
    y_pred = clf.predict()
    expected_output = [0, 0, 0, 1, 0, 1, 0, 1]
    hamming_distance = hamming(y_pred, expected_output) * len(y_pred)
    assert hamming_distance <= 1, \
        'classification via SVM (partial sim) does not ' \
        'provide correct results'
    confidence = clf.decision_function()
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(confidence)) * confidence.size
    assert hamming_distance <= 1, \
        'decision function of SVM (partial sim) without recomputation ' \
        'does not provide correct results'
    # logistic regression
    lr_clf = LogisticRegression()
    clf = Classifier(lr_clf, epochs_per_subj=epochs_per_subj)
    clf.fit(list(zip(training_data, training_data)), labels[0:12])
    expected_confidence = np.array([-4.49666484, 3.73025553, -4.04181695,
                                    3.73027436, -3.77043872, 4.42613412,
                                    -3.35616616, 3.77716609])
    recomputed_confidence = clf.decision_function(list(zip(
        fake_raw_data[12:], fake_raw_data[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(recomputed_confidence)
                               ) * expected_confidence.size
    assert hamming_distance <= 1, \
        'decision function of logistic regression with recomputation ' \
        'does not provide correct results'
    y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data[12:])))
    expected_output = [0, 0, 0, 1, 0, 1, 0, 1]
    hamming_distance = hamming(y_pred, expected_output) * len(y_pred)
    assert hamming_distance <= 1, \
        'classification via logistic regression ' \
        'does not provide correct results'
    confidence = clf.decision_function(list(zip(
        fake_raw_data[12:], fake_raw_data[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(confidence)
                               ) * confidence.size
    assert hamming_distance <= 1, \
        'decision function of logistic regression without precomputation ' \
        'does not provide correct results'
def test_classification_with_two_components():
    """Classify FCMA data built from two voxel masks (paired inputs).

    Trains an SVM (full and partial similarity-matrix variants) and a
    logistic-regression classifier on paired epochs, then checks predicted
    labels and decision-function signs against hard-coded expectations.

    NOTE(review): the expected confidence arrays below are tied to the
    deterministic fake data produced by ``create_epoch`` — presumably via a
    fixed seed; do not reorder the data-generation calls (confirm in
    ``create_epoch``).
    """
    fake_raw_data = [create_epoch(i, 5) for i in range(20)]
    fake_raw_data2 = [create_epoch(i, 6) for i in range(20)]
    labels = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # 5 subjects, 4 epochs per subject
    epochs_per_subj = 4
    # SVM with a precomputed kernel: the Classifier supplies the
    # correlation-based similarity matrix as the kernel.
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
    training_data = fake_raw_data[0: 12]
    training_data2 = fake_raw_data2[0: 12]
    clf = Classifier(svm_clf, epochs_per_subj=epochs_per_subj)
    clf.fit(list(zip(training_data, training_data2)), labels[0:12])
    expected_confidence = np.array([-1.23311606, 1.02440964, -0.93898336,
                                    1.07028798, -1.04420007, 0.97647772,
                                    -1.0498268, 1.04970111])
    # decision_function with explicit test data forces recomputation of
    # the similarity matrix for the held-out epochs (12:).
    recomputed_confidence = clf.decision_function(list(zip(
        fake_raw_data[12:], fake_raw_data2[12:])))
    # Hamming distance of the sign patterns; at most one disagreement is
    # tolerated to absorb platform-level numerical differences.
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(recomputed_confidence)
                               ) * expected_confidence.size
    assert hamming_distance <= 1, \
        'decision function of SVM with recomputation ' \
        'does not provide correct results'
    y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data2[12:])))
    expected_output = [0, 1, 0, 1, 0, 1, 0, 1]
    hamming_distance = hamming(y_pred, expected_output) * len(y_pred)
    assert hamming_distance <= 1, \
        'classification via SVM does not provide correct results'
    confidence = clf.decision_function(list(zip(
        fake_raw_data[12:], fake_raw_data2[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(confidence)) * confidence.size
    assert hamming_distance <= 1, \
        'decision function of SVM without recomputation ' \
        'does not provide correct results'
    y = [0, 1, 0, 1, 0, 1, 0, 1]
    score = clf.score(list(zip(fake_raw_data[12:], fake_raw_data2[12:])), y)
    # score is accuracy; hamming() returns the error rate, so they must sum
    # to 1 for the same prediction.
    assert np.isclose([hamming(y_pred, y)], [1-score])[0], \
        'the prediction score is incorrect'
    # svm with partial similarity matrix computation
    clf = Classifier(svm_clf, num_processed_voxels=2,
                     epochs_per_subj=epochs_per_subj)
    clf.fit(list(zip(fake_raw_data, fake_raw_data2)),
            labels,
            num_training_samples=12)
    # No argument: predict/decision_function reuse the data given to fit
    # beyond num_training_samples.
    y_pred = clf.predict()
    expected_output = [0, 1, 0, 1, 0, 1, 0, 1]
    hamming_distance = hamming(y_pred, expected_output) * len(y_pred)
    assert hamming_distance <= 1, \
        'classification via SVM (partial sim) does not ' \
        'provide correct results'
    confidence = clf.decision_function()
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(confidence)) * confidence.size
    assert hamming_distance <= 1, \
        'decision function of SVM (partial sim) without recomputation ' \
        'does not provide correct results'
    # logistic regression
    lr_clf = LogisticRegression()
    clf = Classifier(lr_clf, epochs_per_subj=epochs_per_subj)
    # specifying num_training_samples is for coverage
    clf.fit(list(zip(training_data, training_data2)),
            labels[0:12],
            num_training_samples=12)
    expected_confidence = np.array([-4.90819848, 4.22548132, -3.76255726,
                                    4.46505975, -4.19933099, 4.08313584,
                                    -4.23070437, 4.31779758])
    recomputed_confidence = clf.decision_function(list(zip(
        fake_raw_data[12:], fake_raw_data2[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(recomputed_confidence)
                               ) * expected_confidence.size
    assert hamming_distance <= 1, \
        'decision function of logistic regression with recomputation ' \
        'does not provide correct results'
    y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data2[12:])))
    expected_output = [0, 1, 0, 1, 0, 1, 0, 1]
    hamming_distance = hamming(y_pred, expected_output) * len(y_pred)
    assert hamming_distance <= 1, \
        'classification via logistic regression ' \
        'does not provide correct results'
    confidence = clf.decision_function(list(zip(fake_raw_data[12:],
                                                fake_raw_data2[12:])))
    hamming_distance = hamming(np.sign(expected_confidence),
                               np.sign(confidence)) * confidence.size
    assert hamming_distance <= 1, \
        'decision function of logistic regression without precomputation ' \
        'does not provide correct results'
if __name__ == '__main__':
    # Allow running this test module as a plain script (outside pytest).
    test_classification()
    test_classification_with_two_components()
| 10,698 | 46.977578 | 79 | py |
brainiak | brainiak-master/tests/fcma/test_preprocessing.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import numpy as np
from brainiak.fcma.preprocessing import (prepare_fcma_data, prepare_mvpa_data,
prepare_searchlight_mvpa_data)
from brainiak import io
# Shared fixtures for the preprocessing tests below.
data_dir = Path(__file__).parents[1] / 'io' / 'data'  # input NIfTI data
expected_dir = Path(__file__).parent / 'data'  # stored reference outputs
suffix = 'bet.nii.gz'  # suffix selecting the brain-extracted images
mask_file = data_dir / 'mask.nii.gz'
epoch_file = data_dir / 'epoch_labels.npy'
# Condition label expected for each of the four epochs.
expected_labels = np.array([0, 1, 0, 1])
def test_prepare_fcma_data():
    """Check prepare_fcma_data output against stored reference data.

    Also verifies that both randomized variants keep the epoch count.
    """
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
    expected_raw_data = np.load(expected_dir / 'expected_raw_data.npy')
    assert len(raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    # Lengths were just asserted equal, so pairwise iteration is exhaustive.
    for actual_epoch, expected_epoch in zip(raw_data, expected_raw_data):
        assert np.allclose(actual_epoch, expected_epoch), \
            'raw data do not match in test_prepare_fcma_data'
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_fcma_data'
    # Randomization must not change the number of epochs produced.
    from brainiak.fcma.preprocessing import RandomType
    for random_kind in (RandomType.REPRODUCIBLE, RandomType.UNREPRODUCIBLE):
        images = io.load_images_from_dir(data_dir, suffix=suffix)
        random_raw_data, _, _ = prepare_fcma_data(images, conditions, mask,
                                                  random=random_kind)
        assert len(random_raw_data) == len(expected_raw_data), \
            'numbers of epochs do not match in test_prepare_fcma_data'
def test_prepare_mvpa_data():
    """Check prepare_mvpa_data output against stored reference data."""
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    processed_data, labels = prepare_mvpa_data(images, conditions, mask)
    expected_processed_data = np.load(expected_dir
                                      / 'expected_processed_data.npy')
    assert len(processed_data) == len(expected_processed_data), \
        'numbers of epochs do not match in test_prepare_mvpa_data'
    # Lengths were just asserted equal, so pairwise iteration is exhaustive.
    for actual_epoch, expected_epoch in zip(processed_data,
                                            expected_processed_data):
        assert np.allclose(actual_epoch, expected_epoch), (
            'raw data do not match in test_prepare_mvpa_data')
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_mvpa_data'
def test_prepare_searchlight_mvpa_data():
    """Check searchlight MVPA preprocessing against stored reference data.

    Also verifies that both randomized variants keep the epoch count.
    """
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    conditions = io.load_labels(epoch_file)
    processed_data, labels = prepare_searchlight_mvpa_data(images,
                                                           conditions)
    expected_searchlight_processed_data = np.load(
        expected_dir / 'expected_searchlight_processed_data.npy')
    for epoch_idx, actual_epoch in enumerate(processed_data):
        expected_epoch = expected_searchlight_processed_data[epoch_idx]
        assert np.allclose(actual_epoch, expected_epoch), (
            'raw data do not match in test_prepare_searchlight_mvpa_data')
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_searchlight_mvpa_data'
    # Randomization must not change the number of epochs produced.
    from brainiak.fcma.preprocessing import RandomType
    for random_kind in (RandomType.REPRODUCIBLE, RandomType.UNREPRODUCIBLE):
        images = io.load_images_from_dir(data_dir, suffix=suffix)
        random_processed_data, _ = prepare_searchlight_mvpa_data(
            images,
            conditions,
            random=random_kind)
        assert (len(random_processed_data)
                == len(expected_searchlight_processed_data)), (
            'numbers of epochs do not match in '
            'test_prepare_searchlight_mvpa_data')
if __name__ == '__main__':
    # Allow running this test module as a plain script (outside pytest).
    test_prepare_fcma_data()
    test_prepare_mvpa_data()
    test_prepare_searchlight_mvpa_data()
| 5,069 | 45.090909 | 79 | py |
brainiak | brainiak-master/tests/fcma/test_voxel_selection.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import conftest
from brainiak.fcma.voxelselector import VoxelSelector
from scipy.stats.mstats import zscore
from sklearn import svm
from sklearn.linear_model import LogisticRegression
import numpy as np
import math
from mpi4py import MPI
from numpy.random import RandomState
# Module-level RandomState with a fixed seed so every run draws the same
# sequence; the hard-coded expected values in the tests below depend on it.
prng = RandomState(1234567890)
def create_epoch(row=12, col=5):
    """Generate one deterministic pseudo-random, normalized epoch matrix.

    Generalized from the original fixed 12x5 shape: the dimensions are now
    parameters whose defaults preserve the previous behavior, so existing
    callers are unaffected.

    Parameters
    ----------
    row : int, optional
        Number of TRs (rows) in the epoch. Default 12.
    col : int, optional
        Number of voxels (columns) in the epoch. Default 5.

    Returns
    -------
    numpy.ndarray, shape (row, col), dtype float32
        Column-wise z-scored data scaled by 1/sqrt(row).
    """
    mat = prng.rand(row, col).astype(np.float32)
    mat = zscore(mat, axis=0, ddof=0)
    # if zscore fails (standard deviation is zero),
    # set all values to be zero
    mat = np.nan_to_num(mat)
    # Scale by 1/sqrt(#TRs) so downstream correlation sums are normalized.
    mat = mat / math.sqrt(mat.shape[0])
    return mat
@conftest.skip_non_fork
def test_voxel_selection():
    """Run FCMA voxel selection on fake data with SVM and logistic regression.

    Fix: the result-accumulation loops previously used ``tuple`` as a loop
    variable, shadowing the builtin; renamed to ``result``.

    Results are only checked on MPI rank 0, where VoxelSelector gathers them.
    """
    fake_raw_data = [create_epoch() for i in range(8)]
    labels = [0, 1, 0, 1, 0, 1, 0, 1]
    # 2 subjects, 4 epochs per subject
    vs = VoxelSelector(labels, 4, 2, fake_raw_data, voxel_unit=1)
    # test scipy normalization
    fake_corr = prng.rand(1, 4, 5).astype(np.float32)
    fake_corr = vs._correlation_normalization(fake_corr)
    if MPI.COMM_WORLD.Get_rank() == 0:
        # Expected values are tied to the fixed prng seed above.
        expected_fake_corr = [[[1.06988919, 0.51641309, -0.46790636,
                                -1.31926763, 0.2270218],
                               [-1.22142744, -1.39881694, -1.2979387,
                                1.05702305, -0.6525566],
                               [0.89795232, 1.27406132, 0.36460185,
                                0.87538344, 1.5227468],
                               [-0.74641371, -0.39165771, 1.40124381,
                                -0.61313909, -1.0972116]]]
        assert np.allclose(fake_corr, expected_fake_corr), \
            'within-subject normalization does not provide correct results'
    # for cross validation, use SVM with precomputed kernel
    # no shrinking, set C=1
    clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
    results = vs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        # results holds (voxel_index, accuracy) pairs; convert accuracy to
        # a correct-count out of the 8 epochs for comparison.
        output = [None] * len(results)
        for result in results:
            output[result[0]] = int(8*result[1])
        expected_output = [7, 4, 6, 4, 4]
        assert np.allclose(output, expected_output, atol=1), \
            'voxel selection via SVM does not provide correct results'
    # for cross validation, use logistic regression
    clf = LogisticRegression()
    results = vs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        output = [None] * len(results)
        for result in results:
            output[result[0]] = int(8*result[1])
        expected_output = [6, 3, 6, 4, 4]
        assert np.allclose(output, expected_output, atol=1), (
            "voxel selection via logistic regression does not provide correct "
            "results")
@conftest.skip_non_fork
def test_voxel_selection_with_two_masks():
    """Run FCMA voxel selection with two data sets (two masks).

    Fix: the result-accumulation loops previously used ``tuple`` as a loop
    variable, shadowing the builtin; renamed to ``result``.

    Results are only checked on MPI rank 0, where VoxelSelector gathers them.
    """
    fake_raw_data1 = [create_epoch() for i in range(8)]
    fake_raw_data2 = [create_epoch() for i in range(8)]
    labels = [0, 1, 0, 1, 0, 1, 0, 1]
    # 2 subjects, 4 epochs per subject
    vs = VoxelSelector(labels, 4, 2, fake_raw_data1,
                       raw_data2=fake_raw_data2, voxel_unit=1)
    # for cross validation, use SVM with precomputed kernel
    # no shrinking, set C=1
    clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
    results = vs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        # results holds (voxel_index, accuracy) pairs; convert accuracy to
        # a correct-count out of the 8 epochs for comparison.
        output = [None] * len(results)
        for result in results:
            output[result[0]] = int(8*result[1])
        expected_output = [3, 3, 3, 6, 6]
        assert np.allclose(output, expected_output, atol=1), \
            'voxel selection via SVM does not provide correct results'
    # for cross validation, use logistic regression
    clf = LogisticRegression()
    results = vs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        output = [None] * len(results)
        for result in results:
            output[result[0]] = int(8*result[1])
        expected_output = [3, 4, 4, 6, 6]
        assert np.allclose(output, expected_output, atol=1), (
            "voxel selection via logistic regression does not provide correct "
            "results")
if __name__ == '__main__':
    # Allow running this test module as a plain script (e.g. under mpirun).
    test_voxel_selection()
    test_voxel_selection_with_two_masks()
| 4,790 | 38.595041 | 79 | py |
brainiak | brainiak-master/tests/funcalign/test_srm_distributed.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
from mpi4py import MPI
def test_distributed_srm(): # noqa: C901
    """End-to-end test of SRM with MPI-distributed subject data.

    Subject data is distributed cyclically across ranks (a rank holds a
    subject's array or None). The statement order matters: the expected
    shared response stored in sr_v0_4.npz depends on the fixed numpy seed
    and the exact sequence of random draws below.
    """
    import brainiak.funcalign.srm
    s = brainiak.funcalign.srm.SRM()
    assert s, "Invalid SRM instance!"
    import numpy as np
    np.random.seed(0)
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nrank = comm.Get_size()
    voxels = 100
    samples = 500
    subjects = 2
    features = 3
    s = brainiak.funcalign.srm.SRM(n_iter=5, features=features, comm=comm)
    assert s, "Invalid SRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    # DSRM: broadcast S
    S = comm.bcast(S)
    X = []
    W = []
    # DSRM: only append on rank 0
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    tmp_noise = 0.1*np.random.random((voxels, samples))
    if rank == 0:
        W.append(Q)
        X.append(Q.dot(S) + tmp_noise)
    else:
        W.append(None)
        X.append(None)
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        s.transform(X)
    if rank == 0:
        print("Test: transforming before fitting the model")
    # Check that it does NOT run with 1 subject
    with pytest.raises(ValueError):
        s.fit(X)
    if rank == 0:
        print("Test: running SRM with 1 subject")
    # DSRM: cyclic distribution of subject data, otherwise None
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        tmp_noise = 0.1*np.random.random((voxels, samples))
        if subject % nrank == rank:
            W.append(Q)
            X.append(Q.dot(S) + tmp_noise)
        else:
            W.append(None)
            X.append(None)
    # Check that runs with 2 subject
    s.fit(X)
    # Regression check against the shared response computed by v0.4.
    from pathlib import Path
    sr_v0_4 = np.load(Path(__file__).parent / "sr_v0_4.npz")['sr']
    assert(np.allclose(sr_v0_4, s.s_))
    assert len(s.w_) == subjects, (
        "Invalid computation of SRM! (wrong # subjects in W)")
    # A rank only holds the W entries for its own subjects; others are None.
    for subject in range(subjects):
        if s.w_[subject] is not None:
            assert s.w_[subject].shape[0] == voxels, (
                "Invalid computation of SRM! (wrong # voxels in W)")
            assert s.w_[subject].shape[1] == features, (
                "Invalid computation of SRM! (wrong # features in W)")
            ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])
                                   - np.eye(s.w_[subject].shape[1]),
                                   'fro')
            assert ortho < 1e-7, "A Wi mapping is not orthonormal in SRM."
            difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.s_),
                                        'fro')
            datanorm = np.linalg.norm(X[subject], 'fro')
            assert difference/datanorm < 1.0, (
                "Model seems incorrectly computed.")
    assert s.s_.shape[0] == features, (
        "Invalid computation of SRM! (wrong # features in S)")
    assert s.s_.shape[1] == samples, (
        "Invalid computation of SRM! (wrong # samples in S)")
    # Check that it does run to compute the shared response after the model
    # computation
    new_s = s.transform(X)
    assert len(new_s) == subjects, (
        "Invalid computation of SRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        if new_s[subject] is not None:
            assert new_s[subject].shape[0] == features, (
                "Invalid computation of SRM! (wrong # features after "
                "transform)")
            assert new_s[subject].shape[1] == samples, (
                "Invalid computation of SRM! (wrong # samples after "
                "transform)")
    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        s.transform([X[1]])
    if rank == 0:
        print("Test: transforming with non-matching number of subjects")
    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        s.set_params(features=(samples+1))
        s.fit(X)
    if rank == 0:
        print("Test: not enough samples")
    # Check that it does not run with different number of samples (TRs)
    if rank == 0:
        S2 = S[:, :-2]
        X.append(Q.dot(S2))
    else:
        X.append(None)
    with pytest.raises(ValueError):
        s.fit(X)
    if rank == 0:
        print("Test: different number of samples per subject")
# Invoked at module import time — presumably so the test executes on every
# rank when the module is launched via mpirun; TODO confirm against the
# project's MPI test runner setup.
test_distributed_srm()
| 5,271 | 32.794872 | 77 | py |
brainiak | brainiak-master/tests/funcalign/test_fastsrm.py | import os
import tempfile
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from brainiak.funcalign.fastsrm import (
FastSRM, _compute_and_save_corr_mat, _compute_and_save_subject_basis,
_compute_basis_subject_online, _reduced_space_compute_shared_response,
check_atlas, check_imgs, check_shared_response, create_temp_dir, fast_srm,
reduce_data, safe_load)
from brainiak.funcalign.srm import DetSRM
def to_path(X, dirpath):
    """Save a list of lists of arrays to disk and return their file paths.

    Parameters
    ----------
    X: list of list of array
        input data, indexed by subject then session
    dirpath: str
        directory in which the ``.npy`` files are written
    Returns
    -------
    paths: array of str
        paths[i, j] is the file holding the data of subject i, session j
    """
    def _save_one(subject_idx, session_idx, data):
        # np.save appends ".npy" to the stem automatically.
        stem = os.path.join(dirpath, "%i_%i" % (subject_idx, session_idx))
        np.save(stem, data)
        return stem + ".npy"

    return np.array([[_save_one(i, j, session_data)
                      for j, session_data in enumerate(sessions)]
                     for i, sessions in enumerate(X)])
def generate_data(n_voxels,
                  n_timeframes,
                  n_subjects,
                  n_components,
                  datadir,
                  noise_level=0.1,
                  input_format="array"):
    """Synthesize SRM test data from a 3-component shared response.

    Each subject gets an orthonormal mixing matrix Q (from a QR
    decomposition of random data); session data is Q @ (centered shared
    response) plus centered uniform noise. The random draws happen in a
    fixed order (Q per subject, then one noise draw per session), so the
    output is reproducible under a fixed numpy seed.

    Returns (data, W, S) where W[i] is subject i's ground-truth basis
    (n_components, n_voxels) and S[j] the centered shared response of
    session j. The data layout depends on ``input_format``:
    "array" (paths on disk under datadir), "list_of_list", or
    "list_of_array"; anything else raises ValueError.
    """
    n_sessions = len(n_timeframes)
    bounds = np.cumsum([0] + n_timeframes)
    session_slices = [slice(bounds[k], bounds[k + 1])
                      for k in range(n_sessions)]

    # Shared response: a 3-component helix over all timeframes.
    total_frames = int(np.sum(n_timeframes))
    theta = np.linspace(-4 * np.pi, 4 * np.pi, total_frames)
    z = np.linspace(-2, 2, total_frames)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))

    W = []
    X = []
    for subject in range(n_subjects):
        Q, R = np.linalg.qr(np.random.random((n_voxels, n_components)))
        W.append(Q.T)
        subject_sessions = []
        for sl in session_slices:
            S_s = S[:, sl]
            S_s = S_s - np.mean(S_s, axis=1, keepdims=True)
            noise = noise_level * np.random.random(
                (n_voxels, S_s.shape[1]))
            noise = noise - np.mean(noise, axis=1, keepdims=True)
            subject_sessions.append(Q.dot(S_s) + noise)
        X.append(subject_sessions)

    # Return the centered per-session shared responses.
    S = [(S[:, sl] - np.mean(S[:, sl], axis=1, keepdims=True))
         for sl in session_slices]

    if input_format == "array":
        # paths[i, j] holds the data of subject i during session j
        return to_path(X, datadir), W, S
    if input_format == "list_of_list":
        return X, W, S
    if input_format == "list_of_array":
        stacked = [
            np.concatenate([X[i][j].T for j in range(n_sessions)]).T
            for i in range(n_subjects)
        ]
        return stacked, W, S
    raise ValueError("Wrong input_format")
def test_generated_data():
    """Generated data, bases and shared responses have the expected shapes."""
    with tempfile.TemporaryDirectory() as datadir:
        # Sessions may have different numbers of timeframes, but the
        # numbers must agree across subjects.
        n_voxels = 10
        n_timeframes = [25, 24]
        n_subjects = 2
        n_components = 3  # number of components used for SRM model
        n_sessions = len(n_timeframes)
        np.random.seed(0)
        paths, W, S = generate_data(n_voxels, n_timeframes, n_subjects,
                                    n_components, datadir)
        # Every stored session must be (n_voxels, n_timeframes[session]).
        for subject in range(n_subjects):
            for session, n_frames in enumerate(n_timeframes):
                stored = np.load(paths[subject, session])
                assert stored.shape == (n_voxels, n_frames)
        # One basis per subject, each (n_components, n_voxels).
        assert len(W) == n_subjects
        assert all(w.shape == (n_components, n_voxels) for w in W)
        # One shared response per session, (n_components, n_timeframes[j]).
        assert len(S) == n_sessions
        for n_frames, s in zip(n_timeframes, S):
            assert s.shape == (n_components, n_frames)
def test_bad_aggregate():
    """FastSRM must reject aggregate values other than "mean" or None."""
    expected_message = "aggregate can have only value mean or None"
    with pytest.raises(ValueError, match=expected_message):
        FastSRM(aggregate="invalid")
def test_check_atlas():
    """check_atlas accepts valid atlases and rejects malformed ones.

    Covers: None passthrough, probabilistic (2D) and deterministic (1D)
    atlases given as arrays or .npy paths, and the error paths for wrong
    container type, wrong rank, and inconsistent region counts.
    """
    assert check_atlas(None) is None
    with pytest.raises(ValueError,
                       match=("Atlas is stored using type <class 'list'> "
                              "which is neither np.ndarray or str")):
        check_atlas([])
    # Probabilistic atlas: (n_supervoxels, n_voxels) weights.
    A = np.random.rand(10, 100)
    assert check_atlas(A) == (10, 100)
    with tempfile.TemporaryDirectory() as datadir:
        f = os.path.join(datadir, "atlas")
        np.save(f, A)
        assert check_atlas(f + ".npy") == (10, 100)
    # Deterministic atlas: one region label per voxel (label 0 excluded,
    # hence 5 regions over 11 voxels).
    A = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5])
    assert check_atlas(A) == (5, 11)
    with tempfile.TemporaryDirectory() as datadir:
        f = os.path.join(datadir, "atlas")
        np.save(f, A)
        assert check_atlas(f + ".npy") == (5, 11)
    with pytest.raises(ValueError,
                       match=("Atlas has 3 axes. It should have either "
                              "1 or 2 axes.")):
        check_atlas(np.random.rand(5, 1, 2))
    with pytest.raises(ValueError,
                       match=(r"Number of regions in the atlas is lower than "
                              r"the number of components \(3 < 5\)")):
        check_atlas(np.random.rand(3, 10), n_components=5)
    with pytest.raises(ValueError,
                       match=(r"Number of regions in the atlas is bigger than "
                              r"the number of voxels \(5 > 2\)")):
        check_atlas(np.random.rand(5, 2))
# %-format templates for error-message regexes shared by the checks below.
empty_list_error = "%s is a list of length 0 which is not valid"
array_type_error = ("%s should be of type np.ndarray but is of type %s")
array_2axis_error = ("%s must have exactly 2 axes but has %i axes")
def test_check_imgs():
    """check_imgs rejects every malformed input layout with a clear error.

    Walks through the invalid cases in order: wrong container types,
    empty/inconsistent lists, wrong array ranks, mismatched session or
    voxel counts across subjects, too few timeframes for the requested
    components, and an atlas whose voxel count disagrees with the data.
    """
    # --- container / element type errors ---
    with pytest.raises(
            ValueError,
            match=(r"Since imgs is a list, it should be a list of list "
                   r"of arrays or a list "
                   r"of arrays but imgs\[0\] has type <class 'str'>")):
        check_imgs(["bla"])
    with pytest.raises(
            ValueError,
            match=("Input imgs should either be a list or an array but "
                   "has type <class 'str'>")):
        check_imgs("bla")
    with pytest.raises(ValueError, match=empty_list_error % "imgs"):
        check_imgs([])
    with pytest.raises(
            ValueError,
            match=r"imgs\[1\] has type <class 'str'> whereas imgs\[0\] has "
            "type <class 'int'>. This is inconsistent."):
        check_imgs([0, "bla"])
    with pytest.raises(ValueError, match=empty_list_error % r"imgs\[0\]"):
        check_imgs([[]])
    with pytest.raises(
            ValueError,
            match=(r"imgs\[1\] has length 1 whereas imgs\[0\] has length 2."
                   " All subjects should have the same number of sessions.")):
        check_imgs([["a", "a"], ["a"]])
    with pytest.raises(ValueError,
                       match=array_type_error %
                       (r"imgs\[0\]\[0\]", r"<class 'str'>")):
        check_imgs([["bka"]])
    # --- array rank errors ---
    with pytest.raises(ValueError,
                       match=array_2axis_error % (r"imgs\[0\]\[0\]", 1)):
        check_imgs([[np.random.rand(5)]])
    with pytest.raises(ValueError,
                       match=array_2axis_error % (r"imgs\[0\]", 1)):
        check_imgs([np.random.rand(5)])
    with pytest.raises(ValueError,
                       match=(r"imgs\[0, 0\] is stored using type "
                              "<class 'numpy.float64'> which is not a str")):
        check_imgs(np.random.rand(5, 3))
    with pytest.raises(ValueError, match=array_2axis_error % (r"imgs", 1)):
        check_imgs(np.random.rand(5))
    # --- cross-subject consistency and sizing errors ---
    with pytest.raises(
            ValueError,
            match=("The number of subjects should be greater than 1")):
        check_imgs([np.random.rand(5, 3)])
    with pytest.raises(
            ValueError,
            match=("Subject 1 Session 0 does not have the same number "
                   "of timeframes as Subject 0 Session 0")):
        check_imgs([np.random.rand(10, 5), np.random.rand(10, 10)])
    with pytest.raises(
            ValueError,
            match=("Subject 1 Session 0 does not have the same number "
                   "of voxels as Subject 0 Session 0")):
        check_imgs([np.random.rand(10, 5), np.random.rand(20, 5)])
    with pytest.raises(
            ValueError,
            match=("Total number of timeframes is shorter than number "
                   r"of components \(5 < 8\)")):
        check_imgs([np.random.rand(10, 5),
                    np.random.rand(10, 5)],
                   n_components=8)
    with pytest.raises(
            ValueError,
            match=("Number of voxels in the atlas is not the same as "
                   r"the number of voxels in input data \(11 != 10\)")):
        check_imgs([np.random.rand(10, 5),
                    np.random.rand(10, 5)],
                   n_components=3,
                   atlas_shape=(8, 11))
def test_check_shared():
    """check_shared_response validates and normalizes shared responses.

    Builds equivalent shared responses in the four accepted layouts
    (list of list per subject/session, list per subject, list per
    session, single array) for 2 subjects x 2 sessions with 3 and 2
    timeframes, then checks error paths and that each valid layout is
    reshaped to a per-session list (adding a session axis when needed).
    """
    n_subjects = 2
    n_sessions = 2
    # input_shapes[i, j] = (n_voxels, n_timeframes) of subject i, session j.
    input_shapes = np.zeros((n_subjects, n_sessions, 2))
    input_shapes[0, 0, 0] = 10
    input_shapes[0, 0, 1] = 3
    input_shapes[0, 1, 0] = 10
    input_shapes[0, 1, 1] = 2
    input_shapes[1, 0, 0] = 10
    input_shapes[1, 0, 1] = 3
    input_shapes[1, 1, 0] = 10
    input_shapes[1, 1, 1] = 2
    shared_list_list = [[
        np.array([[1, 2, 3], [4, 5, 6]]),
        np.array([[1, 2], [4, 5]]),
    ], [
        np.array([[2, 3, 4], [5, 6, 7]]),
        np.array([[2, 3], [5, 6]]),
    ]]
    # Per-subject layout: sessions concatenated along the time axis.
    shared_list_subjects = [
        np.array([[1, 2, 3, 1, 2], [4, 5, 6, 4, 5]]),
        np.array([[2, 3, 4, 2, 3], [5, 6, 7, 5, 6]])
    ]
    # Per-session layout: already averaged over the two subjects.
    shared_list_sessions = [
        np.array([[1.5, 2.5, 3.5], [4.5, 5.5, 6.5]]),
        np.array([[1.5, 2.5], [4.5, 5.5]]),
    ]
    # Single-array layout: subject average with sessions concatenated.
    shared_array = np.array([[1.5, 2.5, 3.5, 1.5, 2.5],
                             [4.5, 5.5, 6.5, 4.5, 5.5]])
    # --- error paths ---
    with pytest.raises(ValueError,
                       match=(r"shared_response should be either a list or an "
                              "array but is of type <class 'str'>")):
        check_shared_response("bla")
    with pytest.raises(
            ValueError,
            match=(r"shared_response is a list but shared_response\[0\] "
                   "is neither a list or an array. This is invalid.")):
        check_shared_response(["bla", "bli"])
    with pytest.raises(
            ValueError,
            match=(r"shared_response\[0\] is a list but shared_response\[1\] "
                   "is not a list this is incompatible")):
        check_shared_response(
            [[np.random.rand(2, 2)], np.array([1])], aggregate=None)
    with pytest.raises(ValueError,
                       match=(r"shared_response\[1\] has len 1 whereas "
                              r"shared_response\[0\] has len 2. They should "
                              "have same len")):
        check_shared_response([[np.random.rand(2, 2),
                                np.random.rand(2, 2)], [np.random.rand(2, 2)]],
                              aggregate=None)
    with pytest.raises(
            ValueError,
            match=('Number of timeframes in input images during session 0 '
                   'does not match the number of timeframes during session '
                   r'0 of shared_response \(2 != 3\)')):
        check_shared_response(
            [np.random.rand(2, 2), np.random.rand(2, 2)],
            aggregate="mean",
            input_shapes=input_shapes)
    with pytest.raises(ValueError,
                       match=("Number of components in shared_response "
                              "during session 0 is different than "
                              "the number of components of the "
                              r"model \(2 != 4\)")):
        check_shared_response(np.random.rand(2, 10), n_components=4)
    with pytest.raises(ValueError,
                       match=("self.aggregate has value 'mean' but shared "
                              "response is a list of list. "
                              "This is incompatible")):
        added_session, reshaped_shared = check_shared_response(
            shared_list_list,
            aggregate="mean",
            n_components=2,
            input_shapes=input_shapes)
    # --- valid layouts: per-subject list adds a session axis ---
    added_session, reshaped_shared = check_shared_response(
        shared_list_subjects,
        aggregate=None,
        n_components=2,
        input_shapes=input_shapes)
    assert added_session
    assert_array_almost_equal(np.array(reshaped_shared),
                              shared_array.reshape(1, 2, 5))
    # Per-session list is already in the canonical layout.
    added_session, reshaped_shared = check_shared_response(
        shared_list_sessions,
        aggregate="mean",
        n_components=2,
        input_shapes=input_shapes)
    assert not added_session
    for j in range(len(reshaped_shared)):
        assert_array_almost_equal(reshaped_shared[j], shared_list_sessions[j])
    # Single array adds a session axis.
    added_session, reshaped_shared = check_shared_response(
        shared_array,
        aggregate="mean",
        n_components=2,
        input_shapes=input_shapes)
    assert added_session
    assert_array_almost_equal(np.array(reshaped_shared),
                              shared_array.reshape(1, 2, 5))
def test_reduce_data_dummyatlases():
    """reduce_data with degenerate atlases behaves like identity or mean.

    An atlas assigning each voxel its own region must leave the data
    unchanged (up to transposition); an atlas mapping every voxel to one
    region must produce the voxel-wise mean.
    """
    n_jobs = 1
    with tempfile.TemporaryDirectory() as datadir:
        for n_timeframes in ([25, 24], [25, 25]):
            # We authorize different timeframes for different sessions
            # but they should be the same across subject
            n_voxels = 10
            n_subjects = 2
            n_components = 3  # number of components used for SRM model
            n_sessions = len(n_timeframes)
            np.random.seed(0)
            paths, _, _ = generate_data(n_voxels, n_timeframes, n_subjects,
                                        n_components, datadir)
            # test atlas that reduces nothing
            atlas = np.arange(1, n_voxels + 1)
            data = reduce_data(paths,
                               atlas=atlas,
                               n_jobs=n_jobs,
                               low_ram=False)
            for i in range(n_subjects):
                for j in range(n_sessions):
                    # reduce_data returns (timeframes, supervoxels), hence .T
                    assert_array_almost_equal(data[i, j].T,
                                              np.load(paths[i, j]))
            # test atlas that reduces everything
            atlas = np.ones(n_voxels)
            data = reduce_data(paths,
                               atlas=atlas,
                               n_jobs=n_jobs,
                               low_ram=False)
            for i in range(n_subjects):
                for j in range(n_sessions):
                    assert_array_almost_equal(
                        data[i, j].T.flatten(),
                        np.mean(np.load(paths[i, j]), axis=0))
def test_reduce_data_outputshapes():
    """reduce_data output has shape (timeframes, supervoxels) per session.

    Covers both a probabilistic (2D) and a deterministic (1D) atlas, and
    both the in-memory and the low_ram (on-disk) return paths.
    """
    n_jobs = 1
    with tempfile.TemporaryDirectory() as datadir:
        for n_timeframes in ([25, 24], [25, 25]):
            # We authorize different timeframes for different sessions
            # but they should be the same across subject
            n_voxels = 10
            n_subjects = 2
            n_components = 3  # number of components used for SRM model
            n_supervoxels = 5  # number of components of the atlas
            n_sessions = len(n_timeframes)
            np.random.seed(0)
            paths, _, _ = generate_data(n_voxels, n_timeframes, n_subjects,
                                        n_components, datadir)
            # Test if reduced data has the good shape
            # probabilistic atlas
            atlas = np.random.rand(n_supervoxels, n_voxels)
            data = reduce_data(paths,
                               atlas=atlas,
                               n_jobs=n_jobs,
                               low_ram=False,
                               temp_dir=None)
            for subject in range(n_subjects):
                for session in range(n_sessions):
                    assert data[subject, session].shape == (
                        n_timeframes[session], n_supervoxels)
            # deterministic atlas
            det_atlas = np.round(np.random.rand(n_voxels) * n_supervoxels)
            # Redraw until exactly n_supervoxels distinct nonzero labels
            # are present (label 0 is excluded by the [1:]).
            n_unique = len(np.unique(det_atlas)[1:])
            while n_unique != n_supervoxels:
                det_atlas = np.round(np.random.rand(n_voxels) * n_supervoxels)
                n_unique = len(np.unique(det_atlas)[1:])
            data = reduce_data(paths,
                               atlas=det_atlas,
                               n_jobs=n_jobs,
                               low_ram=True,
                               temp_dir=datadir)
            # low_ram=True: entries are paths to .npy files, not arrays.
            for subject in range(n_subjects):
                for session in range(n_sessions):
                    assert (np.load(data[subject, session]).shape == (
                        n_timeframes[session], n_supervoxels))
def test_reduced_data_srm():
    """SRM building blocks recover the ground truth on noiseless data.

    With noise_level=0 and an identity-like atlas, checks that
    (1) the shared response has the right shape, (2) bases recomputed
    from the true shared response match the generating matrices W and
    are orthonormal, (3) the reduced-space shared response equals both
    the explicit subject average and the ground truth, and (4) fast_srm
    reconstructs the input data from its shared response and bases.
    """
    n_jobs = 1
    with tempfile.TemporaryDirectory() as datadir:
        np.random.seed(0)
        # We authorize different timeframes for different sessions but
        # they should be the same across subject
        n_voxels = 10
        n_timeframes = [25, 24]
        n_subjects = 5
        n_components = 3  # number of components used for SRM model
        n_sessions = len(n_timeframes)
        # Final argument 0 = noise level, so the model is exact.
        paths, W, S = generate_data(n_voxels, n_timeframes, n_subjects,
                                    n_components, datadir, 0)
        # One region per voxel: reduction is the identity.
        atlas = np.arange(1, n_voxels + 1)
        data = reduce_data(paths,
                           atlas=atlas,
                           n_jobs=n_jobs,
                           low_ram=False,
                           temp_dir=None)
        # Test if shared response has the good shape
        shared_response_list = \
            _reduced_space_compute_shared_response(
                data,
                reduced_basis_list=None,
                n_components=n_components
            )
        assert len(shared_response_list) == n_sessions
        for session in range(n_sessions):
            assert (shared_response_list[session].shape == (
                n_timeframes[session], n_components))
        # Test basis from shared response
        for i, sessions in enumerate(paths):
            basis = _compute_basis_subject_online(
                sessions, [S[k].T for k in range(len(S))])
            # test shape
            assert basis.shape == (n_components, n_voxels)
            # test orthogonality
            assert np.allclose(basis.dot(basis.T), np.eye(n_components))
            # test correctness
            assert_array_almost_equal(basis, W[i], 2)
        # Test reduced_data_shared_response
        shared_response_list = _reduced_space_compute_shared_response(
            data, reduced_basis_list=W, n_components=n_components)
        for session in range(n_sessions):
            # Explicit average of per-subject projections onto the bases.
            S_real = np.mean(
                [data[i, session].dot(W[i].T) for i in range(n_subjects)],
                axis=0)
            assert_array_almost_equal(shared_response_list[session], S_real)
            assert_array_almost_equal(shared_response_list[session],
                                      S[session].T)
        shared_response_list = fast_srm(data, n_components=n_components)
        for i, sessions in enumerate(paths):
            basis = _compute_basis_subject_online(sessions,
                                                  shared_response_list)
            for j, session in enumerate(sessions):
                # shared_response @ basis should reconstruct the raw data.
                assert_array_almost_equal(shared_response_list[j].dot(basis),
                                          np.load(paths[i, j]).T, 3)
def test_compute_and_save():
    """Check the on-disk correlation/basis helpers.

    Saves per-(subject, session) correlation matrices with
    ``_compute_and_save_corr_mat`` and then verifies that
    ``_compute_and_save_subject_basis`` recovers each subject's true basis W
    from those files.
    """
    with tempfile.TemporaryDirectory() as datadir:
        np.random.seed(0)
        n_voxels = 10
        n_timeframes = [25, 24]
        n_subjects = 5
        n_components = 3  # number of components used for SRM model
        paths, W, S = generate_data(n_voxels, n_timeframes, n_subjects,
                                    n_components, datadir, 0)
        # paths.T iterates sessions first; store a corr matrix per
        # (session, subject) pair using the true shared response.
        for m, subjects in enumerate(paths.T):
            for subject in subjects:
                _compute_and_save_corr_mat(subject, S[m].T, datadir)
        for i, sessions in enumerate(paths):
            # Returns a path to the saved basis; compare to ground truth.
            basis = _compute_and_save_subject_basis(i, sessions, datadir)
            assert_array_almost_equal(np.load(basis), W[i], 3)
def test_fastsrm_class():
    """Exercise the FastSRM estimator API and its input validation.

    Covers: NotFittedError before fit, temp-dir collision errors, exact
    error messages for bad ``subjects_indexes`` / ``sessions_indexes``
    arguments, and ``.clean()`` removal of the temporary directory.
    """
    n_jobs = 1
    with tempfile.TemporaryDirectory() as datadir:
        np.random.seed(0)
        # We authorize different timeframes for different sessions
        # but they should be the same across subject
        n_voxels = 10
        n_timeframes = [25, 24]
        n_subjects = 5
        n_components = 3  # number of components used for SRM model
        np.random.seed(0)
        paths, W, S = generate_data(n_voxels, n_timeframes, n_subjects,
                                    n_components, datadir, 0)
        atlas = np.arange(1, n_voxels + 1)
        srm = FastSRM(atlas=atlas,
                      n_components=n_components,
                      n_iter=10,
                      temp_dir=datadir,
                      low_ram=True,
                      verbose=True,
                      n_jobs=n_jobs)
        # Raises an error because model is not fitted yet
        with pytest.raises(NotFittedError):
            srm.transform(paths)
        srm.fit(paths)
        # An error can occur if temporary directory already exists
        with pytest.raises(ValueError,
                           match=("Path %s already exists. When a model "
                                  "is used, filesystem should be "
                                  r"cleaned by using the .clean\(\) "
                                  "method" % srm.temp_dir)):
            # Error can occur if the filesystem is uncleaned
            create_temp_dir(srm.temp_dir)
            create_temp_dir(srm.temp_dir)
        shared_response = srm.transform(paths)
        # Raise error when wrong index
        with pytest.raises(ValueError,
                           match=("subjects_indexes should be either "
                                  "a list, an array or None but "
                                  "received type <class 'int'>")):
            srm.transform(paths, subjects_indexes=1000)
        with pytest.raises(ValueError,
                           match=("subjects_indexes should be either "
                                  "a list, an array or None but "
                                  "received type <class 'int'>")):
            srm.inverse_transform(shared_response, subjects_indexes=1000)
        with pytest.raises(ValueError,
                           match=("sessions_indexes should be either "
                                  "a list, an array or None but "
                                  "received type <class 'int'>")):
            srm.inverse_transform(shared_response, sessions_indexes=1000)
        # Mismatch between number of images and number of requested subjects.
        with pytest.raises(ValueError,
                           match=("Input data imgs has len 5 whereas "
                                  "subject_indexes has len 1. "
                                  "The number of basis used to compute "
                                  "the shared response should be equal to "
                                  "the number of subjects in imgs")):
            srm.transform(paths, subjects_indexes=[0])
        # Out-of-range subject / session indexes.
        with pytest.raises(ValueError,
                           match=("Index 1 of subjects_indexes has value 8 "
                                  "whereas value should be between 0 and 4")):
            srm.transform(paths[:2], subjects_indexes=[0, 8])
        with pytest.raises(ValueError,
                           match=("Index 1 of sessions_indexes has value 8 "
                                  "whereas value should be between 0 and 1")):
            srm.inverse_transform(shared_response, sessions_indexes=[0, 8])
        # Check behavior of .clean
        assert os.path.exists(srm.temp_dir)
        srm.clean()
        assert not os.path.exists(srm.temp_dir)
# Module-level problem sizes shared by the parametrized FastSRM tests below
# (their parametrize decorators reference n_voxels at import time).
n_voxels = 10
n_subjects = 5
n_components = 3  # number of components used for SRM model
def apply_aggregate(shared_response, aggregate, input_format):
    """Normalize a shared response into a list with one entry per session.

    When ``aggregate`` is None the raw per-subject responses are averaged
    across subjects; otherwise the response is assumed to be aggregated
    already and is only wrapped so that the result is always indexable by
    session.
    """
    single_session = input_format == "list_of_array"
    if aggregate is not None:
        # Already aggregated: only wrap the single-session case in a list.
        return [shared_response] if single_session else shared_response
    if single_session:
        # One array per subject: average them into one session entry.
        return [np.mean(shared_response, axis=0)]
    # shared_response[subject][session]: average across subjects per session.
    num_subjects = len(shared_response)
    num_sessions = len(shared_response[0])
    return [
        np.mean([shared_response[s][j] for s in range(num_subjects)], axis=0)
        for j in range(num_sessions)
    ]
def apply_input_format(X, input_format):
    """Load/reshape test input into nested [subject][session] lists.

    Returns the normalized data together with the number of sessions it
    contains.
    """
    if input_format == "array":
        # X holds .npy paths indexed [subject][session]; load each one.
        loaded = [[np.load(path) for path in subject_paths]
                  for subject_paths in X]
        return loaded, len(X[0])
    if input_format == "list_of_array":
        # One array per subject: treat it as a single session.
        return [[subject_data] for subject_data in X], 1
    # Already a list of per-subject session lists; pass through unchanged.
    return X, len(X[0])
@pytest.mark.parametrize(
    "input_format, low_ram, tempdir, atlas, n_jobs, n_timeframes, aggregate",
    [("array", True, True, None, 1, [25, 25], "mean"),
     ("list_of_list", False, False, np.arange(1, n_voxels + 1), 1, [25, 24],
      None),
     ("list_of_array", True, False, np.eye(n_voxels), 1, [25, 25], None),
     ("list_of_array", False, True, None, 1, [25, 24], "mean")])
def test_fastsrm_class_correctness(input_format, low_ram, tempdir, atlas,
                                   n_jobs, n_timeframes, aggregate):
    """Numerical correctness of FastSRM across input formats and options.

    Verifies fit/transform vs fit_transform agreement, the W.S
    decomposition of the data, stability of the shared response under
    leave-one-subject-out, and ``add_subjects`` reproducibility.
    """
    with tempfile.TemporaryDirectory() as datadir:
        np.random.seed(0)
        X, W, S = generate_data(n_voxels, n_timeframes, n_subjects,
                                n_components, datadir, 0, input_format)
        XX, n_sessions = apply_input_format(X, input_format)
        if tempdir:
            temp_dir = datadir
        else:
            temp_dir = None
        srm = FastSRM(atlas=atlas,
                      n_components=n_components,
                      n_iter=10,
                      temp_dir=temp_dir,
                      low_ram=low_ram,
                      verbose=True,
                      n_jobs=n_jobs,
                      aggregate=aggregate,
                      seed=0)
        # Check that there is no difference between fit_transform
        # and fit then transform
        srm.fit(X)
        basis = [safe_load(b) for b in srm.basis_list]
        shared_response_raw = srm.transform(X)
        shared_response = apply_aggregate(shared_response_raw, aggregate,
                                          input_format)
        shared_response_fittransform = apply_aggregate(srm.fit_transform(X),
                                                       aggregate, input_format)
        for j in range(n_sessions):
            assert_array_almost_equal(shared_response_fittransform[j],
                                      shared_response[j])
        # Check that the decomposition works
        for i in range(n_subjects):
            for j in range(n_sessions):
                assert_array_almost_equal(shared_response[j].T.dot(basis[i]),
                                          XX[i][j].T, 3)
        # Check that if we use all subjects but one if gives almost the
        # same shared response
        shared_response_partial_raw = srm.transform(X[1:5],
                                                    subjects_indexes=list(
                                                        range(1, 5)))
        shared_response_partial = apply_aggregate(shared_response_partial_raw,
                                                  aggregate, input_format)
        for j in range(n_sessions):
            assert_array_almost_equal(shared_response_partial[j],
                                      shared_response[j], 3)
        # Check that if we perform add 2 times the same subject we
        # obtain the same decomposition
        srm.add_subjects(X[:1], shared_response_raw)
        assert_array_almost_equal(safe_load(srm.basis_list[0]),
                                  safe_load(srm.basis_list[-1]))
@pytest.mark.parametrize(
    "input_format, low_ram, tempdir, atlas, n_jobs, n_timeframes, aggregate",
    [("array", True, True, None, 1, [25, 25], "mean"),
     ("list_of_list", False, False, np.arange(1, n_voxels + 1), 1, [25, 24],
      None),
     ("list_of_array", True, False, np.eye(n_voxels), 1, [25, 25], None),
     ("list_of_array", False, True, None, 1, [25, 24], "mean")])
def test_class_srm_inverse_transform(input_format, low_ram, tempdir, atlas,
                                     n_jobs, n_timeframes, aggregate):
    """Check FastSRM.inverse_transform reconstructs the input data.

    For each input format, projects the shared response back to subject
    space with explicit and default subject/session index selections and
    compares against the original (possibly on-disk) data.
    """
    with tempfile.TemporaryDirectory() as datadir:
        X, W, S = generate_data(n_voxels, n_timeframes, n_subjects,
                                n_components, datadir, 0, input_format)
        if tempdir:
            temp_dir = datadir
        else:
            temp_dir = None
        srm = FastSRM(atlas=atlas,
                      n_components=n_components,
                      n_iter=10,
                      temp_dir=temp_dir,
                      low_ram=low_ram,
                      verbose=True,
                      n_jobs=n_jobs,
                      aggregate=aggregate,
                      seed=0)
        # Check that there is no difference between fit_transform
        # and fit then transform
        srm.fit(X)
        shared_response_raw = srm.transform(X)
        # Check inverse transform
        if input_format == "list_of_array":
            # Single-session format: reconstructions are indexed by subject.
            reconstructed_data = srm.inverse_transform(shared_response_raw,
                                                       subjects_indexes=[0, 2])
            for i, ii in enumerate([0, 2]):
                assert_array_almost_equal(reconstructed_data[i], X[ii], 3)
            reconstructed_data = srm.inverse_transform(shared_response_raw,
                                                       subjects_indexes=None)
            for i in range(len(X)):
                assert_array_almost_equal(reconstructed_data[i], X[i], 3)
        else:
            # Multi-session formats: reconstructions indexed
            # [subject][session]; X entries may be paths, hence safe_load.
            reconstructed_data = srm.inverse_transform(shared_response_raw,
                                                       sessions_indexes=[1],
                                                       subjects_indexes=[0, 2])
            for i, ii in enumerate([0, 2]):
                for j, jj in enumerate([1]):
                    assert_array_almost_equal(reconstructed_data[i][j],
                                              safe_load(X[ii][jj]), 3)
            reconstructed_data = srm.inverse_transform(shared_response_raw,
                                                       subjects_indexes=None,
                                                       sessions_indexes=None)
            for i in range(len(X)):
                for j in range(len(X[i])):
                    assert_array_almost_equal(reconstructed_data[i][j],
                                              safe_load(X[i][j]), 3)
def test_fastsrm_identity():
    """FastSRM with atlas=None should match the deterministic SRM (DetSRM)."""
    # In this function we test whether fastsrm and DetSRM have
    # identical behavior when atlas=None
    # We authorize different timeframes for different sessions
    # but they should be the same across subject
    n_voxels = 8
    n_timeframes = [4, 5, 6]
    n_subjects = 2
    n_components = 3  # number of components used for SRM model
    np.random.seed(0)
    paths, W, S = generate_data(n_voxels,
                                n_timeframes,
                                n_subjects,
                                n_components,
                                None,
                                input_format="list_of_array")
    # Test if generated data has the good shape
    for subject in range(n_subjects):
        assert paths[subject].shape == (n_voxels, np.sum([n_timeframes]))
    srm = DetSRM(n_iter=11, features=3, rand_seed=0)
    srm.fit(paths)
    shared = srm.transform(paths)
    fastsrm = FastSRM(atlas=None,
                      n_components=3,
                      verbose=True,
                      seed=0,
                      n_jobs=1,
                      n_iter=10)
    fastsrm.fit(paths)
    shared_fast = fastsrm.transform(paths)
    # FastSRM's shared response should equal the subject-mean of DetSRM's.
    assert_array_almost_equal(shared_fast, np.mean(shared, axis=0))
    for i in range(n_subjects):
        # DetSRM stores bases as w_ (voxels x features); FastSRM stores the
        # transpose, so compare against srm.w_[i].T.
        assert_array_almost_equal(safe_load(fastsrm.basis_list[i]),
                                  srm.w_[i].T)
def load_and_concat(paths):
    """Load each subject's session files and concatenate them over time.

    Parameters
    ----------
    paths : 2D array of str, indexed [subject, session]
        Paths to ``.npy`` files (presumably voxels x timeframes arrays —
        matches how callers use the result).

    Returns
    -------
    list of np.ndarray
        One array per subject with all sessions concatenated along axis 1,
        suitable as input data for ProbSRM.
    """
    return [
        np.concatenate([np.load(path) for path in subject_paths], axis=1)
        for subject_paths in paths
    ]
def test_consistency_paths_data():
    """Fitting from paths and from in-memory arrays must give the same basis."""
    with tempfile.TemporaryDirectory() as datadir:
        # In this function we test that input format
        # does not change the results
        n_voxels = 8
        n_timeframes = [4, 5, 6]
        n_subjects = 2
        n_components = 3  # number of components used for SRM model
        np.random.seed(0)
        paths, W, S = generate_data(n_voxels,
                                    n_timeframes,
                                    n_subjects,
                                    n_components,
                                    datadir,
                                    input_format="array")
        # NOTE(review): leftover debug output; harmless but could be removed.
        print()
        print("shape", paths.shape)
        fastsrm = FastSRM(
            n_components=3,
            atlas=None,
            verbose=True,
            seed=0,
            n_jobs=1,
            n_iter=10,
        )
        # Fit once from file paths, once from loaded arrays; with the same
        # seed both runs should produce the same subject basis.
        fastsrm.fit(paths)
        b0 = fastsrm.basis_list[0]
        fastsrm.fit(load_and_concat(paths))
        b1 = fastsrm.basis_list[0]
        assert_array_almost_equal(b0, b1)
| 34,458 | 36.414767 | 79 | py |
brainiak | brainiak-master/tests/funcalign/test_rsrm.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
def test_can_instantiate():
    """Smoke-test the RSRM estimator: construction, fit, transform and
    error handling on invalid inputs (unfitted use, too few subjects,
    too few or mismatched samples)."""
    import brainiak.funcalign.rsrm
    s = brainiak.funcalign.rsrm.RSRM()
    assert s, "Invalid RSRM instance!"
    import numpy as np
    np.random.seed(0)
    voxels = 100
    samples = 500
    subjects = 2
    features = 3
    s = brainiak.funcalign.rsrm.RSRM(n_iter=5, features=features)
    assert s, "Invalid RSRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    X = []
    W = []
    # Orthonormal basis per subject via QR of a random matrix.
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        s.transform(X)
    print("Test: transforming before fitting the model")
    # Check that it does NOT run with 1 subject
    with pytest.raises(ValueError):
        s.fit(X)
    print("Test: running RSRM with 1 subject")
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that runs with 2 subject
    s.fit(X)
    assert len(s.w_) == subjects, (
        "Invalid computation of RSRM! (wrong # subjects in W)")
    for subject in range(subjects):
        # s_ holds per-subject individual components; w_ the mappings.
        assert s.s_[subject].shape[0] == voxels, (
            "Invalid computation of RSRM! (wrong # voxels in S)")
        assert s.s_[subject].shape[1] == samples, (
            "Invalid computation of RSRM! (wrong # samples in S)")
        assert s.w_[subject].shape[0] == voxels, (
            "Invalid computation of RSRM! (wrong # voxels in W)")
        assert s.w_[subject].shape[1] == features, (
            "Invalid computation of RSRM! (wrong # features in W)")
        ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])
                               - np.eye(s.w_[subject].shape[1]),
                               'fro')
        assert ortho < 1e-7, "A Wi mapping is not orthonormal in RSRM."
        difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.r_),
                                    'fro')
        datanorm = np.linalg.norm(X[subject], 'fro')
        assert difference/datanorm < 1.0, "Model seems incorrectly computed."
    assert s.r_.shape[0] == features, (
        "Invalid computation of RSRM! (wrong # features in R)")
    assert s.r_.shape[1] == samples, (
        "Invalid computation of RSRM! (wrong # samples in R)")
    # Check that it does run to compute the shared response after the model
    # computation
    new_r, _ = s.transform(X)
    assert len(new_r) == subjects, (
        "Invalid computation of RSRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        assert new_r[subject].shape[0] == features, (
            "Invalid computation of RSRM! (wrong # features after transform)")
        assert new_r[subject].shape[1] == samples, (
            "Invalid computation of RSRM! (wrong # samples after transform)")
    # Check that it does run to compute a new subject
    new_w, new_s = s.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of RSRM! (wrong # features for new subject)")
    assert new_s.shape[1] == samples, (
        "Invalid computation of RSRM! (wrong # samples for new subject)")
    assert new_s.shape[0] == voxels, (
        "Invalid computation of RSRM! (wrong # voxels for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of RSRM! (wrong # voxels for new subject)")
    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        s.transform(X[1])
    print("Test: transforming with non-matching number of subjects")
    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        s.set_params(features=(samples+1))
        s.fit(X)
    print("Test: not enough samples")
    # Check that it does not run with different number of samples (TRs)
    S2 = S[:, :-2]
    X.append(Q.dot(S2))
    with pytest.raises(ValueError):
        s.fit(X)
    print("Test: different number of samples per subject")
| 5,042 | 37.496183 | 78 | py |
brainiak | brainiak-master/tests/funcalign/test_srm.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.exceptions import NotFittedError
import pytest
def test_can_instantiate(tmp_path):
    """Smoke-test the probabilistic SRM: construction, fit, regression
    against a stored v0.4 shared response, transform, input validation,
    and save/load round-tripping of all fitted attributes."""
    import brainiak.funcalign.srm
    s = brainiak.funcalign.srm.SRM()
    assert s, "Invalid SRM instance!"
    import numpy as np
    np.random.seed(0)
    voxels = 100
    samples = 500
    subjects = 2
    features = 3
    s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)
    assert s, "Invalid SRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    X = []
    W = []
    # Orthonormal basis per subject via QR of a random matrix.
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        s.transform(X)
    print("Test: transforming before fitting the model")
    # Check that it does NOT run with 1 subject
    with pytest.raises(ValueError):
        s.fit(X)
    print("Test: running SRM with 1 subject")
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that runs with 2 subject
    s.fit(X)
    # Regression check against the shared response stored from v0.4.
    from pathlib import Path
    sr_v0_4 = np.load(Path(__file__).parent / "sr_v0_4.npz")['sr']
    assert(np.allclose(sr_v0_4, s.s_))
    assert len(s.w_) == subjects, (
        "Invalid computation of SRM! (wrong # subjects in W)")
    for subject in range(subjects):
        assert s.w_[subject].shape[0] == voxels, (
            "Invalid computation of SRM! (wrong # voxels in W)")
        assert s.w_[subject].shape[1] == features, (
            "Invalid computation of SRM! (wrong # features in W)")
        ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])
                               - np.eye(s.w_[subject].shape[1]),
                               'fro')
        assert ortho < 1e-7, "A Wi mapping is not orthonormal in SRM."
        difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.s_),
                                    'fro')
        datanorm = np.linalg.norm(X[subject], 'fro')
        assert difference/datanorm < 1.0, "Model seems incorrectly computed."
    assert s.s_.shape[0] == features, (
        "Invalid computation of SRM! (wrong # features in S)")
    assert s.s_.shape[1] == samples, (
        "Invalid computation of SRM! (wrong # samples in S)")
    # Check that it does run to compute the shared response after the model
    # computation
    new_s = s.transform(X)
    assert len(new_s) == subjects, (
        "Invalid computation of SRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        assert new_s[subject].shape[0] == features, (
            "Invalid computation of SRM! (wrong # features after transform)")
        assert new_s[subject].shape[1] == samples, (
            "Invalid computation of SRM! (wrong # samples after transform)")
    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        s.transform(X[1])
    print("Test: transforming with non-matching number of subjects")
    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        s.set_params(features=(samples+1))
        s.fit(X)
    print("Test: not enough samples")
    # Check that it does not run with different number of samples (TRs)
    S2 = S[:, :-2]
    X.append(Q.dot(S2))
    with pytest.raises(ValueError):
        s.fit(X)
    print("Test: different number of samples per subject")
    # Check save/load functionality for fitted SRM
    srm_path = tmp_path / 'srm.npz'
    s.save(srm_path)
    s_load = brainiak.funcalign.srm.load(srm_path)
    assert np.array_equal(s.s_, s_load.s_)
    for w, wl in zip(s.w_, s_load.w_):
        assert np.array_equal(w, wl)
    assert np.array_equal(s.sigma_s_, s_load.sigma_s_)
    assert np.array_equal(s.mu_, s_load.mu_)
    assert np.array_equal(s.rho2_, s_load.rho2_)
    assert s.features == s_load.features
    assert s.n_iter == s_load.n_iter
    assert s.rand_seed == s_load.rand_seed
    print("Test: save/load functionality")
def test_new_subject():
    """Check transform_subject for both SRM and DetSRM: it must reject
    unfitted models and wrong-shaped data, and return a voxels x features
    mapping for a new subject."""
    import brainiak.funcalign.srm
    s = brainiak.funcalign.srm.SRM()
    assert s, "Invalid SRM instance!"
    import numpy as np
    np.random.seed(0)
    voxels = 100
    samples = 500
    subjects = 3
    features = 3
    s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)
    assert s, "Invalid SRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    X = []
    W = []
    # Orthonormal basis per subject via QR of a random matrix.
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        s.transform_subject(X)
    print("Test: transforming before fitting the model")
    # Check that runs with 3 subject
    s.fit(X)
    # Check that you get an error when the data is the wrong shape
    with pytest.raises(ValueError):
        s.transform_subject(X[0].T)
    # Check that it does run to compute a new subject
    new_w = s.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of SRM! (wrong # features for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of SRM! (wrong # voxels for new subject)")
    # Check that these analyses work with the deterministic SRM too
    ds = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        ds.transform_subject(X)
    print("Test: transforming before fitting the model")
    # Check that runs with 3 subject
    ds.fit(X)
    # Check that you get an error when the data is the wrong shape
    with pytest.raises(ValueError):
        ds.transform_subject(X[0].T)
    # Check that it does run to compute a new subject
    new_w = ds.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of SRM! (wrong # features for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of SRM! (wrong # voxels for new subject)")
def test_det_srm():
    """Smoke-test the deterministic SRM (DetSRM): construction, fit,
    transform, transform_subject, and error handling on invalid inputs."""
    import brainiak.funcalign.srm
    model = brainiak.funcalign.srm.DetSRM()
    assert model, "Invalid DetSRM instance!"
    import numpy as np
    voxels = 100
    samples = 500
    subjects = 2
    features = 3
    model = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)
    assert model, "Invalid DetSRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    X = []
    W = []
    # Orthonormal basis per subject via QR of a random matrix.
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        model.transform(X)
    print("Test: transforming before fitting the model")
    # Check that it does NOT run with 1 subject
    with pytest.raises(ValueError):
        model.fit(X)
    print("Test: running DetSRM with 1 subject")
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))
    # Check that runs with 2 subject
    model.fit(X)
    assert len(model.w_) == subjects, (
        "Invalid computation of DetSRM! (wrong # subjects in W)")
    for subject in range(subjects):
        assert model.w_[subject].shape[0] == voxels, (
            "Invalid computation of DetSRM! (wrong # voxels in W)")
        assert model.w_[subject].shape[1] == features, (
            "Invalid computation of DetSRM! (wrong # features in W)")
        ortho = np.linalg.norm(model.w_[subject].T.dot(model.w_[subject])
                               - np.eye(model.w_[subject].shape[1]),
                               'fro')
        assert ortho < 1e-7, "A Wi mapping is not orthonormal in DetSRM."
        difference = np.linalg.norm(X[subject]
                                    - model.w_[subject].dot(model.s_),
                                    'fro')
        datanorm = np.linalg.norm(X[subject], 'fro')
        assert difference/datanorm < 1.0, "Model seems incorrectly computed."
    assert model.s_.shape[0] == features, (
        "Invalid computation of DetSRM! (wrong # features in S)")
    assert model.s_.shape[1] == samples, (
        "Invalid computation of DetSRM! (wrong # samples in S)")
    # Check that it does run to compute the shared response after the model
    # computation
    new_s = model.transform(X)
    assert len(new_s) == subjects, (
        "Invalid computation of DetSRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        assert new_s[subject].shape[0] == features, (
            "Invalid computation of DetSRM! (wrong # features after "
            "transform)")
        assert new_s[subject].shape[1] == samples, (
            "Invalid computation of DetSRM! (wrong # samples after transform)")
    # Check that it does run to compute a new subject
    new_w = model.transform_subject(X[0])
    assert new_w.shape[1] == features, (
        "Invalid computation of SRM! (wrong # features for new subject)")
    assert new_w.shape[0] == voxels, (
        "Invalid computation of SRM! (wrong # voxels for new subject)")
    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        model.transform(X[1])
    print("Test: transforming with non-matching number of subjects")
    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        model.set_params(features=(samples+1))
        model.fit(X)
    print("Test: not enough samples")
    # Check that it does not run with different number of samples (TRs)
    S2 = S[:, :-2]
    X.append(Q.dot(S2))
    with pytest.raises(ValueError):
        model.fit(X)
    print("Test: different number of samples per subject")
| 11,513 | 34.98125 | 79 | py |
brainiak | brainiak-master/tests/funcalign/test_sssrm.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_instance():
    """Check that an SSSRM model can be instantiated with default params."""
    import os
    # Force Theano onto CPU with float64 before the brainiak import.
    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
    from brainiak.funcalign import sssrm
    created = sssrm.SSSRM()
    assert created, "Invalid SSSRM instance!"
def test_wrong_input():
    """Check that SSSRM rejects invalid inputs: unfitted transform/predict,
    single-subject data in any of X/y/Z, alpha outside (0, 1), and
    non-positive gamma."""
    import os
    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
    from sklearn.utils.validation import NotFittedError
    import numpy as np
    import brainiak.funcalign.sssrm
    voxels = 100
    align_samples = 400
    samples = 500
    subjects = 2
    features = 3
    n_labels = 4
    model = brainiak.funcalign.sssrm.SSSRM(n_iter=5, features=features,
                                           gamma=10.0, alpha=0.1)
    assert model, "Invalid SSSRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    # Split the timeline into alignment and classification segments.
    S_align = S[:, :align_samples]
    S_classify = S[:, align_samples:]
    X = []
    Z = []
    Z2 = []
    W = []
    y = []
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S_align) + 0.1 * np.random.random((voxels, align_samples)))
    Z.append(Q.dot(S_classify)
             + 0.1 * np.random.random((voxels, samples - align_samples)))
    Z2.append(Q.dot(S_classify)
              + 0.1 * np.random.random((voxels, samples - align_samples)))
    y.append(np.repeat(
        np.arange(n_labels), (samples - align_samples)/n_labels))
    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        model.transform(X)
    print("Test: transforming before fitting the model")
    # Check that predict does NOT run before fitting the model
    with pytest.raises(NotFittedError):
        model.predict(X)
    print("Test: predicting before fitting the model")
    # Check that it does NOT run with 1 subject on X
    with pytest.raises(ValueError):
        model.fit(X, y, Z)
    print("Test: running SSSRM with 1 subject (alignment)")
    # Create more subjects align and classification data
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S_align)
                 + 0.1 * np.random.random((voxels, align_samples)))
        Z2.append(Q.dot(S_classify)
                  + 0.1 * np.random.random((voxels, samples - align_samples)))
    # Check that it does NOT run with 1 subject on y
    with pytest.raises(ValueError):
        model.fit(X, y, Z)
    print("Test: running SSSRM with 1 subject (labels)")
    # Create more subjects labels data
    for subject in range(1, subjects):
        y.append(np.repeat(
            np.arange(n_labels), (samples - align_samples)/n_labels))
    # Check that it does NOT run with 1 subject on Z
    with pytest.raises(ValueError):
        model.fit(X, y, Z)
    print("Test: running SSSRM with 1 subject (classif.)")
    # Check that alpha is in (0,1) range
    model_bad = brainiak.funcalign.sssrm.SSSRM(n_iter=1, features=features,
                                               gamma=10.0, alpha=1.5)
    assert model_bad, "Invalid SSSRM instance!"
    with pytest.raises(ValueError):
        model_bad.fit(X, y, Z)
    print("Test: running SSSRM with wrong alpha")
    # Check that gamma is positive
    model_bad = brainiak.funcalign.sssrm.SSSRM(n_iter=1, features=features,
                                               gamma=-0.1, alpha=0.2)
    assert model_bad, "Invalid SSSRM instance!"
    with pytest.raises(ValueError):
        model_bad.fit(X, y, Z)
    print("Test: running SSSRM with wrong gamma")
def test_sssrm():
    """End-to-end SSSRM test: fit on aligned + labeled data, check W/S
    shapes and orthonormality, transform, predict, and errors on
    mismatched sample/subject counts."""
    import os
    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
    import numpy as np
    import brainiak.funcalign.sssrm
    voxels = 100
    align_samples = 400
    samples = 500
    subjects = 2
    features = 3
    n_labels = 4
    model = brainiak.funcalign.sssrm.SSSRM(n_iter=5, features=features,
                                           gamma=10.0, alpha=0.1)
    assert model, "Invalid SSSRM instance!"
    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    S = np.vstack((x, y, z))
    # Split the timeline into alignment and classification segments.
    S_align = S[:, :align_samples]
    S_classify = S[:, align_samples:]
    X = []
    Z = []
    Z2 = []
    y = []
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    X.append(Q.dot(S_align) + 0.1 * np.random.random((voxels, align_samples)))
    Z.append(Q.dot(S_classify)
             + 0.1 * np.random.random((voxels, samples - align_samples)))
    Z2.append(Q.dot(S_classify)
              + 0.1 * np.random.random((voxels, samples - align_samples)))
    y.append(np.repeat(
        np.arange(n_labels), (samples - align_samples)/n_labels))
    # Create more subjects align and classification data
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        X.append(Q.dot(S_align)
                 + 0.1 * np.random.random((voxels, align_samples)))
        Z2.append(Q.dot(S_classify)
                  + 0.1 * np.random.random((voxels, samples - align_samples)))
    # Create more subjects labels data
    for subject in range(1, subjects):
        y.append(np.repeat(
            np.arange(n_labels), (samples - align_samples)/n_labels))
    # Set the logging level to INFO
    import logging
    logging.basicConfig(level=logging.INFO)
    # Check that runs with 2 subject
    model.fit(X, y, Z2)
    print("Test: fitting SSSRM successfully")
    assert len(model.w_) == subjects, (
        "Invalid computation of SSSRM! (wrong # subjects in W)")
    for subject in range(subjects):
        assert model.w_[subject].shape[0] == voxels, (
            "Invalid computation of SSSRM! (wrong # voxels in W)")
        assert model.w_[subject].shape[1] == features, (
            "Invalid computation of SSSRM! (wrong # features in W)")
        ortho = np.linalg.norm(model.w_[subject].T.dot(model.w_[subject])
                               - np.eye(model.w_[subject].shape[1]),
                               'fro')
        assert ortho < 1e-7, "A Wi mapping is not orthonormal in SSSRM."
        difference = np.linalg.norm(X[subject]
                                    - model.w_[subject].dot(model.s_),
                                    'fro')
        datanorm = np.linalg.norm(X[subject], 'fro')
        assert difference/datanorm < 1.0, "Model seems incorrectly computed."
    assert model.s_.shape[0] == features, (
        "Invalid computation of SSSRM! (wrong # features in S)")
    assert model.s_.shape[1] == align_samples, (
        "Invalid computation of SSSRM! (wrong # samples in S)")
    # Check that it does run to compute the shared response after the model
    # computation
    new_s = model.transform(X)
    print("Test: transforming with SSSRM successfully")
    assert len(new_s) == subjects, (
        "Invalid computation of SSSRM! (wrong # subjects after transform)")
    for subject in range(subjects):
        assert new_s[subject].shape[0] == features, (
            "Invalid computation of SSSRM! (wrong # features after transform)")
        assert new_s[subject].shape[1] == align_samples, (
            "Invalid computation of SSSRM! (wrong # samples after transform)")
    # Check that it predicts with the model
    pred = model.predict(Z2)
    print("Test: predicting with SSSRM successfully")
    assert len(pred) == subjects, (
        "Invalid computation of SSSRM! (wrong # subjects after predict)")
    for subject in range(subjects):
        assert pred[subject].size == samples - align_samples, (
            "SSSRM: wrong # answers in predict")
        # All predicted labels must lie in [0, n_labels).
        pred_labels = np.logical_and(pred[subject] >= 0,
                                     pred[subject] < n_labels)
        assert np.all(pred_labels), (
            "SSSRM: wrong class number output in predict")
    # Check that it does NOT run with non-matching number of subjects
    with pytest.raises(ValueError):
        model.transform(X[1])
    print("Test: transforming with non-matching number of subjects")
    # Check that it does not run without enough samples (TRs).
    with pytest.raises(ValueError):
        model.set_params(features=(align_samples + 1))
        model.fit(X, y, Z2)
    print("Test: not enough samples")
    # Check that it does not run with different number of samples (TRs)
    X2 = X
    X2[0] = Q.dot(S[:, :-2])
    with pytest.raises(ValueError):
        model.fit(X2, y, Z2)
    print("Test: different number of samples per subject")
    # Create one more subject
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    X.append(Q.dot(S_align) + 0.1 * np.random.random((voxels, align_samples)))
    Z2.append(Q.dot(S_classify)
              + 0.1 * np.random.random((voxels, samples - align_samples)))
    # Check that it does not run with different number of subjects in each
    # input
    with pytest.raises(ValueError):
        model.fit(X, y, Z2)
    print("Test: different number of subjects in the inputs")
    y.append(np.repeat(
        np.arange(n_labels), (samples - align_samples)/n_labels))
    with pytest.raises(ValueError):
        model.fit(X, y, Z)
    print("Test: different number of subjects in the inputs")
| 10,092 | 36.381481 | 79 | py |
brainiak | brainiak-master/tests/searchlight/test_searchlight.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import numpy as np
from mpi4py import MPI
import conftest
from brainiak.searchlight.searchlight import Searchlight
from brainiak.searchlight.searchlight import Diamond, Ball
"""Distributed Searchlight Test
"""
def cube_sfn(data, msk, myrad, bcast_var):
    """Searchlight kernel: flag non-empty, fully-True mask cubes with 1.0.

    Returns 1.0 only when the mask window contains at least one voxel and
    every voxel is set; otherwise returns None. The ``np.any`` guard keeps
    an empty window (where ``np.all`` is vacuously True) from matching.
    """
    return 1.0 if (np.any(msk) and np.all(msk)) else None
@conftest.skip_non_fork
def test_searchlight_with_cube():
    """Run a cube-shaped searchlight over a cubic mask spot.

    Only the center voxel (13, 13, 13) has its full 7x7x7 neighborhood
    inside the mask, so it is the only output expected to be 1.0; all
    other positions must remain None.
    """
    sl = Searchlight(sl_rad=3)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # ``np.bool`` / ``np.object`` were removed in NumPy 1.24; use the
    # builtin types instead.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = True

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(cube_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        # Every other voxel must be untouched (None).
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
def test_searchlight_with_cube_poolsize_1():
    """Same as the cube test but with a single-process worker pool.

    Exercises the ``pool_size=1`` path of ``run_searchlight``; the
    expected output is identical to the multiprocess run.
    """
    sl = Searchlight(sl_rad=3)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # ``np.bool`` / ``np.object`` were removed in NumPy 1.24; use the
    # builtin types instead.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = True

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(cube_sfn, pool_size=1)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        # Every other voxel must be untouched (None).
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
def diamond_sfn(data, msk, myrad, bcast_var):
    """Kernel verifying the mask stays within a diamond; 1.0 if fully set.

    Uses ``myrad`` (the searchlight radius passed by the framework)
    instead of a hard-coded 3, and builds the diamond mask once rather
    than twice per call.
    """
    diamond_mask = Diamond(myrad).mask_
    # No voxel outside the diamond may be set in the window.
    assert not np.any(msk[~diamond_mask])
    if np.all(msk[diamond_mask]):
        return 1.0
    return None
@conftest.skip_non_fork
def test_searchlight_with_diamond():
    """Run a diamond-shaped searchlight over a diamond mask spot.

    Only the center voxel (13, 13, 13) has its whole diamond neighborhood
    inside the mask, so it alone must be 1.0.
    """
    sl = Searchlight(sl_rad=3, shape=Diamond)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # ``np.bool`` / ``np.object`` were removed in NumPy 1.24; use the
    # builtin types instead.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a diamond-shaped spot in the mask
    mask[10:17, 10:17, 10:17] = Diamond(3).mask_

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(diamond_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        # Every other voxel must be untouched (None).
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
def ball_sfn(data, msk, myrad, bcast_var):
    """Kernel verifying the mask equals a radius-``myrad`` ball.

    Builds the reference ball from ``myrad`` (as the original already did
    for ``correct_mask``) and also uses ``Ball(myrad)`` instead of the
    hard-coded ``Ball(3)``, so the kernel works for any searchlight
    radius. Returns 1.0 on an exact match, None otherwise.
    """
    # Reference ball: voxels within Euclidean distance myrad of the center.
    x, y, z = np.mgrid[-myrad:myrad+1, -myrad:myrad+1, -myrad:myrad+1]
    correct_mask = np.square(x) + np.square(y) + np.square(z) <= myrad ** 2
    assert not np.any(msk[~Ball(myrad).mask_])
    if np.all(correct_mask == msk):
        return 1.0
    return None
@conftest.skip_non_fork
def test_searchlight_with_ball():
    """Run a ball-shaped searchlight over a ball-shaped mask spot.

    Only the center voxel (13, 13, 13) sees a complete ball neighborhood,
    so it alone must be 1.0.
    """
    sl = Searchlight(sl_rad=3, shape=Ball)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    # ``np.bool`` / ``np.object`` were removed in NumPy 1.24; use the
    # builtin types instead.
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a ball-shaped spot in the mask
    mask[10:17, 10:17, 10:17] = Ball(3).mask_

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(ball_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        # Every other voxel must be untouched (None).
        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
MaskRadBcast = namedtuple("MaskRadBcast", "mask rad")
def test_instantiate():
    """Searchlight construction with explicit radius/block size succeeds."""
    searchlight = Searchlight(sl_rad=5, max_blk_edge=10)
    assert searchlight
def voxel_test_sfn(data, msk, myrad, bcast):
    """Searchlight kernel that validates the windows handed to it.

    The driver test initializes every voxel of every subject to the
    array ``[d0, d1, d2, tr]`` of its own global coordinates, so this
    kernel can check that each window is a coherent, correctly offset
    slice of the full volume.

    Parameters: ``data`` is a list of 4-D per-subject windows, ``msk``
    the matching mask window, ``myrad`` the radius (unused here; the
    radius is read from ``bcast.rad``), and ``bcast`` a ``MaskRadBcast``
    carrying the full mask and the radius.

    Returns the global (d0, d1, d2) coordinate of the window center.
    """
    rad = bcast.rad
    # Check each point: every voxel's stored coordinate must differ from
    # the window center's by exactly its offset within the window, with
    # an unchanged TR component.
    for subj in data:
        for _tr in range(subj.shape[3]):
            tr = subj[:, :, :, _tr]
            midpt = tr[rad, rad, rad]
            for d0 in range(tr.shape[0]):
                for d1 in range(tr.shape[1]):
                    for d2 in range(tr.shape[2]):
                        assert np.array_equal(tr[d0, d1, d2] - midpt,
                                              np.array([d0-rad, d1-rad,
                                                        d2-rad, 0]))
    # Determine midpoint: global coordinates of the window center, read
    # from the first subject's first TR.
    midpt = data[0][rad, rad, rad, 0]
    midpt = (midpt[0], midpt[1], midpt[2])
    # The mask window must match the corresponding region of the full
    # broadcast mask.
    for d0 in range(msk.shape[0]):
        for d1 in range(msk.shape[1]):
            for d2 in range(msk.shape[2]):
                pt = (midpt[0] - rad + d0, midpt[1] - rad + d1,
                      midpt[2] - rad + d2)
                assert bcast.mask[pt] == msk[d0, d1, d2]
    # Return midpoint
    return midpt
def block_test_sfn(data, msk, myrad, bcast_var, extra_params):
    """Block kernel: first subject's first TR with off-mask voxels cleared.

    Blanks voxels outside ``msk`` (in place, on a view of the data) and,
    when ``myrad`` is nonzero, trims a ``myrad``-wide border from every
    spatial edge before returning.
    """
    first_vol = data[0][:, :, :, 0]
    first_vol[~msk] = None
    if myrad == 0:
        return first_vol
    trimmed = first_vol[myrad:-myrad, myrad:-myrad, myrad:-myrad]
    return trimmed
@conftest.skip_non_fork
def test_correctness():  # noqa: C901
    """End-to-end correctness of voxel- and block-level searchlight runs.

    Fills each subject volume with its own coordinates, runs both
    ``run_searchlight`` (via ``voxel_test_sfn``) and
    ``run_block_function`` (via ``block_test_sfn``), and checks that
    masked output voxels carry their own coordinates back. Exercised
    over several volume shapes, radii, and block sizes, including a
    zero-sized dimension.
    """
    def voxel_test(data, mask, max_blk_edge, rad):
        comm = MPI.COMM_WORLD
        rank = comm.rank
        (dim0, dim1, dim2) = mask.shape

        # Initialize dataset with known pattern: each voxel holds its
        # own (d1, d2, d3, tr) coordinates.
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])

        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(MaskRadBcast(mask, rad))
        global_outputs = sl.run_searchlight(voxel_test_sfn)

        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2]))

    def block_test(data, mask, max_blk_edge, rad):
        comm = MPI.COMM_WORLD
        rank = comm.rank
        (dim0, dim1, dim2) = mask.shape

        # Initialize dataset with known pattern (same as voxel_test).
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])

        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(mask)
        global_outputs = sl.run_block_function(block_test_sfn)

        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2, 0]))

    # Create dataset
    def do_test(dim0, dim1, dim2, ntr, nsubj, max_blk_edge, rad):
        comm = MPI.COMM_WORLD
        rank = comm.rank
        size = comm.size
        mask = np.random.choice([True, False], (dim0, dim1, dim2))
        # ``np.object`` was removed in NumPy 1.24; use builtin ``object``.
        data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
                if i % size == rank
                else None
                for i in range(0, nsubj)]
        voxel_test(data, mask, max_blk_edge, rad)
        block_test(data, mask, max_blk_edge, rad)

    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=1)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=0)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=5, max_blk_edge=4, rad=1)
    do_test(dim0=1, dim1=5, dim2=9, ntr=5, nsubj=5, max_blk_edge=4, rad=1)
    do_test(dim0=0, dim1=10, dim2=8, ntr=5, nsubj=5, max_blk_edge=4, rad=1)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=2)
    do_test(dim0=7, dim1=5, dim2=9, ntr=5, nsubj=1, max_blk_edge=4, rad=3)
| 10,021 | 32.075908 | 75 | py |
brainiak | brainiak-master/tests/reprsimil/test_gbrsa.py | # Copyright 2016 Mingbo Cai, Princeton Neuroscience Instititute,
# Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_can_instantiate():
    """GBRSA can be constructed with defaults and with custom settings."""
    from brainiak.reprsimil.brsa import GBRSA
    default_model = GBRSA()
    assert default_model, "Invalid GBRSA instance!"
    custom_model = GBRSA(n_iter=40, rank=4, auto_nuisance=False,
                         nureg_method='PCA',
                         baseline_single=False, logS_range=1.0,
                         SNR_bins=11, rho_bins=40, tol=2e-3,
                         optimizer='CG', random_state=0,
                         anneal_speed=20, SNR_prior='unif')
    assert custom_model, "Invalid GBRSA instance!"
def test_fit():
    """Fit GBRSA on simulated multi-subject data and validate recovery.

    Simulates three subjects with different run counts and voxel counts,
    AR(1) noise, a rank-2 nuisance component, a known shared covariance
    structure, and per-voxel SNR; then checks that the fitted covariance,
    SNR, noise level, and AR(1) coefficients correlate with the simulated
    values, and that ``transform``/``score`` behave sensibly on new data.

    NOTE(review): the statements below draw from the global ``np.random``
    stream in a fixed order after ``seed(10)``; do not reorder them, or
    the simulated data (and the statistical asserts) will change.
    """
    from brainiak.reprsimil.brsa import GBRSA
    import brainiak.utils.utils as utils
    import scipy.stats
    import numpy as np
    import os.path
    np.random.seed(10)
    file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Load an example design matrix
    design = utils.ReadDesign(fname=file_path)
    # concatenate it by 1, 2, and 3 times, mimicking different length
    # of experiments for different participants
    n_run = [2, 1, 1]
    design_mat = [None] * 3
    n_T = [None] * 3
    n_V = [40, 60, 60]
    for i in range(3):
        design_mat[i] = np.tile(design.design_task[:, :-1], [n_run[i], 1])
    # start simulating some data
    n_C = np.size(design_mat[0], axis=1)
    noise_bot = 0.5
    noise_top = 1.5
    noise_level = [None] * 3
    # AR(1) coefficient
    rho1_top = 0.8
    rho1_bot = -0.2
    rho1 = [None] * 3
    # generating noise
    noise = [None] * 3
    # baseline
    inten = [None] * 3
    for i in range(3):
        design_mat[i] = np.tile(design.design_task[:, :-1], [n_run[i], 1])
        n_T[i] = n_run[i] * design.n_TR
        noise_level[i] = np.random.rand(
            n_V[i]) * (noise_top - noise_bot) + noise_bot
        # noise level is random.
        rho1[i] = np.random.rand(n_V[i]) * (rho1_top - rho1_bot) + rho1_bot
        noise[i] = np.zeros([n_T[i], n_V[i]])
        noise[i][0, :] = np.random.randn(
            n_V[i]) * noise_level[i] / np.sqrt(1 - rho1[i]**2)
        for i_t in range(1, n_T[i]):
            noise[i][i_t, :] = noise[i][i_t - 1, :] * rho1[i] + \
                np.random.randn(n_V[i]) * noise_level[i]
        # Add a rank-2 spatially shared nuisance component to the noise.
        noise[i] = noise[i] + \
            np.dot(np.random.randn(n_T[i], 2), np.random.randn(2, n_V[i]))
        inten[i] = np.random.rand(n_V[i]) * 20.0
    # ideal covariance matrix
    ideal_cov = np.zeros([n_C, n_C])
    ideal_cov = np.eye(n_C) * 0.6
    ideal_cov[0:4, 0:4] = 0.2
    for cond in range(0, 4):
        ideal_cov[cond, cond] = 2
    ideal_cov[5:9, 5:9] = 0.9
    for cond in range(5, 9):
        ideal_cov[cond, cond] = 1
    L_full = np.linalg.cholesky(ideal_cov)
    # generating signal
    snr_top = 5.0  # test with high SNR
    snr_bot = 1.0
    # snr = np.random.rand(n_V)*(snr_top-snr_bot)+snr_bot
    # Notice that accurately speaking this is not snr. the magnitude of signal
    # depends not only on beta but also on x.
    snr = [None] * 3
    signal = [None] * 3
    betas_simulated = [None] * 3
    scan_onsets = [None] * 3
    Y = [None] * 3
    for i in range(3):
        snr[i] = np.random.rand(n_V[i]) * (snr_top - snr_bot) + snr_bot
        sqrt_v = noise_level[i] * snr[i]
        betas_simulated[i] = np.dot(
            L_full, np.random.randn(n_C, n_V[i])) * sqrt_v
        signal[i] = np.dot(design_mat[i], betas_simulated[i])
        # Adding noise to signal as data
        Y[i] = signal[i] + noise[i] + inten[i]
        scan_onsets[i] = np.linspace(0, n_T[i], num=n_run[i] + 1)
    # Test fitting.
    n_nureg = 2
    gbrsa = GBRSA(n_iter=15, auto_nuisance=True, logS_range=0.5, SNR_bins=11,
                  rho_bins=16, n_nureg=n_nureg, optimizer='L-BFGS-B')
    gbrsa.fit(X=Y, design=design_mat, scan_onsets=scan_onsets)
    # Check that result is significantly correlated with the ideal covariance
    # matrix
    u_b = gbrsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)],
                              u_i[np.tril_indices_from(u_i)])[1]
    assert p < 0.01, (
        "Fitted covariance matrix does not correlate with ideal covariance "
        "matrix!")
    # check that the recovered SNRs makes sense
    p = scipy.stats.pearsonr(gbrsa.nSNR_[0], snr[0])[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    p = scipy.stats.pearsonr(gbrsa.sigma_[1], noise_level[1])[1]
    assert p < 0.01, (
        "Fitted noise level does not correlate with simulated noise level!")
    p = scipy.stats.pearsonr(gbrsa.rho_[2], rho1[2])[1]
    assert p < 0.01, (
        "Fitted AR(1) coefficient does not correlate with simulated values!")
    assert np.shape(gbrsa.X0_[1]) == (n_T[1], n_nureg + 1), "Wrong size of X0"
    # Generate fresh noise realizations sharing the same signal, for
    # testing transform() and score() on held-out data.
    Y_new = [None] * 3
    noise_new = [None] * 3
    for i in range(3):
        noise_new[i] = np.zeros([n_T[i], n_V[i]])
        noise_new[i][0, :] = np.random.randn(
            n_V[i]) * noise_level[i] / np.sqrt(1 - rho1[i]**2)
        for i_t in range(1, n_T[i]):
            noise_new[i][i_t, :] = noise_new[i][i_t - 1, :] * \
                rho1[i] + np.random.randn(n_V[i]) * noise_level[i]
        Y_new[i] = signal[i] + noise_new[i] + inten[i]
    ts, ts0 = gbrsa.transform(Y_new, scan_onsets=scan_onsets)
    [score, score_null] = gbrsa.score(
        X=Y_new, design=design_mat, scan_onsets=scan_onsets)
    [score_noise, score_null_noise] = gbrsa.score(
        X=noise_new, design=design_mat, scan_onsets=scan_onsets)
    for i in range(3):
        assert np.shape(ts[i]) == (n_T[i], n_C) and np.shape(
            ts0[i]) == (n_T[i], n_nureg + 1)
        p = scipy.stats.pearsonr(ts[i][:, 0], design_mat[i][:, 0])[1]
        assert p < 0.01, (
            "Recovered time series does not correlate with true time series!")
        assert score[i] > score_null[i], (
            "Full model does not win over null model on data containing "
            "signal")
        assert score_noise[i] < score_null_noise[i], (
            "Null model does not win over full model on data without signal")
    # None inputs should propagate as lists of None.
    [score, score_null] = gbrsa.score(
        X=[None] * 3, design=design_mat, scan_onsets=scan_onsets)
    assert score == [None] * 3 and score_null == [None] * \
        3, "score did not return list of None when data is None"
    ts, ts0 = gbrsa.transform(X=[None] * 3, scan_onsets=scan_onsets)
    assert ts == [None] * 3 and ts0 == [None] * \
        3, "transform did not return list of None when data is None"
def test_gradient():
    """Check the analytic gradient of ``_sum_loglike_marginalized``.

    Builds arbitrary (but correctly shaped and symmetric) precomputed
    quantities, then compares the analytic derivative with respect to
    the Cholesky factor against a numerical directional derivative from
    ``numdifftools``.

    NOTE(review): statements draw from the global ``np.random`` stream in
    a fixed order after ``seed(100)``; do not reorder them.
    """
    from brainiak.reprsimil.brsa import GBRSA
    import brainiak.utils.utils as utils
    import numpy as np
    import os.path
    import numdifftools as nd
    np.random.seed(100)
    file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Load an example design matrix
    design = utils.ReadDesign(fname=file_path)
    # concatenate it by 1, 2, and 3 times, mimicking different length
    # of experiments for different participants
    n_run = [1, 2, 1]
    design_mat = [None] * 3
    n_T = [None] * 3
    n_V = [30, 30, 20]
    for i in range(3):
        design_mat[i] = np.tile(design.design_task[:, :-1], [n_run[i], 1])
        n_T[i] = n_run[i] * design.n_TR
    # start simulating some data
    n_C = np.size(design_mat[0], axis=1)
    noise_bot = 0.5
    noise_top = 1.5
    noise_level = [None] * 3
    for i in range(3):
        noise_level[i] = np.random.rand(
            n_V[i]) * (noise_top - noise_bot) + noise_bot
    # noise level is random.
    # AR(1) coefficient
    rho1_top = 0.8
    rho1_bot = -0.2
    rho1 = [None] * 3
    # generating noise
    noise = [None] * 3
    # baseline
    inten = [None] * 3
    for i in range(3):
        rho1[i] = np.random.rand(n_V[i]) * (rho1_top - rho1_bot) + rho1_bot
        noise[i] = np.zeros([n_T[i], n_V[i]])
        noise[i][0, :] = np.random.randn(
            n_V[i]) * noise_level[i] / np.sqrt(1 - rho1[i]**2)
        for i_t in range(1, n_T[i]):
            noise[i][i_t, :] = noise[i][i_t - 1, :] * rho1[i] + \
                np.random.randn(n_V[i]) * noise_level[i]
        noise[i] = noise[i] + \
            np.dot(np.random.randn(n_T[i], 2), np.random.randn(2, n_V[i]))
        inten[i] = np.random.rand(n_V[i]) * 20.0
    # ideal covariance matrix
    ideal_cov = np.zeros([n_C, n_C])
    ideal_cov = np.eye(n_C) * 0.6
    ideal_cov[0:4, 0:4] = 0.2
    for cond in range(0, 4):
        ideal_cov[cond, cond] = 2
    ideal_cov[5:9, 5:9] = 0.9
    for cond in range(5, 9):
        ideal_cov[cond, cond] = 1
    L_full = np.linalg.cholesky(ideal_cov)
    # generating signal
    snr_top = 5.0  # test with high SNR
    snr_bot = 1.0
    # snr = np.random.rand(n_V)*(snr_top-snr_bot)+snr_bot
    # Notice that accurately speaking this is not snr. the magnitude of signal
    # depends not only on beta but also on x.
    snr = [None] * 3
    signal = [None] * 3
    betas_simulated = [None] * 3
    scan_onsets = [None] * 3
    Y = [None] * 3
    for i in range(3):
        snr[i] = np.random.rand(n_V[i]) * (snr_top - snr_bot) + snr_bot
        sqrt_v = noise_level[i] * snr[i]
        betas_simulated[i] = np.dot(
            L_full, np.random.randn(n_C, n_V[i])) * sqrt_v
        signal[i] = np.dot(design_mat[i], betas_simulated[i])
        # Adding noise to signal as data
        Y[i] = signal[i] + noise[i] + inten[i]
        scan_onsets[i] = np.linspace(0, n_T[i], num=n_run[i] + 1)
    # Get some initial fitting.
    SNR_bins = 11
    rho_bins = 20
    gbrsa = GBRSA(n_iter=3, rank=n_C, SNR_bins=SNR_bins,
                  rho_bins=rho_bins, logS_range=0.5)
    n_grid = SNR_bins * rho_bins
    half_log_det_X0TAX0 = [np.random.randn(n_grid) for i in range(3)]
    log_weights = np.random.randn(n_grid)
    log_fixed_terms = [np.random.randn(n_grid) for i in range(3)]
    l_idx = np.tril_indices(n_C)
    L_vec = np.random.randn(int(n_C * (n_C + 1) / 2))
    n_X0 = [2, 2, 2]
    s = np.linspace(1, SNR_bins, n_grid)
    a = np.linspace(0.5, 1, n_grid)
    s2XTAcorrX = [None] * 3
    YTAcorrY_diag = [None] * 3
    sXTAcorrY = [None] * 3
    # The calculations below are quite arbitrary and do not conform
    # to the model. They simply conform to the symmetry property and shape of
    # the matrix indicated by the model
    for i in range(3):
        YTAcorrY_diag[i] = np.sum(Y[i] * Y[i], axis=0) * a[:, None]
        s2XTAcorrX[i] = np.dot(design_mat[i].T, design_mat[
            i]) * s[:, None, None]**2 * a[:, None, None]
        sXTAcorrY[i] = np.dot(design_mat[i].T, Y[i]) * \
            s[:, None, None] * a[:, None, None]
    # test if the gradients are correct
    # (removed a leftover debug print of log_fixed_terms here)
    ll0, deriv0 = gbrsa._sum_loglike_marginalized(L_vec, s2XTAcorrX,
                                                  YTAcorrY_diag, sXTAcorrY,
                                                  half_log_det_X0TAX0,
                                                  log_weights, log_fixed_terms,
                                                  l_idx, n_C, n_T, n_V, n_X0,
                                                  n_grid, rank=None)
    # We test the gradient to the Cholesky factor
    vec = np.random.randn(np.size(L_vec))
    vec = vec / np.linalg.norm(vec)
    dd = nd.directionaldiff(
        lambda x: gbrsa._sum_loglike_marginalized(x, s2XTAcorrX, YTAcorrY_diag,
                                                  sXTAcorrY,
                                                  half_log_det_X0TAX0,
                                                  log_weights, log_fixed_terms,
                                                  l_idx, n_C, n_T, n_V, n_X0,
                                                  n_grid, rank=None)[0],
        L_vec,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), 'gradient incorrect'
def test_SNR_grids():
    """Check grid placement and weights for each supported SNR prior."""
    from brainiak.reprsimil.brsa import GBRSA
    import numpy as np
    # Uniform prior: grids equally spaced on [0, 1], weights positive,
    # equal in the interior, and summing to one.
    model = GBRSA(SNR_prior='unif', SNR_bins=10)
    grids, weights = model._set_SNR_grids()
    assert (np.isclose(np.sum(weights), 1)
            and np.isclose(np.std(weights[1:-1]), 0)
            and np.all(weights > 0)
            and np.isclose(np.min(grids), 0)
            and np.all(grids >= 0)
            and np.isclose(np.max(grids), 1)
            ), 'SNR_weights or SNR_grids are incorrect for uniform prior'
    assert np.isclose(np.ptp(np.diff(grids[1:-1])), 0), \
        'SNR grids are not equally spaced for uniform prior'
    assert (np.size(grids) == np.size(weights)
            and np.size(grids) == 10
            ), ("size of SNR_grids or SNR_weights is not correct for uniform "
                "prior")
    # Log-normal prior: nonnegative, strictly increasing grids with
    # positive normalized weights.
    model = GBRSA(SNR_prior='lognorm', SNR_bins=35)
    grids, weights = model._set_SNR_grids()
    assert (np.all(grids >= 0)
            and np.isclose(np.sum(weights), 1)
            and np.all(weights > 0)
            and np.all(np.diff(grids) > 0)
            ), 'SNR_grids or SNR_weights not correct for log normal prior'
    # Exponential prior: same structural requirements as log-normal.
    model = GBRSA(SNR_prior='exp')
    grids, weights = model._set_SNR_grids()
    assert (np.all(grids >= 0)
            and np.isclose(np.sum(weights), 1)
            and np.all(weights > 0)
            and np.all(np.diff(grids) > 0)
            ), 'SNR_grids or SNR_weights not correct for exponential prior'
    # 'equal' prior degenerates to a single grid point at 1 with weight 1.
    model = GBRSA(SNR_prior='equal')
    grids, weights = model._set_SNR_grids()
    assert (np.all(grids == 1)
            and np.all(weights == 1)
            and np.size(grids) == 1
            ), 'SNR_grids or SNR_weights not correct for equal prior'
def test_n_nureg():
    """GBRSA should detect the true number (8) of nuisance components.

    The noise is constructed as a rank-8 shared component plus tiny
    independent noise, so the automatic nuisance-regressor estimation
    should report exactly 8 components for the (single) subject.
    """
    import brainiak.reprsimil.brsa
    import numpy as np
    # Seed the RNG so the rank-8 structure (and hence the estimated
    # number of nuisance regressors) is deterministic across runs;
    # the original unseeded version could fail sporadically.
    np.random.seed(1)
    noise = np.dot(np.random.randn(100, 8), np.random.randn(
        8, 30)) + np.random.randn(100, 30) * 0.001
    design = np.random.randn(100, 2)
    s = brainiak.reprsimil.brsa.GBRSA(n_iter=2)
    s.fit(X=noise, design=design)
    assert s.n_nureg_[0] == 8, 'n_nureg_ estimation is wrong in GBRSA'
def test_grid_flatten_num_int():
    # Check for numeric integration of SNR, and correctly flattening 2-D grids
    # to 1-D grid.
    import brainiak.reprsimil.brsa
    import brainiak.utils.utils as utils
    import numpy as np
    import scipy.special
    # Fix: scipy.stats (lognorm) and scipy.integrate (simps) are used
    # below but were never imported explicitly; the test only worked when
    # some other module had already imported those scipy submodules.
    import scipy.stats
    import scipy.integrate
    n_V = 30
    n_T = 50
    n_C = 3
    design = np.random.randn(n_T, n_C)
    U_simu = np.asarray([[1.0, 0.1, 0.0], [0.1, 1.0, 0.2], [0.0, 0.2, 1.0]])
    L_simu = np.linalg.cholesky(U_simu)
    SNR = np.random.exponential(size=n_V)
    beta = np.dot(L_simu, np.random.randn(n_C, n_V)) * SNR
    noise = np.random.randn(n_T, n_V)
    Y = np.dot(design, beta) + noise
    X = design
    X_base = None
    scan_onsets = [0]
    s = brainiak.reprsimil.brsa.GBRSA(n_iter=1, auto_nuisance=False,
                                      SNR_prior='exp')
    s.fit(X=[Y], design=[design])
    rank = n_C
    l_idx, rank = s._chol_idx(n_C, rank)
    L = np.zeros((n_C, rank))
    n_l = np.size(l_idx[0])
    current_vec_U_chlsk_l = s.random_state_.randn(n_l) * 10
    L[l_idx] = current_vec_U_chlsk_l
    # Now we change the grids for SNR and rho for testing.
    s.SNR_bins = 2
    s.rho_bins = 2
    SNR_grids, SNR_weights = s._set_SNR_grids()
    # rho_grids, rho_weights = s._set_rho_grids()
    rho_grids = np.ones(2) * 0.1
    rho_weights = np.ones(2) / 2
    # We purposefully set all rhos to be equal to test flattening of
    # grids.
    n_grid = s.SNR_bins * s.rho_bins
    D, F, run_TRs, n_run = s._prepare_DF(
        n_T, scan_onsets=scan_onsets)
    XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, XTDX, XTFX \
        = s._prepare_data_XY(X, Y, D, F)
    X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY, X0, \
        X_base, n_X0, idx_DC = s._prepare_data_XYX0(
            X, Y, X_base, None, D, F, run_TRs, no_DC=False)
    X0TAX0, X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY_diag, X0TAY, XTAX0 \
        = s._precompute_ar1_quad_forms_marginalized(
            XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX,
            XTDX, XTFX, X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0,
            X0TY, X0TDY, X0TFY, rho_grids, n_V, n_X0)
    half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, YTAcorrY_diag, \
        sXTAcorrY, X0TAY, XTAX0 = s._matrix_flattened_grid(
            X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX, YTAcorrY_diag, XTAcorrY,
            X0TAY, XTAX0, n_C, n_V, n_X0, n_grid)
    assert (half_log_det_X0TAX0[0] == half_log_det_X0TAX0[1]
            and half_log_det_X0TAX0[2] == half_log_det_X0TAX0[3]
            and half_log_det_X0TAX0[0] == half_log_det_X0TAX0[2]
            ), '_matrix_flattened_grid has mistake with half_log_det_X0TAX0'
    assert (np.array_equal(X0TAX0[0, :, :], X0TAX0[1, :, :])
            and np.array_equal(X0TAX0[2, :, :], X0TAX0[3, :, :])
            and np.array_equal(X0TAX0[0, :, :], X0TAX0[2, :, :])
            ), '_matrix_flattened_grid has mistake X0TAX0'
    assert (np.array_equal(X0TAX0_i[0, :, :], X0TAX0_i[1, :, :])
            and np.array_equal(X0TAX0_i[2, :, :], X0TAX0_i[3, :, :])
            and np.array_equal(X0TAX0_i[0, :, :], X0TAX0_i[2, :, :])
            ), '_matrix_flattened_grid has mistake X0TAX0_i'
    assert np.allclose(
        np.dot(X0TAX0[0, :, :], X0TAX0_i[0, :, :]),
        np.eye(n_X0)
    ), 'X0TAX0_i is not inverse of X0TAX0'
    assert (np.array_equal(YTAcorrY_diag[0, :], YTAcorrY_diag[1, :])
            and np.array_equal(YTAcorrY_diag[2, :], YTAcorrY_diag[3, :])
            and np.array_equal(YTAcorrY_diag[0, :], YTAcorrY_diag[2, :])
            ), '_matrix_flattened_grid has mistake YTAcorrY_diag'
    assert (np.array_equal(sXTAcorrY[0, :, :], sXTAcorrY[1, :, :])
            and np.array_equal(sXTAcorrY[2, :, :], sXTAcorrY[3, :, :])
            and not np.array_equal(sXTAcorrY[0, :, :], sXTAcorrY[2, :, :])
            ), '_matrix_flattened_grid has mistake sXTAcorrY'
    assert (np.array_equal(X0TAY[0, :, :], X0TAY[1, :, :])
            and np.array_equal(X0TAY[2, :, :], X0TAY[3, :, :])
            and np.array_equal(X0TAY[0, :, :], X0TAY[2, :, :])
            ), '_matrix_flattened_grid has mistake X0TAY'
    assert (np.array_equal(XTAX0[0, :, :], XTAX0[1, :, :])
            and np.array_equal(XTAX0[2, :, :], XTAX0[3, :, :])
            and np.array_equal(XTAX0[0, :, :], XTAX0[2, :, :])
            ), '_matrix_flattened_grid has mistake XTAX0'
    # Now we test the other way
    rho_grids, rho_weights = s._set_rho_grids()
    # rho_grids, rho_weights = s._set_rho_grids()
    SNR_grids = np.ones(2) * 0.1
    SNR_weights = np.ones(2) / 2
    # We purposefully set all SNR to be equal to test flattening of
    # grids.
    X0TAX0, X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY_diag, X0TAY, XTAX0 \
        = s._precompute_ar1_quad_forms_marginalized(
            XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX,
            XTDX, XTFX, X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0,
            X0TY, X0TDY, X0TFY, rho_grids, n_V, n_X0)
    half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, YTAcorrY_diag, \
        sXTAcorrY, X0TAY, XTAX0 = s._matrix_flattened_grid(
            X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX, YTAcorrY_diag, XTAcorrY,
            X0TAY, XTAX0, n_C, n_V, n_X0, n_grid)
    assert (half_log_det_X0TAX0[0] == half_log_det_X0TAX0[2]
            and half_log_det_X0TAX0[1] == half_log_det_X0TAX0[3]
            and not half_log_det_X0TAX0[0] == half_log_det_X0TAX0[1]
            ), '_matrix_flattened_grid has mistake with half_log_det_X0TAX0'
    assert (np.array_equal(X0TAX0[0, :, :], X0TAX0[2, :, :])
            and np.array_equal(X0TAX0[1, :, :], X0TAX0[3, :, :])
            and not np.array_equal(X0TAX0[0, :, :], X0TAX0[1, :, :])
            ), '_matrix_flattened_grid has mistake X0TAX0'
    assert (np.array_equal(X0TAX0_i[0, :, :], X0TAX0_i[2, :, :])
            and np.array_equal(X0TAX0_i[1, :, :], X0TAX0_i[3, :, :])
            and not np.array_equal(X0TAX0_i[0, :, :], X0TAX0_i[1, :, :])
            ), '_matrix_flattened_grid has mistake X0TAX0_i'
    assert np.allclose(
        np.dot(X0TAX0[0, :, :], X0TAX0_i[0, :, :]),
        np.eye(n_X0)
    ), 'X0TAX0_i is not inverse of X0TAX0'
    assert (np.array_equal(YTAcorrY_diag[0, :], YTAcorrY_diag[2, :])
            and np.array_equal(YTAcorrY_diag[1, :], YTAcorrY_diag[3, :])
            and not np.array_equal(YTAcorrY_diag[0, :],
                                   YTAcorrY_diag[1, :])
            ), '_matrix_flattened_grid has mistake YTAcorrY_diag'
    assert (np.array_equal(sXTAcorrY[0, :, :], sXTAcorrY[2, :, :])
            and np.array_equal(sXTAcorrY[1, :, :], sXTAcorrY[3, :, :])
            and not np.array_equal(sXTAcorrY[0, :, :], sXTAcorrY[1, :, :])
            ), '_matrix_flattened_grid has mistake sXTAcorrY'
    assert (np.array_equal(X0TAY[0, :, :], X0TAY[2, :, :])
            and np.array_equal(X0TAY[1, :, :], X0TAY[3, :, :])
            and not np.array_equal(X0TAY[0, :, :], X0TAY[1, :, :])
            ), '_matrix_flattened_grid has mistake X0TAY'
    assert (np.array_equal(XTAX0[0, :, :], XTAX0[2, :, :])
            and np.array_equal(XTAX0[1, :, :], XTAX0[3, :, :])
            and not np.array_equal(XTAX0[0, :, :], XTAX0[1, :, :])
            ), '_matrix_flattened_grid has mistake XTAX0'
    # Now test the integration over SNR
    s.SNR_bins = 50
    s.rho_bins = 1
    SNR_grids, SNR_weights = s._set_SNR_grids()
    rho_grids, rho_weights = s._set_rho_grids()
    n_grid = s.SNR_bins * s.rho_bins

    def setup_for_test():
        # This function will be re-used to set up the variables necessary for
        # testing.
        X0TAX0, X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY_diag, X0TAY, XTAX0 \
            = s._precompute_ar1_quad_forms_marginalized(
                XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX,
                XTDX, XTFX, X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0,
                X0TY, X0TDY, X0TFY, rho_grids, n_V, n_X0)
        half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, YTAcorrY_diag, \
            sXTAcorrY, X0TAY, XTAX0 = s._matrix_flattened_grid(
                X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX, YTAcorrY_diag, XTAcorrY,
                X0TAY, XTAX0, n_C, n_V, n_X0, n_grid)
        log_weights = np.reshape(
            np.log(SNR_weights[:, None]) + np.log(rho_weights), n_grid)
        all_rho_grids = np.reshape(np.repeat(
            rho_grids[None, :], s.SNR_bins, axis=0), n_grid)
        log_fixed_terms = - (n_T - n_X0) / 2 * np.log(2 * np.pi) + n_run \
            / 2 * np.log(1 - all_rho_grids**2) + scipy.special.gammaln(
                (n_T - n_X0 - 2) / 2) + (n_T - n_X0 - 2) / 2 * np.log(2)
        return s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0, \
            log_weights, log_fixed_terms

    (s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0, log_weights,
     log_fixed_terms) = setup_for_test()
    LL_total, _ = s._loglike_marginalized(current_vec_U_chlsk_l, s2XTAcorrX,
                                          YTAcorrY_diag, sXTAcorrY,
                                          half_log_det_X0TAX0, log_weights,
                                          log_fixed_terms, l_idx, n_C, n_T,
                                          n_V, n_X0, n_grid, rank=rank)
    LL_total = - LL_total
    # Now we re-calculate using scipy.integrate
    s.SNR_bins = 100
    SNR_grids = np.linspace(0, 12, s.SNR_bins)
    SNR_weights = np.exp(- SNR_grids)
    SNR_weights = SNR_weights / np.sum(SNR_weights)
    n_grid = s.SNR_bins * s.rho_bins
    (s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0, log_weights,
     log_fixed_terms) = setup_for_test()
    LL_raw, _, _, _ = s._raw_loglike_grids(L, s2XTAcorrX, YTAcorrY_diag,
                                           sXTAcorrY, half_log_det_X0TAX0,
                                           log_weights, log_fixed_terms,
                                           n_C, n_T, n_V, n_X0,
                                           n_grid, rank)
    result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
    scipy_sum = scipy.integrate.simps(y=result_exp, axis=0)
    LL_total_scipy = np.sum(np.log(scipy_sum) + max_value)
    tol = 1e-3
    assert(np.isclose(LL_total_scipy, LL_total, rtol=tol)), \
        'Error of log likelihood calculation exceeds the tolerance'
    # Now test the log normal prior
    s = brainiak.reprsimil.brsa.GBRSA(n_iter=1, auto_nuisance=False,
                                      SNR_prior='lognorm')
    s.SNR_bins = 50
    s.rho_bins = 1
    SNR_grids, SNR_weights = s._set_SNR_grids()
    rho_grids, rho_weights = s._set_rho_grids()
    n_grid = s.SNR_bins * s.rho_bins
    (s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0, log_weights,
     log_fixed_terms) = setup_for_test()
    LL_total, _ = s._loglike_marginalized(current_vec_U_chlsk_l, s2XTAcorrX,
                                          YTAcorrY_diag, sXTAcorrY,
                                          half_log_det_X0TAX0, log_weights,
                                          log_fixed_terms, l_idx, n_C, n_T,
                                          n_V, n_X0, n_grid, rank=rank)
    LL_total = - LL_total
    # Now we re-calculate using scipy.integrate
    s.SNR_bins = 400
    SNR_grids = np.linspace(1e-8, 20, s.SNR_bins)
    log_SNR_weights = scipy.stats.lognorm.logpdf(SNR_grids, s=s.logS_range)
    result_sum, max_value, result_exp = utils.sumexp_stable(
        log_SNR_weights[:, None])
    SNR_weights = np.squeeze(result_exp / result_sum)
    n_grid = s.SNR_bins * s.rho_bins
    (s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0, log_weights,
     log_fixed_terms) = setup_for_test()
    LL_raw, _, _, _ = s._raw_loglike_grids(L, s2XTAcorrX, YTAcorrY_diag,
                                           sXTAcorrY, half_log_det_X0TAX0,
                                           log_weights, log_fixed_terms,
                                           n_C, n_T, n_V, n_X0,
                                           n_grid, rank)
    result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
    scipy_sum = scipy.integrate.simps(y=result_exp, axis=0)
    LL_total_scipy = np.sum(np.log(scipy_sum) + max_value)
    tol = 1e-3
    assert(np.isclose(LL_total_scipy, LL_total, rtol=tol)), \
        'Error of log likelihood calculation exceeds the tolerance'
| 27,071 | 41.903328 | 79 | py |
brainiak | brainiak-master/tests/reprsimil/test_brsa.py | # Copyright 2016 Mingbo Cai, Princeton Neuroscience Instititute,
# Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_can_instantiate():
    """BRSA can be constructed with defaults and with custom settings."""
    from brainiak.reprsimil.brsa import BRSA, prior_GP_var_inv_gamma
    default_model = BRSA()
    assert default_model, "Invalid BRSA instance!"
    custom_model = BRSA(
        n_iter=50, rank=5, auto_nuisance=False, n_nureg=2, nureg_method='ICA',
        baseline_single=False, init_iter=5, GP_space=True, GP_inten=True,
        tol=2e-3, eta=0.001, space_smooth_range=10.0, inten_smooth_range=100.0,
        tau_range=2.0,
        tau2_prior=prior_GP_var_inv_gamma,
        optimizer='CG', random_state=100, anneal_speed=20)
    assert custom_model, "Invalid BRSA instance!"
def test_fit():
    """End-to-end test of BRSA.fit on simulated fMRI data.

    Simulates voxel time series with AR(1) noise and a signal whose SNR
    field is drawn from a Gaussian process, then checks that fitting
    recovers the shared covariance structure, the SNR, the noise level and
    the AR(1) coefficients for three model variants: GP prior on space and
    intensity, reduced rank without GP, and GP prior on space only.

    Bug fix: in the reduced-rank section, ``u_b = brsa.U_`` had been
    commented out, so the covariance-correlation check silently reused the
    ``u_b`` estimated by the previous (GP) model.  The line is restored.
    """
    from brainiak.reprsimil.brsa import BRSA
    import brainiak.utils.utils as utils
    import scipy.stats
    import numpy as np
    import os.path
    np.random.seed(10)
    file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Load an example design matrix
    design = utils.ReadDesign(fname=file_path)
    # concatenate it by 2 times, mimicking 2 runs of identical timing
    n_run = 2
    design.design_task = np.tile(design.design_task[:, :-1], [n_run, 1])
    design.n_TR = design.n_TR * n_run
    # start simulating some data
    n_V = 50
    n_C = np.size(design.design_task, axis=1)
    n_T = design.n_TR
    noise_bot = 0.5
    noise_top = 5.0
    noise_level = np.random.rand(n_V) * (noise_top - noise_bot) + noise_bot
    # noise level is random.
    # AR(1) coefficient
    rho1_top = 0.8
    rho1_bot = -0.2
    rho1 = np.random.rand(n_V) * (rho1_top - rho1_bot) + rho1_bot
    # generating noise
    noise = np.zeros([n_T, n_V])
    noise[0, :] = np.random.randn(n_V) * noise_level / np.sqrt(1 - rho1**2)
    for i_t in range(1, n_T):
        noise[i_t, :] = noise[i_t - 1, :] * rho1 + \
            np.random.randn(n_V) * noise_level
    # ideal covariance matrix
    ideal_cov = np.zeros([n_C, n_C])
    ideal_cov = np.eye(n_C) * 0.6
    ideal_cov[0:4, 0:4] = 0.2
    for cond in range(0, 4):
        ideal_cov[cond, cond] = 2
    ideal_cov[5:9, 5:9] = 0.9
    for cond in range(5, 9):
        ideal_cov[cond, cond] = 1
    L_full = np.linalg.cholesky(ideal_cov)
    # generating signal
    snr_level = 5.0  # test with high SNR
    # snr = np.random.rand(n_V)*(snr_top-snr_bot)+snr_bot
    # Notice that accurately speaking this is not snr. the magnitude of signal
    # depends
    # not only on beta but also on x.
    inten = np.random.rand(n_V) * 20.0
    # parameters of Gaussian process to generate pseudo SNR
    tau = 1.0
    smooth_width = 5.0
    inten_kernel = 1.0
    coords = np.arange(0, n_V)[:, None]
    dist2 = np.square(coords - coords.T)
    inten_tile = np.tile(inten, [n_V, 1])
    inten_diff2 = (inten_tile - inten_tile.T)**2
    K = np.exp(-dist2 / smooth_width**2 / 2.0 - inten_diff2 /
               inten_kernel**2 / 2.0) * tau**2 + np.eye(n_V) * tau**2 * 0.001
    L = np.linalg.cholesky(K)
    snr = np.exp(np.dot(L, np.random.randn(n_V))) * snr_level
    sqrt_v = noise_level * snr
    betas_simulated = np.dot(L_full, np.random.randn(n_C, n_V)) * sqrt_v
    signal = np.dot(design.design_task, betas_simulated)
    # Adding noise to signal as data
    Y = signal + noise + inten
    scan_onsets = np.linspace(0, design.n_TR, num=n_run + 1)
    # Test fitting with GP prior.
    brsa = BRSA(GP_space=True, GP_inten=True, n_iter=5,
                init_iter=10, auto_nuisance=False, tol=2e-3)
    # We also test that it can detect baseline regressor included in the
    # design matrix for task conditions
    wrong_design = np.insert(design.design_task, 0, 1, axis=1)
    with pytest.raises(ValueError) as excinfo:
        brsa.fit(X=Y, design=wrong_design, scan_onsets=scan_onsets,
                 coords=coords, inten=inten)
    assert ('Your design matrix appears to have included baseline time series.'
            in str(excinfo.value))
    # Now we fit with the correct design matrix.
    brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets,
             coords=coords, inten=inten)
    # Check that result is significantly correlated with the ideal covariance
    # matrix
    u_b = brsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)],
                              u_i[np.tril_indices_from(u_i)])[1]
    assert p < 0.01, (
        "Fitted covariance matrix does not correlate with ideal covariance "
        "matrix!")
    # check that the recovered SNRs makes sense
    p = scipy.stats.pearsonr(brsa.nSNR_, snr)[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    assert np.isclose(np.mean(np.log(brsa.nSNR_)), 0), "nSNR_ not normalized!"
    p = scipy.stats.pearsonr(brsa.sigma_, noise_level)[1]
    assert p < 0.01, (
        "Fitted noise level does not correlate with simulated noise level!")
    p = scipy.stats.pearsonr(brsa.rho_, rho1)[1]
    assert p < 0.01, (
        "Fitted AR(1) coefficient does not correlate with simulated values!")
    # Generate fresh noise with the same parameters to test transform/score
    # on unseen data carrying the same signal.
    noise_new = np.zeros([n_T, n_V])
    noise_new[0, :] = np.random.randn(n_V) * noise_level / np.sqrt(1 - rho1**2)
    for i_t in range(1, n_T):
        noise_new[i_t, :] = noise_new[i_t - 1, :] * \
            rho1 + np.random.randn(n_V) * noise_level
    Y_new = signal + noise_new + inten
    ts, ts0 = brsa.transform(Y_new, scan_onsets=scan_onsets)
    p = scipy.stats.pearsonr(ts[:, 0], design.design_task[:, 0])[1]
    assert p < 0.01, (
        "Recovered time series does not correlate with true time series!")
    assert np.shape(ts) == (n_T, n_C) and np.shape(ts0) == (n_T, 1), (
        "Wrong shape in returned time series by transform function!")
    [score, score_null] = brsa.score(
        X=Y_new, design=design.design_task, scan_onsets=scan_onsets)
    assert score > score_null, (
        "Full model does not win over null model on data containing signal")
    [score, score_null] = brsa.score(X=noise_new + inten,
                                     design=design.design_task,
                                     scan_onsets=scan_onsets)
    assert score < score_null, (
        "Null model does not win over full model on data without signal")
    # Test fitting with lower rank, nuisance regressors and without GP prior
    rank = n_C - 1
    n_nureg = 1
    brsa = BRSA(rank=rank, n_nureg=n_nureg, tol=2e-3,
                n_iter=8, init_iter=4, auto_nuisance=True)
    brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets)
    # Refresh u_b from THIS fit. This line was previously commented out,
    # which made the check below reuse the previous model's covariance.
    u_b = brsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)], u_i[
        np.tril_indices_from(u_i)])[1]
    assert p < 0.01, (
        "Fitted covariance matrix does not correlate with ideal covariance "
        "matrix!")
    # check that the recovered SNRs makes sense
    p = scipy.stats.pearsonr(brsa.nSNR_, snr)[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    assert np.isclose(np.mean(np.log(brsa.nSNR_)), 0), "nSNR_ not normalized!"
    p = scipy.stats.pearsonr(brsa.sigma_, noise_level)[1]
    assert p < 0.01, (
        "Fitted noise level does not correlate with simulated noise level!")
    p = scipy.stats.pearsonr(brsa.rho_, rho1)[1]
    assert p < 0.01, (
        "Fitted AR(1) coefficient does not correlate with simulated values!")
    assert (not hasattr(brsa, 'bGP_')
            and not hasattr(brsa, 'lGPspace_')
            and not hasattr(brsa, 'lGPinten_')
            ), ("the BRSA object should not have parameters of GP if GP is "
                "not requested.")
    # GP parameters are not set if not requested
    assert brsa.beta0_.shape[0] == n_nureg + 1, 'Shape of beta0 incorrect'
    p = scipy.stats.pearsonr(brsa.beta0_[0, :], inten)[1]
    assert p < 0.01, (
        'recovered beta0 does not correlate with the baseline of voxels.')
    assert np.shape(brsa.L_) == (
        n_C, rank), 'Cholesky factor should have shape of (n_C, rank)'
    # Test fitting with GP over just spatial coordinates.
    brsa = BRSA(GP_space=True, baseline_single=False,
                tol=2e-3, n_iter=4, init_iter=4)
    brsa.fit(X=Y, design=design.design_task,
             scan_onsets=scan_onsets, coords=coords)
    # Check that result is significantly correlated with the ideal covariance
    # matrix
    u_b = brsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)], u_i[
        np.tril_indices_from(u_i)])[1]
    assert p < 0.01, (
        "Fitted covariance matrix does not correlate with ideal covariance "
        "matrix!")
    # check that the recovered SNRs makes sense
    p = scipy.stats.pearsonr(brsa.nSNR_, snr)[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    assert np.isclose(np.mean(np.log(brsa.nSNR_)), 0), "nSNR_ not normalized!"
    p = scipy.stats.pearsonr(brsa.sigma_, noise_level)[1]
    assert p < 0.01, (
        "Fitted noise level does not correlate with simulated noise level!")
    p = scipy.stats.pearsonr(brsa.rho_, rho1)[1]
    assert p < 0.01, (
        "Fitted AR(1) coefficient does not correlate with simulated values!")
    assert not hasattr(brsa, 'lGPinten_'), (
        "the BRSA object should not have parameters of lGPinten_ if only "
        "smoothness in space is requested.")
    # GP parameters are not set if not requested
def test_gradient():
    """Check analytical gradients of BRSA log-likelihood helpers.

    Simulates data as in test_fit, validates the shapes of the precomputed
    design/data cross products, then compares the analytical derivatives
    of ``_loglike_AR1_singpara``, ``_loglike_AR1_diagV_fitU`` and
    ``_loglike_AR1_diagV_fitV`` against numerical directional derivatives
    from ``numdifftools`` along chosen basis directions and along a random
    direction.
    """
    from brainiak.reprsimil.brsa import BRSA
    import brainiak.utils.utils as utils
    import numpy as np
    import os.path
    import numdifftools as nd
    np.random.seed(100)
    file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Load an example design matrix
    design = utils.ReadDesign(fname=file_path)
    n_run = 4
    # concatenate it by 4 times, mimicking 4 runs of identical timing
    design.design_task = np.tile(design.design_task[:, :-1], [n_run, 1])
    design.n_TR = design.n_TR * n_run
    # start simulating some data
    n_V = 30
    n_C = np.size(design.design_task, axis=1)
    n_T = design.n_TR
    noise_bot = 0.5
    noise_top = 1.5
    noise_level = np.random.rand(n_V) * (noise_top - noise_bot) + noise_bot
    # noise level is random.
    # AR(1) coefficient
    rho1_top = 0.8
    rho1_bot = -0.2
    rho1 = np.random.rand(n_V) * (rho1_top - rho1_bot) + rho1_bot
    # generating noise
    noise = np.zeros([n_T, n_V])
    noise[0, :] = np.random.randn(n_V) * noise_level / np.sqrt(1 - rho1**2)
    for i_t in range(1, n_T):
        noise[i_t, :] = noise[i_t - 1, :] * rho1 + \
            np.random.randn(n_V) * noise_level
    # ideal covariance matrix
    ideal_cov = np.zeros([n_C, n_C])
    ideal_cov = np.eye(n_C) * 0.6
    ideal_cov[0, 0] = 0.2
    ideal_cov[5:9, 5:9] = 0.6
    for cond in range(5, 9):
        ideal_cov[cond, cond] = 1
    L_full = np.linalg.cholesky(ideal_cov)
    # generating signal
    snr_level = 5.0  # test with high SNR
    inten = np.random.randn(n_V) * 20.0
    # parameters of Gaussian process to generate pseudo SNR
    tau = 0.8
    smooth_width = 5.0
    inten_kernel = 1.0
    coords = np.arange(0, n_V)[:, None]
    dist2 = np.square(coords - coords.T)
    inten_tile = np.tile(inten, [n_V, 1])
    inten_diff2 = (inten_tile - inten_tile.T)**2
    K = np.exp(-dist2 / smooth_width**2 / 2.0 - inten_diff2 /
               inten_kernel**2 / 2.0) * tau**2 + np.eye(n_V) * tau**2 * 0.001
    L = np.linalg.cholesky(K)
    snr = np.exp(np.dot(L, np.random.randn(n_V))) * snr_level
    # Notice that accurately speaking this is not snr. the magnitude of signal
    # depends not only on beta but also on x.
    sqrt_v = noise_level * snr
    betas_simulated = np.dot(L_full, np.random.randn(n_C, n_V)) * sqrt_v
    signal = np.dot(design.design_task, betas_simulated)
    # Adding noise to signal as data
    Y = signal + noise
    scan_onsets = np.linspace(0, design.n_TR, num=n_run + 1)
    # Test fitting with GP prior.
    brsa = BRSA(GP_space=True, GP_inten=True, rank=n_C)
    # Additionally, we test the generation of re-used terms.
    X0 = np.ones(n_T)[:, None]
    D, F, run_TRs, n_run_returned = brsa._prepare_DF(
        n_T, scan_onsets=scan_onsets)
    assert np.shape(D) == (n_T, n_T), 'D has wrong shape'
    assert np.shape(F) == (n_T, n_T), 'F has wrong shape'
    assert np.sum(D) == (n_T - n_run) * 2, 'D is initialized incorrectly.'
    assert np.sum(F) == n_T - n_run * 2, 'F is initialized incorrectly.'
    assert n_run_returned == n_run, (
        'There is mistake in counting number of runs')
    assert np.sum(run_TRs) == n_T, (
        'The segmentation of the total experiment duration is wrong')
    (XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, XTDX, XTFX
     ) = brsa._prepare_data_XY(design.design_task, Y, D, F)
    (X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY, X0, X_base,
     n_X0, idx_DC
     ) = brsa._prepare_data_XYX0(design.design_task, Y, X0,
                                 np.random.randn(n_T)[:, None], D, F, run_TRs,
                                 no_DC=False)
    # Shape checks on all the precomputed cross products.
    assert (np.shape(XTY) == (n_C, n_V)
            and np.shape(XTDY) == (n_C, n_V)
            and np.shape(XTFY) == (n_C, n_V)
            ), 'Dimension of XTY etc. returned from _prepare_data is wrong'
    assert (np.ndim(YTY_diag) == 1
            and np.ndim(YTDY_diag) == 1
            and np.ndim(YTFY_diag) == 1
            ), ("Dimension of YTY_diag etc. returned from _prepare_data is "
                "wrong")
    assert (np.ndim(XTX) == 2
            and np.ndim(XTDX) == 2
            and np.ndim(XTFX) == 2
            ), 'Dimension of XTX etc. returned from _prepare_data is wrong'
    assert (np.ndim(X0TX0) == 2
            and np.ndim(X0TDX0) == 2
            and np.ndim(X0TFX0) == 2
            ), 'Dimension of X0TX0 etc. returned from _prepare_data is wrong'
    assert (np.ndim(XTX0) == 2
            and np.ndim(XTDX0) == 2
            and np.ndim(XTFX0) == 2
            ), 'Dimension of XTX0 etc. returned from _prepare_data is wrong'
    assert (np.ndim(X0TY) == 2
            and np.ndim(X0TDY) == 2
            and np.ndim(X0TFY) == 2
            ), 'Dimension of X0TY etc. returned from _prepare_data is wrong'
    assert (np.shape(X0) == (n_T, n_X0)
            and np.shape(X_base) == (n_T, np.size(idx_DC))
            and np.max(idx_DC) < n_X0
            and np.size(idx_DC) + 1 == n_X0
            ), ("Dimension of X0 or X_base, or n_X0 or indices of DC "
                "components are wrong.")
    # Indices of the lower-triangular Cholesky entries in parameter vectors.
    l_idx = np.tril_indices(n_C)
    n_l = np.size(l_idx[0])
    # Make sure all the fields are in the indices.
    idx_param_sing, idx_param_fitU, idx_param_fitV = brsa._build_index_param(
        n_l, n_V, 2)
    assert 'Cholesky' in idx_param_sing and 'a1' in idx_param_sing, \
        'The dictionary for parameter indexing misses some keys'
    assert 'Cholesky' in idx_param_fitU and 'a1' in idx_param_fitU, \
        'The dictionary for parameter indexing misses some keys'
    assert 'log_SNR2' in idx_param_fitV and 'c_space' in idx_param_fitV \
        and 'c_inten' in idx_param_fitV and 'c_both' in idx_param_fitV, \
        'The dictionary for parameter indexing misses some keys'
    # Initial parameters are correct parameters with some perturbation
    param0_fitU = np.random.randn(n_l + n_V) * 0.1
    param0_fitV = np.random.randn(n_V + 1) * 0.1
    param0_sing = np.random.randn(n_l + 1) * 0.1
    param0_sing[idx_param_sing['a1']] += np.mean(np.tan(rho1 * np.pi / 2))
    param0_fitV[idx_param_fitV['log_SNR2']] += np.log(snr[:n_V - 1]) * 2
    param0_fitV[idx_param_fitV['c_space']] += np.log(smooth_width) * 2
    param0_fitV[idx_param_fitV['c_inten']] += np.log(inten_kernel) * 2
    # test if the gradients are correct
    # log likelihood and derivative of the _singpara function
    ll0, deriv0 = brsa._loglike_AR1_singpara(param0_sing, XTX, XTDX, XTFX,
                                             YTY_diag, YTDY_diag, YTFY_diag,
                                             XTY, XTDY, XTFY, X0TX0, X0TDX0,
                                             X0TFX0, XTX0, XTDX0, XTFX0, X0TY,
                                             X0TDY, X0TFY, l_idx, n_C, n_T,
                                             n_V, n_run, n_X0, idx_param_sing)
    # We test the gradient to the Cholesky factor
    vec = np.zeros(np.size(param0_sing))
    vec[idx_param_sing['Cholesky'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_singpara(x, XTX, XTDX, XTFX, YTY_diag,
                                             YTDY_diag, YTFY_diag, XTY, XTDY,
                                             XTFY, X0TX0, X0TDX0, X0TFX0, XTX0,
                                             XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
                                             l_idx, n_C, n_T, n_V, n_run, n_X0,
                                             idx_param_sing)[0],
        param0_sing,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of singpara wrt Cholesky is incorrect')
    # We test the gradient to a1
    vec = np.zeros(np.size(param0_sing))
    vec[idx_param_sing['a1']] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_singpara(x, XTX, XTDX, XTFX, YTY_diag,
                                             YTDY_diag, YTFY_diag, XTY, XTDY,
                                             XTFY, X0TX0, X0TDX0, X0TFX0, XTX0,
                                             XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
                                             l_idx, n_C, n_T, n_V, n_run, n_X0,
                                             idx_param_sing)[0],
        param0_sing,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec),
                      rtol=1e-5), 'gradient of singpara wrt a1 is incorrect'
    # log likelihood and derivative of the fitU function.
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitU(param0_fitU, XTX, XTDX, XTFX,
                                               YTY_diag, YTDY_diag, YTFY_diag,
                                               XTY, XTDY, XTFY, X0TX0, X0TDX0,
                                               X0TFX0, XTX0, XTDX0, XTFX0,
                                               X0TY, X0TDY, X0TFY, np.log(snr)
                                               * 2, l_idx, n_C, n_T, n_V,
                                               n_run, n_X0, idx_param_fitU,
                                               n_C)
    # We test the gradient wrt the reparametrization of AR(1) coefficient of
    # noise.
    vec = np.zeros(np.size(param0_fitU))
    vec[idx_param_fitU['a1'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag,
                                               YTDY_diag, YTFY_diag, XTY, XTDY,
                                               XTFY, X0TX0, X0TDX0, X0TFX0,
                                               XTX0, XTDX0, XTFX0, X0TY, X0TDY,
                                               X0TFY, np.log(snr) * 2, l_idx,
                                               n_C, n_T, n_V, n_run, n_X0,
                                               idx_param_fitU, n_C)[0],
        param0_fitU,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitU wrt to AR(1) coefficient incorrect')
    # We test if the numerical and analytical gradient wrt to the first
    # element of Cholesky factor is correct
    vec = np.zeros(np.size(param0_fitU))
    vec[idx_param_fitU['Cholesky'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag,
                                               YTDY_diag, YTFY_diag, XTY, XTDY,
                                               XTFY, X0TX0, X0TDX0, X0TFX0,
                                               XTX0, XTDX0, XTFX0, X0TY, X0TDY,
                                               X0TFY, np.log(snr) * 2, l_idx,
                                               n_C, n_T, n_V, n_run, n_X0,
                                               idx_param_fitU, n_C)[0],
        param0_fitU,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitU wrt Cholesky factor incorrect')
    # Test on a random direction
    vec = np.random.randn(np.size(param0_fitU))
    vec = vec / np.linalg.norm(vec)
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitU(x, XTX, XTDX, XTFX, YTY_diag,
                                               YTDY_diag, YTFY_diag, XTY, XTDY,
                                               XTFY, X0TX0, X0TDX0, X0TFX0,
                                               XTX0, XTDX0, XTFX0, X0TY, X0TDY,
                                               X0TFY, np.log(snr) * 2, l_idx,
                                               n_C, n_T, n_V, n_run, n_X0,
                                               idx_param_fitU, n_C)[0],
        param0_fitU,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec),
                      rtol=1e-5), 'gradient of fitU incorrect'
    # We test the gradient of _fitV wrt to log(SNR^2) assuming no GP prior.
    X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
        XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
        brsa._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
                                        YTY_diag, YTDY_diag, YTFY_diag,
                                        XTX, XTDX, XTFX,
                                        X0TX0, X0TDX0, X0TFX0,
                                        XTX0, XTDX0, XTFX0,
                                        X0TY, X0TDY, X0TFY,
                                        L_full, rho1, n_V, n_X0)
    assert np.shape(XTAcorrX) == (n_V, n_C, n_C), (
        'Dimension of XTAcorrX is wrong by _precompute_ar1_quad_forms()')
    assert XTAcorrY.shape == XTY.shape, (
        'Shape of XTAcorrY is wrong by _precompute_ar1_quad_forms()')
    assert YTAcorrY.shape == YTY_diag.shape, (
        'Shape of YTAcorrY is wrong by _precompute_ar1_quad_forms()')
    assert np.shape(X0TAX0) == (n_V, n_X0, n_X0), (
        'Dimension of X0TAX0 is wrong by _precompute_ar1_quad_forms()')
    assert np.shape(XTAX0) == (n_V, n_C, n_X0), (
        'Dimension of XTAX0 is wrong by _precompute_ar1_quad_forms()')
    assert X0TAY.shape == X0TY.shape, (
        'Shape of X0TAX0 is wrong by _precompute_ar1_quad_forms()')
    assert np.all(np.isfinite(X0TAX0_i)), (
        'Inverse of X0TAX0 includes NaN or Inf')
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitV(
        param0_fitV[idx_param_fitV['log_SNR2']], X0TAX0, XTAX0, X0TAY,
        X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL,
        LTXTAcorrXL, L_full[l_idx], np.tan(rho1 * np.pi / 2), l_idx, n_C, n_T,
        n_V, n_run, n_X0, idx_param_fitV, n_C, False, False)
    vec = np.zeros(np.size(param0_fitV[idx_param_fitV['log_SNR2']]))
    vec[idx_param_fitV['log_SNR2'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C,
                                               False, False)[0],
        param0_fitV[idx_param_fitV['log_SNR2']],
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV wrt log(SNR2) incorrect for model without GP')
    # We test the gradient of _fitV wrt to log(SNR^2) assuming GP prior.
    ll0, deriv0 = brsa._loglike_AR1_diagV_fitV(
        param0_fitV, X0TAX0, XTAX0, X0TAY, X0TAX0_i, XTAcorrX, XTAcorrY,
        YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL, L_full[l_idx],
        np.tan(rho1 * np.pi / 2), l_idx, n_C, n_T, n_V, n_run, n_X0,
        idx_param_fitV, n_C, True, True, dist2, inten_diff2, 100, 100)
    vec = np.zeros(np.size(param0_fitV))
    vec[idx_param_fitV['log_SNR2'][0]] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV srt log(SNR2) incorrect for model with GP')
    # We test the gradient wrt spatial length scale parameter of GP prior
    vec = np.zeros(np.size(param0_fitV))
    vec[idx_param_fitV['c_space']] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV wrt spatial length scale of GP incorrect')
    # We test the gradient wrt intensity length scale parameter of GP prior
    vec = np.zeros(np.size(param0_fitV))
    vec[idx_param_fitV['c_inten']] = 1
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV wrt intensity length scale of GP incorrect')
    # We test the gradient on a random direction
    vec = np.random.randn(np.size(param0_fitV))
    vec = vec / np.linalg.norm(vec)
    dd = nd.directionaldiff(
        lambda x: brsa._loglike_AR1_diagV_fitV(x, X0TAX0, XTAX0, X0TAY,
                                               X0TAX0_i, XTAcorrX, XTAcorrY,
                                               YTAcorrY, LTXTAcorrY, XTAcorrXL,
                                               LTXTAcorrXL, L_full[l_idx],
                                               np.tan(rho1 * np.pi / 2),
                                               l_idx, n_C, n_T, n_V, n_run,
                                               n_X0, idx_param_fitV, n_C, True,
                                               True, dist2, inten_diff2, 100,
                                               100)[0],
        param0_fitV,
        vec)
    assert np.isclose(dd, np.dot(deriv0, vec), rtol=1e-5), (
        'gradient of fitV incorrect')
def test_nureg_determine():
    """The SVHT component-count estimate lands near the true rank (5)."""
    from brainiak.reprsimil.brsa import Ncomp_SVHT_MG_DLD_approx
    import numpy as np
    # Rank-5 data plus a tiny full-rank perturbation.
    low_rank = np.dot(np.random.randn(100, 5), np.random.randn(5, 40))
    data = low_rank + np.random.randn(100, 40) * 0.01
    ncomp = Ncomp_SVHT_MG_DLD_approx(data)
    assert 3 <= ncomp <= 8, (
        'recovered number of components should be in a reasonable range')
def test_half_log_det():
    """BRSA._half_log_det matches 0.5 * log(det) computed directly."""
    import numpy as np
    from brainiak.reprsimil.brsa import BRSA
    matrix = np.asarray([[1, 0.2], [0.2, 1]])
    model = BRSA()
    expected = 0.5 * np.log(np.linalg.det(matrix))
    assert np.isclose(expected, model._half_log_det(matrix)), \
        'half log determinant function is wrong'
def test_n_nureg():
    """Auto-estimated number of nuisance regressors is in a sane range."""
    from brainiak.reprsimil.brsa import BRSA
    import numpy as np
    # Rank-8 "noise" with a small perturbation; no genuine task signal.
    noise = np.dot(np.random.randn(100, 8),
                   np.random.randn(8, 30)) + np.random.randn(100, 30) * 0.01
    design = np.random.randn(100, 2)
    model = BRSA(n_iter=2)
    model.fit(X=noise, design=design)
    assert 2 < model.n_nureg_ < 16, (
        'n_nureg_ estimation is wrong in BRSA')
| 29,837 | 44.763804 | 79 | py |
brainiak | brainiak-master/tests/reconstruct/test_iem.py | # Copyright 2018 David Huberdeau & Peter Kok
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Authors: David Huberdeau (Yale University) &
# Peter Kok (Yale University), 2018 &
# Vy Vo (Intel Corp., UCSD), 2019
import pytest
import numpy as np
import logging
from brainiak.reconstruct.iem import InvertedEncoding1D, InvertedEncoding2D
from brainiak.utils.fmrisim import generate_1d_gaussian_rfs, \
generate_1d_rf_responses
from scipy.stats import circmean
logger = logging.getLogger(__name__)
# Simple test: can an instance be instantiated?
def test_can_instantiate():
    """Both the 1D and 2D IEM classes can be constructed."""
    model_1d = InvertedEncoding1D()
    assert model_1d, "Invalid InvertedEncoding1D instance"
    model_2d = InvertedEncoding2D(stim_xlim=[0, 1], stim_ylim=[0, 1],
                                  stimulus_resolution=[1, 1])
    assert model_2d, "Invalid InvertedEncoding2D instance"
# Test for checking range values.
def test_instantiate_improper_range():
    """Constructors reject malformed stimulus ranges and limits."""
    # 1D: range_start greater than range_stop.
    with pytest.raises(ValueError):
        model_1d = InvertedEncoding1D(6, 5, 'halfcircular', range_start=20,
                                      range_stop=0)
        assert model_1d, "Invalid InvertedEncoding1D instance"
    # 2D: reversed x/y limits.
    with pytest.raises(ValueError):
        model_2d = InvertedEncoding2D(stim_xlim=[0, -1], stim_ylim=[0, -1],
                                      stimulus_resolution=[10, 10])
        assert model_2d, "Invalid InvertedEncoding2D instance"
    # 2D: x-limit with only one endpoint.
    with pytest.raises(ValueError):
        model_2d = InvertedEncoding2D(stim_xlim=[0], stim_ylim=[-1, 0],
                                      stimulus_resolution=10)
        assert model_2d, "Invalid InvertedEncoding2D instance"
# Test for n_observations < n_channels
def test_data_amount():
    """fit() rejects data with fewer observations than channels."""
    too_few_obs = np.random.rand(5, 1000)
    model_1d = InvertedEncoding1D()
    with pytest.raises(ValueError):
        model_1d.fit(too_few_obs, np.random.rand(5))
    assert model_1d, "Invalid data"
    model_2d = InvertedEncoding2D(stim_xlim=[-1, 1], stim_ylim=[-1, 1],
                                  stimulus_resolution=10)
    with pytest.raises(ValueError):
        model_2d.fit(too_few_obs, np.random.rand(5))
# Test number of data dimensions
def test_data_dimensions():
    """fit() rejects data arrays that are not two-dimensional."""
    three_dim = np.random.rand(5, 10, 2)
    model_1d = InvertedEncoding1D()
    with pytest.raises(ValueError):
        model_1d.fit(three_dim, np.random.rand(5))
    model_2d = InvertedEncoding2D(stim_xlim=[-1, 1], stim_ylim=[-1, 1],
                                  stimulus_resolution=10)
    with pytest.raises(ValueError):
        model_2d.fit(three_dim, np.random.rand(5))
# TESTS FOR 2D MODEL #
# Test to check that stimulus resolution is used properly
def test_2d_stimulus_resolution():
    """Scalar and per-axis stimulus_resolution set the pixel-grid sizes.

    Bug fix: the assertions previously read ``len(pixels == n)`` — the
    length of a boolean comparison array, which is non-zero regardless of
    the actual resolution, so the checks could never fail.  The comparison
    now sits outside the ``len()`` call.
    """
    # Scalar resolution applies to both axes.
    s2 = InvertedEncoding2D(stim_xlim=[-1, 1], stim_ylim=[-1, 1],
                            stimulus_resolution=10)
    assert len(s2.stim_pixels[0]) == 10
    assert len(s2.stim_pixels[1]) == 10
    # A two-element resolution applies independently per axis.
    s2 = InvertedEncoding2D(stim_xlim=[-1, 1], stim_ylim=[-2, 2],
                            stimulus_resolution=[10, 20])
    assert len(s2.stim_pixels[0]) == 10
    assert len(s2.stim_pixels[1]) == 20
# Test that 2D channels can be set by the user
def test_2d_custom_channels():
    """A user-supplied channel matrix is accepted by the 2D constructor."""
    n_channels = 8
    resolution = 10
    n_pixels = resolution * resolution
    custom_channels = np.random.rand(n_channels, n_pixels) * 2 - 1
    limits = [-1, 1]
    model = InvertedEncoding2D(stim_xlim=limits, stim_ylim=limits,
                               stimulus_resolution=resolution,
                               chan_xlim=limits, chan_ylim=limits,
                               channels=custom_channels)
    assert model, "Unable to define custom InvertedEncoding2D channels"
# Test that channel definition should be consistent.
def test_cannot_instantiate_2d_channels():
    """A channel matrix defined over the wrong pixel count is rejected."""
    # Channel definition over wrong number of pixels (5 instead of 100)
    with pytest.raises(ValueError):
        s = InvertedEncoding2D(stim_xlim=[-1, 1], stim_ylim=[-1, 1],
                               stimulus_resolution=10,
                               channels=np.random.rand(5, 5))
        assert s, "Invalid InvertedEncoding2D instance"
# Test that you cannot modify properties in an inconsistent way.
def test_modify_2d_properties():
    """set_params rejects changes inconsistent with the existing channels.

    Once a channel matrix is supplied, the channel count and pixel grid
    are pinned, and the field of view must remain a pair of well-formed
    two-element ranges.
    """
    nchan = 8
    res = 10
    npix = res*res
    channels = np.random.rand(nchan, npix)*2 - 1
    bds = [-1, 1]
    s = InvertedEncoding2D(stim_xlim=bds, stim_ylim=bds,
                           stimulus_resolution=res, chan_xlim=bds,
                           chan_ylim=bds, channels=channels)
    # Channel count must match the supplied channel matrix.
    with pytest.raises(ValueError):
        s = s.set_params(n_channels=nchan - 1)
    assert s, "Invalid InvertedEncoding2D instance"
    # Pixel grid must keep the size implied by the channel definition.
    with pytest.raises(ValueError):
        s = s.set_params(xp=np.random.rand(npix - 10))
    assert s, "Invalid InvertedEncoding2D instance"
    # Field of view with a reversed y-range is invalid.
    with pytest.raises(ValueError):
        s = s.set_params(stim_fov=[[0, 1], [0, -1]])
    assert s, "Invalid InvertedEncoding2D instance"
    # Field of view must contain both an x- and a y-range.
    with pytest.raises(ValueError):
        s = s.set_params(stim_fov=[[0, 1]])
    assert s, "Invalid InvertedEncoding2D instance"
    # Each range must have exactly two endpoints.
    with pytest.raises(ValueError):
        s = s.set_params(stim_fov=[[0], [0, 1]])
    assert s, "Invalid InvertedEncoding2D instance"
# Test that you can get object properties
def test_get_2d_params():
    """get_params exposes the field of view and the full pixel grid."""
    limits = [-1, 1]
    resolution = 10
    model = InvertedEncoding2D(stim_xlim=limits, stim_ylim=limits,
                               stimulus_resolution=resolution)
    params = model.get_params()
    assert np.all(params.get('stim_fov')[0] == limits)
    # The pixel grid holds one x-coordinate per pixel of the 2D stimulus.
    assert params.get('xp').size == resolution * resolution
# Test helper function to create 2D cosine
def test_2d_cos():
    """_make_2d_cosine builds one basis row per channel and masks support.

    Each basis function should be zero outside its size constant, so the
    number of non-zero pixels (times the pixel area) is bounded by sz**2.

    Cleanup: the hard-coded ``sz = 2`` was dead code — it was overwritten
    by ``_2d_cosine_fwhm_to_sz(1)`` before first use — and has been removed.
    """
    nchan = 8
    res = 10
    npix = res * res
    bds = [-1, 1]
    s = InvertedEncoding2D(stim_xlim=bds, stim_ylim=bds,
                           stimulus_resolution=res,
                           channels=np.random.rand(nchan, npix))
    # Size constant corresponding to a FWHM of 1.
    sz = s._2d_cosine_fwhm_to_sz(1)
    fcn = s._make_2d_cosine(s.xp.reshape(-1, 1), s.yp.reshape(-1, 1),
                            np.linspace(bds[0], bds[1], nchan),
                            np.linspace(bds[0], bds[1], nchan), sz)
    assert fcn.shape == (nchan, npix)
    # Test that masking works -- basis function should have fewer non-zero
    # elements than specified by the size constant
    xd = np.diff(s.xp)[0][0]
    nval = (np.nonzero(fcn[0, :])[0]).size
    assert nval * (xd**2) <= sz**2
# Test size conversion functions
def test_2d_cos_size_fcns():
    """Size <-> FWHM conversions are mutual inverses (round-trip test)."""
    limits = [-1, 1]
    size_in = np.random.rand()
    model = InvertedEncoding2D(stim_xlim=limits, stim_ylim=limits,
                               stimulus_resolution=10)
    fwhm_out = model._2d_cosine_sz_to_fwhm(size_in)
    size_back = model._2d_cosine_fwhm_to_sz(fwhm_out)
    assert np.isclose(size_in, size_back)
    fwhm_back = model._2d_cosine_sz_to_fwhm(size_back)
    assert np.isclose(fwhm_out, fwhm_back)
def test_square_basis_grid():
    """A square basis grid has nchan x nchan evenly spaced channel centers."""
    nchan = 8
    bds = [-1, 1]
    s = InvertedEncoding2D(stim_xlim=bds, stim_ylim=bds,
                           stimulus_resolution=10)
    _, centers = s.define_basis_functions_sqgrid(nchannels=nchan)
    assert centers.shape[0] == nchan*nchan
    # Spacing should be uniform; spot-check first, middle and last gaps
    # (rounded to absorb floating-point noise). The hard-coded indices 28
    # and 25 avoid the wrap-around gaps at row boundaries of the 8x8 grid.
    xspacing = np.round(np.diff(centers[:, 0]), 5)
    yspacing = np.round(np.diff(centers[:, 1]), 5)
    assert xspacing[0] == xspacing[28] == xspacing[-1]
    assert yspacing[0] == yspacing[25] == yspacing[-1]
def test_triangular_basis_grid():
    """A triangular grid packs rows hexagonally: y-gap = x-gap * sqrt(3)/2."""
    grid_rad = 3
    # Number of centers implied by the grid radius.
    n_channels = (grid_rad*2 + 1) * (grid_rad*2)
    bds = [-1, 1]
    s = InvertedEncoding2D(stim_xlim=bds, stim_ylim=bds,
                           stimulus_resolution=10)
    _, centers = s.define_basis_functions_trigrid(grid_rad)
    assert centers.shape[0] == n_channels
    # Within-row x-spacing is constant (rounded against float noise);
    # spot-checked at the first, a random, and the last gap.
    xspacing = np.round(np.diff(centers[:, 0]), 4)
    assert xspacing[0] == xspacing[np.random.randint(n_channels)] == \
        xspacing[-1]
    # Positive row-to-row y-gaps should equal x-gap * sqrt(3)/2,
    # as expected for equilateral-triangle packing.
    ysp = xspacing[0] * np.sqrt(3) * 0.5
    yspacing = np.diff(centers[:, 1])
    yspace = yspacing[yspacing > 0.0]
    assert np.all((ysp - yspace) < 1e-5)
# Define some data to use in the following tests.
# nobs training trials x nvox voxels; ntest held-out trials.
nobs, nvox, ntest = 100, 1000, 5
xlim, ylim = [[-6, 6], [-3, 3]]
res = [100, 100]
# 10x10 grid of 2D stimulus positions, flattened into an (nobs, 2) label
# array of (x, y) coordinates.
sxx, syy = np.meshgrid(np.linspace(xlim[0], xlim[1], 10),
                       np.linspace(ylim[0], ylim[1], 10))
yd = np.hstack((sxx.reshape(-1, 1), syy.reshape(-1, 1)))
# Training data: each block of 10 trials drawn with a different mean so the
# data matrix is well-conditioned.
Xd = np.zeros((nobs, nvox))
for i, l in enumerate(np.linspace(-1, 1, 10)):
    Xd[i*10:i*10+10, :] = np.random.normal(loc=l, scale=1.5,
                                           size=(10, nvox))
# Held-out test data generated with the same scheme, one trial per mean.
X2d = np.zeros((ntest, nvox))
for i, l in enumerate(np.linspace(-1, 1, 5)):
    X2d[i, :] = np.random.normal(loc=l, scale=1.5,
                                 size=(1, nvox))
# Test that 2D model raises error if design matrix C cannot be defined
def test_fit_invalid_2d():
    """fit fails when neither C nor stim_radius allows building the design."""
    model = InvertedEncoding2D(stim_xlim=xlim, stim_ylim=ylim,
                               stimulus_resolution=res, stim_radius=None)
    model.define_basis_functions_sqgrid(nchannels=[12, 6])
    # With stim_radius=None and no explicit C passed to fit, the design
    # matrix C cannot be constructed, so fitting must raise.
    with pytest.raises(ValueError):
        model.fit(Xd, yd)
# Test attempt to fit with list of varying stimulus radii
def test_fit_2d_radius_list():
    """fit accepts a per-trial array of varying stimulus radii."""
    model = InvertedEncoding2D(stim_xlim=xlim, stim_ylim=ylim,
                               stimulus_resolution=res,
                               stim_radius=np.random.rand(nobs))
    model.define_basis_functions_sqgrid(nchannels=[12, 6])
    model.fit(Xd, yd)
# Test with custom C input
def test_fit_custom_channel_activations():
    """fit accepts an explicitly supplied channel-activation matrix C."""
    i2 = InvertedEncoding2D(stim_xlim=xlim, stim_ylim=ylim,
                            stimulus_resolution=res,
                            stim_radius=12)
    i2.define_basis_functions_sqgrid(nchannels=[12, 6])
    # Define C by expanding y & adding noise to avoid singular W matrix error
    C0 = np.repeat(np.expand_dims(yd[:, 0], 1), 12*3, axis=1) + \
        np.random.rand(nobs, 12*3)
    C1 = np.repeat(np.expand_dims(yd[:, 1], 1), 12*3, axis=1) + \
        np.random.rand(nobs, 12*3)
    i2.fit(Xd, yd, np.hstack((C0, C1)))
    # The fitted weight matrix should be fully populated (non-zero).
    assert np.all(i2.W_)
# Shared 2D model instance reused by the remaining tests; individual tests
# refit it on the module-level data as needed.
iem_2d = InvertedEncoding2D(stim_xlim=xlim, stim_ylim=ylim,
                            stimulus_resolution=res, stim_radius=12)
iem_2d.define_basis_functions_sqgrid(nchannels=[12, 6])
# Test if valid data can be fit.
def test_can_fit_2d_data():
    """Fitting the shared 2D model on the module-level data succeeds."""
    iem_2d.fit(Xd, yd)
# Show that a data matrix with improper format (dimensions) breaks the
# algorithm.
def test_cannot_fit_2d_data():
    """A transposed (voxels x observations) data matrix is rejected."""
    with pytest.raises(ValueError):
        iem_2d.fit(Xd.T, yd)
# Ill conditioned data matrix will raise error
def test_ill_conditioned_2d_train_data():
    """A constant (rank-1) data matrix makes fitting raise."""
    with pytest.raises(ValueError):
        constant_data = np.ones((nobs, nvox))
        random_labels = np.random.rand(nobs, 2)
        iem_2d.fit(constant_data, random_labels)
# Ill conditioned channel activations C will raise warning
def test_ill_conditioned_2d_channel_activations():
    """Constant stimulus labels yield a rank-1 design and a RuntimeWarning."""
    with pytest.warns(RuntimeWarning):
        C = iem_2d._define_trial_activations(np.ones((nobs, 2)))
    assert np.linalg.matrix_rank(C) == 1
# Ill conditioned weight matrix will raise error
def test_ill_conditioned_2d_weights():
    """Random labels lead to an ill-conditioned weight matrix -> error."""
    with pytest.raises(ValueError):
        random_data = np.random.rand(nobs, nvox)
        random_labels = np.random.rand(nobs, 2)
        iem_2d.fit(random_data, random_labels)
# Not enough observations will trigger error
def test_insufficient_2d_data():
    """Too few observations (10 trials) triggers an error on fit."""
    with pytest.raises(ValueError):
        small_data = np.random.rand(10, nvox)
        small_labels = np.random.rand(10, 2)
        iem_2d.fit(small_data, small_labels)
# Test case when # of observations are not matched btwn data & labels
def test_mismatched_2d_observations():
    """A label count differing from the observation count is rejected."""
    with pytest.raises(ValueError):
        iem_2d.fit(Xd, yd[:-50, :])
# Test prediction capability from valid (fabricated) data
def test_can_predict_from_2d_data():
    """predict returns one (x, y) reconstruction per test trial."""
    iem_2d.fit(Xd, yd)
    predictions = iem_2d.predict(X2d)
    assert predictions.shape == (ntest, 2)
# Show that prediction is invalid when input data is wrong size
def test_cannot_predict_from_2d_data():
    """Predicting on a transposed (mis-shaped) matrix must raise ValueError."""
    iem_2d.fit(Xd, yd)
    with pytest.raises(ValueError):
        _ = iem_2d.predict(X2d.T)
# Show proper scoring function with valid (fabricated) test data
def test_can_score_2d():
    """Every scoring variant should return one score per test trial."""
    iem_2d.fit(Xd, yd)
    # Score against the true stimulus labels.
    trial_scores = iem_2d.score(X2d, yd[:ntest, :])
    assert trial_scores.shape == (ntest,)
    # Score against externally supplied reconstructions (default metric).
    recon = np.random.rand(res[0] * res[1], ntest)
    trial_scores = iem_2d.score_against_reconstructed(X2d, recon)
    assert trial_scores.shape == (ntest,)
    # Same, but using cosine similarity.
    recon = np.random.rand(res[0] * res[1], ntest)
    trial_scores = iem_2d.score_against_reconstructed(X2d, recon,
                                                      metric="cosine")
    assert trial_scores.shape == (ntest,)
# Test scoring with invalid data formatting
def test_cannot_score_2d():
    """Scoring a transposed data matrix must raise ValueError."""
    iem_2d.fit(Xd, yd)
    with pytest.raises(ValueError):
        score = iem_2d.score(X2d.transpose(), yd[ntest, :])
        assert score
# TESTS FOR 1D MODEL #
# Test to check stimulus resolution input
def test_1d_stimulus_resolution():
    """The stimulus_resolution constructor argument should be stored as-is."""
    s = InvertedEncoding1D(6, 5, stimulus_resolution=360)
    assert s.stim_res == 360
# Provide invalid data so that channels cannot be created.
def test_cannot_instantiate_1d_channels():
    """Zero channels is invalid and must raise ValueError."""
    with pytest.raises(ValueError):
        s = InvertedEncoding1D(n_channels=0)
        assert s, "Invalid InvertedEncoding1D instance"
# Provide invalid stimulus mode
def test_stimulus_mode():
    """An unrecognized stimulus_mode string must raise ValueError."""
    with pytest.raises(ValueError):
        s = InvertedEncoding1D(6, 5, 'random')
        assert s, "Invalid InvertedEncoding1D instance"
# Provide mismatching range and stimulus_mode input
def test_range_stimulus_mode_circ():
    """A 180-degree range is inconsistent with 'circular' mode -> ValueError."""
    with pytest.raises(ValueError):
        s = InvertedEncoding1D(6, 5, 'circular', 0, 180)
        assert s, "Invalid InvertedEncoding1D instance"
# Provide mismatching range & stimulus mode, with half circular
def test_range_stimulus_mode_halfcirc():
    """A 360-degree range is inconsistent with 'halfcircular' mode -> ValueError."""
    with pytest.raises(ValueError):
        s = InvertedEncoding1D(6, 5, 'halfcircular', -10, 350)
        assert s, "Invalid InvertedEncoding1D instance"
# Define some data to use in the following tests.
# 297 trials over 9 orientations spanning [0, 180) degrees.
n, dim = 297, 9
n_ = n // dim  # trials per orientation
y = np.repeat(np.linspace(0, 180-(180/dim), dim), n_)
# Simulated voxel receptive fields, plus two independent noisy response
# sets (train X, held-out X2); transposed so rows are trials.
voxel_rfs, _ = generate_1d_gaussian_rfs(dim, 180, (0, 179),
                                        random_tuning=False)
X = generate_1d_rf_responses(voxel_rfs, y, 180, (0, 179),
                             trial_noise=0.25).transpose()
X2 = generate_1d_rf_responses(voxel_rfs, y, 180, (0, 179),
                              trial_noise=0.25).transpose()
# Test if valid data can be fit.
def test_can_fit_data():
    """Fitting a default 1D IEM on valid data should succeed."""
    Invt_model = InvertedEncoding1D()
    Invt_model.fit(X, y)
# Test if valid data can be fit in circular space.
def test_can_fit_circular_space():
    """Fitting should also work in a full 360-degree circular space."""
    s = InvertedEncoding1D(6, 5, 'circular', range_stop=360)
    s.fit(X, y)
# Show that a data matrix with improper format (dimensions) breaks the
# algorithm.
def test_cannot_fit_data():
    """A transposed (mis-shaped) data matrix must raise ValueError."""
    with pytest.raises(ValueError):
        Invt_model = InvertedEncoding1D()
        Invt_model.fit(X.transpose(), y)
def test_ill_conditioned_train_data():
    """Rank-deficient training data must be rejected with ValueError."""
    model = InvertedEncoding1D()
    # Note: use a local name distinct from the module-level X fixture.
    degenerate = np.array([[0, 0, 0], [1, 1, 1]])
    with pytest.raises(ValueError):
        model.fit(degenerate, np.array([0, 0, 0]))
# Test case if data dimensions are wrong
def test_extra_data_dimensions():
    """A 3-D data array (trials x voxels x extra axis) must be rejected."""
    cube = np.random.rand(100, 3, 3)  # 300 // 3 trials, with a spurious axis
    model = InvertedEncoding1D()
    with pytest.raises(ValueError):
        model.fit(cube, y)
# Test case when # of observations are not matched btwn data & labels
def test_mismatched_observations():
    """Data and labels with different trial counts must raise ValueError."""
    with pytest.raises(ValueError):
        Invt_model = InvertedEncoding1D()
        Invt_model.fit(X, y[:-50])
# Test prediction capability from valid (fabricated) data
def test_can_predict_from_data():
    """Predict held-out responses and log a circular-mean angle per block."""
    model = InvertedEncoding1D()
    model.fit(X, y)
    recon_angles = []
    for block_idx in np.arange(dim):
        block = X2[n_ * block_idx:n_ * (block_idx + 1), :]
        angle_preds = model.predict(block)
        # Average on the circle, then convert back to degrees for logging.
        recon_angles.append(np.rad2deg(circmean(np.deg2rad(angle_preds))))
    logger.info('Reconstructed angles: ' + str(recon_angles))
# Show that prediction is invalid when input data is wrong size
def test_cannot_predict_from_data():
    """Predicting on a transposed (mis-shaped) matrix must raise ValueError."""
    Invt_model = InvertedEncoding1D()
    Invt_model.fit(X, y)
    with pytest.raises(ValueError):
        _ = Invt_model.predict(X2[0:n_, :].transpose())
# Show proper scoring function with valid (fabricated) test data
def test_can_score():
    """Scoring a fitted 1D IEM on valid held-out data should succeed."""
    Invt_model = InvertedEncoding1D()
    Invt_model.fit(X, y)
    score = Invt_model.score(X2, y)
    logger.info('Scores: ' + str(score))
# Test scoring with invalid data formatting
def test_cannot_score():
    """Scoring a transposed (mis-shaped) data matrix must raise ValueError."""
    with pytest.raises(ValueError):
        Invt_model = InvertedEncoding1D()
        Invt_model.fit(X, y)
        score = Invt_model.score(X2.transpose(), y)
        logger.info('Scores: ' + str(score))
# Test stimulus resolution that is not even multiple
def test_stimulus_resolution_odd():
    """A resolution that does not evenly divide the range is unsupported."""
    Invt_model = InvertedEncoding1D(stimulus_resolution=59)
    with pytest.raises(NotImplementedError):
        Invt_model.fit(X, y)
# Test stimulus masking
def test_stimulus_mask():
    """A stimulus at 50 deg should activate only the middle third of channels."""
    model = InvertedEncoding1D(6, 5, range_start=-10,
                               range_stop=170,
                               stimulus_resolution=60)
    channels, _ = model._define_channels()
    model.set_params(channels_=channels)
    with pytest.warns(RuntimeWarning):
        trial_act = model._define_trial_activations(np.array([50]))
    # Hand-build the expected activation: a 60-wide mask over the middle
    # of the range, projected through the channel basis.
    expected = np.repeat([0, 1, 0], 60) @ channels.transpose()
    assert np.all((trial_act - expected) < 1e-7)
# Test stimulus masking with different range
def test_stimulus_mask_shift_positive():
    """The same mid-range mask should hold when the range starts above zero."""
    model = InvertedEncoding1D(6, 5, range_start=10,
                               range_stop=190,
                               stimulus_resolution=60)
    channels, _ = model._define_channels()
    model.set_params(channels_=channels)
    with pytest.warns(RuntimeWarning):
        trial_act = model._define_trial_activations(np.array([70]))
    # 70 deg sits in the middle 60-unit bin of the shifted [10, 190) range.
    expected = np.repeat([0, 1, 0], 60) @ channels.transpose()
    assert np.all((trial_act - expected) < 1e-7)
# Test ability to get model parameters from object
def test_can_get_params():
    """get_params should expose constructor defaults (channel_exp == 5)."""
    model = InvertedEncoding1D()
    params = model.get_params()
    assert params.get('channel_exp') == 5
    logger.info('Returned Parameters: ' +
                str(params.get('n_channels')) +
                ', ' + str(params.get('range_start')) +
                ', ' + str(params.get('range_stop')))
# Test ability to set model parameters of an object instance
def test_can_set_params():
    """set_params should accept a full set of keyword overrides without error."""
    model = InvertedEncoding1D()
    overrides = dict(n_channels=10,
                     stimulus_mode='circular',
                     range_start=-90,
                     range_stop=270,
                     channel_exp=4,
                     verbose=False)
    model.set_params(**overrides)
| 19,162 | 34.161468 | 77 | py |
brainiak | brainiak-master/tests/utils/test_fmrisim.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fMRI Simulator test script
Test script for generating a run of a participant's data.
Authors: Cameron Ellis (Princeton) 2016
"""
import numpy as np
import math
from brainiak.utils import fmrisim as sim
import pytest
from itertools import product
def test_generate_signal():
    """Exercise sim.generate_signal: shape, magnitude, placement, feature
    types (cube/loop/cavity/sphere), size manipulation, out-of-bounds
    coordinate clipping, and non-constant signal patterns.
    """
    # Inputs for generate_signal
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [3]
    feature_type = ['cube']
    feature_coordinates = np.array([[5, 5, 5]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    assert np.all(volume.shape == dimensions), "Check signal shape"
    assert np.max(volume) == signal_magnitude, "Check signal magnitude"
    # A cube of side feature_size[0] should occupy size**3 voxels.
    assert np.sum(volume > 0) == math.pow(feature_size[0], 3), (
        "Check feature size")
    assert volume[5, 5, 5] == signal_magnitude, "Check signal location"
    assert volume[5, 5, 1] == 0, "Check noise location"
    feature_coordinates = np.array(
        [[5, 5, 5], [3, 3, 3], [7, 7, 7]])
    # Check feature size is correct
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=['loop', 'cavity', 'sphere'],
                                 feature_size=[3],
                                 signal_magnitude=signal_magnitude)
    # Loops and cavities are hollow at their center; spheres are solid.
    assert volume[5, 5, 5] == 0, "Loop is empty"
    assert volume[3, 3, 3] == 0, "Cavity is empty"
    assert volume[7, 7, 7] != 0, "Sphere is not empty"
    # Check feature size manipulation
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=['loop', 'cavity', 'sphere'],
                                 feature_size=[1],
                                 signal_magnitude=signal_magnitude)
    # With size 1 no feature should reach a voxel adjacent to its center.
    assert volume[5, 6, 6] == 0, "Loop is too big"
    assert volume[3, 5, 5] == 0, "Cavity is too big"
    assert volume[7, 9, 9] == 0, "Sphere is too big"
    # Check that out of bounds feature coordinates are corrected
    feature_coordinates = np.array([0, 2, dimensions[2]])
    x, y, z = sim._insert_idxs(feature_coordinates, feature_size[0],
                               dimensions)
    assert x[1] - x[0] == 2, "x min not corrected"
    assert y[1] - y[0] == 3, "y was corrected when it shouldn't be"
    assert z[1] - z[0] == 1, "z max not corrected"
    # Check that signal patterns are created
    feature_coordinates = np.array([[5, 5, 5]])
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 signal_constant=0,
                                 )
    # signal_constant=0 requests a patterned (non-constant) signal.
    assert volume[4:7, 4:7, 4:7].std() > 0, "Signal is constant"
def test_generate_stimfunction():
    """Exercise stimulus time-course generation: length and event count, HRF
    convolution, export/import via a 3-column timing file, temporal
    resolution handling, duration validation, event clipping, and epoch-file
    export for a group.
    """
    # Inputs for generate_stimfunction
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 100
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    # Default temporal resolution is 100 samples per second.
    assert stimfunction.shape[0] == duration * 100, "stimfunc incorrect length"
    eventNumber = np.sum(event_durations * len(onsets)) * 100
    assert np.sum(stimfunction) == eventNumber, "Event number"
    # Create the signal function
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    # Convolution downsamples from temporal resolution to TR resolution.
    stim_dur = stimfunction.shape[0] / (tr_duration * 100)
    assert signal_function.shape[0] == stim_dur, "The length did not change"
    # Test a single event at time zero with a 1 s TR.
    onsets = [0]
    tr_duration = 1
    event_durations = [1]
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    # The double-gamma HRF response should die out between 25 and 30 TRs
    # and include a post-stimulus undershoot (negative values).
    max_response = np.where(signal_function != 0)[0].max()
    assert 25 < max_response <= 30, "HRF has the incorrect length"
    assert np.sum(signal_function < 0) > 0, "No values below zero"
    # Export a stimfunction
    sim.export_3_column(stimfunction,
                        'temp.txt',
                        )
    # Load in the stimfunction
    stimfunc_new = sim.generate_stimfunction(onsets=None,
                                             event_durations=None,
                                             total_time=duration,
                                             timing_file='temp.txt',
                                             )
    assert np.all(stimfunc_new == stimfunction), "Export/import failed"
    # Break the timing precision of the generation
    stimfunc_new = sim.generate_stimfunction(onsets=None,
                                             event_durations=None,
                                             total_time=duration,
                                             timing_file='temp.txt',
                                             temporal_resolution=0.5,
                                             )
    # At 0.5 Hz the 1 s event falls between samples, so nothing registers.
    assert stimfunc_new.sum() == 0, "Temporal resolution not working right"
    # Set the duration to be too short so you should get an error
    onsets = [10, 30, 50, 70, 90]
    event_durations = [5]
    with pytest.raises(ValueError):
        sim.generate_stimfunction(onsets=onsets,
                                  event_durations=event_durations,
                                  total_time=89,
                                  )
    # Clip the event offset
    stimfunc_new = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=95,
                                             )
    assert stimfunc_new[-1] == 1, 'Event offset was not clipped'
    # Test exporting a group of participants to an epoch file
    cond_a = sim.generate_stimfunction(onsets=onsets,
                                       event_durations=event_durations,
                                       total_time=110,
                                       )
    cond_b = sim.generate_stimfunction(onsets=[x + 5 for x in onsets],
                                       event_durations=event_durations,
                                       total_time=110,
                                       )
    stimfunction_group = [np.hstack((cond_a, cond_b))] * 2
    sim.export_epoch_file(stimfunction_group,
                          'temp.txt',
                          tr_duration,
                          )
    # Check that convolve throws a warning when the shape is wrong
    sim.convolve_hrf(stimfunction=np.hstack((cond_a, cond_b)).T,
                     tr_duration=tr_duration,
                     temporal_resolution=1,
                     )
def test_apply_signal():
    """Exercise signal calibration and application.

    Builds a signal volume and a noise volume, then checks that
    compute_signal_change scales the signal correctly under each supported
    method (PSC, SFNR, and the CNR variants), and that apply_signal
    produces volumes of the right shape and rejects mismatched voxel
    counts.
    """
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[5, 5, 5]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Inputs for generate_stimfunction
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 100
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    # Check that you can compute signal change appropriately
    # Preset a bunch of things
    stimfunction_tr = stimfunction[::int(tr_duration * 100)]
    mask, template = sim.mask_brain(dimensions, mask_self=False)
    noise_dict = sim._noise_dict_update({})
    noise = sim.generate_noise(dimensions=dimensions,
                               stimfunction_tr=stimfunction_tr,
                               tr_duration=tr_duration,
                               template=template,
                               mask=mask,
                               noise_dict=noise_dict,
                               iterations=[0, 0]
                               )
    # Extract two single-voxel noise time courses as (TRs, 1) columns.
    coords = feature_coordinates[0]
    noise_function_a = noise[coords[0], coords[1], coords[2], :]
    noise_function_a = noise_function_a.reshape(duration // tr_duration, 1)
    noise_function_b = noise[coords[0] + 1, coords[1], coords[2], :]
    noise_function_b = noise_function_b.reshape(duration // tr_duration, 1)
    # Check that the noise_function and signal_function must be the same size
    method = 'PSC'
    with pytest.raises(ValueError):
        sim.compute_signal_change(signal_function,
                                  noise_function_a.T,
                                  noise_dict,
                                  [0.5],
                                  method,
                                  )
    # Create the calibrated signal with PSC
    # Doubling the requested magnitude (0.5 -> 1.0) should double the peak.
    sig_a = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [0.5],
                                      method,
                                      )
    sig_b = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [1.0],
                                      method,
                                      )
    assert sig_b.max() / sig_a.max() == 2, 'PSC modulation failed'
    # Create the calibrated signal with SFNR
    method = 'SFNR'
    sig_a = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [0.5],
                                      method,
                                      )
    # Undo the per-voxel SFNR scaling so the two signals are comparable.
    scaled_a = sig_a / (noise_function_a.mean() / noise_dict['sfnr'])
    sig_b = sim.compute_signal_change(signal_function,
                                      noise_function_b,
                                      noise_dict,
                                      [1.0],
                                      method,
                                      )
    scaled_b = sig_b / (noise_function_b.mean() / noise_dict['sfnr'])
    assert scaled_b.max() / scaled_a.max() == 2, 'SFNR modulation failed'
    # Create the calibrated signal with CNR_Amp/Noise-SD
    method = 'CNR_Amp/Noise-SD'
    sig_a = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [0.5],
                                      method,
                                      )
    scaled_a = sig_a / noise_function_a.std()
    sig_b = sim.compute_signal_change(signal_function,
                                      noise_function_b,
                                      noise_dict,
                                      [1.0],
                                      method,
                                      )
    scaled_b = sig_b / noise_function_b.std()
    assert scaled_b.max() / scaled_a.max() == 2, 'CNR_Amp modulation failed'
    # Create the calibrated signal with CNR_Amp2/Noise-Var_dB
    method = 'CNR_Amp2/Noise-Var_dB'
    sig_a = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [0.5],
                                      method,
                                      )
    scaled_a = np.log(sig_a.max() / noise_function_a.std())
    sig_b = sim.compute_signal_change(signal_function,
                                      noise_function_b,
                                      noise_dict,
                                      [1.0],
                                      method,
                                      )
    scaled_b = np.log(sig_b.max() / noise_function_b.std())
    assert np.round(scaled_b / scaled_a) == 2, 'CNR_Amp dB modulation failed'
    # Create the calibrated signal with CNR_Signal-SD/Noise-SD
    method = 'CNR_Signal-SD/Noise-SD'
    sig_a = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [0.5],
                                      method,
                                      )
    scaled_a = sig_a.std() / noise_function_a.std()
    sig_b = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [1.0],
                                      method,
                                      )
    scaled_b = sig_b.std() / noise_function_a.std()
    assert (scaled_b / scaled_a) == 2, 'CNR signal modulation failed'
    # Create the calibrated signal with CNR_Signal-Var/Noise-Var_dB
    method = 'CNR_Signal-Var/Noise-Var_dB'
    sig_a = sim.compute_signal_change(signal_function,
                                      noise_function_a,
                                      noise_dict,
                                      [0.5],
                                      method,
                                      )
    scaled_a = np.log(sig_a.std() / noise_function_a.std())
    sig_b = sim.compute_signal_change(signal_function,
                                      noise_function_b,
                                      noise_dict,
                                      [1.0],
                                      method,
                                      )
    scaled_b = np.log(sig_b.std() / noise_function_b.std())
    assert np.round(scaled_b / scaled_a) == 2, 'CNR signal dB modulation ' \
                                               'failed'
    # Convolve the HRF with the stimulus sequence
    signal = sim.apply_signal(signal_function=signal_function,
                              volume_signal=volume,
                              )
    assert signal.shape == (dimensions[0], dimensions[1], dimensions[2],
                            duration / tr_duration), "The output is the " \
                                                     "wrong size"
    signal = sim.apply_signal(signal_function=stimfunction,
                              volume_signal=volume,
                              )
    assert np.any(signal == signal_magnitude), "The stimfunction is not binary"
    # Check that there is an error if the number of signal voxels doesn't
    # match the number of non zero brain voxels
    with pytest.raises(IndexError):
        sig_vox = (volume > 0).sum()
        vox_pattern = np.tile(stimfunction, (1, sig_vox - 1))
        sim.apply_signal(signal_function=vox_pattern,
                         volume_signal=volume,
                         )
def test_generate_noise():
    """Exercise fmrisim noise generation end to end.

    Builds a signal volume, then checks: noise volume shape, SFNR
    manipulation, template validation, the ``iterations`` argument, drift
    noise (sine and cosine bases), physiological noise peaks, task noise,
    ARMA parameter validation, spatial noise reshaping, and enabling
    individual noise components via the noise dict.
    """
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[5, 5, 5]])
    signal_magnitude = [1]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Inputs for generate_stimfunction
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 200
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             )
    signal_function = sim.convolve_hrf(stimfunction=stimfunction,
                                       tr_duration=tr_duration,
                                       )
    # Convolve the HRF with the stimulus sequence
    signal = sim.apply_signal(signal_function=signal_function,
                              volume_signal=volume,
                              )
    # Generate the mask of the signal
    mask, template = sim.mask_brain(signal,
                                    mask_self=None)
    assert min(mask[mask > 0]) > 0.1, "Mask thresholding did not work"
    # Bug fix: the parenthesis must close around np.unique -- previously this
    # took len() of a boolean array, which is truthy for any non-empty input.
    assert len(np.unique(template)) > 2, "Template creation did not work"
    stimfunction_tr = stimfunction[::int(tr_duration * 100)]
    # Create the noise volumes (using the default parameters)
    noise = sim.generate_noise(dimensions=dimensions,
                               stimfunction_tr=stimfunction_tr,
                               tr_duration=tr_duration,
                               template=template,
                               mask=mask,
                               iterations=[1, 0],
                               )
    assert signal.shape == noise.shape, "The dimensions of signal and noise " \
                                        "are not the same"
    # A lower SFNR target should produce more temporal variability.
    noise_high = sim.generate_noise(dimensions=dimensions,
                                    stimfunction_tr=stimfunction_tr,
                                    tr_duration=tr_duration,
                                    template=template,
                                    mask=mask,
                                    noise_dict={'sfnr': 50, 'snr': 25},
                                    iterations=[1, 0],
                                    )
    noise_low = sim.generate_noise(dimensions=dimensions,
                                   stimfunction_tr=stimfunction_tr,
                                   tr_duration=tr_duration,
                                   template=template,
                                   mask=mask,
                                   noise_dict={'sfnr': 100, 'snr': 25},
                                   iterations=[1, 0],
                                   )
    system_high = np.std(noise_high[mask > 0], 1).mean()
    system_low = np.std(noise_low[mask > 0], 1).mean()
    assert system_low < system_high, "SFNR noise could not be manipulated"
    # Check that you check for the appropriate template values
    with pytest.raises(ValueError):
        sim.generate_noise(dimensions=dimensions,
                           stimfunction_tr=stimfunction_tr,
                           tr_duration=tr_duration,
                           template=template * 2,
                           mask=mask,
                           noise_dict={},
                           )
    # Check that iterations does what it should
    sim.generate_noise(dimensions=dimensions,
                       stimfunction_tr=stimfunction_tr,
                       tr_duration=tr_duration,
                       template=template,
                       mask=mask,
                       noise_dict={},
                       iterations=[0, 0],
                       )
    sim.generate_noise(dimensions=dimensions,
                       stimfunction_tr=stimfunction_tr,
                       tr_duration=tr_duration,
                       template=template,
                       mask=mask,
                       noise_dict={},
                       iterations=None,
                       )
    # Test drift noise
    trs = 1000
    period = 100
    drift = sim._generate_noise_temporal_drift(trs,
                                               tr_duration,
                                               'sine',
                                               period,
                                               )
    # Check that the max frequency is the appropriate frequency
    power = abs(np.fft.fft(drift))[1:trs // 2]
    freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs
    period_freq = np.where(freq == 1 / (period // tr_duration))
    max_freq = np.argmax(power)
    assert period_freq == max_freq, 'Max frequency is not where it should be'
    # Do the same but now with cosine basis functions, answer should be close
    drift = sim._generate_noise_temporal_drift(trs,
                                               tr_duration,
                                               'discrete_cos',
                                               period,
                                               )
    # Check that the appropriate frequency is peaky (may not be the max)
    power = abs(np.fft.fft(drift))[1:trs // 2]
    freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / trs
    period_freq = np.where(freq == 1 / (period // tr_duration))[0][0]
    assert power[period_freq] > power[period_freq + 1], 'Power is low'
    assert power[period_freq] > power[period_freq - 1], 'Power is low'
    # Check these run fine (return values are intentionally unused)
    sim._generate_noise_temporal_drift(50,
                                       tr_duration,
                                       'discrete_cos',
                                       period,
                                       )
    sim._generate_noise_temporal_drift(300,
                                       tr_duration,
                                       'cos_power_drop',
                                       period,
                                       )
    # Check that when the TR is greater than the period it errors
    with pytest.raises(ValueError):
        sim._generate_noise_temporal_drift(30, 10, 'cos_power_drop', 5)
    # Test physiological noise (using unrealistic parameters so that it's easy)
    timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs))
    resp_freq = 0.2
    heart_freq = 1.17
    phys = sim._generate_noise_temporal_phys(timepoints,
                                             resp_freq,
                                             heart_freq,
                                             )
    # Check that the max frequency is the appropriate frequency
    power = abs(np.fft.fft(phys))[1:trs // 2]
    freq = np.linspace(1, trs // 2 - 1, trs // 2 - 1) / (trs * tr_duration)
    peaks = (power > (power.mean() + power.std()))  # Where are the peaks
    peak_freqs = freq[peaks]
    assert np.any(resp_freq == peak_freqs), 'Resp frequency not found'
    assert len(peak_freqs) == 2, 'Two peaks not found'
    # Test task noise
    sim._generate_noise_temporal_task(stimfunction_tr,
                                      motion_noise='gaussian',
                                      )
    sim._generate_noise_temporal_task(stimfunction_tr,
                                      motion_noise='rician',
                                      )
    # Test ARMA noise: mismatched AR/MA orders should be rejected.
    with pytest.raises(ValueError):
        noise_dict = {'fwhm': 4, 'auto_reg_rho': [1], 'ma_rho': [1, 1]}
        sim._generate_noise_temporal_autoregression(stimfunction_tr,
                                                    noise_dict,
                                                    dimensions,
                                                    mask,
                                                    )
    # Generate spatial noise
    vol = sim._generate_noise_spatial(np.array([10, 10, 10, trs]))
    assert len(vol.shape) == 3, 'Volume was not reshaped to ignore TRs'
    # Switch some of the noise types on
    noise_dict = dict(physiological_sigma=1, drift_sigma=1, task_sigma=1,
                      auto_reg_sigma=0)
    sim.generate_noise(dimensions=dimensions,
                       stimfunction_tr=stimfunction_tr,
                       tr_duration=tr_duration,
                       template=template,
                       mask=mask,
                       noise_dict=noise_dict,
                       iterations=[0, 0],
                       )
def test_generate_noise_spatial():
    """Check that spatial noise smoothness (FWHM) is consistent across the
    three volume dimensions by recomputing the FWHM estimate per axis.
    """
    # Set up the inputs
    dimensions = np.array([10, 5, 10])
    mask = np.ones(dimensions)
    vol = sim._generate_noise_spatial(dimensions, mask)
    # Run the analysis from _calc_FHWM but for the last step of aggregating
    # across dimensions
    v_count = 0
    v_sum = 0
    v_sq = 0
    d_sum = [0.0, 0.0, 0.0]
    d_sq = [0.0, 0.0, 0.0]
    d_count = [0, 0, 0]
    # Pull out all the voxel coordinates
    coordinates = list(product(range(dimensions[0]),
                               range(dimensions[1]),
                               range(dimensions[2])))
    # Find the sum of squared error for the non-masked voxels in the brain
    for i in list(range(len(coordinates))):
        # Pull out this coordinate
        x, y, z = coordinates[i]
        # Is this within the mask?
        if mask[x, y, z] > 0:
            # Find the volume sum and squared values
            v_count += 1
            v_sum += vol[x, y, z]
            v_sq += vol[x, y, z] ** 2
    # Get the volume variance
    v_var = (v_sq - ((v_sum ** 2) / v_count)) / (v_count - 1)
    for i in list(range(len(coordinates))):
        # Pull out this coordinate
        x, y, z = coordinates[i]
        # Is this within the mask?
        if mask[x, y, z] > 0:
            # For each xyz dimension calculate the squared
            # difference of this voxel and the next
            in_range = (x < dimensions[0] - 1)
            in_mask = in_range and (mask[x + 1, y, z] > 0)
            included = in_mask and (~np.isnan(vol[x + 1, y, z]))
            if included:
                d_sum[0] += vol[x, y, z] - vol[x + 1, y, z]
                d_sq[0] += (vol[x, y, z] - vol[x + 1, y, z]) ** 2
                d_count[0] += 1
            in_range = (y < dimensions[1] - 1)
            in_mask = in_range and (mask[x, y + 1, z] > 0)
            included = in_mask and (~np.isnan(vol[x, y + 1, z]))
            if included:
                d_sum[1] += vol[x, y, z] - vol[x, y + 1, z]
                d_sq[1] += (vol[x, y, z] - vol[x, y + 1, z]) ** 2
                d_count[1] += 1
            in_range = (z < dimensions[2] - 1)
            in_mask = in_range and (mask[x, y, z + 1] > 0)
            included = in_mask and (~np.isnan(vol[x, y, z + 1]))
            if included:
                d_sum[2] += vol[x, y, z] - vol[x, y, z + 1]
                d_sq[2] += (vol[x, y, z] - vol[x, y, z + 1]) ** 2
                d_count[2] += 1
    # Find the variance of neighbor differences per dimension
    d_var = np.divide((d_sq - np.divide(np.power(d_sum, 2),
                                        d_count)), (np.add(d_count, -1)))
    # Convert difference variance to a per-axis FWHM estimate
    o_var = np.divide(-1, (4 * np.log(1 - (0.5 * d_var / v_var))))
    fwhm3 = np.sqrt(o_var) * 2 * np.sqrt(2 * np.log(2))
    # Calculate the proportion of std relative to the mean
    std_proportion = np.nanstd(fwhm3) / np.nanmean(fwhm3)
    assert std_proportion < 0.25, 'Variance is inconsistent across dim'
def test_mask_brain():
    """Exercise sim.mask_brain: masking preserves in-brain signal, removes
    out-of-brain signal, the default (100-cube) template loads correctly,
    and self-masking reproduces the template-derived mask.
    """
    # Inputs for generate_signal
    dimensions = np.array([10, 10, 10])  # What is the size of the brain
    feature_size = [2]
    feature_type = ['cube']
    feature_coordinates = np.array(
        [[4, 4, 4]])
    signal_magnitude = [30]
    # Generate a volume representing the location and quality of the signal
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Mask the volume to be the same shape as a brain
    mask, _ = sim.mask_brain(dimensions, mask_self=None,)
    brain = volume * mask
    # A central feature is fully inside the brain, so nothing is lost.
    assert np.sum(brain != 0) == np.sum(volume != 0), "Masking did not work"
    assert brain[0, 0, 0] == 0, "Masking did not work"
    assert brain[4, 4, 4] != 0, "Masking did not work"
    # A corner feature partially falls outside the brain mask.
    feature_coordinates = np.array(
        [[1, 1, 1]])
    volume = sim.generate_signal(dimensions=dimensions,
                                 feature_coordinates=feature_coordinates,
                                 feature_type=feature_type,
                                 feature_size=feature_size,
                                 signal_magnitude=signal_magnitude,
                                 )
    # Mask the volume to be the same shape as a brain
    mask, _ = sim.mask_brain(dimensions, mask_self=None, )
    brain = volume * mask
    assert np.sum(brain != 0) < np.sum(volume != 0), "Masking did not work"
    # Test that you can load the default
    dimensions = np.array([100, 100, 100])
    mask, template = sim.mask_brain(dimensions, mask_self=False)
    # Bug fix: the messages used '' inside single quotes, which concatenates
    # to "didnt"; the intended apostrophe is restored.
    assert mask[20, 80, 50] == 0, "Masking didn't work"
    assert mask[25, 80, 50] == 1, "Masking didn't work"
    assert int(template[25, 80, 50] * 100) == 57, 'Template not correct'
    # Check that you can mask self
    mask_self, template_self = sim.mask_brain(template, mask_self=True)
    assert (template_self - template).sum() < 1e2, 'Mask self error'
    assert (mask_self - mask).sum() == 0, 'Mask self error'
def test_calc_noise():
    """Exercise sim.calc_noise: estimated parameters are in range, iterative
    matching improves the SNR/SFNR/AR(1) fit, inputs are validated, missing
    noise dicts get defaults, and single-voxel ARMA estimation works.
    """
    # Inputs for functions
    onsets = [10, 30, 50, 70, 90]
    event_durations = [6]
    tr_duration = 2
    duration = 200
    temporal_res = 100
    tr_number = int(np.floor(duration / tr_duration))
    dimensions_tr = np.array([10, 10, 10, tr_number])
    # Preset the noise dict
    nd_orig = sim._noise_dict_update({})
    # Create the time course for the signal to be generated
    stimfunction = sim.generate_stimfunction(onsets=onsets,
                                             event_durations=event_durations,
                                             total_time=duration,
                                             temporal_resolution=temporal_res,
                                             )
    # Mask the volume to be the same shape as a brain
    mask, template = sim.mask_brain(dimensions_tr, mask_self=None)
    stimfunction_tr = stimfunction[::int(tr_duration * temporal_res)]
    # Generate noise without iterative fitting to the target parameters.
    nd_orig['matched'] = 0
    noise = sim.generate_noise(dimensions=dimensions_tr[0:3],
                               stimfunction_tr=stimfunction_tr,
                               tr_duration=tr_duration,
                               template=template,
                               mask=mask,
                               noise_dict=nd_orig,
                               )
    # Check the spatial noise match
    nd_orig['matched'] = 1
    noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3],
                                       stimfunction_tr=stimfunction_tr,
                                       tr_duration=tr_duration,
                                       template=template,
                                       mask=mask,
                                       noise_dict=nd_orig,
                                       iterations=[50, 0]
                                       )
    # Calculate the noise parameters from this newly generated volume
    nd_new = sim.calc_noise(noise, mask, template)
    nd_matched = sim.calc_noise(noise_matched, mask, template)
    # Check the values are reasonable
    assert nd_new['snr'] > 0, 'snr out of range'
    assert nd_new['sfnr'] > 0, 'sfnr out of range'
    assert nd_new['auto_reg_rho'][0] > 0, 'ar out of range'
    # Check that the dilation increases SNR
    no_dilation_snr = sim._calc_snr(noise_matched,
                                    mask,
                                    dilation=0,
                                    reference_tr=tr_duration,
                                    )
    assert nd_new['snr'] > no_dilation_snr, "Dilation did not increase SNR"
    # Check that template size is in bounds
    with pytest.raises(ValueError):
        sim.calc_noise(noise, mask, template * 2)
    # Check that Mask is set is checked
    with pytest.raises(ValueError):
        sim.calc_noise(noise, None, template)
    # Check that it can deal with missing noise parameters
    temp_nd = sim.calc_noise(noise, mask, template, noise_dict={})
    assert temp_nd['voxel_size'][0] == 1, 'Default voxel size not set'
    temp_nd = sim.calc_noise(noise, mask, template, noise_dict=None)
    assert temp_nd['voxel_size'][0] == 1, 'Default voxel size not set'
    # Check that the fitting worked: matched noise lands closer to target.
    snr_diff = abs(nd_orig['snr'] - nd_new['snr'])
    snr_diff_match = abs(nd_orig['snr'] - nd_matched['snr'])
    assert snr_diff > snr_diff_match, 'snr fit incorrectly'
    # Test that you can generate rician and exponential noise
    sim._generate_noise_system(dimensions_tr,
                               1,
                               1,
                               spatial_noise_type='exponential',
                               temporal_noise_type='rician',
                               )
    # Check the temporal noise match
    nd_orig['matched'] = 1
    noise_matched = sim.generate_noise(dimensions=dimensions_tr[0:3],
                                       stimfunction_tr=stimfunction_tr,
                                       tr_duration=tr_duration,
                                       template=template,
                                       mask=mask,
                                       noise_dict=nd_orig,
                                       iterations=[0, 50]
                                       )
    nd_matched = sim.calc_noise(noise_matched, mask, template)
    sfnr_diff = abs(nd_orig['sfnr'] - nd_new['sfnr'])
    sfnr_diff_match = abs(nd_orig['sfnr'] - nd_matched['sfnr'])
    assert sfnr_diff > sfnr_diff_match, 'sfnr fit incorrectly'
    ar1_diff = abs(nd_orig['auto_reg_rho'][0] - nd_new['auto_reg_rho'][0])
    ar1_diff_match = abs(nd_orig['auto_reg_rho'][0] - nd_matched[
        'auto_reg_rho'][0])
    assert ar1_diff > ar1_diff_match, 'AR1 fit incorrectly'
    # Check that you can calculate ARMA for a single voxel
    vox = noise[5, 5, 5, :]
    arma = sim._calc_ARMA_noise(vox,
                                None,
                                sample_num=2,
                                )
    assert len(arma) == 2, "Two outputs not given by ARMA"
def test_gen_1D_gauss_shape():
    """RF generation returns (n_vox, res) fields and one center per voxel;
    responses are (n_vox, n_trials)."""
    n_vox, res = 10, 180
    fields, centers = sim.generate_1d_gaussian_rfs(n_vox, res, (0, res - 1))
    assert fields.shape == (n_vox, res)
    assert centers.size == n_vox
    responses = sim.generate_1d_rf_responses(fields, np.array([0, 10, 20]),
                                             res, (0, res - 1))
    assert responses.shape == (n_vox, 3)
def test_gen_1d_gauss_range():
    """A stimulus at the start of a shifted value range still drives a
    response, for both negatively and positively shifted ranges."""
    res = 180
    for lo, hi, stim in ((-10, res - 11, -10), (10, res + 10, 10)):
        fields, _ = sim.generate_1d_gaussian_rfs(1, res, (lo, hi),
                                                 random_tuning=False)
        responses = sim.generate_1d_rf_responses(fields, np.array([stim]),
                                                 res, (lo, hi), 0)
        assert responses[0, ] > 0
def test_gen_1D_gauss_even_spacing():
    """With random_tuning=False, centers should be deterministically spaced
    across the value range."""
    n_vox = 9
    res = 180
    rfs, centers = sim.generate_1d_gaussian_rfs(n_vox, res, (0, res-1),
                                                random_tuning=False)
    assert np.all(centers == np.array([0, 19, 39, 59, 79, 99, 119, 139, 159]))
| 37,943 | 40.514223 | 79 | py |
brainiak | brainiak-master/tests/utils/test_fmrisim_real_time.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fmrisim real-time generator test script
Authors: Cameron Ellis (Princeton) 2020
"""
import numpy as np
from brainiak.utils import fmrisim_real_time_generator as gen
import pytest
import os
import time
import glob
from pkg_resources import resource_stream
from typing import Dict
from nibabel.nifti1 import Nifti1Image
import gzip
# Test that it crashes without inputs
with pytest.raises(TypeError):
    gen.generate_data()  # type: ignore
# Shared fixture: simulation parameters used (and copied) by every test below.
data_dict = {}  # type: Dict
# Load the packaged ROI masks and anatomical template that ship with the
# fmrisim real-time generator (gzipped NIfTI volumes read as raw arrays).
vol = resource_stream(gen.__name__, "sim_parameters/ROI_A.nii.gz").read()
data_dict['ROI_A_file'] = Nifti1Image.from_bytes(gzip.decompress(
    vol)).get_data()
vol = resource_stream(gen.__name__, "sim_parameters/ROI_B.nii.gz").read()
data_dict['ROI_B_file'] = Nifti1Image.from_bytes(gzip.decompress(
    vol)).get_data()
vol = resource_stream(gen.__name__,
                      "sim_parameters/sub_template.nii.gz").read()
data_dict['template_path'] = Nifti1Image.from_bytes(gzip.decompress(
    vol)).get_data()
# Noise parameters are passed through as raw bytes from the packaged file.
noise_dict_file = resource_stream(gen.__name__,
                                  "sim_parameters/sub_noise_dict.txt").read()
data_dict['noise_dict_file'] = noise_dict_file
# Timing and signal parameters (seconds / TR counts per the key names).
data_dict['numTRs'] = 30
data_dict['event_duration'] = 2
data_dict['scale_percentage'] = 1
data_dict['different_ROIs'] = True
data_dict['multivariate_pattern'] = False
data_dict['save_dicom'] = False
data_dict['save_realtime'] = False
data_dict['trDuration'] = 2
data_dict['isi'] = 4
data_dict['burn_in'] = 6
# Run default test
def test_default(tmp_path, dd=data_dict):
    """Default simulation writes 30 volumes plus labels and mask files."""
    # Copy so the shared parameter dict is never mutated across tests
    dd = dd.copy()
    gen.generate_data(str(tmp_path),
                      dd)
    # 30 TR volumes plus the label and mask files
    assert len(os.listdir(str(tmp_path))) == 32, "Incorrect file number"
    # Each output volume must match the template's spatial shape
    template_shape = dd['template_path'].shape
    first_vol = np.load(tmp_path / 'rt_000.npy')
    assert template_shape == first_vol.shape, 'Output shape is incorrect'
    # The label time course should contain the expected number of events
    labels = np.load(tmp_path / 'labels.npy')
    assert np.sum(labels > 0) == 9, 'Incorrect number of events'
def test_signal_size(tmp_path, dd=data_dict):
    """A large signal in ROI A makes its mean vary more than ROI B's."""
    dd = dd.copy()
    # Put all signal in ROI A only, and make it large
    dd['different_ROIs'] = False
    dd['scale_percentage'] = 100
    gen.generate_data(str(tmp_path),
                      dd)
    ROI_A = dd['ROI_A_file']
    ROI_B = dd['ROI_B_file']
    # Collect the mean of each ROI per simulated volume
    ROI_A_mean = []
    ROI_B_mean = []
    for tr in range(dd['numTRs']):
        vol = np.load(tmp_path / ('rt_%03d.npy' % tr))
        ROI_A_mean.append(np.mean(vol[ROI_A == 1]))
        ROI_B_mean.append(np.mean(vol[ROI_B == 1]))
    # The ROI carrying the signal should fluctuate more over time
    assert np.std(ROI_A_mean) > np.std(ROI_B_mean), 'Signal not scaling'
def test_multivariate(tmp_path, dd=data_dict):
    """A multivariate pattern adds voxelwise-variable signal in ROI A."""
    dd = dd.copy()
    dd['multivariate_pattern'] = True
    dd['different_ROIs'] = False
    # Make the signal large so the pattern dominates the noise
    dd['scale_percentage'] = 100
    gen.generate_data(str(tmp_path),
                      dd)
    ROI_A = dd['ROI_A_file']
    ROI_B = dd['ROI_B_file']
    # A mid-run volume should show more spatial variability in ROI A
    vol = np.load(str(tmp_path / 'rt_007.npy'))
    spread_A = np.std(vol[ROI_A == 1])
    spread_B = np.std(vol[ROI_B == 1])
    assert spread_A > spread_B, 'Multivariate not making variable signal'
def test_save_dicoms_realtime(tmp_path, dd=data_dict):
    """Realtime mode paces output (~2 s per TR) and writes DICOM files."""
    dd = dd.copy()
    start = time.time()
    dd['save_dicom'] = True
    dd['save_realtime'] = True
    # Exercise the defaults path: no ROI, template, or noise inputs supplied
    for key in ('ROI_A_file', 'ROI_B_file',
                'template_path', 'noise_dict_file'):
        dd[key] = None
    gen.generate_data(str(tmp_path),
                      dd)
    elapsed = time.time() - start
    # 30 TRs at roughly 2 s each means the run must take over a minute
    assert elapsed > 60, 'Realtime ran fast'
    # One DICOM per TR
    dicom_files = glob.glob(str(tmp_path / '*.dcm'))
    assert len(dicom_files) == 30, "Wrong dicom file num"
| 4,975 | 27.597701 | 77 | py |
brainiak | brainiak-master/tests/utils/test_utils.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_tri_sym_convert():
    """Round-trip a 3x3 matrix through symmetric/triangular conversion."""
    from brainiak.utils.utils import from_tri_2_sym, from_sym_2_tri
    import numpy as np
    source = np.random.rand(3, 3)
    tri_form = from_sym_2_tri(source)
    # A 3x3 symmetric matrix has 6 unique (triangular) entries
    assert tri_form.shape[0] == 6,\
        "from_sym_2_tri returned wrong result!"
    sym_form = from_tri_2_sym(tri_form, 3)
    assert sym_form.shape[0] == sym_form.shape[1],\
        "from_tri_2_sym returned wrong shape!"
    # Converting back must reproduce the same triangular vector
    round_trip = from_sym_2_tri(sym_form)
    assert np.array_equiv(tri_form, round_trip),\
        "from_sym_2_tri returned wrong result!"
def test_sumexp():
    """Outputs of sumexp_stable have shapes consistent with the input."""
    from brainiak.utils.utils import sumexp_stable
    import numpy as np
    mat = np.array([[1, 1], [0, 1]])
    sums, maxs, exps = sumexp_stable(mat)
    n_features, n_samples = mat.shape
    assert sums.size == n_samples, (
        "Invalid sum(exp(v)) computation (wrong # samples in sums)")
    assert exps.shape[0] == n_features, (
        "Invalid exp(v) computation (wrong # features)")
    assert exps.shape[1] == n_samples, (
        "Invalid exp(v) computation (wrong # samples)")
    assert maxs.size == n_samples, (
        "Invalid max computation (wrong # samples in maxs)")
def test_concatenate_not_none():
    """None entries are dropped before concatenation."""
    from brainiak.utils.utils import concatenate_not_none
    import numpy as np
    # Interleave None placeholders with two real arrays covering 0..4
    pieces = [None, np.array([0, 1, 2]), None, np.array([3, 4]), None]
    joined = concatenate_not_none(pieces, axis=0)
    assert np.all(np.arange(5) == joined), (
        "Invalid concatenation of a list of arrays")
def test_cov2corr():
    """cov2corr normalizes a covariance matrix into correlations."""
    from brainiak.utils.utils import cov2corr
    import numpy as np
    cov = np.array([[4, 3, 0], [3, 9, 0], [0, 0, 1]])
    expected = np.array([[1, 0.5, 0], [0.5, 1, 0], [0, 0, 1]])
    assert np.allclose(cov2corr(cov), expected), (
        "Converting from covariance matrix to correlation incorrect")
def test_ReadDesign():
    """ReadDesign parses a design file with and without nuisance columns."""
    from brainiak.utils.utils import ReadDesign
    import numpy as np
    import os.path
    design_file = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Without orthogonal/polynomial columns there is no nuisance matrix
    minimal = ReadDesign(fname=design_file, include_orth=False,
                         include_pols=False)
    assert minimal, 'Failed to read design matrix'
    assert minimal.reg_nuisance is None, \
        'Nuiance regressor is not None when include_orth and include_pols are'\
        ' both set to False'
    # A bare constructor should still work
    default = ReadDesign()
    assert default, 'Failed to initialize an instance of the class'
    # With both options the nuisance and task column counts are fixed
    full = ReadDesign(fname=design_file, include_orth=True, include_pols=True)
    assert np.size(full.cols_nuisance) == 10, \
        'Mistake in counting the number of nuiance regressors'
    assert np.size(full.cols_task) == 17, \
        'Mistake in counting the number of task conditions'
    assert (np.shape(full.reg_nuisance)[0]
            == np.shape(full.design_task)[0]
            ), 'The number of time points in nuiance regressor does not match'\
        ' that of task response'
def test_gen_design():
    """gen_design builds HRF-convolved design matrices from FSL/AFNI
    stimulus-timing files, handling multiple runs, multiple conditions,
    modulation weights, sampling frequency, and negative onsets."""
    from brainiak.utils.utils import gen_design
    import numpy as np
    import os.path
    files = {'FSL1': 'example_stimtime_1_FSL.txt',
             'FSL2': 'example_stimtime_2_FSL.txt',
             'AFNI1': 'example_stimtime_1_AFNI.txt',
             'AFNI2': 'example_stimtime_2_AFNI.txt'}
    # Resolve the example files relative to this test module
    for key in files.keys():
        files[key] = os.path.join(os.path.dirname(__file__), files[key])
    # Two runs of 48 s and 20 s at TR=2 -> 24 + 10 = 34 rows, one condition
    design1 = gen_design(stimtime_files=files['FSL1'], scan_duration=[48, 20],
                         TR=2, style='FSL')
    assert design1.shape == (34, 1), 'Returned design matrix has wrong shape'
    # Row 24 is the first TR of run 2, so run-1 events must not bleed into it
    assert design1[24] == 0, (
        "gen_design should generated design matrix for each run separately "
        "and concatenate them.")
    design2 = gen_design(stimtime_files=[files['FSL1'], files['FSL2']],
                         scan_duration=[48, 20], TR=2, style='FSL')
    assert design2.shape == (34, 2), 'Returned design matrix has wrong shape'
    # As a single 68 s run, the same time point does see the earlier event
    design3 = gen_design(stimtime_files=files['FSL1'], scan_duration=68, TR=2,
                         style='FSL')
    assert design3[24] != 0, (
        'design matrix should be non-zero 8 seconds after an event onset.')
    # FSL2 carries 0.5 modulation weights, halving the response amplitude
    design4 = gen_design(stimtime_files=[files['FSL2']],
                         scan_duration=[48, 20], TR=2, style='FSL')
    assert np.all(np.isclose(design1 * 0.5, design4)), (
        'gen_design does not treat missing values correctly')
    # Sampling at TR=1 should agree with TR=2 at the shared time points
    design5 = gen_design(stimtime_files=[files['FSL2']],
                         scan_duration=[48, 20], TR=1)
    assert (np.abs(design4 - design5[::2])).mean() < 0.1, (
        'design matrices sampled at different frequency do not match'
        ' at corresponding time points')
    # AFNI-style timing for the same events must yield the same design
    design6 = gen_design(stimtime_files=[files['AFNI1']],
                         scan_duration=[48, 20], TR=2, style='AFNI')
    assert np.all(np.isclose(design1, design6)), (
        'design matrices generated from AFNI style and FSL style do not match')
    design7 = gen_design(stimtime_files=[files['AFNI2']],
                         scan_duration=[48], TR=2, style='AFNI')
    assert np.all(design7 == 0.0), (
        'A negative stimulus onset of AFNI style should result in an all-zero'
        + ' design matrix')
def test_center_mass_exp():
    """center_mass_exp validates its interval/scale arguments and returns
    the center of mass of a truncated exponential distribution.

    Fix: the `result = ...` bindings inside the `pytest.raises` blocks were
    never read (F841); the calls are kept, the dead assignments dropped.
    """
    from brainiak.utils.utils import center_mass_exp
    import numpy as np
    # Each invalid argument combination must raise with a specific message.
    with pytest.raises(AssertionError) as excinfo:
        center_mass_exp([1, 2])
    assert ('interval must be a tuple'
            in str(excinfo.value))
    with pytest.raises(AssertionError) as excinfo:
        center_mass_exp((1, 2, 3))
    assert ('interval must be length two'
            in str(excinfo.value))
    with pytest.raises(AssertionError) as excinfo:
        center_mass_exp((-2, -1))
    assert ('interval_left must be non-negative'
            in str(excinfo.value))
    with pytest.raises(AssertionError) as excinfo:
        center_mass_exp((-2, 3))
    assert ('interval_left must be non-negative'
            in str(excinfo.value))
    with pytest.raises(AssertionError) as excinfo:
        center_mass_exp((3, 3))
    assert ('interval_right must be bigger than interval_left'
            in str(excinfo.value))
    with pytest.raises(AssertionError) as excinfo:
        center_mass_exp((1, 2), -1)
    assert ('scale must be positive'
            in str(excinfo.value))
    # Over (0, inf) the center of mass equals the scale parameter.
    result = center_mass_exp((0, np.inf), 2.0)
    assert np.isclose(result, 2.0), 'center of mass '\
        'incorrect for the whole distribution'
    # A vanishingly small interval collapses to its midpoint.
    result = center_mass_exp((1.0, 1.0+2e-10))
    assert np.isclose(result, 1.0+1e-10), 'for a small '\
        'enough interval, the center of mass should be '\
        'close to its mid-point'
def test_p_from_null():
    """p_from_null converts an observed statistic and a null distribution
    into two- or one-tailed p-values, for 1-D and 2-D inputs, using either
    the approximate or the exact permutation formula.

    NOTE: the null draws are unseeded, so assertions use loose tolerances;
    the order of np.random calls below must not be changed.
    """
    import numpy as np
    from brainiak.utils.utils import p_from_null
    # Create random null and observed value in tail (~97.5th percentile)
    null = np.random.randn(10000)
    observed = np.ceil(np.percentile(null, 97.5) * 1000) / 1000
    # Check that we catch improper side
    with pytest.raises(ValueError):
        _ = p_from_null(observed, null, side='wrong')
    # Check two-tailed p-value for observed
    p_ts = p_from_null(observed, null)
    assert np.isclose(p_ts, 0.05, atol=1e-02)
    # Check right-tailed p-value for observed
    p_right = p_from_null(observed, null, side='right')
    assert np.isclose(p_right, 0.025, atol=1e-02)
    assert np.isclose(p_right, p_ts / 2, atol=1e-02)
    # Check left-tailed p-value for observed
    p_left = p_from_null(observed, null, side='left')
    assert np.isclose(p_left, 0.975, atol=1e-02)
    assert np.isclose(1 - p_left, p_right, atol=1e-02)
    assert np.isclose(1 - p_left, p_ts / 2, atol=1e-02)
    # Check 2-dimensional input (i.e., samples by voxels)
    null = np.random.randn(10000, 3)
    observed = np.ceil(np.percentile(null, 97.5, axis=0) * 1000) / 1000
    # Check two-tailed p-value for observed
    p_ts = p_from_null(observed, null, axis=0)
    assert np.allclose(p_ts, 0.05, atol=1e-02)
    # Check right-tailed p-value for observed
    p_right = p_from_null(observed, null, side='right', axis=0)
    assert np.allclose(p_right, 0.025, atol=1e-02)
    assert np.allclose(p_right, p_ts / 2, atol=1e-02)
    # Check left-tailed p-value for observed
    p_left = p_from_null(observed, null, side='left', axis=0)
    assert np.allclose(p_left, 0.975, atol=1e-02)
    assert np.allclose(1 - p_left, p_right, atol=1e-02)
    assert np.allclose(1 - p_left, p_ts / 2, atol=1e-02)
    # Check for exact test
    p_ts = p_from_null(observed, null, exact=True, axis=0)
    assert np.allclose(p_ts, 0.05, atol=1e-02)
    # Check right-tailed p-value for exact
    p_right = p_from_null(observed, null, side='right',
                          exact=True, axis=0)
    assert np.allclose(p_right, 0.025, atol=1e-02)
    assert np.allclose(p_right, p_ts / 2, atol=1e-02)
    # Check left-tailed p-value for exact
    p_left = p_from_null(observed, null, side='left',
                         exact=True, axis=0)
    assert np.allclose(p_left, 0.975, atol=1e-02)
    assert np.allclose(1 - p_left, p_right, atol=1e-02)
    assert np.allclose(1 - p_left, p_ts / 2, atol=1e-02)
def test_phase_randomize():
    """phase_randomize shuffles Fourier phases while preserving the
    amplitude spectrum, optionally independently per voxel/subject.

    NOTE: unseeded np.random draws below are order-sensitive; do not
    reorder the data-construction statements.
    """
    import numpy as np
    from scipy.fftpack import fft
    from scipy.stats import pearsonr
    from brainiak.utils.utils import phase_randomize
    # 60 TRs x 30 voxels x 20 subjects, identical across voxels/subjects
    data = np.repeat(np.repeat(np.random.randn(60)[:, np.newaxis, np.newaxis],
                               30, axis=1),
                     20, axis=2)
    assert np.array_equal(data[..., 0], data[..., 1])
    # Phase-randomize data across subjects (same across voxels)
    shifted_data = phase_randomize(data, voxelwise=False, random_state=1)
    assert shifted_data.shape == data.shape
    assert not np.array_equal(shifted_data[..., 0], shifted_data[..., 1])
    assert not np.array_equal(shifted_data[..., 0], data[..., 0])
    # Check that uneven n_TRs doesn't explode
    _ = phase_randomize(data[:-1, ...])
    # Check that random_state returns same shifts
    shifted_data_ = phase_randomize(data, voxelwise=False, random_state=1)
    assert np.array_equal(shifted_data, shifted_data_)
    shifted_data_ = phase_randomize(data, voxelwise=False, random_state=2)
    assert not np.array_equal(shifted_data, shifted_data_)
    # Phase-randomize subjects and voxels
    shifted_data = phase_randomize(data, voxelwise=True, random_state=1)
    assert shifted_data.shape == data.shape
    assert not np.array_equal(shifted_data[..., 0], shifted_data[..., 1])
    assert not np.array_equal(shifted_data[..., 0], data[..., 0])
    assert not np.array_equal(shifted_data[:, 0, 0], shifted_data[:, 1, 0])
    # Try with 2-dimensional input
    shifted_data = phase_randomize(data[..., 0],
                                   voxelwise=True,
                                   random_state=1)
    assert shifted_data.ndim == 2
    assert not np.array_equal(shifted_data[:, 0], shifted_data[:, 1])
    # Create correlated noisy data
    corr_data = np.repeat(np.random.randn(60)[:, np.newaxis, np.newaxis],
                          2, axis=2) + np.random.randn(60, 1, 2)
    # Get correlation and frequency domain for data
    corr_r = pearsonr(corr_data[:, 0, 0],
                      corr_data[:, 0, 1])[0]
    corr_freq = fft(corr_data, axis=0)
    # Phase-randomize time series and get correlation/frequency
    shifted_data = phase_randomize(corr_data)
    shifted_r = pearsonr(shifted_data[:, 0, 0],
                         shifted_data[:, 0, 1])[0]
    shifted_freq = fft(shifted_data, axis=0)
    # Check that phase-randomization reduces correlation
    assert np.abs(shifted_r) < np.abs(corr_r)
    # Check that amplitude spectrum is preserved
    assert np.allclose(np.abs(shifted_freq), np.abs(corr_freq))
def test_check_timeseries_input():
    """_check_timeseries_input normalizes lists/arrays of subject time
    series to a common (n_TRs, n_voxels, n_subjects) layout and reports
    the three dimensions; mismatched or 4-D inputs raise ValueError."""
    import numpy as np
    from itertools import combinations
    from brainiak.utils.utils import _check_timeseries_input
    # Set a fixed vector for comparison
    vector = np.random.randn(60)
    # List of subjects with one voxel/ROI
    list_1d = [vector for _ in np.arange(10)]
    (data_list_1d, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(list_1d)
    assert n_TRs == 60
    assert n_voxels == 1
    assert n_subjects == 10
    # Array of subjects with one voxel/ROI
    array_2d = np.hstack([vector[:, np.newaxis]
                          for _ in np.arange(10)])
    (data_array_2d, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(array_2d)
    assert n_TRs == 60
    assert n_voxels == 1
    assert n_subjects == 10
    # List of 2-dimensional arrays
    list_2d = [vector[:, np.newaxis] for _ in np.arange(10)]
    (data_list_2d, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(list_2d)
    assert n_TRs == 60
    assert n_voxels == 1
    assert n_subjects == 10
    # Check if lists have mismatching size
    list_bad = [list_2d[0][:-1, :]] + list_2d[1:]
    with pytest.raises(ValueError):
        (data_list_bad, _, _, _) = _check_timeseries_input(list_bad)
    # List of 3-dimensional arrays
    list_3d = [vector[:, np.newaxis, np.newaxis]
               for _ in np.arange(10)]
    (data_list_3d, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(list_3d)
    assert n_TRs == 60
    assert n_voxels == 1
    assert n_subjects == 10
    # 3-dimensional array
    array_3d = np.dstack([vector[:, np.newaxis]
                          for _ in np.arange(10)])
    (data_array_3d, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(array_3d)
    assert n_TRs == 60
    assert n_voxels == 1
    assert n_subjects == 10
    # Check that 4-dimensional input array throws error
    array_4d = array_3d[..., np.newaxis]
    with pytest.raises(ValueError):
        (data_array_4d, _, _, _) = _check_timeseries_input(array_4d)
    # Check they're the same
    for pair in combinations([data_list_1d, data_array_2d,
                              data_list_2d, data_list_3d,
                              data_array_3d], 2):
        assert np.array_equal(pair[0], pair[1])
    # List of multivoxel arrays
    matrix = np.random.randn(60, 30)
    list_mv = [matrix
               for _ in np.arange(10)]
    (data_list_mv, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(list_mv)
    assert n_TRs == 60
    assert n_voxels == 30
    assert n_subjects == 10
    # 3-dimensional array with multiple voxels
    array_mv = np.dstack([matrix for _ in np.arange(10)])
    (data_array_mv, n_TRs,
     n_voxels, n_subjects) = _check_timeseries_input(array_mv)
    assert n_TRs == 60
    assert n_voxels == 30
    assert n_subjects == 10
    assert np.array_equal(data_list_mv, data_array_mv)
def test_array_correlation():
    """array_correlation matches corrcoef/pearsonr and validates input.

    Fixes: replaces the exact-type comparison ``type(x) == np.float64``
    with ``isinstance`` (type-equality anti-pattern), and drops the
    needless ``_ =`` bindings around smoke-test calls.
    """
    import numpy as np
    from brainiak.utils.utils import array_correlation
    from scipy.stats import pearsonr
    # Minimal array datasets
    n_TRs = 30
    n_voxels = 2
    x, y = (np.random.randn(n_TRs, n_voxels),
            np.random.randn(n_TRs, n_voxels))
    # Perform the correlation
    r = array_correlation(x, y)
    # Check there are the right number of voxels in the output
    assert r.shape == (n_voxels,)
    # Check that this (roughly) matches corrcoef
    assert np.allclose(r, np.corrcoef(x.T, y.T)[[0, 1], [2, 3]])
    # Check that this (roughly) matches pearsonr
    assert np.allclose(r, np.array([pearsonr(x[:, 0], y[:, 0])[0],
                                    pearsonr(x[:, 1], y[:, 1])[0]]))
    # Try axis argument
    assert np.allclose(array_correlation(x, y, axis=0),
                       array_correlation(x.T, y.T, axis=1))
    # Trigger shape mismatch error
    with pytest.raises(ValueError):
        array_correlation(x, y[:, 0])
    with pytest.raises(ValueError):
        array_correlation(x, y[:-1])
    # Feed in lists (smoke checks: should not raise)
    array_correlation(x.tolist(), y)
    array_correlation(x, y.tolist())
    array_correlation(x.tolist(), y.tolist())
    # Check 1D array input
    x, y = (np.random.randn(n_TRs),
            np.random.randn(n_TRs))
    assert isinstance(array_correlation(x, y), np.float64)
    assert np.isclose(array_correlation(x, y),
                      pearsonr(x, y)[0])
    # 1D list inputs (smoke checks)
    array_correlation(x.tolist(), y)
    array_correlation(x, y.tolist())
    array_correlation(x.tolist(), y.tolist())
    # Check integer inputs (smoke check)
    x, y = (np.random.randint(0, 9, (n_TRs, n_voxels)),
            np.random.randint(0, 9, (n_TRs, n_voxels)))
    array_correlation(x, y)
| 17,046 | 36.383772 | 79 | py |
brainiak | brainiak-master/tests/hyperparamopt/test_hpo.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import scipy.stats as st
from brainiak.hyperparamopt.hpo import gmm_1d_distribution, fmin
def test_simple_gmm():
x = np.array([1., 1., 2., 3., 1.])
d = gmm_1d_distribution(x, min_limit=0., max_limit=4.)
assert d(1.1) > d(3.5), "GMM distribution not behaving correctly"
assert d(2.0) > d(3.0), "GMM distribution not behaving correctly"
assert d(-1.0) == 0, "GMM distribution out of bounds error"
assert d(9.0) == 0, "GMM distribution out of bounds error"
samples = d.get_samples(n=25)
np.testing.assert_array_less(samples, 4.)
np.testing.assert_array_less(0., samples)
def test_simple_gmm_weights():
x = np.array([1., 1., 2., 3., 1., 3.])
d = gmm_1d_distribution(x)
x2 = np.array([1., 2., 3.])
w = np.array([3., 1., 2.])
d2 = gmm_1d_distribution(x2, weights=w)
y2 = d2(np.array([1.1, 2.0]))
assert d2(1.1) == y2[0],\
"GMM distribution array & scalar results don't match"
assert np.abs(d(1.1) - d2(1.1)) < 1e-5,\
"GMM distribution weights not handled correctly"
assert np.abs(d(2.0) - d2(2.0)) < 1e-5,\
"GMM distribution weights not handled correctly"
def test_simple_hpo():
def f(args):
x = args['x']
return x*x
s = {'x': {'dist': st.uniform(loc=-10., scale=20), 'lo': -10., 'hi': 10.}}
trials = []
# Test fmin and ability to continue adding to trials
best = fmin(loss_fn=f, space=s, max_evals=40, trials=trials)
best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)
assert len(trials) == 50, "HPO continuation trials not working"
# Test verbose flag
best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)
yarray = np.array([tr['loss'] for tr in trials])
np.testing.assert_array_less(yarray, 100.)
xarray = np.array([tr['x'] for tr in trials])
np.testing.assert_array_less(np.abs(xarray), 10.)
assert best['loss'] < 100., "HPO out of range"
assert np.abs(best['x']) < 10., "HPO out of range"
# Test unknown distributions
s2 = {'x': {'dist': 'normal', 'mu': 0., 'sigma': 1.}}
trials2 = []
with pytest.raises(ValueError) as excinfo:
fmin(loss_fn=f, space=s2, max_evals=40, trials=trials2)
assert "Unknown distribution type for variable" in str(excinfo.value)
s3 = {'x': {'dist': st.norm(loc=0., scale=1.)}}
trials3 = []
fmin(loss_fn=f, space=s3, max_evals=40, trials=trials3)
| 3,042 | 33.977011 | 78 | py |
brainiak | brainiak-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# toolkit documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 17 16:45:35 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from pkg_resources import get_distribution
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the package importable so autodoc can resolve `brainiak` modules.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'myst_nb',
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'brainiak'
copyright = '2016, Princeton Neuroscience Institute and Intel Corporation'
author = 'Princeton Neuroscience Institute and Intel Corporation'
# Derive the docs version from the installed package metadata.
version = get_distribution(project).version
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'brainiakdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'brainiak.tex', 'BrainIAK Documentation',
     'Princeton Neuroscience Institute and Intel Corporation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'brainiak', 'BrainIAK Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'brainiak', 'BrainIAK Documentation',
     author, 'brainiak', 'Brain Imaging Analysis Kit.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Include documented __special__ members in autodoc output (napoleon).
napoleon_include_special_with_doc = True
# Myst-nb
# NOTE(review): -1 presumably disables the per-cell execution timeout —
# confirm against the myst-nb documentation.
execution_timeout = -1
jupyter_execute_notebooks = "force"
| 9,448 | 30.708054 | 79 | py |
DMGI | DMGI-master/main.py | import numpy as np
# Fix all RNG seeds (NumPy and PyTorch, CPU and CUDA) and force
# deterministic cuDNN so training runs are reproducible.
np.random.seed(0)
import torch
# Enable autograd anomaly detection to localize NaN/inf gradients.
torch.autograd.set_detect_anomaly(True)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import argparse
def parse_args():
    """Build the DMGI command-line parser and parse known arguments.

    Returns the ``(namespace, unknown_args)`` pair produced by
    ``parse_known_args`` so unrecognized flags do not abort execution.
    """
    p = argparse.ArgumentParser(description='DMGI')
    # Model and data selection
    p.add_argument('--embedder', nargs='?', default='DMGI')
    p.add_argument('--dataset', nargs='?', default='imdb')
    p.add_argument('--metapaths', nargs='?', default='MAM,MDM')
    # Optimization hyper-parameters
    p.add_argument('--nb_epochs', type=int, default=10000)
    p.add_argument('--hid_units', type=int, default=64)
    p.add_argument('--lr', type=float, default=0.0005)
    p.add_argument('--l2_coef', type=float, default=0.0001)
    p.add_argument('--drop_prob', type=float, default=0.5)
    p.add_argument('--reg_coef', type=float, default=0.001)
    p.add_argument('--sup_coef', type=float, default=0.1)
    p.add_argument('--sc', type=float, default=3.0, help='GCN self connection')
    p.add_argument('--margin', type=float, default=0.1)
    # Hardware / training-loop settings
    p.add_argument('--gpu_num', type=int, default=0)
    p.add_argument('--patience', type=int, default=20)
    p.add_argument('--nheads', type=int, default=1)
    p.add_argument('--activation', nargs='?', default='relu')
    # Boolean switches
    p.add_argument('--isSemi', action='store_true', default=False)
    p.add_argument('--isBias', action='store_true', default=False)
    p.add_argument('--isAttn', action='store_true', default=False)
    return p.parse_known_args()
def printConfig(args):
    """Print the argument names and their values, each list on its own line."""
    names = [name for name in vars(args)]
    values = [getattr(args, name) for name in names]
    print(names)
    print(values)
def main():
    """Entry point: parse CLI arguments and train the selected embedder."""
    args, unknown = parse_args()
    # Imports are deferred so only the requested model module is loaded.
    if args.embedder == 'DMGI':
        from models import DMGI
        embedder = DMGI(args)
    elif args.embedder == 'DGI':
        from models import DGI
        embedder = DGI(args)
    embedder.training()
if __name__ == '__main__':
main()
| 2,131 | 33.95082 | 84 | py |
DMGI | DMGI-master/evaluate.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from models import LogReg
import torch.nn as nn
import numpy as np
np.random.seed(0)
from sklearn.metrics import f1_score
from sklearn.cluster import KMeans
from sklearn.metrics import normalized_mutual_info_score, pairwise
def evaluate(embeds, idx_train, idx_val, idx_test, labels, device, isTest=True):
    """Evaluate node embeddings with a downstream logistic-regression probe.

    Trains a fresh LogReg classifier 50 times for 50 iterations each; for each
    run, the test metric at the best-validation iteration is recorded. When
    ``isTest`` is True, prints mean/std Macro- and Micro-F1 and then runs
    KMeans clustering and a similarity search on the test embeddings;
    otherwise returns (mean val Macro-F1, mean test Macro-F1).

    Args:
        embeds: embedding tensor indexed as embeds[0, node_idx]
                (assumes a leading batch dim of 1 — shape[2] is hid_units).
        labels: one-hot label tensor, same leading batch dim.
    """
    hid_units = embeds.shape[2]
    nb_classes = labels.shape[2]
    xent = nn.CrossEntropyLoss()
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]
    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)
    accs = []
    micro_f1s = []
    macro_f1s = []
    macro_f1s_val = [] ##
    # 50 independent probe runs to average out classifier-init variance.
    for _ in range(50):
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
        log.to(device)
        val_accs = []; test_accs = []
        val_micro_f1s = []; test_micro_f1s = []
        val_macro_f1s = []; test_macro_f1s = []
        for iter_ in range(50):
            # train
            log.train()
            opt.zero_grad()
            logits = log(train_embs)
            loss = xent(logits, train_lbls)
            loss.backward()
            opt.step()
            # val
            logits = log(val_embs)
            preds = torch.argmax(logits, dim=1)
            val_acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
            val_f1_macro = f1_score(val_lbls.cpu(), preds.cpu(), average='macro')
            val_f1_micro = f1_score(val_lbls.cpu(), preds.cpu(), average='micro')
            val_accs.append(val_acc.item())
            val_macro_f1s.append(val_f1_macro)
            val_micro_f1s.append(val_f1_micro)
            # test
            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)
            test_acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            test_f1_macro = f1_score(test_lbls.cpu(), preds.cpu(), average='macro')
            test_f1_micro = f1_score(test_lbls.cpu(), preds.cpu(), average='micro')
            test_accs.append(test_acc.item())
            test_macro_f1s.append(test_f1_macro)
            test_micro_f1s.append(test_f1_micro)
        # Model selection: per metric, pick the iteration with the best
        # validation score and record the corresponding test score.
        max_iter = val_accs.index(max(val_accs))
        accs.append(test_accs[max_iter])
        max_iter = val_macro_f1s.index(max(val_macro_f1s))
        macro_f1s.append(test_macro_f1s[max_iter])
        macro_f1s_val.append(val_macro_f1s[max_iter]) ###
        max_iter = val_micro_f1s.index(max(val_micro_f1s))
        micro_f1s.append(test_micro_f1s[max_iter])
    if isTest:
        print("\t[Classification] Macro-F1: {:.4f} ({:.4f}) | Micro-F1: {:.4f} ({:.4f})".format(np.mean(macro_f1s),
                                                                                                np.std(macro_f1s),
                                                                                                np.mean(micro_f1s),
                                                                                                np.std(micro_f1s)))
    else:
        return np.mean(macro_f1s_val), np.mean(macro_f1s)
    test_embs = np.array(test_embs.cpu())
    test_lbls = np.array(test_lbls.cpu())
    run_kmeans(test_embs, test_lbls, nb_classes)
    run_similarity_search(test_embs, test_lbls)
def run_similarity_search(test_embs, test_lbls):
    """For several N, report the mean fraction of each node's N most cosine-
    similar neighbours that share the node's label."""
    numRows = test_embs.shape[0]
    # Subtract the identity so a node never counts itself as a neighbour.
    cos_sim_array = pairwise.cosine_similarity(test_embs) - np.eye(numRows)
    st = []
    for N in [5, 10, 20, 50, 100]:
        # Indices of the N highest-similarity columns per row.
        indices = np.argsort(cos_sim_array, axis=1)[:, -N:]
        tmp = np.tile(test_lbls, (numRows, 1))
        selected_label = tmp[np.repeat(np.arange(numRows), N), indices.ravel()].reshape(numRows, N)
        original_label = np.repeat(test_lbls, N).reshape(numRows,N)
        st.append(str(np.round(np.mean(np.sum((selected_label == original_label), 1) / N),4)))
    st = ','.join(st)
    print("\t[Similarity] [5,10,20,50,100] : [{}]".format(st))
def run_kmeans(x, y, k):
    """Cluster embeddings ``x`` into ``k`` groups and report NMI against ``y``.

    Averages NMI over 10 KMeans restarts to reduce initialization variance.
    """
    estimator = KMeans(n_clusters=k)
    NMI_list = []
    for i in range(10):
        estimator.fit(x)
        y_pred = estimator.predict(x)
        s1 = normalized_mutual_info_score(y, y_pred, average_method='arithmetic')
        NMI_list.append(s1)
    s1 = sum(NMI_list) / len(NMI_list)
print('\t[Clustering] NMI: {:.4f}'.format(s1)) | 4,571 | 34.71875 | 115 | py |
DMGI | DMGI-master/embedder.py | import time
import numpy as np
import torch
from utils import process
import torch.nn as nn
from layers import AvgReadout
class embedder:
    """Shared base for DMGI/DGI trainers: loads the dataset, normalizes the
    per-metapath adjacency matrices, and moves tensors to the target device.

    Side effects: mutates ``args`` in place (device, dataset dimensions,
    readout functions) and stores the prepared tensors on ``self``.
    """
    def __init__(self, args):
        args.batch_size = 1
        args.sparse = True
        args.metapaths_list = args.metapaths.split(",")
        args.gpu_num_ = args.gpu_num
        if args.gpu_num_ == 'cpu':
            args.device = 'cpu'
        else:
            args.device = torch.device("cuda:" + str(args.gpu_num_) if torch.cuda.is_available() else "cpu")
        # One adjacency/feature matrix per metapath (graph view).
        adj, features, labels, idx_train, idx_val, idx_test = process.load_data_dblp(args)
        features = [process.preprocess_features(feature) for feature in features]
        args.nb_nodes = features[0].shape[0]
        args.ft_size = features[0].shape[1]
        args.nb_classes = labels.shape[1]
        args.nb_graphs = len(adj)
        args.adj = adj
        # Symmetric normalization, then conversion to torch sparse tensors.
        adj = [process.normalize_adj(adj_) for adj_ in adj]
        self.adj = [process.sparse_mx_to_torch_sparse_tensor(adj_) for adj_ in adj]
        # np.newaxis adds the batch dimension (batch_size is fixed to 1).
        self.features = [torch.FloatTensor(feature[np.newaxis]) for feature in features]
        self.labels = torch.FloatTensor(labels[np.newaxis]).to(args.device)
        self.idx_train = torch.LongTensor(idx_train).to(args.device)
        self.idx_val = torch.LongTensor(idx_val).to(args.device)
        self.idx_test = torch.LongTensor(idx_test).to(args.device)
        self.train_lbls = torch.argmax(self.labels[0, self.idx_train], dim=1)
        self.val_lbls = torch.argmax(self.labels[0, self.idx_val], dim=1)
        self.test_lbls = torch.argmax(self.labels[0, self.idx_test], dim=1)
        # How to aggregate
        args.readout_func = AvgReadout()
        # Summary aggregation
        args.readout_act_func = nn.Sigmoid()
        self.args = args
    def currentTime(self):
        """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
        now = time.localtime()
        s = "%04d-%02d-%02d %02d:%02d:%02d" % (
            now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
        return s
| 1,994 | 35.272727 | 108 | py |
DMGI | DMGI-master/models/logreg.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
import torch.nn.functional as F
class LogReg(nn.Module):
    """Linear logistic-regression head mapping embeddings to class logits."""

    def __init__(self, ft_in, nb_classes):
        super(LogReg, self).__init__()
        self.fc = nn.Linear(ft_in, nb_classes)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        # Xavier-initialise linear weights and zero the bias.
        if not isinstance(m, nn.Linear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, seq):
        return self.fc(seq)
| 697 | 24.851852 | 56 | py |
DMGI | DMGI-master/models/DMGI.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
from embedder import embedder
from layers import GCN, Discriminator, Attention
import numpy as np
np.random.seed(0)
from evaluate import evaluate
from models import LogReg
import pickle as pkl
class DMGI(embedder):
    """Trainer for the DMGI model: multi-view DGI losses plus a consensus
    regularizer over the shared embedding H (and optional semi-supervision)."""
    def __init__(self, args):
        embedder.__init__(self, args)
        self.args = args
    def training(self):
        """Train with early stopping, then evaluate the consensus embedding H.

        Checkpoints the best (lowest-loss) model under ``saved_model/`` and
        reloads it before evaluation.
        """
        features = [feature.to(self.args.device) for feature in self.features]
        adj = [adj_.to(self.args.device) for adj_ in self.adj]
        model = modeler(self.args).to(self.args.device)
        optimiser = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.l2_coef)
        cnt_wait = 0; best = 1e9
        b_xent = nn.BCEWithLogitsLoss()
        xent = nn.CrossEntropyLoss()
        for epoch in range(self.args.nb_epochs):
            xent_loss = None
            model.train()
            optimiser.zero_grad()
            # Negative samples: row-shuffled node features (corrupted graph).
            idx = np.random.permutation(self.args.nb_nodes)
            shuf = [feature[:, idx, :] for feature in features]
            shuf = [shuf_ft.to(self.args.device) for shuf_ft in shuf]
            # Discriminator targets: 1 for real nodes, 0 for shuffled ones.
            lbl_1 = torch.ones(self.args.batch_size, self.args.nb_nodes)
            lbl_2 = torch.zeros(self.args.batch_size, self.args.nb_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1).to(self.args.device)
            result = model(features, adj, shuf, self.args.sparse, None, None, None)
            logits = result['logits']
            # Sum the BCE loss over all metapath views.
            for view_idx, logit in enumerate(logits):
                if xent_loss is None:
                    xent_loss = b_xent(logit, lbl)
                else:
                    xent_loss += b_xent(logit, lbl)
            loss = xent_loss
            reg_loss = result['reg_loss']
            loss += self.args.reg_coef * reg_loss
            if self.args.isSemi:
                sup = result['semi']
                semi_loss = xent(sup[self.idx_train], self.train_lbls)
                loss += self.args.sup_coef * semi_loss
            # Early stopping on the training loss.
            if loss < best:
                best = loss
                cnt_wait = 0
                torch.save(model.state_dict(), 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, self.args.metapaths))
            else:
                cnt_wait += 1
            if cnt_wait == self.args.patience:
                break
            loss.backward()
            optimiser.step()
        model.load_state_dict(torch.load('saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, self.args.metapaths)))
        # Evaluation
        model.eval()
        evaluate(model.H.data.detach(), self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
class modeler(nn.Module):
    """DMGI network: one GCN encoder + shared discriminator per metapath view,
    a learnable consensus embedding H, optional attention and semi-supervised
    heads."""
    def __init__(self, args):
        super(modeler, self).__init__()
        self.args = args
        # One GCN per graph view (metapath).
        self.gcn = nn.ModuleList([GCN(args.ft_size, args.hid_units, args.activation, args.drop_prob, args.isBias) for _ in range(args.nb_graphs)])
        self.disc = Discriminator(args.hid_units)
        # Consensus embedding shared across all views (trained via reg_loss).
        self.H = nn.Parameter(torch.FloatTensor(1, args.nb_nodes, args.hid_units))
        self.readout_func = self.args.readout_func
        if args.isAttn:
            self.attn = nn.ModuleList([Attention(args) for _ in range(args.nheads)])
        if args.isSemi:
            self.logistic = LogReg(args.hid_units, args.nb_classes).to(args.device)
        self.init_weight()
    def init_weight(self):
        """Xavier-initialise the consensus embedding H."""
        nn.init.xavier_normal_(self.H)
    def forward(self, feature, adj, shuf, sparse, msk, samp_bias1, samp_bias2):
        """Run all views; return per-view DGI logits, the consensus
        regularization loss, and (optionally) semi-supervised logits."""
        h_1_all = []; h_2_all = []; c_all = []; logits = []
        result = {}
        for i in range(self.args.nb_graphs):
            # Positive embeddings from the real features.
            h_1 = self.gcn[i](feature[i], adj[i], sparse)
            # how to readout positive summary vector
            c = self.readout_func(h_1)
            c = self.args.readout_act_func(c)  # equation 9
            # Negative embeddings from the shuffled (corrupted) features.
            h_2 = self.gcn[i](shuf[i], adj[i], sparse)
            logit = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)
            h_1_all.append(h_1)
            h_2_all.append(h_2)
            c_all.append(c)
            logits.append(logit)
        result['logits'] = logits
        # Attention or not
        if self.args.isAttn:
            h_1_all_lst = []; h_2_all_lst = []; c_all_lst = []
            for h_idx in range(self.args.nheads):
                h_1_all_, h_2_all_, c_all_ = self.attn[h_idx](h_1_all, h_2_all, c_all)
                h_1_all_lst.append(h_1_all_); h_2_all_lst.append(h_2_all_); c_all_lst.append(c_all_)
            h_1_all = torch.mean(torch.cat(h_1_all_lst, 0), 0).unsqueeze(0)
            h_2_all = torch.mean(torch.cat(h_2_all_lst, 0), 0).unsqueeze(0)
        else:
            h_1_all = torch.mean(torch.cat(h_1_all), 0).unsqueeze(0)
            h_2_all = torch.mean(torch.cat(h_2_all), 0).unsqueeze(0)
        # consensus regularizer: pull H to positive embeddings, push from negatives
        pos_reg_loss = ((self.H - h_1_all) ** 2).sum()
        neg_reg_loss = ((self.H - h_2_all) ** 2).sum()
        reg_loss = pos_reg_loss - neg_reg_loss
        result['reg_loss'] = reg_loss
        # semi-supervised module
        if self.args.isSemi:
            semi = self.logistic(self.H).squeeze(0)
            result['semi'] = semi
return result | 5,373 | 34.826667 | 146 | py |
DMGI | DMGI-master/models/DGI.py | # Code based on https://github.com/PetarV-/DGI/blob/master/models/dgi.py
import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
from embedder import embedder
from layers import GCN, Discriminator
import numpy as np
np.random.seed(0)
from evaluate import evaluate
class DGI(embedder):
    """Trainer that runs vanilla DGI independently on each metapath graph and
    finally evaluates the mean of the per-view embeddings."""
    def __init__(self, args):
        embedder.__init__(self, args)
        self.args = args
    def training(self):
        features_lst = [feature.to(self.args.device) for feature in self.features]
        adj_lst = [adj_.to(self.args.device) for adj_ in self.adj]
        final_embeds = []
        # Train one DGI model per metapath view.
        for m_idx, (features, adj) in enumerate(zip(features_lst, adj_lst)):
            metapath = self.args.metapaths_list[m_idx]
            print("- Training on {}".format(metapath))
            model = modeler(self.args).to(self.args.device)
            optimiser = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.l2_coef)
            cnt_wait = 0; best = 1e9
            b_xent = nn.BCEWithLogitsLoss()
            for epoch in range(self.args.nb_epochs):
                model.train()
                optimiser.zero_grad()
                # Negative samples: row-shuffled node features.
                idx = np.random.permutation(self.args.nb_nodes)
                shuf_fts = features[:, idx, :].to(self.args.device)
                # Targets: 1 for real nodes, 0 for shuffled ones.
                lbl_1 = torch.ones(self.args.batch_size, self.args.nb_nodes)
                lbl_2 = torch.zeros(self.args.batch_size, self.args.nb_nodes)
                lbl = torch.cat((lbl_1, lbl_2), 1)
                lbl = lbl.to(self.args.device)
                logits = model(features, shuf_fts, adj, self.args.sparse, None, None, None)
                loss = b_xent(logits, lbl)
                # Early stopping on the training loss, with checkpointing.
                if loss < best:
                    best = loss
                    cnt_wait = 0
                    torch.save(model.state_dict(), 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, metapath))
                else:
                    cnt_wait += 1
                if cnt_wait == self.args.patience:
                    break
                loss.backward()
                optimiser.step()
            model.load_state_dict(torch.load('saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, metapath)))
            # Evaluation
            embeds, _ = model.embed(features, adj, self.args.sparse)
            evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
            final_embeds.append(embeds)
        # Integrated evaluation: average embeddings over all views.
        embeds = torch.mean(torch.cat(final_embeds), 0).unsqueeze(0)
        print("- Integrated")
        evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
class modeler(nn.Module):
    """Single-view DGI network: one GCN encoder plus one discriminator."""
    def __init__(self, args):
        super(modeler, self).__init__()
        self.args = args
        self.gcn = GCN(args.ft_size, args.hid_units, args.activation, args.drop_prob, args.isBias)
        # one discriminator
        self.disc = Discriminator(args.hid_units)
        self.readout_func = self.args.readout_func
    def forward(self, seq1, seq2, adj, sparse, msk, samp_bias1, samp_bias2):
        """Return discriminator logits for real (seq1) vs corrupted (seq2)
        node features under the same adjacency."""
        h_1 = self.gcn(seq1, adj, sparse)
        c = self.readout_func(h_1)  # equation 9
        c = self.args.readout_act_func(c)
        h_2 = self.gcn(seq2, adj, sparse)
        ret = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)
        return ret
    # Detach the return variables
    def embed(self, seq, adj, sparse):
        """Return detached node embeddings and the (sigmoid) summary vector."""
        h_1 = self.gcn(seq, adj, sparse)
        c = self.readout_func(h_1)  # positive summary vector
        c = self.args.readout_act_func(c)  # equation 9
        return h_1.detach(), c.detach()
| 3,756 | 35.125 | 139 | py |
DMGI | DMGI-master/models/__init__.py | from .logreg import LogReg
from .DMGI import DMGI
from .DGI import DGI
| 71 | 17 | 26 | py |
DMGI | DMGI-master/layers/discriminator.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
class Discriminator(nn.Module):
    """Bilinear discriminator scoring (node embedding, graph summary) pairs,
    as in DGI: high scores for real nodes, low for corrupted ones."""
    def __init__(self, n_h):
        super(Discriminator, self).__init__()
        self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1)
        for m in self.modules():
            self.weights_init(m)
    def weights_init(self, m):
        """Xavier-initialise bilinear weights; zero the bias."""
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)
    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        """Return concatenated scores [positive | negative], one per node."""
        c_x = torch.unsqueeze(c, 1)  # c: summary vector, h_pl: positive, h_mi: negative
        # Broadcast the summary vector to every node.
        c_x = c_x.expand_as(h_pl)
        sc_1 = torch.squeeze(self.f_k_bilinear(h_pl, c_x), 2)  # sc_1 = 1 x nb_nodes
        sc_2 = torch.squeeze(self.f_k_bilinear(h_mi, c_x), 2)  # sc_2 = 1 x nb_nodes
        # Optional additive sample biases.
        if s_bias1 is not None:
            sc_1 += s_bias1
        if s_bias2 is not None:
            sc_2 += s_bias2
        logits = torch.cat((sc_1, sc_2), 1)
return logits | 1,143 | 30.777778 | 87 | py |
DMGI | DMGI-master/layers/readout.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
class AvgReadout(nn.Module):
def __init__(self):
super(AvgReadout, self).__init__()
def forward(self, seq):
return torch.mean(seq, 1) | 326 | 24.153846 | 42 | py |
DMGI | DMGI-master/layers/gcn.py | import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
class GCN(nn.Module):
    """One-layer graph convolution: dropout -> linear -> A @ x -> activation.

    The adjacency is expected to be pre-normalized by the caller; ``sparse``
    selects torch.spmm (sparse A) vs torch.bmm (dense batched A).
    """
    def __init__(self, in_ft, out_ft, act, drop_prob, isBias=False):
        super(GCN, self).__init__()
        self.fc_1 = nn.Linear(in_ft, out_ft, bias=False)
        # Activation chosen by name from the CLI (--activation).
        if act == 'prelu':
            self.act = nn.PReLU()
        elif act == 'relu':
            self.act = nn.ReLU()
        elif act == 'leakyrelu':
            self.act = nn.LeakyReLU()
        elif act == 'relu6':
            self.act = nn.ReLU6()
        elif act == 'rrelu':
            self.act = nn.RReLU()
        elif act == 'selu':
            self.act = nn.SELU()
        elif act == 'celu':
            self.act = nn.CELU()
        elif act == 'sigmoid':
            self.act = nn.Sigmoid()
        elif act == 'identity':
            self.act = nn.Identity()
        if isBias:
            self.bias_1 = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias_1.data.fill_(0.0)
        else:
            self.register_parameter('bias', None)
        for m in self.modules():
            self.weights_init(m)
        self.drop_prob = drop_prob
        self.isBias = isBias
        # self.reset_parameters()
    # def reset_parameters(self):
    #     # pdb.set_trace()
    #     stdv = 1. / math.sqrt(self.fc_1.weight.data.size(0))
    #     self.fc_1.weight.data.uniform_(-stdv, stdv)
    #     if self.bias_1 is not None:
    #         self.bias_1.data.uniform_(-stdv, stdv)
    def weights_init(self, m):
        """Xavier-initialise linear weights; zero the bias."""
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)
    # Shape of seq: (batch, nodes, features)
    def forward(self, seq, adj, sparse=False):
        seq = F.dropout(seq, self.drop_prob, training=self.training)
        seq = self.fc_1(seq)
        if sparse:
            # spmm has no batch support: drop/re-add the batch dim of size 1.
            seq = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq, 0)), 0)
        else:
            seq = torch.bmm(adj, seq)
        if self.isBias:
            seq += self.bias_1
        return self.act(seq)
| 2,239 | 28.473684 | 76 | py |
DMGI | DMGI-master/layers/__init__.py | from .gcn import GCN
from .readout import AvgReadout
from .discriminator import Discriminator
from .attention import Attention
| 128 | 20.5 | 40 | py |
DMGI | DMGI-master/layers/attention.py | import torch.nn as nn
import torch
import torch.nn.functional as F
class Attention(nn.Module):
    """Per-view attention: learns one scalar score per graph view and combines
    the per-view node embeddings and summaries into a weighted mixture."""
    def __init__(self, args):
        super(Attention, self).__init__()
        self.args = args
        # One linear scorer per graph view.
        self.A = nn.ModuleList([nn.Linear(args.hid_units, 1) for _ in range(args.nb_graphs)])
        self.weight_init()
    def weight_init(self):
        """Xavier-initialise scorers; zero their biases."""
        for i in range(self.args.nb_graphs):
            nn.init.xavier_normal_(self.A[i].weight)
            self.A[i].bias.data.fill_(0.0)
    def forward(self, feat_pos, feat_neg, summary):
        """Attend over positive/negative embeddings and summary vectors.

        Note: summary_attn is computed but only the combined summary is
        returned (alongside the combined positive/negative embeddings).
        """
        feat_pos, feat_pos_attn = self.attn_feature(feat_pos)
        feat_neg, feat_neg_attn = self.attn_feature(feat_neg)
        summary, summary_attn = self.attn_summary(summary)
        return feat_pos, feat_neg, summary
    def attn_feature(self, features):
        """Softmax-weight per-node embeddings across views; return the
        (1, nb_nodes, hid_units) mixture and the attention weights."""
        features_attn = []
        for i in range(self.args.nb_graphs):
            features_attn.append((self.A[i](features[i].squeeze())))
        features_attn = F.softmax(torch.cat(features_attn, 1), -1)
        features = torch.cat(features,1).squeeze(0)
        # Flatten attention to align with the view-concatenated embeddings.
        features_attn_reshaped = features_attn.transpose(1, 0).contiguous().view(-1, 1)
        features = features * features_attn_reshaped.expand_as(features)
        features = features.view(self.args.nb_graphs, self.args.nb_nodes, self.args.hid_units).sum(0).unsqueeze(0)
        return features, features_attn
    def attn_summary(self, features):
        """Softmax-weight the per-view summary vectors into a single summary."""
        features_attn = []
        for i in range(self.args.nb_graphs):
            features_attn.append((self.A[i](features[i].squeeze())))
        features_attn = F.softmax(torch.cat(features_attn), dim=-1).unsqueeze(1)
        features = torch.cat(features, 0)
        features_attn_expanded = features_attn.expand_as(features)
        features = (features * features_attn_expanded).sum(0).unsqueeze(0)
        return features, features_attn
| 1,865 | 37.875 | 114 | py |
DMGI | DMGI-master/utils/__init__.py | 0 | 0 | 0 | py | |
DMGI | DMGI-master/utils/process.py | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
import sys
import torch
import torch.nn as nn
import scipy.io as sio
import pdb
def load_data_dblp(args):
    """Load a multi-metapath dataset ('acm' from .mat, otherwise pickle).

    Returns:
        (adjacencies, features_list, labels, idx_train, idx_val, idx_test)
        where adjacencies holds one csr_matrix per metapath (with ``sc``
        added on the diagonal as self-connection weight) and features_list
        repeats the same lil_matrix once per metapath.

    Assumes the data file contains keys 'label', 'feature', 'train_idx',
    'val_idx', 'test_idx' and one adjacency per metapath name — TODO confirm
    against the published dataset files.
    """
    dataset = args.dataset
    metapaths = args.metapaths_list
    sc = args.sc
    if dataset == 'acm':
        data = sio.loadmat('data/{}.mat'.format(dataset))
    else:
        data = pkl.load(open('data/{}.pkl'.format(dataset), "rb"))
    label = data['label']
    N = label.shape[0]
    truefeatures = data['feature'].astype(float)
    # Add weighted self-loops before normalization.
    rownetworks = [data[metapath] + np.eye(N)*sc for metapath in metapaths]
    rownetworks = [sp.csr_matrix(rownetwork) for rownetwork in rownetworks]
    truefeatures = sp.lil_matrix(truefeatures)
    idx_train = data['train_idx'].ravel()
    idx_val = data['val_idx'].ravel()
    idx_test = data['test_idx'].ravel()
    truefeatures_list = []
    for _ in range(len(rownetworks)):
        truefeatures_list.append(truefeatures)
    return rownetworks, truefeatures_list, label, idx_train, idx_val, idx_test
def parse_skipgram(fname):
    """Parse a skip-gram embedding file into an (nb_nodes, nb_features) array.

    Format: the first two tokens give the node and feature counts, followed
    by one 1-based node id and its feature values per node.
    """
    with open(fname) as f:
        toks = f.read().split()
    nb_nodes = int(toks[0])
    nb_features = int(toks[1])
    ret = np.empty((nb_nodes, nb_features))
    pos = 2
    for _ in range(nb_nodes):
        node = int(toks[pos]) - 1
        pos += 1
        ret[node, :] = [float(t) for t in toks[pos:pos + nb_features]]
        pos += nb_features
    return ret
# Process a (subset of) a TU dataset into standard form
def process_tu(data, nb_nodes):
    """Convert a (subset of a) TU dataset into dense padded arrays.

    Args:
        data: sequence of graph objects exposing ``.x`` (node features),
              ``.y`` (label) and ``.edge_index`` — presumably PyG-style;
              confirm against the caller.
        nb_nodes: padded node count; graphs smaller than this are zero-padded.

    Returns:
        (features, adjacency, labels, sizes, masks) — masks marks real nodes.
    """
    nb_graphs = len(data)
    ft_size = data.num_features
    features = np.zeros((nb_graphs, nb_nodes, ft_size))
    adjacency = np.zeros((nb_graphs, nb_nodes, nb_nodes))
    labels = np.zeros(nb_graphs)
    sizes = np.zeros(nb_graphs, dtype=np.int32)
    masks = np.zeros((nb_graphs, nb_nodes))
    for g in range(nb_graphs):
        sizes[g] = data[g].x.shape[0]
        features[g, :sizes[g]] = data[g].x
        labels[g] = data[g].y[0]
        masks[g, :sizes[g]] = 1.0
        # Densify the edge list into an adjacency matrix.
        e_ind = data[g].edge_index
        coo = sp.coo_matrix((np.ones(e_ind.shape[1]), (e_ind[0, :], e_ind[1, :])), shape=(nb_nodes, nb_nodes))
        adjacency[g] = coo.todense()
    return features, adjacency, labels, sizes, masks
def micro_f1(logits, labels):
    """Micro-averaged F1 for multi-label logits (sigmoid, threshold 0.5)."""
    # Binarise predictions and cast both sides to integer tensors.
    preds = torch.round(nn.Sigmoid()(logits)).long()
    labels = labels.long()
    # Confusion counts via nonzero tricks on {0, 1} tensors.
    tp = torch.nonzero(preds * labels).shape[0] * 1.0
    tn = torch.nonzero((preds - 1) * (labels - 1)).shape[0] * 1.0
    fp = torch.nonzero(preds * (labels - 1)).shape[0] * 1.0
    fn = torch.nonzero((preds - 1) * labels).shape[0] * 1.0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return (2 * precision * recall) / (precision + recall)
def accuracy(output, labels):
    """Fraction of rows whose argmax prediction matches the label."""
    preds = output.max(1)[1].type_as(labels)
    n_correct = preds.eq(labels).double().sum()
    return n_correct / len(labels)
"""
Prepare adjacency matrix by expanding up to a given neighbourhood.
This will insert loops on every node.
Finally, the matrix is converted to bias vectors.
Expected shape: [graph, nodes, nodes]
"""
def adj_to_bias(adj, sizes, nhood=1):
    """Convert adjacency matrices to attention-bias masks.

    Nodes reachable within ``nhood`` hops (self-loops inserted each hop) get
    bias 0; unreachable pairs inside the valid sub-matrix get -1e9.

    Args:
        adj: array of shape [graphs, nodes, nodes].
        sizes: per-graph count of real (non-padded) nodes.
    """
    nb_graphs = adj.shape[0]
    mt = np.empty(adj.shape)
    for g in range(nb_graphs):
        # Reachability matrix: repeated multiplication by (A + I).
        mt[g] = np.eye(adj.shape[1])
        for _ in range(nhood):
            mt[g] = np.matmul(mt[g], (adj[g] + np.eye(adj.shape[1])))
        # Binarise only the valid sub-matrix: positive entries become 1.0.
        n = sizes[g]
        sub = mt[g][:n, :n]
        mt[g][:n, :n] = np.where(sub > 0.0, 1.0, sub)
    return -1e9 * (1.0 - mt)
###############################################
# This section of code adapted from tkipf/gcn #
###############################################
def parse_index_file(filename):
    """Parse index file."""
    return [int(line.strip()) for line in open(filename)]
def sample_mask(idx, l):
    """Create a boolean mask of length ``l`` that is True at positions ``idx``.

    Fix: uses the builtin ``bool`` dtype — the ``np.bool`` alias was
    deprecated in NumPy 1.20 and removed in 1.24, so the original raised
    AttributeError on modern NumPy.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    return np.array(mask, dtype=bool)
def load_data(dataset_str):  # {'pubmed', 'citeseer', 'cora'}
    """Load a Planetoid-format citation dataset from ``data/ind.*`` files.

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test) with the
        canonical Planetoid split: train = first len(y) nodes, val = the
        next 500, test = the reordered test indices.
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # Python 2 pickles need latin1 decoding under Python 3.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Re-order test rows to match the graph's node numbering.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    return adj, features, labels, idx_train, idx_val, idx_test
def sparse_to_tuple(sparse_mx, insert_batch=False):
    """Convert sparse matrix to tuple representation."""
    """Set insert_batch=True if you want to insert a batch dimension."""
    def to_tuple(mx):
        # Returns (coords, values, shape) for one COO matrix; with
        # insert_batch, coords gain a leading all-zero batch index and
        # shape gains a leading 1.
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        if insert_batch:
            coords = np.vstack((np.zeros(mx.row.shape[0]), mx.row, mx.col)).transpose()
            values = mx.data
            shape = (1,) + mx.shape
        else:
            coords = np.vstack((mx.row, mx.col)).transpose()
            values = mx.data
            shape = mx.shape
        return coords, values, shape
    # A list input is converted element-wise, in place.
    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)
    return sparse_mx
def standardize_data(f, train_mask):
    """Standardize feature matrix and convert to tuple representation"""
    # standardize data
    f = f.todense()
    mu = f[train_mask == True, :].mean(axis=0)
    sigma = f[train_mask == True, :].std(axis=0)
    # Drop zero-variance columns (computed on training rows only), then
    # recompute the statistics on the remaining columns.
    f = f[:, np.squeeze(np.array(sigma > 0))]
    mu = f[train_mask == True, :].mean(axis=0)
    sigma = f[train_mask == True, :].std(axis=0)
    f = (f - mu) / sigma
    return f
def preprocess_features(features):
    """Row-normalize feature matrix and convert to tuple representation"""
    rowsum = np.array(features.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    # Rows that sum to zero produce inf; map them back to zero.
    r_inv[np.isinf(r_inv)] = 0.
    return sp.diags(r_inv).dot(features).todense()
def normalize_adj(adj):
    """Symmetrically normalize adjacency matrix."""
    # Computes D^{-1/2} A D^{-1/2}; zero-degree rows yield 0 (inf clamped).
    adj = sp.coo_matrix(adj)
    rowsum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    # Add self-loops, then apply symmetric normalization.
    adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
    return sparse_to_tuple(adj_normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    # Stack row/col indices into the 2 x nnz layout torch expects.
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
def process_adj_gat(adj):
    """Turn an adjacency matrix into a GAT-style dense attention mask:
    0 for edges (and self-loops), -9e15 for non-edges."""
    # adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # Tricky implementation of official GAT
    adj = (adj + sp.eye(adj.shape[0])).todense()
    for x in range(0, adj.shape[0]):
        for y in range(0, adj.shape[1]):
            if adj[x, y] == 0:
                adj[x, y] = -9e15
            elif adj[x, y] >= 1:
                adj[x, y] = 0
            else:
                # Entries in (0, 1) are unexpected after adding the identity.
                print(adj[x, y], 'error')
    adj = torch.FloatTensor(np.array(adj))
    # adj = sp.coo_matrix(adj)
return adj | 8,988 | 33.178707 | 110 | py |
DMGI | DMGI-master/data/preprocess_dblp.py | import pdb
import sys
from nltk.corpus import stopwords
import re
import numpy as np
import pandas as pd
import pickle as pkl
import pdb
import sys
from nltk.corpus import stopwords
import re
import time
num_train = int(sys.argv[1])
raw_data_filename = 'dblp.txt'
top_confs = ['icml','aaai','ijcai','sigkdd','international conference on data mining','sigmod','vldb', 'icde','sigir', 'cikm','cvpr','eccv','emnlp','naacl', 'iccv', 'web search and data mining', 'world wide web conference','association for computational linguistics']
conf_dict = {'international conference on data mining':'icdm', 'web search and data mining':'wsdm',
'world wide web conference':'www','association for computational linguistics':'acl',
'sigkdd':'sigkdd','vldb':'vldb','sigmod':'sigmod','icde':'icde','icml':'icml',
'sigir':'sigir','cvpr':'cvpr','cikm':'cikm','eccv':'eccv',"aaai":"aaai","emnlp":"emnlp",
'ijcai':'ijcai','naacl':'naacl','iccv':'iccv'}
def currentTime():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print('[{}]Reading {}'.format(currentTime(), raw_data_filename))
with open(raw_data_filename, 'r', errors='replace') as f:
lines = f.readlines()
# separate blocks
papers = []; paper = []
for line in lines:
if len(line.strip()) != 0: paper.append(line.rstrip())
else: papers.append(paper); paper = []
# read papers
titles = []; authors = []; years = []; conferences = []; indices = []; references = []; abstracts = []
for paper in papers:
if '#*' not in str(paper) or '#@' not in str(paper) or '#t' not in str(paper) or '#c' not in str(paper) or '#!' not in str(paper):
continue
ref_tmp = []; flag=True
for elem in paper:
if elem.startswith('#*'):
title = elem.split("#*")[1]
if len(title) == 0:
flag=False
break
elif elem.startswith('#@'):
author = elem.split("#@")[1]
if len(author) == 0:
flag=False
break
elif elem.startswith('#t'):
year = elem.split("#t")[1]
if len(year) == 0:
flag=False
break
elif elem.startswith('#c'):
conference = elem.split("#c")[1]
if len(conference) == 0:
flag=False
break
elif elem.startswith('#index'):
index = elem.split("#index")[1]
if len(index) == 0:
flag=False
break
elif elem.startswith('#%'):
reference = elem.split("#%")[1]
ref_tmp.append(reference)
elif elem.startswith('#!'):
abstract = elem.split("#!")[1]
if len(abstract) < 100:
flag=False
break
if flag==True:
titles.append(title); authors.append(author); years.append(year); conferences.append(conference); indices.append(index); references.append(ref_tmp); abstracts.append(abstract)
# --- Build a DataFrame from the parsed records and normalize text columns ---
df = pd.DataFrame({'paper_idx':indices, 'title':titles, 'author':authors, 'year':years,'conference':conferences, 'abstract':abstracts, 'reference':references})
df.year = df.year.astype('int')
df.conference = df.conference.str.lower()
df.abstract = df.abstract.str.lower()
df.title = df.title.str.lower()
print("[{}]Done reading data".format(currentTime()))
print("[{}]Start Filtering data".format(currentTime()))
# Deduplicate by title and keep papers from 2006-2015 only.
df = df.drop_duplicates(subset='title')
df_year = df.loc[(df.year >= 2006) & (df.year <= 2015)]
# Drop non-main-track venue strings (workshops, posters, tutorials, ...).
df_year = df_year.loc[(~df_year.conference.str.contains('workshop'))]
df_year = df_year.loc[(~df_year.conference.str.contains('special issue'))]
df_year = df_year.loc[(~df_year.conference.str.contains('tutorials'))]
df_year = df_year.loc[(~df_year.conference.str.contains('companion'))]
df_year = df_year.loc[(~df_year.conference.str.contains('poster'))]
df_year = df_year.loc[(~df_year.conference.str.contains('posters'))]
df_year = df_year.loc[(~df_year.conference.str.contains('talks'))]
df_year = df_year.loc[(~df_year.conference.str.contains('sketches'))]
df_year = df_year.loc[(~df_year.conference.str.contains('courses'))]
df_year = df_year.loc[(~df_year.conference.str.contains('classes'))]
print("[{}]Done Filtering data".format(currentTime()))
# Keep only venues that mention one of top_confs, then canonicalize the
# venue name in two steps (substring match -> conf_dict_tmp -> conf_dict).
df_year = df_year[df_year.conference.str.lower().str.contains(('|'.join(top_confs)))]
conf_dict_tmp = {}
for elem in df_year.conference.unique():
    for idx, conf in enumerate(top_confs):
        if conf in elem:
            conf_dict_tmp[elem] = conf
            break
df_year.conference = df_year.conference.map(conf_dict_tmp)
df_year.conference = df_year.conference.map(conf_dict)
print("Num conferences: {}".format(len(df_year.conference.unique())))
# parse authors
df_year['author']= df_year.author.apply(lambda x: x.split(", "))
def print_stats():
    # Report corpus statistics (reads df_year from the enclosing scope).
    num_papers = len(df_year.paper_idx.unique())
    num_conferences = len(df_year.conference.unique())
    num_authors = len(set([elem for elems in df_year.author for elem in elems]))
    ave_authors_per_paper = df_year[['paper_idx','author']].author.apply(len).sum() / len(df_year)
    print("NumPapers: {}, NumVenues: {}, NumAuthors: {}, AveAuthorsPerPaper: {}"
          .format(num_papers, num_conferences, num_authors, round(ave_authors_per_paper,3)))
# print_stats()
df_filtered = df_year[['paper_idx','author','title','conference','abstract','reference']]
df_filtered = df_filtered.reset_index(drop=True)
# Venue -> research-area label (DM / AI / CV / NLP).
label_dict = {"sigkdd":"DM", "wsdm":"DM","icdm":"DM",
              "icml":"AI", "aaai":"AI", "ijcai":"AI",
              "cvpr":"CV",
              "acl":"NLP", "naacl":"NLP", "emnlp":"NLP"}
# pdb.set_trace()
df_filtered['label'] = df_filtered.conference.map(label_dict)
df_filtered = df_filtered.dropna().reset_index(drop=True)
temp = df_filtered
temp = temp[['title','paper_idx','author','conference','abstract','reference','label']]
from collections import Counter
# filter authors
# Remove authors with <= author_threshold papers, then drop papers left
# without any author.
author_threshold = 3
print("Author threshold: {}".format(author_threshold))
counter = Counter([author for authors in temp.author.values for author in authors])
cnt_stopauthors = set([author for author, count in counter.most_common() if count <= author_threshold])
def remove_cnt_stopauthors(x):
    # Drop infrequent authors (reads cnt_stopauthors from enclosing scope).
    return list(set(x).difference(cnt_stopauthors))
temp.author = temp.author.apply(lambda x : remove_cnt_stopauthors(x))
temp = temp[temp.author.apply(len) > 0].reset_index(drop=True)
################# preprocess abstract
from sklearn.feature_extraction.text import TfidfVectorizer
tvec = TfidfVectorizer(tokenizer=lambda x: x, preprocessor=lambda x: x, min_df=.0025, max_df=.1, ngram_range=(1,1), lowercase=False)
temp['abstract'] = temp.abstract.apply(lambda x : x.lower().split(" "))
tvec.fit(temp.abstract.values.tolist())
tvec_weights = tvec.transform(temp.abstract.values.tolist())
weights = np.asarray(tvec_weights.mean(axis=0)).ravel().tolist()
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# newer versions need get_feature_names_out() -- confirm the pinned version.
weights_df = pd.DataFrame({'term': tvec.get_feature_names(), 'weight': weights})
# weights_df.sort_values(by='weight', ascending=False).head(20)
# Keep the 2000 highest-weighted terms as the abstract vocabulary.
valid_words = set(weights_df.sort_values(by='weight', ascending=False).head(2000).term.values)
# temp['plots'] = temp.plots.apply(lambda x:x.split(" "))
word_set = {term for terms in temp.abstract.values for term in terms if term in valid_words}
word_idx = {word:idx for idx,word in enumerate(word_set)}
word_idx_rev = {idx:word for idx,word in enumerate(word_set)}
def map_word_dict(xs, idx_dic):
    """Map each token in `xs` to its id in `idx_dic`; unknown tokens are dropped."""
    ids = []
    for token in xs:
        if token in idx_dic:
            ids.append(idx_dic[token])
    return ids
# Convert abstracts to id sequences and drop papers with empty abstracts.
temp['abstract'] = temp.abstract.apply(lambda x : map_word_dict(x, word_idx))
temp = temp[temp.abstract.apply(len) > 0].reset_index(drop=True)
print("num abstract words: {}".format(len(set([word for words in temp.abstract.values for word in words]))))
################
# Same TF-IDF-based vocabulary selection, now for paper titles.
tvec = TfidfVectorizer(stop_words='english', min_df=1, max_df=1.0, ngram_range=(1,1))
tvec.fit(temp.title)
tvec_weights = tvec.transform(temp.title.values.tolist())
weights = np.asarray(tvec_weights.mean(axis=0)).ravel().tolist()
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2.
weights_df = pd.DataFrame({'term': tvec.get_feature_names(), 'weight': weights})
valid_words = set(weights_df.sort_values(by='weight', ascending=False).head(2000).term.values)
temp['title_real'] = temp.title #############
temp['title'] = temp.title.apply(lambda x:x.lower().split(" "))
title_word_set = {term for terms in temp.title.values for term in terms if term in valid_words}
title_word_idx = {word:idx for idx,word in enumerate(title_word_set)}
title_word_idx_rev = {idx:word for idx,word in enumerate(title_word_set)}
def map_word_dict(xs, idx_dic):
    """Translate tokens to integer ids via `idx_dic`, skipping unknown tokens.

    (Redefinition of the helper above; behavior is identical.)
    """
    return [idx_dic[token] for token in xs if token in idx_dic]
# Convert titles to id sequences and drop papers with empty titles.
temp['title'] = temp.title.apply(lambda x : map_word_dict(x, title_word_idx))
temp = temp[temp.title.apply(len) > 0].reset_index(drop=True)
print("num title words: {}".format(len(set([word for words in temp.title.values for word in words]))))
papers = set(temp.paper_idx.unique())
# leave ref papers that are in papers
def filter_refs(x):
    # Keep only references that point to papers kept in this corpus.
    return list(set(x).intersection(papers))
temp['reference'] = temp.reference.apply(lambda x: filter_refs(x))
# Map to indices
# Build forward/backward id maps for papers, areas, authors and venues.
paper_idx_names = list(set([paper_idx for paper_idx in temp.paper_idx.values]))
paper_idx_names_dict = {name:idx for idx, name in enumerate(paper_idx_names)}
paper_idx_names_dict_rev = {idx:name for idx, name in enumerate(paper_idx_names)}
area_names = list(set([area for area in temp.label.values]))
area_names_dict = {name:idx for idx, name in enumerate(area_names)}
area_names_dict_rev = {idx:name for idx, name in enumerate(area_names)}
author_names = set([author for authors in temp.author.values for author in authors])
author_names_dict = {name:idx for idx, name in enumerate(author_names)}
author_names_dict_rev = {idx:name for idx, name in enumerate(author_names)}
conf_names = list(set([conf for conf in temp.conference.values]))
conf_names_dict = {name:idx for idx, name in enumerate(conf_names)}
# map label
temp.label = temp.label.map(area_names_dict)
# map paper idx
temp.paper_idx = temp.paper_idx.map(paper_idx_names_dict)
# map reference
def map_refs_dict(xs):
    # Translate referenced paper ids to integer indices.
    return [paper_idx_names_dict[x] for x in xs]
temp.reference = temp.reference.apply(lambda x : map_refs_dict(x))
# map author
def map_author_dict(xs):
    # Translate author names to integer indices.
    return [author_names_dict[x] for x in xs]
temp['author_real'] = temp.author
temp.author = temp.author.apply(lambda x : map_author_dict(x))
# map conference
temp['conference_real'] = temp.conference
temp.conference = temp.conference.map(conf_names_dict)
# temp = temp[['title','paper_idx','author','conference','abstract','reference','label']]
def make_onehot(idxs, length):
    """Return a 0/1 list of size `length` with ones at the positions in `idxs`."""
    vec = [0 for _ in range(length)]
    for position in idxs:
        vec[position] = 1
    return vec
# --- Build one-hot relation matrices (paper-author, paper-title-word, ...) ---
# PC = []
PA = []
# PP_ref = []
PT = []
features = []
labels = []
author_titles = dict()
paper_idxs = []
for idx, vals in enumerate(temp.values):
    # Row layout: title, paper_idx, author, conference, abstract, refs, label.
    title = vals[0]
    paper_idx = vals[1]
    authors = vals[2]
    # conf = [vals[3]]
    abstract = vals[4]
    refs = vals[5]
    area = [vals[6]]
    # PC.append(make_onehot(conf, len(conf_names_dict)))
    paper_idxs.append(paper_idx)
    PT.append(make_onehot(title, len(title_word_idx_rev)))
    PA.append(make_onehot(authors, len(author_names_dict)))
    # PP_ref.append(make_onehot(refs, len(paper_idx_names_dict)))
    features.append(make_onehot(abstract, len(word_idx_rev)))
    labels.append(make_onehot(area, len(area_names_dict)))
    for author in authors:
        author_titles.setdefault(author,[]).extend(title)
# References are one-hot encoded against row positions, not raw paper ids.
paper_idxs_rev = {p_idx:idx for idx, p_idx in enumerate(paper_idxs)}
PP_ref = []
for idx, vals in enumerate(temp.values):
    refs = [paper_idxs_rev[elem] for elem in vals[5]]
    PP_ref.append(make_onehot(refs, len(paper_idx_names_dict)))
AT = []
for author in range(len(author_titles)):
    # Author -> set of title-word ids seen across the author's papers.
    titles = list(set(author_titles[author]))
    AT.append(make_onehot(titles, len(title_word_idx_rev)))
PA = np.array(PA).astype(float)
PP_ref = np.array(PP_ref).astype(float)
PT = np.array(PT).astype(float)
# PC = np.array(PC).astype(float)
AT = np.array(AT).astype(float)
features = np.array(features).astype(float)
labels = np.array(labels).astype(float)
def print_shape(mat, name):
    """Report a matrix's shape and its number of nonzero entries."""
    nnz = len(mat.nonzero()[0])
    print("[{}] shape:{} / numRelations: {}".format(name, mat.shape, nnz))
print_shape(PA, 'PA')
print_shape(PP_ref, 'PP_ref')
print_shape(PT, 'PT')
print_shape(AT, 'AT')
print_shape(features, 'features')
# Compose metapath adjacency matrices and binarize them.
PAP = np.matmul(PA, PA.T) #
# PCP = np.matmul(PC, PC.T) #
PAT = np.matmul(PA, AT)
PATA = np.matmul(PAT, AT.T)
PATAP = np.matmul(PATA, PA.T) #
PPrefP = np.matmul(PP_ref, PP_ref.T)
PTP = np.matmul(PT, PT.T)
PAP = (PAP > 0) * np.ones_like(PAP)
# PCP = (PCP > 0) * np.ones_like(PCP)
PATAP = (PATAP > 0) * np.ones_like(PATAP)
PPrefP = (PPrefP > 0) * np.ones_like(PPrefP)
PTP = (PTP > 0) * np.ones_like(PTP)
# Per-class train/val/test split: num_train train, next 50 val, rest test.
arg_labels = np.argmax(labels,1)
unique, counts = np.unique(arg_labels, return_counts=True)
print("Label: {}".format({area_names_dict_rev[un]:cn for un, cn in zip(unique, counts)}))
label_idxs_dict = {}
for idx, label in enumerate(arg_labels):
    label_idxs_dict.setdefault(label, []).append(idx)
train_idx = []
val_idx = []
test_idx = []
for label in label_idxs_dict:
    idxs = label_idxs_dict[label]
    train_idx += idxs[:num_train]
    val_idx += idxs[num_train:num_train+50]
    test_idx += idxs[num_train+50:]
print("Train: {}, Val: {}, Test: {}".format(len(train_idx), len(val_idx), len(test_idx)))
train_idx = np.array(train_idx).reshape(1,-1)
val_idx = np.array(val_idx).reshape(1,-1)
test_idx = np.array(test_idx).reshape(1,-1)
data = {'label':labels, 'feature':features, 'PAP':PAP, 'PPP':PPrefP, 'PATAP':PATAP, 'train_idx':train_idx, 'val_idx':val_idx, 'test_idx':test_idx}
print('dblp_num_labels_{}.pkl'.format(num_train))
# Write the dataset dict. Use a context manager so the file handle is
# closed (and data flushed) even if pickling fails -- the original bare
# open() call leaked the handle.
with open('dblp_{}.pkl'.format(num_train), "wb") as out_file:
    pkl.dump(data, out_file, protocol=4)
def print_sparsity(mat, name):
    # Report edge density. NOTE(review): divides per-column counts by
    # mat.shape[1]; this equals the overall density only for square
    # matrices -- all callers below pass square metapath matrices.
    print("[{}] Density: {}".format(name, np.mean(sum(mat==1) / mat.shape[1])))
print_sparsity(PAP,'PAP')
print_sparsity(PPrefP,'PPP')
print_sparsity(PATAP,'PATAP')
def print_shape(mat, name):
    """Print shape and nonzero-entry (relation) count for matrix `mat`."""
    relation_count = len(mat.nonzero()[0])
    print("[{}] shape:{} / numRelations: {}".format(
        name, mat.shape, relation_count))
# Final shape report for the saved metapath matrices.
print_shape(PAP,'PAP')
print_shape(PPrefP,'PPP')
print_shape(PATAP,'PATAP')
| 14,440 | 36.028205 | 267 | py |
EEND | EEND-main/eend/infer.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
from backend.models import (
average_checkpoints,
get_model,
)
from common_utils.diarization_dataset import KaldiDiarizationDataset
from common_utils.gpu_utils import use_single_gpu
from os.path import join
from pathlib import Path
from scipy.signal import medfilt
from torch.utils.data import DataLoader
from train import _convert
from types import SimpleNamespace
from typing import TextIO
import logging
import numpy as np
import os
import random
import torch
import yamlargparse
def get_infer_dataloader(args: SimpleNamespace) -> DataLoader:
    # Build a batch-size-1, order-preserving DataLoader over the inference
    # set; each dataset item is a (features, labels, recording-name) triple.
    infer_set = KaldiDiarizationDataset(
        args.infer_data_dir,
        chunk_size=args.num_frames,
        context_size=args.context_size,
        feature_dim=args.feature_dim,
        frame_shift=args.frame_shift,
        frame_size=args.frame_size,
        input_transform=args.input_transform,
        n_speakers=args.num_speakers,
        sampling_rate=args.sampling_rate,
        shuffle=args.time_shuffle,
        subsampling=args.subsampling,
        use_last_samples=True,
        min_length=0,
    )
    infer_loader = DataLoader(
        infer_set,
        batch_size=1,
        collate_fn=_convert,
        num_workers=0,
        shuffle=False,
        worker_init_fn=_init_fn,
    )
    # Sanity-check the first item's feature dimensionality against the
    # configured feature_dim / context_size.
    Y, _, _ = infer_set.__getitem__(0)
    assert Y.shape[1] == \
        (args.feature_dim * (1 + 2 * args.context_size)), \
        f"Expected feature dimensionality of \
        {args.feature_dim} but {Y.shape[1]} found."
    return infer_loader
def hard_labels_to_rttm(
    labels: np.ndarray,
    id_file: str,
    rttm_file: TextIO,
    frameshift: float = 10
) -> None:
    """
    Write an Nf x Ns hard-decision matrix as RTTM segments.

    Nf is the number of frames and Ns the number of speakers; `frameshift`
    (in ms) gives the duration each frame represents. Speakers that never
    speak are dropped before numbering, so `spk` labels in the output refer
    to the remaining columns.
    """
    if len(labels.shape) > 1:
        # Keep only columns of speakers that actually speak.
        speaking = np.where(labels.sum(axis=0) != 0)[0]
        labels = labels[:, speaking]
    # Prepend a row of zeros so np.diff flags a segment starting at frame 0.
    if len(labels.shape) > 1:
        labels = np.vstack([np.zeros((1, labels.shape[1])), labels])
    else:
        labels = np.vstack([np.zeros(1), labels])
    transitions = np.diff(labels, axis=0)
    n_spks = labels.shape[1] if len(labels.shape) > 1 else 1
    segments = []  # (start_frame, end_frame, speaker) triples
    for spk in range(n_spks):
        if n_spks > 1:
            starts = np.where(transitions[:, spk] == 1)[0]
            ends = np.where(transitions[:, spk] == -1)[0]
        else:
            starts = np.where(transitions[:] == 1)[0]
            ends = np.where(transitions[:] == -1)[0]
        if len(starts) == len(ends) + 1:
            # Speaker is still active at the last frame: close the segment.
            ends = np.hstack([ends, labels.shape[0] - 1])
        assert len(starts) == len(ends), \
            "Quantities of start and end of segments mismatch. \
            Are speaker labels correct?"
        segments.extend(zip(starts, ends, [spk] * len(starts)))
    # Emit segments ordered by start time (then end time, then speaker).
    for ini, end, spk in sorted(segments):
        rttm_file.write(
            f"SPEAKER {id_file} 1 " +
            f"{round(ini * frameshift / 1000, 3)} " +
            f"{round((end - ini) * frameshift / 1000, 3)} " +
            f"<NA> <NA> spk{spk} <NA> <NA>\n")
def _init_fn(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def postprocess_output(
    probabilities,
    subsampling: int,
    threshold: float,
    median_window_length: int
) -> np.ndarray:
    """
    Turn frame-wise speaker probabilities into hard 0/1 decisions.

    The probabilities (frames x speakers) are thresholded, median-filtered
    per speaker to remove spurious flips, and repeated `subsampling` times
    along the time axis to undo the model's temporal subsampling.
    """
    # Cast to float: scipy >= 1.9 medfilt rejects boolean input
    # (https://github.com/scipy/scipy/issues/16648); output is unchanged.
    thresholded = 1.0 * (probabilities > threshold)
    filtered = np.zeros(thresholded.shape)
    for spk in range(filtered.shape[1]):
        # Median filtering removes isolated false (de)activations.
        filtered[:, spk] = medfilt(
            thresholded[:, spk],
            kernel_size=median_window_length)
    # Undo temporal subsampling by repeating each frame.
    probs_extended = np.repeat(filtered, subsampling, axis=0)
    return probs_extended
def parse_arguments() -> SimpleNamespace:
    """Parse EEND inference options from the command line / YAML config."""
    parser = yamlargparse.ArgumentParser(description='EEND inference')
    parser.add_argument('-c', '--config', help='config file path',
                        action=yamlargparse.ActionConfigFile)
    parser.add_argument('--context-size', default=0, type=int)
    parser.add_argument('--encoder-units', type=int,
                        help='number of units in the encoder')
    parser.add_argument('--epochs', type=str,
                        help='epochs to average separated by commas \
                        or - for intervals.')
    parser.add_argument('--feature-dim', type=int)
    parser.add_argument('--frame-size', type=int)
    parser.add_argument('--frame-shift', type=int)
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--hidden-size', type=int,
                        help='number of units in SA blocks')
    parser.add_argument('--infer-data-dir', help='inference data directory.')
    parser.add_argument('--input-transform', default='',
                        choices=['logmel', 'logmel_meannorm',
                                 'logmel_meanvarnorm'],
                        help='input normalization transform')
    parser.add_argument('--log-report-batches-num', default=1, type=float)
    parser.add_argument('--median-window-length', default=11, type=int)
    parser.add_argument('--model-type', default='TransformerEDA',
                        help='Type of model (for now only TransformerEDA)')
    parser.add_argument('--models-path', type=str,
                        help='directory with model(s) to evaluate')
    parser.add_argument('--num-frames', default=-1, type=int,
                        help='number of frames in one utterance')
    parser.add_argument('--num-speakers', type=int)
    parser.add_argument('--rttms-dir', type=str,
                        help='output directory for rttm files.')
    parser.add_argument('--sampling-rate', type=int)
    parser.add_argument('--seed', type=int)
    parser.add_argument('--subsampling', default=10, type=int)
    parser.add_argument('--threshold', default=0.5, type=float)
    parser.add_argument('--transformer-encoder-n-heads', type=int)
    parser.add_argument('--transformer-encoder-n-layers', type=int)
    parser.add_argument('--transformer-encoder-dropout', type=float)
    parser.add_argument('--vad-loss-weight', default=0.0, type=float)
    # EDA attractor-related options.
    attractor_args = parser.add_argument_group('attractor')
    attractor_args.add_argument(
        '--time-shuffle', action='store_true',
        help='Shuffle time-axis order before input to the network')
    attractor_args.add_argument('--attractor-loss-ratio', default=1.0,
                                type=float, help='weighting parameter')
    attractor_args.add_argument('--attractor-encoder-dropout',
                                default=0.1, type=float)
    attractor_args.add_argument('--attractor-decoder-dropout',
                                default=0.1, type=float)
    attractor_args.add_argument('--estimate-spk-qty', default=-1, type=int)
    attractor_args.add_argument('--estimate-spk-qty-thr',
                                default=-1, type=float)
    attractor_args.add_argument(
        '--detach-attractor-loss', default=False, type=bool,
        help='If True, avoid backpropagation on attractor loss')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_arguments()
    # For reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    np.random.seed(args.seed)  # Numpy module.
    random.seed(args.seed)  # Python random module.
    torch.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    logging.info(args)
    infer_loader = get_infer_dataloader(args)
    # Device selection: --gpu >= 1 picks a CUDA device, otherwise CPU.
    if args.gpu >= 1:
        gpuid = use_single_gpu(args.gpu)
        logging.info('GPU device {} is used'.format(gpuid))
        args.device = torch.device("cuda")
    else:
        gpuid = -1
        args.device = torch.device("cpu")
    # Speaker quantity must be given either as a fixed count or a threshold.
    assert args.estimate_spk_qty_thr != -1 or \
        args.estimate_spk_qty != -1, \
        ("Either 'estimate_spk_qty_thr' or 'estimate_spk_qty' "
            "arguments have to be defined.")
    # NOTE(review): these two out_dir assignments are dead code -- out_dir
    # is unconditionally recomputed below after the model is loaded.
    if args.estimate_spk_qty != -1:
        out_dir = join(args.rttms_dir, f"spkqty{args.estimate_spk_qty}_\
thr{args.threshold}_median{args.median_window_length}")
    elif args.estimate_spk_qty_thr != -1:
        out_dir = join(args.rttms_dir, f"spkqtythr{args.estimate_spk_qty_thr}_\
thr{args.threshold}_median{args.median_window_length}")
    # Build the model and average the requested checkpoint epochs.
    model = get_model(args)
    model = average_checkpoints(
        args.device, model, args.models_path, args.epochs)
    model.eval()
    out_dir = join(
        args.rttms_dir,
        f"epochs{args.epochs}",
        f"timeshuffle{args.time_shuffle}",
        (f"spk_qty{args.estimate_spk_qty}_"
            f"spk_qty_thr{args.estimate_spk_qty_thr}"),
        f"detection_thr{args.threshold}",
        f"median{args.median_window_length}",
        "rttms"
    )
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    # Run inference per recording and write one RTTM file each.
    for i, batch in enumerate(infer_loader):
        input = torch.stack(batch['xs']).to(args.device)
        name = batch['names'][0]
        with torch.no_grad():
            y_pred = model.estimate_sequential(input, args)[0]
        post_y = postprocess_output(
            y_pred, args.subsampling,
            args.threshold, args.median_window_length)
        rttm_filename = join(out_dir, f"{name}.rttm")
        with open(rttm_filename, 'w') as rttm_file:
            hard_labels_to_rttm(post_y, name, rttm_file)
| 10,400 | 37.098901 | 79 | py |
EEND | EEND-main/eend/train.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (authors: Federico Landini)
# Licensed under the MIT license.
from backend.models import (
average_checkpoints,
get_model,
load_checkpoint,
pad_labels,
pad_sequence,
save_checkpoint,
)
from backend.updater import setup_optimizer, get_rate
from common_utils.diarization_dataset import KaldiDiarizationDataset
from common_utils.gpu_utils import use_single_gpu
from common_utils.metrics import (
calculate_metrics,
new_metrics,
reset_metrics,
update_metrics,
)
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from types import SimpleNamespace
from typing import Any, Dict, List, Tuple
import numpy as np
import os
import random
import torch
import logging
import yamlargparse
def _init_fn(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def _convert(
batch: List[Tuple[torch.Tensor, torch.Tensor, str]]
) -> Dict[str, Any]:
return {'xs': [x for x, _, _ in batch],
'ts': [t for _, t, _ in batch],
'names': [r for _, _, r in batch]}
def compute_loss_and_metrics(
    model: torch.nn.Module,
    labels: torch.Tensor,
    input: torch.Tensor,
    n_speakers: List[int],
    acum_metrics: Dict[str, float],
    vad_loss_weight: float,
    detach_attractor_loss: bool
) -> Tuple[torch.Tensor, Dict[str, float]]:
    # Forward pass + loss computation; accumulates diarization metrics and
    # the three loss terms into acum_metrics and returns (loss, metrics).
    # NOTE(review): reads the module-level `args` (not a parameter) -- both
    # call sites define it in __main__ before calling; confirm when reusing.
    y_pred, attractor_loss = model(input, labels, n_speakers, args)
    loss, standard_loss = model.get_loss(
        y_pred, labels, n_speakers, attractor_loss, vad_loss_weight,
        detach_attractor_loss)
    metrics = calculate_metrics(
        labels.detach(), y_pred.detach(), threshold=0.5)
    acum_metrics = update_metrics(acum_metrics, metrics)
    acum_metrics['loss'] += loss.item()
    acum_metrics['loss_standard'] += standard_loss.item()
    acum_metrics['loss_attractor'] += attractor_loss.item()
    return loss, acum_metrics
def get_training_dataloaders(
    args: SimpleNamespace
) -> Tuple[DataLoader, DataLoader]:
    # Build the shuffled training loader and the deterministic validation
    # loader from Kaldi-style data dirs, then sanity-check feature dims.
    train_set = KaldiDiarizationDataset(
        args.train_data_dir,
        chunk_size=args.num_frames,
        context_size=args.context_size,
        feature_dim=args.feature_dim,
        frame_shift=args.frame_shift,
        frame_size=args.frame_size,
        input_transform=args.input_transform,
        n_speakers=args.num_speakers,
        sampling_rate=args.sampling_rate,
        shuffle=args.time_shuffle,
        subsampling=args.subsampling,
        use_last_samples=args.use_last_samples,
        min_length=args.min_length,
    )
    train_loader = DataLoader(
        train_set,
        batch_size=args.train_batchsize,
        collate_fn=_convert,
        num_workers=args.num_workers,
        shuffle=True,
        worker_init_fn=_init_fn,
    )
    # Validation set: identical preprocessing, fixed order, single worker.
    dev_set = KaldiDiarizationDataset(
        args.valid_data_dir,
        chunk_size=args.num_frames,
        context_size=args.context_size,
        feature_dim=args.feature_dim,
        frame_shift=args.frame_shift,
        frame_size=args.frame_size,
        input_transform=args.input_transform,
        n_speakers=args.num_speakers,
        sampling_rate=args.sampling_rate,
        shuffle=args.time_shuffle,
        subsampling=args.subsampling,
        use_last_samples=args.use_last_samples,
        min_length=args.min_length,
    )
    dev_loader = DataLoader(
        dev_set,
        batch_size=args.dev_batchsize,
        collate_fn=_convert,
        num_workers=1,
        shuffle=False,
        worker_init_fn=_init_fn,
    )
    # Both sets must agree and match feature_dim * (1 + 2 * context_size).
    Y_train, _, _ = train_set.__getitem__(0)
    Y_dev, _, _ = dev_set.__getitem__(0)
    assert Y_train.shape[1] == Y_dev.shape[1], \
        f"Train features dimensionality ({Y_train.shape[1]}) and \
        dev features dimensionality ({Y_dev.shape[1]}) differ."
    assert Y_train.shape[1] == (
        args.feature_dim * (1 + 2 * args.context_size)), \
        f"Expected feature dimensionality of {args.feature_dim} \
        but {Y_train.shape[1]} found."
    return train_loader, dev_loader
def parse_arguments() -> SimpleNamespace:
    """Parse EEND training options from the command line / YAML config."""
    parser = yamlargparse.ArgumentParser(description='EEND training')
    parser.add_argument('-c', '--config', help='config file path',
                        action=yamlargparse.ActionConfigFile)
    parser.add_argument('--context-size', default=0, type=int)
    parser.add_argument('--dev-batchsize', default=1, type=int,
                        help='number of utterances in one development batch')
    parser.add_argument('--encoder-units', type=int,
                        help='number of units in the encoder')
    parser.add_argument('--feature-dim', type=int)
    parser.add_argument('--frame-shift', type=int)
    parser.add_argument('--frame-size', type=int)
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--gradclip', default=-1, type=int,
                        help='gradient clipping. if < 0, no clipping')
    parser.add_argument('--hidden-size', type=int,
                        help='number of units in SA blocks')
    parser.add_argument('--init-epochs', type=str, default='',
                        help='Initialize model with average of epochs \
                        separated by commas or - for intervals.')
    parser.add_argument('--init-model-path', type=str, default='',
                        help='Initialize the model from the given directory')
    parser.add_argument('--input-transform', default='',
                        choices=['logmel', 'logmel_meannorm',
                                 'logmel_meanvarnorm'],
                        help='input normalization transform')
    parser.add_argument('--log-report-batches-num', default=1, type=float)
    parser.add_argument('--lr', default=0.001, type=float)
    parser.add_argument('--max-epochs', type=int,
                        help='Max. number of epochs to train')
    parser.add_argument('--min-length', default=0, type=int,
                        help='Minimum number of frames for the sequences'
                             ' after downsampling.')
    parser.add_argument('--model-type', default='TransformerEDA',
                        help='Type of model (for now only TransformerEDA)')
    parser.add_argument('--noam-warmup-steps', default=100000, type=float)
    parser.add_argument('--num-frames', default=500, type=int,
                        help='number of frames in one utterance')
    parser.add_argument('--num-speakers', type=int,
                        help='maximum number of speakers allowed')
    parser.add_argument('--num-workers', default=1, type=int,
                        help='number of workers in train DataLoader')
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--output-path', type=str)
    parser.add_argument('--sampling-rate', type=int)
    parser.add_argument('--seed', type=int)
    parser.add_argument('--subsampling', default=10, type=int)
    parser.add_argument('--train-batchsize', default=1, type=int,
                        help='number of utterances in one train batch')
    parser.add_argument('--train-data-dir',
                        help='kaldi-style data dir used for training.')
    parser.add_argument('--transformer-encoder-dropout', type=float)
    parser.add_argument('--transformer-encoder-n-heads', type=int)
    parser.add_argument('--transformer-encoder-n-layers', type=int)
    parser.add_argument('--use-last-samples', default=True, type=bool)
    parser.add_argument('--vad-loss-weight', default=0.0, type=float)
    parser.add_argument('--valid-data-dir',
                        help='kaldi-style data dir used for validation.')
    # EDA attractor-related options.
    attractor_args = parser.add_argument_group('attractor')
    attractor_args.add_argument(
        '--time-shuffle', action='store_true',
        help='Shuffle time-axis order before input to the network')
    attractor_args.add_argument(
        '--attractor-loss-ratio', default=1.0, type=float,
        help='weighting parameter')
    attractor_args.add_argument(
        '--attractor-encoder-dropout', type=float)
    attractor_args.add_argument(
        '--attractor-decoder-dropout', type=float)
    attractor_args.add_argument(
        '--detach-attractor-loss', type=bool,
        help='If True, avoid backpropagation on attractor loss')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_arguments()
    # For reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
    np.random.seed(args.seed)  # Numpy module.
    random.seed(args.seed)  # Python random module.
    torch.manual_seed(args.seed)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    logging.info(args)
    writer = SummaryWriter(f"{args.output_path}/tensorboard")
    train_loader, dev_loader = get_training_dataloaders(args)
    # Device selection: --gpu >= 1 picks a CUDA device, otherwise CPU.
    if args.gpu >= 1:
        gpuid = use_single_gpu(args.gpu)
        logging.info('GPU device {} is used'.format(gpuid))
        args.device = torch.device("cuda")
    else:
        gpuid = -1
        args.device = torch.device("cpu")
    # Either start fresh or initialize from an average of given checkpoints.
    if args.init_model_path == '':
        model = get_model(args)
        optimizer = setup_optimizer(args, model)
    else:
        model = get_model(args)
        model = average_checkpoints(
            args.device, model, args.init_model_path, args.init_epochs)
        optimizer = setup_optimizer(args, model)
    train_batches_qty = len(train_loader)
    dev_batches_qty = len(dev_loader)
    logging.info(f"#batches quantity for train: {train_batches_qty}")
    logging.info(f"#batches quantity for dev: {dev_batches_qty}")
    acum_train_metrics = new_metrics()
    acum_dev_metrics = new_metrics()
    # Resume from the newest checkpoint if one already exists in output_path.
    if os.path.isfile(os.path.join(
            args.output_path, 'models', 'checkpoint_0.tar')):
        # Load latest model and continue from there
        directory = os.path.join(args.output_path, 'models')
        checkpoints = os.listdir(directory)
        paths = [os.path.join(directory, basename) for
                 basename in checkpoints if basename.startswith("checkpoint_")]
        latest = max(paths, key=os.path.getctime)
        epoch, model, optimizer, _ = load_checkpoint(args, latest)
        init_epoch = epoch
    else:
        init_epoch = 0
        # Save initial model
        save_checkpoint(args, init_epoch, model, optimizer, 0)
    for epoch in range(init_epoch, args.max_epochs):
        model.train()
        for i, batch in enumerate(train_loader):
            features = batch['xs']
            labels = batch['ts']
            # Per-sample speaker count: highest active speaker index + 1.
            n_speakers = np.asarray([max(torch.where(t.sum(0) != 0)[0]) + 1
                                     if t.sum() > 0 else 0 for t in labels])
            max_n_speakers = max(n_speakers)
            features, labels = pad_sequence(features, labels, args.num_frames)
            labels = pad_labels(labels, max_n_speakers)
            features = torch.stack(features).to(args.device)
            labels = torch.stack(labels).to(args.device)
            loss, acum_train_metrics = compute_loss_and_metrics(
                model, labels, features, n_speakers, acum_train_metrics,
                args.vad_loss_weight,
                args.detach_attractor_loss)
            # Periodically flush accumulated train metrics to TensorBoard.
            if i % args.log_report_batches_num == \
                    (args.log_report_batches_num-1):
                for k in acum_train_metrics.keys():
                    writer.add_scalar(
                        f"train_{k}",
                        acum_train_metrics[k] / args.log_report_batches_num,
                        epoch * train_batches_qty + i)
                writer.add_scalar(
                    "lrate",
                    get_rate(optimizer),
                    epoch * train_batches_qty + i)
                acum_train_metrics = reset_metrics(acum_train_metrics)
            optimizer.zero_grad()
            loss.backward()
            # NOTE(review): clipping is applied unconditionally although the
            # --gradclip help text says "if < 0, no clipping" and the default
            # is -1 -- confirm the intended behavior.
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradclip)
            optimizer.step()
        save_checkpoint(args, epoch+1, model, optimizer, loss)
        # Validation pass: accumulate metrics over dev and log the averages.
        with torch.no_grad():
            model.eval()
            for i, batch in enumerate(dev_loader):
                features = batch['xs']
                labels = batch['ts']
                n_speakers = np.asarray([max(torch.where(t.sum(0) != 0)[0]) + 1
                                         if t.sum() > 0 else 0 for t in labels])
                max_n_speakers = max(n_speakers)
                features, labels = pad_sequence(
                    features, labels, args.num_frames)
                labels = pad_labels(labels, max_n_speakers)
                features = torch.stack(features).to(args.device)
                labels = torch.stack(labels).to(args.device)
                _, acum_dev_metrics = compute_loss_and_metrics(
                    model, labels, features, n_speakers, acum_dev_metrics,
                    args.vad_loss_weight,
                    args.detach_attractor_loss)
        for k in acum_dev_metrics.keys():
            writer.add_scalar(
                f"dev_{k}", acum_dev_metrics[k] / dev_batches_qty,
                epoch * dev_batches_qty + i)
        acum_dev_metrics = reset_metrics(acum_dev_metrics)
| 13,614 | 39.885886 | 79 | py |
EEND | EEND-main/eend/backend/losses.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (authors: Federico Landini, Lukas Burget, Mireia Diez)
# Copyright 2022 AUDIAS Universidad Autonoma de Madrid (author: Alicia Lozano-Diez)
# Licensed under the MIT license.
from itertools import permutations
import numpy as np
import torch
import torch.nn.functional as F
from typing import List, Tuple
from torch.nn.functional import logsigmoid
from scipy.optimize import linear_sum_assignment
def pit_loss_multispk(
        logits: List[torch.Tensor], target: List[torch.Tensor],
        n_speakers: np.ndarray, detach_attractor_loss: bool):
    # Permutation-invariant training (PIT) loss: per sample, find the
    # reference-speaker permutation with minimum BCE cost (Hungarian
    # algorithm) and compute BCE against it. `target` is permuted in place.
    if detach_attractor_loss:
        # -1's for speakers that do not have valid attractor
        for i in range(target.shape[0]):
            target[i, :, n_speakers[i]:] = -1 * torch.ones(
                target.shape[1], target.shape[2]-n_speakers[i])
    # Pairwise cost between every predicted and reference speaker:
    # cost[b, p, r] = BCE(logits[b, :, p], target[b, :, r]) summed over time.
    logits_t = logits.detach().transpose(1, 2)
    cost_mxs = -logsigmoid(logits_t).bmm(target) - logsigmoid(-logits_t).bmm(1-target)
    max_n_speakers = max(n_speakers)
    for i, cost_mx in enumerate(cost_mxs.cpu().numpy()):
        if max_n_speakers > n_speakers[i]:
            # Make padded speaker rows/columns maximally costly so the
            # assignment matches real speakers first.
            max_value = np.absolute(cost_mx).sum()
            cost_mx[-(max_n_speakers-n_speakers[i]):] = max_value
            cost_mx[:, -(max_n_speakers-n_speakers[i]):] = max_value
        pred_alig, ref_alig = linear_sum_assignment(cost_mx)
        assert (np.all(pred_alig == np.arange(logits.shape[-1])))
        target[i, :] = target[i, :, ref_alig]
    loss = torch.nn.functional.binary_cross_entropy_with_logits(
        logits, target, reduction='none')
    # Padded positions (reference == -1) do not contribute to the loss.
    loss[torch.where(target == -1)] = 0
    # normalize by sequence length
    loss = torch.sum(loss, axis=1) / (target != -1).sum(axis=1)
    for i in range(target.shape[0]):
        # Zero the loss of speaker slots beyond each sample's speaker count.
        loss[i, n_speakers[i]:] = torch.zeros(loss.shape[1]-n_speakers[i])
    # normalize in batch for all speakers
    loss = torch.mean(loss)
    return loss
def vad_loss(ys: torch.Tensor, ts: torch.Tensor) -> torch.Tensor:
    """Voice-activity loss: BCE between predicted and reference frame silence.

    `ys` are per-speaker logits and `ts` the 0/1 reference labels, both of
    shape (batch, frames, speakers); padded frames have all references -1.
    """
    # A frame contains speech if any valid (non-padding) speaker is active.
    speech_ref = (torch.sum((ts != -1) * ts, 2, keepdim=True) > 0).float()
    # Probability that the whole frame is silent: product of the
    # per-speaker silence probabilities (work in probability space).
    per_spk_silence = 1 - torch.sigmoid(ys)
    frame_silence = torch.prod(per_spk_silence, 2, keepdim=True)
    # Per-frame BCE, size [batch_size, num_frames, 1].
    frame_loss = F.binary_cross_entropy(
        frame_silence, 1 - speech_ref, reduction='none')
    # Zero the loss of padded frames (max over speakers is -1 there).
    frame_loss[torch.where(torch.max(ts, 2, keepdim=True)[0] < 0)] = 0
    # Normalize each sequence by its number of valid (non-padded) frames:
    # a frame whose speaker-mean is -1 is entirely padding.
    valid_frames = (torch.mean(ts, axis=2) != -1).sum(axis=1, keepdims=True)
    seq_loss = torch.sum(frame_loss, axis=1) / valid_frames
    # Average over the batch.
    return torch.mean(seq_loss)
| 3,481 | 43.641026 | 101 | py |
EEND | EEND-main/eend/backend/updater.py | #!/usr/bin/env python3
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
import torch.optim as optim
from torch.nn import Module
from types import SimpleNamespace
from typing import Any, Dict
class NoamOpt:
    """Optimizer wrapper implementing the Noam learning-rate schedule.

    The LR grows linearly for `warmup` steps and then decays with the
    inverse square root of the step count, scaled by model_size**-0.5.
    """
    def __init__(self, model_size: int, warmup: int, optimizer: optim) -> None:
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.model_size = model_size
        self._rate = 0
    def state_dict(self) -> Dict[str, Any]:
        """Return the scheduler state as a dict.

        Every attribute except the wrapped optimizer is included; the
        optimizer keeps its own state dict.
        """
        state = {}
        for key, value in self.__dict__.items():
            if key != 'optimizer':
                state[key] = value
        return state
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Restore scheduler state from a dict produced by state_dict()."""
        self.__dict__.update(state_dict)
    def step(self) -> None:
        """Advance one step: refresh the LR on every group, then step."""
        self._step += 1
        new_rate = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate
        self.optimizer.step()
    def rate(self, step: int = None) -> float:
        """Noam LR at `step` (defaults to the internal step counter)."""
        if step is None:
            step = self._step
        scale = self.model_size ** (-0.5)
        return scale * min(step ** (-0.5), step * self.warmup ** (-1.5))
    def get_rate(self) -> float:
        """Return the LR applied by the most recent step() call."""
        return self._rate
    def zero_grad(self) -> None:
        """Clear gradients on the wrapped optimizer."""
        self.optimizer.zero_grad()
def setup_optimizer(args: SimpleNamespace, model: Module) -> optim:
    """Build the optimizer selected by ``args.optimizer``.

    Supported values: 'adam', 'sgd', and 'noam' (Adam with LR driven by
    the NoamOpt scheduler); anything else raises ValueError.
    """
    name = args.optimizer
    if name == 'adam':
        return optim.Adam(model.parameters(), lr=args.lr)
    if name == 'sgd':
        return optim.SGD(model.parameters(), lr=args.lr)
    if name == 'noam':
        # The Noam schedule overrides the LR each step, so the inner Adam
        # starts at lr=0; betas/eps follow the original Transformer setup.
        inner = optim.Adam(
            model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
        return NoamOpt(args.hidden_size, args.noam_warmup_steps, inner)
    raise ValueError(args.optimizer)
def get_rate(optimizer: optim) -> float:
    """Return the current learning rate of `optimizer`.

    NoamOpt exposes it directly; plain torch optimizers keep it in their
    param groups (all groups share one LR here, so the first is returned).
    """
    if isinstance(optimizer, NoamOpt):
        return optimizer.get_rate()
    for group in optimizer.param_groups:
        return group['lr']
| 2,593 | 30.253012 | 79 | py |
EEND | EEND-main/eend/backend/models.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
from os.path import isfile, join
from backend.losses import (
pit_loss_multispk,
vad_loss,
)
from backend.updater import (
NoamOpt,
setup_optimizer,
)
from pathlib import Path
from torch.nn import Module, ModuleList
from types import SimpleNamespace
from typing import Dict, List, Tuple
import copy
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import logging
"""
T: number of frames
C: number of speakers (classes)
D: dimension of embedding (for deep clustering loss)
B: mini-batch size
"""
class EncoderDecoderAttractor(Module):
    """LSTM encoder-decoder that emits one "attractor" vector per speaker.

    The encoder consumes frame embeddings; its final (h, c) state seeds the
    decoder, which is fed zero vectors and emits one attractor per step.
    A linear "counter" head scores each attractor's existence, which is how
    the number of speakers is estimated at inference time.
    """
    def __init__(
        self,
        device: torch.device,
        n_units: int,
        encoder_dropout: float,
        decoder_dropout: float,
        detach_attractor_loss: bool,
    ) -> None:
        super(EncoderDecoderAttractor, self).__init__()
        self.device = device
        # NOTE(review): dropout on a single-layer LSTM has no effect (torch
        # warns about it) — kept as configured by the caller.
        self.encoder = torch.nn.LSTM(
            input_size=n_units,
            hidden_size=n_units,
            num_layers=1,
            dropout=encoder_dropout,
            batch_first=True,
            device=self.device)
        self.decoder = torch.nn.LSTM(
            input_size=n_units,
            hidden_size=n_units,
            num_layers=1,
            dropout=decoder_dropout,
            batch_first=True,
            device=self.device)
        # Scores each attractor's existence (speaker present or not).
        self.counter = torch.nn.Linear(n_units, 1, device=self.device)
        self.n_units = n_units
        self.detach_attractor_loss = detach_attractor_loss
    def forward(self, xs: torch.Tensor, zeros: torch.Tensor) -> torch.Tensor:
        """Encode `xs` and decode one attractor per step of `zeros`.

        xs: (B, T, D) embeddings; zeros: (B, S, D) all-zero decoder inputs.
        Returns (B, S, D) attractors.
        """
        _, (hx, cx) = self.encoder.to(self.device)(xs.to(self.device))
        # Decoder inputs are all zeros; every bit of information comes from
        # the encoder's final hidden/cell state.
        attractors, (_, _) = self.decoder.to(self.device)(
            zeros.to(self.device),
            (hx.to(self.device), cx.to(self.device))
        )
        return attractors
    def estimate(
        self,
        xs: torch.Tensor,
        max_n_speakers: int = 15
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate attractors from embedding sequences
        without prior knowledge of the number of speakers
        Args:
          xs: List of (T,D)-shaped embeddings
          max_n_speakers (int)
        Returns:
          attractors: List of (N,D)-shaped attractors
          probs: List of attractor existence probabilities
        """
        zeros = torch.zeros((xs.shape[0], max_n_speakers, self.n_units))
        attractors = self.forward(xs, zeros)
        # Sigmoid of the counter head gives the existence probability of
        # each of the max_n_speakers candidate attractors.
        probs = [torch.sigmoid(
            torch.flatten(self.counter.to(self.device)(att)))
            for att in attractors]
        return attractors, probs
    def __call__(
        self,
        xs: torch.Tensor,
        n_speakers: List[int]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate attractors and loss from embedding sequences
        with given number of speakers
        Args:
          xs: List of (T,D)-shaped embeddings
          n_speakers: List of number of speakers, or None if the number
                                of speakers is unknown (ex. test phase)
        Returns:
          loss: Attractor existence loss
          attractors: List of (N,D)-shaped attractors
        """
        max_n_speakers = max(n_speakers)
        # One extra decoder step is requested: its attractor is labeled 0.0
        # ("does not exist"), which teaches the counter when to stop.
        if self.device == torch.device("cpu"):
            zeros = torch.zeros(
                (xs.shape[0], max_n_speakers + 1, self.n_units))
            labels = torch.from_numpy(np.asarray([
                [1.0] * n_spk + [0.0] * (1 + max_n_speakers - n_spk)
                for n_spk in n_speakers]))
        else:
            zeros = torch.zeros(
                (xs.shape[0], max_n_speakers + 1, self.n_units),
                device=torch.device("cuda"))
            labels = torch.from_numpy(np.asarray([
                [1.0] * n_spk + [0.0] * (1 + max_n_speakers - n_spk)
                for n_spk in n_speakers])).to(torch.device("cuda"))
        attractors = self.forward(xs, zeros)
        if self.detach_attractor_loss:
            # Keep the existence loss from back-propagating into the EDA.
            attractors = attractors.detach()
        logit = torch.cat([
            torch.reshape(self.counter(att), (-1, max_n_speakers + 1))
            for att, n_spk in zip(attractors, n_speakers)])
        loss = F.binary_cross_entropy_with_logits(logit, labels)
        # The final attractor does not correspond to a speaker so remove it
        attractors = attractors[:, :-1, :]
        return loss, attractors
class MultiHeadSelfAttention(Module):
    """ Multi head self-attention layer
    """
    def __init__(
        self,
        device: torch.device,
        n_units: int,
        h: int,
        dropout: float
    ) -> None:
        super(MultiHeadSelfAttention, self).__init__()
        self.device = device
        self.linearQ = torch.nn.Linear(n_units, n_units, device=self.device)
        self.linearK = torch.nn.Linear(n_units, n_units, device=self.device)
        self.linearV = torch.nn.Linear(n_units, n_units, device=self.device)
        self.linearO = torch.nn.Linear(n_units, n_units, device=self.device)
        self.d_k = n_units // h  # per-head dimensionality
        self.h = h
        self.dropout = dropout
        self.att = None  # attention for plot
    def __call__(self, x: torch.Tensor, batch_size: int) -> torch.Tensor:
        """Scaled dot-product self-attention over the flattened batch.

        x: (B*T, F); batch_size: B, needed to recover the (B, T, ...)
        layout for attention.  Returns (B*T, F).
        """
        # x: (BT, F)
        q = self.linearQ(x).reshape(batch_size, -1, self.h, self.d_k)
        k = self.linearK(x).reshape(batch_size, -1, self.h, self.d_k)
        v = self.linearV(x).reshape(batch_size, -1, self.h, self.d_k)
        # (B, h, T, d_k) @ (B, h, d_k, T) -> (B, h, T, T), scaled by sqrt(d_k)
        scores = torch.matmul(q.permute(0, 2, 1, 3), k.permute(0, 2, 3, 1)) \
            / np.sqrt(self.d_k)
        # scores: (B, h, T, T)
        self.att = F.softmax(scores, dim=3)
        p_att = F.dropout(self.att, self.dropout)
        x = torch.matmul(p_att, v.permute(0, 2, 1, 3))
        # Concatenate heads back to (B*T, h*d_k) and apply output projection.
        x = x.permute(0, 2, 1, 3).reshape(-1, self.h * self.d_k)
        return self.linearO(x)
class PositionwiseFeedForward(Module):
    """Position-wise feed-forward block: Linear -> ReLU -> dropout -> Linear.

    Expands n_units to d_units, applies the non-linearity and dropout, then
    projects back to n_units.  Operates row-wise on (B*T, n_units) input.
    """
    def __init__(
        self,
        device: torch.device,
        n_units: int,
        d_units: int,
        dropout: float
    ) -> None:
        super(PositionwiseFeedForward, self).__init__()
        self.device = device
        self.linear1 = torch.nn.Linear(n_units, d_units, device=self.device)
        self.linear2 = torch.nn.Linear(d_units, n_units, device=self.device)
        self.dropout = dropout
    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        # Expand, activate, regularize, project back.
        hidden = F.relu(self.linear1(x))
        hidden = F.dropout(hidden, self.dropout)
        return self.linear2(hidden)
class TransformerEncoder(Module):
    """Stack of pre-LayerNorm Transformer encoder blocks.

    Input (B, T, idim) is projected to n_units, then passed through
    n_layers of [LayerNorm -> self-attention -> residual -> LayerNorm ->
    feed-forward -> residual]; output is returned flattened as
    (B*T, n_units).
    """
    def __init__(
        self,
        device: torch.device,
        idim: int,
        n_layers: int,
        n_units: int,
        e_units: int,
        h: int,
        dropout: float
    ) -> None:
        super(TransformerEncoder, self).__init__()
        self.device = device
        self.linear_in = torch.nn.Linear(idim, n_units, device=self.device)
        # NOTE(review): lnorm_in is created but never applied in __call__ —
        # confirm whether input normalization was intended.
        self.lnorm_in = torch.nn.LayerNorm(n_units, device=self.device)
        self.n_layers = n_layers
        self.dropout = dropout
        # Sub-layers are registered via setattr so each layer i gets
        # uniquely named parameters (lnorm1_i, self_att_i, lnorm2_i, ff_i).
        for i in range(n_layers):
            setattr(
                self,
                '{}{:d}'.format("lnorm1_", i),
                torch.nn.LayerNorm(n_units, device=self.device)
            )
            setattr(
                self,
                '{}{:d}'.format("self_att_", i),
                MultiHeadSelfAttention(self.device, n_units, h, dropout)
            )
            setattr(
                self,
                '{}{:d}'.format("lnorm2_", i),
                torch.nn.LayerNorm(n_units, device=self.device)
            )
            setattr(
                self,
                '{}{:d}'.format("ff_", i),
                PositionwiseFeedForward(self.device, n_units, e_units, dropout)
            )
        self.lnorm_out = torch.nn.LayerNorm(n_units, device=self.device)
    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        """Encode x: (B, T, idim) -> flattened (B*T, n_units)."""
        # x: (B, T, F) ... batch, time, (mel)freq
        BT_size = x.shape[0] * x.shape[1]
        # e: (BT, F)
        e = self.linear_in(x.reshape(BT_size, -1))
        # Encoder stack
        for i in range(self.n_layers):
            # layer normalization
            e = getattr(self, '{}{:d}'.format("lnorm1_", i))(e)
            # self-attention
            s = getattr(self, '{}{:d}'.format("self_att_", i))(e, x.shape[0])
            # residual
            e = e + F.dropout(s, self.dropout)
            # layer normalization
            e = getattr(self, '{}{:d}'.format("lnorm2_", i))(e)
            # positionwise feed-forward
            s = getattr(self, '{}{:d}'.format("ff_", i))(e)
            # residual
            e = e + F.dropout(s, self.dropout)
        # final layer normalization
        # output: (BT, F)
        return self.lnorm_out(e)
class TransformerEDADiarization(Module):
    """End-to-end diarization: Transformer encoder + EDA attractors.

    Frame features are encoded into embeddings; the EncoderDecoderAttractor
    produces one attractor per speaker, and per-frame speaker activity is
    the dot product between embeddings and attractors.
    """
    def __init__(
        self,
        device: torch.device,
        in_size: int,
        n_units: int,
        e_units: int,
        n_heads: int,
        n_layers: int,
        dropout: float,
        vad_loss_weight: float,
        attractor_loss_ratio: float,
        attractor_encoder_dropout: float,
        attractor_decoder_dropout: float,
        detach_attractor_loss: bool,
    ) -> None:
        """ Self-attention-based diarization model.
        Args:
          in_size (int): Dimension of input feature vector
          n_units (int): Number of units in a self-attention block
          n_heads (int): Number of attention heads
          n_layers (int): Number of transformer-encoder layers
          dropout (float): dropout ratio
          vad_loss_weight (float) : weight for vad_loss
          attractor_loss_ratio (float)
          attractor_encoder_dropout (float)
          attractor_decoder_dropout (float)
        """
        self.device = device
        super(TransformerEDADiarization, self).__init__()
        self.enc = TransformerEncoder(
            self.device, in_size, n_layers, n_units, e_units, n_heads, dropout
        )
        self.eda = EncoderDecoderAttractor(
            self.device,
            n_units,
            attractor_encoder_dropout,
            attractor_decoder_dropout,
            detach_attractor_loss,
        )
        self.attractor_loss_ratio = attractor_loss_ratio
        self.vad_loss_weight = vad_loss_weight
    def get_embeddings(self, xs: torch.Tensor) -> torch.Tensor:
        """Run the transformer encoder and reshape back to (B, T, E)."""
        # NOTE(review): ilens is computed but never used — candidate for
        # removal.
        ilens = [x.shape[0] for x in xs]
        # xs: (B, T, F)
        pad_shape = xs.shape
        # emb: (B*T, E)
        emb = self.enc(xs)
        # emb: [(T, E), ...]
        emb = emb.reshape(pad_shape[0], pad_shape[1], -1)
        return emb
    def estimate_sequential(
        self,
        xs: torch.Tensor,
        args: SimpleNamespace
    ) -> List[torch.Tensor]:
        """Inference: per-recording activity probabilities per speaker.

        The speaker count comes from args.estimate_spk_qty, or is inferred
        by thresholding attractor existence probabilities with
        args.estimate_spk_qty_thr (the other option must be set to -1).
        """
        assert args.estimate_spk_qty_thr != -1 or \
            args.estimate_spk_qty != -1, \
            "Either 'estimate_spk_qty_thr' or 'estimate_spk_qty' \
            arguments have to be defined."
        emb = self.get_embeddings(xs)
        ys_active = []
        if args.time_shuffle:
            # Shuffle frame order per sequence before the EDA; attractors
            # are computed over the shuffled embeddings.
            orders = [np.arange(e.shape[0]) for e in emb]
            for order in orders:
                np.random.shuffle(order)
            attractors, probs = self.eda.estimate(
                torch.stack([e[order] for e, order in zip(emb, orders)]))
        else:
            attractors, probs = self.eda.estimate(emb)
        ys = torch.matmul(emb, attractors.permute(0, 2, 1))
        ys = [torch.sigmoid(y) for y in ys]
        for p, y in zip(probs, ys):
            if args.estimate_spk_qty != -1:
                # Keep the requested number of most probable attractors.
                sorted_p, order = torch.sort(p, descending=True)
                ys_active.append(y[:, order[:args.estimate_spk_qty]])
            elif args.estimate_spk_qty_thr != -1:
                # The first attractor below the threshold marks the end of
                # the speaker list; None keeps all if none falls below.
                silence = np.where(
                    p.data.to("cpu") < args.estimate_spk_qty_thr)[0]
                n_spk = silence[0] if silence.size else None
                ys_active.append(y[:, :n_spk])
            else:
                # NOTE(review): this NotImplementedError is instantiated but
                # never raised — a `raise` is most likely missing here.
                NotImplementedError(
                    'estimate_spk_qty or estimate_spk_qty_thr needed.')
        return ys_active
    def forward(
        self,
        xs: torch.Tensor,
        ts: torch.Tensor,
        n_speakers: List[int],
        args: SimpleNamespace
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Training pass: per-speaker logits and the attractor loss."""
        emb = self.get_embeddings(xs)
        if args.time_shuffle:
            orders = [np.arange(e.shape[0]) for e in emb]
            for order in orders:
                np.random.shuffle(order)
            attractor_loss, attractors = self.eda(
                torch.stack([e[order] for e, order in zip(emb, orders)]),
                n_speakers)
        else:
            attractor_loss, attractors = self.eda(emb, n_speakers)
        # ys: [(T, C), ...]
        ys = torch.matmul(emb, attractors.permute(0, 2, 1))
        return ys, attractor_loss
    def get_loss(
        self,
        ys: torch.Tensor,
        target: torch.Tensor,
        n_speakers: List[int],
        attractor_loss: torch.Tensor,
        vad_loss_weight: float,
        detach_attractor_loss: bool
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Combine PIT, VAD and attractor losses.

        Returns (total_loss, pit_loss); the total is
        pit + vad * vad_loss_weight + attractor * attractor_loss_ratio.
        """
        max_n_speakers = max(n_speakers)
        ts_padded = pad_labels(target, max_n_speakers)
        ts_padded = torch.stack(ts_padded)
        ys_padded = pad_labels(ys, max_n_speakers)
        ys_padded = torch.stack(ys_padded)
        loss = pit_loss_multispk(
            ys_padded, ts_padded, n_speakers, detach_attractor_loss)
        vad_loss_value = vad_loss(ys, target)
        return loss + vad_loss_value * vad_loss_weight + \
            attractor_loss * self.attractor_loss_ratio, loss
def pad_labels(ts: torch.Tensor, out_size: int) -> torch.Tensor:
    """Pad or truncate each label matrix to out_size speaker columns.

    Extra columns are filled with -1 (the "invalid speaker" marker);
    matrices with too many speakers keep only the first out_size columns.
    Returns a list of (T, out_size) float tensors.
    """
    padded = []
    for t in ts:
        n_spk = t.shape[1]
        if n_spk < out_size:
            # Fill missing speaker columns with the -1 padding marker.
            filler = -1 * torch.ones((t.shape[0], out_size - n_spk))
            padded.append(torch.cat((t, filler), dim=1))
        elif n_spk > out_size:
            # Truncate to the first out_size speaker columns.
            padded.append(t[:, :out_size].float())
        else:
            padded.append(t.float())
    return padded
def pad_sequence(
    features: List[torch.Tensor],
    labels: List[torch.Tensor],
    seq_len: int
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """Pad every (features, labels) pair in the batch to seq_len frames.

    Padding frames are filled with -1 in both features and labels.

    Raises:
        ValueError: if any sequence is longer than seq_len.
    """
    features_padded = []
    labels_padded = []
    # BUG FIX: the continuation strings below were missing the f prefix, so
    # the {...} placeholders were shown literally in the assertion message.
    assert len(features) == len(labels), (
        f"Features and labels in batch were expected to match but got "
        f"{len(features)} features and {len(labels)} labels.")
    for i, _ in enumerate(features):
        assert features[i].shape[0] == labels[i].shape[0], (
            f"Length of features and labels were expected to match but got "
            f"{features[i].shape[0]} and {labels[i].shape[0]}")
        length = features[i].shape[0]
        if length < seq_len:
            extend = seq_len - length
            features_padded.append(torch.cat((features[i], -torch.ones((
                extend, features[i].shape[1]))), dim=0))
            labels_padded.append(torch.cat((labels[i], -torch.ones((
                extend, labels[i].shape[1]))), dim=0))
        elif length > seq_len:
            # BUG FIX: raising a plain string is a TypeError; raise a real
            # exception type with a properly formatted message instead.
            raise ValueError(
                f"Sequence of length {length} was received but only "
                f"{seq_len} was expected.")
        else:
            features_padded.append(features[i])
            labels_padded.append(labels[i])
    return features_padded, labels_padded
def save_checkpoint(
    args,
    epoch: int,
    model: Module,
    optimizer: NoamOpt,
    loss: torch.Tensor
) -> None:
    """Serialize epoch, model/optimizer state and loss to
    <args.output_path>/models/checkpoint_<epoch>.tar, creating the
    directory if needed.
    """
    Path(f"{args.output_path}/models").mkdir(parents=True, exist_ok=True)
    checkpoint = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
    }
    torch.save(checkpoint, f"{args.output_path}/models/checkpoint_{epoch}.tar")
def load_checkpoint(args: SimpleNamespace, filename: str):
    """Restore a full training checkpoint.

    Builds a fresh model and optimizer from `args`, loads their states from
    `filename` and returns (epoch, model, optimizer, loss).
    """
    model = get_model(args)
    optimizer = setup_optimizer(args, model)
    # BUG FIX: the assertion message was an f-string without the path
    # placeholder, so failures did not say which file was missing.
    assert isfile(filename), \
        f"File {filename} does not exist."
    checkpoint = torch.load(filename)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    loss = checkpoint['loss']
    return epoch, model, optimizer, loss
def load_initmodel(args: SimpleNamespace):
    """Load the checkpoint given by ``args.initmodel`` (warm-start weights)."""
    return load_checkpoint(args, args.initmodel)
def get_model(args: SimpleNamespace) -> Module:
    """Instantiate the diarization model selected by ``args.model_type``.

    Only 'TransformerEDA' is supported; any other value raises ValueError.
    """
    if args.model_type != 'TransformerEDA':
        raise ValueError('Possible model_type is "TransformerEDA"')
    # Input features are spliced frames: the center frame plus context_size
    # neighbours on each side.
    return TransformerEDADiarization(
        device=args.device,
        in_size=args.feature_dim * (1 + 2 * args.context_size),
        n_units=args.hidden_size,
        e_units=args.encoder_units,
        n_heads=args.transformer_encoder_n_heads,
        n_layers=args.transformer_encoder_n_layers,
        dropout=args.transformer_encoder_dropout,
        attractor_loss_ratio=args.attractor_loss_ratio,
        attractor_encoder_dropout=args.attractor_encoder_dropout,
        attractor_decoder_dropout=args.attractor_decoder_dropout,
        detach_attractor_loss=args.detach_attractor_loss,
        vad_loss_weight=args.vad_loss_weight,
    )
def average_checkpoints(
    device: torch.device,
    model: Module,
    models_path: str,
    epochs: str
) -> Module:
    """Average the weights of several saved checkpoints.

    `epochs` is a spec string expanded by parse_epochs (e.g. "90,92-95").
    Each checkpoint_<e>.tar under `models_path` is loaded into a copy of
    `model`; the state dicts are averaged element-wise and loaded into a
    fresh copy of `model`, which is returned.
    """
    epochs = parse_epochs(epochs)
    states_dict_list = []
    for e in epochs:
        # Load into a deep copy so `model` itself is never modified.
        copy_model = copy.deepcopy(model)
        checkpoint = torch.load(join(
            models_path,
            f"checkpoint_{e}.tar"), map_location=device)
        copy_model.load_state_dict(checkpoint['model_state_dict'])
        states_dict_list.append(copy_model.state_dict())
    avg_state_dict = average_states(states_dict_list, device)
    avg_model = copy.deepcopy(model)
    avg_model.load_state_dict(avg_state_dict)
    return avg_model
def average_states(
    states_list: List[Dict[str, torch.Tensor]],
    device: torch.device,
) -> List[Dict[str, torch.Tensor]]:
    """Return the element-wise average of several model state dicts.

    BUG FIX: the original accumulated into states_list[0] in place,
    mutating the caller's dict, and never moved the first state's tensors
    to `device`, so accumulation could fail with a device mismatch when
    the remaining states were moved.  The accumulator is now a cloned,
    device-placed copy of the first state.
    """
    qty = len(states_list)
    avg_state = {
        key: value.to(device).clone()
        for key, value in states_list[0].items()
    }
    for i in range(1, qty):
        for key in avg_state:
            avg_state[key] += states_list[i][key].to(device)
    for key in avg_state:
        # True division promotes integer buffers to float, as before.
        avg_state[key] = avg_state[key] / qty
    return avg_state
def parse_epochs(string: str) -> List[int]:
    """Expand an epoch spec like "90,92-95" into a list of epoch numbers.

    A range "a-b" expands to a+1 .. b inclusive (the lower bound is
    exclusive); single numbers are taken as-is.
    """
    epochs = []
    for token in string.split(','):
        if '-' in token:
            bounds = token.split('-')
            epochs.extend(range(int(bounds[0]) + 1, int(bounds[1]) + 1))
        else:
            epochs.append(int(token))
    return epochs
| 18,812 | 33.519266 | 79 | py |
EEND | EEND-main/eend/backend/__init__.py | 0 | 0 | 0 | py | |
EEND | EEND-main/eend/common_utils/features.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
from common_utils.kaldi_data import KaldiData
from typing import Callable, Tuple
import numpy as np
import librosa
def get_labeledSTFT(
    kaldi_obj: KaldiData,
    rec: str,
    start: int,
    end: int,
    frame_size: int,
    frame_shift: int,
    n_speakers: int = None,
    use_speaker_id: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Extracts STFT and corresponding diarization labels for
    given recording id and start/end times
    Args:
        kaldi_obj (KaldiData)
        rec (str): recording id
        start (int): start frame index
        end (int): end frame index
        frame_size (int): number of samples in a frame
        frame_shift (int): number of shift samples
        n_speakers (int): number of speakers
            if None, the value is given from data
        use_speaker_id (bool): if True, additionally return S, a
            (n_frames, n_all_speakers)-shaped activity matrix indexed over
            every speaker of the corpus (sorted spk2utt order)
    Returns:
        Y: STFT
            (n_frames, n_bins)-shaped np.complex64 array,
        T: label
            (n_frmaes, n_speakers)-shaped np.int32 array.
    """
    data, rate = kaldi_obj.load_wav(
        rec, start * frame_shift, end * frame_shift)
    Y = stft(data, frame_size, frame_shift)
    filtered_segments = kaldi_obj.segments[rec]
    # filtered_segments = kaldi_obj.segments[kaldi_obj.segments['rec'] == rec]
    # Speakers appearing in this recording, in np.unique (sorted) order;
    # their position here defines the column index in T.
    speakers = np.unique(
        [kaldi_obj.utt2spk[seg['utt']] for seg
         in filtered_segments]).tolist()
    if n_speakers is None:
        n_speakers = len(speakers)
    T = np.zeros((Y.shape[0], n_speakers), dtype=np.int32)
    if use_speaker_id:
        all_speakers = sorted(kaldi_obj.spk2utt.keys())
        S = np.zeros((Y.shape[0], len(all_speakers)), dtype=np.int32)
    for seg in filtered_segments:
        speaker_index = speakers.index(kaldi_obj.utt2spk[seg['utt']])
        if use_speaker_id:
            all_speaker_index = all_speakers.index(
                kaldi_obj.utt2spk[seg['utt']])
        # Segment boundaries in frame units (rounded to nearest frame).
        start_frame = np.rint(
            seg['st'] * rate / frame_shift).astype(int)
        end_frame = np.rint(
            seg['et'] * rate / frame_shift).astype(int)
        # Clip the segment to the requested [start, end) window; a None
        # bound means "runs to the chunk edge" for the slicing below.
        rel_start = rel_end = None
        if start <= start_frame and start_frame < end:
            rel_start = start_frame - start
        if start < end_frame and end_frame <= end:
            rel_end = end_frame - start
        # NOTE(review): a segment that starts before `start` AND ends after
        # `end` covers the whole chunk yet sets neither rel_start nor
        # rel_end, so it is skipped entirely — confirm this is intended.
        if rel_start is not None or rel_end is not None:
            T[rel_start:rel_end, speaker_index] = 1
            if use_speaker_id:
                S[rel_start:rel_end, all_speaker_index] = 1
    if use_speaker_id:
        return Y, T, S
    else:
        return Y, T
def splice(Y: np.ndarray, context_size: int = 0) -> np.ndarray:
    """Concatenate each frame with its context_size left/right neighbours.

    Args:
        Y: (n_frames, n_featdim) feature array
        context_size: frames taken on each side; context_size = 5 yields
            11 concatenated frames per row.
    Returns:
        (n_frames, n_featdim * (2 * context_size + 1)) read-only view.
    """
    n_frames, n_feats = Y.shape
    # Zero-pad context_size frames at both ends so edge frames get context.
    padded = np.pad(Y, [(context_size, context_size), (0, 0)], 'constant')
    # A strided view turns each window of (2*context_size+1) consecutive
    # frames into one flat row without copying; hence writeable=False.
    spliced = np.lib.stride_tricks.as_strided(
        np.ascontiguousarray(padded),
        (n_frames, n_feats * (2 * context_size + 1)),
        (Y.itemsize * n_feats, Y.itemsize), writeable=False)
    return spliced
def stft(
    data: np.ndarray,
    frame_size: int,
    frame_shift: int
) -> np.ndarray:
    """ Compute STFT features
    Args:
        data: audio signal
            (n_samples,)-shaped np.float32 array
        frame_size: number of samples in a frame (must be a power of two)
        frame_shift: number of samples between frames
    Returns:
        stft: STFT frames
            (n_frames, n_bins)-shaped np.complex64 array
    """
    # FFT size is frame_size rounded up to the next power of two.
    fft_size = 1 << (frame_size - 1).bit_length()
    spec = librosa.stft(data, n_fft=fft_size, win_length=frame_size,
                        hop_length=frame_shift).T
    # HACK: librosa.stft produces one excessive frame when the signal
    # length divides evenly by the shift; drop it to keep counts consistent.
    if len(data) % frame_shift == 0:
        spec = spec[:-1]
    return spec
def subsample(
    Y: np.ndarray,
    T: np.ndarray,
    subsampling: int = 1
) -> Tuple[np.ndarray, np.ndarray]:
    """Keep every ``subsampling``-th frame of features Y and labels T."""
    return Y[::subsampling], T[::subsampling]
def transform(
    Y: np.ndarray,
    sampling_rate: int,
    feature_dim: int,
    transform_type: str,
    dtype: type = np.float32,
) -> np.ndarray:
    """ Transform STFT feature
    Args:
        Y: STFT
            (n_frames, n_bins)-shaped array
        transform_type:
            one of 'logmel', 'logmel_meannorm', 'logmel_meanvarnorm'
            (dispatched with str.startswith, so it must be a string;
            any other value raises ValueError)
        dtype: output data type
            np.float32 is expected
    Returns:
        Y (numpy.array): transformed feature
    """
    Y = np.abs(Y)
    if transform_type.startswith('logmel'):
        # n_fft recovered from the number of STFT bins (n_bins = n_fft/2+1).
        n_fft = 2 * (Y.shape[1] - 1)
        mel_basis = librosa.filters.mel(sampling_rate, n_fft, feature_dim)
        # Power spectrum -> mel energies -> log10, floored to avoid log(0).
        Y = np.dot(Y ** 2, mel_basis.T)
        Y = np.log10(np.maximum(Y, 1e-10))
        if transform_type == 'logmel_meannorm':
            # Per-recording cepstral mean normalization.
            mean = np.mean(Y, axis=0)
            Y = Y - mean
        elif transform_type == 'logmel_meanvarnorm':
            # Per-recording mean and variance normalization.
            mean = np.mean(Y, axis=0)
            Y = Y - mean
            std = np.maximum(np.std(Y, axis=0), 1e-10)
            Y = Y / std
        # plain 'logmel' falls through with no normalization
    else:
        raise ValueError('Unknown transform_type: %s' % transform_type)
    return Y.astype(dtype)
| 5,743 | 31.089385 | 78 | py |
EEND | EEND-main/eend/common_utils/kaldi_data.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
# This library provides utilities for kaldi-style data directory.
import io
import numpy as np
import os
import soundfile as sf
import subprocess
import sys
from functools import lru_cache
from typing import Any, Dict, Tuple
def load_segments_hash(segments_file):
    """Parse a Kaldi segments file into {utt: (rec, start_sec, end_sec)}.

    Returns None when the file does not exist.
    """
    if not os.path.exists(segments_file):
        return None
    segments = {}
    with open(segments_file) as f:
        for line in f:
            utt, rec, st, et = line.strip().split()
            segments[utt] = (rec, float(st), float(et))
    return segments
def load_segments_rechash(segments_file: str) -> Dict[str, Dict[str, Any]]:
    """Parse a Kaldi segments file into {rec: [{'utt', 'st', 'et'}, ...]}.

    Returns None when the file does not exist.
    """
    if not os.path.exists(segments_file):
        return None
    by_rec = {}
    with open(segments_file) as f:
        for line in f:
            utt, rec, st, et = line.strip().split()
            by_rec.setdefault(rec, []).append(
                {'utt': utt, 'st': float(st), 'et': float(et)})
    return by_rec
def load_wav_scp(wav_scp_file: str) -> Dict[str, str]:
    """ return dictionary { rec: wav_rxfilename } """
    # Split on the first whitespace only: the rxfilename may itself contain
    # spaces (e.g. a Kaldi "command |" pipeline).
    mapping = {}
    for line in open(wav_scp_file):
        rec, rxfile = line.strip().split(None, 1)
        mapping[rec] = rxfile
    return mapping
@lru_cache(maxsize=1)
def load_wav(
    wav_rxfilename: str,
    start: int,
    end: int
) -> Tuple[np.ndarray, int]:
    """ This function reads audio file and return data in numpy.float32 array.
    "lru_cache" holds recently loaded audio so that can be called
    many times on the same audio file.
    OPTIMIZE: controls lru_cache size for random access,
    considering memory size
    """
    if wav_rxfilename.endswith('|'):
        # input piped command
        # Kaldi-style "command |" rxfilename: run the shell pipeline and
        # parse its stdout as a wav stream.
        p = subprocess.Popen(wav_rxfilename[:-1], shell=True,
                             stdout=subprocess.PIPE)
        data, samplerate = sf.read(io.BytesIO(p.stdout.read()),
                                   dtype='float32')
        # cannot seek
        data = data[start:end]
    elif wav_rxfilename == '-':
        # stdin
        data, samplerate = sf.read(sys.stdin, dtype='float32')
        # cannot seek
        data = data[start:end]
    else:
        # normal wav file
        # soundfile can seek a real file, so only [start, end) is read.
        data, samplerate = sf.read(wav_rxfilename, start=start, stop=end)
    return data, samplerate
def load_utt2spk(utt2spk_file: str) -> Dict[str, str]:
    """ returns dictionary { uttid: spkid } """
    mapping = {}
    for line in open(utt2spk_file):
        utt, spk = line.strip().split(None, 1)
        mapping[utt] = spk
    return mapping
def load_spk2utt(spk2utt_file: str) -> Dict[str, str]:
    """ returns dictionary { spkid: list of uttids } """
    if not os.path.exists(spk2utt_file):
        return None
    mapping = {}
    for line in open(spk2utt_file):
        fields = line.strip().split()
        mapping[fields[0]] = fields[1:]
    return mapping
def load_reco2dur(reco2dur_file: str) -> Dict[str, float]:
    """ returns dictionary { recid: duration } """
    if not os.path.exists(reco2dur_file):
        return None
    durations = {}
    for line in open(reco2dur_file):
        rec, dur = line.strip().split(None, 1)
        durations[rec] = float(dur)
    return durations
class KaldiData:
    """View over a Kaldi-style data directory.

    Parses segments, utt2spk, wav.scp, reco2dur and spk2utt once at
    construction; the loaders for optional files (segments, reco2dur,
    spk2utt) return None when the file is absent.
    """
    def __init__(self, data_dir: str):
        self.data_dir = data_dir
        self.segments = load_segments_rechash(
            os.path.join(self.data_dir, 'segments'))
        self.utt2spk = load_utt2spk(
            os.path.join(self.data_dir, 'utt2spk'))
        self.wavs = load_wav_scp(
            os.path.join(self.data_dir, 'wav.scp'))
        self.reco2dur = load_reco2dur(
            os.path.join(self.data_dir, 'reco2dur'))
        self.spk2utt = load_spk2utt(
            os.path.join(self.data_dir, 'spk2utt'))
    def load_wav(
        self,
        recid: str,
        start: int,
        end: int
    ) -> Tuple[np.ndarray, int]:
        """Read samples [start, end) of recording `recid`.

        Delegates to the lru_cache'd module-level load_wav, so repeated
        reads of the same recording reuse the cached audio.
        """
        data, rate = load_wav(
            self.wavs[recid], start, end)
        return data, rate
| 3,928 | 30.685484 | 78 | py |
EEND | EEND-main/eend/common_utils/diarization_dataset.py | #!/usr/bin/env python3
# Copyright 2019 Hitachi, Ltd. (author: Yusuke Fujita)
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
import common_utils.features as features
import common_utils.kaldi_data as kaldi_data
import numpy as np
import torch
from typing import Tuple
import logging
def _count_frames(data_len: int, size: int, step: int) -> int:
    # no padding at edges, last remaining samples are ignored
    # Number of full windows of `size` that fit when sliding by `step`.
    # int() truncates toward zero; for data shorter than `size` the result
    # is <= 0, and callers iterate range(...) so that means "no windows".
    return int((data_len - size + step) / step)
def _gen_frame_indices(
    data_length: int,
    size: int,
    step: int,
    use_last_samples: bool,
    min_length: int,
) -> None:
    """Yield (start, end) frame windows of `size`, advancing by `step`.

    With use_last_samples, one final shorter window covering the tail is
    also yielded when it is longer than min_length frames.
    """
    # i starts at -1 so that when no full window fits (short recording),
    # the tail branch below can still yield (0, data_length).
    i = -1
    for i in range(_count_frames(data_length, size, step)):
        yield i * step, i * step + size
    # `i` intentionally leaks out of the loop: it is the index of the last
    # full window (or -1 if there was none).
    if use_last_samples and i * step + size < data_length:
        if data_length - (i + 1) * step > min_length:
            yield (i + 1) * step, data_length
class KaldiDiarizationDataset(torch.utils.data.Dataset):
    """Dataset of fixed-size chunks over a Kaldi-style data directory.

    Each item is (features, labels, recording_id): subsampled, spliced
    log-mel features and the corresponding per-speaker activity labels.
    """
    def __init__(
        self,
        data_dir: str,
        chunk_size: int,
        context_size: int,
        feature_dim: int,
        frame_shift: int,
        frame_size: int,
        input_transform: str,
        n_speakers: int,
        sampling_rate: int,
        shuffle: bool,
        subsampling: int,
        use_last_samples: bool,
        min_length: int,
        dtype: type = np.float32,
    ):
        self.data_dir = data_dir
        self.dtype = dtype
        self.chunk_size = chunk_size
        self.context_size = context_size
        self.frame_size = frame_size
        self.frame_shift = frame_shift
        self.feature_dim = feature_dim
        self.subsampling = subsampling
        self.input_transform = input_transform
        self.n_speakers = n_speakers
        self.sampling_rate = sampling_rate
        self.chunk_indices = []
        self.data = kaldi_data.KaldiData(self.data_dir)
        # make chunk indices: filepath, start_frame, end_frame
        for rec in self.data.wavs:
            data_len = int(
                self.data.reco2dur[rec] * sampling_rate / frame_shift)
            data_len = int(data_len / self.subsampling)
            if chunk_size > 0:
                for st, ed in _gen_frame_indices(
                    data_len,
                    chunk_size,
                    chunk_size,
                    use_last_samples,
                    min_length
                ):
                    # Indices are stored at the original (pre-subsampling)
                    # frame rate.
                    self.chunk_indices.append(
                        (rec, st * self.subsampling, ed * self.subsampling))
            else:
                # chunk_size <= 0: one chunk spanning the whole recording.
                self.chunk_indices.append(
                    (rec, 0, data_len * self.subsampling))
        # BUG FIX: the second half of this message was a plain string, so
        # "{len(self.chunk_indices)}" was logged literally; it needs the
        # f prefix too.
        logging.info(f"#files: {len(self.data.wavs)}, "
                     f"#chunks: {len(self.chunk_indices)}")
        self.shuffle = shuffle
    def __len__(self) -> int:
        return len(self.chunk_indices)
    def __getitem__(self, i: int) -> Tuple[np.ndarray, np.ndarray]:
        rec, st, ed = self.chunk_indices[i]
        Y, T = features.get_labeledSTFT(
            self.data,
            rec,
            st,
            ed,
            self.frame_size,
            self.frame_shift,
            self.n_speakers
        )
        Y = features.transform(
            Y, self.sampling_rate, self.feature_dim, self.input_transform)
        Y_spliced = features.splice(Y, self.context_size)
        Y_ss, T_ss = features.subsample(Y_spliced, T, self.subsampling)
        # If the sample contains more than "self.n_speakers" speakers,
        # extract top-(self.n_speakers) speakers
        if self.n_speakers and T_ss.shape[1] > self.n_speakers:
            selected_spkrs = np.argsort(
                T_ss.sum(axis=0))[::-1][:self.n_speakers]
            T_ss = T_ss[:, selected_spkrs]
        return torch.from_numpy(np.copy(Y_ss)), torch.from_numpy(
            np.copy(T_ss)), rec
| 3,895 | 31.739496 | 76 | py |
EEND | EEND-main/eend/common_utils/gpu_utils.py | #!/usr/bin/env python3
# Copyright 2022 Brno University of Technology (author: Federico Landini)
# Licensed under the MIT license.
from safe_gpu import safe_gpu
def use_single_gpu(gpus_qty: int) -> safe_gpu.GPUOwner:
    """Acquire at most one free GPU via safe_gpu and return the owner.

    The returned GPUOwner must stay referenced for as long as the GPU is
    in use.
    """
    assert gpus_qty < 2, "Multi-GPU still not available."
    gpu_owner = safe_gpu.GPUOwner(nb_gpus=gpus_qty)
    return gpu_owner
| 352 | 26.153846 | 73 | py |
EEND | EEND-main/eend/common_utils/metrics.py | #!/usr/bin/env python3
# Copyright 2022 Brno University of Technology (author: Federico Landini, Mireia Diez)
# Licensed under the MIT license.
from typing import Dict
import torch
def calculate_metrics(
    target: torch.Tensor,
    decisions: torch.Tensor,
    threshold: float = 0.5,
    round_digits: int = 2,
) -> Dict[str, float]:
    """Compute batch-level DER/VAD/OSD error components (in percent).

    Args:
        target: (B, T, S) reference activity labels; -1 marks padding.
        decisions: (B, T, S) predicted probabilities, binarized at
            `threshold`.
        threshold: decision threshold applied to `decisions`.
        round_digits: decimals kept in the reported percentages.
    Returns:
        dict with DER/VAD/OSD false-alarm and miss rates plus the average
        reference and predicted speaker counts.
    """
    epsilon = 1e-6
    res = {}
    decisions = (decisions > threshold).float()
    res["avg_ref_spk_qty"] = 0
    res["avg_pred_spk_qty"] = 0
    res["DER_FA"] = 0
    res["DER_miss"] = 0
    res["VAD_FA"] = 0
    res["VAD_miss"] = 0
    res["OSD_FA"] = 0
    res["OSD_miss"] = 0
    # Each error is accumulated per sequence as they might need
    # different masking. Each sequence counts for the errors independently
    # and the total speech/overlap counts are acumulated.
    # Final values are estimated for the batch and returned.
    active_frames_tot = 0
    speech_frames_tot = 0
    overlap_frames_tot = 0
    for seq_num in range(target.shape[0]):
        t_seq = target[seq_num, :, :]
        # Drop padded frames (label -1) before counting.
        mask = (t_seq != -1)
        t_seq = torch.reshape(
            torch.masked_select(t_seq, mask), (-1, t_seq.shape[1]))
        d_seq = decisions[seq_num, :, :]
        d_seq = torch.reshape(
            torch.masked_select(d_seq, mask), (-1, d_seq.shape[1]))
        ref_spk_qty = t_seq.sum(axis=1)
        pred_spk_qty = d_seq.sum(axis=1)
        res["avg_ref_spk_qty"] += torch.mean(ref_spk_qty.double())
        res["avg_pred_spk_qty"] += torch.mean(pred_spk_qty.double())
        # active_frames has frames where at least one speaker is active
        active_frames = torch.where(ref_spk_qty != 0)[0]
        active_frames_tot += active_frames.shape[0]
        # speech_frames has #frames with speech (if n active speakers, n times)
        speech_frames = ref_spk_qty[active_frames].sum()
        speech_frames_tot += speech_frames
        # overlap_frames has frames where at least two speakers are active
        overlap_frames = torch.where(ref_spk_qty > 1)[0]
        overlap_frames_tot += overlap_frames.shape[0]
        diff_qty = pred_spk_qty - ref_spk_qty
        res["DER_FA"] += diff_qty[torch.where(diff_qty > 0)].sum()
        res["DER_miss"] += -diff_qty[torch.where(diff_qty < 0)].sum()
        # conf. error not calculated as computing all permutations is expensive
        # TODO use Hungarian algorithm?
        res["VAD_FA"] += torch.where(ref_spk_qty[torch.where(pred_spk_qty > 0)[0]] < 1)[0].shape[0]
        res["VAD_miss"] += torch.where(pred_spk_qty[torch.where(ref_spk_qty > 0)[0]] < 1)[0].shape[0]
        res["OSD_FA"] += torch.where(ref_spk_qty[torch.where(pred_spk_qty > 1)[0]] < 2)[0].shape[0]
        res["OSD_miss"] += torch.where(pred_spk_qty[torch.where(ref_spk_qty > 1)[0]] < 2)[0].shape[0]
    # divide by the numerators estimated in the whole batch
    # BUG FIX: the original computed torch.round(x * 10**d / 10**d), where
    # the scaling cancels out, so round_digits had no effect and the DER
    # components were rounded to whole percents.  The multiplication must
    # happen inside round() and the division outside.
    res["DER_FA"] = torch.round(
        100 * res["DER_FA"] / (epsilon + speech_frames_tot)
        * 10**round_digits) / (10**round_digits)
    res["DER_miss"] = torch.round(
        100 * res["DER_miss"] / (epsilon + speech_frames_tot)
        * 10**round_digits) / (10**round_digits)
    # Consistency fix: honor round_digits instead of a hard-coded 2.
    res["VAD_FA"] = round(100 * res["VAD_FA"] / (epsilon + active_frames_tot), round_digits)
    res["VAD_miss"] = round(100 * res["VAD_miss"] / (epsilon + active_frames_tot), round_digits)
    res["OSD_FA"] = round(100 * res["OSD_FA"] / (epsilon + overlap_frames_tot), round_digits)
    res["OSD_miss"] = round(100 * res["OSD_miss"] / (epsilon + overlap_frames_tot), round_digits)
    res["avg_ref_spk_qty"] = res["avg_ref_spk_qty"] / target.shape[0]
    res["avg_pred_spk_qty"] = res["avg_pred_spk_qty"] / target.shape[0]
    return res
def new_metrics() -> Dict[str, float]:
    """Return a fresh metrics accumulator with every tracked metric at 0."""
    keys = (
        'loss',
        'loss_standard',
        'loss_attractor',
        'avg_ref_spk_qty',
        'avg_pred_spk_qty',
        'DER_FA',
        'DER_miss',
        'VAD_FA',
        'VAD_miss',
        'OSD_FA',
        'OSD_miss',
    )
    return {key: 0.0 for key in keys}
def reset_metrics(acum_dict: Dict[str, float]) -> Dict[str, float]:
    """Zero every entry of *acum_dict* in place and return the same dict."""
    for key in acum_dict:
        acum_dict[key] = 0.0
    return acum_dict
def update_metrics(
    acum_dict: Dict[str, float],
    new_dict: Dict[str, float]
) -> Dict[str, float]:
    """Add each value of *new_dict* onto the matching key of *acum_dict*.

    Mutates *acum_dict* in place and returns it.  Raises AssertionError
    (as before) when *new_dict* carries a key that was never registered
    in the accumulator.
    """
    for k, v in new_dict.items():
        # FIX: the original message used a backslash line-continuation
        # inside the f-string, which embedded a run of indentation spaces
        # in the user-visible text; build it with implicit concatenation.
        assert k in acum_dict, (
            f"The key {k} is not defined in the dictionary "
            "where metrics are accumulated."
        )
        acum_dict[k] += v
    return acum_dict
| 4,438 | 36.940171 | 128 | py |
ICWS2021-quantum-classical-microservices | ICWS2021-quantum-classical-microservices-main/utils_tsp.py | # imports
import numpy as np
import networkx as nx
import dimod
import dwave_networkx as dnx
# helper function to compute distance from route
def get_distance(route, data):
"""
find distance for given route = [0, 4, 3, 1, 2] and original data
"""
# get the total distance without return
total_dist = 0
for idx, node in enumerate(route[:-1]):
dist = data[route[idx+1]][route[idx]]
total_dist += dist
print('Total distance (without return):', total_dist)
# add distance between start and end point to complete cycle
return_distance = data[route[0]][route[-1]]
# print('Distance between start and end:', return_distance)
# get distance for full cyle
distance_with_return = total_dist + return_distance
print('Total distance (including return):', distance_with_return)
return total_dist, distance_with_return
# helper function for solving TSP with D-Wave adapted from Ocean
# including some heuristical filling if not all contraints have been met
def traveling_salesperson(G, sampler=None, lagrange=None, weight='weight',
start=None, **sampler_args):
"""Returns an approximate minimum traveling salesperson route.
Defines a QUBO with ground states corresponding to the
minimum routes and uses the sampler to sample
from it.
A route is a cycle in the graph that reaches each node exactly once.
A minimum route is a route with the smallest total edge weight.
Parameters
----------
G : NetworkX graph
The graph on which to find a minimum traveling salesperson route.
This should be a complete graph with non-zero weights on every edge.
sampler :
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
lagrange : number, optional (default None)
Lagrange parameter to weight constraints (visit every city once)
versus objective (shortest distance route).
weight : optional (default 'weight')
The name of the edge attribute containing the weight.
start : node, optional
If provided, the route will begin at `start`.
sampler_args :
Additional keyword parameters are passed to the sampler.
Returns
-------
route : list
List of nodes in order to be visited on a route
Examples
--------
>>> import dimod
...
>>> G = nx.Graph()
>>> G.add_weighted_edges_from({(0, 1, .1), (0, 2, .5), (0, 3, .1), (1, 2, .1),
... (1, 3, .5), (2, 3, .1)})
>>> dnx.traveling_salesperson(G, dimod.ExactSolver(), start=0) # doctest: +SKIP
[0, 1, 2, 3]
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample.
"""
# get lists with all cities
list_cities = list(G.nodes())
# Get a QUBO representation of the problem
Q = dnx.traveling_salesperson_qubo(G, lagrange, weight)
# use the sampler to find low energy states
response = sampler.sample_qubo(Q, **sampler_args)
sample = response.first.sample
# fill route with None values
route = [None]*len(G)
# get cities from sample
# NOTE: Prevent duplicate city entries by enforcing only one occurrence per city along route
for (city, time), val in sample.items():
if val and (city not in route):
route[time] = city
# run heuristic replacing None values
if None in route:
# get not assigned cities
cities_unassigned = [city for city in list_cities if city not in route]
cities_unassigned = list(np.random.permutation(cities_unassigned))
for idx, city in enumerate(route):
if city == None:
route[idx] = cities_unassigned[0]
cities_unassigned.remove(route[idx])
# cycle solution to start at provided start location
if start is not None and route[0] != start:
# rotate to put the start in front
idx = route.index(start)
route = route[idx:] + route[:idx]
return route
| 4,549 | 33.469697 | 96 | py |
ICWS2021-quantum-classical-microservices | ICWS2021-quantum-classical-microservices-main/api/main.py | from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
from gate import *
from annealing import *
# Human-readable device keys mapped to AWS Braket ARNs for gate-based
# backends (Rigetti/IonQ QPUs, managed simulators); "local" selects the
# in-process simulator in gate.execute().
gate_machines_arn= { "riggeti_aspen8":"arn:aws:braket:::device/qpu/rigetti/Aspen-8",
            "riggeti_aspen9":"arn:aws:braket:::device/qpu/rigetti/Aspen-9",
            "ionq":"arn:aws:braket:::device/qpu/ionq/ionQdevice",
            "sv1":"arn:aws:braket:::device/quantum-simulator/amazon/sv1",
            "tn1":"arn:aws:braket:::device/quantum-simulator/amazon/tn1",
            "local":"local"}
# D-Wave annealers available to the /execute/adiabatic endpoint.
adiabatic_machines_arn= { "dwave_advantage":"arn:aws:braket:::device/qpu/d-wave/Advantage_system1",
            "dwave_dw2000":"arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6"}
s3_folder = ("amazon-braket-7c2f2fa45286", "api") # Use the S3 bucket you created during onboarding
app = Flask(__name__)
CORS(app)
# #OPTION 1: Create a session in AWS in the API
# session = boto3.Session(
#     aws_access_key_id=YOUR_ACCESS_KEY_ID,
#     aws_secret_access_key=YOUR_SECRET_ACCESS_JKEY,
#     aws_session_token=SESSION_TOKEN,
#     region_name="us-east-1"
# )
# aws_session = AwsSession(session)
# #OPTION 2 (the one choosed): Configure credentials and region in the files ~/.aws/credentials and ~/.aws/config, respectively
@app.route('/execute/adiabatic', methods=["get"])
def execute_adiabatic_quantum_tsp():
    """Solve TSP on a D-Wave annealer for an uploaded adjacency matrix.

    Query param ``device`` selects a key of ``adiabatic_machines_arn``;
    the graph arrives as a multipart upload in the ``file`` field.
    Returns JSON with the best route and its distance, or 400 on a bad
    device/file.
    NOTE(review): the route is registered for GET but reads
    ``request.files``, which clients normally send only with POST --
    confirm the intended HTTP method.
    """
    machine = request.args.get('device')
    if machine not in adiabatic_machines_arn:
        return "Not machine", 400
    if 'file' not in request.files:
        return "Not file", 400
    file = request.files['file']
    if file.filename == '':
        return "Not file", 400
    # HACK: fixed on-disk filename; concurrent requests would overwrite
    # each other's upload.  Consider a per-request temporary file.
    filename = "graph.txt"
    file.save(filename)
    data, G, weights = def_graph(filename)
    best_route, best_distance = TSP(data, G, weights,
                                    s3_folder,
                                    adiabatic_machines_arn[machine])
    response = {
        "best_route":best_route,
        "best_distance":best_distance
    }
    return jsonify(response)
@app.route('/execute/gate', methods=["get"])
def execute_gate_based_quantum_tsp():
    """Solve a fixed 4-city TSP via Quantum Phase Estimation.

    Runs QPE once per hard-coded eigenstate bitstring, reads the phase
    register from the measurement, and returns the Hamiltonian cycle
    whose phase (interpreted as a binary integer) is smallest.
    """
    machine = request.args.get('device')
    if machine not in gate_machines_arn:
        return "Not machine", 400
    # Eigenstates of the cost unitary; each one encodes one candidate cycle
    # (see get_minor_route_by_phase for the mapping back to city orders).
    eigenstates = ["11000110", "10001101", "11100001"]
    results = {}
    for e in eigenstates:
        counts = QPE(e, s3_folder, gate_machines_arn[machine])
        # First key of the counts dict.  NOTE(review): this is not
        # necessarily the most frequent outcome -- confirm intent.
        measure = list(counts.keys())[0]
        # Bit layout: first n_ancilla bits are the phase, the rest the
        # eigenstate register.
        eigenstate = measure[n_ancilla:]
        phase = measure[:n_ancilla]
        results[eigenstate] = phase
    hamiltonian_cycle = get_minor_route_by_phase(results)
    response = {
        "best_route":hamiltonian_cycle
    }
    return jsonify(response)
if __name__ == '__main__':
    # Development-server entry point: bind the REST API to localhost:33888.
    app.run(host="localhost", port=33888)
| 2,787 | 27.742268 | 127 | py |
ICWS2021-quantum-classical-microservices | ICWS2021-quantum-classical-microservices-main/api/annealing.py | import boto3
from braket.aws import AwsDevice
from braket.ocean_plugin import BraketSampler, BraketDWaveSampler
import numpy as np
import networkx as nx
import dimod
import dwave_networkx as dnx
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dwave.system.composites import EmbeddingComposite
import matplotlib.pyplot as plt
# magic word for producing visualizations in notebook
from collections import defaultdict
import itertools
import pandas as pd
# local imports
from utils_tsp import get_distance, traveling_salesperson
# fix random seed for reproducibility
# (the same value is reused as the spring-layout seed in def_graph below)
seed = 1
np.random.seed(seed)
def def_graph(file):
    """Load a whitespace-separated adjacency matrix and build its graph.

    Parameters
    ----------
    file : str or file-like
        Source accepted by ``pandas.read_csv``.

    Returns
    -------
    (data, G, weights) : the raw DataFrame, the NetworkX graph built from
    it, and the graph's edge-weight mapping.
    """
    # FIX: raw string for the regex separator -- '\s' in a plain literal
    # is an invalid escape sequence (SyntaxWarning on modern Python).
    data = pd.read_csv(file, sep=r'\s+', header=None)
    G = nx.from_pandas_adjacency(data)
    # The spring-layout position computation of the original was never
    # used by any caller and has been dropped, as were the unused
    # nodes/edges views.
    weights = nx.get_edge_attributes(G, 'weight')
    return data, G, weights
def get_lagrange_list(G):
    """Build candidate Lagrange multipliers around the default estimate.

    The default follows dwave_networkx's heuristic: roughly an 'average'
    tour length, ``total_weight * N / |E|`` (a good Lagrange value is
    usually 75-150% of the objective).  Returns the integers from 80% to
    110% of that default, for a small hyper-parameter sweep.
    """
    # NOTE: the original also built the full TSP QUBO here and discarded
    # the result; that expensive dead computation has been removed.
    weight = 'weight'
    if G.number_of_edges() > 0:
        lagrange = G.size(weight=weight) * G.number_of_nodes() / G.number_of_edges()
    else:
        # Degenerate graph: fall back to a small constant.
        lagrange = 2
    print('Default Lagrange parameter:', lagrange)
    # create list around default value for HPO
    lagrange_list = list(np.arange(int(0.8 * lagrange), int(1.1 * lagrange)))
    print('Lagrange parameter for HPO:', lagrange_list)
    return lagrange_list
def TSP(data, G, weights, s3_folder, machine):
    """Sweep Lagrange parameters on a D-Wave sampler; return the best tour.

    Builds an embedded Braket D-Wave sampler for *machine*, solves the TSP
    once per candidate Lagrange value and keeps the shortest closed cycle.
    Returns (best_route, best_distance).
    """
    annealer = EmbeddingComposite(BraketDWaveSampler(s3_folder, machine))
    num_shots = 1000
    start_city = 0
    # Pessimistic initial bound: no cycle can exceed the total edge weight.
    best_distance = sum(weights.values())
    best_route = [None] * len(G)
    for lagrange in get_lagrange_list(G):
        print('Running quantum annealing for TSP with Lagrange parameter=', lagrange)
        route = traveling_salesperson(G, annealer, lagrange=lagrange,
                                      start=start_city, num_reads=num_shots,
                                      answer_mode="histogram")
        # Keep only the closed-cycle distance for the comparison.
        _, distance_with_return = get_distance(route, data)
        if distance_with_return < best_distance:
            best_distance, best_route = distance_with_return, route
    return best_route, best_distance
| 3,019 | 26.962963 | 101 | py |
ICWS2021-quantum-classical-microservices | ICWS2021-quantum-classical-microservices-main/api/utils_tsp.py | # imports
import numpy as np
import networkx as nx
import dimod
import dwave_networkx as dnx
# helper function to compute distance from route
def get_distance(route, data):
"""
find distance for given route = [0, 4, 3, 1, 2] and original data
"""
# get the total distance without return
total_dist = 0
for idx, node in enumerate(route[:-1]):
dist = data[route[idx+1]][route[idx]]
total_dist += dist
print('Total distance (without return):', total_dist)
# add distance between start and end point to complete cycle
return_distance = data[route[0]][route[-1]]
# print('Distance between start and end:', return_distance)
# get distance for full cyle
distance_with_return = total_dist + return_distance
print('Total distance (including return):', distance_with_return)
return total_dist, distance_with_return
# helper function for solving TSP with D-Wave adapted from Ocean
# including some heuristical filling if not all contraints have been met
def traveling_salesperson(G, sampler=None, lagrange=None, weight='weight',
start=None, **sampler_args):
# get lists with all cities
list_cities = list(G.nodes())
# Get a QUBO representation of the problem
Q = dnx.traveling_salesperson_qubo(G, lagrange, weight)
# use the sampler to find low energy states
response = sampler.sample_qubo(Q, **sampler_args)
sample = response.first.sample
# fill route with None values
route = [None]*len(G)
# get cities from sample
# NOTE: Prevent duplicate city entries by enforcing only one occurrence per city along route
for (city, time), val in sample.items():
if val and (city not in route):
route[time] = city
# run heuristic replacing None values
if None in route:
# get not assigned cities
cities_unassigned = [city for city in list_cities if city not in route]
cities_unassigned = list(np.random.permutation(cities_unassigned))
for idx, city in enumerate(route):
if city == None:
route[idx] = cities_unassigned[0]
cities_unassigned.remove(route[idx])
# cycle solution to start at provided start location
if start is not None and route[0] != start:
# rotate to put the start in front
idx = route.index(start)
route = route[idx:] + route[:idx]
return route
# def traveling_salesperson(G, sampler=None, lagrange=None,
# weight='weight', start=None,
# **sampler_args):
# list_cities = list(G.nodes())
#
# Q = dnx.traveling_salesperson_qubo(G, lagrange, weight)
#
# response = sampler.sample_qubo(Q, **sampler_args)
# sample = response.first.sample
#
# route = [None] * len(G)
#
# for (city, time), val in sample.items():
# if val and (city not in route):
# route[time] = city
#
# if None in route:
# cities_unassigned = \
# [city for city in list_cities if city not in route]
# cities_unassigned = \
# list(np.random.permutation(cities_unassigned))
# for idx, city in enumerate(route):
# if city == None:
# route[idx] = cities_unassigned[0]
# cities_unassigned.remove(route[idx])
#
# if start is not None and route[0] != start:
# idx = route.index(start)
# route = route[idx:] + route[:idx]
#
# return route
| 3,506 | 32.721154 | 96 | py |
ICWS2021-quantum-classical-microservices | ICWS2021-quantum-classical-microservices-main/api/gate.py | import time
import math
# AWS imports: Import Braket SDK modules
import boto3
from braket.circuits import Circuit, Gate, Observable
from braket.devices import LocalSimulator
from braket.aws import AwsDevice
# Number of ancilla (phase-readout) qubits used by QPE.
n_ancilla = 6
# Required length of the eigenstate bitstrings passed to QPE.
n_eigenvector = 8
def recover_task_result(task_load):
    """Poll an AWS task once per second until it reports COMPLETED.

    Returns ``task_load.result()`` on completion, or None after 100000
    unsuccessful polls.
    """
    for _ in range(100000):
        status = task_load.state()
        print('Status of (reconstructed) task:', status)
        print('\n')
        # wait for job to complete
        # terminal_states = ['COMPLETED', 'FAILED', 'CANCELLED']
        if status == 'COMPLETED':
            return task_load.result()
        time.sleep(1)
    print("Quantum execution time exceded")
    return None
def execute(circuit, s3_folder, machine):
    """Run *circuit* on the selected backend and return measurement counts.

    "local" uses the in-process simulator with 100000 shots.  Remote QPUs
    are polled asynchronously via recover_task_result (queue times can be
    long); managed simulators (sv1/tn1) block on the task result directly.
    AWS devices use 1000 shots.
    """
    if machine=="local":
        device = LocalSimulator()
        result = device.run(circuit, shots=100000).result()
        return result.measurement_counts
    device = AwsDevice(machine)
    if "sv1" not in machine and "tn1" not in machine:
        # Real QPU: allow up to 5 days of polling before Braket gives up.
        # NOTE(review): if recover_task_result times out it returns None,
        # so the attribute access below would raise AttributeError.
        task = device.run(circuit, s3_folder, shots=1000, poll_timeout_seconds=5 * 24 * 60 * 60)
        return recover_task_result(task).measurement_counts
    else:
        task = device.run(circuit, s3_folder, shots=1000)
        return task.result().measurement_counts
def get_minor_route_by_phase(results):
    """Pick the Hamiltonian cycle whose measured phase is smallest.

    *results* maps eigenstate bitstrings to phase bitstrings; phases are
    compared as base-2 integers.  Returns the city order for the winner.
    """
    cycle_per_eigenstate = {"11000110": [0,1,2,3], "10001101":[0,3,1,2], "11100001":[0,1,3,2]}
    # min() keeps the first key attaining the minimum, matching the
    # original strict '<' scan; this also removes the `== None` identity
    # comparison smell of the hand-written loop.
    minor_eigenstate = min(results, key=lambda e: int(results[e], 2))
    return cycle_per_eigenstate[minor_eigenstate]
def crz(circ, tetha, a, b):
    """Append a controlled-RZ(tetha) with control *a* and target *b*.

    Uses the standard two-CNOT decomposition: RZ(t/2) on the target,
    CNOT, RZ(-t/2), CNOT.  NOTE(review): assumes Circuit.rz takes
    (target, angle) -- confirm against the braket SDK version in use.
    """
    circ.rz(b, tetha/2)
    circ.cnot(a,b)
    circ.rz(b, -tetha/2)
    circ.cnot(a,b)
def controlled_U(circ,a,b,c,d,n,k):
    """this is corresponding to the C-U_j in equation (8)

    Appends one controlled application of the cost unitary: control qubit
    is ancilla ``n_ancilla - k``; the unitary acts on the register qubit
    pair (n_ancilla + n, n_ancilla + n + 1) with phase parameters a..d.
    """
    crz(circ, c-a, n_ancilla-k, n_ancilla+n)
    circ.rz(n_ancilla-k, a)
    crz(circ, b-a, n_ancilla-k, n_ancilla+n+1)
    # Toffoli sandwich implementing the doubly-controlled phase d+c-b-a.
    circ.ccnot(n_ancilla-k, n_ancilla+n, n_ancilla+n+1)
    circ.rz(n_ancilla+n+1, d+c-b-a)
    circ.ccnot(n_ancilla-k, n_ancilla+n, n_ancilla+n+1)
def qft_dagger(circ, n):
    """n-qubit QFTdagger on q in circ.

    Inverse quantum Fourier transform over ancilla qubits 0..n-1:
    controlled rotations with negative angles followed by a Hadamard,
    processed from qubit n-1 down to 0.
    """
    for j in range(n):
        k = (n-1) - j
        for m in range(k):
            # Angle halves for each step of separation between k and m.
            crz(circ, -math.pi/float(2**(k-m)), k, m)
        circ.h(k)
def QPE(eigenstate, s3_folder, machine):
    """Run quantum phase estimation for one 8-qubit *eigenstate*.

    Prepares the eigenstate register behind the 6 ancilla qubits, applies
    the controlled powers U^(2^(k-1)) for each ancilla k, undoes the QFT
    on the ancillas and executes the circuit on *machine*.

    Returns the measurement counts, or None when *eigenstate* does not
    have exactly ``n_eigenvector`` bits.
    """
    # implementation limited to 8 qubits eigenstates
    if len(eigenstate) != n_eigenvector:
        return None
    qpe = Circuit()
    # Load |eigenstate> into the register qubits placed after the ancillas.
    for i in range(0, n_eigenvector):
        if eigenstate[i] == "1":
            qpe.x(n_ancilla + i)
    # Put every ancilla into uniform superposition.
    for i in range(n_ancilla):
        qpe.h(i)
    # Controlled U^(2^(k-1)) for ancilla k.  The original unrolled these
    # 1 + 2 + 4 + 8 + 16 + 32 repetitions by hand; this loop emits the
    # identical gate sequence.
    for k in range(1, n_ancilla + 1):
        for _ in range(2 ** (k - 1)):
            controlled_U(qpe, 0, math.pi / 2, math.pi / 8, math.pi / 4, 0, k)
            controlled_U(qpe, math.pi / 2, 0, math.pi / 4, math.pi / 4, 2, k)
            controlled_U(qpe, math.pi / 8, math.pi / 4, 0, math.pi / 8, 4, k)
            controlled_U(qpe, math.pi / 4, math.pi / 4, math.pi / 8, 0, 6, k)
    qft_dagger(qpe, n_ancilla)
    # for i in range(n_ancilla):
    #    qpe.measure(q[i],c[i])
    # print(qpe)
    results = execute(qpe, s3_folder, machine)
    return results
return results | 4,822 | 33.205674 | 96 | py |
strees | strees-master/charge.py | import sys
import os, os.path
from optparse import OptionParser
from matplotlib.colors import LogNorm
from numpy import *
import h5py
try:
    import pylab
except ImportError:
    # Plotting is optional; pylab is only used when --show is given.
    pass
# Axis indices into (n, 3) position arrays.
X, Y, Z = 0, 1, 2
def main():
    """Read a grow_tree HDF5 output file and report charge evolution.

    For every saved step, computes the total charge, the charge-weighted
    mean radius and a two-point field estimate, printing one row per step.
    Optionally plots Q(t) (--show) and/or writes a text table (--ofile)
    that also includes the dimensionless columns t/tau, q/q0 and r/ell.
    (Python 2 code: note the print statements below.)
    """
    parser = OptionParser()
    parser.add_option("--show", dest="show", action="store_true",
                      help="Open the matplotlib window?", default=False)
    parser.add_option("--ofile", "-o", dest="ofile", action="store",
                      help="Save to this file", default=None)
    (opts, args) = parser.parse_args()
    fname = args[0]
    fp = h5py.File(fname, "r")
    # NOTE: this local deliberately shadows the module-level main().
    main = fp['main']
    run_name = main.attrs['run_name']
    steps = main.keys()
    # Physical parameters saved by the simulation.
    eta = main.attrs['conductance']
    mu_T = main.attrs['tip_mobility']
    E0 = main.attrs['external_field']
    external_field_vector = array([0.0, 0.0, E0])
    eps0 = 1 / (4 * pi * main.attrs['maxwell_factor'])
    d = main.attrs['conductor_thickness']
    # Characteristic charge, length and time scales used to
    # non-dimensionalize the output columns.
    q0 = eta**2 / (mu_T**2 * E0 * eps0)
    ell = eta / (mu_T * E0 * eps0)
    tau = ell / (mu_T * E0)
    t = zeros((len(steps)),)
    q = zeros((len(steps)),)
    r = zeros((len(steps)),)
    E = zeros((len(steps)),)
    for i, step in enumerate(steps):
        qi = array(main[step]['q'])
        ri = array(main[step]['r'])
        phi = array(main[step]['phi'])
        # Remove the background-field contribution from the potential.
        phi = phi - dot(ri, external_field_vector)
        t[i] = main[step].attrs['t']
        q[i] = sum(qi)
        r[i] = weighted_r(ri, qi)
        # Field estimate from the potential difference of the first two nodes.
        E[i] = (phi[1] - phi[0]) / abs(ri[0, Z] - ri[1, Z])
        print "%g\t%g\t%g\t%g\t#%s" % (t[i], q[i], r[i], E[i], step)
    if opts.show:
        pylab.xlabel("t")
        pylab.ylabel("Q")
        pylab.plot(t, q, lw=1.7)
        pylab.show()
    if opts.ofile is not None:
        savetxt(opts.ofile, c_[t, q, r, E, t / tau, q / q0, r / ell])
def weighted_r(r, q):
    """ Return the q-weighted average of the radii |r|.

    Falls back to 0.0 when the weights sum to zero (i.e. no charges). """
    radii = sqrt((r**2).sum(axis=1))
    try:
        return average(radii, weights=q)
    except ZeroDivisionError:
        # numpy.average raises ZeroDivisionError for all-zero weights.
        return 0.0
if __name__ == '__main__':
    # Script entry point: parse CLI options and process the HDF5 file.
    main()
| 2,129 | 23.204545 | 72 | py |
strees | strees-master/grow_tree.py | """ grow_tree.py is the main module of the code and the one you invoke from
the command line to start a simulation.
To start a simulation with parameters read from a file simulation.ini
simply invoke this module as::
python grow_tree.py simulation.ini
The output will be written in a file name simulation.h5 with a
`HDF5 <http://www.hdfgroup.org/HDF5//>`_ format.
"""
# NOTES:
#
# In this module and in all the rest, the geometrical dimension always
# corresponds to the last axis of an array. For example to save the locations
# of k points we use an array with shape [k, 3].
#
# All-caps variable names are in general global variables that are read
# from the input file. We use globals().update(...) so they are avaliable
# everywhere.
#
import sys
import time
from numpy import *
from numpy.random import rand, randn, seed
from numpy.linalg import norm
from scipy.integrate import odeint, ode
from readinput import load_input
import tree
from refinement import Box, containing_box
import mpolar
from datafile import DataFile
import os.path
from angles import branching_angles
import parameters as param_descriptors
import electrodes
from contexttimer import ContextTimer
# Cache of the most recently computed potential (updated inside relax()).
latest_phi = None
# Uniform background field vector; set in main() from EXTERNAL_FIELD.
EXTERNAL_FIELD_VECTOR = None
# Electrode object or None; set in main() via init_electrode().
ELECTRODE = None
# Axis indices into (n, 3) position arrays.
X, Y, Z = 0, 1, 2
class TooLongTimestep(Exception):
    """Raised by step() when a tip would advance more than MAX_STEP in one
    time step; adapt_step() catches it and retries with a smaller dt."""
    pass
def main():
    """ This is the main function of the code it is the starting point of
    a simulation.

    Reads the input file named on the command line, seeds the RNG, builds
    the initial tree and then advances it in fixed output steps, saving
    every state to the HDF5 output file.  (Python 2 code.) """
    # Load input parameters from the input file and add the, in allcaps
    # to the global namespace.
    global EXTERNAL_FIELD_VECTOR, ELECTRODE
    parameters = load_input(sys.argv[1], param_descriptors)
    globals().update(dict((key.upper(), item)
                          for key, item in parameters.iteritems()))
    if RANDOM_SEED >= 0:
        seed(RANDOM_SEED)
    EXTERNAL_FIELD_VECTOR = array([0.0, 0.0, EXTERNAL_FIELD])
    ELECTRODE = init_electrode()
    # init a tree from scratch
    tr, r0, q0 = init_from_scratch(INITIAL_NODES)
    dt = TIME_STEP
    t = r_[0:END_TIME:dt]
    r, q = r0, q0
    dfile = DataFile(OUT_FILE, parameters=parameters)
    branched = False
    for i, it in enumerate(t):
        # with ContextTimer("plotting"):
        #     plot_projections(r, q)
        #     pylab.savefig('tree_%.3d.png' % i)
        # print 't = %g\ttree_%.3d.png' % (it, i)
        print "%d/%d t = %g" % (i, len(t), it)
        branch_prob = BRANCHING_PROBABILITY
        # A single forced branching can be triggered either at a given
        # time or when the leading tip passes a given z; branch_prob=inf
        # makes the branching certain in the next step.
        if SINGLE_BRANCHING_TIME > 0:
            if it > SINGLE_BRANCHING_TIME:
                if not branched:
                    branch_prob = inf
                    branched = True
        if SINGLE_BRANCHING_Z != 0 and not branched:
            zterm = r[tr.terminals()[0], Z]
            if zterm < SINGLE_BRANCHING_Z:
                if not branched:
                    branch_prob = inf
                    branched = True
        r, q = adapt_step(tr, r, q, dt, p=branch_prob)
        with ContextTimer("saving %d" % i):
            phi = solve_phi(r, q)
            # error / error_dq are module globals set inside relax().
            dfile.add_step(it, tr, r, q, phi,
                           error=error, error_dq=error_dq)
        if END_WITH_RECONNECTION and tr.reconnects(r):
            print "Finishing due to a reconnection."
            break
def init_from_scratch(n=0):
    """ Init a 'tree' with the root node plus n additional nodes in a vertical
    string.  Returns (tree, positions, charges). """
    tr = tree.Tree()
    root = tr.make_root()
    for i in xrange(n):
        tr.extend([i,])
    r0 = tr.zeros(dim=3)
    k = r0.shape[0]
    # Nodes are stacked downward along z, one conductor thickness apart.
    r0[:, Z] = -arange(k) * CONDUCTOR_THICKNESS
    # All nodes start uncharged.
    q0 = zeros((k, ))
    return tr, r0, q0
def adapt_step(tr, r0, q0, dt, p=0.0):
    """ Performs a step of duration dt but divides it into sub steps
    to make sure that the length of a channel is never longer than ``MAX_STEP``.
    """
    r, q = r0, q0
    sub_dt = dt
    pending = 1
    # Every time step() reports a too-long advance, halve the sub-step
    # and double the number of sub-steps still to run.
    while pending > 0:
        try:
            r, q = step(tr, r, q, sub_dt, p=p)
        except TooLongTimestep:
            sub_dt /= 2.
            pending *= 2
        else:
            pending -= 1
    return r, q
def step(tr, r, q0, dt, p=0.0):
    """ Performs an elementary step, including relaxation and advancing
    the channels.
    Arguments:
      * *tr*: the :class:`tree.Tree` instance containing the tree structure.
      * *r*: an array containing the node locations.
      * *q0*: an array containing the charges of the nodes.
      * *dt*: the time step.
      * *p*: branching probability per unit channel length.
    Raises TooLongTimestep when the fastest tip would advance more than
    MAX_STEP, so that adapt_step() can retry with a smaller dt.
    Returns the extended (positions, charges) arrays.
    """
    iterm = tr.terminals()
    # Build the FMM box hierarchy around all charges (and the electrode).
    box = containing_box(r, electrode=ELECTRODE)
    box.set_charges(r, q0,
                    max_charges=MAX_CHARGES_PER_BOX,
                    min_length=16 * CONDUCTOR_THICKNESS)
    box.build_lists(recurse=True)
    box.set_field_evaluation(r[iterm, :])
    # 1. Calculate the velocities at t
    v0 = velocities(box, tr, r, q0)
    # 2. Relax the tree from t to t + dt
    q1 = relax(box, tr, r, q0, dt)
    # 3. Calculate the velocities again at t + dt
    v1 = velocities(box, tr, r, q1)
    # 4. Extend the tree with the leap-frog algo.
    v = 0.5 * (v0 + v1)
    # 5. Branch some of the tips
    vabs = sqrt(sum(v**2, axis=1))
    # If the longest step is longer than MAX_STEP, raise an exception
    # telling the calling function to reduce dt.
    if (max(vabs) * dt) > MAX_STEP:
        raise TooLongTimestep
    # Each tip branches with probability p * |v| * dt (per-length prob.).
    does_branch = rand(*iterm.shape) < (p * vabs * dt)
    # New node slots: two per branching tip, one per plain moving tip.
    radv = empty((sum(does_branch) + sum(vabs > 0), 3))
    j = 0
    for i, branches in enumerate(does_branch):
        if not branches:
            if vabs[i] > 0:
                radv[j, :] = r[iterm[i], :] + dt * v[i, :]
                j += 1
        else:
            # Note that slow channels, although unlikely, may branch.
            # However, not if their velocity is 0
            dr1, dr2 = symmetric_gaussian(dt * v[i, :], BRANCHING_SIGMA)
            radv[j, :] = r[iterm[i], :] + dr1
            radv[j + 1, :] = r[iterm[i], :] + dr2
            j += 2
    rnew = concatenate((r, radv), axis=0)
    # New nodes start with zero charge.
    qnew = concatenate((q1, zeros((sum(does_branch)
                                   + sum(vabs > 0),))), axis=0)
    tr.extend(sort(r_[iterm[vabs > 0],
                      iterm[does_branch]]))
    return rnew, qnew
def velocities(box, tr, r, q):
    """ Calculates the electric fields at the tips of the tree and from
    them obtains the propagation velocities of the *streamers* """
    iterm = tr.terminals()
    # When we have a single charge the velocity is simply given by the
    # external electric field
    if len(q) == 1:
        return TIP_MOBILITY * external_field(r[iterm, :])
    # Fast multipole passes: refresh charges, upward/downward sweeps,
    # then evaluate the field at the tip locations set on the box.
    box.update_charges(q)
    box.upward(MULTIPOLAR_TERMS)
    box.downward()
    box.solve_all(a=CONDUCTOR_THICKNESS, field=True)
    box.collect_solutions(field=True)
    # Add the tips' own-charge contribution and the background field.
    sfields = self_fields(tr, r, q)
    E = (MAXWELL_FACTOR * box.field
         + MAXWELL_FACTOR * sfields
         + external_field(r[iterm, :]))
    absE = sqrt(sum(E**2, axis=1))
    # An unit vector with the same direction as E
    # NOTE(review): divides by absE -- a tip with exactly zero field
    # would produce NaNs here.
    u = E / absE[:, newaxis]
    # Now we can calculate the absolute value of the velocity
    # (tips below the TIP_MIN_FIELD threshold do not move).
    vabs = TIP_MOBILITY * where(absE > TIP_MIN_FIELD, absE - TIP_MIN_FIELD, 0)
    v = u * vabs[:, newaxis]
    return v
def self_fields(tr, r, q):
    """ Calculates the fields created by the charges at the streamer tips
    on themselves. """
    iterm = tr.terminals()
    parents = tr.parents()[iterm]
    # Unit vector along each terminal segment (parent -> tip).
    dr = r[iterm, :] - r[parents, :]
    u = dr / (sqrt(sum(dr**2, axis=1)))[:, newaxis]
    # Tip charge acting at the conductor-thickness length scale, directed
    # along the segment.
    return q[iterm][:, newaxis] * u / CONDUCTOR_THICKNESS**2
def relax(box, tr, r, q0, dt):
    """ Relax the conductor :class:`tree.Tree` *tr* for a time *dt*.
    Arguments:
      * *tr*: the :class:`tree.Tree` instance containing the tree structure.
      * *r*: an array containing the node locations.
      * *q0*: an array containing the charges of the nodes.
      * *dt*: the time step.
    Integrates dq/dt = M . (phi + phi_external) with the VODE solver and
    returns the charges at t + dt.
    """
    global latest_phi, error, error_dq
    #with ContextTimer("re-computing Ohm matrix"):
    # If we have an electrode, we fix q[0] by setting the first row of
    # M to zero.
    fix = [] if ELECTRODE is None else [0]
    # On Fri Aug 31 11:46:47 2012 I found a factor 2 here that I do not know
    # where it comes from.  Probably it was a reliq of the mid-points approach
    # (But it was duplicated in ohm_matrix anyway!). I am removing it.
    M = CONDUCTANCE * tr.ohm_matrix(r, fix=fix)
    n = len(q0)  # (unused)
    def f(t0, q):
        # Right-hand side of dq/dt for the ODE integrator.
        global latest_phi, error, error_dq
        phi = solve_phi(r, q, box)
        # err = sqrt(sum((phi - box.phi)**2)) / len(phi)
        latest_phi = phi
        # NOTE(review): latest_phi was just set to phi, so `error` is
        # identically zero here (and so is error_dq).  Looks like leftover
        # debugging of the FMM-vs-direct comparison -- confirm intent.
        error = phi - latest_phi
        error_dq = M.dot(error)
        dq = M.dot(phi + external_potential(r))
        return dq
    d = ode(f).set_integrator('vode', nsteps=250000, rtol=1e-8)
    d.set_initial_value(q0, 0.0)
    d.integrate(dt)
    return d.y
def solve_phi(r, q, box=None):
    """Compute the potential at every node from charges *q* at *r*.

    Uses the FMM box hierarchy when there are at least FMM_THRESHOLD
    charges and a *box* is provided; otherwise falls back to direct
    pairwise summation (mirroring charges through the electrode when one
    is present).
    """
    if len(q) >= FMM_THRESHOLD and box is not None:
        # with ContextTimer("FMM") as ct_fmm:
        box.update_charges(q)
        box.upward(MULTIPOLAR_TERMS)
        box.downward()
        box.solve_all(a=CONDUCTOR_THICKNESS, field=False)
        box.collect_solutions(field=False)
        phi = MAXWELL_FACTOR * box.phi
    else:
        # with ContextTimer("direct") as ct_direct:
        if ELECTRODE is None:
            rx, qx = r, q
        else:
            # Extend with the electrode's image charges.
            rx, qx = ELECTRODE.extend(r, q)
        phi0 = MAXWELL_FACTOR * mpolar.direct(rx, qx, r,
                                              CONDUCTOR_THICKNESS)
        phi = phi0
    return phi
def symmetric_gaussian(dr, sigma):
    """ Samples a branch from a symmetric, gaussian branching model.
    In a plane perpendicular to dr we sample dr1 from a cylindrically
    symmetric gaussian distribution; the two branching points are dr1 and
    its symmetric vector wrt dr.  Returns the pair (dr1, dr2). """
    u = dr / norm(dr)
    # We find two unit vectors orthonormal to u (also dr); note that this
    # fails if u is parallel to x !!!
    ex = array([1.0, 0, 0])
    e1 = ex - dot(u, ex) * u
    e1 = e1 / norm(e1)
    e2 = cross(u, e1)
    if not BRANCH_IN_XZ:
        # Isotropic: gaussian offsets along both perpendicular directions.
        p, q = sigma * randn(2)
    else:
        if FIXED_BRANCHING_ANGLE > 0:
            # Deterministic half-angle within the x-z plane.
            p, q = norm(dr) * tan(FIXED_BRANCHING_ANGLE / 2), 0.0
        else:
            p, q = sigma * randn(), 0.0
    dr1 = dr + (p * e1 + q * e2)
    dr2 = dr - (p * e1 + q * e2)
    if FIXED_BRANCHING_ANGLE > 0:
        # This is to avoid too long segments at branching points.
        # Presently I am doing it only here to preserve compatibility
        # with the algorithm described in the paper as of Sat Mar 23 20:58:46 2013
        dr1 *= norm(dr) / norm(dr1)
        dr2 *= norm(dr) / norm(dr2)
    return dr1, dr2
def external_field(r):
    """ Calculates the external field at points *r*. This is calculated
    from ``EXTERNAL_FIELD`` and ``ELECTRODE_POTENTIAL``. As the code stands now
    only these two possibilities are physically meaningful:
    1. Specify ``EXTERNAL_FIELD`` with a planar electrode or with no electrode,
       but use ``ELECTRODE_POTENTIAL=0``.
    2. ``ELECTRODE_POTENTIAL != 0``, but ``ELECTRODE_GEOMETRY = 'sphere'`` and
       ``EXTERNAL_FIELD = 0``.
    However, we allow the user to shoot himself on his foot, so he can
    select any arbitrary combination of these parameters. Beware.
    """
    field = EXTERNAL_FIELD_VECTOR[newaxis, :]
    if ELECTRODE_POTENTIAL == 0:
        return field
    # Add the monopole field of a spherical electrode centered on the axis.
    center = array([0.0, 0.0, ELECTRODE_RADIUS])
    dr = r - center[newaxis, :]
    rabs = sqrt(sum(dr**2, axis=1))
    field = field + (ELECTRODE_RADIUS * ELECTRODE_POTENTIAL
                     * dr / rabs[:, newaxis]**3)
    return field
def external_potential(r):
    """ Calculates the external potential at points *r*. See above, in
    external_field for the risks here.
    """
    # Uniform-field contribution: phi = -E . r
    phi = -dot(r, EXTERNAL_FIELD_VECTOR)
    if ELECTRODE_POTENTIAL == 0:
        return phi
    # Monopole potential of the spherical electrode.
    center = array([0.0, 0.0, ELECTRODE_RADIUS])
    dr = r - center[newaxis, :]
    rabs = sqrt(sum(dr**2, axis=1))
    phi = phi + ELECTRODE_RADIUS * ELECTRODE_POTENTIAL / rabs
    return phi
def init_electrode():
    """ Uses the input parameters to select an electrode geometry.

    Dispatches on ``ELECTRODE_GEOMETRY`` ('planar'/'plane', 'sphere',
    'null'/'none') and returns the electrode object, or None for no
    electrode.  Raises KeyError for an unknown geometry name. """
    def planar():
        """ Planar electrode. Always located at z=0. """
        return electrodes.Planar(0)
    def sphere():
        """ Sphere electrode. Located at [0, 0, -ELECTRODE_RADIUS]. """
        # NOTE(review): the docstring above says -ELECTRODE_RADIUS but the
        # center below is at +ELECTRODE_RADIUS -- confirm sign convention.
        center = array([0.0, 0.0, ELECTRODE_RADIUS])
        return electrodes.Sphere(center, ELECTRODE_RADIUS)
    def null():
        """ No electrode. """
        # return electrodes.NullElectrode()
        # This is actually faster:
        return None
    d = dict(planar=planar, plane=planar, sphere=sphere, null=null, none=null)
    try:
        return d[ELECTRODE_GEOMETRY]()
    except KeyError:
        raise KeyError("Electrode geometry '%s' not recognized"
                       % ELECTRODE_GEOMETRY)
def plot_projections(r, q):
    """Scatter-plot the charges in the XZ, YZ and XY plane projections,
    colored by charge.  (Debug helper; only used from commented-out code
    in main().)"""
    X, Y, Z = 0, 1, 2
    names = ["X", "Y", "Z"]
    axes = [(X, Z), (Y, Z), (X, Y)]
    pylab.subplots_adjust(wspace=0.35, hspace=0.25, right=0.95, top=0.95)
    for i, (d1, d2) in enumerate(axes):
        # NOTE(review): matplotlib subplot indices start at 1; i starts at
        # 0 here, and `faceted=` is long gone from scatter() -- this only
        # worked on old matplotlib versions.
        ax = pylab.subplot(2, 2, i)
        ax.clear()
        ax.scatter(r[:, d1], r[:, d2], c=q,
                   s=5.0, faceted=False),
        ax.set_xlabel(names[d1])
        ax.set_ylabel(names[d2])
        #ax.quiver(r[iterm, 0], r[iterm, 2], field[0, :], field[2, :])
        #ax.set_xlim([0.0, 1.0])
        #ax.set_ylim([-0.2, 1.0])
if __name__ == '__main__':
    # Simulation entry point: read the input file named on the command line.
    main()
| 13,725 | 28.32906 | 82 | py |
strees | strees-master/tree.py | """ This module contains the data representation for the structure of trees.
Note that we separate the structure of the branched tree from its
realization, that would contain things such as charges, positions, etc.
that evolve even when the structure is fixed.
"""
from random import random as uniform
from scipy.sparse import lil_matrix, csr_matrix
from numpy import *
class Tree(object):
""" Instances of the Tree class contain the topological information
of a tree discharge: i.e. they encapsulate the relations between different
segments in a tree but not about locations, conductivities etc.."""
    def __init__(self):
        # We must carry a global (tree-level) index to access the parameter
        # data arrays
        self.n = 0          # number of segments registered so far
        self.segments = []  # flat list of segments, in insertion order
        self.root = None    # first segment added (see add_segment)
def add_segment(self, segment):
""" Adds a *segment* to this tree. Returns the index of the segment
inside the tree. """
if self.n == 0:
self.root = segment
self.segments.append(segment)
index = self.n
self.n += 1
return index
def parents(self, root_index=0):
""" Builds an array with the indices to each segment's parent.
The root segment gets an index *root_index*. """
p = zeros((self.n,), dtype='i')
for i, segment in enumerate(self.segments):
try:
p[i] = segment.parent.index
except AttributeError:
p[i] = root_index
return p
def make_root(self):
""" Creates a segment node to be root of this tree. """
root = Segment()
root.set_tree(self)
self.root = root
return root
def terminals(self):
""" Finds all segments contained in the tree that do not
have any children. Returns an array with segment indices. """
l = []
for i, segment in enumerate(self.segments):
if not segment.children:
l.append(i)
return array(l)
def branches(self):
""" Finds all indices of segments that branch in the tree"""
l = []
for i, segment in enumerate(self.segments):
if len(segment.children) > 1:
l.append(i)
return array(l)
def extend(self, indices):
""" Extends the tree adding one children to each segment indexed
by *indices*, in that order. This is used to extend a propagating tree.
"""
for i in indices:
new_segment = Segment()
self.segments[i].add_child(new_segment)
def zeros(self, dim=None):
""" Returns an array that can hold all the data needed for a variable
in this tree's segments. For multi-dimension data, use *dim*. """
if dim is None:
return zeros((self.n,))
else:
return zeros((self.n, dim))
def lengths(self, endpoints):
""" Returns an array with the segment lengths of the tree, given
an array with the *endpoints*. """
parents = self.parents()
l = sqrt(sum((endpoints - endpoints[parents, :])**2, axis=1))
return l
def midpoints(self, endpoints):
""" Returns an array with the segment midpoints of the tree, given
an array with the *endpoints*. """
parents = self.parents()
return 0.5 * (endpoints + endpoints[parents, :])
def ohm_matrix(self, endpoints, fix=[]):
""" Builds a matrix M that will provide the evolution of charges
in every segment of the tree as dq/dt = M . phi, where phi is
the potential at the center of each segment and '.' is the dot product.
This function builds the matrix from scratch. Usually it is much
better to keep updating the matrix as the tree grows.
* *endpoints* must contain an array with the endpoints.
* *fix* contains an array with indices of nodes with a fixed charge.
usually that means the root node.
"""
l = self.lengths(endpoints)
linv = 1.0 / l
# We build the matrix in LIL format first, later we convert to a
# format more efficient for matrix-vector multiplications
M = lil_matrix((self.n, self.n))
for segment in self:
i = segment.index
m = 0.0
for other in segment.children:
j = other.index
M[i, j] = linv[j]
m -= linv[j]
if segment.parent is not None:
j = segment.parent.index
M[i, j] = linv[i]
m -= linv[i]
M[i, i] = m
for f in fix:
M[f, :] = 0
return csr_matrix(M)
def branch_label(self, labels=None, label=1, segment=None):
""" Returns an array with an integer for each node that is unique
for the branch where it sits. """
if labels is None:
labels = zeros((self.n,), dtype='i')
if segment is None:
segment = self.root
while True:
labels[segment.index] = label
if len(segment.children) != 1:
break
segment = segment.children[0]
for i, c in enumerate(segment.children):
self.branch_label(labels, label=2*label + i, segment=c)
return labels
def branch_distance(self, endpoints, dist=None, segment=None,
lengths=None):
""" Returns an array with the distance of each node from the
branching immediately above it. The distance is calculated along the
branch. """
if dist is None:
dist = zeros((self.n,), dtype='d')
if segment is None:
segment = self.root
if lengths is None:
lengths = self.lengths(endpoints)
l = 0
while True:
dist[segment.index] = l
l += lengths[segment.index]
if len(segment.children) != 1:
break
segment = segment.children[0]
for i, c in enumerate(segment.children):
self.branch_distance(endpoints, dist, segment=c, lengths=lengths)
return dist
def reconnects(self, endpoints, rmin=5e-4, dmin=1e-3):
""" Finds reconnections in a tree. """
term = array(self.terminals())
rterm = endpoints[term, :]
labels = self.branch_label()
lterm = labels[term]
dist = self.branch_distance(endpoints)
dterm = dist[term]
# We look only at node pairs where one of the node is a terminal.
r2 = sum((rterm[newaxis, :, :] - endpoints[:, newaxis, :])**2, axis=2)
dlabel = lterm[newaxis, :] - labels[:, newaxis]
# These still include branching events, which are very close but
# close to the branching points
s = logical_and(dlabel != 0, r2 <= rmin**2)
t = logical_and(dterm[newaxis, :] > dmin, dist[:, newaxis] > dmin)
u = logical_and(s, t)
i, j = nonzero(u)
return len(i) > 0
def save(self, fname):
""" Saves the tree structure into file fname. """
parents = self.parents()
i = arange(self.n)
savetxt(fname, c_[i, parents])
@staticmethod
def loadtxt(fname):
""" Loads a tree structure from a txt file [DEBUG]. """
indices, parents = loadtxt(fname, unpack=True)
return Tree.from_parents(parents)
@staticmethod
def from_parents(parents):
""" Builds a tree from a list of the parent indices. """
t = Tree()
indices = arange(parents.shape[0])
for i in indices:
if i == 0:
t.make_root()
else:
seg = Segment()
t.segments[parents[i]].add_child(seg)
return t
def __iter__(self):
return iter(self.segments)
class Segment(object):
    """ A single segment (node) of a :class:`Tree`.

    A segment knows its parent, its children and the tree it belongs to;
    per-segment physical quantities live in external arrays addressed
    through the segment's global index. """
    def __init__(self):
        self.children = []
        self.parent = None
        self.tree = None

    def set_tree(self, tree):
        """ Registers this segment in *tree* and stores the global index
        that the tree assigns to it. """
        self.tree = tree
        self.index = tree.add_segment(self)

    def set_parent(self, parent):
        """ Links this segment below *parent* and joins the parent's tree. """
        self.parent = parent
        self.set_tree(parent.tree)

    def get(self, a):
        """ Gets the value in array a corresponding to this segment. """
        return a[self.index]

    def set(self, a, value):
        """ Sets the value in array a corresponding to this segment. """
        a[self.index] = value

    def iter_adjacent(self):
        """ Yields every directly connected segment: first the parent
        (when there is one), then each child in order. """
        if self.parent is not None:
            yield self.parent
        for child in self.children:
            yield child

    def add_child(self, other):
        """ Adds the :class:`Segment` *other* as a child of this segment. """
        other.set_parent(self)
        self.children.append(other)
def random_branching_tree(n, p):
    """ Builds a branched tree of n segments where every segment has a
    probability p of having two descendants.  This produces nice pictures
    and can be useful for testing.
    * n: number of growth iterations (each adds one or two segments).
    * p: branching probability of every extended leaf.
    """
    tree = Tree()
    root = tree.make_root()
    leafs = [root]
    # range replaces the Python 2-only xrange; it behaves identically here
    # and keeps the function working under Python 3.
    for i in range(n):
        l = leafs.pop(0)
        # Every leaf has at least one descendant
        s = Segment()
        l.add_child(s)
        leafs.append(s)
        # With probability p it has two children.  Note that 'random' here
        # is numpy.random, pulled in by 'from numpy import *'.
        if random.uniform() < p:
            s = Segment()
            l.add_child(s)
            leafs.append(s)
    return tree
def sample_endpoints(tree):
    """ Gives endpoints to a tree structure. Useful for plotting sample
    trees [DEBUG]. """
    # One 3d coordinate per segment.
    r = tree.zeros(dim=3)
    # Direction offsets: a single child continues downward; two children
    # fork to the left and to the right.
    deltav = {1: array([[0, 0, -1]]),
              2: array([[-1, 0, -1], [1, 0, -1]])}
    def recurse(leaf, v):
        # The root sits at the origin; every other segment is displaced
        # from its parent by the inherited direction v.
        if leaf.parent is None:
            leaf.set(r, (0, 0, 0))
        else:
            leaf.set(r, leaf.parent.get(r) + v)
        n = len(leaf.children)
        lr = leaf.get(r)
        for i, child in enumerate(leaf.children):
            # Damp the inherited direction, add the fork offset plus random
            # jitter; the exp factor scales with the y coordinate.
            # NOTE(review): these constants are purely cosmetic heuristics
            # for debug plots.
            vnew = (v * array([0.9, 1.0, 0.95]) +
                    (deltav[n][i] + random.uniform(-0.1, 0.1, size=3))
                    * exp(lr[1] / 100))
            recurse(child, vnew)
    recurse(tree.root, array([0, 0, 0]))
    return r
def test():
    """ Builds a random tree, lays out its endpoints and draws the x-z
    projection [DEBUG]. """
    import pylab
    tree = random_branching_tree(1000, 0.05)
    r = sample_endpoints(tree)
    for segment in tree:
        ep = segment.get(r)
        try:
            ip = segment.parent.get(r)
        except AttributeError:
            # The root has no parent; draw it from the origin.
            ip = array([0, 0, 0])
        pylab.plot([ip[0], ep[0]], [ip[2], ep[2]], lw=0.8, c='k')
    pylab.show()
if __name__ == '__main__':
test()
| 11,005 | 26.722922 | 80 | py |
strees | strees-master/refinement.py | """ This module implement the structure of oct-trees for the implementation
of the Fast Multipolar Method (FMM).
. note::
Most of this code is not used, since we disabled the use of the FMM for
the simulations reported in the paper. However, the simulated tree is
embedded in a bounding box defined by a :class:`Box` instance.
"""
import timeit
from itertools import product
from numpy import *
try:
import pylab
from matplotlib.patches import Rectangle
except ImportError:
pass
import mpolar
# Expansion-direction flags used throughout the multipole code:
# OUTWARD = multipole (outer) expansion, INWARD = local (inner) expansion,
# INOUT = translation of an outward expansion into an inward one.
INWARD, INOUT, OUTWARD = -1, 0, 1
class Box(object):
    """ Class of 3d boxes. Each box can be linked to a set of charges.

    Boxes form the nodes of the FMM oct-tree: each box may be refined
    into 8 children, carries multipolar (outward) and local (inward)
    expansions, and knows its near-neighbour and interaction lists. """
    def __init__(self, r0, r1, parent=None, rel_coords=None,
                 electrode=None):
        """ Initializes a box. r0 contains the smaller (x, y, z) coordinates
        and r1 the largest (x, y, z).
        """
        self.r0 = r0
        self.r1 = r1
        self.center = 0.5 * (r0 + r1)
        self.lengths = (r1 - r0)
        self.children = []
        self.parent = parent
        # Expansion coefficients; filled in by upward() / downward().
        self.outward, self.inward = None, None
        if parent is not None:
            self.level = parent.level + 1
        else:
            self.level = 0
        if rel_coords is None or parent is None:
            self.coords = array([0, 0, 0])
        else:
            # Integer coordinates of the box within its refinement level.
            self.coords = parent.coords * 2 + rel_coords
        self.rf = None
        self.electrode = electrode
    def clear(self, recurse=True):
        # Drops the cached expansions (e.g. before re-solving).
        self.outward, self.inward = None, None
        if recurse:
            for child in self.children:
                child.clear()
    def refine(self):
        """ Creates the 8 children of the box. """
        for t in ndindex(2, 2, 2):
            r0 = self.r0 + array(t) * self.lengths / 2
            r1 = r0 + self.lengths / 2
            self.children.append(Box(r0, r1, parent=self,
                                     rel_coords=array(t)))
    def set_charges(self, r, q, max_charges=None, min_length=0,
                    evaluation=True):
        """ Sets the charges of this box.
        If max_charges is not None, refines the box into smaller children
        until each leaf box contains no more than max_charges.
        If evaluation is true, assumes that the charge points will also
        be the evaluation points.
        """
        self.r = r
        self.q = q
        self.n = len(q)
        if evaluation:
            self.rv = self.r
            self.phi = zeros((self.r.shape[0],))
        # We do the reflection that implements the image charge method after
        # setting the evaluation points because we are not usually interested
        # in evaluating "image fields".
        if self.electrode:
            self.r, self.q = self.electrode.extend(self.r, self.q)
            self.n = len(self.q)
        if (max_charges is not None and self.n > max_charges
            and 2 * min_length < self.lengths[0]):
            indices = self._indices(self.r)
            if not self.children:
                self.refine()
            # flt[i, :] selects the charges that fall into child i.
            self.flt = empty([8, len(self.q)], dtype=bool)
            if evaluation:
                # Note that when electrode != None self.q and q are not the same
                # (the former includes the reflections). To evaluate only
                # at self.rv we create a view of the first half of flt.
                self.fltv = self.flt[:, :len(q)].view()
            for i, child in enumerate(self.children):
                self.flt[i, :] = (indices == i)
                child.set_charges(self.r[self.flt[i, :], :],
                                  self.q[self.flt[i, :]],
                                  max_charges=max_charges,
                                  evaluation=evaluation)
    def update_charges(self, q):
        """ Recursively re-set the charges contained in the box. This
        keeps the refinement oct-tree. """
        if self.electrode is not None:
            # Re-append the image charges for the new charge values.
            q = r_[q, self.electrode.images_q(self.r, q)]
        self.q[:] = q
        for i, child in enumerate(self.children):
            child.update_charges(q[self.flt[i, :]])
    def set_evaluation(self, rv):
        """ Recursively sets the points where the potential will be evaluated.
        """
        self.rv = rv
        self.phi = zeros((self.rv.shape[0],))
        indices = self._indices(rv)
        self.fltv = empty([8, len(self.phi)], dtype=bool)
        for i, child in enumerate(self.children):
            self.fltv[i, :] = (indices == i)
            child.set_evaluation(rv[self.fltv[i, :], :])
    def set_field_evaluation(self, rf):
        """ Recursively sets the points where the fields will be evaluated.
        """
        self.rf = rf
        self.field = zeros((self.rf.shape[0], 3))
        indices = self._indices(rf)
        self.fltf = empty([8, self.rf.shape[0]], dtype=bool)
        for i, child in enumerate(self.children):
            self.fltf[i, :] = (indices == i)
            #print self.fltf[i, :]
            if any(self.fltf[i, :]):
                child.set_field_evaluation(rf[self.fltf[i, :], :])
    def _indices(self, r):
        """ Finds the child indices (from 0 to 7) of the points at r. """
        bits = (2 * (r - self.r0[newaxis, :]) / self.lengths[newaxis, :])
        bits = bits.astype('i')
        # We do not want to exclude the boundaries with higher values
        bits = where(bits < 2, bits, 1)
        p2 = array([4, 2, 1])
        # This is the index of the chid where each charge is sitting
        return dot(bits, p2)
    def collect_solutions(self, field=False):
        """ Collects the solution for each of the box's children. """
        for i, child in enumerate(self.children):
            child.collect_solutions(field=field)
            # In newer versions of numpy this gives a ValueError exception
            # here I am just ignoring it in order to maintain perfect
            # compatibility with the results in the paper. There phi
            # is anyhow ignored since it we only use these routines for the
            # electric field. This bug is corrected in newer commits.
            try:
                self.phi[self.fltv[i, :]] = child.phi
            except ValueError:
                pass
            if field and child.rf is not None:
                # Note that if child contains field evaluation points,
                # then self do too.
                self.field[self.fltf[i, :], :] = child.field
    def is_near_neighbour(self, other):
        """ Checks whether other is a near neighbour of this box.
        Note that they must belong to the same oct-tree or the algorithm
        fails.
        """
        if self.level != other.level:
            return False
        return mpolar.are_near_neighbours(self.coords, other.coords)
        #absdif = abs(self.coords - other.coords)
        # Note that every box is considered a near-neighbour of herself.
        #return all(absdif <= 1)
    def is_well_separated(self, other):
        # Well-separated is the complement of near-neighbour.
        return not self.is_near_neighbour(other)
    def build_lists(self, recurse=False):
        """ Builds the lists of near-neighbours and the interaction list
        of this box. Assumes that the near-neighbours are already calculated
        up in the tree. """
        self.interaction_list = []
        self.neighbours = [self]
        if self.parent is not None:
            for other in self.parent.neighbours:
                if not other.children:
                    # Here we must count the direct interaction between boxes
                    # at different levels.
                    self.neighbours.append(other)
                for child in other.children:
                    if self.is_well_separated(child):
                        self.interaction_list.append(child)
                    elif self != child:
                        self.neighbours.append(child)
        if recurse:
            for child in self.children:
                child.build_lists(recurse=True)
    def expand(self, p):
        """ Directly calculates the multipolar expansion of this box
        around its center. """
        self.outward = mpolar.expand(p, self.r - self.center[newaxis, :],
                                     self.q, OUTWARD)
    def collect(self):
        """ Collects the multipolar expansions of this box's children,
        translate them to the center and sums them. """
        for i, t in enumerate(ndindex(2, 2, 2)):
            # rshift is the displacement from the child's center to ours.
            rshift = (t - array([0.5, 0.5, 0.5])) * self.lengths / 2
            M_child = mpolar.shift(rshift, OUTWARD, self.children[i].outward)
            self.outward += M_child
    def upward(self, p):
        """ Goes through the oct-tree. For leaves of the tree, directly
        calculates the multipole expansion; for nodes with descendants
        calculates the expansion by adding the children's expansion.
        This is called "Upward Pass" in the Greengard papers.
        """
        # Here we build an outward and an inward expansion for each box
        # that are initially set to zero
        self.inward = zeros((p, p), dtype='complex128')
        self.outward = zeros((p, p), dtype='complex128')
        if not self.children:
            self.expand(p)
            return
        for child in self.children:
            child.upward(p)
        self.collect()
    def collect_inward(self):
        """ Calculates the inward (local) expansions of all boxes in the
        interaction list and adds them. """
        for other in self.interaction_list:
            rshift = other.center - self.center
            M = mpolar.shift(rshift, INOUT, other.outward)
            # mpolar.accum(self.inward,
            #              mpolar.shift(rshift, INOUT, other.outward))
            self.inward[:, :] += M
    def eval_subtree(self, other):
        """ DEBUG purposes only. """
        #self.phi += mpolar.eval_array(other.outward,
        #                              self.r - other.center[:, newaxis],
        #                              OUTWARD)
        self.phi += mpolar.direct(other.r, other.q, self.r)
        for child in self.children:
            child.eval_subtree(other)
    def downward(self):
        """ Performs the "Downward Pass" of the Greengard papers. """
        self.collect_inward()
        for child in self.children:
            rshift = child.center - self.center
            child.inward[:, :] += mpolar.shift(-rshift, INWARD, self.inward)
            child.downward()
    def solve(self, a, field=False):
        """ Once we have the local expansion for the box and the list
        of near-neighbours, we can finally evaluate the potential.
        Note that generally this function is called only for leaf nodes. """
        # Far field through the local expansion, near field directly.
        self.phi[:] = mpolar.eval_array(self.inward,
                                        self.rv - self.center[newaxis, :],
                                        INWARD)
        for other in self.neighbours:
            self.phi += mpolar.direct(other.r, other.q, self.rv, a)
        if field and self.rf is not None:
            self.field[:, :] = mpolar.eval_field_array(
                self.inward, self.rf - self.center[newaxis, :],
                INWARD)
            for other in self.neighbours:
                self.field += mpolar.field_direct(other.r, other.q, self.rf, a)
    def solve_all(self, a=0.0, **kwargs):
        """ Calls solve for the leaf nodes of the sub-tree rooted at self. """
        if not self.children:
            self.solve(a, **kwargs)
        else:
            for child in self.children:
                child.solve_all(a, **kwargs)
    def plot(self, dims=[0, 1], recurse=False, **kwargs):
        # Draws the box outline projected onto the two axes in *dims*.
        r0 = self.r0[dims]
        r1 = self.r1[dims]
        lx, ly = r1 - r0
        rect = Rectangle(r0, lx, ly, **kwargs)
        pylab.gca().add_patch(rect)
        if recurse:
            for child in self.children:
                child.plot(dims=dims, recurse=True, **kwargs)
    def scatter(self, dims=[0, 1], **kwargs):
        # Scatter-plots the evaluation points colored by the potential.
        if len(self.phi) == 0:
            return
        x = self.rv[:, dims[0]]
        y = self.rv[:, dims[1]]
        pylab.scatter(x, y, c=self.phi, **kwargs)
    def scatter_leafs(self, *args, **kwargs):
        # Like scatter(), but only for the leaf boxes of this sub-tree.
        if not self.children:
            self.scatter(*args, **kwargs)
        else:
            for child in self.children:
                child.scatter_leafs(*args, **kwargs)
    def __str__(self):
        #return str(self.coords)
        return "(%s)@%d" % (str(self.coords), self.level)
def containing_box(r, electrode=None):
    """ Builds a Box object that contains all k points in r[k, 3].
    The Box has to be a perfect cube for the FMM to work.
    If *electrode* is given, the box is enlarged so that it also contains
    the image charges of all points under that electrode.
    (The docstring previously referred to a 'reflect' flag that no longer
    exists; reflection is now delegated to the electrode object.)
    """
    if electrode is not None:
        r = concatenate((r, electrode.images_r(r)), axis=0)
    rmin = amin(r, axis=0)
    rmax = amax(r, axis=0)
    lengths = rmax - rmin
    center = 0.5 * (rmax + rmin)
    # Cube with side equal to the largest extent, centered on the cloud.
    sides = amax(lengths) * ones((3,))
    r0 = center - sides / 2
    r1 = center + sides / 2
    return Box(r0, r1, electrode=electrode)
def main():
    """ Self-test: solves the potential of k unit charges with the FMM
    and compares against the direct O(k**2) sum [DEBUG; Python 2]. """
    k = 1200
    r = random.uniform(-1, 1, size=(k, 3))
    q = random.uniform(-1.0, 1.0, size=k)
    # Let's make things simpler
    #r[2, :] = pi / 5
    q[:] = 1.0
    r0 = array([-1.0, -1.0, -1.0])
    r1 = array([1.0, 1.0, 1.0])
    #pylab.plot(r[0, :], r[1, :], 'o', mfc='k', mec='k')
    box = Box(r0, r1)
    box.plot(recurse=True, fill=False)
    pylab.xlim([-1, 1])
    pylab.ylim([-1, 1])
    # Full FMM pipeline: refine, build lists, upward and downward passes.
    box.set_charges(r, q, max_charges=5)
    box.build_lists(recurse=True)
    box.upward(15)
    box.downward()
    box.solve_all()
    box.collect_solutions()
    phi = mpolar.direct(r, q, r, 0.0)
    box.scatter_leafs(vmin=0, vmax=1200)
    pylab.colorbar()
    # Let's compare with the exact solution
    pylab.figure(2)
    box.plot(recurse=True, fill=False)
    pylab.xlim([-1, 1])
    pylab.ylim([-1, 1])
    pylab.scatter(r[:, 0], r[:, 1],
                  c=phi, vmin=0, vmax=1200)
    pylab.colorbar()
    # RMS error per charge between direct and multipolar solutions.
    err = sqrt(sum((phi - box.phi)**2)) / k
    savetxt("cmp.txt", c_[phi, box.phi])
    print "Error = %g" % err
    pylab.show()
if __name__ == '__main__':
main()
| 14,659 | 29.541667 | 80 | py |
strees | strees-master/solve_tree.py | import hotshot, hotshot.stats
import time
from numpy import *
import pylab
from matplotlib import cm
import tree
import mpolar
from refinement import Box
def main():
    """ Benchmark: builds a random k-segment tree, solves its potential
    with the FMM and with the direct sum, and reports timings and the
    RMS difference [Python 2]. """
    p = 5
    k = 50000
    t0 = time.time()
    tr = tree.random_branching_tree(k, 0.05)
    r = tree.sample_endpoints(tr)
    # Normalize the endpoints into the unit cube.
    r0 = amin(r, axis=0)
    r1 = amax(r, axis=0)
    r = (r - r0) / (r1 - r0)
    r0 = amin(r, axis=0)
    r1 = amax(r, axis=0)
    q = ones((r.shape[0],))
    t1 = time.time()
    # Let's play with trees
    parents = tr.parents()
    l = sqrt(sum((r - r[parents, :])**2, axis=1))
    print l.shape
    box = Box(r0, r1)
    # NOTE(review): set_charges receives r.T here, i.e. a (3, k) layout,
    # unlike refinement.main which passes (k, 3) — confirm which layout
    # mpolar expects.
    box.set_charges(r.T, q, max_charges=200)
    box.build_lists(recurse=True)
    box.upward(p)
    box.downward()
    box.solve_all()
    box.collect_solutions()
    t2 = time.time()
    phi = mpolar.direct(r.T, q, r.T, 0.0)
    t3 = time.time()
    savetxt("cmp.txt", c_[phi, box.phi])
    eps = sqrt(sum((phi - box.phi)**2) / (k * (k - 1)))
    print ("t(setup) = %g s  t(multipol) = %g s  t(direct) = %g s"
           % (t1 - t0, t2 - t1, t3 - t2))
    print "TOTAL: %g" % (t3 - t0)
    print "ERROR: %g" % eps
    # box.plot(dims=[0, 2], recurse=True, fill=False)
    # plot_tree(tr, r, box.phi)
    # pylab.figure(2)
    # plot_tree(tr, r, phi)
    # pylab.show()
def plot_tree(tr, r, phi):
    """ Draws the x-z projection of the tree tr with endpoints r, coloring
    every segment by its value in phi (scaled to the range of phi). """
    lo = amin(phi)
    hi = amax(phi)
    cmap = cm.get_cmap('jet')
    for seg in tr:
        tip = seg.get(r)
        # The root has no parent: draw it as a zero-length segment.
        base = tip if seg.parent is None else seg.parent.get(r)
        color = cmap((seg.get(phi) - lo) / (hi - lo))
        pylab.plot([base[0], tip[0]], [base[2], tip[2]], lw=1.2, c=color)
if __name__ == '__main__':
    # Run main() under hotshot (a Python 2-only profiler) and print the
    # 20 most time-consuming entries, sorted by internal time.
    prof = hotshot.Profile("solve_tree.prof")
    prof.runcall(main)
    prof.close()
    stats = hotshot.stats.load("solve_tree.prof")
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
| 2,010 | 19.731959 | 66 | py |
strees | strees-master/relax_tree.py | import time
from numpy import *
from scipy.sparse import lil_matrix, csr_matrix
from scipy.integrate import odeint, ode
import pylab
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import tree
import mpolar
from refinement import Box
def main():
    """ Relaxation demo: integrates dq/dt = M . phi on a random tree with
    a dopri5 ODE solver and renders one frame per time step [Python 2]. """
    p = 5
    k = 5000
    tr = tree.random_branching_tree(k, 0.05)
    r = tree.sample_endpoints(tr)
    # Normalize the endpoints into the unit cube.
    r0 = amin(r, axis=0)
    r1 = amax(r, axis=0)
    r = (r - r0) / (r1 - r0)
    r0 = amin(r, axis=0)
    r1 = amax(r, axis=0)
    # Let's play with trees
    parents = tr.parents()
    l = sqrt(sum((r - r[parents, :])**2, axis=1))
    rmid = 0.5 * (r + r[parents, :])
    M = tree_matrix(tr, l)
    t = linspace(0, 5e-3, 100)
    q0 = zeros((r.shape[0],))
    f = build_func(tr, M, rmid.T, r0, r1,
                   array([0.0, 0.0, -1.0]),
                   a=0.01, for_ode=True)
    # qt = odeint(f, q0, t, atol=1e-1)
    d = ode(f).set_integrator('dopri5', method='bdf')
    d.set_initial_value(q0, 0.0)
    dt = t[1] - t[0]
    qt = zeros((len(t) + 1, len(q0)))
    i = 0
    # Step the integrator manually so we can store every intermediate state.
    while d.successful() and d.t < t[-1]:
        i += 1
        d.integrate(d.t + dt)
        print d.t
        qt[i, :] = d.y
    animate(tr, r0, r1, rmid, qt, array([0.0, 0.0, -1.0]))
def build_func(tr, M, r, r0, r1, e0, a=0.0, p=4, for_ode=False):
    """ Builds a derivative function dq/dt = M . (phi - r . e0), where
    phi is computed with the FMM solver at every call.
    * tr: the tree (only its terminals are used for field evaluation).
    * M: the Ohm matrix of the tree; r: charge/evaluation points.
    * r0, r1: bounding box corners; e0: external field vector.
    * a: conductor thickness; p: number of multipolar terms.
    * for_ode: if True, returns a callable with the (t, q) signature of
      scipy.integrate.ode rather than odeint's (q, t).
    """
    iterm = tr.terminals()
    def f(q, t0):
        # A fresh oct-tree is built and solved for every evaluation.
        box = Box(r0, r1)
        box.set_charges(r, q, max_charges=200)
        box.set_field_evaluation(r[:, iterm])
        box.build_lists(recurse=True)
        box.upward(p)
        box.downward()
        box.solve_all(a=a, field=True)
        box.collect_solutions(field=True)
        return M.dot(box.phi - dot(r.T, e0))
    def f_ode(t0, q):
        # Same derivative, argument order swapped for scipy's ode class.
        return f(q, t0)
    return f if not for_ode else f_ode
def animate(tr, r0, r1, r, qt, e0):
    """ Renders one PNG frame per time step: the charges qt[i, :] at the
    points r, plus the electric field at the terminal segments
    [Python 2; uses the py2-only print statement and xrange]. """
    nt, n = qt.shape
    # Fix the color scale over the whole animation.
    vmin, vmax = amin(qt), amax(qt)
    iterm = tr.terminals()
    ax = pylab.gcf().add_subplot(111)
    #ax = pylab.gcf().add_subplot(111, projection='3d')
    for i in xrange(nt):
        # Re-solve the field for this frame's charge distribution.
        box = Box(r0, r1)
        box.set_charges(r.T, qt[i, :], max_charges=200)
        # box.set_evaluation(array([]))
        box.set_field_evaluation(r.T[:, iterm])
        box.build_lists(recurse=True)
        box.upward(4)
        box.downward()
        box.solve_all(a=0.01, field=True)
        box.collect_solutions(field=True)
        field = box.field + e0[:, newaxis]
        #pylab.clf()
        #plot_tree(tr, r, qt[i, :], vmin=vmin, vmax=vmax)
        ax.clear()
        ax.scatter(r[:, 0], r[:, 2], c=qt[i, :],
                   s=5.0, faceted=False,
                   vmin=vmin, vmax=vmax)
        ax.quiver(r[iterm, 0], r[iterm, 2], field[0, :], field[2, :])
        ax.set_xlim([0.0, 1.0])
        ax.set_ylim([-0.2, 1.0])
        pylab.savefig('tree_%.3d.png' % i)
        print 'tree_%.3d.png' % i
def tree_matrix(tr, l):
    """ Assembles the sparse matrix M that gives the evolution of charges
    in every segment of the tree as dq/dt = M . phi, where phi is the
    potential at the center of each segment and '.' the dot product.
    * tr: iterable of segments with .index and .iter_adjacent().
    * l: array of segment lengths, indexed by segment index.
    """
    n = l.shape[0]
    # LIL is cheap to fill element by element; convert to CSR at the end
    # for fast matrix-vector products.
    M = lil_matrix((n, n))
    for seg in tr:
        i = seg.index
        diagonal = 0.0
        for neighbour in seg.iter_adjacent():
            j = neighbour.index
            # Conductance of the link, averaged over the two half-lengths.
            coupling = 2.0 / (l[i] + l[j])
            M[i, j] = coupling
            diagonal -= coupling
        M[i, i] = diagonal
    return csr_matrix(M)
def plot_tree(tr, r, phi, vmin=None, vmax=None):
    """ Draws the x-z projection of tr, coloring each segment by its
    value in phi.  The color scale spans [vmin, vmax] and defaults to
    the full range of phi. """
    if vmin is None:
        vmin = amin(phi)
    if vmax is None:
        vmax = amax(phi)
    cmap = cm.get_cmap('jet')
    span = vmax - vmin
    for seg in tr:
        tip = seg.get(r)
        # The root has no parent: draw it as a zero-length segment.
        base = tip if seg.parent is None else seg.parent.get(r)
        pylab.plot([base[0], tip[0]], [base[2], tip[2]],
                   lw=1.2, c=cmap((seg.get(phi) - vmin) / span))
if __name__ == '__main__':
main()
| 4,196 | 23.982143 | 75 | py |
strees | strees-master/datafile.py | """ This module contains the code to save/retrieve the state of a simulation.
"""
import sys
import time
from contextlib import closing
from numpy import *
import h5py
from tree import Tree
class DataFile(object):
    """ Class to store and retrieve simulation data.

    Wraps an HDF5 file with one 'main' group holding run metadata as
    attributes and one zero-padded numbered subgroup per saved step
    [Python 2: uses iteritems and the print statement]. """
    def __init__(self, fname, parameters):
        self.fp = h5py.File(fname, 'w')
        self.main = self.fp.create_group('main')
        # We always write at least these two metadata
        self.main.attrs['command'] = ' '.join(sys.argv)
        self.main.attrs['timestamp'] = time.time()
        self.main.attrs['ctime'] = time.ctime()
        # Every run parameter is stored as an attribute of 'main'.
        for key, item in parameters.iteritems():
            print key, item
            self.main.attrs[key] = item
        self.step = 0
    def add_step(self, t, tree, r, q, phi, **kwargs):
        """ Adds a step to the file. """
        g = self.main.create_group('%.5d' % self.step)
        self.step += 1
        g.attrs['timestamp'] = time.time()
        g.attrs['t'] = t
        g.create_dataset('r', data=r, compression='gzip')
        g.create_dataset('q', data=q, compression='gzip')
        g.create_dataset('phi', data=phi, compression='gzip')
        # The tree topology is stored as the array of parent indices.
        parents = tree.parents()
        g.create_dataset('parents', data=parents, compression='gzip')
        for key, item in kwargs.iteritems():
            g.create_dataset(key, data=item, compression='gzip')
        # Flush so a crash does not lose the step that was just written.
        self.fp.flush()
    def close(self):
        self.fp.close()
def load_tree(file, step):
    """ Loads a tree from file and returns a tree object and the endpoints.
    file can be either an open h5file handler or a string with the filename.
    """
    try:
        # Let's assume that we received an open h5file
        parents = array(file['main/%s/parents' % step])
        r = array(file['main/%s/r' % step])
        tr = Tree.from_parents(parents)
        return tr, r
    except TypeError:
        # Indexing failed, so *file* must be a filename: open it and
        # recurse with the handle (EAFP dispatch on the argument type).
        with closing(h5py.File(file)) as fp:
            tr, r = load_tree(fp, step)
        return tr, r
| 2,051 | 26.72973 | 80 | py |
strees | strees-master/errors.py | """ Calculation of the errors in solveing the potential and dq/dt. """
from numpy import *
from numpy.linalg import norm
import h5py
import pylab
from refinement import Box, containing_box
import mpolar
import datafile
from plotter import plot_projections, bounding_box
def main():
    """ Diagnostic script: for each step given on the command line,
    re-solves the charge derivative dq/dt with the FMM and with the
    direct O(n**2) sum, and plots the absolute and relative errors.
    Usage: errors.py datafile.h5 step [step ...]
    """
    from optparse import OptionParser
    from contextlib import closing
    parser = OptionParser()
    (opts, args) = parser.parse_args()
    steps = args[1:]
    fname = args[0]
    for step in steps:
        with closing(h5py.File(fname)) as fp:
            tr, r = datafile.load_tree(fp, step)
            q = array(fp['main'][step]['q'])
            M = tr.ohm_matrix(r)
            r = r.T
            CONDUCTOR_THICKNESS = fp['main'].attrs['conductor_thickness']
            MAX_CHARGES_PER_BOX = fp['main'].attrs['max_charges_per_box']
            MULTIPOLAR_TERMS = fp['main'].attrs['multipolar_terms']
            EXTERNAL_FIELD = fp['main'].attrs['external_field']
            EXTERNAL_FIELD_VECTOR = array([0.0, 0.0, EXTERNAL_FIELD])
        # NOTE(review): this used to be containing_box(r, reflect=True),
        # but containing_box takes 'electrode', not 'reflect', so the call
        # raised TypeError.  Image charges now require passing an explicit
        # electrode object; none is stored in the data file, so no
        # reflection is applied here — confirm against the original runs.
        box = containing_box(r)
        box.set_charges(r, q,
                        max_charges=MAX_CHARGES_PER_BOX,
                        min_length=16 * CONDUCTOR_THICKNESS)
        box.build_lists(recurse=True)
        box.update_charges(q)
        box.upward(MULTIPOLAR_TERMS)
        box.downward()
        box.solve_all(a=CONDUCTOR_THICKNESS, field=True)
        box.collect_solutions(field=True)
        # Reference solution: direct pairwise sum.
        phi = mpolar.direct(r, q, r, CONDUCTOR_THICKNESS)
        dq0 = M.dot(phi - dot(r.T, EXTERNAL_FIELD_VECTOR))
        dq1 = M.dot(box.phi - dot(r.T, EXTERNAL_FIELD_VECTOR))
        pylab.figure(figsize=(7, 14))
        pylab.subplot(2, 1, 1)
        pylab.plot(abs(dq0), abs(dq0 - dq1), 'o', mew=0, ms=2.5,
                   label="abs error")
        pylab.loglog()
        pylab.legend()
        pylab.subplot(2, 1, 2)
        pylab.plot(abs(dq0), abs((dq0 - dq1) / dq0), 'o', mew=0,
                   ms=2.5, label="rel error")
        pylab.loglog()
        pylab.legend()
        pylab.figure(figsize=(12, 10))
        r0, r1 = bounding_box(r)
        plot_projections(r.T, abs(dq0 - dq1), r0, r1, log=False)
        pylab.show()
if __name__ == '__main__':
main()
| 2,418 | 29.2375 | 73 | py |
strees | strees-master/expand.py | import sys
from readinput import expand_input
import parameters
def main():
    """ Expands every .ini input file given on the command line into one
    concrete input file per parameter combination. """
    for input_file in sys.argv[1:]:
        expand_input(input_file, parameters)
if __name__ == '__main__':
main()
| 191 | 11.8 | 39 | py |
strees | strees-master/readinput.py | """ This is an auxiliary module for reading input .ini files.
It is based on Python's stdlib
`ConfigParser <http://docs.python.org/2/library/configparser.html/>`_
This module also provides functionality for setting run sets where
one or more of the parameters run over a list of values.
"""
import os, os.path, socket
from ConfigParser import SafeConfigParser, RawConfigParser, NoOptionError
from warnings import warn
from itertools import product
import re
import numpy
def guess_type(s):
    """ Converts s into int or float if possible. If not, leave it as a
    string.  This will give us problems if the user wants to e.g. use
    filenames or run names that can be parsed as a number, so a better
    approach would associate parameter names with types. """
    # Try the narrowest type first: anything int() accepts is an int,
    # otherwise fall back to float, otherwise keep the raw string.
    for cast in (int, float):
        try:
            return cast(s)
        except ValueError:
            pass
    return s
# These are decorators to check allowed values.
def positive(func):
    """ A decorator to constraint the parameter to positive values. """
    # func_name is the Python 2 spelling of __name__; the fallback keeps
    # the decorator working under Python 3 as well.
    name = getattr(func, 'func_name', None) or func.__name__

    def f(s):
        r = func(s)
        # NOTE(review): `not r >= 0` only rejects strictly negative values
        # (and NaN), so 0 is accepted even though the name and message say
        # "positive".  Kept as-is because existing input files may rely on
        # 0 passing; this makes it equivalent to `nonnegative` below.
        if not r >= 0:
            raise ValueError("%s must be positive" % name)
        return r
    f.__doc__ = func.__doc__
    f.func_name = name
    f.__name__ = name
    return f
# These are decorators to check allowed values.
def nonnegative(func):
    """ A decorator to constraint the parameter to nonnegative values. """
    # func_name is the Python 2 spelling of __name__; the fallback keeps
    # the decorator working under Python 3 as well.
    name = getattr(func, 'func_name', None) or func.__name__

    def f(s):
        r = func(s)
        if r < 0:
            # The message used to say "positive", contradicting both the
            # decorator's name and its check (r == 0 is accepted here).
            raise ValueError("%s must be nonnegative" % name)
        return r
    f.__doc__ = func.__doc__
    f.func_name = name
    f.__name__ = name
    return f
# A decorator to set a default value, stored in the dictionary PARAM_DEFAULTS
PARAM_DEFAULTS = {}
def default(value):
    """ Decorator factory: records *value* as the default for the
    decorated parameter parser in PARAM_DEFAULTS and returns the parser
    unchanged. """
    def deco(func):
        global PARAM_DEFAULTS
        # func_name is the Python 2 spelling of __name__; the fallback
        # keeps the decorator working under Python 3 as well.
        name = getattr(func, 'func_name', None) or func.__name__
        PARAM_DEFAULTS[name] = value
        return func
    return deco
def load_input(fname, parameters, d=None, upper=False, raw=False):
    """ Loads an input file and stores its values in the dictionary
    d. If upper is true, transforms the parameter names to upper case.
    Each option name is looked up as a parser function in the *parameters*
    module; unknown names fall back to guess_type with a warning.  With
    raw=True no '@' expansion or interpolation is performed.  Returns the
    merged dictionary PARAM_DEFAULTS + d [Python 2]. """
    if d is None:
        d = dict()
    config = SafeConfigParser()
    # Interpolation variables available inside the .ini file, e.g.
    # %(home)s or %(input)s.
    defaults = dict(home=os.environ['HOME'],
                    user=os.environ['LOGNAME'],
                    cwd=os.getcwd(),
                    input=os.path.splitext(os.path.basename(fname))[0],
                    input_dir=os.path.split(os.path.realpath(fname))[0],
                    hostname=socket.gethostname())
    config.read(fname)
    for section in ['global', 'parameters']:
        for name, value in config.items(section, vars=defaults, raw=raw):
            print name, repr(value)
            try:
                func = getattr(parameters, name)
                value = expand(value, func)
                print "%-30s =\t%-10s [%s]" % (name, repr(value), func.__doc__)
            except AttributeError:
                warn("'%s' is not defined as a parameter." % name)
                value = guess_type(value)
            if upper:
                name = name.upper()
            d[name] = value
    # Defaults registered with @default apply wherever the file is silent.
    r = PARAM_DEFAULTS.copy()
    r.update(d)
    return r
# '@(x y z)' explicit value lists and '@@(a:b[:c])' numeric loops.
RE_LIST = re.compile(r'@\((.+)\)')
RE_LOOP = re.compile(r'@@\(([\d.-]+):([\d.-]+)(:([\d.-]+))?\)')
def expand(s, parser):
    """ Expands special characters to produce e.g. lists of many parameters.

    Three forms are understood:
      * plain values, returned as parser(s);
      * '@(x y z)'            -> [parser('x'), parser('y'), parser('z')];
      * '@@(a:b)', '@@(a:b:c)' -> list over numpy.arange(a, b, c).

    Raises ValueError for any other string containing '@'.
    """
    # All expansions start with the symbol '@':
    if not '@' in s:
        return parser(s)
    m = RE_LIST.match(s)
    if m:
        return [parser(x) for x in m.group(1).split()]
    m = RE_LOOP.match(s)
    if m:
        if m.group(4) is not None:
            a, b, c = parser(m.group(1)), parser(m.group(2)), parser(m.group(4))
        else:
            a, b, c = parser(m.group(1)), parser(m.group(2)), None
        r = numpy.arange(a, b, c)
        # Round-trip through str so each element goes through the same
        # parser as a plain value would.
        return [parser(str(x)) for x in r]
    # Previously this path fell through and silently returned None, which
    # crashed much later, far from the source of the error.
    raise ValueError("unrecognized expansion syntax: %r" % s)
def expand_dict(d):
    """ Takes a dictionary that may contain a few lists as values and returns
    an iterator over dictionaries where each of these elements is iterated.
    When no value is a list, a single copy of *d* is yielded. """
    # items() works on both Python 2 and 3 (iteritems() was py2-only).
    pairs = [(k, v) for k, v in d.items() if isinstance(v, list)]
    if not pairs:
        # Previously `zip(*[])` raised ValueError when no value was a
        # list; an input file without any looped parameter is a valid,
        # single run.
        yield d.copy()
        return
    keys, lists = zip(*pairs)
    for tpl in product(*lists):
        d0 = d.copy()
        for k, v in zip(keys, tpl):
            d0[k] = v
        yield d0
def expand_input(ifile, parameters):
    """ Expands an input file by expanding some of its arguments.
    Writes one numbered .ini file per combination of the looped
    parameters and returns the list of written filenames. """
    # raw=True: keep '%(...)s' interpolations unexpanded in the output.
    d = load_input(ifile, parameters, d={}, upper=False, raw=True)
    base = os.path.splitext(ifile)[0]
    config = RawConfigParser()
    config.read(ifile)
    l = []
    for i, d0 in enumerate(expand_dict(d)):
        fname = '%s_%.4d.ini' % (base, i)
        l.append(fname)
        with open(fname, 'w') as fp:
            for k, v in d0.iteritems():
                # Write each expanded value back into whichever section
                # originally declared the option.
                for sect in config.sections():
                    try:
                        old = config.get(sect, k)
                        config.set(sect, k, v)
                    except NoOptionError:
                        pass
            config.write(fp)
    return l
def main():
    """ Command-line entry point: expands the input file given as the
    first argument [DEBUG]. """
    import sys
    import parameters
    expand_input(sys.argv[1], parameters)
    # d = load_input(sys.argv[1], parameters, d={}, upper=True)
    # for d0 in expand_dict(d):
    #     print d0
if __name__ == '__main__':
main()
| 5,485 | 26.707071 | 80 | py |
strees | strees-master/electrodes.py | """ Implementation of electrode geometries with the image charge method.
"""
from numpy import *
X, Y, Z = 0, 1, 2
class Electrode(object):
    """ Abstract base class for electrode geometries handled through the
    image-charge method. """

    def __init__(self):
        pass

    def images(self, r, q):
        """ Calculates the images of charges q located at points r.
        Returns rimag, qimag.  Subclasses must override this. """
        raise NotImplementedError

    def extend(self, r, q):
        """ Returns (r, q) augmented with this electrode's image charges:
        positions stacked along axis 0, charges appended. """
        r_img, q_img = self.images(r, q)
        r_all = concatenate((r, r_img), axis=0)
        q_all = r_[q, q_img]
        return r_all, q_all
class NullElectrode(object):
    """ Simply a trick to simulate no electrode: it contributes no image
    charges.

    Note: this class does not inherit from Electrode, so the original
    version lacked the extend() method that Electrode provides and broke
    code calling electrode.extend(r, q); it is supplied here directly.
    """
    def images(self, r, q):
        """ Returns empty image arrays: no electrode, no images. """
        return zeros((0, 3)), zeros((0,))

    def extend(self, r, q):
        """ Returns the input charges unchanged (nothing to add). """
        return r, q
class Planar(Electrode):
    """ A planar electrode: each charge gets a mirror image of opposite
    sign on the other side of the plane. """

    def __init__(self, x0, axis=Z):
        """ Inits a plane electrode perpendicular to `axis`, located at
        coordinate x0 along that axis. """
        self.x0 = x0
        # Reflection flips only the `axis` component of each point...
        self.u = ones((3, ))
        self.u[axis] *= -1
        # ...and the plane offset 2*x0 applies only to that component.
        # (The original returned `2 * self.x0 + r * self.u`, adding 2*x0
        # to ALL components; that is a valid mirror only when x0 == 0.)
        self.shift = zeros((3, ))
        self.shift[axis] = 2 * x0

    def images(self, r, q):
        return self.images_r(r), self.images_q(r, q)

    def images_q(self, r, q):
        """ Image charges carry the opposite sign. """
        return -q

    def images_r(self, r):
        """ Mirrors r through the plane: x_axis -> 2*x0 - x_axis. """
        return self.shift + r * self.u
class Sphere(Electrode):
    """ A grounded spherical electrode handled with the classical
    image-charge construction (see e.g. Jackson, sec. 2.2): a charge q at
    distance y from the center has an image -q*a/y at distance a**2/y. """

    def __init__(self, center, a):
        """ A spherical electrode centered at `center` (length-3 array)
        and with radius a. """
        self.center = center
        self.a = a
        # Image positions use a**2 / y.  The original had `a * 2` here,
        # a typo that placed all images at the wrong radius.
        self.a2 = a ** 2

    def images(self, r, q):
        # r is (n, 3); dr are the vectors from the center, y their norms.
        dr = r - self.center[newaxis, :]
        y = sqrt(sum(dr**2, axis=1))
        yp = self.a2 / y
        qp = -q * self.a / y
        # (yp / y) is (n,), so it must be given a trailing axis to scale
        # the (n, 3) dr rows; the original `dr * yp / y` did not broadcast.
        rp = self.center[newaxis, :] + dr * (yp / y)[:, newaxis]
        return rp, qp

    def images_q(self, r, q):
        """ Image charges only: -q * a / y. """
        dr = r - self.center[newaxis, :]
        y = sqrt(sum(dr**2, axis=1))
        return (-q * self.a / y)

    def images_r(self, r):
        """ Image positions only: center + (a**2 / y**2) * dr. """
        dr = r - self.center[newaxis, :]
        y = sqrt(sum(dr**2, axis=1))
        yp = self.a2 / y
        rp = self.center[newaxis, :] + dr * (yp / y)[:, newaxis]
        return rp
| 1,946 | 23.037037 | 75 | py |
strees | strees-master/cst.py | """ Implementation of the charge simulation technique. """
import sys
from numpy import *
import pylab
import h5py
from scipy.sparse.linalg import LinearOperator, bicgstab, bicg, gmres
from refinement import Box, containing_box
import mpolar
from plotter import bounding_box, plot_projections
def paraboloid(a, ztip, nz, ntheta, phase_shift=False):
    """ Generates points on the paraboloid z - ztip = a * rho**2.

    Samples ntheta - 1 azimuthal points on each of nz - 1 circular cross
    sections between ztip and 0, plus the tip point itself.  If
    phase_shift is True the azimuthal points are rotated by half the
    angular spacing.  Returns a 3 x N array of coordinates.
    """
    azim = linspace(0, 2 * pi, ntheta)[:-1]
    heights = linspace(ztip, 0, nz + 1)[1:-1]
    if phase_shift:
        azim = azim + (azim[1] - azim[0]) / 2
    radius = sqrt((heights - ztip) / a)
    col = azim[:, newaxis]
    xs = radius * cos(col)
    ys = radius * sin(col)
    zs = heights + 0 * col
    pts = c_[r_[0, ravel(xs)], r_[0, ravel(ys)], r_[ztip, ravel(zs)]]
    return pts.T
def direct_with_electrode(r, q, reval, conductor_thickness):
    """ Potential at the points reval created by charges q at r (3 x n),
    including the mirror images through the z = 0 plane that represent a
    grounded planar electrode. """
    flip_z = array([1, 1, -1])[:, newaxis]
    r_full = concatenate((r, r * flip_z), axis=1)
    q_full = r_[q, -q]
    return mpolar.direct(r_full, q_full, reval, conductor_thickness)
def build_linop(r, reval, conductor_thickness):
    """ Wraps direct_with_electrode in a scipy LinearOperator that maps
    charges placed at r (3 x n) to potentials at reval (3 x m). """
    def matvec(q):
        return direct_with_electrode(r, q, reval, conductor_thickness)

    op_shape = (reval.shape[1], r.shape[1])
    return LinearOperator(op_shape, matvec, dtype='float64')
def build_poisson_matrix(r, reval, conductor_thickness):
    """ Placeholder for a dense-matrix version of build_linop.

    The original source left this def without a body, which made the
    whole module a syntax error; raise explicitly until implemented.
    """
    raise NotImplementedError("build_poisson_matrix is not implemented")
def main():
    """ Charge-simulation driver (Python 2 script: print statements).

    Loads the charges of one saved step, then solves for surface charges
    on a paraboloidal electrode that cancel the potential there, and
    plots the result.  Usage: python cst.py <file.h5> <step>.
    """
    ztip = -0.01
    # Collocation points on the electrode and phase-shifted points where
    # the potential is evaluated.
    rp = paraboloid(800.0, ztip, 16, 16)
    rp_phi = paraboloid(1.0, ztip, 16, 16, phase_shift=True)
    # NOTE(review): rp_phi uses curvature 1.0 while rp uses 800.0 --
    # confirm this difference is intentional and not a typo.
    np = rp.shape[1]
    print rp.shape, rp_phi.shape
    fname, step = sys.argv[1:3]
    fp = h5py.File(fname, "r")
    g = fp['main']
    r = array(g[step]['r'])
    q = array(g[step]['q'])
    phi0 = array(g[step]['phi'])
    CONDUCTOR_THICKNESS = fp['main'].attrs['conductor_thickness']
    MAX_CHARGES_PER_BOX = fp['main'].attrs['max_charges_per_box']
    MULTIPOLAR_TERMS = fp['main'].attrs['multipolar_terms']
    HAS_PLANAR_ELECTRODE = fp['main'].attrs['has_plane_electrode']
    print "%d charges" % len(q)
    # Shift the tree so that the electrode tip sits at z = ztip.
    r[:, 2] += ztip
    rt = concatenate((rp, r), axis=1)
    # 1. Compute the potential created at the sources and in the phi points
    # of the electrode
    phi = direct_with_electrode(r, q, rt, CONDUCTOR_THICKNESS)
    phip = phi[:np]
    print phip
    # 2. Solve the charges that will make phi zero in the paraboloid points.
    linop = build_linop(rp, rp_phi, CONDUCTOR_THICKNESS)
    qp, info = gmres(linop, -phip, tol=1e-6)
    print qp
    r0, r1 = bounding_box(rt)
    plot_projections(rt.T, r_[qp, 0 * q], r0, r1, plot3d=True)
    pylab.show()
if __name__ == '__main__':
main()
| 2,745 | 26.46 | 79 | py |
strees | strees-master/contexttimer.py | """ Using the with keyword to time code snippets. """
from contextlib import contextmanager
import time
import sys
# @contextmanager
# def ContextTimer(name="Unknown", outf=sys.stdout):
# t0 = time.time()
# outf.write("[%s ..." % name)
# outf.flush()
# yield
# t1 = time.time()
# outf.write(" completed (%f seconds)]\n" % (t1 - t0))
class ContextTimer(object):
    """ Context manager that times its body and reports to a stream.

    Usage:
        with ContextTimer("name") as ct:
            ...
        # ct.duration holds the elapsed seconds.

    Pass outf=None to time silently.  (Previously only __enter__ checked
    for None, so a silent timer raised AttributeError on exit.)
    """
    def __init__(self, name="Unknown", outf=sys.stdout):
        self.name = name
        self.outf = outf

    def __enter__(self):
        if self.outf is not None:
            self.outf.write("[%s ..." % self.name)
            self.outf.flush()
        self.t0 = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.t1 = time.time()
        self.duration = self.t1 - self.t0
        # Mirror the None check of __enter__.
        if self.outf is not None:
            self.outf.write(" completed (%f seconds)]\n" % self.duration)
def main():
    # Smoke test: time a long loop and print the measured duration.
    # (Python 2 only: uses xrange and the print statement.)
    with ContextTimer("Long loop") as ct:
        s = 0
        for i in xrange(10000000):
            s += i
    print s, ct.duration
if __name__ == '__main__':
main()
| 1,093 | 18.535714 | 69 | py |
strees | strees-master/parameters.py | """ This is the parameter description file. Basically it provides a namespace
where we store a function per input parameter. Each function receives a string
and is responsible for converting to the appropriate type, checking for
allowed values. The docstring of the function is a description of the
parameter. """
from readinput import positive, nonnegative, default
def run_name(s):
""" Name of the run. """
return s
def out_file(s):
""" File name (including path) of the .h5 output file. """
return s
def desc(s):
""" A comment to describe this simulation. It is ignored by the code. """
return s
@default(-1)
def random_seed(s):
    """ Seed for the random generator. If < 0 the system time is used (default)."""
    # Negative values are a sentinel interpreted by the simulation setup.
    return int(s)
def dummy(s):
    """ This parameter is completely ignored. It is used only to produce
    series of identical runs. """
    # Intentionally returns None: the parsed value is never used.
@positive
def max_charges_per_box(s):
""" Maximum number of charges per box in the FMM refinement scheme. """
return int(s)
@positive
def fmm_threshold(s):
""" Threshold in the number of charges between using the direct solver \
and FMM solver. """
return int(s)
@positive
def multipolar_terms(s):
""" Order of the multipole expansion in the FMM. """
return int(s)
def external_field(s):
""" Externally applied electric field in the z direction. """
return float(s)
@positive
def conductance(s):
""" Conductance of the channels. """
return float(s)
@positive
def maxwell_factor(s):
""" Maxwell factor for the potential and electric fields. In SI units
it is 1 / 4 pi epsilon_0"""
return float(s)
@positive
def tip_mobility(s):
""" Ratio between the tip velocity of each streamer and the local field. """
return float(s)
@default(0)
@nonnegative
def tip_min_field(s):
""" Minimum field at the tip for a streamer to propagate. """
return float(s)
@default(0)
@nonnegative
def initial_nodes(s):
""" Starts the simulation with a vertical string with this number of
charged nodes separated a distance CONDUCTOR_THICKNESS. """
return int(s)
@positive
def end_time(s):
""" Final time of the simulation. """
return float(s)
@positive
def time_step(s):
""" Timestep of the simulation. """
return float(s)
@positive
def conductor_thickness(s):
""" Thickness of the conductors for the thin-wire approximation. """
return float(s)
@nonnegative
def branching_probability(s):
""" Probability that a filament branches per unit distance travelled. """
return float(s)
@positive
def branching_sigma(s):
""" Standard deviation of the branching dispacement in the symmetric gaussian branching model. """
return float(s)
@default(0)
@nonnegative
def single_branching_time(s):
""" If nonzero, performs a single branching at the given time. """
return float(s)
@default(0)
def single_branching_z(s):
""" If nonzero, performs a single branching at the given z. """
return float(s)
@default(0)
def fixed_branching_angle(s):
""" If nonzero, fixes the angle between sibling branches. """
return float(s)
@default(False)
def branch_in_xz(s):
    """ If true, branches always within the XZ plane. """
    # Case-insensitive: only the spelling 'true' enables it; anything
    # else parses as False.
    return (s.lower() == 'true')
@default('null')
def electrode_geometry(s):
""" The electrode geometry. """
return s.lower()
@positive
def electrode_radius(s):
""" Radius of an spherical electrode. """
return float(s)
@default(0)
def electrode_potential(s):
""" Electrostatic potential of a (spherical) electrode. """
return float(s)
@default(1e10)
def max_step(s):
""" Longest step for a channel in dt. The timestep will be reduced
to satisfy this constraint"""
return float(s)
@default(True)
def end_with_reconnection(s):
""" If true, finishes when a reconnection is detected """
return (s.lower() == 'true')
| 3,918 | 22.467066 | 102 | py |
strees | strees-master/angles.py | """ Calculation of the branching angles of a tree. """
from numpy import *
from numpy.linalg import norm
import h5py
import datafile
def branching_angles(tr, r, **kwargs):
    """ Returns an array with the branching angles of the tree.

    Extra keyword arguments (max_n, skip_n) are forwarded to angle1.
    """
    branch_idx = tr.branches()
    result = zeros(branch_idx.shape)
    for k, idx in enumerate(branch_idx):
        result[k] = angle1(tr.segments[idx], r, **kwargs)
    return result
def angle1(segment, r, max_n=15, skip_n=0):
    """ Gets the angle that forms the branch at segment. Sees further by
    max_n segments. If there is another branch inside these segments,
    measures the angle only up to that branching point. """
    # Walk the two children of the branching point in lockstep.
    branches = [segment.children[0], segment.children[1]]
    origin = segment.get(r)
    # NOTE(review): `origin` is never used below; the reference points
    # are taken at iteration skip_n instead.
    for i in xrange(max_n):
        rb = [b.get(r) for b in branches]
        if i == skip_n:
            r0 = rb
        # Stop at the next branching point or at a terminal segment.
        if len(branches[0].children) != 1 or len(branches[1].children) != 1:
            break
        branches = [b.children[0] for b in branches]
    # NOTE(review): if the walk breaks before reaching iteration skip_n
    # (skip_n > 0 with an early branching/terminal), r0 is never bound
    # and this raises UnboundLocalError -- confirm callers guarantee
    # enough straight segments below each branching point.
    u = [ri - r0i for ri, r0i in zip(rb, r0)]
    return arccos(dot(u[0], u[1]) / norm(u[0]) / norm(u[1]))
def main():
    """ Stand-alone entry point: computes branching-angle statistics for
    the given step across one or more run files (Python 2 script). """
    import sys
    from contextlib import closing
    from optparse import OptionParser
    tfile = None
    # Run parameters echoed into the optional table file.
    params_str = ['external_field', 'branching_sigma', 'tip_mobility']
    parser = OptionParser()
    parser.add_option("--ofile", "-o", dest="ofile", type="str",
                      help="Output file", default=None)
    parser.add_option("--degrees", "-d", dest="degrees", action="store_true",
                      help="Use sexadecimal degrees instead of radians",
                      default=False)
    parser.add_option("--max", "-m", dest="max_n", action="store",
                      type="int",
                      help="Look at N segments below the branching point",
                      default=15)
    parser.add_option("--skip", "-s", dest="skip_n", action="store",
                      type="int",
                      help="Ignore N segments below the branching point",
                      default=0)
    parser.add_option("--table", "-t", dest="tfile", type="str",
                      help="Write a table in a file", default=None)
    (opts, args) = parser.parse_args()
    step = args[0]
    files = args[1:]
    if opts.tfile is not None:
        # Append mode: accumulate rows across invocations.
        tfile = open(opts.tfile, "a")
    for file in files:
        with closing(h5py.File(file)) as fp:
            tr, r = datafile.load_tree(fp, step)
            params = [fp['main'].attrs[k] for k in params_str]
            a = branching_angles(tr, r,
                                 max_n=opts.max_n,
                                 skip_n=opts.skip_n)
            if opts.degrees:
                a[:] = a * 180 / pi
            if opts.ofile is not None:
                savetxt(opts.ofile, a)
            avg = mean(a)
            sigma = std(a)
            if tfile:
                tfile.write("\t".join(str(x) for x in (params + [avg, sigma]))
                            + '\t# %s\n' % (file))
            print "[%s] mean = %g\tstd deviation = %g" % (file, avg, sigma)
    if tfile:
        tfile.close()
| 3,313 | 28.070175 | 80 | py |
strees | strees-master/plotter.py | import sys
import os, os.path
from optparse import OptionParser
from matplotlib.colors import LogNorm
import matplotlib as mpl
import scipy.constants as co
from scipy.stats import scoreatpercentile
from numpy import *
import h5py
import datafile
import cmaps
try:
import pylab
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors
except ImportError:
pass
kV_cm = co.kilo / co.centi
class Plotter(object):
    """ Loads a streamer-simulation .h5 file and plots per-step charge
    density and inner electric field projections.

    Two leftover Python 2 debug prints (the `single` option in __init__
    and the field range in plot_field) were removed.
    """
    def __init__(self, fname, **kwargs):
        self.fp = h5py.File(fname, "r")
        self.main = self.fp['main']
        self.run_name = self.main.attrs['run_name']
        self.external_field = self.main.attrs['external_field']
        self.external_field_vector = array([0.0, 0.0, self.external_field])
        self.conductor_thickness = self.main.attrs['conductor_thickness']
        self.ref_step = None
        self.branches = 0
        # Display options; keyword arguments override the defaults.
        options = dict(single=False, axisbg='#c7c0c0')
        options.update(kwargs)
        for key, value in options.iteritems():
            setattr(self, key, value)

    @property
    def steps(self):
        """ Names of all saved steps. """
        return self.main.keys()

    def __getitem__(self, key):
        return self.main[key]

    def set_ref(self, step):
        """ Uses `step` as reference to fix the bounding box and color
        scales shared by all subsequent plots. """
        self.ref_step = step
        r, q = self.charge_dens(step)
        rmid, efield = self.inner_field(step)
        self.r0, self.r1 = bounding_box(r)
        # Focus only on the positive charges
        aqmax = amax(q)
        #self.qmin, self.qmax = -aqmax, aqmax
        # To avoid large charges at the end of the channels, we truncate
        # the colorbar.
        self.qmin = -15 * scoreatpercentile(-q[q < 0], 50.)
        self.qmax = 5 * scoreatpercentile(q[q > 0], 50.)
        self.emin, self.emax = nanmin(efield), nanmax(efield)

    def charge_dens(self, step):
        """ Returns (r, q): node positions and linear charge density in
        nC/cm for `step`, dropping the endpoint of each branch. """
        tr, r = datafile.load_tree(self.fp, step)
        q = array(self.main[step]['q'])
        l = tr.lengths(r)
        # We drop the charges at the endpoints of each branch
        t = tr.terminals()
        n = len(t)
        q, r, l = q[:-n], r[:-n], l[:-n]
        # flt = l > 0.01 * self.conductor_thickness
        # q, r, l = q[flt], r[flt, :], l[flt]
        q = q / l
        q = where(isfinite(q), q, 0)
        return r, q / (co.nano / co.centi)

    def inner_field(self, step):
        """ Returns (r, E): node positions and the electric field inside
        the channels in kV/cm, computed from potential differences
        between each node and its parent. """
        tr, r = datafile.load_tree(self.fp, step)
        phi = array(self.main[step]['phi'])
        p = tr.parents()
        l = tr.lengths(r)
        midpoints = tr.midpoints(r)
        t = tr.terminals()
        self.branches = len(t)
        n = len(t)
        p, l, r, phi = p[:-n], l[:-n], r[:-n], phi[:-n]
        midpoints = midpoints[:-n]
        # Add the potential of the external (uniform) field.
        phi = phi - dot(r, self.external_field_vector)
        efields = (phi - phi[p]) / l
        return r, -efields / kV_cm

    def plot(self, step, **kwargs):
        """ Plots the charge-density projections of `step`. """
        if self.ref_step is None:
            self.set_ref(step)
        r, q = self.charge_dens(step)
        plot_projections(r / co.centi, q,
                         self.r0 / co.centi, self.r1 / co.centi,
                         vmin=self.qmin, vmax=self.qmax, plot3d=True,
                         reduce_range=True, dynamic=True,
                         label="Linear charge density [nC/cm]",
                         single=self.single,
                         axisbg=self.axisbg, **kwargs)
        # with reduce_range=35, we see the sign everywhere.
        savetxt("charge.dat", q)

    def plot_field(self, step):
        """ Plots the inner-field projections of `step`. """
        if self.ref_step is None:
            self.set_ref(step)
        midpoints, efield = self.inner_field(step)
        try:
            plot_projections(midpoints / co.centi,
                             efield,
                             self.r0 / co.centi, self.r1 / co.centi,
                             vmin=self.emin, vmax=self.emax,
                             plot3d=True, log=False,
                             dynamic=True,
                             label="Electric field, $E$ [kV/cm]",
                             single=self.single,
                             axisbg=self.axisbg)
        except ValueError:
            pass
def main():
    """ Command-line driver: plots the charge density (or, with --field,
    the inner electric field) of the requested steps of a run file.
    Python 2 script (print statements). """
    parser = OptionParser()
    parser.add_option("--ref", dest="ref", type="str",
                      help="The reference step", default=None)
    parser.add_option("--show", dest="show", action="store_true",
                      help="Open the matplotlib window?", default=False)
    parser.add_option("--field", dest="field", action="store_true",
                      help="Plot the electric field instead of the charge?",
                      default=False)
    # NOTE(review): the help text below looks copy-pasted from --ref.
    parser.add_option("--print-parameters", dest="print_parameters",
                      action="store_true",
                      help="The reference step", default=None)
    parser.add_option("--print-times", dest="print_times",
                      action="store_true",
                      help="Print real and simulated times for each step",
                      default=False)
    parser.add_option("--format", dest="format", type="str",
                      help="Format of the output figures", default='png')
    parser.add_option("--single", dest="single", action="store_true",
                      help="Plot only one projection", default=False)
    parser.add_option("--axisbg", dest="axisbg", type="str",
                      help="Background color", default='#eaeaea')
    (opts, args) = parser.parse_args()
    fname = args[0]
    plotter = Plotter(fname, single=opts.single, axisbg=opts.axisbg)
    try:
        os.mkdir(plotter.run_name)
    except OSError:
        pass
    steps = args[1:]
    if not steps:
        steps = plotter.steps
    # 'last' is a shortcut for the most recent saved step.
    steps = [s if s != 'last' else plotter.steps[-1] for s in steps]
    if opts.ref is not None:
        plotter.set_ref(opts.ref)
    else:
        plotter.set_ref(steps[-1])
    print "%s [%s] (%d steps)" % (plotter.run_name,
                                  plotter.main.attrs['ctime'], len(steps))
    if opts.print_parameters:
        for key, item in plotter.main.attrs.iteritems():
            print "%-30s =\t%s" % (key, repr(item))
    if opts.print_times:
        for i, step in enumerate(steps):
            t = plotter.main[step].attrs['t']
            timestamp = plotter.main[step].attrs['timestamp']
            print "%s\t%f\t%f" % (step, t, timestamp)
    if opts.single:
        mpl.rcParams['font.size'] = 22.0
        pylab.figure(figsize=(12.5, 11.5))
    else:
        pylab.figure(figsize=(13, 10.5))
    for i, step in enumerate(steps):
        if not opts.field:
            plotter.plot(step)
        else:
            plotter.plot_field(step)
        print ("[%s (%d)]" % (step, plotter.branches)),
        sys.stdout.flush()
        if not ((i + 1) % 10):
            print ''
        if opts.show:
            pylab.show()
        pylab.savefig(os.path.join(plotter.run_name,
                                   '%s_%s.%s' % (plotter.run_name, step,
                                                 opts.format)),
                      dpi=200)
def bounding_box(r, expand_r0=None, expand_r1=None):
    """ Returns the corners (r0, r1) of a cubic box containing the points
    r (n x 3).  The cube is centered on the data and its side equals the
    largest extent; expand_r0/expand_r1 scale the half-sides per axis on
    the low/high side respectively (default: no scaling). """
    if expand_r0 is None:
        expand_r0 = array([1, 1, 1])
    if expand_r1 is None:
        expand_r1 = array([1, 1, 1])
    lo, hi = amin(r, axis=0), amax(r, axis=0)
    center = 0.5 * (hi + lo)
    half = amax(hi - lo) * ones((3,)) / 2
    return center - expand_r0 * half, center + expand_r1 * half
def plot_projections(r, q, r0, r1, vmin=None, vmax=None, log=False,
                     plot3d=False, dynamic=False, label=None,
                     reduce_range=None, single=False,
                     axisbg='#404060', subplots=True):
    """ Scatter-plots the point cloud r (n x 3) colored by q on the XZ,
    YZ and XY planes (only YZ if single=True), clipped to the box
    [r0, r1], plus an optional 3D view and a shared colorbar. """
    X, Y, Z = 0, 1, 2
    # Slightly enlarge the box along z.
    r0 = r0 * array([1.0, 1.0, 1.1])
    names = ["X [cm]", "Y [cm]", "Z [cm]"]
    # (abscissa, ordinate, depth-sort axis) for each panel.
    axes = [(X, Z, Y), (Y, Z, X), (X, Y, Z)]
    if single:
        axes = [(Y, Z, X)]
    if subplots:
        pylab.subplots_adjust(left=0.1, wspace=0.35, hspace=0.2,
                              right=0.85, top=0.95)
    cmap = pylab.get_cmap("jet")
    #cmap = charge_cmap()
    extend = 'neither'
    if vmin is None or vmax is None:
        vmin = nanmin(q)
        vmax = nanmax(q)
    if reduce_range is not None:
        extend = 'both'
    if dynamic:
        # Align the colormap midpoint with q == 0.
        cmap = cmaps.get_colormap('bluered', dynamic=True)
        cmap.center = -vmin / (vmax - vmin)
    iplot = [1, 2, 3]
    for i, (d1, d2, d3) in enumerate(axes):
        # For gray use axisbg='#eeefef'
        if subplots:
            if not single:
                ax = pylab.subplot(2, 2, iplot[i], axisbg=axisbg) # was #404060
            else:
                ax = pylab.subplot(1, 1, 1, axisbg=axisbg) # was #404060
        else:
            ax = pylab.gca()
            ax.clear()
        ax.grid(ls='-', lw=1.0, c='#c0c0c0', zorder=-20)
        # Thu Aug 23 15:58:10 2012
        # I used this and --axisbg='aaaaaa' for the reconnection plot:
        #ax.grid(ls='-', lw=1.0, c='#888888', zorder=-20)
        norm = None if not log else LogNorm()
        # Sort by depth so nearer points are drawn last.
        isort = argsort(r[:, d3])
        pylab.scatter(r[isort, d1], r[isort, d2], c=q[isort],
                      s=14.5, faceted=False, vmin=vmin, vmax=vmax,
                      cmap=cmap, zorder=20, norm=norm),
        #pylab.colorbar()
        ax.set_xlabel(names[d1])
        ax.set_ylabel(names[d2])
        ax.set_xlim([r0[d1], r1[d1]])
        ax.set_ylim([r0[d2], r1[d2]])
    ax = pylab.axes([0.88, 0.1, 0.025, 0.85])
    cbar = pylab.colorbar(cax=ax, extend=extend)
    if label is not None:
        cbar.set_label(label)
    if plot3d and not single:
        ax = pylab.subplot(2, 2, 4, projection='3d')
        isort = argsort(dot(array([-1, 1, 0]), r.T))
        ax.scatter(r[isort, 0],
                   r[isort, 1],
                   r[isort, 2], zdir='z', c=q[isort],
                   s=9.5, faceted=False, vmin=vmin, vmax=vmax,
                   cmap=cmap, zorder=20, norm=norm)
        #ax.set_xlim3d([nanmin(r[:, X]), nanmax(r[:, X])])
        #ax.set_ylim3d([nanmin(r[:, Y]), nanmax(r[:, Y])])
        #ax.set_zlim3d([nanmin(r[:, Z]), nanmax(r[:, Z])])
        ax.set_xlim3d([r0[X], r1[X]])
        ax.set_xlabel("X [cm]")
        ax.set_ylim3d([r0[Y], r1[Y]])
        ax.set_ylabel("Y [cm]")
        ax.set_zlim3d([r0[Z], r1[Z]])
        ax.set_zlabel("Z [cm]")
def charge_cmap():
    """ Builds the blue-white-red style colormap originally used for the
    charge density (currently referenced only from commented-out code in
    plot_projections). """
    segments = {
        'red': ((0.0, 0.0, 0.0),
                (0.5, 1.0, 1.0),
                (1.0, 1.0, 1.0)),
        'green': ((0.0, 0.0, 0.0),
                  (0.5, 0.9, 0.9),
                  (1.0, 0.0, 0.0)),
        'blue': ((0.0, 0.0, 1.0),
                 (0.5, 0.5, 0.5),
                 (1.0, 0.0, 0.0)),
    }
    return matplotlib.colors.LinearSegmentedColormap('my_colormap',
                                                     segments, 256)
if __name__ == '__main__':
main()
| 11,230 | 28.555263 | 82 | py |
strees | strees-master/cmaps.py | from matplotlib.colors import Colormap
import matplotlib.cm as cm
import matplotlib.colors as colors
from numpy import where
def get_colormap(cmap, dynamic=False):
    """ Returns the colormap named by the string `cmap`, or None if the
    name is unknown (after emitting a warning) or `cmap` is falsy.  If
    dynamic is True the result is wrapped in a DynamicColormap. """
    cmaps = {'hot': cm.hot,
             'autumn': cm.autumn,
             'bone': cm.bone,
             'cool': cm.cool,
             'copper': cm.copper,
             'flag': cm.flag,
             'gray': cm.gray,
             'hsv': cm.hsv,
             'jet': cm.jet,
             'pink': cm.pink,
             'prism': cm.prism,
             'spring': cm.spring,
             'summer': cm.summer,
             'winter': cm.winter,
             'YlOrRd': cm.YlOrRd,
             'invjet': invjet,
             'invhot': invhot,
             'blue': blue,
             'hottest': hottest,
             'invhottest': InvertedColormap(hottest),
             'darkhot': darkhot,
             'invdarkhot': InvertedColormap(darkhot),
             'darkred': darkred,
             'invdarkred': InvertedColormap(darkred),
             'coolhot': coolhot,
             'reddish': reddish,
             'bluered': bluered,
             #'spectra': spectra
             }
    if cmap:
        try:
            cmap = cmaps[cmap]
        except KeyError:
            # Bug fix: `warn` was called here without ever being
            # imported, turning an unknown colormap name into a
            # NameError instead of a warning.
            import warnings
            warnings.warn("Colormap `%s' unknown: using default colormap"
                          % cmap)
            return None
    if dynamic:
        return DynamicColormap(cmap)
    else:
        return cmap
class InvertedColormap(Colormap):
    """ Wraps another colormap and samples it with a reversed argument:
    InvertedColormap(cm)(x) == cm(1 - x).  Every other attribute is
    delegated to the wrapped colormap. """
    def __init__(self, cm):
        self._cm = cm
    def __getattr__(self, name):
        # Only invoked for attributes not found on this instance.
        return getattr(self._cm, name)
    def __call__(self, X, alpha=1.0, **kwargs):
        return self._cm(1.0 - X, alpha=alpha, **kwargs)
class DynamicColormap(Colormap):
    """ Colormap wrapper that remaps [0, 1] so that `center` is pulled to
    the midpoint of the basis colormap (used to align the color midpoint
    with zero charge).

    Cleanup: the commented-out delegation and scalar remapping variants
    left in the original were removed; behavior is unchanged.
    """
    def __init__(self, basis, center=0.5):
        self.center = center
        self.basis = basis
        # Copy the basis' instance attributes (name, N, ...) so this
        # object can stand in for a Colormap without per-attribute
        # delegation.
        self.__dict__.update(self.basis.__dict__)

    def __call__(self, X, **kwargs):
        # Piecewise-linear remap: [0, center] -> [0, 0.5] and
        # [center, 1] -> [0.5, 1]; works elementwise on arrays.
        Y = where(X < self.center,
                  0.5 * X / self.center,
                  0.5 * (1 + (X - self.center) / (1 - self.center)))
        return self.basis(Y, **kwargs)
def combine_data(low, high):
    """ Merges two colormap segment-data dicts into one: `low` is
    mirrored into [0, 0.5] and `high` is mapped into [0.5, 1]. """
    combined = {}
    for channel in ('blue', 'red', 'green'):
        lower = [(0.5 * (1.0 - x), y, z)
                 for x, y, z in reversed(low[channel])]
        upper = [(0.5 * (1.0 + x), y, z) for x, y, z in high[channel]]
        combined[channel] = tuple(lower + upper)
    return combined
def swap_data(data, col1, col2):
    """ Returns a copy of `data` with the values of keys col1 and col2
    exchanged; `data` itself is not modified. """
    swapped = dict(data)
    swapped[col1], swapped[col2] = data[col2], data[col1]
    return swapped
# Some colormaps definitions
_blue_data = {'blue': ((0., 0.0416, 0.0416),
(0.365079, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'red': ((0., 0., 0.),
(0.746032, 0.000000, 0.000000),
(1.0, 1.0, 1.0))}
_darkhot_data = {'red': ((0., 0.0416, 0.0416),(0.565079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.565079, 0.000000, 0.000000),
(0.9, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.9, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hottest_data = {'red': ((0., 0.0416, 0.0416),
(0.365079, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),
(0.646032, 0.200000, 0.200000),
(0.846032, 0.400000, 0.400000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),
(0.446032, 0.000000, 0.000000),
(1.0, 1.0, 1.0))}
_darkred_data = {'red': ((0., 0.6, 0.6), (1.0, 0.6, 0.6)),
'green': ((0., 0., 0.), (1.0, 1.0, 0.)),
'blue': ((0., 0., 0.), (1.0, 1.0, 0.))}
_reddish_data = {'red': ((0., 1.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.6, 0.6)),
'green': ((0., 1.0, 1.0), (0.6, 0.6, 0.6), (1.0, 0., 0.)),
'blue': ((0., 1.0, 1.0), (0.4, 0., 0.), (1.0, 0, 0))}
_redhot_data = {'red': ((0., 1.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.6, 0.6)),
'green': ((0., 1.0, 1.0), (0.6, 0.6, 0.6), (1.0, 0., 0.)),
'blue': ((0., 1.0, 1.0), (0.4, 0., 0.), (1.0, 0, 0))}
_greyred_data = {'red': ((0., 1.0, 1.0), (0.25, 1.0, 1.0), (1.0, 0.5, 0.5)),
'green': ((0., 0.9, 0.9), (0.25, 0.2, 0.2), (1.0, 0., 0.)),
'blue': ((0., 0.4, 0.4), (0.25, 0.0, 0.0), (1.0, 0, 0))}
_bluegrey_data = {'red': ((0., 1.0, 1.0), (0.15, 0.0, 0.0), (1.0, 0., 0.)),
'green': ((0., 0.9, 0.9), (0.33, 0.0, 0.0), (1.0, 0., 0.)),
'blue': ((0., 0.4, 0.4), (0.33, 1.0, 1.0), (1.0, 0.5, 0.5))}
_bluish_data = swap_data(_reddish_data, 'red', 'blue')
_coolhot_data = combine_data(_bluish_data, _reddish_data)
_bluered_data = combine_data(_bluegrey_data, _greyred_data)
invjet = InvertedColormap(cm.jet)
invhot = InvertedColormap(cm.hot)
blue = colors.LinearSegmentedColormap('blue', _blue_data, cm.LUTSIZE)
hottest = colors.LinearSegmentedColormap('hottest', _hottest_data, cm.LUTSIZE)
darkhot = colors.LinearSegmentedColormap('darkhot', _darkhot_data, cm.LUTSIZE)
darkred = colors.LinearSegmentedColormap('darkred', _darkred_data, cm.LUTSIZE)
coolhot = colors.LinearSegmentedColormap('coolhot', _coolhot_data, cm.LUTSIZE)
reddish = colors.LinearSegmentedColormap('reddish', _reddish_data, cm.LUTSIZE)
bluered = colors.LinearSegmentedColormap('bluered', _bluered_data, cm.LUTSIZE)
| 6,305 | 34.426966 | 97 | py |
strees | strees-master/branch.py | """ Extract the shape of a branch.
Mostly useful in single-branch simulations."""
from numpy import *
from numpy.linalg import norm
from scipy.optimize import curve_fit
import h5py
import pylab
import datafile
def extract_branch(tr, r, **kwargs):
    """ Returns (z, d): for the single branching of the tree, the depths
    below the branching point and the corresponding distances between the
    two sibling branches at each step down (kwargs currently unused). """
    ibranch = tr.branches()
    if len(ibranch) > 1:
        raise ValueError("I get confused with more than a single branch")
    ib = ibranch[0]
    segment = tr.segments[ib]
    # Walk the two children of the branching point in lockstep.
    branches = [segment.children[0], segment.children[1]]
    z0 = segment.get(r)[2]
    d = []
    z = []
    while True:
        rb = [b.get(r) for b in branches]
        # Stop at the next branching point or terminal segment.
        if len(branches[0].children) != 1 or len(branches[1].children) != 1:
            break
        branches = [b.children[0] for b in branches]
        # Depth below the branching point (z decreases downwards) and
        # separation between the two siblings.
        z.append(z0 - rb[0][2])
        d.append(norm(rb[0] - rb[1]))
    return array(z), array(d)
def analysis(z, y):
    """ Perform some analysis. Whatever I think of at the moment.

    Plots y(z), estimates a linear asymptote from the last 100 samples
    (assumes len(z) >= 100) and fits a hyperbola. Python 2 (print
    statements). """
    print "-" * 20
    # First plot the dat
    pylab.plot(z, y, lw=1.7, c='k')
    pylab.grid(ls='-', c='#999999')
    # asymptote
    dy = y[-1] - y[-100]
    dz = z[-1] - z[-100]
    a = dy / dz
    b = y[-100] - z[-100] * dy / dz
    print "Asymtote y = a * z + b"
    print " a = %f" % a
    print " b = %f" % b
    pylab.plot(z, a*z + b)
    # Fit to an hyperbola
    def f(x, a, b):
        return a * sqrt(x * abs((x - b)))
    popt, pcov = curve_fit(f, z, y, [dy / dz, -1.0], maxfev=50000)
    print "Hyperbola"
    print popt
    pylab.plot(z, f(z, *popt))
    pylab.show()
def main():
    """ Stand-alone entry point: extracts the inter-branch distance
    profile of the given step for each file and optionally saves or
    analyses it.  (The original docstring was copy-pasted from
    angles.py.) """
    import sys
    from contextlib import closing
    from optparse import OptionParser
    tfile = None
    params_str = ['external_field', 'branching_sigma', 'tip_mobility']
    parser = OptionParser()
    parser.add_option("--ofile", "-o", dest="ofile", type="str",
                      help="Output file", default=None)
    parser.add_option("--analysis", "-a", dest="analysis", action="store_true",
                      help="Perform some analysis of the data", default=False)
    (opts, args) = parser.parse_args()
    step = args[0]
    files = args[1:]
    for file in files:
        with closing(h5py.File(file)) as fp:
            tr, r = datafile.load_tree(fp, step)
            params = [fp['main'].attrs[k] for k in params_str]
            z, d = extract_branch(tr, r)
            if opts.ofile is not None:
                savetxt(opts.ofile, c_[z, d])
            if opts.analysis:
                analysis(z, d)
main()
| 2,815 | 22.864407 | 80 | py |
strees | strees-master/submit.py | """ A python script to submit to the qsub queue. """
import sys
import os
from optparse import OptionParser
from subprocess import call
DEF_QUEUE = 'exe-x86_64'
def encode_envlist(d):
    # Renders {'A': 1, 'B': 2} as "A=1,B=2" for qsub's -v option.
    # (Python 2: dict.iteritems.)
    return ','.join('%s=%s' % (key, item) for key, item in d.iteritems())
def submit(ifile, queue, onlyprint=False):
    """ Submits one input file to the given qsub queue, or just prints
    the qsub command when onlyprint is True.  The input file and code
    paths are passed to qrun.sh through environment variables. """
    mypath = os.path.split(os.path.realpath(sys.argv[0]))[0]
    ipath = os.path.split(os.path.realpath(ifile))[0]
    env = encode_envlist(dict(FMM_INPUT_FILE=os.path.join(ipath, ifile),
                              FMM_PATH=mypath))
    # Job id: the input file name without directory or extension.
    runid = os.path.splitext(os.path.basename(ifile))[0]
    script = os.path.join(mypath, 'qrun.sh')
    args = {'-N': runid,
            '-j': 'oe',
            '-q': queue,
            '-o': 'localhost:%s' % os.path.join(ipath, runid + '.out'),
            '-v': env}
    cmd = ('qsub %s %s'
           % (' '.join('%s %s' % (key, item)
                       for key, item in args.iteritems()),
              script))
    if not onlyprint:
        # NOTE(review): shell=True with an interpolated command; safe only
        # as long as file names come from a trusted local user.
        call(cmd, shell=True)
    else:
        print(cmd)
def completed(ifile):
    """ True if the run output (<runid>.h5 next to ifile) exists and is
    newer than the input file; False otherwise. """
    stem = os.path.splitext(os.path.basename(ifile))[0]
    directory = os.path.split(os.path.realpath(ifile))[0]
    ofile = os.path.join(directory, stem + '.h5')
    if not os.path.exists(ofile):
        return False
    return os.stat(ofile).st_mtime > os.stat(ifile).st_mtime
def main():
    """ Submits every input file given on the command line, optionally
    skipping those whose output .h5 is already up to date. """
    parser = OptionParser()
    parser.add_option("--queue", "-q", dest="queue", type="str",
                      help="Queue to submit to", default=DEF_QUEUE)
    parser.add_option("--check-completed", "-c", dest="check",
                      action="store_true",
                      help="Check for a .h5 file to see if the code has already run",
                      default=False)
    parser.add_option("--only-print", "-p", dest="onlyprint",
                      action="store_true",
                      help="Just print commands, do nothing.",
                      default=False)
    (opts, args) = parser.parse_args()
    for ifile in args:
        if opts.check and completed(ifile):
            print("Skipping %s due to an existing and newer .h5 file"
                  % ifile)
            continue
        submit(ifile, queue=opts.queue, onlyprint=opts.onlyprint)
| 2,385 | 26.744186 | 86 | py |
strees | strees-master/propagation.py | import sys
import os, os.path
from optparse import OptionParser
from matplotlib.colors import LogNorm
from numpy import *
import h5py
try:
import pylab
except ImportError:
pass
def main():
    """ Extracts the streamer front position r(t) and velocity v(t) from
    a run file; optionally plots them and/or saves r(t) to a text file.
    Usage: propagation.py [options] file.h5
    """
    parser = OptionParser()
    parser.add_option("--show", dest="show", action="store_true",
                      help="Open the matplotlib window?", default=False)
    parser.add_option("--ofile", "-o", dest="ofile", action="store",
                      help="Save to this file", default=None)
    parser.add_option("--endt", "-t", dest="endt", action="store",
                      type=float,
                      help="Final time", default=None)
    (opts, args) = parser.parse_args()
    fname = args[0]
    fp = h5py.File(fname, "r")
    # Renamed from `main` to avoid shadowing this function's name.
    g = fp['main']
    run_name = g.attrs['run_name']
    steps = g.keys()
    t = zeros((len(steps)),)
    r = zeros((len(steps)),)
    try:
        for i, step in enumerate(steps):
            ri = array(g[step]['r'])
            t[i] = g[step].attrs['t']
            # Front position: largest distance of any node from origin.
            r[i] = amax(sqrt(sum(ri**2, axis=1)))
    except KeyError:
        # Truncated runs may lack datasets in the last step(s); keep
        # whatever was read so far.
        pass
    if opts.endt is not None:
        # Bug fix: this previously referenced the undefined name `endt`,
        # raising NameError whenever --endt was given.
        t, r = t[t <= opts.endt], r[t <= opts.endt]
    dt = t[1] - t[0]
    v = diff(r) / dt
    tmid = 0.5 * (t[1:] + t[:-1])
    if opts.show:
        pylab.figure(1)
        pylab.xlabel("t")
        pylab.ylabel("r")
        pylab.plot(t, r, lw=1.7)
        pylab.figure(2)
        pylab.xlabel("t")
        pylab.ylabel("v")
        pylab.plot(tmid, v, lw=1.7)
        pylab.loglog()
        pylab.show()
    if opts.ofile is not None:
        savetxt(opts.ofile, c_[t, r])
def simple_regression(xi, yi):
    """ Least-squares fit of yi ~ a * xi + b; returns (a, b). """
    design = ones((len(yi), 2), dtype=float)
    design[:, 0] = xi[:]
    coeffs = linalg.lstsq(design, yi)[0]
    return coeffs[0], coeffs[1]
if __name__ == '__main__':
main()
| 2,028 | 19.917526 | 72 | py |
strees | strees-master/list.py | import sys
import os, os.path
from optparse import OptionParser
from numpy import *
import h5py
def main():
    """ Prints run name, all stored parameters and the last saved step of
    each .h5 file given on the command line (Python 2: print
    statements). """
    parser = OptionParser()
    (opts, args) = parser.parse_args()
    for fname in args:
        fp = h5py.File(fname, "r")
        # Note: the local `main` shadows this function's own name.
        main = fp['main']
        run_name = main.attrs['run_name']
        steps = main.keys()
        print "%s [%s]" % (fname, run_name)
        for key, item in main.attrs.iteritems():
            print "\t%-30s =\t%s" % (key, repr(item))
        print "\tLast step: %s [t=%g]" % (steps[-1],
                                          main[steps[-1]].attrs['t'])
if __name__ == '__main__':
main()
| 664 | 19.78125 | 69 | py |
strees | strees-master/src/setup.py | #! /usr/bin/python
import distutils.sysconfig
from distutils.core import setup, Extension
import numpy as np
# C extension with the multipolar (FMM) routines; links against object
# files pre-built in this directory by the project Makefile.
numod = Extension('mpolar',
                  sources = ['mpolarmod.c'],
                  extra_objects = ['misc.o', 'multipol.o', 'efield.o'],
                  include_dirs = [np.get_include(),
                                  distutils.sysconfig.get_python_inc()]
                  )
setup (ext_modules = [numod],
       name = 'mpolar',
       version = '1.0',
       description = 'Multipolar expansions',
       author = 'Alejandro Luque',
       author_email = 'aluque@iaa.es')
| 599 | 29 | 71 | py |
strees | strees-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# strees documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 13 15:04:29 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'strees'
copyright = u'2013, Alejandro Luque'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'streesdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'strees.tex', u'strees Documentation',
u'Alejandro Luque', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'strees', u'strees Documentation',
[u'Alejandro Luque'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'strees', u'strees Documentation',
u'Alejandro Luque', 'strees', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 7,832 | 30.971429 | 80 | py |
COAT | COAT-main/engine.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import math
import sys
from copy import deepcopy
import torch
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from eval_func import eval_detection, eval_search_cuhk, eval_search_prw
from utils.utils import MetricLogger, SmoothedValue, mkdir, reduce_dict, warmup_lr_scheduler
from utils.transforms import mixup_data
def to_device(images, targets, device):
    """Move a batch onto *device*.

    Returns a new list with each image tensor transferred, together with the
    same target dicts, whose "boxes" and "labels" tensors are replaced
    in place by their on-device counterparts.
    """
    images = [im.to(device) for im in images]
    for tgt in targets:
        for key in ("boxes", "labels"):
            tgt[key] = tgt[key].to(device)
    return images, targets
def train_one_epoch(cfg, model, optimizer, data_loader, device, epoch, tfboard, softmax_criterion_s2, softmax_criterion_s3):
    """Run one epoch of training.

    Args:
        cfg: yacs config node controlling loss weights, mixup, clipping, etc.
        model: person-search model; in train mode it returns a loss dict plus
            the 2nd/3rd-stage re-id features and their targets.
        optimizer: torch optimizer updated once per batch.
        data_loader: training loader yielding (images, targets) batches.
        device: device to move each batch onto.
        epoch (int): 0-based epoch index; epoch 0 uses a warmup LR schedule.
        tfboard: tensorboard writer (or falsy to disable scalar logging).
        softmax_criterion_s2 / softmax_criterion_s3: extra softmax losses on
            the 2nd- and 3rd-stage re-id features, applied only when
            cfg.MODEL.LOSS.USE_SOFTMAX is set.
    """
    model.train()
    metric_logger = MetricLogger(delimiter=" ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = "Epoch: [{}]".format(epoch)

    # warmup learning rate in the first epoch
    if epoch == 0:
        warmup_factor = 1.0 / 1000
        warmup_iters = len(data_loader) - 1
        warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)

    for i, (images, targets) in enumerate(
        metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)
    ):
        images, targets = to_device(images, targets, device)

        # if using image based data augmentation
        if cfg.INPUT.IMAGE_MIXUP:
            images = mixup_data(images, alpha=0.8)

        loss_dict, feats_reid_2nd, targets_reid_2nd, feats_reid_3rd, targets_reid_3rd = model(images, targets)
        # Optional softmax re-id losses, weighted per stage via the config.
        if cfg.MODEL.LOSS.USE_SOFTMAX:
            softmax_loss_2nd = cfg.SOLVER.LW_RCNN_SOFTMAX_2ND * softmax_criterion_s2(feats_reid_2nd, targets_reid_2nd)
            softmax_loss_3rd = cfg.SOLVER.LW_RCNN_SOFTMAX_3RD * softmax_criterion_s3(feats_reid_3rd, targets_reid_3rd)
            loss_dict.update(loss_box_softmax_2nd=softmax_loss_2nd)
            loss_dict.update(loss_box_softmax_3rd=softmax_loss_3rd)
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        # Abort on NaN/inf loss rather than continue training on garbage.
        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        # Clip gradients only when a positive threshold is configured.
        if cfg.SOLVER.CLIP_GRADIENTS > 0:
            clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)
        optimizer.step()

        # Advance the warmup schedule once per batch during the first epoch.
        if epoch == 0:
            warmup_scheduler.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if tfboard:
            # NOTE(review): 'iter' shadows the builtin; it is only the global
            # step index for tensorboard.
            iter = epoch * len(data_loader) + i
            for k, v in loss_dict_reduced.items():
                tfboard.add_scalars("train", {k: v}, iter)
@torch.no_grad()
def evaluate_performance(
    model, gallery_loader, query_loader, device, use_gt=False, use_cache=False, use_cbgm=False, gallery_size=100):
    """
    Evaluate detection and person-search performance, printing the metrics.

    Extracts detections + embeddings for every gallery and query image (or
    loads them from data/eval_cache/eval_cache.pth), then runs
    eval_detection and the dataset-appropriate search evaluation.

    Args:
        use_gt (bool, optional): Whether to use GT as detection results to verify the upper
            bound of person search performance. Defaults to False.
        use_cache (bool, optional): Whether to use the cached features. Defaults to False.
        use_cbgm (bool, optional): Whether to use Context Bipartite Graph Matching algorithm.
            Defaults to False.
    """
    model.eval()
    if use_cache:
        # Reuse features/detections extracted by a previous run.
        eval_cache = torch.load("data/eval_cache/eval_cache.pth")
        gallery_dets = eval_cache["gallery_dets"]
        gallery_feats = eval_cache["gallery_feats"]
        query_dets = eval_cache["query_dets"]
        query_feats = eval_cache["query_feats"]
        query_box_feats = eval_cache["query_box_feats"]
    else:
        # Pass 1: detections + embeddings for every gallery image.
        gallery_dets, gallery_feats = [], []
        for images, targets in tqdm(gallery_loader, ncols=0):
            images, targets = to_device(images, targets, device)
            if not use_gt:
                outputs = model(images)
            else:
                # Feed GT boxes as the "detections" (oracle detector).
                boxes = targets[0]["boxes"]
                n_boxes = boxes.size(0)
                embeddings = model(images, targets)
                outputs = [
                    {
                        "boxes": boxes,
                        "embeddings": torch.cat(embeddings),
                        "labels": torch.ones(n_boxes).to(device),
                        "scores": torch.ones(n_boxes).to(device),
                    }
                ]

            for output in outputs:
                # Store each detection as [x1, y1, x2, y2, score].
                box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
                gallery_dets.append(box_w_scores.cpu().numpy())
                gallery_feats.append(output["embeddings"].cpu().numpy())

        # regarding query image as gallery to detect all people
        # i.e. query person + surrounding people (context information)
        query_dets, query_feats = [], []
        for images, targets in tqdm(query_loader, ncols=0):
            images, targets = to_device(images, targets, device)
            # targets will be modified in the model, so deepcopy it
            outputs = model(images, deepcopy(targets), query_img_as_gallery=True)

            # consistency check
            gt_box = targets[0]["boxes"].squeeze()
            assert (
                gt_box - outputs[0]["boxes"][0]
            ).sum() <= 0.001, "GT box must be the first one in the detected boxes of query image"

            for output in outputs:
                box_w_scores = torch.cat([output["boxes"], output["scores"].unsqueeze(1)], dim=1)
                query_dets.append(box_w_scores.cpu().numpy())
                query_feats.append(output["embeddings"].cpu().numpy())

        # extract the features of query boxes
        query_box_feats = []
        for images, targets in tqdm(query_loader, ncols=0):
            images, targets = to_device(images, targets, device)
            embeddings = model(images, targets)
            assert len(embeddings) == 1, "batch size in test phase should be 1"
            query_box_feats.append(embeddings[0].cpu().numpy())

        # Persist everything so later runs can pass use_cache=True.
        mkdir("data/eval_cache")
        save_dict = {
            "gallery_dets": gallery_dets,
            "gallery_feats": gallery_feats,
            "query_dets": query_dets,
            "query_feats": query_feats,
            "query_box_feats": query_box_feats,
        }
        torch.save(save_dict, "data/eval_cache/eval_cache.pth")

    eval_detection(gallery_loader.dataset, gallery_dets, det_thresh=0.01)
    # CUHK-SYSU uses its size-controlled protocol; every other dataset (PRW)
    # uses the full-gallery protocol.
    eval_search_func = (
        eval_search_cuhk if gallery_loader.dataset.name == "CUHK-SYSU" else eval_search_prw
    )
    eval_search_func(
        gallery_loader.dataset,
        query_loader.dataset,
        gallery_dets,
        gallery_feats,
        query_box_feats,
        query_dets,
        query_feats,
        cbgm=use_cbgm,
        gallery_size=gallery_size,
    )
| 7,288 | 39.494444 | 124 | py |
COAT | COAT-main/defaults.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
from yacs.config import CfgNode as CN
_C = CN()
# -------------------------------------------------------- #
# Input #
# -------------------------------------------------------- #
_C.INPUT = CN()
_C.INPUT.DATASET = "CUHK-SYSU"
_C.INPUT.DATA_ROOT = "data/CUHK-SYSU"
# Size of the smallest side of the image
_C.INPUT.MIN_SIZE = 900
# Maximum size of the side of the image
_C.INPUT.MAX_SIZE = 1500
# Number of images per batch
_C.INPUT.BATCH_SIZE_TRAIN = 5
_C.INPUT.BATCH_SIZE_TEST = 1
# Number of data loading threads
_C.INPUT.NUM_WORKERS_TRAIN = 5
_C.INPUT.NUM_WORKERS_TEST = 1
# Image augmentation
_C.INPUT.IMAGE_CUTOUT = False
_C.INPUT.IMAGE_ERASE = False
_C.INPUT.IMAGE_MIXUP = False
# -------------------------------------------------------- #
# GRID #
# -------------------------------------------------------- #
_C.INPUT.IMAGE_GRID = False
_C.GRID = CN()
_C.GRID.ROTATE = 1
_C.GRID.OFFSET = 0
_C.GRID.RATIO = 0.5
_C.GRID.MODE = 1
_C.GRID.PROB = 0.5
# -------------------------------------------------------- #
# Solver #
# -------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_EPOCHS = 13
# Learning rate settings
_C.SOLVER.BASE_LR = 0.003
# The epoch milestones to decrease the learning rate by GAMMA
_C.SOLVER.LR_DECAY_MILESTONES = [10, 14]
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.SGD_MOMENTUM = 0.9
# Loss weight of RPN regression
_C.SOLVER.LW_RPN_REG = 1
# Loss weight of RPN classification
_C.SOLVER.LW_RPN_CLS = 1
# Loss weight of Cascade R-CNN and Re-ID (OIM)
_C.SOLVER.LW_RCNN_REG_1ST = 10
_C.SOLVER.LW_RCNN_CLS_1ST = 1
_C.SOLVER.LW_RCNN_REG_2ND = 10
_C.SOLVER.LW_RCNN_CLS_2ND = 1
_C.SOLVER.LW_RCNN_REG_3RD = 10
_C.SOLVER.LW_RCNN_CLS_3RD = 1
_C.SOLVER.LW_RCNN_REID_2ND = 0.5
_C.SOLVER.LW_RCNN_REID_3RD = 0.5
# Loss weight of box reid, softmax loss
_C.SOLVER.LW_RCNN_SOFTMAX_2ND = 0.5
_C.SOLVER.LW_RCNN_SOFTMAX_3RD = 0.5
# Set to negative value to disable gradient clipping
_C.SOLVER.CLIP_GRADIENTS = 10.0
# -------------------------------------------------------- #
# RPN #
# -------------------------------------------------------- #
_C.MODEL = CN()
_C.MODEL.RPN = CN()
# NMS threshold used on RoIs
_C.MODEL.RPN.NMS_THRESH = 0.7
# Number of anchors per image used to train RPN
_C.MODEL.RPN.BATCH_SIZE_TRAIN = 256
# Target fraction of foreground examples per RPN minibatch
_C.MODEL.RPN.POS_FRAC_TRAIN = 0.5
# Overlap threshold for an anchor to be considered foreground (if >= POS_THRESH_TRAIN)
_C.MODEL.RPN.POS_THRESH_TRAIN = 0.7
# Overlap threshold for an anchor to be considered background (if < NEG_THRESH_TRAIN)
_C.MODEL.RPN.NEG_THRESH_TRAIN = 0.3
# Number of top scoring RPN RoIs to keep before applying NMS
_C.MODEL.RPN.PRE_NMS_TOPN_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOPN_TEST = 6000
# Number of top scoring RPN RoIs to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOPN_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOPN_TEST = 300
# -------------------------------------------------------- #
# RoI head #
# -------------------------------------------------------- #
_C.MODEL.ROI_HEAD = CN()
# Whether to use bn neck (i.e. batch normalization after linear)
_C.MODEL.ROI_HEAD.BN_NECK = True
# Number of RoIs per image used to train RoI head
_C.MODEL.ROI_HEAD.BATCH_SIZE_TRAIN = 128
# Target fraction of foreground examples per RoI minibatch
_C.MODEL.ROI_HEAD.POS_FRAC_TRAIN = 0.25 # 0.5
_C.MODEL.ROI_HEAD.USE_DIFF_THRESH = True
# Overlap threshold for an RoI to be considered foreground (if >= POS_THRESH_TRAIN)
_C.MODEL.ROI_HEAD.POS_THRESH_TRAIN = 0.5
_C.MODEL.ROI_HEAD.POS_THRESH_TRAIN_2ND = 0.6
_C.MODEL.ROI_HEAD.POS_THRESH_TRAIN_3RD = 0.7
# Overlap threshold for an RoI to be considered background (if < NEG_THRESH_TRAIN)
_C.MODEL.ROI_HEAD.NEG_THRESH_TRAIN = 0.5
_C.MODEL.ROI_HEAD.NEG_THRESH_TRAIN_2ND = 0.6
_C.MODEL.ROI_HEAD.NEG_THRESH_TRAIN_3RD = 0.7
# Minimum score threshold
_C.MODEL.ROI_HEAD.SCORE_THRESH_TEST = 0.5
# NMS threshold used on boxes
_C.MODEL.ROI_HEAD.NMS_THRESH_TEST = 0.4
_C.MODEL.ROI_HEAD.NMS_THRESH_TEST_1ST = 0.4
_C.MODEL.ROI_HEAD.NMS_THRESH_TEST_2ND = 0.4
_C.MODEL.ROI_HEAD.NMS_THRESH_TEST_3RD = 0.5
# Maximum number of detected objects
_C.MODEL.ROI_HEAD.DETECTIONS_PER_IMAGE_TEST = 300
# -------------------------------------------------------- #
# Transformer head #
# -------------------------------------------------------- #
_C.MODEL.TRANSFORMER = CN()
_C.MODEL.TRANSFORMER.DIM_MODEL = 512
_C.MODEL.TRANSFORMER.ENCODER_LAYERS = 1
_C.MODEL.TRANSFORMER.N_HEAD = 8
_C.MODEL.TRANSFORMER.USE_OUTPUT_LAYER = False
_C.MODEL.TRANSFORMER.DROPOUT = 0.
_C.MODEL.TRANSFORMER.USE_LOCAL_SHORTCUT = True
_C.MODEL.TRANSFORMER.USE_GLOBAL_SHORTCUT = True
_C.MODEL.TRANSFORMER.USE_DIFF_SCALE = True
_C.MODEL.TRANSFORMER.NAMES_1ST = ['scale1','scale2']
_C.MODEL.TRANSFORMER.NAMES_2ND = ['scale1','scale2']
_C.MODEL.TRANSFORMER.NAMES_3RD = ['scale1','scale2']
_C.MODEL.TRANSFORMER.KERNEL_SIZE_1ST = [(1,1),(3,3)]
_C.MODEL.TRANSFORMER.KERNEL_SIZE_2ND = [(1,1),(3,3)]
_C.MODEL.TRANSFORMER.KERNEL_SIZE_3RD = [(1,1),(3,3)]
_C.MODEL.TRANSFORMER.USE_MASK_1ST = False
_C.MODEL.TRANSFORMER.USE_MASK_2ND = True
_C.MODEL.TRANSFORMER.USE_MASK_3RD = True
_C.MODEL.TRANSFORMER.USE_PATCH2VEC = True
####
_C.MODEL.USE_FEATURE_MASK = True
_C.MODEL.FEATURE_AUG_TYPE = 'exchange_token' # 'exchange_token', 'jigsaw_token', 'cutout_patch', 'erase_patch', 'mixup_patch', 'jigsaw_patch'
_C.MODEL.FEATURE_MASK_SIZE = 4
_C.MODEL.MASK_SHAPE = 'stripe' # 'square', 'random'
_C.MODEL.MASK_SIZE = 1
_C.MODEL.MASK_MODE = 'random_direction' # 'horizontal', 'vertical' for stripe; 'random_size' for square
_C.MODEL.MASK_PERCENT = 0.1
####
_C.MODEL.EMBEDDING_DIM = 256
# -------------------------------------------------------- #
# Loss #
# -------------------------------------------------------- #
_C.MODEL.LOSS = CN()
# Size of the lookup table in OIM
_C.MODEL.LOSS.LUT_SIZE = 5532
# Size of the circular queue in OIM
_C.MODEL.LOSS.CQ_SIZE = 5000
_C.MODEL.LOSS.OIM_MOMENTUM = 0.5
_C.MODEL.LOSS.OIM_SCALAR = 30.0
_C.MODEL.LOSS.USE_SOFTMAX = True
# -------------------------------------------------------- #
# Evaluation #
# -------------------------------------------------------- #
# The period to evaluate the model during training
_C.EVAL_PERIOD = 1
# Evaluation with GT boxes to verify the upper bound of person search performance
_C.EVAL_USE_GT = False
# Fast evaluation with cached features
_C.EVAL_USE_CACHE = False
# Evaluation with Context Bipartite Graph Matching (CBGM) algorithm
_C.EVAL_USE_CBGM = False
# Gallery size in evaluation, only for CUHK-SYSU
_C.EVAL_GALLERY_SIZE = 100
# Feature used for evaluation
_C.EVAL_FEATURE = 'concat' # 'stage2', 'stage3'
# -------------------------------------------------------- #
# Miscs #
# -------------------------------------------------------- #
# Save a checkpoint after every this number of epochs
_C.CKPT_PERIOD = 1
# The period (in terms of iterations) to display training losses
_C.DISP_PERIOD = 10
# Whether to use tensorboard for visualization
_C.TF_BOARD = True
# The device loading the model
_C.DEVICE = "cuda"
# Set seed to negative to fully randomize everything
_C.SEED = 1
# Directory where output files are written
_C.OUTPUT_DIR = "./output"
def get_default_cfg():
    """Return a fresh, independent copy of the default configuration node."""
    cfg = _C.clone()
    return cfg
| 7,923 | 35.018182 | 141 | py |
COAT | COAT-main/eval_func.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import os.path as osp
import numpy as np
from scipy.io import loadmat
from sklearn.metrics import average_precision_score
from utils.km import run_kuhn_munkres
from utils.utils import write_json
def _compute_iou(a, b):
x1 = max(a[0], b[0])
y1 = max(a[1], b[1])
x2 = min(a[2], b[2])
y2 = min(a[3], b[3])
inter = max(0, x2 - x1) * max(0, y2 - y1)
union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
return inter * 1.0 / union
def eval_detection(
    gallery_dataset, gallery_dets, det_thresh=0.5, iou_thresh=0.5, labeled_only=False
):
    """
    Evaluate detection recall and (recall-scaled) average precision.

    gallery_det (list of ndarray): n_det x [x1, y1, x2, y2, score] per image
    det_thresh (float): filter out gallery detections whose scores below this
    iou_thresh (float): treat as true positive if IoU is above this threshold
    labeled_only (bool): filter out unlabeled background people

    Returns:
        (det_rate, ap): detection recall over all GT boxes, and average
        precision multiplied by that recall.
    """
    assert len(gallery_dataset) == len(gallery_dets)
    annos = gallery_dataset.annotations

    y_true, y_score = [], []
    count_gt, count_tp = 0, 0
    for anno, det in zip(annos, gallery_dets):
        gt_boxes = anno["boxes"]
        if labeled_only:
            # exclude the unlabeled people (pid == 5555)
            inds = np.where(anno["pids"].ravel() != 5555)[0]
            if len(inds) == 0:
                continue
            gt_boxes = gt_boxes[inds]
        num_gt = gt_boxes.shape[0]

        # `det` may be a plain list or an ndarray; `len` works for both.
        # (The previous `det != []` relied on NumPy's deprecated elementwise
        # array-to-list comparison.)
        if len(det) > 0:
            det = np.asarray(det)
            # keep only detections whose confidence reaches det_thresh
            inds = np.where(det[:, 4].ravel() >= det_thresh)[0]
            det = det[inds]
            num_det = det.shape[0]
        else:
            num_det = 0
        if num_det == 0:
            # no candidates for this image: its GT boxes still count as misses
            count_gt += num_gt
            continue

        # pairwise IoU between every GT box and every kept detection
        ious = np.zeros((num_gt, num_det), dtype=np.float32)
        for i in range(num_gt):
            for j in range(num_det):
                ious[i, j] = _compute_iou(gt_boxes[i], det[j, :4])
        tfmat = ious >= iou_thresh
        # for each det, keep only the largest iou of all the gt
        for j in range(num_det):
            largest_ind = np.argmax(ious[:, j])
            for i in range(num_gt):
                if i != largest_ind:
                    tfmat[i, j] = False
        # for each gt, keep only the largest iou of all the det
        for i in range(num_gt):
            largest_ind = np.argmax(ious[i, :])
            for j in range(num_det):
                if j != largest_ind:
                    tfmat[i, j] = False
        for j in range(num_det):
            y_score.append(det[j, -1])
            y_true.append(tfmat[:, j].any())
        count_tp += tfmat.sum()
        count_gt += num_gt

    det_rate = count_tp * 1.0 / count_gt
    # scale AP by recall so that undetected GT boxes are penalized
    ap = average_precision_score(y_true, y_score) * det_rate

    print("{} detection:".format("labeled only" if labeled_only else "all"))
    print(" recall = {:.2%}".format(det_rate))
    if not labeled_only:
        print(" ap = {:.2%}".format(ap))
    return det_rate, ap
def eval_search_cuhk(
    gallery_dataset,
    query_dataset,
    gallery_dets,
    gallery_feats,
    query_box_feats,
    query_dets,
    query_feats,
    k1=10,
    k2=3,
    det_thresh=0.5,
    cbgm=False,
    gallery_size=100,
):
    """
    Evaluate person search on CUHK-SYSU following its official protocol,
    printing mAP/top-k accuracy and dumping failure cases to vis/results.json.

    gallery_dataset/query_dataset: an instance of BaseDataset
    gallery_det (list of ndarray): n_det x [x1, x2, y1, y2, score] per image
    gallery_feat (list of ndarray): n_det x D features per image
    query_feat (list of ndarray): D dimensional features per query image
    det_thresh (float): filter out gallery detections whose scores below this
    gallery_size (int): gallery size [-1, 50, 100, 500, 1000, 2000, 4000]
                        -1 for using full set

    Returns the result dict, augmented with "mAP" and "accs" entries.
    """
    assert len(gallery_dataset) == len(gallery_dets)
    assert len(gallery_dataset) == len(gallery_feats)
    assert len(query_dataset) == len(query_box_feats)

    use_full_set = gallery_size == -1
    # The protocol .mat files are named TestG<size>; the full-set protocol
    # reuses TestG50 and then adds all remaining gallery images below.
    fname = "TestG{}".format(gallery_size if not use_full_set else 50)
    protoc = loadmat(osp.join(gallery_dataset.root, "annotation/test/train_test", fname + ".mat"))
    protoc = protoc[fname].squeeze()

    # mapping from gallery image to (det, feat)
    annos = gallery_dataset.annotations
    name_to_det_feat = {}
    for anno, det, feat in zip(annos, gallery_dets, gallery_feats):
        name = anno["img_name"]
        # NOTE(review): `det != []` compares an ndarray to a list, which uses
        # NumPy's deprecated elementwise fallback; `len(det) > 0` would be
        # the robust form.
        if det != []:
            scores = det[:, 4].ravel()
            inds = np.where(scores >= det_thresh)[0]
            if len(inds) > 0:
                name_to_det_feat[name] = (det[inds], feat[inds])

    aps = []
    accs = []
    topk = [1, 5, 10]
    ret = {"image_root": gallery_dataset.img_prefix, "results": []}
    for i in range(len(query_dataset)):
        y_true, y_score = [], []
        imgs, rois = [], []
        count_gt, count_tp = 0, 0
        # get L2-normalized feature vector
        feat_q = query_box_feats[i].ravel()
        # ignore the query image
        query_imname = str(protoc["Query"][i]["imname"][0, 0][0])
        # query_roi stored as [x, y, w, h]; convert to [x1, y1, x2, y2]
        query_roi = protoc["Query"][i]["idlocate"][0, 0][0].astype(np.int32)
        query_roi[2:] += query_roi[:2]
        query_gt = []
        tested = set([query_imname])
        name2sim = {}
        name2gt = {}
        sims = []
        imgs_cbgm = []
        # 1. Go through the gallery samples defined by the protocol
        for item in protoc["Gallery"][i].squeeze():
            gallery_imname = str(item[0][0])
            # some contain the query (gt not empty), some not
            gt = item[1][0].astype(np.int32)
            count_gt += gt.size > 0
            # compute distance between query and gallery dets
            if gallery_imname not in name_to_det_feat:
                continue
            det, feat_g = name_to_det_feat[gallery_imname]
            # no detection in this gallery, skip it
            if det.shape[0] == 0:
                continue
            # get L2-normalized feature matrix NxD
            assert feat_g.size == np.prod(feat_g.shape[:2])
            feat_g = feat_g.reshape(feat_g.shape[:2])
            # compute cosine similarities
            sim = feat_g.dot(feat_q).ravel()

            if gallery_imname in name2sim:
                continue
            name2sim[gallery_imname] = sim
            name2gt[gallery_imname] = gt
            sims.extend(list(sim))
            imgs_cbgm.extend([gallery_imname] * len(sim))
        # 2. Go through the remaining gallery images if using full set
        if use_full_set:
            for gallery_imname in gallery_dataset.imgs:
                if gallery_imname in tested:
                    continue
                if gallery_imname not in name_to_det_feat:
                    continue
                det, feat_g = name_to_det_feat[gallery_imname]
                # get L2-normalized feature matrix NxD
                assert feat_g.size == np.prod(feat_g.shape[:2])
                feat_g = feat_g.reshape(feat_g.shape[:2])
                # compute cosine similarities
                sim = feat_g.dot(feat_q).ravel()
                # guaranteed no target query in these gallery images
                label = np.zeros(len(sim), dtype=np.int32)
                y_true.extend(list(label))
                y_score.extend(list(sim))
                imgs.extend([gallery_imname] * len(sim))
                rois.extend(list(det))

        if cbgm:
            # -------- Context Bipartite Graph Matching (CBGM) ------- #
            sims = np.array(sims)
            imgs_cbgm = np.array(imgs_cbgm)
            # only process the top-k1 gallery images for efficiency
            inds = np.argsort(sims)[-k1:]
            imgs_cbgm = set(imgs_cbgm[inds])
            for img in imgs_cbgm:
                # `sim` aliases the array stored in name2sim, so revisions
                # below feed into the scoring loop that follows.
                sim = name2sim[img]
                det, feat_g = name_to_det_feat[img]
                # only regard the people with top-k2 detection confidence
                # in the query image as context information
                qboxes = query_dets[i][:k2]
                qfeats = query_feats[i][:k2]
                assert (
                    query_roi - qboxes[0][:4]
                ).sum() <= 0.001, "query_roi must be the first one in pboxes"

                # build the bipartite graph and run Kuhn-Munkres (K-M) algorithm
                # to find the best match
                graph = []
                for indx_i, pfeat in enumerate(qfeats):
                    for indx_j, gfeat in enumerate(feat_g):
                        graph.append((indx_i, indx_j, (pfeat * gfeat).sum()))
                km_res, max_val = run_kuhn_munkres(graph)

                # revise the similarity between query person and its matching
                for indx_i, indx_j, _ in km_res:
                    # 0 denotes the query roi
                    if indx_i == 0:
                        sim[indx_j] = max_val
                        break
        for gallery_imname, sim in name2sim.items():
            gt = name2gt[gallery_imname]
            det, feat_g = name_to_det_feat[gallery_imname]
            # assign label for each det
            label = np.zeros(len(sim), dtype=np.int32)
            if gt.size > 0:
                # gt stored as [x, y, w, h]; converted in place to corners
                w, h = gt[2], gt[3]
                gt[2:] += gt[:2]
                query_gt.append({"img": str(gallery_imname), "roi": list(map(float, list(gt)))})
                # protocol-defined size-adaptive IoU threshold, capped at 0.5
                iou_thresh = min(0.5, (w * h * 1.0) / ((w + 10) * (h + 10)))
                inds = np.argsort(sim)[::-1]
                sim = sim[inds]
                det = det[inds]
                # only set the first matched det as true positive
                for j, roi in enumerate(det[:, :4]):
                    if _compute_iou(roi, gt) >= iou_thresh:
                        label[j] = 1
                        count_tp += 1
                        break
            y_true.extend(list(label))
            y_score.extend(list(sim))
            imgs.extend([gallery_imname] * len(sim))
            rois.extend(list(det))
            tested.add(gallery_imname)
        # 3. Compute AP for this query (need to scale by recall rate)
        y_score = np.asarray(y_score)
        y_true = np.asarray(y_true)
        assert count_tp <= count_gt
        recall_rate = count_tp * 1.0 / count_gt
        ap = 0 if count_tp == 0 else average_precision_score(y_true, y_score) * recall_rate
        aps.append(ap)
        inds = np.argsort(y_score)[::-1]
        y_score = y_score[inds]
        y_true = y_true[inds]
        accs.append([min(1, sum(y_true[:k])) for k in topk])
        # 4. Save result for JSON dump
        new_entry = {
            "query_img": str(query_imname),
            "query_roi": list(map(float, list(query_roi))),
            "query_gt": query_gt,
            "gallery": [],
        }
        # only record wrong results
        if int(y_true[0]):
            continue
        # only save top-10 predictions
        for k in range(10):
            new_entry["gallery"].append(
                {
                    "img": str(imgs[inds[k]]),
                    "roi": list(map(float, list(rois[inds[k]]))),
                    "score": float(y_score[k]),
                    "correct": int(y_true[k]),
                }
            )
        ret["results"].append(new_entry)

    print("search ranking:")
    print(" mAP = {:.2%}".format(np.mean(aps)))
    accs = np.mean(accs, axis=0)
    for i, k in enumerate(topk):
        print(" top-{:2d} = {:.2%}".format(k, accs[i]))

    write_json(ret, "vis/results.json")
    ret["mAP"] = np.mean(aps)
    ret["accs"] = accs
    return ret
def eval_search_prw(
    gallery_dataset,
    query_dataset,
    gallery_dets,
    gallery_feats,
    query_box_feats,
    query_dets,
    query_feats,
    k1=30,
    k2=4,
    det_thresh=0.5,
    cbgm=False,
    gallery_size=None,  # not used in PRW
    ignore_cam_id=True,
):
    """
    Evaluate person search on the PRW dataset: for every query person, rank all
    gallery detections by feature similarity and report mAP and top-k accuracy.

    gallery_dets (list of ndarray): n_det x [x1, x2, y1, y2, score] per image
    gallery_feats (list of ndarray): n_det x D features per image
    query_box_feats (list of ndarray): D dimensional features per query image
    query_dets / query_feats: per-query detections and features, used only by CBGM
    k1 (int): number of top similarities whose images enter CBGM re-ranking
    k2 (int): number of query-side boxes/features used to build the CBGM graph
    det_thresh (float): filter out gallery detections whose scores below this
    cbgm (bool): enable Context Bipartite Graph Matching re-ranking
    gallery_size (int): unused for PRW (the full gallery is always searched)
    ignore_cam_id (bool): Set to True according to CUHK-SYSU,
                          although it's a common practice to focus on cross-cam match only.

    Returns a dict with per-query ranking results plus overall "mAP" and "accs".
    """
    assert len(gallery_dataset) == len(gallery_dets)
    assert len(gallery_dataset) == len(gallery_feats)
    assert len(query_dataset) == len(query_box_feats)
    annos = gallery_dataset.annotations
    # Keep only confident detections; images with none are dropped entirely.
    name_to_det_feat = {}
    for anno, det, feat in zip(annos, gallery_dets, gallery_feats):
        name = anno["img_name"]
        scores = det[:, 4].ravel()
        inds = np.where(scores >= det_thresh)[0]
        if len(inds) > 0:
            name_to_det_feat[name] = (det[inds], feat[inds])
    aps = []
    accs = []
    topk = [1, 5, 10]
    ret = {"image_root": gallery_dataset.img_prefix, "results": []}
    for i in range(len(query_dataset)):
        y_true, y_score = [], []
        imgs, rois = [], []
        count_gt, count_tp = 0, 0
        feat_p = query_box_feats[i].ravel()
        query_imname = query_dataset.annotations[i]["img_name"]
        query_roi = query_dataset.annotations[i]["boxes"]
        query_pid = query_dataset.annotations[i]["pids"]
        query_cam = query_dataset.annotations[i]["cam_id"]
        # Find all occurence of this query
        gallery_imgs = []
        for x in annos:
            if query_pid in x["pids"] and x["img_name"] != query_imname:
                gallery_imgs.append(x)
        # Map image name -> GT boxes of the query identity in that image.
        query_gts = {}
        for item in gallery_imgs:
            query_gts[item["img_name"]] = item["boxes"][item["pids"] == query_pid]
        # Construct gallery set for this query
        if ignore_cam_id:
            gallery_imgs = []
            for x in annos:
                if x["img_name"] != query_imname:
                    gallery_imgs.append(x)
        else:
            # Cross-camera protocol: exclude gallery images from the query's camera.
            gallery_imgs = []
            for x in annos:
                if x["img_name"] != query_imname and x["cam_id"] != query_cam:
                    gallery_imgs.append(x)
        name2sim = {}
        sims = []
        imgs_cbgm = []
        # 1. Go through all gallery samples
        for item in gallery_imgs:
            gallery_imname = item["img_name"]
            # some contain the query (gt not empty), some not
            count_gt += gallery_imname in query_gts
            # compute distance between query and gallery dets
            if gallery_imname not in name_to_det_feat:
                continue
            det, feat_g = name_to_det_feat[gallery_imname]
            # get L2-normalized feature matrix NxD
            assert feat_g.size == np.prod(feat_g.shape[:2])
            feat_g = feat_g.reshape(feat_g.shape[:2])
            # compute cosine similarities
            sim = feat_g.dot(feat_p).ravel()
            if gallery_imname in name2sim:
                continue
            name2sim[gallery_imname] = sim
            sims.extend(list(sim))
            imgs_cbgm.extend([gallery_imname] * len(sim))
        # 2. Optional CBGM re-ranking: for the k1 most similar detections,
        # solve a bipartite matching between query-side and gallery-side boxes
        # and boost the matched detection's score to the matching value.
        if cbgm:
            sims = np.array(sims)
            imgs_cbgm = np.array(imgs_cbgm)
            inds = np.argsort(sims)[-k1:]
            imgs_cbgm = set(imgs_cbgm[inds])
            for img in imgs_cbgm:
                sim = name2sim[img]
                det, feat_g = name_to_det_feat[img]
                qboxes = query_dets[i][:k2]
                qfeats = query_feats[i][:k2]
                # assert (
                #     query_roi - qboxes[0][:4]
                # ).sum() <= 0.001, "query_roi must be the first one in pboxes"
                # Edge weight = inner product between query and gallery features.
                graph = []
                for indx_i, pfeat in enumerate(qfeats):
                    for indx_j, gfeat in enumerate(feat_g):
                        graph.append((indx_i, indx_j, (pfeat * gfeat).sum()))
                km_res, max_val = run_kuhn_munkres(graph)
                # Index 0 is the query person itself; overwrite its matched
                # gallery detection's similarity in place (sim aliases name2sim).
                for indx_i, indx_j, _ in km_res:
                    if indx_i == 0:
                        sim[indx_j] = max_val
                        break
        for gallery_imname, sim in name2sim.items():
            det, feat_g = name_to_det_feat[gallery_imname]
            # assign label for each det
            label = np.zeros(len(sim), dtype=np.int32)
            if gallery_imname in query_gts:
                gt = query_gts[gallery_imname].ravel()
                w, h = gt[2] - gt[0], gt[3] - gt[1]
                # Smaller GT boxes get a proportionally lower IoU threshold.
                iou_thresh = min(0.5, (w * h * 1.0) / ((w + 10) * (h + 10)))
                inds = np.argsort(sim)[::-1]
                sim = sim[inds]
                det = det[inds]
                # only set the first matched det as true positive
                for j, roi in enumerate(det[:, :4]):
                    if _compute_iou(roi, gt) >= iou_thresh:
                        label[j] = 1
                        count_tp += 1
                        break
            y_true.extend(list(label))
            y_score.extend(list(sim))
            imgs.extend([gallery_imname] * len(sim))
            rois.extend(list(det))
        # 3. Compute AP for this query (need to scale by recall rate)
        y_score = np.asarray(y_score)
        y_true = np.asarray(y_true)
        assert count_tp <= count_gt
        # NOTE(review): assumes count_gt > 0 (the query identity appears in at
        # least one other gallery image); otherwise this divides by zero.
        recall_rate = count_tp * 1.0 / count_gt
        ap = 0 if count_tp == 0 else average_precision_score(y_true, y_score) * recall_rate
        aps.append(ap)
        inds = np.argsort(y_score)[::-1]
        y_score = y_score[inds]
        y_true = y_true[inds]
        # top-k accuracy: 1 if any of the k best-scored detections is correct.
        accs.append([min(1, sum(y_true[:k])) for k in topk])
        # 4. Save result for JSON dump
        new_entry = {
            "query_img": str(query_imname),
            "query_roi": list(map(float, list(query_roi.squeeze()))),
            "query_gt": query_gts,
            "gallery": [],
        }
        # only save top-10 predictions
        for k in range(10):
            new_entry["gallery"].append(
                {
                    "img": str(imgs[inds[k]]),
                    "roi": list(map(float, list(rois[inds[k]]))),
                    "score": float(y_score[k]),
                    "correct": int(y_true[k]),
                }
            )
        ret["results"].append(new_entry)
    print("search ranking:")
    mAP = np.mean(aps)
    print("  mAP = {:.2%}".format(mAP))
    accs = np.mean(accs, axis=0)
    for i, k in enumerate(topk):
        print("  top-{:2d} = {:.2%}".format(k, accs[i]))
    # write_json(ret, "vis/results.json")
    ret["mAP"] = np.mean(aps)
    ret["accs"] = accs
    return ret
| 18,670 | 37.182004 | 98 | py |
COAT | COAT-main/train.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import argparse
import datetime
import os.path as osp
import time
import torch
import torch.utils.data
from datasets import build_test_loader, build_train_loader
from defaults import get_default_cfg
from engine import evaluate_performance, train_one_epoch
from models.coat import COAT
from utils.utils import mkdir, resume_from_ckpt, save_on_master, set_random_seed
from loss.softmax_loss import SoftmaxLoss
def main(args):
    """Entry point: build the COAT model, then either evaluate a checkpoint
    (--eval) or run the full training loop, checkpointing the last 3 epochs.

    Args:
        args: argparse namespace with cfg_file, opts, eval, resume, ckpt.
    """
    # Merge config: defaults <- YAML file <- command-line overrides, then freeze.
    cfg = get_default_cfg()
    if args.cfg_file:
        cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    device = torch.device(cfg.DEVICE)
    if cfg.SEED >= 0:
        set_random_seed(cfg.SEED)
    print("Creating model...")
    model = COAT(cfg)
    model.to(device)
    print("Loading data...")
    train_loader = build_train_loader(cfg)
    gallery_loader, query_loader = build_test_loader(cfg)
    # Optional auxiliary softmax ID losses for cascade stages 2 and 3.
    softmax_criterion_s2 = None
    softmax_criterion_s3 = None
    if cfg.MODEL.LOSS.USE_SOFTMAX:
        softmax_criterion_s2 = SoftmaxLoss(cfg)
        softmax_criterion_s3 = SoftmaxLoss(cfg)
        softmax_criterion_s2.to(device)
        softmax_criterion_s3.to(device)
    # Evaluation-only mode: load the checkpoint, evaluate, and exit.
    if args.eval:
        assert args.ckpt, "--ckpt must be specified when --eval enabled"
        resume_from_ckpt(args.ckpt, model)
        evaluate_performance(
            model,
            gallery_loader,
            query_loader,
            device,
            use_gt=cfg.EVAL_USE_GT,
            use_cache=cfg.EVAL_USE_CACHE,
            use_cbgm=cfg.EVAL_USE_CBGM,
            gallery_size=cfg.EVAL_GALLERY_SIZE,
        )
        exit(0)
    # Optimize the model plus (if enabled) the softmax-loss parameters together.
    params = [p for p in model.parameters() if p.requires_grad]
    if cfg.MODEL.LOSS.USE_SOFTMAX:
        params_softmax_s2 = [p for p in softmax_criterion_s2.parameters() if p.requires_grad]
        params_softmax_s3 = [p for p in softmax_criterion_s3.parameters() if p.requires_grad]
        params.extend(params_softmax_s2)
        params.extend(params_softmax_s3)
    optimizer = torch.optim.SGD(
        params,
        lr=cfg.SOLVER.BASE_LR,
        momentum=cfg.SOLVER.SGD_MOMENTUM,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY,
    )
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=cfg.SOLVER.LR_DECAY_MILESTONES, gamma=cfg.SOLVER.GAMMA
    )
    start_epoch = 0
    if args.resume:
        assert args.ckpt, "--ckpt must be specified when --resume enabled"
        # resume_from_ckpt returns the checkpoint's epoch; continue from the next.
        start_epoch = resume_from_ckpt(args.ckpt, model, optimizer, lr_scheduler) + 1
    print("Creating output folder...")
    output_dir = cfg.OUTPUT_DIR
    mkdir(output_dir)
    # Persist the fully-resolved config next to the checkpoints for reproducibility.
    path = osp.join(output_dir, "config.yaml")
    with open(path, "w") as f:
        f.write(cfg.dump())
    print(f"Full config is saved to {path}")
    tfboard = None
    if cfg.TF_BOARD:
        from torch.utils.tensorboard import SummaryWriter
        tf_log_path = osp.join(output_dir, "tf_log")
        mkdir(tf_log_path)
        tfboard = SummaryWriter(log_dir=tf_log_path)
        print(f"TensorBoard files are saved to {tf_log_path}")
    print("Start training...")
    start_time = time.time()
    for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCHS):
        train_one_epoch(cfg, model, optimizer, train_loader, device, epoch, tfboard, softmax_criterion_s2, softmax_criterion_s3)
        lr_scheduler.step()
        # only save the last three checkpoints
        if epoch >= cfg.SOLVER.MAX_EPOCHS - 3:
            save_on_master(
                {
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "lr_scheduler": lr_scheduler.state_dict(),
                    "epoch": epoch,
                },
                osp.join(output_dir, f"epoch_{epoch}.pth"),
            )
            # evaluate the current checkpoint
            evaluate_performance(
                model,
                gallery_loader,
                query_loader,
                device,
                use_gt=cfg.EVAL_USE_GT,
                use_cache=cfg.EVAL_USE_CACHE,
                use_cbgm=cfg.EVAL_USE_CBGM,
                gallery_size=cfg.EVAL_GALLERY_SIZE,
            )
    if tfboard:
        tfboard.close()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Total training time {total_time_str}")
if __name__ == "__main__":
    # Command-line interface for training / evaluating the person search model.
    cli = argparse.ArgumentParser(description="Train a person search network.")
    cli.add_argument("--cfg", dest="cfg_file", help="Path to configuration file.")
    cli.add_argument(
        "--eval", action="store_true", help="Evaluate the performance of a given checkpoint."
    )
    cli.add_argument(
        "--resume", action="store_true", help="Resume from the specified checkpoint."
    )
    cli.add_argument("--ckpt", help="Path to checkpoint to resume or evaluate.")
    # Trailing tokens are forwarded verbatim as config overrides.
    cli.add_argument(
        "opts", nargs=argparse.REMAINDER, help="Modify config options using the command-line"
    )
    main(cli.parse_args())
| 5,228 | 32.735484 | 128 | py |
COAT | COAT-main/models/resnet.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
from collections import OrderedDict
import torch.nn.functional as F
import torchvision
from torch import nn
class Backbone(nn.Sequential):
    """ResNet stem through res4 (layer3), wrapped so the forward pass returns
    the res4 feature map inside an ordered dict keyed by "feat_res4"."""

    def __init__(self, resnet):
        # Reuse the pretrained modules up to and including layer3 (res4).
        stem = OrderedDict()
        stem["conv1"] = resnet.conv1
        stem["bn1"] = resnet.bn1
        stem["relu"] = resnet.relu
        stem["maxpool"] = resnet.maxpool
        stem["layer1"] = resnet.layer1  # res2
        stem["layer2"] = resnet.layer2  # res3
        stem["layer3"] = resnet.layer3  # res4
        super(Backbone, self).__init__(stem)
        # Channel count of the res4 output, consumed by downstream heads.
        self.out_channels = 1024

    def forward(self, x):
        """Run the stacked stem and return {"feat_res4": feature_map}."""
        res4 = super(Backbone, self).forward(x)
        return OrderedDict([["feat_res4", res4]])
class Res5Head(nn.Sequential):
    """ResNet res5 (layer4) head; returns globally max-pooled res4 and res5
    features as an ordered dict."""

    def __init__(self, resnet):
        super(Res5Head, self).__init__(OrderedDict([["layer4", resnet.layer4]]))  # res5
        # Channel counts of the pooled res4 / res5 outputs.
        self.out_channels = [1024, 2048]

    def forward(self, x):
        """Pool the incoming res4 map and its res5 transform down to 1x1."""
        res5 = super(Res5Head, self).forward(x)
        pooled_res4 = F.adaptive_max_pool2d(x, 1)
        pooled_res5 = F.adaptive_max_pool2d(res5, 1)
        return OrderedDict([["feat_res4", pooled_res4], ["feat_res5", pooled_res5]])
def build_resnet(name="resnet50", pretrained=True):
    """Instantiate a torchvision ResNet and split it into the detection
    backbone (stem through res4) and the res5 head.

    Args:
        name: torchvision resnet variant name, e.g. "resnet50".
        pretrained: load ImageNet weights when True.

    Returns:
        (Backbone, Res5Head) pair built over the same resnet instance.
    """
    resnet = torchvision.models.resnet.__dict__[name](pretrained=pretrained)
    # Freeze the first conv and its batch norm so the earliest layers keep
    # their pretrained weights during fine-tuning.
    for frozen in (resnet.conv1.weight, resnet.bn1.weight, resnet.bn1.bias):
        frozen.requires_grad_(False)
    return Backbone(resnet), Res5Head(resnet)
| 1,800 | 32.351852 | 88 | py |
COAT | COAT-main/models/transformer.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import math
import random
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.mask import exchange_token, exchange_patch, get_mask_box, jigsaw_token, cutout_patch, erase_patch, mixup_patch, jigsaw_patch
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Bias-free 1x1 convolution (channel projection)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class TransformerHead(nn.Module):
    """Occluded-attention transformer head: wraps a multi-scale transformer
    encoder between 1x1 convs and returns pooled features from before and
    after the transformer (keys "before_trans" / "after_trans")."""

    def __init__(
        self,
        cfg,
        trans_names,
        kernel_size,
        use_feature_mask,
    ):
        super(TransformerHead, self).__init__()
        d_model = cfg.MODEL.TRANSFORMER.DIM_MODEL
        # Mask parameters
        self.use_feature_mask = use_feature_mask
        mask_shape = cfg.MODEL.MASK_SHAPE
        mask_size = cfg.MODEL.MASK_SIZE
        mask_mode = cfg.MODEL.MASK_MODE
        # bypass_mask augments the skip branch; get_mask_box picks the region
        # that token-level augmentations operate on (see utils.mask).
        self.bypass_mask = exchange_patch(mask_shape, mask_size, mask_mode)
        self.get_mask_box = get_mask_box(mask_shape, mask_size, mask_mode)
        self.transformer_encoder = Transformers(
            cfg=cfg,
            trans_names=trans_names,
            kernel_size=kernel_size,
            use_feature_mask=use_feature_mask,
        )
        # conv0: skip-branch projection; conv1/conv2: project to and from the
        # transformer's model dimension.
        self.conv0 = conv1x1(1024, 1024)
        self.conv1 = conv1x1(1024, d_model)
        self.conv2 = conv1x1(d_model, 2048)

    def forward(self, box_features):
        """Return {"before_trans": pooled skip features (1024-d),
        "after_trans": pooled transformer features (2048-d)}.

        box_features: RoI-pooled feature maps from res4.
        """
        mask_box = self.get_mask_box(box_features)
        if self.use_feature_mask:
            skip_features = self.conv0(box_features)
            # Patch-exchange augmentation is applied to the skip branch only
            # while training.
            if self.training:
                skip_features = self.bypass_mask(skip_features)
        else:
            skip_features = box_features
        trans_features = {}
        trans_features["before_trans"] = F.adaptive_max_pool2d(skip_features, 1)
        box_features = self.conv1(box_features)
        box_features = self.transformer_encoder((box_features,mask_box))
        box_features = self.conv2(box_features)
        trans_features["after_trans"] = F.adaptive_max_pool2d(box_features, 1)
        return trans_features
class Transformers(nn.Module):
    """Multi-scale transformer encoder: splits the channel dimension into one
    chunk per scale, runs each chunk through its own Transformer branch, and
    concatenates the results (with optional output conv and global residual)."""

    def __init__(
        self,
        cfg,
        trans_names,
        kernel_size,
        use_feature_mask,
    ):
        super(Transformers, self).__init__()
        d_model = cfg.MODEL.TRANSFORMER.DIM_MODEL
        self.feature_aug_type = cfg.MODEL.FEATURE_AUG_TYPE
        self.use_feature_mask = use_feature_mask
        # If no conv before transformer, we do not use scales
        if not cfg.MODEL.TRANSFORMER.USE_PATCH2VEC:
            trans_names = ['scale1']
            kernel_size = [(1,1)]
        self.trans_names = trans_names
        self.scale_size = len(self.trans_names)
        # Token dimension per branch; the factor 2 matches each branch
        # receiving d_model // scale_size channels.
        hidden = d_model//(2*self.scale_size)
        # kernel_size: (padding, stride)
        kernels = {
            (1,1): [(0,0),(1,1)],
            (3,3): [(1,1),(1,1)]
        }
        padding = []
        stride = []
        for ksize in kernel_size:
            if ksize not in [(1,1),(3,3)]:
                raise ValueError('Undefined kernel size.')
            padding.append(kernels[ksize][0])
            stride.append(kernels[ksize][1])
        self.use_output_layer = cfg.MODEL.TRANSFORMER.USE_OUTPUT_LAYER
        self.use_global_shortcut = cfg.MODEL.TRANSFORMER.USE_GLOBAL_SHORTCUT
        # One Transformer branch per scale, keyed by scale name.
        self.blocks = nn.ModuleDict()
        for tname, ksize, psize, ssize in zip(self.trans_names, kernel_size, padding, stride):
            transblock = Transformer(
                cfg, d_model//self.scale_size, ksize, psize, ssize, hidden, use_feature_mask
            )
            self.blocks[tname] = nn.Sequential(transblock)
        self.output_linear = nn.Sequential(
            nn.Conv2d(d_model, d_model, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.mask_para = [cfg.MODEL.MASK_SHAPE, cfg.MODEL.MASK_SIZE, cfg.MODEL.MASK_MODE]

    def forward(self, inputs):
        """inputs is a (features, mask_box) pair; returns features of the same
        channel count, re-assembled from the per-scale branches."""
        trans_feat = []
        enc_feat, mask_box = inputs
        # Patch-exchange augmentation on the whole feature map (training only).
        if self.training and self.use_feature_mask and self.feature_aug_type == 'exchange_patch':
            feature_mask = exchange_patch(self.mask_para[0], self.mask_para[1], self.mask_para[2])
            enc_feat = feature_mask(enc_feat)
        # Split channels evenly across the scale branches and process each.
        for tname, feat in zip(self.trans_names, torch.chunk(enc_feat, len(self.trans_names), dim=1)):
            feat = self.blocks[tname]((feat, mask_box))
            trans_feat.append(feat)
        trans_feat = torch.cat(trans_feat, 1)
        if self.use_output_layer:
            trans_feat = self.output_linear(trans_feat)
        if self.use_global_shortcut:
            trans_feat = enc_feat + trans_feat
        return trans_feat
class Transformer(nn.Module):
    """Single-scale transformer branch: patch embedding (conv), a stack of
    TransformerBlock layers, and a fold back to a spatial map, with optional
    token/patch-level feature augmentation."""

    def __init__(self, cfg, channel, kernel_size, padding, stride, hidden, use_feature_mask
    ):
        super(Transformer, self).__init__()
        self.k = kernel_size[0]
        stack_num = cfg.MODEL.TRANSFORMER.ENCODER_LAYERS
        num_head = cfg.MODEL.TRANSFORMER.N_HEAD
        dropout = cfg.MODEL.TRANSFORMER.DROPOUT
        # 14x14 matches the RoIAlign output size used by the box heads.
        output_size = (14,14)
        token_size = tuple(map(lambda x,y:x//y, output_size, stride))
        blocks = []
        # NOTE(review): the same TransformerBlock instance is appended
        # stack_num times, so all encoder layers share weights — confirm
        # this is intentional.
        self.transblock = TransformerBlock(token_size, hidden=hidden, num_head=num_head, dropout=dropout)
        for _ in range(stack_num):
            blocks.append(self.transblock)
        self.transformer = nn.Sequential(*blocks)
        self.patch2vec = nn.Conv2d(channel, hidden, kernel_size=kernel_size, stride=stride, padding=padding)
        self.vec2patch = Vec2Patch(channel, hidden, output_size, kernel_size, stride, padding)
        self.use_local_shortcut = cfg.MODEL.TRANSFORMER.USE_LOCAL_SHORTCUT
        self.use_feature_mask = use_feature_mask
        self.feature_aug_type = cfg.MODEL.FEATURE_AUG_TYPE
        self.use_patch2vec = cfg.MODEL.TRANSFORMER.USE_PATCH2VEC

    def forward(self, inputs):
        """inputs is a (features, mask_box) pair; returns a feature map of the
        same shape as the input features."""
        enc_feat, mask_box = inputs
        b, c, h, w = enc_feat.size()
        # Embed patches into tokens: (B, hidden, H', W') -> (B, N, hidden).
        trans_feat = self.patch2vec(enc_feat)
        _, c, h, w = trans_feat.size()
        trans_feat = trans_feat.view(b, c, -1).permute(0, 2, 1)
        # For 1x1 & 3x3 kernels, exchange tokens
        # Token/patch-level augmentations; all but the jigsaw variants are
        # training-only.
        if self.training and self.use_feature_mask:
            if self.feature_aug_type == 'exchange_token':
                feature_mask = exchange_token()
                trans_feat = feature_mask(trans_feat, mask_box)
            elif self.feature_aug_type == 'cutout_patch':
                feature_mask = cutout_patch()
                trans_feat = feature_mask(trans_feat)
            elif self.feature_aug_type == 'erase_patch':
                feature_mask = erase_patch()
                trans_feat = feature_mask(trans_feat)
            elif self.feature_aug_type == 'mixup_patch':
                feature_mask = mixup_patch()
                trans_feat = feature_mask(trans_feat)
        # Jigsaw augmentations run in both training and eval modes.
        if self.use_feature_mask:
            if self.feature_aug_type == 'jigsaw_patch':
                feature_mask = jigsaw_patch()
                trans_feat = feature_mask(trans_feat)
            elif self.feature_aug_type == 'jigsaw_token':
                feature_mask = jigsaw_token()
                trans_feat = feature_mask(trans_feat)
        trans_feat = self.transformer(trans_feat)
        # Fold tokens back into a (B, C, 14, 14) feature map.
        trans_feat = self.vec2patch(trans_feat)
        if self.use_local_shortcut:
            trans_feat = enc_feat + trans_feat
        return trans_feat
class TransformerBlock(nn.Module):
    """
    Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
    """

    def __init__(self, tokensize, hidden=128, num_head=4, dropout=0.1):
        super().__init__()
        self.attention = MultiHeadedAttention(tokensize, d_model=hidden, head=num_head, p=dropout)
        self.ffn = FeedForward(hidden, p=dropout)
        self.norm1 = nn.LayerNorm(hidden)
        self.norm2 = nn.LayerNorm(hidden)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        # Self-attention sublayer: the residual is taken from the normalized
        # input (the pre-norm output), matching the original formulation.
        normed = self.norm1(x)
        attended = normed + self.dropout(self.attention(normed))
        # Feed-forward sublayer with its own pre-norm and residual.
        return attended + self.ffn(self.norm2(attended))
class Attention(nn.Module):
    """
    Scaled dot-product attention with dropout applied to the attention weights.
    """

    def __init__(self, p=0.1):
        super(Attention, self).__init__()
        self.dropout = nn.Dropout(p=p)

    def forward(self, query, key, value):
        """Return (attended values, attention weights after dropout)."""
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        weights = self.dropout(F.softmax(scores, dim=-1))
        return torch.matmul(weights, value), weights
class Vec2Patch(nn.Module):
    """Project token vectors back into a spatial feature map using nn.Fold
    (the inverse of the conv patch embedding)."""

    def __init__(self, channel, hidden, output_size, kernel_size, stride, padding):
        super(Vec2Patch, self).__init__()
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        # Each token expands to a kernel-sized patch over `channel` channels.
        patch_dim = reduce((lambda x, y: x * y), kernel_size) * channel
        self.embedding = nn.Linear(hidden, patch_dim)
        self.to_patch = torch.nn.Fold(output_size=output_size, kernel_size=kernel_size, stride=stride, padding=padding)
        h, w = output_size  # kept for parity with the original (unused)

    def forward(self, x):
        """(B, N, hidden) tokens -> (B, channel, H, W) feature map."""
        tokens = self.embedding(x)
        # Fold expects (B, C*prod(kernel), N).
        tokens = tokens.permute(0, 2, 1)
        return self.to_patch(tokens)
class MultiHeadedAttention(nn.Module):
    """
    Multi-head wrapper around scaled dot-product attention.
    Takes in model size and number of heads.
    """

    def __init__(self, tokensize, d_model, head, p=0.1):
        super().__init__()
        self.query_embedding = nn.Linear(d_model, d_model)
        self.value_embedding = nn.Linear(d_model, d_model)
        self.key_embedding = nn.Linear(d_model, d_model)
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention(p=p)
        self.head = head
        self.h, self.w = tokensize

    def forward(self, x):
        b, n, c = x.size()
        per_head = c // self.head

        def split_heads(t):
            # (B, N, C) -> (B, head, N, C/head)
            return t.view(b, n, self.head, per_head).permute(0, 2, 1, 3)

        q = split_heads(self.query_embedding(x))
        k = split_heads(self.key_embedding(x))
        v = split_heads(self.value_embedding(x))
        context, _ = self.attention(q, k, v)
        # Merge heads back: (B, head, N, C/head) -> (B, N, C).
        context = context.permute(0, 2, 1, 3).contiguous().view(b, n, c)
        return self.output_linear(context)
class FeedForward(nn.Module):
    """Position-wise two-layer MLP (expand x4, ReLU, project back) with
    dropout after each linear layer."""

    def __init__(self, d_model, p=0.1):
        super(FeedForward, self).__init__()
        # Attribute name "conv" kept for checkpoint/state_dict compatibility.
        self.conv = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            nn.ReLU(inplace=True),
            nn.Dropout(p=p),
            nn.Linear(d_model * 4, d_model),
            nn.Dropout(p=p),
        )

    def forward(self, x):
        return self.conv(x)
| 10,997 | 35.538206 | 135 | py |
COAT | COAT-main/models/coat.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.ops import MultiScaleRoIAlign
from torchvision.ops import boxes as box_ops
from torchvision.models.detection import _utils as det_utils
from loss.oim import OIMLoss
from models.resnet import build_resnet
from models.transformer import TransformerHead
class COAT(nn.Module):
    """Cascade Occluded Attention Transformer for person search: a Faster
    R-CNN-style detector (ResNet backbone + RPN) with a three-stage cascade of
    transformer box heads and re-id embedding heads."""

    def __init__(self, cfg):
        super(COAT, self).__init__()
        backbone, _ = build_resnet(name="resnet50", pretrained=True)
        anchor_generator = AnchorGenerator(
            sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)
        )
        head = RPNHead(
            in_channels=backbone.out_channels,
            num_anchors=anchor_generator.num_anchors_per_location()[0],
        )
        pre_nms_top_n = dict(
            training=cfg.MODEL.RPN.PRE_NMS_TOPN_TRAIN, testing=cfg.MODEL.RPN.PRE_NMS_TOPN_TEST
        )
        post_nms_top_n = dict(
            training=cfg.MODEL.RPN.POST_NMS_TOPN_TRAIN, testing=cfg.MODEL.RPN.POST_NMS_TOPN_TEST
        )
        rpn = RegionProposalNetwork(
            anchor_generator=anchor_generator,
            head=head,
            fg_iou_thresh=cfg.MODEL.RPN.POS_THRESH_TRAIN,
            bg_iou_thresh=cfg.MODEL.RPN.NEG_THRESH_TRAIN,
            batch_size_per_image=cfg.MODEL.RPN.BATCH_SIZE_TRAIN,
            positive_fraction=cfg.MODEL.RPN.POS_FRAC_TRAIN,
            pre_nms_top_n=pre_nms_top_n,
            post_nms_top_n=post_nms_top_n,
            nms_thresh=cfg.MODEL.RPN.NMS_THRESH,
        )
        # Three cascade stages, each with its own transformer box head.
        box_head = TransformerHead(
            cfg=cfg,
            trans_names=cfg.MODEL.TRANSFORMER.NAMES_1ST,
            kernel_size=cfg.MODEL.TRANSFORMER.KERNEL_SIZE_1ST,
            use_feature_mask=cfg.MODEL.TRANSFORMER.USE_MASK_1ST,
        )
        box_head_2nd = TransformerHead(
            cfg=cfg,
            trans_names=cfg.MODEL.TRANSFORMER.NAMES_2ND,
            kernel_size=cfg.MODEL.TRANSFORMER.KERNEL_SIZE_2ND,
            use_feature_mask=cfg.MODEL.TRANSFORMER.USE_MASK_2ND,
        )
        box_head_3rd = TransformerHead(
            cfg=cfg,
            trans_names=cfg.MODEL.TRANSFORMER.NAMES_3RD,
            kernel_size=cfg.MODEL.TRANSFORMER.KERNEL_SIZE_3RD,
            use_feature_mask=cfg.MODEL.TRANSFORMER.USE_MASK_3RD,
        )
        # Binary person/background classification + regression heads.
        faster_rcnn_predictor = FastRCNNPredictor(2048, 2)
        box_roi_pool = MultiScaleRoIAlign(
            featmap_names=["feat_res4"], output_size=14, sampling_ratio=2
        )
        box_predictor = BBoxRegressor(2048, num_classes=2, bn_neck=cfg.MODEL.ROI_HEAD.BN_NECK)
        roi_heads = CascadedROIHeads(
            cfg=cfg,
            # Cascade Transformer Head
            faster_rcnn_predictor=faster_rcnn_predictor,
            box_head_2nd=box_head_2nd,
            box_head_3rd=box_head_3rd,
            # parent class
            box_roi_pool=box_roi_pool,
            box_head=box_head,
            box_predictor=box_predictor,
            fg_iou_thresh=cfg.MODEL.ROI_HEAD.POS_THRESH_TRAIN,
            bg_iou_thresh=cfg.MODEL.ROI_HEAD.NEG_THRESH_TRAIN,
            batch_size_per_image=cfg.MODEL.ROI_HEAD.BATCH_SIZE_TRAIN,
            positive_fraction=cfg.MODEL.ROI_HEAD.POS_FRAC_TRAIN,
            bbox_reg_weights=None,
            score_thresh=cfg.MODEL.ROI_HEAD.SCORE_THRESH_TEST,
            nms_thresh=cfg.MODEL.ROI_HEAD.NMS_THRESH_TEST,
            detections_per_img=cfg.MODEL.ROI_HEAD.DETECTIONS_PER_IMAGE_TEST,
        )
        # Standard torchvision resize + ImageNet normalization.
        transform = GeneralizedRCNNTransform(
            min_size=cfg.INPUT.MIN_SIZE,
            max_size=cfg.INPUT.MAX_SIZE,
            image_mean=[0.485, 0.456, 0.406],
            image_std=[0.229, 0.224, 0.225],
        )
        self.backbone = backbone
        self.rpn = rpn
        self.roi_heads = roi_heads
        self.transform = transform
        # Which stage embedding(s) to use at evaluation: concat/stage2/stage3.
        self.eval_feat = cfg.EVAL_FEATURE
        # loss weights
        self.lw_rpn_reg = cfg.SOLVER.LW_RPN_REG
        self.lw_rpn_cls = cfg.SOLVER.LW_RPN_CLS
        self.lw_rcnn_reg_1st = cfg.SOLVER.LW_RCNN_REG_1ST
        self.lw_rcnn_cls_1st = cfg.SOLVER.LW_RCNN_CLS_1ST
        self.lw_rcnn_reg_2nd = cfg.SOLVER.LW_RCNN_REG_2ND
        self.lw_rcnn_cls_2nd = cfg.SOLVER.LW_RCNN_CLS_2ND
        self.lw_rcnn_reg_3rd = cfg.SOLVER.LW_RCNN_REG_3RD
        self.lw_rcnn_cls_3rd = cfg.SOLVER.LW_RCNN_CLS_3RD
        self.lw_rcnn_reid_2nd = cfg.SOLVER.LW_RCNN_REID_2ND
        self.lw_rcnn_reid_3rd = cfg.SOLVER.LW_RCNN_REID_3RD

    def inference(self, images, targets=None, query_img_as_gallery=False):
        """Inference path.

        With targets and query_img_as_gallery=False: extract re-id embeddings
        for the given GT query boxes. Otherwise: detect people in the images
        and return detections (optionally forcing the query GT into them).
        """
        original_image_sizes = [img.shape[-2:] for img in images]
        images, targets = self.transform(images, targets)
        features = self.backbone(images.tensors)
        if query_img_as_gallery:
            assert targets is not None
        if targets is not None and not query_img_as_gallery:
            # query
            boxes = [t["boxes"] for t in targets]
            box_features = self.roi_heads.box_roi_pool(features, boxes, images.image_sizes)
            # Embeddings from cascade stages 2 and 3, combined per eval_feat.
            box_features_2nd = self.roi_heads.box_head_2nd(box_features)
            embeddings_2nd, _ = self.roi_heads.embedding_head_2nd(box_features_2nd)
            box_features_3rd = self.roi_heads.box_head_3rd(box_features)
            embeddings_3rd, _ = self.roi_heads.embedding_head_3rd(box_features_3rd)
            if self.eval_feat == 'concat':
                embeddings = torch.cat((embeddings_2nd, embeddings_3rd), dim=1)
            elif self.eval_feat == 'stage2':
                embeddings = embeddings_2nd
            elif self.eval_feat == 'stage3':
                embeddings = embeddings_3rd
            else:
                raise Exception("Unknown evaluation feature name")
            return embeddings.split(1, 0)
        else:
            # gallery
            boxes, _ = self.rpn(images, features, targets)
            detections = self.roi_heads(features, boxes, images.image_sizes, targets, query_img_as_gallery)[0]
            detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
            return detections

    def forward(self, images, targets=None, query_img_as_gallery=False):
        """Training: return weighted losses plus stage-2/3 re-id features and
        targets. Eval: delegate to inference()."""
        if not self.training:
            return self.inference(images, targets, query_img_as_gallery)
        images, targets = self.transform(images, targets)
        features = self.backbone(images.tensors)
        boxes, rpn_losses = self.rpn(images, features, targets)
        _, rcnn_losses, feats_reid_2nd, targets_reid_2nd, feats_reid_3rd, targets_reid_3rd = self.roi_heads(features, boxes, images.image_sizes, targets)
        # rename rpn losses to be consistent with detection losses
        rpn_losses["loss_rpn_reg"] = rpn_losses.pop("loss_rpn_box_reg")
        rpn_losses["loss_rpn_cls"] = rpn_losses.pop("loss_objectness")
        losses = {}
        losses.update(rcnn_losses)
        losses.update(rpn_losses)
        # apply loss weights
        losses["loss_rpn_reg"] *= self.lw_rpn_reg
        losses["loss_rpn_cls"] *= self.lw_rpn_cls
        losses["loss_rcnn_reg_1st"] *= self.lw_rcnn_reg_1st
        losses["loss_rcnn_cls_1st"] *= self.lw_rcnn_cls_1st
        losses["loss_rcnn_reg_2nd"] *= self.lw_rcnn_reg_2nd
        losses["loss_rcnn_cls_2nd"] *= self.lw_rcnn_cls_2nd
        losses["loss_rcnn_reg_3rd"] *= self.lw_rcnn_reg_3rd
        losses["loss_rcnn_cls_3rd"] *= self.lw_rcnn_cls_3rd
        losses["loss_rcnn_reid_2nd"] *= self.lw_rcnn_reid_2nd
        losses["loss_rcnn_reid_3rd"] *= self.lw_rcnn_reid_3rd
        return losses, feats_reid_2nd, targets_reid_2nd, feats_reid_3rd, targets_reid_3rd
class CascadedROIHeads(RoIHeads):
    '''
    Three-stage cascaded ROI heads for person search, built on torchvision's
    RoIHeads:
    https://github.com/pytorch/vision/blob/master/torchvision/models/detection/roi_heads.py
    Stage 1 is a Faster R-CNN style box classifier/regressor; stages 2 and 3
    refine the boxes and additionally produce re-id embeddings whose OIM
    losses are computed here during training.
    '''
    def __init__(
        self,
        cfg,
        faster_rcnn_predictor,
        box_head_2nd,
        box_head_3rd,
        *args,
        **kwargs
    ):
        """
        Args:
            cfg: config node supplying per-stage NMS/IoU thresholds, the
                embedding dimension and the OIM loss sizes.
            faster_rcnn_predictor: stage-1 classification/regression predictor.
            box_head_2nd, box_head_3rd: feature transformer heads for stages 2/3.
            *args, **kwargs: forwarded to torchvision's RoIHeads.__init__.
        """
        super(CascadedROIHeads, self).__init__(*args, **kwargs)
        # ROI head: per-stage NMS (test) and fg/bg IoU (train) thresholds.
        self.use_diff_thresh=cfg.MODEL.ROI_HEAD.USE_DIFF_THRESH
        self.nms_thresh_1st = cfg.MODEL.ROI_HEAD.NMS_THRESH_TEST_1ST
        self.nms_thresh_2nd = cfg.MODEL.ROI_HEAD.NMS_THRESH_TEST_2ND
        self.nms_thresh_3rd = cfg.MODEL.ROI_HEAD.NMS_THRESH_TEST_3RD
        self.fg_iou_thresh_1st = cfg.MODEL.ROI_HEAD.POS_THRESH_TRAIN
        self.bg_iou_thresh_1st = cfg.MODEL.ROI_HEAD.NEG_THRESH_TRAIN
        self.fg_iou_thresh_2nd = cfg.MODEL.ROI_HEAD.POS_THRESH_TRAIN_2ND
        self.bg_iou_thresh_2nd = cfg.MODEL.ROI_HEAD.NEG_THRESH_TRAIN_2ND
        self.fg_iou_thresh_3rd = cfg.MODEL.ROI_HEAD.POS_THRESH_TRAIN_3RD
        self.bg_iou_thresh_3rd = cfg.MODEL.ROI_HEAD.NEG_THRESH_TRAIN_3RD
        # Regression head: stage 2 reuses the predictor built by the parent
        # RoIHeads; stage 3 starts from a deep copy of it.
        self.box_predictor_1st = faster_rcnn_predictor
        self.box_predictor_2nd = self.box_predictor
        self.box_predictor_3rd = deepcopy(self.box_predictor)
        # Transformer head
        self.box_head_1st = self.box_head
        self.box_head_2nd = box_head_2nd
        self.box_head_3rd = box_head_3rd
        # feature mask
        self.use_feature_mask = cfg.MODEL.USE_FEATURE_MASK
        self.feature_mask_size = cfg.MODEL.FEATURE_MASK_SIZE
        # Feature embedding (stage 3 is a deep copy of the stage-2 head)
        embedding_dim = cfg.MODEL.EMBEDDING_DIM
        self.embedding_head_2nd = NormAwareEmbedding(featmap_names=["before_trans", "after_trans"], in_channels=[1024, 2048], dim=embedding_dim)
        self.embedding_head_3rd = deepcopy(self.embedding_head_2nd)
        # OIM re-id losses, one memory bank per embedding stage
        num_pids = cfg.MODEL.LOSS.LUT_SIZE
        num_cq_size = cfg.MODEL.LOSS.CQ_SIZE
        oim_momentum = cfg.MODEL.LOSS.OIM_MOMENTUM
        oim_scalar = cfg.MODEL.LOSS.OIM_SCALAR
        self.reid_loss_2nd = OIMLoss(embedding_dim, num_pids, num_cq_size, oim_momentum, oim_scalar)
        self.reid_loss_3rd = deepcopy(self.reid_loss_2nd)
        # rename the method inherited from parent class
        self.postprocess_proposals = self.postprocess_detections
        # evaluation: which stage's embedding to report ('concat'/'stage2'/'stage3')
        self.eval_feat = cfg.EVAL_FEATURE
    def forward(self, features, boxes, image_shapes, targets=None, query_img_as_gallery=False):
        """
        Arguments:
            features (List[Tensor])
            boxes (List[Tensor[N, 4]])
            image_shapes (List[Tuple[H, W]])
            targets (List[Dict])
            query_img_as_gallery (bool): at test time, force the GT query box
                into the detection results (CWS is then disabled).
        Returns:
            On the empty-detection early exits (test only): (result, []).
            Otherwise a 6-tuple (result, losses, feats_reid_2nd,
            targets_reid_2nd, feats_reid_3rd, targets_reid_3rd); at test time
            losses is {} and the four re-id entries are None.
        """
        cws = True
        gt_det_2nd = None
        gt_det_3rd = None
        feats_reid_2nd = None
        feats_reid_3rd = None
        targets_reid_2nd = None
        targets_reid_3rd = None
        if self.training:
            # re-match proposals with the stage-1 IoU thresholds before sampling
            if self.use_diff_thresh:
                self.proposal_matcher = det_utils.Matcher(
                    self.fg_iou_thresh_1st,
                    self.bg_iou_thresh_1st,
                    allow_low_quality_matches=False)
            boxes, _, box_pid_labels_1st, box_reg_targets_1st = self.select_training_samples(
                boxes, targets
            )
        # ------------------- The first stage ------------------ #
        box_features_1st = self.box_roi_pool(features, boxes, image_shapes)
        box_features_1st = self.box_head_1st(box_features_1st)
        box_cls_scores_1st, box_regs_1st = self.box_predictor_1st(box_features_1st["after_trans"])
        if self.training:
            # feed stage-1 refined boxes (detached) to stage 2 and re-sample
            boxes = self.get_boxes(box_regs_1st, boxes, image_shapes)
            boxes = [boxes_per_image.detach() for boxes_per_image in boxes]
            if self.use_diff_thresh:
                self.proposal_matcher = det_utils.Matcher(
                    self.fg_iou_thresh_2nd,
                    self.bg_iou_thresh_2nd,
                    allow_low_quality_matches=False)
            boxes, _, box_pid_labels_2nd, box_reg_targets_2nd = self.select_training_samples(boxes, targets)
        else:
            # temporarily override the parent's nms_thresh per stage; the
            # original value is restored after postprocessing below
            orig_thresh = self.nms_thresh # 0.4
            self.nms_thresh = self.nms_thresh_1st
            boxes, scores, _ = self.postprocess_proposals(
                box_cls_scores_1st, box_regs_1st, boxes, image_shapes
            )
        if not self.training and query_img_as_gallery:
            # When regarding the query image as gallery, GT boxes may be excluded
            # from detected boxes. To avoid this, we compulsorily include GT in the
            # detection results. Additionally, CWS should be disabled as the
            # confidences of these people in query image are 1
            cws = False
            gt_box = [targets[0]["boxes"]]
            gt_box_features = self.box_roi_pool(features, gt_box, image_shapes)
            gt_box_features = self.box_head_2nd(gt_box_features)
            embeddings, _ = self.embedding_head_2nd(gt_box_features)
            gt_det_2nd = {"boxes": targets[0]["boxes"], "embeddings": embeddings}
        # no detection predicted by Faster R-CNN head in test phase
        if boxes[0].shape[0] == 0:
            assert not self.training
            boxes = gt_det_2nd["boxes"] if gt_det_2nd else torch.zeros(0, 4)
            labels = torch.ones(1).type_as(boxes) if gt_det_2nd else torch.zeros(0)
            scores = torch.ones(1).type_as(boxes) if gt_det_2nd else torch.zeros(0)
            if self.eval_feat == 'concat':
                embeddings = torch.cat((gt_det_2nd["embeddings"], gt_det_2nd["embeddings"]), dim=1) if gt_det_2nd else torch.zeros(0, 512)
            elif self.eval_feat == 'stage2' or self.eval_feat == 'stage3':
                embeddings = gt_det_2nd["embeddings"] if gt_det_2nd else torch.zeros(0, 256)
            else:
                raise Exception("Unknown evaluation feature name")
            return [dict(boxes=boxes, labels=labels, scores=scores, embeddings=embeddings)], []
        # --------------------- The second stage -------------------- #
        box_features = self.box_roi_pool(features, boxes, image_shapes)
        box_features = self.box_head_2nd(box_features)
        box_regs_2nd = self.box_predictor_2nd(box_features["after_trans"])
        box_embeddings_2nd, box_cls_scores_2nd = self.embedding_head_2nd(box_features)
        if box_cls_scores_2nd.dim() == 0:
            # a single ROI yields a 0-dim score after squeeze(); restore batch dim
            box_cls_scores_2nd = box_cls_scores_2nd.unsqueeze(0)
        if self.training:
            boxes = self.get_boxes(box_regs_2nd, boxes, image_shapes)
            boxes = [boxes_per_image.detach() for boxes_per_image in boxes]
            if self.use_diff_thresh:
                self.proposal_matcher = det_utils.Matcher(
                    self.fg_iou_thresh_3rd,
                    self.bg_iou_thresh_3rd,
                    allow_low_quality_matches=False)
            boxes, _, box_pid_labels_3rd, box_reg_targets_3rd = self.select_training_samples(boxes, targets)
        else:
            self.nms_thresh = self.nms_thresh_2nd
            if self.eval_feat != 'stage2':
                # stage-1 scores act as the First Classification Score (fcs)
                boxes, scores, _, _ = self.postprocess_boxes(
                    box_cls_scores_2nd,
                    box_regs_2nd,
                    box_embeddings_2nd,
                    boxes,
                    image_shapes,
                    fcs=scores,
                    gt_det=None,
                    cws=cws,
                )
        if not self.training and query_img_as_gallery and self.eval_feat != 'stage2':
            # same GT-inclusion trick as above, but with the stage-3 heads
            cws = False
            gt_box = [targets[0]["boxes"]]
            gt_box_features = self.box_roi_pool(features, gt_box, image_shapes)
            gt_box_features = self.box_head_3rd(gt_box_features)
            embeddings, _ = self.embedding_head_3rd(gt_box_features)
            gt_det_3rd = {"boxes": targets[0]["boxes"], "embeddings": embeddings}
        # no detection predicted by Faster R-CNN head in test phase
        if boxes[0].shape[0] == 0 and self.eval_feat != 'stage2':
            assert not self.training
            boxes = gt_det_3rd["boxes"] if gt_det_3rd else torch.zeros(0, 4)
            labels = torch.ones(1).type_as(boxes) if gt_det_3rd else torch.zeros(0)
            scores = torch.ones(1).type_as(boxes) if gt_det_3rd else torch.zeros(0)
            if self.eval_feat == 'concat':
                embeddings = torch.cat((gt_det_2nd["embeddings"], gt_det_3rd["embeddings"]), dim=1) if gt_det_3rd else torch.zeros(0, 512)
            elif self.eval_feat == 'stage3':
                embeddings = gt_det_2nd["embeddings"] if gt_det_3rd else torch.zeros(0, 256)
            else:
                raise Exception("Unknown evaluation feature name")
            return [dict(boxes=boxes, labels=labels, scores=scores, embeddings=embeddings)], []
        # --------------------- The third stage -------------------- #
        box_features = self.box_roi_pool(features, boxes, image_shapes)
        if not self.training:
            # at test time, recompute stage-2 embeddings for the current boxes
            box_features_2nd = self.box_head_2nd(box_features)
            box_embeddings_2nd, _ = self.embedding_head_2nd(box_features_2nd)
        box_features = self.box_head_3rd(box_features)
        box_regs_3rd = self.box_predictor_3rd(box_features["after_trans"])
        box_embeddings_3rd, box_cls_scores_3rd = self.embedding_head_3rd(box_features)
        if box_cls_scores_3rd.dim() == 0:
            box_cls_scores_3rd = box_cls_scores_3rd.unsqueeze(0)
        result, losses = [], {}
        if self.training:
            # collapse person ids to binary fg/bg labels for detection losses
            box_labels_1st = [y.clamp(0, 1) for y in box_pid_labels_1st]
            box_labels_2nd = [y.clamp(0, 1) for y in box_pid_labels_2nd]
            box_labels_3rd = [y.clamp(0, 1) for y in box_pid_labels_3rd]
            losses = detection_losses(
                box_cls_scores_1st,
                box_regs_1st,
                box_labels_1st,
                box_reg_targets_1st,
                box_cls_scores_2nd,
                box_regs_2nd,
                box_labels_2nd,
                box_reg_targets_2nd,
                box_cls_scores_3rd,
                box_regs_3rd,
                box_labels_3rd,
                box_reg_targets_3rd,
            )
            loss_rcnn_reid_2nd, feats_reid_2nd, targets_reid_2nd = self.reid_loss_2nd(box_embeddings_2nd, box_pid_labels_2nd)
            loss_rcnn_reid_3rd, feats_reid_3rd, targets_reid_3rd = self.reid_loss_3rd(box_embeddings_3rd, box_pid_labels_3rd)
            losses.update(loss_rcnn_reid_2nd=loss_rcnn_reid_2nd)
            losses.update(loss_rcnn_reid_3rd=loss_rcnn_reid_3rd)
        else:
            if self.eval_feat == 'stage2':
                boxes, scores, embeddings_2nd, labels = self.postprocess_boxes(
                    box_cls_scores_2nd,
                    box_regs_2nd,
                    box_embeddings_2nd,
                    boxes,
                    image_shapes,
                    fcs=scores,
                    gt_det=gt_det_2nd,
                    cws=cws,
                )
            else:
                self.nms_thresh = self.nms_thresh_3rd
                # first pass only to obtain stage-2 embeddings filtered
                # consistently with the stage-3 scores
                _, _, embeddings_2nd, _ = self.postprocess_boxes(
                    box_cls_scores_3rd,
                    box_regs_3rd,
                    box_embeddings_2nd,
                    boxes,
                    image_shapes,
                    fcs=scores,
                    gt_det=gt_det_2nd,
                    cws=cws,
                )
                boxes, scores, embeddings_3rd, labels = self.postprocess_boxes(
                    box_cls_scores_3rd,
                    box_regs_3rd,
                    box_embeddings_3rd,
                    boxes,
                    image_shapes,
                    fcs=scores,
                    gt_det=gt_det_3rd,
                    cws=cws,
                )
            # set to original thresh after finishing postprocess
            self.nms_thresh = orig_thresh
            num_images = len(boxes)
            for i in range(num_images):
                if self.eval_feat == 'concat':
                    embeddings = torch.cat((embeddings_2nd[i],embeddings_3rd[i]), dim=1)
                elif self.eval_feat == 'stage2':
                    embeddings = embeddings_2nd[i]
                elif self.eval_feat == 'stage3':
                    embeddings = embeddings_3rd[i]
                else:
                    raise Exception("Unknown evaluation feature name")
                result.append(
                    dict(
                        boxes=boxes[i],
                        labels=labels[i],
                        scores=scores[i],
                        embeddings=embeddings
                    )
                )
        return result, losses, feats_reid_2nd, targets_reid_2nd, feats_reid_3rd, targets_reid_3rd
    def get_boxes(self, box_regression, proposals, image_shapes):
        """
        Get boxes from proposals.

        Decodes the regression deltas w.r.t. the proposals, clips them to the
        image, and drops the background column.
        """
        boxes_per_image = [len(boxes_in_image) for boxes_in_image in proposals]
        pred_boxes = self.box_coder.decode(box_regression, proposals)
        pred_boxes = pred_boxes.split(boxes_per_image, 0)
        all_boxes = []
        for boxes, image_shape in zip(pred_boxes, image_shapes):
            boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
            # remove predictions with the background label
            boxes = boxes[:, 1:].reshape(-1, 4)
            all_boxes.append(boxes)
        return all_boxes
    def postprocess_boxes(
        self,
        class_logits,
        box_regression,
        embeddings,
        proposals,
        image_shapes,
        fcs=None,
        gt_det=None,
        cws=True,
    ):
        """
        Similar to RoIHeads.postprocess_detections, but can handle embeddings and implement
        First Classification Score (FCS).

        Args:
            fcs: if given, the stage-1 scores replace sigmoid(class_logits).
            gt_det: optional {"boxes", "embeddings"} dict appended with score 1.
            cws: scale embeddings by the detection confidence
                (Confidence Weighted Similarity).
        Returns:
            per-image lists: (all_boxes, all_scores, all_embeddings, all_labels).
        """
        device = class_logits.device
        boxes_per_image = [len(boxes_in_image) for boxes_in_image in proposals]
        pred_boxes = self.box_coder.decode(box_regression, proposals)
        if fcs is not None:
            # Fist Classification Score (FCS)
            pred_scores = fcs[0]
        else:
            pred_scores = torch.sigmoid(class_logits)
        if cws:
            # Confidence Weighted Similarity (CWS)
            embeddings = embeddings * pred_scores.view(-1, 1)
        # split boxes and scores per image
        pred_boxes = pred_boxes.split(boxes_per_image, 0)
        pred_scores = pred_scores.split(boxes_per_image, 0)
        pred_embeddings = embeddings.split(boxes_per_image, 0)
        all_boxes = []
        all_scores = []
        all_labels = []
        all_embeddings = []
        for boxes, scores, embeddings, image_shape in zip(
            pred_boxes, pred_scores, pred_embeddings, image_shapes
        ):
            boxes = box_ops.clip_boxes_to_image(boxes, image_shape)
            # create labels for each prediction
            labels = torch.ones(scores.size(0), device=device)
            # remove predictions with the background label
            boxes = boxes[:, 1:]
            scores = scores.unsqueeze(1)
            labels = labels.unsqueeze(1)
            # batch everything, by making every class prediction be a separate instance
            boxes = boxes.reshape(-1, 4)
            scores = scores.flatten()
            labels = labels.flatten()
            embeddings = embeddings.reshape(-1, self.embedding_head_2nd.dim)
            # remove low scoring boxes
            inds = torch.nonzero(scores > self.score_thresh).squeeze(1)
            boxes, scores, labels, embeddings = (
                boxes[inds],
                scores[inds],
                labels[inds],
                embeddings[inds],
            )
            # remove empty boxes
            keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)
            boxes, scores, labels, embeddings = (
                boxes[keep],
                scores[keep],
                labels[keep],
                embeddings[keep],
            )
            if gt_det is not None:
                # include GT into the detection results
                boxes = torch.cat((boxes, gt_det["boxes"]), dim=0)
                labels = torch.cat((labels, torch.tensor([1.0]).to(device)), dim=0)
                scores = torch.cat((scores, torch.tensor([1.0]).to(device)), dim=0)
                embeddings = torch.cat((embeddings, gt_det["embeddings"]), dim=0)
            # non-maximum suppression, independently done per class
            keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)
            # keep only topk scoring predictions
            keep = keep[: self.detections_per_img]
            boxes, scores, labels, embeddings = (
                boxes[keep],
                scores[keep],
                labels[keep],
                embeddings[keep],
            )
            all_boxes.append(boxes)
            all_scores.append(scores)
            all_labels.append(labels)
            all_embeddings.append(embeddings)
        return all_boxes, all_scores, all_embeddings, all_labels
class NormAwareEmbedding(nn.Module):
    """
    Implements the Norm-Aware Embedding proposed in
    Chen, Di, et al. "Norm-aware embedding for efficient person search." CVPR 2020.

    Each named feature map is projected (Linear + BatchNorm1d) to a
    sub-embedding; the concatenated embedding is L2-normalized and its
    BatchNorm-rescaled norm serves as the detection class logit.
    """
    def __init__(self, featmap_names=["feat_res4", "feat_res5"], in_channels=[1024, 2048], dim=256):
        """
        Args:
            featmap_names: dict keys of the feature maps to embed (read-only).
            in_channels: channel count of each named feature map.
            dim: total embedding dimension, split across the feature maps.
        """
        super(NormAwareEmbedding, self).__init__()
        self.featmap_names = featmap_names
        self.in_channels = in_channels
        self.dim = dim
        self.projectors = nn.ModuleDict()
        indv_dims = self._split_embedding_dim()
        for ftname, in_channel, indv_dim in zip(self.featmap_names, self.in_channels, indv_dims):
            proj = nn.Sequential(nn.Linear(in_channel, indv_dim), nn.BatchNorm1d(indv_dim))
            init.normal_(proj[0].weight, std=0.01)
            init.normal_(proj[1].weight, std=0.01)
            init.constant_(proj[0].bias, 0)
            init.constant_(proj[1].bias, 0)
            self.projectors[ftname] = proj
        # Rescales the raw embedding norm into a usable classification logit.
        self.rescaler = nn.BatchNorm1d(1, affine=True)
    def forward(self, featmaps):
        """
        Arguments:
            featmaps: OrderedDict[Tensor], and in featmap_names you can choose which
                featmaps to use
        Returns:
            tensor of size (BatchSize, dim), L2 normalized embeddings.
            tensor of size (BatchSize, ) rescaled norm of embeddings, as class_logits.
        """
        assert len(featmaps) == len(self.featmap_names)
        if len(featmaps) == 1:
            # BUGFIX: dict views are not subscriptable, so the original
            # `featmaps.items()[0]` raised TypeError. Take the single item
            # via an iterator instead.
            k, v = next(iter(featmaps.items()))
            v = self._flatten_fc_input(v)
            embeddings = self.projectors[k](v)
        else:
            outputs = []
            for k, v in featmaps.items():
                v = self._flatten_fc_input(v)
                outputs.append(self.projectors[k](v))
            embeddings = torch.cat(outputs, dim=1)
        # Shared tail (previously duplicated in both branches): L2-normalize
        # the embedding and rescale its norm into a logit.
        norms = embeddings.norm(2, 1, keepdim=True)
        embeddings = embeddings / norms.expand_as(embeddings).clamp(min=1e-12)
        norms = self.rescaler(norms).squeeze()
        return embeddings, norms
    def _flatten_fc_input(self, x):
        # Accept either (N, C) or (N, C, 1, 1) inputs.
        if x.ndimension() == 4:
            assert list(x.shape[2:]) == [1, 1]
            return x.flatten(start_dim=1)
        return x
    def _split_embedding_dim(self):
        # Split `dim` as evenly as possible; any remainder is distributed to
        # the last parts, one extra dimension each.
        parts = len(self.in_channels)
        tmp = [self.dim // parts] * parts
        if sum(tmp) == self.dim:
            return tmp
        else:
            res = self.dim % parts
            for i in range(1, res + 1):
                tmp[-i] += 1
            assert sum(tmp) == self.dim
            return tmp
class BBoxRegressor(nn.Module):
    """
    Bounding box regression layer: a linear projection to 4 deltas per class,
    optionally followed by a BatchNorm1d neck.
    """
    def __init__(self, in_channels, num_classes=2, bn_neck=True):
        """
        Args:
            in_channels (int): Input channels.
            num_classes (int, optional): Defaults to 2 (background and pedestrian).
            bn_neck (bool, optional): Whether to use BN after Linear. Defaults to True.
        """
        super(BBoxRegressor, self).__init__()
        out_channels = 4 * num_classes
        linear = nn.Linear(in_channels, out_channels)
        if bn_neck:
            bn = nn.BatchNorm1d(out_channels)
            init.normal_(linear.weight, std=0.01)
            init.normal_(bn.weight, std=0.01)
            init.constant_(linear.bias, 0)
            init.constant_(bn.bias, 0)
            self.bbox_pred = nn.Sequential(linear, bn)
        else:
            init.normal_(linear.weight, std=0.01)
            init.constant_(linear.bias, 0)
            self.bbox_pred = linear
    def forward(self, x):
        if x.ndimension() == 4:
            # Average any larger spatial extent down to 1x1, then flatten.
            if list(x.shape[2:]) != [1, 1]:
                x = F.adaptive_avg_pool2d(x, output_size=1)
            x = x.flatten(start_dim=1)
        return self.bbox_pred(x)
def detection_losses(
    box_cls_scores_1st,
    box_regs_1st,
    box_labels_1st,
    box_reg_targets_1st,
    box_cls_scores_2nd,
    box_regs_2nd,
    box_labels_2nd,
    box_reg_targets_2nd,
    box_cls_scores_3rd,
    box_regs_3rd,
    box_labels_3rd,
    box_reg_targets_3rd,
):
    """
    Compute classification and box-regression losses for the three cascade
    stages.

    Each ``box_labels_*`` / ``box_reg_targets_*`` argument is a list of
    per-image tensors; they are concatenated into one batch here. Stage 1
    uses a two-way softmax classifier (cross entropy); stages 2 and 3 use
    scalar norm-aware scores (binary cross entropy with logits).

    Returns:
        dict with keys loss_rcnn_cls_{1st,2nd,3rd} and loss_rcnn_reg_{1st,2nd,3rd}.
    """

    def _reg_loss(cls_scores, regs, labels, reg_targets):
        # Smooth-L1 over positive samples only, picking each sample's
        # regression row for its ground-truth class, normalized by the total
        # number of sampled boxes.
        pos_inds = torch.nonzero(labels > 0).squeeze(1)
        pos_labels = labels[pos_inds]
        per_class_regs = regs.reshape(cls_scores.size(0), -1, 4)
        loss = F.smooth_l1_loss(
            per_class_regs[pos_inds, pos_labels],
            reg_targets[pos_inds],
            reduction="sum",
        )
        return loss / labels.numel()

    labels_1st = torch.cat(box_labels_1st, dim=0)
    labels_2nd = torch.cat(box_labels_2nd, dim=0)
    labels_3rd = torch.cat(box_labels_3rd, dim=0)
    targets_1st = torch.cat(box_reg_targets_1st, dim=0)
    targets_2nd = torch.cat(box_reg_targets_2nd, dim=0)
    targets_3rd = torch.cat(box_reg_targets_3rd, dim=0)

    return dict(
        loss_rcnn_cls_1st=F.cross_entropy(box_cls_scores_1st, labels_1st),
        loss_rcnn_reg_1st=_reg_loss(box_cls_scores_1st, box_regs_1st, labels_1st, targets_1st),
        loss_rcnn_cls_2nd=F.binary_cross_entropy_with_logits(box_cls_scores_2nd, labels_2nd.float()),
        loss_rcnn_reg_2nd=_reg_loss(box_cls_scores_2nd, box_regs_2nd, labels_2nd, targets_2nd),
        loss_rcnn_cls_3rd=F.binary_cross_entropy_with_logits(box_cls_scores_3rd, labels_3rd.float()),
        loss_rcnn_reg_3rd=_reg_loss(box_cls_scores_3rd, box_regs_3rd, labels_3rd, targets_3rd),
    )
| 32,368 | 41.25718 | 153 | py |
COAT | COAT-main/datasets/base.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import torch
from PIL import Image
class BaseDataset:
    """
    Base class of person search datasets.

    Subclasses provide :meth:`_load_annotations`; this class implements
    indexing (image loading + transforms) and length.
    """
    def __init__(self, root, transforms, split):
        assert split in ("train", "gallery", "query")
        self.root = root
        self.transforms = transforms
        self.split = split
        self.annotations = self._load_annotations()
    def _load_annotations(self):
        """
        For each image, load its annotation that is a dictionary with the following keys:
            img_name (str): image name
            img_path (str): image path
            boxes (np.array[N, 4]): ground-truth boxes in (x1, y1, x2, y2) format
            pids (np.array[N]): person IDs corresponding to these boxes
            cam_id (int): camera ID (only for PRW dataset)
        """
        raise NotImplementedError
    def __getitem__(self, index):
        record = self.annotations[index]
        image = Image.open(record["img_path"]).convert("RGB")
        target = {
            "img_name": record["img_name"],
            "boxes": torch.as_tensor(record["boxes"], dtype=torch.float32),
            "labels": torch.as_tensor(record["pids"], dtype=torch.int64),
        }
        if self.transforms is not None:
            image, target = self.transforms(image, target)
        return image, target
    def __len__(self):
        return len(self.annotations)
| 1,559 | 35.27907 | 89 | py |
COAT | COAT-main/datasets/prw.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import os.path as osp
import re
import numpy as np
from scipy.io import loadmat
from .base import BaseDataset
class PRW(BaseDataset):
    """PRW person-search dataset: video frames plus per-image .mat annotations."""
    def __init__(self, root, transforms, split):
        self.name = "PRW"
        self.img_prefix = osp.join(root, "frames")
        super(PRW, self).__init__(root, transforms, split)
    def _get_cam_id(self, img_name):
        # Camera id is embedded in the file name as "c<digit>".
        match = re.search(r"c\d", img_name).group().replace("c", "")
        return int(match)
    def _load_queries(self):
        # query_info.txt rows look like: "<pid> <x> <y> <w> <h> <img_name...>"
        query_info = osp.join(self.root, "query_info.txt")
        with open(query_info, "rb") as f:
            raw = f.readlines()
        queries = []
        for line in raw:
            linelist = str(line, "utf-8").split(" ")
            pid = int(linelist[0])
            x, y, w, h = (
                float(linelist[1]),
                float(linelist[2]),
                float(linelist[3]),
                float(linelist[4]),
            )
            # convert (x, y, w, h) to clipped integer (x1, y1, x2, y2)
            roi = np.array([x, y, x + w, y + h]).astype(np.int32)
            roi = np.clip(roi, 0, None) # several coordinates are negative
            # NOTE(review): [:-2] appears to strip a 2-char trailing remnant
            # of the name field before re-adding ".jpg" — confirm against the
            # actual query_info.txt contents.
            img_name = linelist[5][:-2] + ".jpg"
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],
                    "pids": np.array([pid]),
                    "cam_id": self._get_cam_id(img_name),
                }
            )
        return queries
    def _load_split_img_names(self):
        """
        Load the image names for the specific split.
        """
        assert self.split in ("train", "gallery")
        if self.split == "train":
            imgs = loadmat(osp.join(self.root, "frame_train.mat"))["img_index_train"]
        else:
            imgs = loadmat(osp.join(self.root, "frame_test.mat"))["img_index_test"]
        return [img[0][0] + ".jpg" for img in imgs]
    def _load_annotations(self):
        if self.split == "query":
            return self._load_queries()
        annotations = []
        imgs = self._load_split_img_names()
        for img_name in imgs:
            anno_path = osp.join(self.root, "annotations", img_name)
            anno = loadmat(anno_path)
            # The .mat files use one of several historical keys for the
            # [pid, x, y, w, h] box matrix; try them in order.
            box_key = "box_new"
            if box_key not in anno.keys():
                box_key = "anno_file"
            if box_key not in anno.keys():
                box_key = "anno_previous"
            rois = anno[box_key][:, 1:]
            ids = anno[box_key][:, 0]
            rois = np.clip(rois, 0, None) # several coordinates are negative
            assert len(rois) == len(ids)
            # (x, y, w, h) -> (x1, y1, x2, y2)
            rois[:, 2:] += rois[:, :2]
            ids[ids == -2] = 5555 # assign pid = 5555 for unlabeled people
            annotations.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": rois.astype(np.int32),
                    # (training pids) 1, 2,..., 478, 480, 481, 482, 483, 932, 5555
                    "pids": ids.astype(np.int32),
                    "cam_id": self._get_cam_id(img_name),
                }
            )
        return annotations
| 3,413 | 33.836735 | 85 | py |
COAT | COAT-main/datasets/cuhk_sysu.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import os.path as osp
import numpy as np
from scipy.io import loadmat
from .base import BaseDataset
class CUHKSYSU(BaseDataset):
    """CUHK-SYSU person-search dataset ("SSM" images, MATLAB annotation files)."""
    def __init__(self, root, transforms, split):
        self.name = "CUHK-SYSU"
        self.img_prefix = osp.join(root, "Image", "SSM")
        super(CUHKSYSU, self).__init__(root, transforms, split)
    def _load_queries(self):
        # TestG50: a test protocol, 50 gallery images per query
        protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
        protoc = protoc["TestG50"].squeeze()
        queries = []
        for item in protoc["Query"]:
            img_name = str(item["imname"][0, 0][0])
            roi = item["idlocate"][0, 0][0].astype(np.int32)
            # (x, y, w, h) -> (x1, y1, x2, y2)
            roi[2:] += roi[:2]
            queries.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": roi[np.newaxis, :],
                    "pids": np.array([-100]), # dummy pid
                }
            )
        return queries
    def _load_split_img_names(self):
        """
        Load the image names for the specific split.
        """
        assert self.split in ("train", "gallery")
        # gallery images
        gallery_imgs = loadmat(osp.join(self.root, "annotation", "pool.mat"))
        gallery_imgs = gallery_imgs["pool"].squeeze()
        gallery_imgs = [str(a[0]) for a in gallery_imgs]
        if self.split == "gallery":
            return gallery_imgs
        # all images
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        all_imgs = [str(a[0][0]) for a in all_imgs]
        # training images = all images - gallery images
        training_imgs = sorted(list(set(all_imgs) - set(gallery_imgs)))
        return training_imgs
    def _load_annotations(self):
        if self.split == "query":
            return self._load_queries()
        # load all images and build a dict from image to boxes
        all_imgs = loadmat(osp.join(self.root, "annotation", "Images.mat"))
        all_imgs = all_imgs["Img"].squeeze()
        name_to_boxes = {}
        name_to_pids = {}
        unlabeled_pid = 5555 # default pid for unlabeled people
        for img_name, _, boxes in all_imgs:
            img_name = str(img_name[0])
            boxes = np.asarray([b[0] for b in boxes[0]])
            boxes = boxes.reshape(boxes.shape[0], 4) # (x1, y1, w, h)
            # keep only boxes with positive width and height
            valid_index = np.where((boxes[:, 2] > 0) & (boxes[:, 3] > 0))[0]
            assert valid_index.size > 0, "Warning: {} has no valid boxes.".format(img_name)
            boxes = boxes[valid_index]
            name_to_boxes[img_name] = boxes.astype(np.int32)
            name_to_pids[img_name] = unlabeled_pid * np.ones(boxes.shape[0], dtype=np.int32)
        def set_box_pid(boxes, box, pids, pid):
            # Find `box` among `boxes` and record its identity in `pids`.
            for i in range(boxes.shape[0]):
                if np.all(boxes[i] == box):
                    pids[i] = pid
                    return
        # assign a unique pid from 1 to N for each identity
        if self.split == "train":
            train = loadmat(osp.join(self.root, "annotation/test/train_test/Train.mat"))
            train = train["Train"].squeeze()
            for index, item in enumerate(train):
                scenes = item[0, 0][2].squeeze()
                for img_name, box, _ in scenes:
                    img_name = str(img_name[0])
                    box = box.squeeze().astype(np.int32)
                    set_box_pid(name_to_boxes[img_name], box, name_to_pids[img_name], index + 1)
        else:
            protoc = loadmat(osp.join(self.root, "annotation/test/train_test/TestG50.mat"))
            protoc = protoc["TestG50"].squeeze()
            for index, item in enumerate(protoc):
                # query
                im_name = str(item["Query"][0, 0][0][0])
                box = item["Query"][0, 0][1].squeeze().astype(np.int32)
                set_box_pid(name_to_boxes[im_name], box, name_to_pids[im_name], index + 1)
                # gallery
                gallery = item["Gallery"].squeeze()
                for im_name, box, _ in gallery:
                    im_name = str(im_name[0])
                    if box.size == 0:
                        break
                    box = box.squeeze().astype(np.int32)
                    set_box_pid(name_to_boxes[im_name], box, name_to_pids[im_name], index + 1)
        annotations = []
        imgs = self._load_split_img_names()
        for img_name in imgs:
            boxes = name_to_boxes[img_name]
            boxes[:, 2:] += boxes[:, :2] # (x1, y1, w, h) -> (x1, y1, x2, y2)
            pids = name_to_pids[img_name]
            annotations.append(
                {
                    "img_name": img_name,
                    "img_path": osp.join(self.img_prefix, img_name),
                    "boxes": boxes,
                    "pids": pids,
                }
            )
        return annotations
| 5,228 | 41.860656 | 96 | py |
COAT | COAT-main/datasets/__init__.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
from .build import build_test_loader, build_train_loader
| 250 | 40.833333 | 66 | py |
COAT | COAT-main/datasets/build.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import torch
from utils.transforms import build_transforms
from utils.utils import create_small_table
from .cuhk_sysu import CUHKSYSU
from .prw import PRW
def print_statistics(dataset):
    """
    Print dataset statistics.

    Summarizes image/box counts and, where usable labels exist, the labeled
    pid range (CUHK-SYSU query annotations carry dummy pids and are skipped).
    """
    pid_set = set()
    num_boxes = 0
    for anno in dataset.annotations:
        num_boxes += anno["boxes"].shape[0]
        pid_set.update(anno["pids"])
    statistics = {
        "dataset": dataset.name,
        "split": dataset.split,
        "num_images": len(dataset.annotations),
        "num_boxes": num_boxes,
    }
    if dataset.name != "CUHK-SYSU" or dataset.split != "query":
        pid_list = sorted(pid_set)
        if dataset.split == "query":
            labeled = pid_list
        else:
            # The largest pid is the sentinel for unlabeled people.
            unlabeled = int(pid_list[-1])
            labeled = pid_list[:-1]
        statistics.update(
            {
                "num_labeled_pids": len(labeled),
                "min_labeled_pid": int(min(labeled)),
                "max_labeled_pid": int(max(labeled)),
            }
        )
        if dataset.split != "query":
            statistics["unlabeled_pid"] = unlabeled
    print(f"=> {dataset.name}-{dataset.split} loaded:\n" + create_small_table(statistics))
def build_dataset(dataset_name, root, transforms, split, verbose=True):
    """
    Instantiate a person-search dataset by name.

    Args:
        dataset_name: "CUHK-SYSU" or "PRW".
        root: dataset root directory.
        transforms: callable applied to (image, target) pairs.
        split: one of "train", "gallery", "query".
        verbose: if True, print the dataset statistics table.

    Raises:
        NotImplementedError: for an unrecognized dataset name.
    """
    if dataset_name == "CUHK-SYSU":
        dataset = CUHKSYSU(root, transforms, split)
    elif dataset_name == "PRW":
        dataset = PRW(root, transforms, split)
    else:
        # Fixed typo in the error message ("Unknow" -> "Unknown").
        raise NotImplementedError(f"Unknown dataset: {dataset_name}")
    if verbose:
        print_statistics(dataset)
    return dataset
def collate_fn(batch):
    """Transpose a batch of (image, target) samples into per-field tuples."""
    fields = zip(*batch)
    return tuple(fields)
def build_train_loader(cfg):
    """Create the shuffled, batch-dropping DataLoader for the training split."""
    train_transforms = build_transforms(cfg, is_train=True)
    train_set = build_dataset(cfg.INPUT.DATASET, cfg.INPUT.DATA_ROOT, train_transforms, "train")
    loader_kwargs = dict(
        batch_size=cfg.INPUT.BATCH_SIZE_TRAIN,
        shuffle=True,
        num_workers=cfg.INPUT.NUM_WORKERS_TRAIN,
        pin_memory=True,
        drop_last=True,
        collate_fn=collate_fn,
    )
    return torch.utils.data.DataLoader(train_set, **loader_kwargs)
def build_test_loader(cfg):
    """Create (gallery_loader, query_loader) for evaluation (no shuffling)."""
    eval_transforms = build_transforms(cfg, is_train=False)
    # Build both datasets first so their statistics print in the usual order.
    gallery_set = build_dataset(cfg.INPUT.DATASET, cfg.INPUT.DATA_ROOT, eval_transforms, "gallery")
    query_set = build_dataset(cfg.INPUT.DATASET, cfg.INPUT.DATA_ROOT, eval_transforms, "query")

    def _as_loader(dataset):
        # Shared sequential-loader settings for both evaluation splits.
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=cfg.INPUT.BATCH_SIZE_TEST,
            shuffle=False,
            num_workers=cfg.INPUT.NUM_WORKERS_TEST,
            pin_memory=True,
            collate_fn=collate_fn,
        )

    return _as_loader(gallery_set), _as_loader(query_set)
| 3,537 | 32.695238 | 94 | py |
COAT | COAT-main/loss/softmax_loss.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import torch
from torch import nn
import torch.nn.functional as F
class SoftmaxLoss(nn.Module):
    """
    BN-neck + bias-free linear softmax classifier over labeled identities.
    Labels >= num_classes (i.e. unlabeled people, pid sentinel 5555 in the
    datasets) are remapped to 5554 and ignored by the cross-entropy loss.
    """
    def __init__(self, cfg):
        super(SoftmaxLoss, self).__init__()
        self.feat_dim = cfg.MODEL.EMBEDDING_DIM
        self.num_classes = cfg.MODEL.LOSS.LUT_SIZE
        self.bottleneck = nn.BatchNorm1d(self.feat_dim)
        self.bottleneck.bias.requires_grad_(False) # no shift
        self.classifier = nn.Linear(self.feat_dim, self.num_classes, bias=False)
        self.bottleneck.apply(weights_init_kaiming)
        self.classifier.apply(weights_init_classifier)
    def forward(self, inputs, labels):
        """
        Args:
            inputs: feature matrix with shape (batch_size, feat_dim).
            labels: ground-truth identity labels with shape (batch_size,).
        Returns:
            scalar cross-entropy loss over in-range (labeled) samples.
        """
        assert inputs.size(0) == labels.size(0), "features.size(0) is not equal to labels.size(0)"
        target = labels.clone()
        # Map out-of-range (unlabeled) ids to the ignore sentinel 5554.
        target[target >= self.num_classes] = 5554
        feat = self.bottleneck(inputs)
        score = self.classifier(feat)
        loss = F.cross_entropy(score, target, ignore_index=5554)
        return loss
def weights_init_kaiming(m):
    """
    Kaiming-initialize Linear/Conv weights and reset norm-layer affine params.

    Intended for use with ``nn.Module.apply``. Linear layers use fan-out,
    conv layers fan-in; BatchNorm affine weights are set to 1 and biases to 0.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # BUGFIX: guard for Linear layers created with bias=False (the Conv
        # branch below already had this check).
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
    """
    Initialize classifier Linear layers: small-std normal weights, zero bias.

    Intended for use with ``nn.Module.apply``.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # BUGFIX: `if m.bias:` raises "Bool value of Tensor with more than
        # one element is ambiguous" for any Linear with a real bias vector;
        # test for presence with an identity check instead.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
| 2,087 | 32.142857 | 98 | py |
COAT | COAT-main/loss/oim.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import torch
import torch.nn.functional as F
from torch import autograd, nn
class OIM(autograd.Function):
    """
    Online Instance Matching: scores input embeddings against a lookup table
    (labeled identities) and a circular queue (unlabeled), and performs the
    memory-bank updates in place during the backward pass.
    """
    @staticmethod
    def forward(ctx, inputs, targets, lut, cq, header, momentum):
        # All arguments must be tensors here (see the `oim` wrapper, which
        # wraps header/momentum) so they can go through save_for_backward.
        ctx.save_for_backward(inputs, targets, lut, cq, header, momentum)
        outputs_labeled = inputs.mm(lut.t())
        outputs_unlabeled = inputs.mm(cq.t())
        # similarity scores: [N, lut_size + cq_size]
        return torch.cat([outputs_labeled, outputs_unlabeled], dim=1)
    @staticmethod
    def backward(ctx, grad_outputs):
        inputs, targets, lut, cq, header, momentum = ctx.saved_tensors
        grad_inputs = None
        if ctx.needs_input_grad[0]:
            grad_inputs = grad_outputs.mm(torch.cat([lut, cq], dim=0))
            # keep gradients in fp32 even under mixed-precision training
            if grad_inputs.dtype == torch.float16:
                grad_inputs = grad_inputs.to(torch.float32)
        # Memory-bank update (mutates lut/cq in place):
        for x, y in zip(inputs, targets):
            if y < len(lut):
                # labeled id: EMA update, then re-normalize to unit length
                lut[y] = momentum * lut[y] + (1.0 - momentum) * x
                lut[y] /= lut[y].norm()
            else:
                # unlabeled: push into the circular queue.
                # NOTE(review): `header` only advances locally here; the
                # persistent pointer appears to live in OIMLoss.header_cq —
                # confirm the two stay in sync.
                cq[header] = x
                header = (header + 1) % cq.size(0)
        return grad_inputs, None, None, None, None, None
def oim(inputs, targets, lut, cq, header, momentum=0.5):
    """Invoke the OIM op, wrapping the scalar args as tensors for autograd."""
    header_t = torch.tensor(header)
    momentum_t = torch.tensor(momentum)
    return OIM.apply(inputs, targets, lut, cq, header_t, momentum_t)
class OIMLoss(nn.Module):
    """
    OIM (Online Instance Matching) loss with a lookup table (lut) for labeled
    identities and a circular queue (cq) for unlabeled people.
    """
    def __init__(self, num_features, num_pids, num_cq_size, oim_momentum, oim_scalar):
        super(OIMLoss, self).__init__()
        self.num_features = num_features
        self.num_pids = num_pids
        self.num_unlabeled = num_cq_size
        self.momentum = oim_momentum
        self.oim_scalar = oim_scalar
        # Memory banks are buffers: saved with the state dict, not optimized.
        self.register_buffer("lut", torch.zeros(self.num_pids, self.num_features))
        self.register_buffer("cq", torch.zeros(self.num_unlabeled, self.num_features))
        self.header_cq = 0  # write pointer into the circular queue
    def forward(self, inputs, roi_label):
        """
        Args:
            inputs: embedding rows for the sampled ROIs.
            roi_label: list of per-image pid labels; 0 means background.
        Returns:
            (loss, foreground inputs, shifted foreground labels).
        """
        # merge into one batch, background label = 0
        targets = torch.cat(roi_label)
        label = targets - 1 # background label = -1
        inds = label >= 0
        label = label[inds]
        # keep only foreground embedding rows
        inputs = inputs[inds.unsqueeze(1).expand_as(inputs)].view(-1, self.num_features)
        projected = oim(inputs, label, self.lut, self.cq, self.header_cq, momentum=self.momentum)
        # projected - Tensor [M, lut+cq], e.g., [M, 482+500]=[M, 982]
        projected *= self.oim_scalar
        # advance the queue pointer by the number of unlabeled samples seen
        self.header_cq = (
            self.header_cq + (label >= self.num_pids).long().sum().item()
        ) % self.num_unlabeled
        # pid 5555 (unlabeled) becomes 5554 after the -1 shift and is ignored
        loss_oim = F.cross_entropy(projected, label, ignore_index=5554)
        return loss_oim, inputs, label
| 2,848 | 36 | 97 | py |
COAT | COAT-main/utils/utils.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import datetime
import errno
import json
import os
import os.path as osp
import pickle
import random
import time
from collections import defaultdict, deque
import numpy as np
import torch
import torch.distributed as dist
from tabulate import tabulate
# -------------------------------------------------------- #
# Logger #
# -------------------------------------------------------- #
class SmoothedValue(object):
    """Track a stream of scalar values.

    Exposes both window-smoothed statistics (median/avg/max over the last
    `window_size` updates) and the running global average over everything
    seen so far.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # Default rendering: "<window median> (<global average>)".
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        """Record `value` (with multiplicity `n` for the global stats)."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(stats)
        self.count = int(stats[0].item())
        self.total = float(stats[1].item())

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        stats = {
            "median": self.median,
            "avg": self.avg,
            "global_avg": self.global_avg,
            "max": self.max,
            "value": self.value,
        }
        return self.fmt.format(**stats)
class MetricLogger(object):
    """Collect named SmoothedValue meters and periodically print them.

    Typical use: ``for batch in logger.log_every(loader, freq): ...``.
    """

    def __init__(self, delimiter="\t"):
        # Meters are created lazily on first update() for each metric name.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one value per keyword (scalars or 0-d tensors)."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes, e.g. `logger.loss.avg`.
        # Only called when normal attribute lookup fails.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Reduce every meter's count/total across distributed processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a pre-configured meter under `name`."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield from `iterable`, printing progress every `print_freq` items.

        Tracks per-iteration data-loading and step time, an ETA estimate, all
        meter values and (when CUDA is available) peak GPU memory.
        """
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time between iterations = time spent fetching data.
            data_time.update(time.time() - end)
            yield obj
            # Time since `end` now also includes the caller's work on `obj`.
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(
            "{} Total time: {} ({:.4f} s / it)".format(
                header, total_time_str, total_time / len(iterable)
            )
        )
# -------------------------------------------------------- #
# Distributed training #
# -------------------------------------------------------- #
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    # Trim each rank's payload back to its true size before unpickling.
    # NOTE(review): pickle.loads on peer data — safe only within a trusted
    # cluster; do not expose this path to untrusted input.
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # One stacked all_reduce instead of one collective per key.
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Pass force=True to print from any rank (e.g. for debugging).
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    # Replace the global print for every module in this process.
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()


def get_world_size():
    """Number of participating processes (1 when not running distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1


def get_rank():
    """Rank of the current process (0 when not running distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0


def is_main_process():
    """True on rank 0, the process responsible for logging and saving."""
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    """torch.save that only executes on the main process."""
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables.

    Supports torchrun-style env vars (RANK/WORLD_SIZE/LOCAL_RANK) and SLURM
    (SLURM_PROCID); otherwise falls back to non-distributed mode. Mutates
    `args` (rank, world_size, gpu, distributed, dist_backend).
    """
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        # NOTE(review): this branch does not set args.world_size; presumably
        # the caller provides it on `args` — confirm before relying on SLURM.
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print("Not using distributed mode")
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(
        backend=args.dist_backend,
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    # Make sure every process is up before silencing non-master printing.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
# -------------------------------------------------------- #
# File operation #
# -------------------------------------------------------- #
def filename(path):
    """Return the basename of *path* without its extension."""
    return osp.splitext(osp.basename(path))[0]


def mkdir(path):
    """Create *path* (including parents); do nothing if it already exists.

    Raises:
        OSError: for any failure other than the directory already existing.
    """
    # exist_ok=True is the idiomatic replacement for catching EEXIST by hand.
    os.makedirs(path, exist_ok=True)


def read_json(fpath):
    """Load and return the JSON object stored at *fpath*."""
    with open(fpath, "r") as f:
        obj = json.load(f)
    return obj


def write_json(obj, fpath):
    """Write *obj* as pretty-printed JSON to *fpath*, creating parent dirs.

    numpy arrays are not JSON-serializable, so ndarray values are dropped
    from a (shallow) filtered copy of *obj* before dumping. *obj* itself is
    never mutated.
    """
    mkdir(osp.dirname(fpath))
    # Build a filtered copy instead of popping while iterating: the original
    # `for k, v in _obj.items(): _obj.pop(k)` raises
    # "RuntimeError: dictionary changed size during iteration" in Python 3.
    _obj = {k: v for k, v in obj.items() if not isinstance(v, np.ndarray)}
    with open(fpath, "w") as f:
        json.dump(_obj, f, indent=4, separators=(",", ": "))


def symlink(src, dst, overwrite=True, **kwargs):
    """Create symlink dst -> src, replacing an existing entry if *overwrite*."""
    if os.path.lexists(dst) and overwrite:
        os.remove(dst)
    os.symlink(src, dst, **kwargs)
# -------------------------------------------------------- #
# Misc #
# -------------------------------------------------------- #
def create_small_table(small_dict):
    """
    Create a small table using the keys of small_dict as headers. This is only
    suitable for small dictionaries.
    Args:
        small_dict (dict): a result dictionary of only a few items.
    Returns:
        str: the table as a string.
    """
    # One header row (the keys) over a single data row (the values).
    keys, vals = zip(*small_dict.items())
    return tabulate(
        [vals],
        headers=keys,
        tablefmt="pipe",
        floatfmt=".3f",
        stralign="center",
        numalign="center",
    )
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
    """LambdaLR that linearly ramps the lr factor from warmup_factor to 1.

    The multiplicative factor grows linearly over the first `warmup_iters`
    steps and is exactly 1 from step `warmup_iters` onward.
    """

    def scale(step):
        # Warmup finished: full learning rate.
        if step >= warmup_iters:
            return 1
        frac = float(step) / warmup_iters
        # Linear interpolation: warmup_factor at step 0 -> 1 at warmup_iters.
        return warmup_factor + (1 - warmup_factor) * frac

    return torch.optim.lr_scheduler.LambdaLR(optimizer, scale)
def resume_from_ckpt(ckpt_path, model, optimizer=None, lr_scheduler=None):
    """Load model (and optionally optimizer/scheduler) state from a checkpoint.

    Args:
        ckpt_path: path to a torch checkpoint containing at least
            "model" and "epoch" keys.
    Returns:
        The epoch number stored in the checkpoint.
    """
    ckpt = torch.load(ckpt_path)
    # strict=False tolerates missing/unexpected keys in the state dict.
    model.load_state_dict(ckpt["model"], strict=False)
    if optimizer is not None:
        optimizer.load_state_dict(ckpt["optimizer"])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(ckpt["lr_scheduler"])
    print(f"loaded checkpoint {ckpt_path}")
    print(f"model was trained for {ckpt['epoch']} epochs")
    return ckpt["epoch"]
def set_random_seed(seed):
    """Seed every RNG used by the project for reproducible runs.

    Covers Python's `random`, numpy, torch (CPU and all CUDA devices),
    Python hashing, and forces cuDNN into deterministic mode.
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade kernel-selection speed for reproducibility in cuDNN.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
| 13,088 | 28.951945 | 99 | py |
COAT | COAT-main/utils/km.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import random
import numpy as np
# Slack values with absolute value below this are treated as zero.
zero_threshold = 0.00000001


class KMNode(object):
    """A vertex in the Kuhn-Munkres bipartite matching graph."""

    def __init__(self, id, exception=0, match=None, visit=False):
        self.id = id                # caller-supplied vertex identifier
        self.exception = exception  # vertex label (potential) for KM
        self.match = match          # index of the matched partner, or None
        self.visit = visit          # visited flag for the current DFS pass
class KuhnMunkres(object):
    """Kuhn-Munkres (Hungarian) algorithm for max-weight bipartite matching."""

    def __init__(self):
        self.matrix = None            # weight matrix; rows = smaller vertex set
        self.x_nodes = []
        self.y_nodes = []
        self.minz = float("inf")      # smallest slack seen in a failed DFS
        self.x_length = 0
        self.y_length = 0
        self.index_x = 0              # column of the x id in input triples
        self.index_y = 1              # column of the y id in input triples

    def __del__(self):
        pass

    def set_matrix(self, x_y_values):
        """Build the weight matrix from (x, y, value) triples.

        Always maps the smaller vertex set to the rows, swapping the roles
        of x and y if needed (recorded in index_x/index_y).
        """
        xs = set()
        ys = set()
        for x, y, value in x_y_values:
            xs.add(x)
            ys.add(y)

        if len(xs) < len(ys):
            self.index_x = 0
            self.index_y = 1
        else:
            self.index_x = 1
            self.index_y = 0
            xs, ys = ys, xs

        x_dic = {x: i for i, x in enumerate(xs)}
        y_dic = {y: j for j, y in enumerate(ys)}
        self.x_nodes = [KMNode(x) for x in xs]
        self.y_nodes = [KMNode(y) for y in ys]
        self.x_length = len(xs)
        self.y_length = len(ys)

        self.matrix = np.zeros((self.x_length, self.y_length))
        for row in x_y_values:
            x = row[self.index_x]
            y = row[self.index_y]
            value = row[2]
            x_index = x_dic[x]
            y_index = y_dic[y]
            self.matrix[x_index, y_index] = value

        # Initial feasible labeling: each x label = its largest edge weight.
        for i in range(self.x_length):
            self.x_nodes[i].exception = max(self.matrix[i, :])

    def km(self):
        """Find an augmenting path for every x vertex, relaxing labels on failure."""
        for i in range(self.x_length):
            while True:
                self.minz = float("inf")
                self.set_false(self.x_nodes)
                self.set_false(self.y_nodes)
                if self.dfs(i):
                    break
                # No augmenting path in the equality graph: shift labels by
                # the smallest slack found and retry.
                self.change_exception(self.x_nodes, -self.minz)
                self.change_exception(self.y_nodes, self.minz)

    def dfs(self, i):
        """Try to match x_i along tight edges; returns True on success."""
        x_node = self.x_nodes[i]
        x_node.visit = True
        for j in range(self.y_length):
            y_node = self.y_nodes[j]
            if not y_node.visit:
                # Slack of edge (i, j) w.r.t. the current labeling.
                t = x_node.exception + y_node.exception - self.matrix[i][j]
                if abs(t) < zero_threshold:
                    # Tight edge: take it if y is free or can be re-matched.
                    y_node.visit = True
                    if y_node.match is None or self.dfs(y_node.match):
                        x_node.match = j
                        y_node.match = i
                        return True
                else:
                    if t >= zero_threshold:
                        self.minz = min(self.minz, t)
        return False

    def set_false(self, nodes):
        """Clear the DFS visited flag on every node."""
        for node in nodes:
            node.visit = False

    def change_exception(self, nodes, change):
        """Shift the label of every node visited in the last DFS by `change`."""
        for node in nodes:
            if node.visit:
                node.exception += change

    def get_connect_result(self):
        """Return the matching as (x_id, y_id, weight) triples."""
        ret = []
        for i in range(self.x_length):
            x_node = self.x_nodes[i]
            j = x_node.match
            y_node = self.y_nodes[j]
            x_id = x_node.id
            y_id = y_node.id
            value = self.matrix[i][j]
            # Undo the x/y swap performed in set_matrix, if any.
            if self.index_x == 1 and self.index_y == 0:
                x_id, y_id = y_id, x_id
            ret.append((x_id, y_id, value))
        return ret

    def get_max_value_result(self):
        """Return the largest single edge weight used in the matching."""
        ret = -100
        for i in range(self.x_length):
            j = self.x_nodes[i].match
            ret = max(ret, self.matrix[i][j])
        return ret
def run_kuhn_munkres(x_y_values):
    """Solve the assignment problem for (x, y, weight) triples.

    Returns:
        tuple: (matched (x, y, weight) triples, largest matched edge weight).
    """
    solver = KuhnMunkres()
    solver.set_matrix(x_y_values)
    solver.km()
    return solver.get_connect_result(), solver.get_max_value_result()
def test():
    """Smoke test: run KM on a random 500x1000 dense weight matrix."""
    random.seed(0)
    # Same draw order as a nested loop: j varies fastest.
    values = [(i, j, random.random()) for i in range(500) for j in range(1000)]
    return run_kuhn_munkres(values)
if __name__ == "__main__":
    # Small demo: match three x-vertices to three y-vertices and print
    # the resulting assignment and its largest matched weight.
    values = [(1, 1, 3), (1, 3, 4), (2, 1, 2), (2, 2, 1), (2, 3, 3), (3, 2, 4), (3, 3, 5)]
    print(run_kuhn_munkres(values))
| 4,297 | 27.463576 | 90 | py |
COAT | COAT-main/utils/mask.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import random
import torch
class exchange_token:
    """Exchange a rectangular token region between images of a batch."""

    def __init__(self):
        pass

    def __call__(self, features, mask_box):
        """Swap the tokens inside `mask_box` across the batch permutation.

        Args:
            features: (B, 196, C) token sequence from a 14x14 grid.
            mask_box: (new_idx, x1, x2, y1, y2) — batch permutation plus the
                grid region to exchange. Mutates `features` in place.
        """
        batch, num_tokens, channels = features.size()
        assert num_tokens == 14 * 14
        new_idx, x1, x2, y1, y2 = mask_box
        grid = features.view(batch, 14, 14, channels)
        grid[:, x1:x2, y1:y2, :] = grid[new_idx, x1:x2, y1:y2, :]
        return grid.view(batch, num_tokens, channels)
class jigsaw_token:
    """Shift-then-shuffle ("jigsaw") rearrangement of a token sequence."""

    def __init__(self, shift=5, group=2, begin=1):
        self.shift = shift
        self.group = group
        self.begin = begin

    def __call__(self, features):
        """Return the jigsawed tokens; shape is preserved.

        Only 196-token (14x14) and 25-token (5x5) sequences are supported;
        the group count is fixed accordingly.
        """
        bsz = features.size(0)
        dim = features.size(2)
        num_tokens = features.size(1)
        if num_tokens == 196:
            self.group = 2
        elif num_tokens == 25:
            self.group = 5
        else:
            raise Exception("Jigsaw - Unwanted number of tokens")

        # Shift: rotate the tokens after position (begin-1) left by `shift`.
        start = self.begin - 1
        rolled = torch.cat(
            [features[:, start + self.shift:, :],
             features[:, start:start + self.shift, :]],
            dim=1,
        )

        # Patch shuffle: split into `group` chunks and interleave them.
        try:
            regrouped = rolled.view(bsz, self.group, -1, dim)
        except Exception:
            raise Exception("Jigsaw - Unwanted number of groups")
        shuffled = torch.transpose(regrouped, 1, 2).contiguous()
        return shuffled.view(bsz, -1, dim)
class get_mask_box:
    """Sample a random mask region over a (B, C, H, W) feature map.

    Returns a tuple (new_idx, x1, x2, y1, y2): a random batch permutation
    plus the coordinates of a stripe or square region.
    """

    def __init__(self, shape='stripe', mask_size=2, mode='random_direct'):
        self.shape = shape          # 'stripe', 'square', or 'random'
        self.mask_size = mask_size  # stripe width / square side
        self.mode = mode            # sub-mode per shape (see __call__)

    def __call__(self, features):
        # Stripe mask
        if self.shape == 'stripe':
            if self.mode == 'horizontal':
                mask_box = self.hstripe(features, self.mask_size)
            elif self.mode == 'vertical':
                mask_box = self.vstripe(features, self.mask_size)
            elif self.mode == 'random_direction':
                # Coin flip between a horizontal and a vertical stripe.
                if random.random() < 0.5:
                    mask_box = self.hstripe(features, self.mask_size)
                else:
                    mask_box = self.vstripe(features, self.mask_size)
            else:
                raise Exception("Unknown stripe mask mode name")
        # Square mask
        elif self.shape == 'square':
            if self.mode == 'random_size':
                self.mask_size = 4 if random.random() < 0.5 else 5
            mask_box = self.square(features, self.mask_size)
        # Random stripe/square mask: one of four variants, equal probability.
        elif self.shape == 'random':
            random_num = random.random()
            if random_num < 0.25:
                mask_box = self.hstripe(features, 2)
            elif random_num < 0.5 and random_num >= 0.25:
                mask_box = self.vstripe(features, 2)
            elif random_num < 0.75 and random_num >= 0.5:
                mask_box = self.square(features, 4)
            else:
                mask_box = self.square(features, 5)
        else:
            raise Exception("Unknown mask shape name")
        return mask_box

    def hstripe(self, features, mask_size):
        """Full-height stripe of width `mask_size` at a random y offset."""
        # horizontal stripe
        mask_x1 = 0
        mask_x2 = features.shape[2]
        y1_max = features.shape[3] - mask_size
        mask_y1 = torch.randint(y1_max, (1,))
        mask_y2 = mask_y1 + mask_size
        new_idx = torch.randperm(features.shape[0])
        mask_box = (new_idx, mask_x1, mask_x2, mask_y1, mask_y2)
        return mask_box

    def vstripe(self, features, mask_size):
        """Full-width stripe of height `mask_size` at a random x offset."""
        # vertical stripe
        mask_y1 = 0
        mask_y2 = features.shape[3]
        x1_max = features.shape[2] - mask_size
        mask_x1 = torch.randint(x1_max, (1,))
        mask_x2 = mask_x1 + mask_size
        new_idx = torch.randperm(features.shape[0])
        mask_box = (new_idx, mask_x1, mask_x2, mask_y1, mask_y2)
        return mask_box

    def square(self, features, mask_size):
        """Square of side `mask_size` at a random (x, y) offset."""
        # square
        x1_max = features.shape[2] - mask_size
        y1_max = features.shape[3] - mask_size
        mask_x1 = torch.randint(x1_max, (1,))
        mask_y1 = torch.randint(y1_max, (1,))
        mask_x2 = mask_x1 + mask_size
        mask_y2 = mask_y1 + mask_size
        new_idx = torch.randperm(features.shape[0])
        mask_box = (new_idx, mask_x1, mask_x2, mask_y1, mask_y2)
        return mask_box
class exchange_patch:
    """Exchange a random patch of a (B, C, H, W) feature map across the batch.

    The masked region is overwritten, in place, with the same region taken
    from a random permutation of the batch.
    """

    def __init__(self, shape='stripe', mask_size=2, mode='random_direct'):
        self.shape = shape          # 'stripe', 'square', or 'random'
        self.mask_size = mask_size  # stripe width / square side
        self.mode = mode            # sub-mode per shape (see __call__)

    def __call__(self, features):
        # Stripe mask
        if self.shape == 'stripe':
            if self.mode == 'horizontal':
                features = self.xpatch_hstripe(features, self.mask_size)
            elif self.mode == 'vertical':
                features = self.xpatch_vstripe(features, self.mask_size)
            elif self.mode == 'random_direction':
                # Coin flip between horizontal and vertical stripes.
                if random.random() < 0.5:
                    features = self.xpatch_hstripe(features, self.mask_size)
                else:
                    features = self.xpatch_vstripe(features, self.mask_size)
            else:
                raise Exception("Unknown stripe mask mode name")
        # Square mask
        elif self.shape == 'square':
            if self.mode == 'random_size':
                self.mask_size = 4 if random.random() < 0.5 else 5
            features = self.xpatch_square(features, self.mask_size)
        # Random stripe/square mask: one of four variants, equal probability.
        elif self.shape == 'random':
            random_num = random.random()
            if random_num < 0.25:
                features = self.xpatch_hstripe(features, 2)
            elif random_num < 0.5 and random_num >= 0.25:
                features = self.xpatch_vstripe(features, 2)
            elif random_num < 0.75 and random_num >= 0.5:
                features = self.xpatch_square(features, 4)
            else:
                features = self.xpatch_square(features, 5)
        else:
            raise Exception("Unknown mask shape name")
        return features

    def xpatch_hstripe(self, features, mask_size):
        """Exchange a full-height stripe of width `mask_size` across the batch."""
        # horizontal stripe
        y1_max = features.shape[3] - mask_size
        num_masks = 1
        for i in range(num_masks):
            mask_y1 = torch.randint(y1_max, (1,))
            mask_y2 = mask_y1 + mask_size
            new_idx = torch.randperm(features.shape[0])
            features[:, :, :, mask_y1 : mask_y2] = features[new_idx, :, :, mask_y1 : mask_y2]
        return features

    def xpatch_vstripe(self, features, mask_size):
        """Exchange a full-width stripe of height `mask_size` across the batch."""
        # vertical stripe
        x1_max = features.shape[2] - mask_size
        num_masks = 1
        for i in range(num_masks):
            mask_x1 = torch.randint(x1_max, (1,))
            mask_x2 = mask_x1 + mask_size
            new_idx = torch.randperm(features.shape[0])
            features[:, :, mask_x1 : mask_x2, :] = features[new_idx, :, mask_x1 : mask_x2, :]
        return features

    def xpatch_square(self, features, mask_size):
        """Exchange a random square of side `mask_size` across the batch."""
        # square
        x1_max = features.shape[2] - mask_size
        y1_max = features.shape[3] - mask_size
        num_masks = 1
        for i in range(num_masks):
            mask_x1 = torch.randint(x1_max, (1,))
            mask_y1 = torch.randint(y1_max, (1,))
            mask_x2 = mask_x1 + mask_size
            mask_y2 = mask_y1 + mask_size
            new_idx = torch.randperm(features.shape[0])
            features[:, :, mask_x1 : mask_x2, mask_y1 : mask_y2] = features[new_idx, :, mask_x1 : mask_x2, mask_y1 : mask_y2]
        return features
class cutout_patch:
    """Zero out a random stripe of a (B, C, H, W) feature map, in place.

    With probability 0.5 the stripe is vertical (along W), otherwise
    horizontal (along H); each batch element gets its own random offset.
    """

    def __init__(self, mask_size=2):
        self.mask_size = mask_size  # stripe width in feature cells

    def __call__(self, features):
        if random.random() < 0.5:
            y1_max = features.shape[3] - self.mask_size
            num_masks = 1
            for i in range(num_masks):
                # Independent offset per batch element.
                mask_y1 = torch.randint(y1_max, (features.shape[0],))
                mask_y2 = mask_y1 + self.mask_size
                for k in range(features.shape[0]):
                    features[k, :, :, mask_y1[k] : mask_y2[k]] = 0
        else:
            # NOTE(review): the offset range uses shape[3] (width) but the
            # stripe is applied along dim 2 (height); looks wrong for
            # non-square feature maps — confirm (maps may always be square).
            x1_max = features.shape[3] - self.mask_size
            num_masks = 1
            for i in range(num_masks):
                mask_x1 = torch.randint(x1_max, (features.shape[0],))
                mask_x2 = mask_x1 + self.mask_size
                for k in range(features.shape[0]):
                    features[k, :, mask_x1[k] : mask_x2[k], :] = 0
        return features
class erase_patch:
    """Replace a random stripe of the feature map with Gaussian noise.

    Noise is drawn per cell from N(mean, std) where mean/std are computed
    over the whole (detached) batch. Mutates `features` in place.
    """

    def __init__(self, mask_size=2):
        self.mask_size = mask_size  # stripe width in feature cells

    def __call__(self, features):
        std, mean = torch.std_mean(features.detach())
        dim = features.shape[1]
        if random.random() < 0.5:
            y1_max = features.shape[3] - self.mask_size
            num_masks = 1
            for i in range(num_masks):
                mask_y1 = torch.randint(y1_max, (features.shape[0],))
                mask_y2 = mask_y1 + self.mask_size
                for k in range(features.shape[0]):
                    # NOTE(review): repeat(dim, 14, 2) hard-codes 14xmask_size
                    # stripes, i.e. 14x14 maps with mask_size=2 — confirm.
                    features[k, :, :, mask_y1[k] : mask_y2[k]] = torch.normal(mean.repeat(dim,14,2), std.repeat(dim,14,2))
        else:
            # NOTE(review): offset range uses shape[3] but the stripe is
            # applied along dim 2; only correct for square maps — confirm.
            x1_max = features.shape[3] - self.mask_size
            num_masks = 1
            for i in range(num_masks):
                mask_x1 = torch.randint(x1_max, (features.shape[0],))
                mask_x2 = mask_x1 + self.mask_size
                for k in range(features.shape[0]):
                    features[k, :, mask_x1[k] : mask_x2[k], :] = torch.normal(mean.repeat(dim,2,14), std.repeat(dim,2,14))
        return features
class mixup_patch:
    """Mix a random stripe of the feature map with a shuffled batch, in place.

    The stripe becomes lam * own features + (1 - lam) * a random batch
    permutation's features, with lam ~ U(0, 1).
    """

    def __init__(self, mask_size=2):
        self.mask_size = mask_size  # stripe width in feature cells

    def __call__(self, features):
        lam = random.uniform(0, 1)
        if random.random() < 0.5:
            y1_max = features.shape[3] - self.mask_size
            num_masks = 1
            for i in range(num_masks):
                mask_y1 = torch.randint(y1_max, (1,))
                mask_y2 = mask_y1 + self.mask_size
                new_idx = torch.randperm(features.shape[0])
                features[:, :, :, mask_y1 : mask_y2] = lam*features[:, :, :, mask_y1 : mask_y2] + (1-lam)*features[new_idx, :, :, mask_y1 : mask_y2]
        else:
            x1_max = features.shape[2] - self.mask_size
            num_masks = 1
            for i in range(num_masks):
                mask_x1 = torch.randint(x1_max, (1,))
                mask_x2 = mask_x1 + self.mask_size
                new_idx = torch.randperm(features.shape[0])
                features[:, :, mask_x1 : mask_x2, :] = lam*features[:, :, mask_x1 : mask_x2, :] + (1-lam)*features[new_idx, :, mask_x1 : mask_x2, :]
        return features
class jigsaw_patch:
    """Shift-then-shuffle ("jigsaw") rearrangement of a 14x14 feature map."""

    def __init__(self, shift=5, group=2):
        self.shift = shift  # left-rotation amount over the flattened 196 cells
        self.group = group  # number of chunks interleaved by the shuffle

    def __call__(self, features):
        batchsize = features.size(0)
        dim = features.size(1)
        # Flatten the spatial grid: (B, C, H*W).
        features = features.view(batchsize, dim, -1)
        # Shift Operation
        feature_random = torch.cat([features[:, :, self.shift:], features[:, :, :self.shift]], dim=2)
        x = feature_random
        # Patch Shuffle Operation
        try:
            x = x.view(batchsize, dim, self.group, -1)
        except:
            # NOTE(review): this fallback pads dim 1 and views as
            # (B, group, -1, dim), inconsistent with the (B, dim, group, -1)
            # layout of the try-branch; for 196 cells the try-branch always
            # succeeds, so this path looks dead/buggy — confirm.
            x = torch.cat([x, x[:, -2:-1, :]], dim=1)
            x = x.view(batchsize, self.group, -1, dim)
        x = torch.transpose(x, 2, 3).contiguous()
        x = x.view(batchsize, dim, -1)
        # Restore the 14x14 spatial grid.
        x = x.view(batchsize, dim, 14, 14)
        return x
| 11,890 | 35.47546 | 148 | py |
COAT | COAT-main/utils/transforms.py | # This file is part of COAT, and is distributed under the
# OSI-approved BSD 3-Clause License. See top-level LICENSE file or
# https://github.com/Kitware/COAT/blob/master/LICENSE for details.
import random
import math
import torch
import numpy as np
from copy import deepcopy
from torchvision.transforms import functional as F
def mixup_data(images, alpha=0.8):
    """Mix each image with a randomly chosen partner from the same batch.

    lam is drawn from U(alpha, 1) when 0 < alpha < 1, otherwise 1 (no
    mixing). Because batch images may differ in size, only the top-left
    region common to every image is mixed. The input list is not modified.
    """
    lam = random.uniform(alpha, 1) if 0. < alpha < 1. else 1.
    # Largest region present in every image of the batch (capped at 9999).
    min_x = min([9999] + [img.shape[1] for img in images])
    min_y = min([9999] + [img.shape[2] for img in images])
    partners = deepcopy(images)
    random.shuffle(partners)
    mixed_images = deepcopy(images)
    for img, partner, out in zip(images, partners, mixed_images):
        out[:, :min_x, :min_y] = lam * img[:, :min_x, :min_y] + (1 - lam) * partner[:, :min_x, :min_y]
    return mixed_images
class Compose:
    """Chain transforms that each map an (image, target) pair to a new pair."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
class RandomHorizontalFlip:
    """Flip the image and its boxes left-right with probability `prob`."""

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        # Guard clause: skip the flip with probability 1 - prob.
        if random.random() >= self.prob:
            return image, target
        _, width = image.shape[-2:]
        image = image.flip(-1)
        boxes = target["boxes"]
        # Mirror x-coordinates: x1' = W - x2, x2' = W - x1.
        boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
        target["boxes"] = boxes
        return image, target
class Cutout(object):
    """Randomly mask out one or more patches from an image.
    https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
    Args:
        n_holes (int): Number of patches to cut out of each image.
        length (int): The length (in pixels) of each square patch.
    """

    def __init__(self, n_holes=2, length=100):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img, target):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W).
        Returns:
            Tensor: Image with n_holes of dimension length x length cut out of it.
        """
        h = img.size(1)
        w = img.size(2)
        mask = np.ones((h, w), np.float32)
        for n in range(self.n_holes):
            # Random patch center; np.clip keeps the square inside the image,
            # so patches near the border are truncated rather than wrapped.
            y = np.random.randint(h)
            x = np.random.randint(w)
            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)
            mask[y1: y2, x1: x2] = 0.
        mask = torch.from_numpy(mask)
        # Broadcast the (H, W) mask over all channels.
        mask = mask.expand_as(img)
        img = img * mask
        # `target` is passed through unchanged.
        return img, target
class RandomErasing(object):
    '''
    https://github.com/zhunzhong07/CamStyle/blob/master/reid/utils/data/transforms.py
    Replaces a random rectangle of the image with the per-channel mean,
    with probability 1 - EPSILON of doing nothing.
    '''

    def __init__(self, EPSILON=0.5, mean=[0.485, 0.456, 0.406]):
        self.EPSILON = EPSILON  # probability threshold for skipping the erase
        self.mean = mean        # fill value per channel (ImageNet means)

    def __call__(self, img, target):
        if random.uniform(0, 1) > self.EPSILON:
            return img, target

        # Rejection-sample a rectangle: 2-20% of the area, aspect 0.3-3.
        for attempt in range(100):
            area = img.size()[1] * img.size()[2]
            target_area = random.uniform(0.02, 0.2) * area
            aspect_ratio = random.uniform(0.3, 3)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w <= img.size()[2] and h <= img.size()[1]:
                x1 = random.randint(0, img.size()[1] - h)
                y1 = random.randint(0, img.size()[2] - w)
                img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
                img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
                return img, target

        # No valid rectangle found: return the image unchanged.
        return img, target
class ToTensor:
    """Convert a PIL image with [0, 255] values to a float tensor in [0, 1]."""

    def __call__(self, image, target):
        # Target is passed through untouched.
        return F.to_tensor(image), target
def build_transforms(cfg, is_train):
    """Assemble the transform pipeline from the config.

    Always converts to tensor; training additionally applies a random
    horizontal flip plus optional Cutout / RandomErasing per the config.
    """
    pipeline = [ToTensor()]
    if is_train:
        pipeline.append(RandomHorizontalFlip())
        if cfg.INPUT.IMAGE_CUTOUT:
            pipeline.append(Cutout())
        if cfg.INPUT.IMAGE_ERASE:
            pipeline.append(RandomErasing())
    return Compose(pipeline)
| 4,443 | 29.648276 | 130 | py |
keras | keras-master/keras/backend_config.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend config API."""
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
# Mutable module-level backend settings; read through the getters below and
# changed only via the corresponding set_* functions.

# The type of float to use throughout a session.
_FLOATX = 'float32'

# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7

# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'


@keras_export('keras.backend.epsilon')
@tf.__internal__.dispatch.add_dispatch_support
def epsilon() -> float:
  """Returns the value of the fuzz factor used in numeric expressions.

  Returns:
    A float.

  Example:
  >>> tf.keras.backend.epsilon()
  1e-07
  """
  return _EPSILON
@keras_export('keras.backend.set_epsilon')
def set_epsilon(value: float) -> None:
  """Sets the value of the fuzz factor used in numeric expressions.

  Args:
      value: float. New value of epsilon.

  Example:
  >>> tf.keras.backend.epsilon()
  1e-07
  >>> tf.keras.backend.set_epsilon(1e-5)
  >>> tf.keras.backend.epsilon()
  1e-05
   >>> tf.keras.backend.set_epsilon(1e-7)
  """
  global _EPSILON
  # No validation is performed; callers are expected to pass a float.
  _EPSILON = value
@keras_export('keras.backend.floatx')
def floatx() -> str:
  """Returns the default float type, as a string.

  E.g. `'float16'`, `'float32'`, `'float64'`.

  Returns:
    String, the current default float type.

  Example:
  >>> tf.keras.backend.floatx()
  'float32'
  """
  # One of 'float16', 'float32', 'float64'; changed via set_floatx().
  return _FLOATX
@keras_export('keras.backend.set_floatx')
def set_floatx(value: str) -> None:
  """Sets the default float type.

  Note: It is not recommended to set this to float16 for training, as this will
  likely cause numeric stability issues. Instead, mixed precision, which is
  using a mix of float16 and float32, can be used by calling
  `tf.keras.mixed_precision.experimental.set_policy('mixed_float16')`. See the
  [mixed precision guide](
    https://www.tensorflow.org/guide/keras/mixed_precision) for details.

  Args:
    value: String; `'float16'`, `'float32'`, or `'float64'`.

  Example:
  >>> tf.keras.backend.floatx()
  'float32'
  >>> tf.keras.backend.set_floatx('float64')
  >>> tf.keras.backend.floatx()
  'float64'
  >>> tf.keras.backend.set_floatx('float32')

  Raises:
    ValueError: In case of invalid value.
  """
  global _FLOATX
  accepted_dtypes = {'float16', 'float32', 'float64'}
  if value not in accepted_dtypes:
    raise ValueError(
        f'Unknown `floatx` value: {value}. Expected one of {accepted_dtypes}')
  # Store as a plain str (normalizes str-like inputs).
  _FLOATX = str(value)
@keras_export('keras.backend.image_data_format')
@tf.__internal__.dispatch.add_dispatch_support
def image_data_format() -> str:
  """Returns the default image data format convention.

  Returns:
    A string, either `'channels_first'` or `'channels_last'`

  Example:
  >>> tf.keras.backend.image_data_format()
  'channels_last'
  """
  # Changed via set_image_data_format().
  return _IMAGE_DATA_FORMAT
@keras_export('keras.backend.set_image_data_format')
def set_image_data_format(data_format: str) -> None:
  """Sets the value of the image data format convention.

  Args:
      data_format: string. `'channels_first'` or `'channels_last'`.

  Example:
  >>> tf.keras.backend.image_data_format()
  'channels_last'
  >>> tf.keras.backend.set_image_data_format('channels_first')
  >>> tf.keras.backend.image_data_format()
  'channels_first'
  >>> tf.keras.backend.set_image_data_format('channels_last')

  Raises:
    ValueError: In case of invalid `data_format` value.
  """
  global _IMAGE_DATA_FORMAT
  accepted_formats = {'channels_last', 'channels_first'}
  if data_format not in accepted_formats:
    raise ValueError(
        f'Unknown `data_format`: {data_format}. '
        f'Expected one of {accepted_formats}')
  # Store as a plain str (normalizes str-like inputs).
  _IMAGE_DATA_FORMAT = str(data_format)
| 4,297 | 27.091503 | 80 | py |
keras | keras-master/keras/metrics_confusion_matrix_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
import tensorflow.compat.v2 as tf
import json
from absl.testing import parameterized
import numpy as np
from keras import combinations
from keras import layers
from keras import metrics
from keras import models
from keras.utils import metrics_utils
from tensorflow.python.platform import tf_logging
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class FalsePositivesTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for metrics.FalsePositives in both graph and eager modes."""

  def test_config(self):
    # Name, variables, and thresholds must survive a get_config/from_config
    # round trip.
    fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
    self.assertEqual(fp_obj.name, 'my_fp')
    self.assertLen(fp_obj.variables, 1)
    self.assertEqual(fp_obj.thresholds, [0.4, 0.9])

    # Check save and restore config
    fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
    self.assertEqual(fp_obj2.name, 'my_fp')
    self.assertLen(fp_obj2.variables, 1)
    self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])

  def test_unweighted(self):
    # Binary labels/predictions; counts entries where y_true=0, y_pred=1.
    fp_obj = metrics.FalsePositives()
    self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))

    y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                          (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))

    update_op = fp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = fp_obj.result()
    self.assertAllClose(7., result)

  def test_weighted(self):
    # Each row's false positives are scaled by its sample weight.
    fp_obj = metrics.FalsePositives()
    self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
    y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                          (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    sample_weight = tf.constant((1., 1.5, 2., 2.5))
    result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose(14., self.evaluate(result))

  def test_unweighted_with_thresholds(self):
    # One false-positive count is tracked per threshold.
    fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))

    y_pred = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                          (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))

    update_op = fp_obj.update_state(y_true, y_pred)
    self.evaluate(update_op)
    result = fp_obj.result()
    self.assertAllClose([7., 4., 2.], result)

  def test_weighted_with_thresholds(self):
    # Per-element weights combined with multiple thresholds.
    fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))

    y_pred = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                          (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    y_true = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))
    sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
                     (19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))

    result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
    self.assertAllClose([125., 42., 12.], self.evaluate(result))

  def test_threshold_limit(self):
    # Thresholds outside [0, 1] (or None) must be rejected at construction.
    with self.assertRaisesRegex(
        ValueError,
        r'Threshold values must be in \[0, 1\]. Received: \[-1, 2\]'):
      metrics.FalsePositives(thresholds=[-1, 0.5, 2])

    with self.assertRaisesRegex(
        ValueError,
        r'Threshold values must be in \[0, 1\]. Received: \[None\]'):
      metrics.FalsePositives(thresholds=[None])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class FalseNegativesTest(tf.test.TestCase, parameterized.TestCase):
  """Exercises `metrics.FalseNegatives` in both graph and eager modes."""

  def test_config(self):
    """Name and thresholds survive a get_config/from_config round trip."""
    metric = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
    self.assertEqual('my_fn', metric.name)
    self.assertLen(metric.variables, 1)
    self.assertEqual([0.4, 0.9], metric.thresholds)

    # Rebuild from the serialized config and re-verify every field.
    restored = metrics.FalseNegatives.from_config(metric.get_config())
    self.assertEqual('my_fn', restored.name)
    self.assertLen(restored.variables, 1)
    self.assertEqual([0.4, 0.9], restored.thresholds)

  def test_unweighted(self):
    """Counts every (label=1, prediction=0) entry once."""
    metric = metrics.FalseNegatives()
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))

    labels = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                         (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))

    self.evaluate(metric.update_state(labels, preds))
    self.assertAllClose(3., metric.result())

  def test_weighted(self):
    """Each row's false negatives are scaled by that row's weight."""
    metric = metrics.FalseNegatives()
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
    labels = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                         (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = tf.constant((1., 1.5, 2., 2.5))

    outcome = metric(labels, preds, sample_weight=weights)
    self.assertAllClose(5., self.evaluate(outcome))

  def test_unweighted_with_thresholds(self):
    """A separate false-negative total is tracked per threshold."""
    metric = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))

    preds = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                         (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    labels = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))

    self.evaluate(metric.update_state(labels, preds))
    self.assertAllClose([1., 4., 6.], metric.result())

  def test_weighted_with_thresholds(self):
    """Per-row (broadcast) weights scale each threshold's total."""
    metric = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
    preds = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                         (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    labels = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))
    weights = ((3.0,), (5.0,), (7.0,), (4.0,))

    outcome = metric(labels, preds, sample_weight=weights)
    self.assertAllClose([4., 16., 23.], self.evaluate(outcome))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TrueNegativesTest(tf.test.TestCase, parameterized.TestCase):
  """Exercises `metrics.TrueNegatives` in both graph and eager modes."""

  def test_config(self):
    """Name and thresholds survive a get_config/from_config round trip."""
    metric = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
    self.assertEqual('my_tn', metric.name)
    self.assertLen(metric.variables, 1)
    self.assertEqual([0.4, 0.9], metric.thresholds)

    # Rebuild from the serialized config and re-verify every field.
    restored = metrics.TrueNegatives.from_config(metric.get_config())
    self.assertEqual('my_tn', restored.name)
    self.assertLen(restored.variables, 1)
    self.assertEqual([0.4, 0.9], restored.thresholds)

  def test_unweighted(self):
    """Counts every (label=0, prediction=0) entry once."""
    metric = metrics.TrueNegatives()
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))

    labels = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                         (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))

    self.evaluate(metric.update_state(labels, preds))
    self.assertAllClose(3., metric.result())

  def test_weighted(self):
    """Each row's true negatives are scaled by that row's weight."""
    metric = metrics.TrueNegatives()
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
    labels = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                         (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = tf.constant((1., 1.5, 2., 2.5))

    outcome = metric(labels, preds, sample_weight=weights)
    self.assertAllClose(4., self.evaluate(outcome))

  def test_unweighted_with_thresholds(self):
    """A separate true-negative total is tracked per threshold."""
    metric = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))

    preds = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                         (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    labels = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))

    self.evaluate(metric.update_state(labels, preds))
    self.assertAllClose([2., 5., 7.], metric.result())

  def test_weighted_with_thresholds(self):
    """A single weight row broadcasts across every batch row."""
    metric = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
    preds = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                         (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    labels = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))
    weights = ((0.0, 2.0, 3.0, 5.0),)

    outcome = metric(labels, preds, sample_weight=weights)
    self.assertAllClose([5., 15., 23.], self.evaluate(outcome))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TruePositivesTest(tf.test.TestCase, parameterized.TestCase):
  """Exercises `metrics.TruePositives` in both graph and eager modes."""

  def test_config(self):
    """Name and thresholds survive a get_config/from_config round trip."""
    metric = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
    self.assertEqual('my_tp', metric.name)
    self.assertLen(metric.variables, 1)
    self.assertEqual([0.4, 0.9], metric.thresholds)

    # Rebuild from the serialized config and re-verify every field.
    restored = metrics.TruePositives.from_config(metric.get_config())
    self.assertEqual('my_tp', restored.name)
    self.assertLen(restored.variables, 1)
    self.assertEqual([0.4, 0.9], restored.thresholds)

  def test_unweighted(self):
    """Counts every (label=1, prediction=1) entry once."""
    metric = metrics.TruePositives()
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))

    labels = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                         (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))

    self.evaluate(metric.update_state(labels, preds))
    self.assertAllClose(7., metric.result())

  def test_weighted(self):
    """Each row's true positives are scaled by that row's weight."""
    metric = metrics.TruePositives()
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
    labels = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
                          (1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
    preds = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
                         (0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
    weights = tf.constant((1., 1.5, 2., 2.5))

    outcome = metric(labels, preds, sample_weight=weights)
    self.assertAllClose(12., self.evaluate(outcome))

  def test_unweighted_with_thresholds(self):
    """A separate true-positive total is tracked per threshold."""
    metric = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))

    preds = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                         (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    labels = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))

    self.evaluate(metric.update_state(labels, preds))
    self.assertAllClose([6., 3., 1.], metric.result())

  def test_weighted_with_thresholds(self):
    """A scalar weight multiplies every threshold's count uniformly."""
    metric = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
    self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
    preds = tf.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
                         (0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
    labels = tf.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
                          (1, 1, 1, 1)))

    outcome = metric(labels, preds, sample_weight=37.)
    self.assertAllClose([222., 111., 37.], self.evaluate(outcome))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class PrecisionTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `metrics.Precision` (true positives / predicted positives).

  Several tests call the same metric object multiple times; the metric is
  stateful, so assertions reflect counts accumulated across all prior calls.
  """

  def test_config(self):
    """Constructor arguments survive a get_config/from_config round trip."""
    p_obj = metrics.Precision(
        name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
    self.assertEqual(p_obj.name, 'my_precision')
    self.assertLen(p_obj.variables, 2)
    self.assertEqual([v.name for v in p_obj.variables],
                     ['true_positives:0', 'false_positives:0'])
    self.assertEqual(p_obj.thresholds, [0.4, 0.9])
    self.assertEqual(p_obj.top_k, 15)
    self.assertEqual(p_obj.class_id, 12)

    # Check save and restore config
    p_obj2 = metrics.Precision.from_config(p_obj.get_config())
    self.assertEqual(p_obj2.name, 'my_precision')
    self.assertLen(p_obj2.variables, 2)
    self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
    self.assertEqual(p_obj2.top_k, 15)
    self.assertEqual(p_obj2.class_id, 12)

  def test_value_is_idempotent(self):
    """`result()` does not mutate state: repeated reads return one value."""
    p_obj = metrics.Precision(thresholds=[0.3, 0.72])
    y_pred = tf.random.uniform(shape=(10, 3))
    y_true = tf.random.uniform(shape=(10, 3))
    update_op = p_obj.update_state(y_true, y_pred)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_precision = self.evaluate(p_obj.result())
    for _ in range(10):
      self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
                           1e-3)

  def test_unweighted(self):
    """2 predicted positives, 1 of them correct -> precision 0.5."""
    p_obj = metrics.Precision()
    y_pred = tf.constant([1, 0, 1, 0], shape=(1, 4))
    y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))

  def test_unweighted_all_incorrect(self):
    """Labels are the complement of predictions -> precision 0."""
    p_obj = metrics.Precision(thresholds=[0.5])
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = tf.constant(inputs)
    y_true = tf.constant(1 - inputs)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(0, self.evaluate(result))

  def test_weighted(self):
    """Element-wise sample weights scale both tp and predicted positives."""
    p_obj = metrics.Precision()
    y_pred = tf.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    y_true = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(
        y_true,
        y_pred,
        sample_weight=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
    # Hand-computed expectation from the weights of the matching entries.
    weighted_tp = 3.0 + 4.0
    weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
    expected_precision = weighted_tp / weighted_positives
    self.assertAlmostEqual(expected_precision, self.evaluate(result))

  def test_div_by_zero(self):
    """No predicted positives at all -> precision reported as 0, not NaN."""
    p_obj = metrics.Precision()
    y_pred = tf.constant([0, 0, 0, 0])
    y_true = tf.constant([0, 0, 0, 0])
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertEqual(0, self.evaluate(result))

  def test_unweighted_with_threshold(self):
    """One precision value is produced per configured threshold."""
    p_obj = metrics.Precision(thresholds=[0.5, 0.7])
    y_pred = tf.constant([1, 0, 0.6, 0], shape=(1, 4))
    y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)

  def test_weighted_with_threshold(self):
    """Weights combine with thresholding; threshold 1.0 yields precision 0."""
    p_obj = metrics.Precision(thresholds=[0.5, 1.])
    y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = tf.constant([[1, 0], [0.6, 0]],
                         shape=(2, 2),
                         dtype=tf.float32)
    weights = tf.constant([[4, 0], [3, 1]],
                          shape=(2, 2),
                          dtype=tf.float32)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred, sample_weight=weights)
    # Hand-computed expectation for the 0.5 threshold.
    weighted_tp = 0 + 3.
    weighted_positives = (0 + 3.) + (4. + 0.)
    expected_precision = weighted_tp / weighted_positives
    self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)

  def test_multiple_updates(self):
    """Running the same update op twice doubles the accumulated counts."""
    p_obj = metrics.Precision(thresholds=[0.5, 1.])
    y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = tf.constant([[1, 0], [0.6, 0]],
                         shape=(2, 2),
                         dtype=tf.float32)
    weights = tf.constant([[4, 0], [3, 1]],
                          shape=(2, 2),
                          dtype=tf.float32)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
    for _ in range(2):
      self.evaluate(update_op)

    # Both numerator and denominator double, so the ratio is unchanged.
    weighted_tp = (0 + 3.) + (0 + 3.)
    weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
    expected_precision = weighted_tp / weighted_positives
    self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
                         1e-3)

  def test_unweighted_top_k(self):
    """Only the top-3 scoring entries count as predicted positives."""
    p_obj = metrics.Precision(top_k=3)
    y_pred = tf.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1. / 3, self.evaluate(result))

  def test_weighted_top_k(self):
    """State accumulates across two weighted top-k updates."""
    p_obj = metrics.Precision(top_k=3)
    y_pred1 = tf.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
    y_true1 = tf.constant([0, 1, 1, 0, 1], shape=(1, 5))
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
    self.evaluate(
        p_obj(
            y_true1,
            y_pred1,
            sample_weight=tf.constant([[1, 4, 2, 3, 5]])))

    y_pred2 = tf.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
    y_true2 = tf.constant([1, 0, 1, 1, 1], shape=(1, 5))
    result = p_obj(y_true2, y_pred2, sample_weight=tf.constant(3))

    # First parenthesized term is from update 1, second from update 2.
    tp = (2 + 5) + (3 + 3)
    predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
    expected_precision = tp / predicted_positives
    self.assertAlmostEqual(expected_precision, self.evaluate(result))

  def test_unweighted_class_id(self):
    """Only predictions/labels for class 2 contribute; state accumulates
    across the three successive calls below."""
    p_obj = metrics.Precision(class_id=2)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))

    # Call 1: class 2 predicted and labeled -> 1 tp.
    y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))

    # Call 2: class 2 not predicted -> counts unchanged.
    y_pred = tf.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))

    # Call 3: class 2 predicted but not labeled -> 1 fp, precision drops.
    y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 0, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))

  def test_unweighted_top_k_and_class_id(self):
    """class_id filtering applies after the top-k cut; state accumulates."""
    p_obj = metrics.Precision(class_id=2, top_k=2)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))

    # Class 2 is inside the top-2 predictions -> counted as tp.
    y_pred = tf.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))

    # Class 2 scores 0.9 but falls outside top-2 -> counts unchanged.
    y_pred = tf.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))

  def test_unweighted_top_k_and_threshold(self):
    """An entry must pass both the threshold and the top-k cut."""
    p_obj = metrics.Precision(thresholds=.7, top_k=2)
    self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))

    y_pred = tf.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 1], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RecallTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `metrics.Recall` (true positives / actual positives).

  Several tests call the same metric object multiple times; the metric is
  stateful, so assertions reflect counts accumulated across all prior calls.
  """

  def test_config(self):
    """Constructor arguments survive a get_config/from_config round trip."""
    r_obj = metrics.Recall(
        name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
    self.assertEqual(r_obj.name, 'my_recall')
    self.assertLen(r_obj.variables, 2)
    self.assertEqual([v.name for v in r_obj.variables],
                     ['true_positives:0', 'false_negatives:0'])
    self.assertEqual(r_obj.thresholds, [0.4, 0.9])
    self.assertEqual(r_obj.top_k, 15)
    self.assertEqual(r_obj.class_id, 12)

    # Check save and restore config
    r_obj2 = metrics.Recall.from_config(r_obj.get_config())
    self.assertEqual(r_obj2.name, 'my_recall')
    self.assertLen(r_obj2.variables, 2)
    self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
    self.assertEqual(r_obj2.top_k, 15)
    self.assertEqual(r_obj2.class_id, 12)

  def test_value_is_idempotent(self):
    """`result()` does not mutate state: repeated reads return one value."""
    r_obj = metrics.Recall(thresholds=[0.3, 0.72])
    y_pred = tf.random.uniform(shape=(10, 3))
    y_true = tf.random.uniform(shape=(10, 3))
    update_op = r_obj.update_state(y_true, y_pred)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_recall = self.evaluate(r_obj.result())
    for _ in range(10):
      self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)

  def test_unweighted(self):
    """2 actual positives, 1 of them recovered -> recall 0.5."""
    r_obj = metrics.Recall()
    y_pred = tf.constant([1, 0, 1, 0], shape=(1, 4))
    y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))

  def test_unweighted_all_incorrect(self):
    """Labels are the complement of predictions -> recall 0."""
    r_obj = metrics.Recall(thresholds=[0.5])
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = tf.constant(inputs)
    y_true = tf.constant(1 - inputs)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0, self.evaluate(result))

  def test_weighted(self):
    """Element-wise sample weights scale both tp and actual positives."""
    r_obj = metrics.Recall()
    y_pred = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    y_true = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(
        y_true,
        y_pred,
        sample_weight=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
    # Hand-computed expectation from the weights of the matching entries.
    weighted_tp = 3.0 + 1.0
    weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
    expected_recall = weighted_tp / weighted_t
    self.assertAlmostEqual(expected_recall, self.evaluate(result))

  def test_div_by_zero(self):
    """No actual positives at all -> recall reported as 0, not NaN."""
    r_obj = metrics.Recall()
    y_pred = tf.constant([0, 0, 0, 0])
    y_true = tf.constant([0, 0, 0, 0])
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertEqual(0, self.evaluate(result))

  def test_unweighted_with_threshold(self):
    """One recall value is produced per configured threshold."""
    r_obj = metrics.Recall(thresholds=[0.5, 0.7])
    y_pred = tf.constant([1, 0, 0.6, 0], shape=(1, 4))
    y_true = tf.constant([0, 1, 1, 0], shape=(1, 4))
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)

  def test_weighted_with_threshold(self):
    """Weights combine with thresholding; threshold 1.0 yields recall 0."""
    r_obj = metrics.Recall(thresholds=[0.5, 1.])
    y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = tf.constant([[1, 0], [0.6, 0]],
                         shape=(2, 2),
                         dtype=tf.float32)
    weights = tf.constant([[1, 4], [3, 2]],
                          shape=(2, 2),
                          dtype=tf.float32)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred, sample_weight=weights)
    # Hand-computed expectation for the 0.5 threshold.
    weighted_tp = 0 + 3.
    weighted_positives = (0 + 3.) + (4. + 0.)
    expected_recall = weighted_tp / weighted_positives
    self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)

  def test_multiple_updates(self):
    """Running the same update op twice doubles the accumulated counts."""
    r_obj = metrics.Recall(thresholds=[0.5, 1.])
    y_true = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = tf.constant([[1, 0], [0.6, 0]],
                         shape=(2, 2),
                         dtype=tf.float32)
    weights = tf.constant([[1, 4], [3, 2]],
                          shape=(2, 2),
                          dtype=tf.float32)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
    for _ in range(2):
      self.evaluate(update_op)

    # Both numerator and denominator double, so the ratio is unchanged.
    weighted_tp = (0 + 3.) + (0 + 3.)
    weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
    expected_recall = weighted_tp / weighted_positives
    self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
                         1e-3)

  def test_unweighted_top_k(self):
    """Only labels whose prediction is in the top-3 count as recovered."""
    r_obj = metrics.Recall(top_k=3)
    y_pred = tf.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))

  def test_weighted_top_k(self):
    """State accumulates across two weighted top-k updates."""
    r_obj = metrics.Recall(top_k=3)
    y_pred1 = tf.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
    y_true1 = tf.constant([0, 1, 1, 0, 1], shape=(1, 5))
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
    self.evaluate(
        r_obj(
            y_true1,
            y_pred1,
            sample_weight=tf.constant([[1, 4, 2, 3, 5]])))

    y_pred2 = tf.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
    y_true2 = tf.constant([1, 0, 1, 1, 1], shape=(1, 5))
    result = r_obj(y_true2, y_pred2, sample_weight=tf.constant(3))

    # First parenthesized term is from update 1, second from update 2.
    tp = (2 + 5) + (3 + 3)
    positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
    expected_recall = tp / positives
    self.assertAlmostEqual(expected_recall, self.evaluate(result))

  def test_unweighted_class_id(self):
    """Only predictions/labels for class 2 contribute; state accumulates
    across the three successive calls below."""
    r_obj = metrics.Recall(class_id=2)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))

    # Call 1: class 2 labeled and predicted -> 1 tp.
    y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))

    # Call 2: class 2 labeled but not predicted -> 1 fn, recall drops.
    y_pred = tf.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))

    # Call 3: class 2 not labeled -> counts unchanged.
    y_pred = tf.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 0, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))

  def test_unweighted_top_k_and_class_id(self):
    """class_id filtering applies after the top-k cut; state accumulates."""
    r_obj = metrics.Recall(class_id=2, top_k=2)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))

    # Class 2 is inside the top-2 predictions -> counted as tp.
    y_pred = tf.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))

    # Class 2 scores 0.9 but falls outside top-2 -> counted as fn.
    y_pred = tf.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
    y_true = tf.constant([0, 1, 1, 0, 0], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.5, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))

  def test_unweighted_top_k_and_threshold(self):
    """An entry must pass both the threshold and the top-k cut."""
    r_obj = metrics.Recall(thresholds=.7, top_k=2)
    self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))

    y_pred = tf.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
    y_true = tf.constant([1, 1, 1, 0, 1], shape=(1, 5))
    result = r_obj(y_true, y_pred)
    self.assertAlmostEqual(0.25, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
    self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SensitivityAtSpecificityTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `metrics.SensitivityAtSpecificity`."""

  def test_config(self):
    """Constructor arguments survive a get_config/from_config round trip."""
    s_obj = metrics.SensitivityAtSpecificity(
        0.4,
        num_thresholds=100,
        class_id=12,
        name='sensitivity_at_specificity_1')
    self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
    self.assertLen(s_obj.variables, 4)
    self.assertEqual(s_obj.specificity, 0.4)
    self.assertEqual(s_obj.num_thresholds, 100)
    self.assertEqual(s_obj.class_id, 12)

    # Check save and restore config.
    s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
    self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
    self.assertLen(s_obj2.variables, 4)
    self.assertEqual(s_obj2.specificity, 0.4)
    self.assertEqual(s_obj2.num_thresholds, 100)
    # Bug fix: assert on the restored object (previously re-checked `s_obj`,
    # leaving `class_id` restoration untested).
    self.assertEqual(s_obj2.class_id, 12)

  def test_value_is_idempotent(self):
    """`result()` does not mutate state: repeated reads return one value."""
    s_obj = metrics.SensitivityAtSpecificity(0.7)
    y_pred = tf.random.uniform((10, 3),
                               maxval=1,
                               dtype=tf.float32,
                               seed=1)
    y_true = tf.random.uniform((10, 3),
                               maxval=2,
                               dtype=tf.int64,
                               seed=1)
    update_op = s_obj.update_state(y_true, y_pred)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_sensitivity = self.evaluate(s_obj.result())
    for _ in range(10):
      self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
                             1e-3)

    def test_unweighted_all_correct(self):
      """Perfect predictions give sensitivity 1 at any specificity."""
    with self.test_session():
      s_obj = metrics.SensitivityAtSpecificity(0.7)
      inputs = np.random.randint(0, 2, size=(100, 1))
      y_pred = tf.constant(inputs, dtype=tf.float32)
      y_true = tf.constant(inputs)
      self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
      result = s_obj(y_true, y_pred)
      self.assertAlmostEqual(1, self.evaluate(result))

  def test_unweighted_high_specificity(self):
    """Hand-constructed scores: sensitivity is 0.8 at specificity 0.8."""
    s_obj = metrics.SensitivityAtSpecificity(0.8)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.8, self.evaluate(result))

  def test_unweighted_low_specificity(self):
    """Hand-constructed scores: sensitivity is 0.6 at specificity 0.4."""
    s_obj = metrics.SensitivityAtSpecificity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.6, self.evaluate(result))

  def test_unweighted_class_id(self):
    """Restricting to class 2 reproduces the low-specificity result.

    Bug fix: this suite tests `SensitivityAtSpecificity`, but the original
    test instantiated `SpecificityAtSensitivity` (copy-paste from the
    sibling suite). The expected value 0.6 matches
    `test_unweighted_low_specificity`, which uses the same scores/labels.
    """
    s_obj = metrics.SensitivityAtSpecificity(0.4, class_id=2)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]

    # Replicate the scores across 3 classes; one-hot the labels so that
    # column 2 carries exactly the binary problem from the test above.
    y_pred = tf.transpose([pred_values] * 3)
    y_true = tf.one_hot(label_values, depth=3)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.6, self.evaluate(result))

  @parameterized.parameters([tf.bool, tf.int32, tf.float32])
  def test_weighted(self, label_dtype):
    """Per-example weights shift the achievable sensitivity to 0.675."""
    s_obj = metrics.SensitivityAtSpecificity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.cast(label_values, dtype=label_dtype)
    weights = tf.constant(weight_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(0.675, self.evaluate(result))

  def test_invalid_specificity(self):
    """A specificity outside [0, 1] is rejected at construction."""
    with self.assertRaisesRegex(
        ValueError, r'`specificity` must be in the range \[0, 1\].'):
      metrics.SensitivityAtSpecificity(-1)

  def test_invalid_num_thresholds(self):
    """`num_thresholds` must be a positive integer."""
    with self.assertRaisesRegex(
        ValueError, 'Argument `num_thresholds` must be an integer > 0'):
      metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SpecificityAtSensitivityTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `metrics.SpecificityAtSensitivity`."""

  def test_config(self):
    """Constructor arguments survive a get_config/from_config round trip."""
    s_obj = metrics.SpecificityAtSensitivity(
        0.4,
        num_thresholds=100,
        class_id=12,
        name='specificity_at_sensitivity_1')
    self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
    self.assertLen(s_obj.variables, 4)
    self.assertEqual(s_obj.sensitivity, 0.4)
    self.assertEqual(s_obj.num_thresholds, 100)
    self.assertEqual(s_obj.class_id, 12)

    # Check save and restore config.
    s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
    self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
    self.assertLen(s_obj2.variables, 4)
    self.assertEqual(s_obj2.sensitivity, 0.4)
    self.assertEqual(s_obj2.num_thresholds, 100)
    # Bug fix: assert on the restored object (previously re-checked `s_obj`,
    # leaving `class_id` restoration untested).
    self.assertEqual(s_obj2.class_id, 12)

  def test_value_is_idempotent(self):
    """`result()` does not mutate state: repeated reads return one value."""
    s_obj = metrics.SpecificityAtSensitivity(0.7)
    y_pred = tf.random.uniform((10, 3),
                               maxval=1,
                               dtype=tf.float32,
                               seed=1)
    y_true = tf.random.uniform((10, 3),
                               maxval=2,
                               dtype=tf.int64,
                               seed=1)
    update_op = s_obj.update_state(y_true, y_pred)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_specificity = self.evaluate(s_obj.result())
    for _ in range(10):
      self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
                             1e-3)

  def test_unweighted_all_correct(self):
    """Perfect predictions give specificity 1 at any sensitivity."""
    s_obj = metrics.SpecificityAtSensitivity(0.7)
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = tf.constant(inputs, dtype=tf.float32)
    y_true = tf.constant(inputs)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))

  def test_unweighted_high_sensitivity(self):
    """Hand-constructed scores: specificity is 0.2 at sensitivity 1.0."""
    s_obj = metrics.SpecificityAtSensitivity(1.0)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.2, self.evaluate(result))

  def test_unweighted_low_sensitivity(self):
    """Hand-constructed scores: specificity is 0.6 at sensitivity 0.4."""
    s_obj = metrics.SpecificityAtSensitivity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.6, self.evaluate(result))

  def test_unweighted_class_id(self):
    """Restricting to class 2 reproduces the low-sensitivity result."""
    s_obj = metrics.SpecificityAtSensitivity(0.4, class_id=2)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]

    # Replicate the scores across 3 classes; one-hot the labels so that
    # column 2 carries exactly the binary problem from the test above.
    y_pred = tf.transpose([pred_values] * 3)
    y_true = tf.one_hot(label_values, depth=3)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(0.6, self.evaluate(result))

  @parameterized.parameters([tf.bool, tf.int32, tf.float32])
  def test_weighted(self, label_dtype):
    """Per-example weights shift the achievable specificity to 0.4."""
    s_obj = metrics.SpecificityAtSensitivity(0.4)
    pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.cast(label_values, dtype=label_dtype)
    weights = tf.constant(weight_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(0.4, self.evaluate(result))

  def test_invalid_sensitivity(self):
    """A sensitivity outside [0, 1] is rejected at construction."""
    with self.assertRaisesRegex(
        ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
      metrics.SpecificityAtSensitivity(-1)

  def test_invalid_num_thresholds(self):
    """`num_thresholds` must be a positive integer."""
    with self.assertRaisesRegex(
        ValueError, 'Argument `num_thresholds` must be an integer > 0'):
      metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class PrecisionAtRecallTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the `PrecisionAtRecall` metric."""

  def test_config(self):
    """Constructor arguments survive a `get_config`/`from_config` round trip."""
    s_obj = metrics.PrecisionAtRecall(
        0.4, num_thresholds=100, class_id=12, name='precision_at_recall_1')
    self.assertEqual(s_obj.name, 'precision_at_recall_1')
    self.assertLen(s_obj.variables, 4)
    self.assertEqual(s_obj.recall, 0.4)
    self.assertEqual(s_obj.num_thresholds, 100)
    self.assertEqual(s_obj.class_id, 12)

    # Check save and restore config.
    s_obj2 = metrics.PrecisionAtRecall.from_config(s_obj.get_config())
    self.assertEqual(s_obj2.name, 'precision_at_recall_1')
    self.assertLen(s_obj2.variables, 4)
    self.assertEqual(s_obj2.recall, 0.4)
    self.assertEqual(s_obj2.num_thresholds, 100)
    # Bug fix: this previously re-checked `s_obj`, so a `from_config` round
    # trip that dropped `class_id` would have gone unnoticed.
    self.assertEqual(s_obj2.class_id, 12)

  def test_value_is_idempotent(self):
    """`result()` is stable across repeated reads after identical updates."""
    s_obj = metrics.PrecisionAtRecall(0.7)
    y_pred = tf.random.uniform((10, 3),
                               maxval=1,
                               dtype=tf.float32,
                               seed=1)
    y_true = tf.random.uniform((10, 3),
                               maxval=2,
                               dtype=tf.int64,
                               seed=1)
    update_op = s_obj.update_state(y_true, y_pred)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency: reading the result must not mutate state.
    initial_precision = self.evaluate(s_obj.result())
    for _ in range(10):
      self.assertAlmostEqual(initial_precision, self.evaluate(s_obj.result()),
                             1e-3)

  def test_unweighted_all_correct(self):
    """A perfect classifier reaches precision 1 at any recall."""
    s_obj = metrics.PrecisionAtRecall(0.7)
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = tf.constant(inputs, dtype=tf.float32)
    y_true = tf.constant(inputs)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))

  def test_unweighted_high_recall(self):
    """A high recall target (0.8) limits precision to 2/3."""
    s_obj = metrics.PrecisionAtRecall(0.8)
    pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # For 0.5 < decision threshold < 0.6.
    self.assertAlmostEqual(2.0/3, self.evaluate(result))

  def test_unweighted_low_recall(self):
    """A modest recall target (0.6) permits precision 0.75."""
    s_obj = metrics.PrecisionAtRecall(0.6)
    pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # For 0.2 < decision threshold < 0.5.
    self.assertAlmostEqual(0.75, self.evaluate(result))

  def test_unweighted_class_id(self):
    """Metric restricted via `class_id` scores only class 2 of a one-hot task."""
    s_obj = metrics.PrecisionAtRecall(0.6, class_id=2)
    pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
    # Replicate the same scores across all three classes; only class 2 counts.
    y_pred = tf.transpose([pred_values] * 3)
    y_true = tf.one_hot(label_values, depth=3)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # For 0.2 < decision threshold < 0.5.
    self.assertAlmostEqual(0.75, self.evaluate(result))

  @parameterized.parameters([tf.bool, tf.int32, tf.float32])
  def test_weighted(self, label_dtype):
    """Per-example weights shift precision; labels come in several dtypes."""
    s_obj = metrics.PrecisionAtRecall(7.0/8)
    pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
    label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weight_values = [2, 1, 2, 1, 2, 1, 2, 2, 1, 2]
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.cast(label_values, dtype=label_dtype)
    weights = tf.constant(weight_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred, sample_weight=weights)
    # For 0.0 < decision threshold < 0.2.
    self.assertAlmostEqual(0.7, self.evaluate(result))

  def test_invalid_sensitivity(self):
    # NOTE: the name is historical; this actually validates the `recall`
    # constructor argument.
    with self.assertRaisesRegex(ValueError,
                                r'`recall` must be in the range \[0, 1\].'):
      metrics.PrecisionAtRecall(-1)

  def test_invalid_num_thresholds(self):
    """The constructor rejects a non-positive threshold count."""
    with self.assertRaisesRegex(
        ValueError, 'Argument `num_thresholds` must be an integer > 0'):
      metrics.PrecisionAtRecall(0.4, num_thresholds=-1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RecallAtPrecisionTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the `RecallAtPrecision` metric."""

  def test_config(self):
    """Constructor arguments survive a `get_config`/`from_config` round trip."""
    s_obj = metrics.RecallAtPrecision(
        0.4, num_thresholds=100, class_id=12, name='recall_at_precision_1')
    self.assertEqual(s_obj.name, 'recall_at_precision_1')
    self.assertLen(s_obj.variables, 4)
    self.assertEqual(s_obj.precision, 0.4)
    self.assertEqual(s_obj.num_thresholds, 100)
    self.assertEqual(s_obj.class_id, 12)

    # Check save and restore config.
    s_obj2 = metrics.RecallAtPrecision.from_config(s_obj.get_config())
    self.assertEqual(s_obj2.name, 'recall_at_precision_1')
    self.assertLen(s_obj2.variables, 4)
    self.assertEqual(s_obj2.precision, 0.4)
    self.assertEqual(s_obj2.num_thresholds, 100)
    # Bug fix: this previously re-checked `s_obj`, so a `from_config` round
    # trip that dropped `class_id` would have gone unnoticed.
    self.assertEqual(s_obj2.class_id, 12)

  def test_value_is_idempotent(self):
    """`result()` is stable across repeated reads after identical updates."""
    s_obj = metrics.RecallAtPrecision(0.7)
    y_pred = tf.random.uniform((10, 3),
                               maxval=1,
                               dtype=tf.float32,
                               seed=1)
    y_true = tf.random.uniform((10, 3),
                               maxval=2,
                               dtype=tf.int64,
                               seed=1)
    update_op = s_obj.update_state(y_true, y_pred)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency: reading the result must not mutate state.
    initial_recall = self.evaluate(s_obj.result())
    for _ in range(10):
      self.assertAlmostEqual(initial_recall, self.evaluate(s_obj.result()),
                             1e-3)

  def test_unweighted_all_correct(self):
    """A perfect classifier reaches recall 1 at any precision."""
    s_obj = metrics.RecallAtPrecision(0.7)
    inputs = np.random.randint(0, 2, size=(100, 1))
    y_pred = tf.constant(inputs, dtype=tf.float32)
    y_true = tf.constant(inputs)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))

  def test_unweighted_high_precision(self):
    """A high precision target (0.75) limits recall to 0.5."""
    s_obj = metrics.RecallAtPrecision(0.75)
    pred_values = [
        0.05, 0.1, 0.2, 0.3, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.9, 0.95
    ]
    label_values = [0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
    # precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2, 1].
    # recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6, 1/6].
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # The precision 0.75 can be reached at thresholds 0.4<=t<0.45.
    self.assertAlmostEqual(0.5, self.evaluate(result))

  def test_unweighted_low_precision(self):
    """A modest precision target (2/3) permits recall 5/6."""
    s_obj = metrics.RecallAtPrecision(2.0 / 3)
    pred_values = [
        0.05, 0.1, 0.2, 0.3, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.9, 0.95
    ]
    label_values = [0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
    # precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2, 1].
    # recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6, 1/6].
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # The precision 5/7 can be reached at thresholds 0.3<=t<0.35.
    self.assertAlmostEqual(5. / 6, self.evaluate(result))

  def test_unweighted_class_id(self):
    """Metric restricted via `class_id` scores only class 2 of a one-hot task."""
    s_obj = metrics.RecallAtPrecision(2.0 / 3, class_id=2)
    pred_values = [
        0.05, 0.1, 0.2, 0.3, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.9, 0.95
    ]
    label_values = [0, 2, 0, 0, 0, 2, 2, 0, 2, 2, 0, 2]
    # precisions: [1/2, 6/11, 1/2, 5/9, 5/8, 5/7, 2/3, 3/5, 3/5, 2/3, 1/2, 1].
    # recalls: [1, 1, 5/6, 5/6, 5/6, 5/6, 2/3, 1/2, 1/2, 1/3, 1/6, 1/6].
    y_pred = tf.transpose([pred_values] * 3)
    y_true = tf.one_hot(label_values, depth=3)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # The precision 5/7 can be reached at thresholds 0.3<=t<0.35.
    self.assertAlmostEqual(5. / 6, self.evaluate(result))

  @parameterized.parameters([tf.bool, tf.int32, tf.float32])
  def test_weighted(self, label_dtype):
    """Per-example weights shift recall; labels come in several dtypes."""
    s_obj = metrics.RecallAtPrecision(0.75)
    pred_values = [0.1, 0.2, 0.3, 0.5, 0.6, 0.9, 0.9]
    label_values = [0, 1, 0, 0, 0, 1, 1]
    weight_values = [1, 2, 1, 2, 1, 2, 1]
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.cast(label_values, dtype=label_dtype)
    weights = tf.constant(weight_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred, sample_weight=weights)
    self.assertAlmostEqual(0.6, self.evaluate(result))

  def test_unachievable_precision(self):
    """When the precision target can never be met, recall falls back to 0."""
    s_obj = metrics.RecallAtPrecision(2.0 / 3)
    pred_values = [0.1, 0.2, 0.3, 0.9]
    label_values = [1, 1, 0, 0]
    y_pred = tf.constant(pred_values, dtype=tf.float32)
    y_true = tf.constant(label_values)
    self.evaluate(tf.compat.v1.variables_initializer(s_obj.variables))
    result = s_obj(y_true, y_pred)
    # The highest possible precision is 1/2 which is below the required
    # value, expect 0 recall.
    self.assertAlmostEqual(0, self.evaluate(result))

  def test_invalid_sensitivity(self):
    # NOTE: the name is historical; this actually validates the `precision`
    # constructor argument.
    with self.assertRaisesRegex(ValueError,
                                r'`precision` must be in the range \[0, 1\].'):
      metrics.RecallAtPrecision(-1)

  def test_invalid_num_thresholds(self):
    """The constructor rejects a non-positive threshold count."""
    with self.assertRaisesRegex(
        ValueError, 'Argument `num_thresholds` must be an integer > 0'):
      metrics.RecallAtPrecision(0.4, num_thresholds=-1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AUCTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the single-label `AUC` metric (ROC and PR curves)."""

  def setup(self):
    # NOTE: deliberately `setup`, not unittest's `setUp`; each test that needs
    # these fixtures calls it explicitly.
    self.num_thresholds = 3
    self.y_pred = tf.constant([0, 0.5, 0.3, 0.9], dtype=tf.float32)
    epsilon = 1e-12
    # Inverse sigmoid of y_pred, used by the `from_logits=True` tests.
    self.y_pred_logits = -tf.math.log(1.0 / (self.y_pred + epsilon) - 1.0)
    self.y_true = tf.constant([0, 0, 1, 1])
    self.sample_weight = [1, 2, 3, 4]

    # Hand-derived confusion counts the assertions below rely on:
    # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
    # y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
    # y_pred when threshold = 0.5 : [0, 0, 0, 1]
    # y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
    # without sample_weight:
    # tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
    # fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
    # fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
    # tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    # with sample_weight:
    # tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
    # fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
    # fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
    # tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]

  def test_config(self):
    # Constructor arguments must survive a get_config/from_config round trip.
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=100,
        curve='PR',
        summation_method='majoring',
        name='auc_1')
    auc_obj.update_state(self.y_true, self.y_pred)
    self.assertEqual(auc_obj.name, 'auc_1')
    self.assertLen(auc_obj.variables, 4)
    self.assertEqual(auc_obj.num_thresholds, 100)
    self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    old_config = auc_obj.get_config()
    # Auto-generated thresholds must not be serialized, and the config must be
    # JSON-serializable.
    self.assertNotIn('thresholds', old_config)
    self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))

    # Check save and restore config.
    auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
    auc_obj2.update_state(self.y_true, self.y_pred)
    self.assertEqual(auc_obj2.name, 'auc_1')
    self.assertLen(auc_obj2.variables, 4)
    self.assertEqual(auc_obj2.num_thresholds, 100)
    self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj2.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    new_config = auc_obj2.get_config()
    self.assertNotIn('thresholds', new_config)
    self.assertDictEqual(old_config, new_config)
    self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)

  def test_config_manual_thresholds(self):
    # Same round trip, but with user-supplied thresholds (which the metric
    # pads with 0.0 and 1.0).
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=None,
        curve='PR',
        summation_method='majoring',
        name='auc_1',
        thresholds=[0.3, 0.5])
    auc_obj.update_state(self.y_true, self.y_pred)
    self.assertEqual(auc_obj.name, 'auc_1')
    self.assertLen(auc_obj.variables, 4)
    self.assertEqual(auc_obj.num_thresholds, 4)
    self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
    self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    old_config = auc_obj.get_config()
    self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))

    # Check save and restore config.
    auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
    auc_obj2.update_state(self.y_true, self.y_pred)
    self.assertEqual(auc_obj2.name, 'auc_1')
    self.assertLen(auc_obj2.variables, 4)
    self.assertEqual(auc_obj2.num_thresholds, 4)
    self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
    self.assertEqual(auc_obj2.summation_method,
                     metrics_utils.AUCSummationMethod.MAJORING)
    new_config = auc_obj2.get_config()
    self.assertDictEqual(old_config, new_config)
    self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)

  def test_value_is_idempotent(self):
    # Reading result() must not mutate the accumulated state.
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=3)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))

    # Run several updates.
    update_op = auc_obj.update_state(self.y_true, self.y_pred)
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_auc = self.evaluate(auc_obj.result())
    for _ in range(10):
      self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)

  def test_unweighted_all_correct(self):
    # A perfect classifier (labels fed in as predictions) gives AUC 1.
    self.setup()
    auc_obj = metrics.AUC()
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_true)
    self.assertEqual(self.evaluate(result), 1)

  def test_unweighted(self):
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred)

    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    # recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
    # fp_rate = [2/2, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.75 * 1 + 0.25 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_unweighted_from_logits(self):
    # Same data as test_unweighted, but passed as logits.
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, from_logits=True)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred_logits)

    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    # recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
    # fp_rate = [2/2, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.75 * 1 + 0.25 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_manual_thresholds(self):
    self.setup()
    # Verify that when specified, thresholds are used instead of num_thresholds.
    auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
    self.assertEqual(auc_obj.num_thresholds, 3)
    self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred)

    # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    # recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
    # fp_rate = [2/2, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.75 * 1 + 0.25 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_interpolation(self):
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # fp_rate = [3/3, 0, 0] = [1, 0, 0]
    # heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.7855 * 1 + 0.2855 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_majoring(self):
    # 'majoring' takes the max of adjacent curve heights per trapezoid.
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds, summation_method='majoring')
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # fp_rate = [3/3, 0, 0] = [1, 0, 0]
    # heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (1 * 1 + 0.571 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_minoring(self):
    # 'minoring' takes the min of adjacent curve heights per trapezoid.
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds, summation_method='minoring')
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # fp_rate = [3/3, 0, 0] = [1, 0, 0]
    # heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
    # widths = [(1 - 0), (0 - 0)] = [1, 0]
    expected_result = (0.571 * 1 + 0 * 0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_pr_majoring(self):
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds,
        curve='PR',
        summation_method='majoring')
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # heights = [max(0.7, 1), max(1, 0)] = [1, 1]
    # widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
    expected_result = (1 * 0.429 + 1 * 0.571)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_pr_minoring(self):
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds,
        curve='PR',
        summation_method='minoring')
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
    # recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
    # heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
    # widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
    expected_result = (0.7 * 0.429 + 0 * 0.571)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_pr_interpolation(self):
    # Default PR summation uses the Davis & Goadrich style interpolation.
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR')
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)

    # auc = (slope / Total Pos) * [dTP - intercept * log(Pb/Pa)]
    # tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
    # P = tp + fp = [10, 4, 0]
    # dTP = [7-4, 4-0] = [3, 4]
    # dP = [10-4, 4-0] = [6, 4]
    # slope = dTP/dP = [0.5, 1]
    # intercept = (TPa+(slope*Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
    # (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
    # auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
    #                = [2.416, 4]
    # auc = [2.416, 4]/(tp[1:]+fn[1:])
    expected_result = (2.416/7 + 4/7)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_invalid_num_thresholds(self):
    # AUC needs at least two thresholds to define a curve segment.
    with self.assertRaisesRegex(
        ValueError, 'Argument `num_thresholds` must be an integer > 1'):
      metrics.AUC(num_thresholds=-1)

    with self.assertRaisesRegex(
        ValueError, 'Argument `num_thresholds` must be an integer > 1.'):
      metrics.AUC(num_thresholds=1)

  def test_invalid_curve(self):
    with self.assertRaisesRegex(ValueError,
                                'Invalid AUC curve value: "Invalid".'):
      metrics.AUC(curve='Invalid')

  def test_invalid_summation_method(self):
    with self.assertRaisesRegex(
        ValueError, 'Invalid AUC summation method value: "Invalid".'):
      metrics.AUC(summation_method='Invalid')

  def test_extra_dims(self):
    # AUC should flatten inputs with extra leading dimensions.
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      self.setup()
      logits = special.expit(-np.array([[[-10., 10., -10.], [10., -10., 10.]],
                                        [[-12., 12., -12.], [12., -12., 12.]]],
                                       dtype=np.float32))
      labels = np.array([[[1, 0, 0], [1, 0, 0]], [[0, 1, 1], [0, 1, 1]]],
                        dtype=np.int64)
      auc_obj = metrics.AUC()
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(labels, logits)
      self.assertEqual(self.evaluate(result), 0.5)
    except ImportError as e:
      # Best-effort test: scipy may be unavailable in some environments.
      tf_logging.warning('Cannot test special functions: %s' % str(e))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MultiAUCTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `AUC` with `multi_label=True` (per-label curves, then averaged)."""

  def setup(self):
    # NOTE: deliberately `setup`, not unittest's `setUp`; each test that needs
    # these fixtures calls it explicitly.
    self.num_thresholds = 5
    self.y_pred = tf.constant(
        np.array([[0, 0.5, 0.3, 0.9], [0.1, 0.2, 0.3, 0.4]]).T,
        dtype=tf.float32)

    epsilon = 1e-12
    # Inverse sigmoid of y_pred, used by the `from_logits=True` tests.
    self.y_pred_logits = -tf.math.log(1.0 / (self.y_pred + epsilon) - 1.0)

    # "good": second label agrees with the first; "bad": it is inverted.
    self.y_true_good = tf.constant(
        np.array([[0, 0, 1, 1], [0, 0, 1, 1]]).T)
    self.y_true_bad = tf.constant(
        np.array([[0, 0, 1, 1], [1, 1, 0, 0]]).T)
    self.sample_weight = [1, 2, 3, 4]

    # Hand-derived confusion counts the assertions below rely on:
    # threshold values are [0 - 1e-7, 0.25, 0.5, 0.75, 1 + 1e-7]
    # y_pred when threshold = 0 - 1e-7 : [[1, 1, 1, 1], [1, 1, 1, 1]]
    # y_pred when threshold = 0.25 : [[0, 1, 1, 1], [0, 0, 1, 1]]
    # y_pred when threshold = 0.5 : [[0, 0, 0, 1], [0, 0, 0, 0]]
    # y_pred when threshold = 0.75 : [[0, 0, 0, 1], [0, 0, 0, 0]]
    # y_pred when threshold = 1 + 1e-7 : [[0, 0, 0, 0], [0, 0, 0, 0]]

    # for y_true_good, over thresholds:
    # tp = [[2, 2, 1, 1, 0], [2, 2, 0, 0, 0]]
    # fp = [[2, 1, 0, 0 , 0], [2, 0, 0 ,0, 0]]
    # fn = [[0, 0, 1, 1, 2], [0, 0, 2, 2, 2]]
    # tn = [[0, 1, 2, 2, 2], [0, 2, 2, 2, 2]]

    # tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
    # fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]

    # for y_true_bad:
    # tp = [[2, 2, 1, 1, 0], [2, 0, 0, 0, 0]]
    # fp = [[2, 1, 0, 0 , 0], [2, 2, 0 ,0, 0]]
    # fn = [[0, 0, 1, 1, 2], [0, 2, 2, 2, 2]]
    # tn = [[0, 1, 2, 2, 2], [0, 0, 2, 2, 2]]

    # tpr = [[1, 1, 0.5, 0.5, 0], [1, 0, 0, 0, 0]]
    # fpr = [[1, 0.5, 0, 0, 0], [1, 1, 0, 0, 0]]

    # for y_true_good with sample_weights:
    # tp = [[7, 7, 4, 4, 0], [7, 7, 0, 0, 0]]
    # fp = [[3, 2, 0, 0, 0], [3, 0, 0, 0, 0]]
    # fn = [[0, 0, 3, 3, 7], [0, 0, 7, 7, 7]]
    # tn = [[0, 1, 3, 3, 3], [0, 3, 3, 3, 3]]

    # tpr = [[1, 1, 0.57, 0.57, 0], [1, 1, 0, 0, 0]]
    # fpr = [[1, 0.67, 0, 0, 0], [1, 0, 0, 0, 0]]

  def test_value_is_idempotent(self):
    # Reading result() must not mutate the accumulated state.
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(num_thresholds=5, multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))

      # Run several updates.
      update_op = auc_obj.update_state(self.y_true_good, self.y_pred)
      for _ in range(10):
        self.evaluate(update_op)

      # Then verify idempotency.
      initial_auc = self.evaluate(auc_obj.result())
      for _ in range(10):
        self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)

  def test_unweighted_all_correct(self):
    # Perfect per-label predictions give AUC 1 in multi-label mode.
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(self.y_true_good, self.y_true_good)
      self.assertEqual(self.evaluate(result), 1)

  def test_unweighted_all_correct_flat(self):
    # Same, but with labels flattened into a single curve.
    self.setup()
    auc_obj = metrics.AUC(multi_label=False)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true_good, self.y_true_good)
    self.assertEqual(self.evaluate(result), 1)

  def test_unweighted(self):
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(num_thresholds=self.num_thresholds,
                            multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(self.y_true_good, self.y_pred)

      # tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
      # fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
      # Per-label ROC AUCs are 0.875 and 1.0; the result is their mean.
      expected_result = (0.875 + 1.0) / 2.0
      self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_unweighted_from_logits(self):
    # Same data as test_unweighted, but passed as logits.
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(
          num_thresholds=self.num_thresholds,
          multi_label=True,
          from_logits=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(self.y_true_good, self.y_pred_logits)

      # tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
      # fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
      expected_result = (0.875 + 1.0) / 2.0
      self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_sample_weight_flat(self):
    # Flattened curve with one weight per example (broadcast across labels).
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, multi_label=False)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true_good, self.y_pred, sample_weight=[1, 2, 3, 4])

    # tpr = [1, 1, 0.2857, 0.2857, 0]
    # fpr = [1, 0.3333, 0, 0, 0]
    expected_result = 1.0 - (0.3333 * (1.0 - 0.2857) / 2.0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_full_sample_weight_flat(self):
    # Flattened curve with a full (example, label)-shaped weight matrix.
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, multi_label=False)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    sw = np.arange(4 * 2)
    sw = sw.reshape(4, 2)
    result = auc_obj(self.y_true_good, self.y_pred, sample_weight=sw)

    # tpr = [1, 1, 0.2727, 0.2727, 0]
    # fpr = [1, 0.3333, 0, 0, 0]
    expected_result = 1.0 - (0.3333 * (1.0 - 0.2727) / 2.0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_label_weights(self):
    # Per-label weights change the average over the per-label AUCs.
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(
          num_thresholds=self.num_thresholds,
          multi_label=True,
          label_weights=[0.75, 0.25])
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(self.y_true_good, self.y_pred)

      # tpr = [[1, 1, 0.5, 0.5, 0], [1, 1, 0, 0, 0]]
      # fpr = [[1, 0.5, 0, 0, 0], [1, 0, 0, 0, 0]]
      expected_result = (0.875 * 0.75 + 1.0 * 0.25) / (0.75 + 0.25)
      self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_label_weights_flat(self):
    # In flat mode, label weights scale the confusion counts instead.
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds,
        multi_label=False,
        label_weights=[0.75, 0.25])
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true_good, self.y_pred)

    # tpr = [1, 1, 0.375, 0.375, 0]
    # fpr = [1, 0.375, 0, 0, 0]
    expected_result = 1.0 - ((1.0 - 0.375) * 0.375 / 2.0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-2)

  def test_unweighted_flat(self):
    self.setup()
    auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, multi_label=False)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true_good, self.y_pred)

    # tp = [4, 4, 1, 1, 0]
    # fp = [4, 1, 0, 0, 0]
    # fn = [0, 0, 3, 3, 4]
    # tn = [0, 3, 4, 4, 4]

    # tpr = [1, 1, 0.25, 0.25, 0]
    # fpr = [1, 0.25, 0, 0, 0]
    expected_result = 1.0 - (3.0 / 32.0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_unweighted_flat_from_logits(self):
    # Same data as test_unweighted_flat, but passed as logits.
    self.setup()
    auc_obj = metrics.AUC(
        num_thresholds=self.num_thresholds, multi_label=False, from_logits=True)
    self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
    result = auc_obj(self.y_true_good, self.y_pred_logits)

    # tp = [4, 4, 1, 1, 0]
    # fp = [4, 1, 0, 0, 0]
    # fn = [0, 0, 3, 3, 4]
    # tn = [0, 3, 4, 4, 4]

    # tpr = [1, 1, 0.25, 0.25, 0]
    # fpr = [1, 0.25, 0, 0, 0]
    expected_result = 1.0 - (3.0 / 32.0)
    self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_manual_thresholds(self):
    with self.test_session():
      self.setup()
      # Verify that when specified, thresholds are used instead of
      # num_thresholds.
      auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5],
                            multi_label=True)
      self.assertEqual(auc_obj.num_thresholds, 3)
      self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(self.y_true_good, self.y_pred)

      # tp = [[2, 1, 0], [2, 0, 0]]
      # fp = [2, 0, 0], [2, 0, 0]]
      # fn = [[0, 1, 2], [0, 2, 2]]
      # tn = [[0, 2, 2], [0, 2, 2]]

      # tpr = [[1, 0.5, 0], [1, 0, 0]]
      # fpr = [[1, 0, 0], [1, 0, 0]]

      # auc by slice = [0.75, 0.5]
      expected_result = (0.75 + 0.5) / 2.0
      self.assertAllClose(self.evaluate(result), expected_result, 1e-3)

  def test_weighted_roc_interpolation(self):
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(num_thresholds=self.num_thresholds,
                            multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      result = auc_obj(
          self.y_true_good, self.y_pred, sample_weight=self.sample_weight)

      # tpr = [[1, 1, 0.57, 0.57, 0], [1, 1, 0, 0, 0]]
      # fpr = [[1, 0.67, 0, 0, 0], [1, 0, 0, 0, 0]]
      expected_result = 1.0 - 0.5 * 0.43 * 0.67
      self.assertAllClose(self.evaluate(result), expected_result, 1e-1)

  def test_pr_interpolation_unweighted(self):
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR',
                            multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      good_result = auc_obj(self.y_true_good, self.y_pred)
      with self.subTest(name='good'):
        # PR AUCs are 0.917 and 1.0 respectively
        self.assertAllClose(self.evaluate(good_result), (0.91667 + 1.0) / 2.0,
                            1e-1)
      bad_result = auc_obj(self.y_true_bad, self.y_pred)
      with self.subTest(name='bad'):
        # PR AUCs are 0.917 and 0.5 respectively
        self.assertAllClose(self.evaluate(bad_result), (0.91667 + 0.5) / 2.0,
                            1e-1)

  def test_pr_interpolation(self):
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR',
                            multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      good_result = auc_obj(self.y_true_good, self.y_pred,
                            sample_weight=self.sample_weight)
      # PR AUCs are 0.939 and 1.0 respectively
      self.assertAllClose(self.evaluate(good_result), (0.939 + 1.0) / 2.0,
                          1e-1)

  def test_keras_model_compiles(self):
    # Smoke test: a multi-label AUC metric must be accepted by model.compile.
    inputs = layers.Input(shape=(10,))
    output = layers.Dense(3, activation='sigmoid')(inputs)
    model = models.Model(inputs=inputs, outputs=output)
    model.compile(
        loss='binary_crossentropy',
        metrics=[metrics.AUC(multi_label=True)]
    )

  def test_reset_state(self):
    # reset_state must zero the (num_thresholds, num_labels) accumulators.
    with self.test_session():
      self.setup()
      auc_obj = metrics.AUC(num_thresholds=self.num_thresholds,
                            multi_label=True)
      self.evaluate(tf.compat.v1.variables_initializer(auc_obj.variables))
      auc_obj(self.y_true_good, self.y_pred)
      auc_obj.reset_state()
      self.assertAllEqual(auc_obj.true_positives, np.zeros((5, 2)))
@combinations.generate(combinations.combine(mode=['eager']))
class ThresholdsTest(tf.test.TestCase, parameterized.TestCase):
  """Covers the `_thresholds_distributed_evenly` fast path of the metrics."""

  @parameterized.parameters([
      metrics.TruePositives(),
      metrics.TrueNegatives(),
      metrics.FalsePositives(),
      metrics.FalseNegatives(),
      metrics.Precision(),
      metrics.Recall(),
      metrics.SensitivityAtSpecificity(0.5),
      metrics.SpecificityAtSensitivity(0.5),
      metrics.PrecisionAtRecall(0.5),
      metrics.RecallAtPrecision(0.5),
      metrics.AUC()])
  def test_with_default_thresholds(self, metric_obj):
    # Default thresholds are evenly spaced whenever there is more than one
    # of them; a single threshold can never count as "evenly distributed".
    want_evenly = len(metric_obj.thresholds) > 1
    self.assertEqual(metric_obj._thresholds_distributed_evenly, want_evenly)

  @parameterized.parameters([
      metrics.TruePositives,
      metrics.TrueNegatives,
      metrics.FalsePositives,
      metrics.FalseNegatives,
      metrics.Precision,
      metrics.Recall])
  def test_with_manual_thresholds(self, metric_cls):
    evenly_spaced = [0.0, 0.25, 0.5, 0.75, 1.0]
    self.assertTrue(
        metric_cls(thresholds=evenly_spaced)._thresholds_distributed_evenly)

    unevenly_spaced = [0.0, 0.45, 1.0]
    self.assertFalse(
        metric_cls(thresholds=unevenly_spaced)._thresholds_distributed_evenly)

  def test_manual_thresholds_auc(self):
    # AUC treats manual thresholds specially: it prepends 0.0 and appends
    # 1.0 on the user's behalf.
    auc = metrics.AUC(thresholds=[0.25, 0.5, 0.75])
    self.assertTrue(auc._thresholds_distributed_evenly)
    # The flag must survive a save/restore round trip through the config.
    restored = metrics.AUC.from_config(auc.get_config())
    self.assertTrue(restored._thresholds_distributed_evenly)

    auc = metrics.AUC(thresholds=[0.45])
    self.assertFalse(auc._thresholds_distributed_evenly)
    restored = metrics.AUC.from_config(auc.get_config())
    self.assertFalse(restored._thresholds_distributed_evenly)

  @parameterized.parameters([
      metrics.TruePositives,
      metrics.TrueNegatives,
      metrics.FalsePositives,
      metrics.FalseNegatives,
      metrics.Precision,
      metrics.Recall,
      metrics.AUC])
  def test_even_thresholds_correctness(self, metric_cls):
    with tf.compat.forward_compatibility_horizon(2021, 6, 9):
      # The optimized (evenly-distributed) path must agree with the legacy
      # per-threshold path on identical data.
      labels = np.random.randint(2, size=(10,))
      predictions = np.random.rand(10)

      thresholds = [0.0, 0.25, 0.5, 0.75, 1.0]
      if metric_cls == metrics.AUC:
        # AUC adds the 0.0/1.0 endpoints itself, so drop them here.
        thresholds = thresholds[1:-1]

      optimized = metric_cls(thresholds=thresholds)
      optimized.update_state(labels, predictions)

      legacy = metric_cls(thresholds=thresholds)
      legacy._thresholds_distributed_evenly = False  # force the old path
      legacy.update_state(labels, predictions)

      self.assertAllClose(optimized.result(), legacy.result())
      # The underlying accumulators (tp/tn/fp/fn) must match as well.
      for new_var, old_var in zip(optimized.variables, legacy.variables):
        self.assertAllClose(new_var, old_var)

  @parameterized.parameters([
      metrics.SensitivityAtSpecificity,
      metrics.SpecificityAtSensitivity,
      metrics.PrecisionAtRecall,
      metrics.RecallAtPrecision])
  def test_even_thresholds_correctness_2(self, metric_cls):
    with tf.compat.forward_compatibility_horizon(2021, 6, 9):
      labels = np.random.randint(2, size=(10,))
      predictions = np.random.rand(10)

      optimized = metric_cls(0.5)
      optimized.update_state(labels, predictions)

      legacy = metric_cls(0.5)
      legacy._thresholds_distributed_evenly = False  # force the old path
      legacy.update_state(labels, predictions)

      self.assertAllClose(optimized.result(), legacy.result())
      # The underlying accumulators (tp/tn/fp/fn) must match as well.
      for new_var, old_var in zip(optimized.variables, legacy.variables):
        self.assertAllClose(new_var, old_var)
# Run all TensorFlow test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 79,387 | 40.827187 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.