| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
RecSys_PyTorch | RecSys_PyTorch-master/models/BaseModel.py | import torch.nn as nn
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def forward(self, *input):
pass
def fit(self, *input):
pass
def predict(self, eval_users, eval_pos, test_batch_size):
pass | 278 | 18.928571 | 61 | py |
RecSys_PyTorch | RecSys_PyTorch-master/models/SLIMElastic.py | """
Xia Ning et al., SLIM: Sparse Linear Methods for Top-N Recommender Systems. ICDM 2011.
http://glaros.dtc.umn.edu/gkhome/fetch/papers/SLIM2011icdm.pdf
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
from tqdm import tqdm
from sklearn.linear_model import ElasticNet
from .BaseModel import BaseModel
class SLIM(BaseModel):
def __init__(self, dataset, hparams, device):
super(SLIM, self).__init__()
self.num_users = dataset.num_users
self.num_items = dataset.num_items
self.l1_reg = hparams['l1_reg']
self.l2_reg = hparams['l2_reg']
self.topk = hparams['topk']
self.device = device
alpha = self.l1_reg + self.l2_reg
l1_ratio = self.l1_reg / alpha
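        # sklearn's ElasticNet minimizes
        #   (1 / (2 * n_samples)) * ||y - X @ w||^2 + alpha * l1_ratio * ||w||_1
        #     + 0.5 * alpha * (1 - l1_ratio) * ||w||^2,
        # so alpha = l1_reg + l2_reg and l1_ratio = l1_reg / (l1_reg + l2_reg)
        # recover the SLIM penalties above (up to the 1/(2 * n_samples) data-term scaling).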
self.slim = ElasticNet(alpha=alpha,
l1_ratio=l1_ratio,
positive=True,
fit_intercept=False,
copy_X=False,
precompute=True,
selection='random',
max_iter=300,
tol=1e-3)
def fit_slim(self, train_matrix, num_blocks=10000000):
num_items = train_matrix.shape[1]
# Use array as it reduces memory requirements compared to lists
rows = np.zeros(num_blocks, dtype=np.int32)
cols = np.zeros(num_blocks, dtype=np.int32)
values = np.zeros(num_blocks, dtype=np.float32)
numCells = 0
tqdm_iterator = tqdm(range(num_items), desc='# items covered', total=num_items)
for item in tqdm_iterator:
y = train_matrix[:, item].toarray()
            # zero out the current item's column of X so the item cannot be used to predict itself
start_pos = train_matrix.indptr[item]
end_pos = train_matrix.indptr[item + 1]
current_item_data_backup = train_matrix.data[start_pos: end_pos].copy()
train_matrix.data[start_pos: end_pos] = 0.0
self.slim.fit(train_matrix, y)
# Select topK values
            # Sorting is done in three steps. Faster than plain np.argsort for a higher number of items
# - Partition the data to extract the set of relevant items
# - Sort only the relevant items
# - Get the original item index
# nonzero_model_coef_index = self.model.coef_.nonzero()[0]
# nonzero_model_coef_value = self.model.coef_[nonzero_model_coef_index]
nonzero_model_coef_index = self.slim.sparse_coef_.indices
nonzero_model_coef_value = self.slim.sparse_coef_.data
local_topK = min(len(nonzero_model_coef_value)-1, self.topk)
relevant_items_partition = (-nonzero_model_coef_value).argpartition(local_topK)[0:local_topK]
relevant_items_partition_sorting = np.argsort(-nonzero_model_coef_value[relevant_items_partition])
ranking = relevant_items_partition[relevant_items_partition_sorting]
for index in range(len(ranking)):
if numCells == len(rows):
rows = np.concatenate((rows, np.zeros(num_blocks, dtype=np.int32)))
cols = np.concatenate((cols, np.zeros(num_blocks, dtype=np.int32)))
values = np.concatenate((values, np.zeros(num_blocks, dtype=np.float32)))
rows[numCells] = nonzero_model_coef_index[ranking[index]]
cols[numCells] = item
values[numCells] = nonzero_model_coef_value[ranking[index]]
numCells += 1
train_matrix.data[start_pos:end_pos] = current_item_data_backup
self.W_sparse = sp.csr_matrix((values[:numCells], (rows[:numCells], cols[:numCells])), shape=(num_items, num_items), dtype=np.float32)
def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
train_matrix = dataset.train_data.tocsc()
self.fit_slim(train_matrix)
output = train_matrix.tocsr() @ self.W_sparse
        # BCE expects predictions first and binary targets second; clamp the unbounded scores into [0, 1]
        loss = F.binary_cross_entropy(torch.tensor(output.toarray()).clamp(0, 1), torch.tensor(train_matrix.toarray()))
if evaluator is not None:
scores = evaluator.evaluate(self)
else:
scores = None
if loggers is not None:
if evaluator is not None:
for logger in loggers:
logger.log_metrics(scores, epoch=1)
return {'scores': scores, 'loss': loss}
def predict(self, eval_users, eval_pos, test_batch_size):
input_matrix = eval_pos.toarray()
preds = np.zeros_like(input_matrix)
num_data = input_matrix.shape[0]
num_batches = int(np.ceil(num_data / test_batch_size))
perm = list(range(num_data))
for b in range(num_batches):
if (b + 1) * test_batch_size >= num_data:
batch_idx = perm[b * test_batch_size:]
else:
batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
test_batch_matrix = input_matrix[batch_idx]
batch_pred_matrix = (test_batch_matrix @ self.W_sparse)
preds[batch_idx] = batch_pred_matrix
preds[eval_pos.nonzero()] = float('-inf')
return preds | 5,363 | 38.441176 | 142 | py |
RecSys_PyTorch | RecSys_PyTorch-master/models/EASE.py | """
Harald Steck, Embarrassingly Shallow Autoencoders for Sparse Data. WWW 2019.
https://arxiv.org/pdf/1905.03375
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .BaseModel import BaseModel
class EASE(BaseModel):
def __init__(self, dataset, hparams, device):
super(EASE, self).__init__()
self.num_users = dataset.num_users
self.num_items = dataset.num_items
self.reg = hparams['reg']
self.device = device
self.to(self.device)
def forward(self, rating_matrix):
G = (rating_matrix.T @ rating_matrix).toarray()
diag = np.diag_indices(G.shape[0])
G[diag] += self.reg
P = np.linalg.inv(G)
self.enc_w = P / (-np.diag(P))
self.enc_w[diag] = 0
# Calculate the output matrix for prediction
output = rating_matrix @ self.enc_w
return output
def fit(self, dataset, exp_config, evaluator=None, early_stop=None, loggers=None):
self.train()
# Solve EASE
train_matrix = dataset.train_data
output = self.forward(train_matrix)
        # BCE expects predictions first and binary targets second; clamp the unbounded scores into [0, 1]
        loss = F.binary_cross_entropy(torch.tensor(np.asarray(output)).clamp(0, 1), torch.tensor(train_matrix.toarray()))
if evaluator is not None:
scores = evaluator.evaluate(self)
else:
scores = None
if loggers is not None:
if evaluator is not None:
for logger in loggers:
logger.log_metrics(scores, epoch=1)
return {'scores': scores, 'loss': loss}
def predict(self, eval_users, eval_pos, test_batch_size):
input_matrix = eval_pos.toarray()
preds = np.zeros_like(input_matrix)
num_data = input_matrix.shape[0]
num_batches = int(np.ceil(num_data / test_batch_size))
perm = list(range(num_data))
for b in range(num_batches):
if (b + 1) * test_batch_size >= num_data:
batch_idx = perm[b * test_batch_size:]
else:
batch_idx = perm[b * test_batch_size: (b + 1) * test_batch_size]
test_batch_matrix = input_matrix[batch_idx]
batch_pred_matrix = (test_batch_matrix @ self.enc_w)
preds[batch_idx] = batch_pred_matrix
preds[eval_pos.nonzero()] = float('-inf')
return preds | 2,379 | 30.733333 | 97 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/base.py | import abc
from typing import Any, Dict, MutableMapping
from argparse import Namespace
import torch
import numpy as np
class Logger(abc.ABC):
def __init__(self):
super().__init__()
def setup_logger(self):
pass
# @abc.abstractmethod
# def log_hparams(self, hparams):
# raise NotImplementedError('setup_logger is not implemented.')
# @abc.abstractmethod
# def log_metrics(self, metrics, epoch=None):
# raise NotImplementedError('setup_logger is not implemented.')
def log_image(self, image_name, image, epoch=None):
pass
def log_artifact(self, artifact, destination=None):
pass
def save(self):
pass
def add_dict_prefix(self, dictionary, prefix=None):
if prefix:
return {prefix + k: v for k, v in dictionary.items()}
else:
return dictionary
    def _flatten_dict(self, params: Dict[str, Any], delimiter: str = '/') -> Dict[str, Any]:
"""
Flatten hierarchical dict, e.g. ``{'a': {'b': 'c'}} -> {'a/b': 'c'}``.
Args:
params: Dictionary containing the hyperparameters
delimiter: Delimiter to express the hierarchy. Defaults to ``'/'``.
Returns:
Flattened dict.
Examples:
        >>> Logger()._flatten_dict({'a': {'b': 'c'}})
        {'a/b': 'c'}
        >>> Logger()._flatten_dict({'a': {'b': 123}})
        {'a/b': 123}
"""
def _dict_generator(input_dict, prefixes=None):
prefixes = prefixes[:] if prefixes else []
if isinstance(input_dict, MutableMapping):
for key, value in input_dict.items():
if isinstance(value, (MutableMapping, Namespace)):
value = vars(value) if isinstance(value, Namespace) else value
for d in _dict_generator(value, prefixes + [key]):
yield d
else:
yield prefixes + [key, value if value is not None else str(None)]
else:
yield prefixes + [input_dict if input_dict is None else str(input_dict)]
return {delimiter.join(keys): val for *keys, val in _dict_generator(params)}
    def _sanitize_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
"""
        Returns params with non-primitives converted to strings for logging.
>>> params = {"float": 0.3,
... "int": 1,
... "string": "abc",
... "bool": True,
... "list": [1, 2, 3],
... "namespace": Namespace(foo=3),
... "layer": torch.nn.BatchNorm1d}
>>> import pprint
        >>> pprint.pprint(Logger()._sanitize_params(params))  # doctest: +NORMALIZE_WHITESPACE
{'bool': True,
'float': 0.3,
'int': 1,
'layer': "<class 'torch.nn.modules.batchnorm.BatchNorm1d'>",
'list': '[1, 2, 3]',
'namespace': 'Namespace(foo=3)',
'string': 'abc'}
"""
return {k: v if type(v) in [bool, int, float, str, torch.Tensor] else str(v) for k, v in params.items()}
if __name__ == '__main__':
test_dict = {
'a': 1,
'b': [1,2,3],
'c': '[1, 2, 3]',
'd': {
'd1': 123,
'd2': 1
}
}
logger = Logger()
flattened = logger._flatten_dict(test_dict)
sanitized = logger._sanitize_params(flattened)
print(sanitized) | 3,641 | 33.685714 | 112 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/file_logger.py | import os
import logging
from time import strftime, sleep
from loggers.base import Logger
class FileLogger(Logger):
def __init__(self, log_dir):
log_file_format = "[%(lineno)d]%(asctime)s: %(message)s"
log_console_format = "%(message)s"
# Main logger
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)
self.logger = logging.getLogger(log_dir)
self.logger.setLevel(logging.INFO)
self.logger.propagate = False
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter(log_console_format))
self.logger.addHandler(console_handler)
file_handler = logging.FileHandler(os.path.join(log_dir, 'experiments.log'))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(log_file_format))
self.logger.addHandler(file_handler)
def log_metrics(self, metrics, epoch=None, prefix=None):
log_str = ''
if epoch is not None:
log_str += '[epoch %3d]' % epoch
metric_str_list = ['%s=%.4f' % (k, v) for k, v in metrics.items()]
log_str += ', '.join(metric_str_list)
self.info(log_str)
def save(self):
pass
def info(self, msg):
self.logger.info(msg)
def close(self):
for handle in self.logger.handlers:
handle.close()
self.logger.removeHandler(handle) | 1,467 | 30.234043 | 84 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/tensorboard.py | import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams as hparams_tb
from loggers.base import Logger
class TensorboardLogger(Logger):
def __init__(self,
log_dir:str,
experiment_name:str,
hparams:dict,
log_graph:bool=False):
self.log_dir = log_dir
self.experiment_name = experiment_name
self.log_graph = log_graph
self.hparams = hparams
self.initialize()
@property
def system_property(self):
return self.experiment.get_system_properties()
def initialize(self):
self.experiment = SummaryWriter(log_dir=self.log_dir)
def add_dict_prefix(self, metrics, metric_prefix):
return {f'{metric_prefix}/{k}':v for k, v in metrics.items()}
def log_hparams(self, hparams, metrics=None):
self.hparams.update(hparams)
flattened = self._flatten_dict(self.hparams)
        for k, v in flattened.items():
            if not isinstance(v, (int, float, str, bool, torch.Tensor)):
                flattened[k] = str(v)
if metrics:
self.experiment.add_hparams(flattened, dict(metrics))
def _log_metric(self, metric_name, value, epoch=None):
# metric_name = self.add_metric_prefix(metric_name)
if epoch is None:
self.experiment.add_scalar(metric_name, value)
else:
self.experiment.add_scalar(metric_name, value, epoch)
def log_metrics(self, metrics, epoch=None, prefix=None):
if prefix is not None:
metrics = self.add_dict_prefix(metrics, prefix)
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
try:
self._log_metric(k, v, epoch)
            except Exception as e:
                m = f'\n you tried to log {v} which is not currently supported. Try a dict or a scalar/tensor.'
                raise type(e)(str(e) + m) from e
def log_image(self, image_name, image, epoch=None):
if epoch is None:
self.experiment.add_image(image_name, image)
else:
self.experiment.add_image(image_name, image, epoch)
def log_artifact(self, artifact, destination=None):
pass | 2,333 | 34.907692 | 111 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/neptune.py | import neptune
class NeptuneLogger:
def __init__(self,
api_key:str,
project_name:str,
experiment_name:str,
description:str,
tags:str,
hparams:dict,
upload_source_files:list=None,
hostname:str='my-server',
offline:bool=False):
self.api_key = api_key
self.project_name = project_name
self.experiment_name = experiment_name
self.description = description
self.tags = tags
self.hparams = hparams
self.upload_source_files=upload_source_files
self.hostname = hostname
self.offline = offline
self.initialize()
@property
def system_property(self):
return self.experiment.get_system_properties()
def initialize(self):
# Get experiment
if self.offline:
project = neptune.Session(backend=neptune.OfflineBackend()).get_project('dry-run/project')
else:
session = neptune.Session.with_default_backend(api_token=self.api_key)
project = session.get_project(self.project_name)
exp = project.create_experiment(
name=self.experiment_name,
description=self.description,
params=self.hparams,
tags=self.tags,
upload_source_files=self.upload_source_files,
hostname=self.hostname)
self.experiment = exp
def log_hparams(self, hparams):
for key, val in hparams.items():
self.experiment.set_property(key, val)
def _log_metric(self, metric_name, value, epoch=None):
if epoch is None:
self.experiment.log_metric(metric_name, value)
else:
self.experiment.log_metric(metric_name, epoch, value)
def log_metrics(self, metrics, epoch=None):
for k, v in metrics.items():
self._log_metric(k, v, epoch)
def log_image(self, image_name, image, epoch=None):
if epoch is None:
self.experiment.log_image(image_name, image)
else:
self.experiment.log_image(image_name, epoch, image)
def log_artifact(self, artifact, destination=None):
self.experiment.log_artifact(artifact, destination) | 2,314 | 32.550725 | 102 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/console_logger.py | from loggers.base import Logger
class ConsoleLogger(Logger):
def __init__(self, log_dir):
self.log_dir = log_dir
def log_metrics(self, metrics, epoch=None, prefix=None):
log_str = ''
if epoch is not None:
log_str += '[epoch %3d]' % epoch
metric_str_list = ['%s=%.4f' % (k, v) for k, v in metrics.items()]
log_str += ', '.join(metric_str_list)
print(log_str)
def save(self):
pass
def info(self, msg):
print(msg)
    def close(self):
        # ConsoleLogger owns no handlers, so there is nothing to release.
        pass | 667 | 24.692308 | 74 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/csv_logger.py | import io
import os
import csv
from time import strftime
from collections import OrderedDict
from loggers.base import Logger
class CSVLogger(Logger):
LOG_FILE = 'results.csv'
def __init__(self, log_dir):
self.log_dir = log_dir
self.hparams = {}
self.metrics_history = None
self.headers = None
def log_metrics(self, metrics, epoch=None, prefix=None):
if prefix:
            metrics = self.add_dict_prefix(metrics, prefix)
if self.metrics_history is None:
self.metrics_history = []
self.headers = list(metrics.keys())
if 'epoch' not in self.headers:
self.headers = ['epoch'] + self.headers
for key in self.headers:
if key not in metrics:
metrics[key] = ' - '
for key in metrics:
if key not in self.headers:
self.headers.append(key)
if epoch is None:
epoch = len(self.metrics_history)
metrics['epoch'] = epoch
self.metrics_history.append(self.ensure_ordered_dict(metrics))
def check_columns(self, d):
pass
def ensure_ordered_dict(self, d):
if isinstance(d, OrderedDict):
return d
else:
return OrderedDict(d)
def save(self):
if not self.metrics_history:
return
if 'train_loss' in self.headers:
idx = self.headers.index('train_loss')
self.headers = [self.headers.pop(idx)] + self.headers
if 'elapsed' in self.headers:
idx = self.headers.index('elapsed')
self.headers = [self.headers.pop(idx)] + self.headers
if 'epoch' in self.headers:
idx = self.headers.index('epoch')
self.headers = [self.headers.pop(idx)] + self.headers
log_file = os.path.join(self.log_dir, self.LOG_FILE)
with io.open(log_file, 'w', newline='') as f:
self.writer = csv.DictWriter(f, fieldnames=self.headers)
self.writer.writeheader()
self.writer.writerows(self.metrics_history) | 2,156 | 29.380282 | 70 | py |
RecSys_PyTorch | RecSys_PyTorch-master/loggers/__init__.py | from .csv_logger import CSVLogger
from .file_logger import FileLogger
from .neptune import NeptuneLogger
# from .tensorboard import TensorboardLogger
from .console_logger import ConsoleLogger | 191 | 37.4 | 44 | py |
RecSys_PyTorch | RecSys_PyTorch-master/utils/stats.py | import numpy as np
class Statistics:
def __init__(self, name='AVG'):
self.name = name
self.history = []
self.sum = 0
self.cnt = 0
def update(self, val):
if isinstance(val, list):
self.history += val
self.sum += sum(val)
self.cnt += len(val)
        elif isinstance(val, (int, float, np.integer, np.floating)):
self.history.append(val)
self.sum += val
self.cnt += 1
else:
raise TypeError("\'val\' should be float, int or list of them.")
@property
def mean_std(self):
# mean = self.sum / self.cnt
mean = np.mean(self.history, dtype=np.float32)
std = np.std(self.history, dtype=np.float32)
return mean, std
@property
def mean(self):
# return self.sum / self.cnt
return np.mean(self.history, dtype=np.float32)
@property
def std(self):
return np.std(self.history, dtype=np.float32)
def __repr__(self):
return '%s: mean=%.4f, std=%.4f' % (self.name, self.mean, self.std) | 1,095 | 27.102564 | 76 | py |
RecSys_PyTorch | RecSys_PyTorch-master/utils/types.py | import pandas as pd
import scipy.sparse as sp
from typing import Tuple
def df_to_sparse(df: pd.DataFrame, shape: Tuple[int, int]) -> sp.csr_matrix:
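    """Convert a (user, item, rating) DataFrame into a CSR user-item matrix.
    Duplicate (user, item) pairs are summed during scipy's COO-to-CSR conversion.
    """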
users = df.user
items = df.item
ratings = df.rating
sp_matrix = sp.csr_matrix((ratings, (users, items)), shape=shape)
return sp_matrix
def sparse_to_dict(sparse: sp.csr_matrix) -> dict:
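    """Map each row index to the array of its nonzero column indices,
    read directly from the CSR indptr/indices arrays (no dense conversion).
    """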
if isinstance(sparse, dict):
return sparse
ret_dict = {}
dim1 = sparse.shape[0]
for i in range(dim1):
ret_dict[i] = sparse.indices[sparse.indptr[i]: sparse.indptr[i+1]]
return ret_dict | 582 | 26.761905 | 76 | py |
RecSys_PyTorch | RecSys_PyTorch-master/utils/config.py | import os
import sys
import warnings
from collections import OrderedDict
from configparser import ConfigParser
class Config:
def __init__(self, main_conf_path):
self.main_config = self.read_main_config(main_conf_path)
exp_config = self.main_config['Experiment']
self.model_config = self.read_model_config(exp_config['model_name'])
self.read_system_arguments()
def read_main_config(self, main_conf_path):
main_config = OrderedDict()
config = ConfigParser()
config.read(main_conf_path)
for section in config.sections():
section_config = OrderedDict(config[section].items())
main_config[section] = self.type_ensurance(section_config)
return main_config
def save_main_config(self, path):
config_to_write = ConfigParser()
config_to_write.read_dict(self.main_config)
with open(path, 'w') as f:
config_to_write.write(f)
del config_to_write
def read_model_config(self, model_name):
model_config = OrderedDict()
model_config_path = os.path.join('conf', model_name + '.cfg')
config = ConfigParser()
config.read(model_config_path)
for section in config.sections():
section_config = OrderedDict(config[section].items())
model_config[section] = self.type_ensurance(section_config)
return model_config
def save_model_config(self, path):
config_to_write = ConfigParser()
config_to_write.read_dict(self.model_config)
with open(path, 'w') as f:
config_to_write.write(f)
del config_to_write
def read_system_arguments(self):
# apply system arguments if exist
argv = sys.argv[1:]
if len(argv) > 0:
cmd_arg = dict()
argvs = ' '.join(sys.argv[1:]).split(' ')
for i in range(0, len(argvs), 2):
arg_name, arg_value = argvs[i], argvs[i+1]
arg_name = arg_name.strip('-')
cmd_arg[arg_name] = arg_value
self.update_params(self.type_ensurance(cmd_arg))
def type_ensurance(self, config):
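        """Best-effort type coercion for config values parsed as strings:
        try Python evaluation first, fall back to booleans, and otherwise
        keep the raw string.
        """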
BOOLEAN = {'true': True, 'false': False,
'True': True, 'False': False}
for k, v in config.items():
if isinstance(v, (int, float, list)):
value = v
else:
try:
value = eval(v)
if not isinstance(value, (str, int, float, list, tuple)):
value = v
except:
if v.lower() in BOOLEAN:
v = BOOLEAN[v.lower()]
value = v
config[k] = value
return config
def get_param(self, section, param):
if section in self.main_config:
section = self.main_config[section]
elif section in self.model_config:
section = self.model_config[section]
else:
raise NameError("There are not the parameter named '%s'" % section)
if param in section:
value = section[param]
else:
raise NameError("There are not the parameter named '%s'" % param)
return value
def update_params(self, params):
# for now, assume 'params' is dictionary
params = self.type_ensurance(params)
for k, v in params.items():
updated=False
for section in self.main_config:
if k in self.main_config[section]:
self.main_config[section][k] = v
updated = True
if k == 'model_name':
self.model_config = self.read_model_config(v)
break
if not updated:
for section in self.model_config:
if k in self.model_config[section]:
self.model_config[section][k] = v
updated = True
break
if not updated:
            warnings.warn(f'{k}={v} is not recognized in the main/model config and has no effect.')
def __getitem__(self, item):
if not isinstance(item, str):
raise TypeError("index must be a str")
if item in self.main_config:
section = self.main_config[item]
elif item in self.model_config:
section = self.model_config[item]
else:
raise NameError("There are not the parameter named '%s'" % item)
return section
def __str__(self):
config_str = '\n'
config_str += '>>>>> Main Config\n'
for section in self.main_config:
config_str += '[%s]\n' % section
config_str += '\n'.join(['{}: {}'.format(k, self.main_config[section][k]) for k in self.main_config[section]])
config_str += '\n\n'
config_str += '>>>>> model Config\n'
for section in self.model_config:
config_str += '[%s]\n' % section
config_str += '\n'.join(['{}: {}'.format(k, self.model_config[section][k]) for k in self.model_config[section]])
config_str += '\n\n'
return config_str
if __name__ == '__main__':
param = Config('../main_config.cfg')
print(param)
| 5,362 | 32.51875 | 124 | py |
RecSys_PyTorch | RecSys_PyTorch-master/utils/general.py | import os
import math
import time
import datetime
import random
import numpy as np
import torch
def make_log_dir(save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
existing_dirs = os.listdir(save_dir)
if len(existing_dirs) == 0:
idx = 0
else:
idx_list = sorted([int(d.split('_')[0]) for d in existing_dirs])
idx = idx_list[-1] + 1
cur_log_dir = '%d_%s' % (idx, time.strftime('%Y%m%d-%H%M'))
full_log_dir = os.path.join(save_dir, cur_log_dir)
if not os.path.exists(full_log_dir):
os.mkdir(full_log_dir)
else:
full_log_dir = make_log_dir(save_dir)
return full_log_dir
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def getlocaltime():
    date = time.strftime('%y-%m-%d', time.localtime())
    current_time = time.strftime('%H:%M:%S', time.localtime())
    return date, current_time
def seconds_to_hms(second):
return str(datetime.timedelta(seconds=second)) | 1,163 | 24.866667 | 72 | py |
RecSys_PyTorch | RecSys_PyTorch-master/utils/result_table.py | import numpy as np
from collections import OrderedDict
class ResultTable:
"""
    Class to accumulate and display results neatly.
First column is always 'NAME' column.
"""
def __init__(self, table_name='table', header=None, splitter='||', int_formatter='%3d', float_formatter='%.4f'):
"""
        Initialize table settings.
        :param str table_name: name printed above the table
        :param list header: list of table header strings
        :param str splitter: column separator string
        :param str int_formatter: printf-style format for integer values
        :param str float_formatter: printf-style format for float values
"""
self.table_name = table_name
self.header = header
if self.header is not None:
self.set_headers(self.header)
self.num_rows = 0
self.splitter = splitter
self.int_formatter = int_formatter
self.float_formatter = float_formatter
def set_headers(self, header):
"""
Set table headers as given and clear all data.
:param list header: list of header strings
:return: None
"""
self.header = ['NAME'] + header
self.data = OrderedDict([(h, []) for h in self.header])
self.max_len = OrderedDict([(h, len(h)) for h in self.header])
# {h: len(h) for h in self.header}
def add_row(self, row_name, row_dict):
"""
Add new row into the table.
:param str row_name: name of the row, which will be the first column
:param dict row_dict: dictionary containing column name as a key and column value as value.
:return: None
"""
# If header is not defined, fetch from input dict
if self.header is None:
self.set_headers(list(row_dict.keys()))
# If input dict has new column, make one
for key in row_dict:
if key not in self.data:
self.data[key] = ['-'] * self.num_rows
self.header.append(key)
for h in self.header:
if h == 'NAME':
self.data['NAME'].append(row_name)
self.max_len[h] = max(self.max_len['NAME'], len(row_name))
else:
# If input dict doesn't have values for table header, make empty value.
if h not in row_dict:
row_dict[h] = '-'
# convert input dict to string
d = row_dict[h]
if isinstance(d, (int, np.integer)):
d_str = self.int_formatter % d
                elif isinstance(d, (float, np.floating)):
d_str = self.float_formatter % d
elif isinstance(d, str):
d_str = d
else:
print('Table add row WARNING: Type %s converted to string' % type(d))
d_str = str(d)
# raise NotImplementedError('Type %s not implemented.' % type(d))
self.data[h].append(d_str)
self.max_len[h] = max(self.max_len[h], len(d_str))
self.num_rows += 1
def row_to_line(self, row_values):
"""
Convert a row into string form
:param list row_values: list of row values as string
:return: string form of a row
"""
value_str = []
for i, header in enumerate(self.header):
max_length = self.max_len[header]
length = len(row_values[i])
diff = max_length - length
# Center align
# left_space = diff // 2
# right_space = diff - left_space
# s = ' ' * left_space + row_values[i] + ' ' * right_space
# Left align
s = row_values[i] + ' ' * diff
value_str.append(s)
return self.splitter + ' ' + (' %s ' % self.splitter).join(value_str) + ' ' + self.splitter
def to_string(self):
"""
Convert a table into string form
:return: string form of the table
"""
size_per_col = {h: self.max_len[h] + 2 + len(self.splitter) for h in self.header}
line_len = sum([size_per_col[c] for c in size_per_col]) + len(self.splitter)
table_str = '\n'
# TABLE NAME
table_str += self.table_name + '\n'
# HEADER
line = self.row_to_line(self.header)
table_str += '=' * line_len + '\n'
table_str += line + '\n'
table_str += self.splitter + '-' * (line_len - len(self.splitter) * 2) + self.splitter + '\n'
# DATA
for row_values in zip(*self.data.values()):
line = self.row_to_line(row_values)
table_str += line + '\n'
table_str += '=' * line_len + '\n'
return table_str
def show(self):
print(self.to_string())
@property
def shape(self):
return (self.num_rows, self.num_cols)
@property
def num_cols(self):
return len(self.header) | 5,286 | 32.251572 | 116 | py |
RecSys_PyTorch | RecSys_PyTorch-master/data/generators.py | import torch
import numpy as np
class MatrixGenerator:
def __init__(self, input_matrix, return_index=False, batch_size=32, shuffle=True,
matrix_as_numpy=False, index_as_numpy=False, device=None):
super().__init__()
self.input_matrix = input_matrix
self.return_index = return_index
self._num_data = self.input_matrix.shape[0]
self.batch_size = batch_size
self.shuffle = shuffle
self.matrix_as_numpy = matrix_as_numpy
self.index_as_numpy = index_as_numpy
self.device = device
def __len__(self):
return int(np.ceil(self._num_data / self.batch_size))
def __iter__(self):
if self.shuffle:
perm = np.random.permutation(self._num_data)
else:
perm = np.arange(self._num_data, dtype=np.int32)
for b, st in enumerate(range(0, self._num_data, self.batch_size)):
ed = min(st + self.batch_size, self._num_data)
batch_idx = perm[st:ed]
if self.matrix_as_numpy:
batch_input = self.input_matrix[batch_idx].toarray()
else:
batch_input = torch.tensor(self.input_matrix[batch_idx].toarray(),
dtype=torch.float32, device=self.device)
if self.return_index:
if not self.index_as_numpy:
batch_idx = torch.tensor(batch_idx, dtype=torch.int64, device=self.device)
yield batch_input, batch_idx
else:
yield batch_input
class PointwiseGenerator:
def __init__(self, input_matrix, return_rating=True, as_numpy=False, negative_sample=True, num_negatives=1, batch_size=32, shuffle=True, device=None):
super().__init__()
self.input_matrix = input_matrix
self.return_rating = return_rating
self.negative_sample = negative_sample
self.num_negatives = num_negatives
self.as_numpy = as_numpy
self.batch_size = batch_size
self.shuffle = shuffle
self.device = device
self._construct()
def _construct(self):
num_users, num_items = self.input_matrix.shape
self.users = []
self.items = []
self.ratings = []
for u in range(num_users):
u_items = self.input_matrix[u].indices
u_ratings = self.input_matrix[u].data
self.users += [u] * len(u_items)
self.items += u_items.tolist()
if self.return_rating:
self.ratings += u_ratings.tolist()
self.users = np.array(self.users)
self.items = np.array(self.items)
self.ratings = np.array(self.ratings)
self._num_data = len(self.users)
    def sample_negatives(self, users):
        # Sample `num_negatives` unobserved items, uniformly at random, for each user in the batch
        # (the original looped over every user in the dataset and ignored the argument).
        num_users, num_items = self.input_matrix.shape
        neg_users = []
        negatives = []
        for u in users:
            u_pos_items = self.input_matrix[u].indices
            prob = np.ones(num_items)
            prob[u_pos_items] = 0.0
            prob = prob / prob.sum()
            neg_samples = np.random.choice(num_items, size=self.num_negatives, replace=False, p=prob)
            neg_users += [u] * len(neg_samples)
            negatives += neg_samples.tolist()
        neg_users = np.array(neg_users)
        negatives = np.array(negatives)
        ratings = np.zeros(len(neg_users), dtype=np.float32)
        return neg_users, negatives, ratings
def __len__(self):
return int(np.ceil(self._num_data / self.batch_size))
def __iter__(self):
if self.shuffle:
perm = np.random.permutation(self._num_data)
else:
perm = np.arange(self._num_data)
for b, st in enumerate(range(0, self._num_data, self.batch_size)):
ed = min(st + self.batch_size, self._num_data)
batch_idx = perm[st:ed]
batch_users = self.users[batch_idx]
batch_items = self.items[batch_idx]
if self.return_rating:
batch_ratings = self.ratings[batch_idx]
if self.negative_sample and self.num_negatives > 0:
neg_users, neg_items, neg_ratings = self.sample_negatives(batch_users)
batch_users = np.concatenate((batch_users, neg_users))
batch_items = np.concatenate((batch_items, neg_items))
batch_ratings = np.concatenate((batch_ratings, neg_ratings))
if not self.as_numpy:
batch_users = torch.tensor(batch_users, dtype=torch.long, device=self.device)
batch_items = torch.tensor(batch_items, dtype=torch.long, device=self.device)
batch_ratings = torch.tensor(batch_ratings, dtype=torch.float32, device=self.device)
yield batch_users, batch_items, batch_ratings
else:
if not self.as_numpy:
batch_users = torch.tensor(batch_users, dtype=torch.long, device=self.device)
batch_items = torch.tensor(batch_items, dtype=torch.long, device=self.device)
yield batch_users, batch_items
class PairwiseGenerator:
def __init__(self, input_matrix, as_numpy=False, num_positives_per_user=-1, num_negatives=1, batch_size=32, shuffle=True, device=None):
self.input_matrix = input_matrix
self.num_positives_per_user = num_positives_per_user
self.num_negatives = num_negatives
self.as_numpy = as_numpy
self.batch_size = batch_size
self.shuffle = shuffle
self.device = device
self._construct()
def _construct(self):
num_users, num_items = self.input_matrix.shape
# self.users = []
# self.items = []
# for u in range(num_users):
# u_items = self.input_matrix[u].indices
# self.users += [u] * len(u_items)
# self.items += u_items.tolist()
# self.users = np.array(self.users)
# self.items = np.array(self.items)
self._data = self.sample_negatives()
self._num_data = len(self._data[0])
def sample_negatives(self):
num_users, num_items = self.input_matrix.shape
users = []
positives = []
negatives = []
for u in range(num_users):
u_pos_items = self.input_matrix[u].indices
num_pos_user = len(u_pos_items)
prob = np.ones(num_items)
prob[u_pos_items] = 0.0
prob = prob / sum(prob)
            if 0 < self.num_positives_per_user < num_pos_user:
                # subsample: positives come from the user's observed items,
                # negatives from the unobserved ones (prob is zero on positives)
                pos_sampled = np.random.choice(u_pos_items, size=self.num_positives_per_user, replace=False)
                neg_sampled = np.random.choice(num_items, size=self.num_positives_per_user, replace=False, p=prob)
            else:
                # use all positives
                pos_sampled = u_pos_items
                neg_sampled = np.random.choice(num_items, size=num_pos_user, replace=False, p=prob)
assert len(pos_sampled) == len(neg_sampled)
users += [u] * len(neg_sampled)
positives += pos_sampled.tolist()
negatives += neg_sampled.tolist()
users = np.array(users)
positives = np.array(positives)
negatives = np.array(negatives)
return users, positives, negatives
def __len__(self):
return int(np.ceil(self._num_data / self.batch_size))
def __iter__(self):
if self.shuffle:
perm = np.random.permutation(self._num_data)
else:
perm = np.arange(self._num_data)
for b, st in enumerate(range(0, self._num_data, self.batch_size)):
ed = min(st + self.batch_size, self._num_data)
batch_idx = perm[st:ed]
batch_users = self._data[0][batch_idx]
batch_pos = self._data[1][batch_idx]
batch_neg = self._data[2][batch_idx]
if not self.as_numpy:
batch_users = torch.tensor(batch_users, dtype=torch.long, device=self.device)
batch_pos = torch.tensor(batch_pos, dtype=torch.long, device=self.device)
batch_neg = torch.tensor(batch_neg, dtype=torch.long, device=self.device)
yield batch_users, batch_pos, batch_neg | 8,480 | 36.861607 | 154 | py |
RecSys_PyTorch | RecSys_PyTorch-master/data/data_batcher.py | import torch
import numpy as np
class BatchSampler:
def __init__(self, data_size, batch_size, drop_remain=False, shuffle=False):
self.data_size = data_size
self.batch_size = batch_size
self.drop_remain = drop_remain
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
perm = np.random.permutation(self.data_size)
else:
perm = range(self.data_size)
batch_idx = []
for idx in perm:
batch_idx.append(idx)
if len(batch_idx) == self.batch_size:
yield batch_idx
batch_idx = []
if len(batch_idx) > 0 and not self.drop_remain:
yield batch_idx
def __len__(self):
if self.drop_remain:
return self.data_size // self.batch_size
else:
return int(np.ceil(self.data_size / self.batch_size))
class DataBatcher:
def __init__(self, *data_source, batch_size, drop_remain=False, shuffle=False):
self.data_source = list(data_source)
self.batch_size = batch_size
self.drop_remain = drop_remain
self.shuffle = shuffle
for i, d in enumerate(self.data_source):
if isinstance(d, list):
self.data_source[i] = np.array(d)
self.data_size = len(self.data_source[0])
        if len(self.data_source) > 1:
            if not all(len(src) == self.data_size for src in self.data_source):
                raise ValueError("All elements in data_source should have the same length")
self.sampler = BatchSampler(self.data_size, self.batch_size, self.drop_remain, self.shuffle)
self.iterator = iter(self.sampler)
self.n=0
def __next__(self):
batch_idx = next(self.iterator)
batch_data = tuple([data[batch_idx] for data in self.data_source])
if len(batch_data) == 1:
batch_data = batch_data[0]
return batch_data
def __iter__(self):
return self
def __len__(self):
return len(self.sampler) | 2,085 | 30.606061 | 100 | py |
RecSys_PyTorch | RecSys_PyTorch-master/data/data_loader.py | import math
import pickle
import numpy as np
import scipy.sparse as sp
def load_data_and_info(data_file, info_file, cv_flag, split_type):
with open(data_file, 'rb') as f:
data_dict = pickle.load(f)
with open(info_file, 'rb') as f:
info_dict = pickle.load(f)
user_id_dict = info_dict['user_id_dict']
user_to_num_items = info_dict['user_to_num_items']
item_id_dict = info_dict['item_id_dict']
item_to_num_users = info_dict['item_to_num_users']
num_users = data_dict['num_users']
num_items = data_dict['num_items']
if cv_flag:
k_fold_files = data_dict['k_fold_files']
return k_fold_files, user_id_dict, user_to_num_items, item_id_dict, item_to_num_users
elif split_type == 'hold-user-out':
train_dict = data_dict['train']
valid_input = data_dict['valid_input']
valid_target = data_dict['valid_target']
test_input = data_dict['test_input']
test_target = data_dict['test_target']
return train_dict, valid_input, valid_target, test_input, test_target, user_id_dict, user_to_num_items, item_id_dict, item_to_num_users
else:
train_sp_matrix = data_dict['train']
valid_sp_matrix = data_dict['valid']
test_sp_matrix = data_dict['test']
return train_sp_matrix, valid_sp_matrix, test_sp_matrix, user_id_dict, user_to_num_items, item_id_dict, item_to_num_users
# def load_data_and_info(data_file, info_file, implicit=True):
# with open(data_file, 'rb') as f:
# data_dict = pickle.load(f)
# with open(info_file, 'rb') as f:
# info_dict = pickle.load(f)
# train, valid, test = data_dict['train'], data_dict['valid'], data_dict['test']
# user_id_dict, user_to_num_items, item_id_dict, item_to_num_users = info_dict['user_id_dict'], info_dict['user_to_num_items'], info_dict['item_id_dict'], info_dict['item_to_num_users']
# for train_u in train:
# IRTs_user = train[train_u]
# irts = []
# for irt in IRTs_user:
# if implicit:
# irts.append((irt[0], 1))
# else:
# irts.append((irt[0], irt[1]))
# train[train_u] = irts
# for valid_u in valid:
# IRTs_user = valid[valid_u]
# irts = []
# for irt in IRTs_user:
# if implicit:
# irts.append((irt[0], 1))
# else:
# irts.append((irt[0], irt[1]))
# valid[valid_u] = irts
# for test_u in test:
# IRTs_user = test[test_u]
# irts = []
# for irt in IRTs_user:
# if implicit:
# irts.append((irt[0], 1))
# else:
# irts.append((irt[0], irt[1]))
# test[test_u] = irts
# return train, valid, test, user_id_dict, user_to_num_items, item_id_dict, item_to_num_users | 2,896 | 33.903614 | 189 | py |
RecSys_PyTorch | RecSys_PyTorch-master/data/dataset.py | import os
import pandas as pd
import numpy as np
import scipy.sparse as sp
from typing import List, Dict, Union, Optional
from pathlib import Path
from .preprocess import split_into_tr_val_te
from utils.types import df_to_sparse
class UIRTDataset(object):
def __init__(self, data_path:str, dataname:Optional[str]=None, separator:str=',', binarize_threshold:Union[int, float]=0.0, implicit:bool=True,
min_item_per_user:int=0, min_user_per_item:int=0, protocol:str='holdout', generalization:str='weak',
holdout_users:Union[int, float]=0.1, valid_ratio:Union[int, float]=0.1, test_ratio:Union[int, float]=0.2,
leave_k:int=1, split_random:bool=True, cache_dir:str='cache', seed:int=1234):
self.data_path = Path(data_path)
self.base_dir = self.data_path.parent
self.dataname = dataname if dataname is not None else self.base_dir.name
self.separator = separator
self.binarize_threshold = binarize_threshold
self.implicit = implicit
self.min_item_per_user = min_item_per_user
self.min_user_per_item = min_user_per_item
self.protocol = protocol
self.generalization = generalization
self.holdout_users = holdout_users
self.valid_ratio = valid_ratio
self.test_ratio = test_ratio
self.leave_k = leave_k
self.split_random = split_random
self.seed = seed
self.cache_dir = cache_dir
self._prepro_cache_dir = None
self._set_preprocessed_cache_dir()
self._prepro_file_dict, self._user2id_file, self._item2id_file = self._ensure_preprocessed()
        self._load_preprocessed_data()
    def _load_preprocessed_data(self) -> None:
def transform(df):
if self.implicit:
if self.binarize_threshold > 0:
                    df = df[df['rating'] >= self.binarize_threshold]
df.rating = np.ones(len(df))
return df
print('Load preprocessed data...')
self.user2id = self._load_id_map(self._user2id_file)
self.item2id = self._load_id_map(self._item2id_file)
self.num_users, self.num_items = len(self.user2id), len(self.item2id)
names=['user', 'item', 'rating', 'timestamp']
dtype={'user': int, 'item': int, 'rating': float, 'timestamp': float}
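        # 'weak' generalization: every user appears in training, and the validation/test
        # targets are held-out interactions of those same users. 'strong': the evaluation
        # users are disjoint from the training users, and each evaluation user's history
        # is split into an input part (fed to the model) and a target part.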
if self.generalization == 'weak':
train_df = transform(pd.read_csv(self._prepro_file_dict['train'], sep=',', names=names, dtype=dtype))
valid_df = transform(pd.read_csv(self._prepro_file_dict['valid'], sep=',', names=names, dtype=dtype))
test_df = transform(pd.read_csv(self._prepro_file_dict['test'], sep=',', names=names, dtype=dtype))
self.train_users = self.valid_users = self.test_users = list(pd.unique(train_df.user))
self.train_data = df_to_sparse(train_df, shape=(self.num_users, self.num_items))
self.valid_target = df_to_sparse(valid_df, shape=(self.num_users, self.num_items))
self.test_target = df_to_sparse(test_df, shape=(self.num_users, self.num_items))
else:
train_df = transform(pd.read_csv(self._prepro_file_dict['train'], sep=',', names=names, dtype=dtype))
valid_input_df = transform(pd.read_csv(self._prepro_file_dict['valid_input'], sep=',', names=names, dtype=dtype))
valid_target_df = transform(pd.read_csv(self._prepro_file_dict['valid_target'], sep=',', names=names, dtype=dtype))
test_input_df = transform(pd.read_csv(self._prepro_file_dict['test_input'], sep=',', names=names, dtype=dtype))
test_target_df = transform(pd.read_csv(self._prepro_file_dict['test_target'], sep=',', names=names, dtype=dtype))
self.train_users = list(pd.unique(train_df.user))
self.valid_users = list(pd.unique(valid_input_df.user))
self.test_users = list(pd.unique(test_input_df.user))
valid_user_ids = [self.user2id[u] for u in self.valid_users]
test_user_ids = [self.user2id[u] for u in self.test_users]
self.train_data = df_to_sparse(train_df, shape=(self.num_users, self.num_items))
            self._valid_input = df_to_sparse(valid_input_df, shape=(self.num_users, self.num_items))[valid_user_ids]
self.valid_target = df_to_sparse(valid_target_df, shape=(self.num_users, self.num_items))[valid_user_ids]
            self._test_input = df_to_sparse(test_input_df, shape=(self.num_users, self.num_items))[test_user_ids]
self.test_target = df_to_sparse(test_target_df, shape=(self.num_users, self.num_items))[test_user_ids]
    def _ensure_preprocessed(self):
if self.generalization == 'weak':
prepro_dict = {
'train': self._prepro_cache_dir / 'train.csv',
'valid': self._prepro_cache_dir / 'valid.csv',
'test': self._prepro_cache_dir / 'test.csv'
}
else:
prepro_dict = {
'train': self._prepro_cache_dir / 'train.csv',
'valid_input': self._prepro_cache_dir / 'valid_input.csv',
'valid_target': self._prepro_cache_dir / 'valid_target.csv',
'test_input': self._prepro_cache_dir / 'test_input.csv',
'test_target': self._prepro_cache_dir / 'test_target.csv'
}
user2id_file = self._prepro_cache_dir / 'user_map'
item2id_file = self._prepro_cache_dir / 'item_map'
files_to_check = list(prepro_dict.values()) + [user2id_file, item2id_file]
        if self._check_preprocessed(files_to_check):
            print('Load from preprocessed cache')
else:
print('Preprocess raw data...')
raw_data = pd.read_csv(self.data_path, sep=self.separator,
names=['user', 'item', 'rating', 'timestamp'],
dtype={'user': int, 'item': int, 'rating': float, 'timestamp': float},
engine='python')
# TODO: handle UI, UIR, UIT via NaN
sample_row = raw_data.iloc[0,:]
if pd.isna(sample_row.rating):
raw_data.rating = np.ones(len(raw_data))
if pd.isna(sample_row.timestamp):
raw_data.timestamp = np.ones(len(raw_data))
# user item id map
raw_num_users = len(pd.unique(raw_data.user))
raw_num_items = len(pd.unique(raw_data.item))
# Filter users
num_items_by_user = raw_data.groupby('user', as_index=False).size()
num_items_by_user = num_items_by_user.set_index('user')
user_filter_idx = raw_data['user'].isin(num_items_by_user.index[num_items_by_user['size'] >= self.min_item_per_user])
raw_data = raw_data[user_filter_idx]
num_items_by_user = raw_data.groupby('user', as_index=False).size()
num_items_by_user = num_items_by_user.set_index('user')
num_users = len(pd.unique(raw_data.user))
print('# user after filter (min %d items): %d' % (self.min_item_per_user, num_users))
# Filter items
num_users_by_item = raw_data.groupby('item', as_index=False).size()
num_users_by_item = num_users_by_item.set_index('item')
item_filter_idx = raw_data['item'].isin(num_users_by_item.index[num_users_by_item['size'] >= self.min_user_per_item])
raw_data = raw_data[item_filter_idx]
num_users_by_item = raw_data.groupby('item', as_index=False).size()
num_users_by_item = num_users_by_item.set_index('item')
num_items = len(pd.unique(raw_data.item))
print('# item after filter (min %d users): %d' % (self.min_user_per_item, num_items))
# Build user old2new id map
# user_frame = num_items_by_user.to_frame()
num_items_by_user.columns = ['item_cnt']
raw_users = list(num_items_by_user.index)
user2id = {u: uid for uid, u in enumerate(raw_users)}
# Build item old2new id map
# item_frame = num_users_by_item.to_frame()
num_users_by_item.columns = ['user_cnt']
raw_items = list(num_users_by_item.index)
item2id = {i: iid for iid, i in enumerate(raw_items)}
# Convert to new id
raw_data.user = [user2id[u] for u in raw_data.user.tolist()]
raw_data.item = [item2id[i] for i in raw_data.item.tolist()]
# preprocess and save
if self.protocol == 'leave_one_out':
prepro_data_dict = split_into_tr_val_te(
data=raw_data,
generalization=self.generalization,
num_valid_items=self.leave_k,
num_test_items=self.leave_k,
holdout_users=self.holdout_users,
split_random=self.split_random,
user2id=user2id,
item2id=item2id)
elif self.protocol == 'holdout':
prepro_data_dict = split_into_tr_val_te(
data=raw_data,
generalization=self.generalization,
num_valid_items=self.valid_ratio,
num_test_items=self.test_ratio,
holdout_users=self.holdout_users,
split_random=self.split_random,
user2id=user2id,
item2id=item2id)
else:
raise ValueError(f'{self.protocol} is not a valid protocol.')
for filename, filepath in prepro_dict.items():
prepro_data_dict[filename].to_csv(filepath, index=False, header=False)
self._save_id_map(user2id, user2id_file)
self._save_id_map(item2id, item2id_file)
return prepro_dict, user2id_file, item2id_file
    def _check_preprocessed(self, files_to_check: List) -> bool:
if not self._prepro_cache_dir.exists():
return False
for filepath in files_to_check:
if not filepath.exists():
return False
return True
def _set_preprocessed_cache_dir(self) -> None:
if self._prepro_cache_dir is None:
random_or_not = 'random' if self.split_random else 'time'
if self.protocol == 'leave_one_out':
protocol_name = f'loo_{self.leave_k}_{self.generalization}_{random_or_not}_minUI_{self.min_item_per_user}_{self.min_user_per_item}_seed{self.seed}/'
            elif self.protocol == 'holdout':
valid_ratio_str = '%.2f' % self.valid_ratio
test_ratio_str = '%.2f' % self.test_ratio
protocol_name = f'holdout_{valid_ratio_str}_{test_ratio_str}_{self.generalization}_{random_or_not}_minUI_{self.min_item_per_user}_{self.min_user_per_item}_seed{self.seed}/'
else:
                raise ValueError(f'Incorrect protocol passed ({self.protocol}). Choose between leave_one_out and holdout.')
self._prepro_cache_dir = Path(os.path.join(self.base_dir, self.cache_dir, protocol_name))
if not self._prepro_cache_dir.exists():
os.makedirs(self._prepro_cache_dir)
def _load_id_map(self, id_map_file: Path) -> Dict:
old2new = {}
with open(id_map_file, 'rt') as f:
for line in f.readlines():
u, uid = line.strip().split(', ')
old2new[int(u)] = int(uid)
return old2new
def _save_id_map(self, id_map: Dict, id_map_file: Path) -> None:
# Write user/item id map into files
with open(id_map_file, 'wt') as f:
for u, uid in id_map.items():
f.write('%d, %d\n' % (u, uid))
    @property
    def valid_input(self) -> sp.csr_matrix:
        # stored internally as _valid_input so the attribute does not shadow this property
        if self.generalization == 'weak':
            return self.train_data
        else:
            return self._valid_input
    @property
    def test_input(self) -> sp.csr_matrix:
        if self.generalization == 'weak':
            return self.train_data + self.valid_target
        else:
            return self._test_input
@property
def num_train_users(self) -> int:
return len(self.train_users)
@property
def num_valid_users(self) -> int:
return len(self.valid_users)
@property
def num_test_users(self) -> int:
return len(self.test_users) | 12,708 | 46.599251 | 188 | py |
RecSys_PyTorch | RecSys_PyTorch-master/data/__init__.py | 0 | 0 | 0 | py | |
RecSys_PyTorch | RecSys_PyTorch-master/data/preprocess.py | import os
import math
import pandas as pd
import numpy as np
from typing import List, Dict, Union
from pathlib import Path
def split_into_tr_val_te(data:pd.DataFrame, generalization:str, num_valid_items:Union[int, float], num_test_items:Union[int, float],
holdout_users:int, split_random:bool, user2id:Dict, item2id:Dict) -> Dict:
prepro_data_dict = {}
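    # 'weak' generalization holds out interactions per user (all users stay in training);
    # otherwise the users themselves are partitioned, and each held-out user's history is
    # further split into an input part and a target part.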
if generalization == 'weak':
# Split data into train, valid, test
train_data, test_data = split_input_target_by_users(data, test_ratio=num_valid_items, split_random=split_random)
train_data, valid_data = split_input_target_by_users(train_data, test_ratio=num_test_items, split_random=split_random)
prepro_data_dict['train'] = train_data
prepro_data_dict['valid'] = valid_data
prepro_data_dict['test'] = test_data
else:
user_ids = np.array(list(user2id.values()))
num_users = len(user_ids)
num_valid_users = num_test_users = int(num_users * holdout_users)
num_train_users = num_users - num_valid_users - num_test_users
perm = np.random.permutation(num_users)
train_user_idx = perm[:num_train_users]
valid_user_idx = perm[num_train_users: num_train_users + num_valid_users]
test_user_idx = perm[num_train_users + num_valid_users: num_train_users + num_valid_users + num_test_users]
train_users = user_ids[train_user_idx]
valid_users = user_ids[valid_user_idx]
test_users = user_ids[test_user_idx]
# possible refactor candidate: 3N -> N
train_data = data.loc[data.user.isin(train_users)]
valid_data = data.loc[data.user.isin(valid_users)]
test_data = data.loc[data.user.isin(test_users)]
valid_input, valid_target = split_input_target_by_users(valid_data, test_ratio=num_valid_items, split_random=split_random)
test_input, test_target = split_input_target_by_users(test_data, test_ratio=num_test_items, split_random=split_random)
prepro_data_dict['train'] = train_data
prepro_data_dict['valid_input'] = valid_input
prepro_data_dict['valid_target'] = valid_target
prepro_data_dict['test_input'] = test_input
prepro_data_dict['test_target'] = test_target
return prepro_data_dict
def split_input_target_by_users(df:pd.DataFrame, test_ratio:float=0.2, split_random:bool=True):
df_group = df.groupby('user')
train_list, test_list = [], []
num_zero_train, num_zero_test = 0, 0
for _, group in df_group:
user = pd.unique(group.user)[0]
num_items_user = len(group)
if isinstance(test_ratio, float):
num_test_items = int(math.ceil(test_ratio * num_items_user))
else:
num_test_items = test_ratio
group = group.sort_values(by='timestamp')
idx = np.ones(num_items_user, dtype='bool')
if split_random:
test_idx = np.random.choice(num_items_user, num_test_items, replace=False)
idx[test_idx] = False
else:
idx[-num_test_items:] = False
if len(group[idx]) == 0:
num_zero_train += 1
else:
train_list.append(group[idx])
if len(group[np.logical_not(idx)]) == 0:
num_zero_test += 1
else:
test_list.append(group[np.logical_not(idx)])
train_df = pd.concat(train_list)
test_df = pd.concat(test_list)
# TODO: warn zero train, test users
return train_df, test_df | 3,553 | 38.488889 | 133 | py |
paac | paac-master/emulator_runner.py | from multiprocessing import Process
class EmulatorRunner(Process):
def __init__(self, id, emulators, variables, queue, barrier):
super(EmulatorRunner, self).__init__()
self.id = id
self.emulators = emulators
self.variables = variables
self.queue = queue
self.barrier = barrier
def run(self):
super(EmulatorRunner, self).run()
self._run()
def _run(self):
count = 0
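        # Worker protocol: block on the instruction queue; a None instruction means
        # shut down. Otherwise, step every owned emulator with the action found in
        # the shared variables (variables[-1]), write the next state, reward, and
        # episode-over flag into variables[0..2] in place, and signal completion on
        # the barrier queue so the master can proceed.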
while True:
instruction = self.queue.get()
if instruction is None:
break
for i, (emulator, action) in enumerate(zip(self.emulators, self.variables[-1])):
new_s, reward, episode_over = emulator.next(action)
if episode_over:
self.variables[0][i] = emulator.get_initial_state()
else:
self.variables[0][i] = new_s
self.variables[1][i] = reward
self.variables[2][i] = episode_over
count += 1
self.barrier.put(True)
| 1,070 | 27.945946 | 92 | py |
paac | paac-master/runners.py | import numpy as np
from multiprocessing import Queue
from multiprocessing.sharedctypes import RawArray
from ctypes import c_uint, c_float, c_double
class Runners(object):
NUMPY_TO_C_DTYPE = {np.float32: c_float, np.float64: c_double, np.uint8: c_uint}
def __init__(self, EmulatorRunner, emulators, workers, variables):
self.variables = [self._get_shared(var) for var in variables]
self.workers = workers
self.queues = [Queue() for _ in range(workers)]
self.barrier = Queue()
self.runners = [EmulatorRunner(i, emulators, vars, self.queues[i], self.barrier) for i, (emulators, vars) in
enumerate(zip(np.split(emulators, workers), zip(*[np.split(var, workers) for var in self.variables])))]
def _get_shared(self, array):
"""
Returns a RawArray backed numpy array that can be shared between processes.
:param array: the array to be shared
:return: the RawArray backed numpy array
"""
dtype = self.NUMPY_TO_C_DTYPE[array.dtype.type]
shape = array.shape
shared = RawArray(dtype, array.reshape(-1))
return np.frombuffer(shared, dtype).reshape(shape)
def start(self):
for r in self.runners:
r.start()
def stop(self):
for queue in self.queues:
queue.put(None)
def get_shared_variables(self):
return self.variables
def update_environments(self):
for queue in self.queues:
queue.put(True)
def wait_updated(self):
for wd in range(self.workers):
self.barrier.get()
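# Illustrative master-side usage (a minimal sketch; `policy` and the variable layout
# shown here are assumptions, not part of this module):
#
#   runners = Runners(EmulatorRunner, emulators, workers, [states, rewards, dones, actions])
#   runners.start()
#   states, rewards, dones, actions = runners.get_shared_variables()
#   actions[:] = policy(states)      # write the next actions into shared memory
#   runners.update_environments()    # tell every worker to step its emulators
#   runners.wait_updated()           # block until all workers have finished the step
#   runners.stop()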
| 1,621 | 30.803922 | 127 | py |
paac | paac-master/test.py | import os
from train import get_network_and_environment_creator, bool_arg
import logger_utils
import argparse
import numpy as np
import time
import tensorflow as tf
import random
from paac import PAACLearner
def get_save_frame(name):
import imageio
writer = imageio.get_writer(name + '.gif', fps=30)
def get_frame(frame):
writer.append_data(frame)
return get_frame
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', type=str, help="Folder where to save the debugging information.", dest="folder", required=True)
parser.add_argument('-tc', '--test_count', default='1', type=int, help="The amount of tests to run on the given network", dest="test_count")
parser.add_argument('-np', '--noops', default=30, type=int, help="Maximum amount of no-ops to use", dest="noops")
parser.add_argument('-gn', '--gif_name', default=None, type=str, help="If provided, a gif will be produced and stored with this name", dest="gif_name")
parser.add_argument('-gf', '--gif_folder', default='', type=str, help="The folder where to save gifs.", dest="gif_folder")
parser.add_argument('-d', '--device', default='/gpu:0', type=str, help="Device to be used ('/cpu:0', '/gpu:0', '/gpu:1',...)", dest="device")
args = parser.parse_args()
arg_file = os.path.join(args.folder, 'args.json')
device = args.device
for k, v in logger_utils.load_args(arg_file).items():
setattr(args, k, v)
args.max_global_steps = 0
df = args.folder
args.debugging_folder = '/tmp/logs'
args.device = device
args.random_start = False
args.single_life_episodes = False
if args.gif_name:
args.visualize = 1
args.actor_id = 0
rng = np.random.RandomState(int(time.time()))
args.random_seed = rng.randint(1000)
network_creator, env_creator = get_network_and_environment_creator(args)
network = network_creator()
saver = tf.train.Saver()
rewards = []
environments = [env_creator.create_environment(i) for i in range(args.test_count)]
if args.gif_name:
for i, environment in enumerate(environments):
environment.on_new_frame = get_save_frame(os.path.join(args.gif_folder, args.gif_name + str(i)))
config = tf.ConfigProto()
if 'gpu' in args.device:
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
checkpoints_ = os.path.join(df, 'checkpoints')
network.init(checkpoints_, saver, sess)
states = np.asarray([environment.get_initial_state() for environment in environments])
if args.noops != 0:
for i, environment in enumerate(environments):
for _ in range(random.randint(0, args.noops)):
state, _, _ = environment.next(environment.get_noop())
states[i] = state
        episodes_over = np.zeros(args.test_count, dtype=bool)
rewards = np.zeros(args.test_count, dtype=np.float32)
while not all(episodes_over):
actions, _, _ = PAACLearner.choose_next_actions(network, env_creator.num_actions, states, sess)
for j, environment in enumerate(environments):
state, r, episode_over = environment.next(actions[j])
states[j] = state
rewards[j] += r
episodes_over[j] = episode_over
print('Performed {} tests for {}.'.format(args.test_count, args.game))
print('Mean: {0:.2f}'.format(np.mean(rewards)))
print('Min: {0:.2f}'.format(np.min(rewards)))
print('Max: {0:.2f}'.format(np.max(rewards)))
print('Std: {0:.2f}'.format(np.std(rewards)))
| 3,696 | 39.626374 | 155 | py |
paac | paac-master/policy_v_network.py | from networks import *
class PolicyVNetwork(Network):
def __init__(self, conf):
""" Set up remaining layers, objective and loss functions, gradient
compute and apply ops, network parameter synchronization ops, and
summary ops. """
super(PolicyVNetwork, self).__init__(conf)
self.entropy_regularisation_strength = conf['entropy_regularisation_strength']
with tf.device(conf['device']):
with tf.name_scope(self.name):
self.critic_target_ph = tf.placeholder(
"float32", [None], name='target')
self.adv_actor_ph = tf.placeholder("float", [None], name='advantage')
# Final actor layer
layer_name = 'actor_output'
_, _, self.output_layer_pi = softmax(layer_name, self.output, self.num_actions)
# Final critic layer
_, _, self.output_layer_v = fc('critic_output', self.output, 1, activation="linear")
# Avoiding log(0) by adding a very small quantity (1e-30) to output.
self.log_output_layer_pi = tf.log(tf.add(self.output_layer_pi, tf.constant(1e-30)),
name=layer_name + '_log_policy')
# Entropy: sum_a (-p_a ln p_a)
self.output_layer_entropy = tf.reduce_sum(tf.multiply(
tf.constant(-1.0),
tf.multiply(self.output_layer_pi, self.log_output_layer_pi)), reduction_indices=1)
self.output_layer_v = tf.reshape(self.output_layer_v, [-1])
# Advantage critic
self.critic_loss = tf.subtract(self.critic_target_ph, self.output_layer_v)
log_output_selected_action = tf.reduce_sum(
tf.multiply(self.log_output_layer_pi, self.selected_action_ph),
reduction_indices=1)
self.actor_objective_advantage_term = tf.multiply(log_output_selected_action, self.adv_actor_ph)
self.actor_objective_entropy_term = tf.multiply(self.entropy_regularisation_strength, self.output_layer_entropy)
self.actor_objective_mean = tf.reduce_mean(tf.multiply(tf.constant(-1.0),
tf.add(self.actor_objective_advantage_term, self.actor_objective_entropy_term)),
name='mean_actor_objective')
self.critic_loss_mean = tf.reduce_mean(tf.scalar_mul(0.25, tf.pow(self.critic_loss, 2)), name='mean_critic_loss')
                    # Loss scaling is used because the learning rate was initially tuned to be used with
# max_local_steps = 5 and summing over timesteps, which is now replaced with the mean.
self.loss = tf.scalar_mul(self.loss_scaling, self.actor_objective_mean + self.critic_loss_mean)
class NIPSPolicyVNetwork(PolicyVNetwork, NIPSNetwork):
pass
class NaturePolicyVNetwork(PolicyVNetwork, NatureNetwork):
pass
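if __name__ == '__main__':
    # Illustrative numpy sketch (not part of the original module) of the
    # per-sample quantities assembled in the graph above; values are made up.
    import numpy as np
    pi = np.array([0.7, 0.2, 0.1])           # policy output (output_layer_pi)
    log_pi = np.log(pi + 1e-30)              # same 1e-30 guard as above
    entropy = -np.sum(pi * log_pi)           # sum_a (-p_a ln p_a)
    selected = np.array([0., 1., 0.])        # one-hot selected_action_ph
    advantage = 0.5                          # adv_actor_ph
    actor_term = np.sum(log_pi * selected) * advantage
    print('entropy = %.3f, advantage term = %.3f' % (entropy, actor_term))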
| 3,081 | 46.415385 | 151 | py |
paac | paac-master/environment.py | import numpy as np
class BaseEnvironment(object):
def get_initial_state(self):
"""
Sets the environment to its initial state.
:return: the initial state
"""
raise NotImplementedError()
def next(self, action):
"""
        Applies the current action to the environment.
:param action: one hot vector.
:return: (observation, reward, is_terminal) tuple
"""
raise NotImplementedError()
def get_legal_actions(self):
"""
Get the set of indices of legal actions
:return: a numpy array of the indices of legal actions
"""
raise NotImplementedError()
def get_noop(self):
"""
Gets the no-op action, to be used with self.next
:return: the action
"""
raise NotImplementedError()
def on_new_frame(self, frame):
"""
Called whenever a new frame is available.
:param frame: raw frame
"""
pass
class FramePool(object):
def __init__(self, frame_pool, operation):
self.frame_pool = frame_pool
self.frame_pool_index = 0
self.frames_in_pool = frame_pool.shape[0]
self.operation = operation
def new_frame(self, frame):
self.frame_pool[self.frame_pool_index] = frame
self.frame_pool_index = (self.frame_pool_index + 1) % self.frames_in_pool
def get_processed_frame(self):
return self.operation(self.frame_pool)
class ObservationPool(object):
def __init__(self, observation_pool):
self.observation_pool = observation_pool
self.pool_size = observation_pool.shape[-1]
self.permutation = [self.__shift(list(range(self.pool_size)), i) for i in range(self.pool_size)]
self.current_observation_index = 0
def new_observation(self, observation):
self.observation_pool[:, :, self.current_observation_index] = observation
self.current_observation_index = (self.current_observation_index + 1) % self.pool_size
def get_pooled_observations(self):
return np.copy(self.observation_pool[:, :, self.permutation[self.current_observation_index]])
def __shift(self, seq, n):
n = n % len(seq)
return seq[n:]+seq[:n]
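if __name__ == '__main__':
    # Minimal sketch (not part of the original module): a two-frame FramePool
    # reduced with a pixelwise max, as the Atari emulator uses it to remove
    # sprite flicker.
    pool = FramePool(np.zeros((2, 2, 2), dtype=np.uint8),
                     lambda frames: np.amax(frames, axis=0))
    pool.new_frame(np.array([[1, 0], [0, 1]], dtype=np.uint8))
    pool.new_frame(np.array([[0, 2], [2, 0]], dtype=np.uint8))
    print(pool.get_processed_frame())  # -> [[1 2] [2 1]]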
| 2,263 | 28.402597 | 104 | py |
paac | paac-master/paac.py | import time
from multiprocessing import Queue
from multiprocessing.sharedctypes import RawArray
from ctypes import c_uint, c_float
from actor_learner import *
import logging
from emulator_runner import EmulatorRunner
from runners import Runners
import numpy as np
class PAACLearner(ActorLearner):
def __init__(self, network_creator, environment_creator, args):
super(PAACLearner, self).__init__(network_creator, environment_creator, args)
self.workers = args.emulator_workers
@staticmethod
def choose_next_actions(network, num_actions, states, session):
network_output_v, network_output_pi = session.run(
[network.output_layer_v,
network.output_layer_pi],
feed_dict={network.input_ph: states})
action_indices = PAACLearner.__sample_policy_action(network_output_pi)
new_actions = np.eye(num_actions)[action_indices]
return new_actions, network_output_v, network_output_pi
def __choose_next_actions(self, states):
return PAACLearner.choose_next_actions(self.network, self.num_actions, states, self.session)
@staticmethod
def __sample_policy_action(probs):
"""
Sample an action from an action probability distribution output by
the policy network.
"""
# Subtract a tiny value from probabilities in order to avoid
# "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
probs = probs - np.finfo(np.float32).epsneg
action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]
return action_indexes
def _get_shared(self, array, dtype=c_float):
"""
Returns a RawArray backed numpy array that can be shared between processes.
:param array: the array to be shared
:param dtype: the RawArray dtype to use
:return: the RawArray backed numpy array
"""
shape = array.shape
shared = RawArray(dtype, array.reshape(-1))
return np.frombuffer(shared, dtype).reshape(shape)
def train(self):
"""
Main actor learner loop for parallel advantage actor critic learning.
"""
self.global_step = self.init_network()
logging.debug("Starting training at Step {}".format(self.global_step))
counter = 0
global_step_start = self.global_step
total_rewards = []
# state, reward, episode_over, action
variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),
(np.zeros(self.emulator_counts, dtype=np.float32)),
(np.asarray([False] * self.emulator_counts, dtype=np.float32)),
(np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]
self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)
self.runners.start()
shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()
summaries_op = tf.summary.merge_all()
emulator_steps = [0] * self.emulator_counts
total_episode_rewards = self.emulator_counts * [0]
actions_sum = np.zeros((self.emulator_counts, self.num_actions))
y_batch = np.zeros((self.max_local_steps, self.emulator_counts))
adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))
rewards = np.zeros((self.max_local_steps, self.emulator_counts))
states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)
actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))
values = np.zeros((self.max_local_steps, self.emulator_counts))
episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))
start_time = time.time()
while self.global_step < self.max_global_steps:
loop_start_time = time.time()
max_local_steps = self.max_local_steps
for t in range(max_local_steps):
next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)
actions_sum += next_actions
for z in range(next_actions.shape[0]):
shared_actions[z] = next_actions[z]
actions[t] = next_actions
values[t] = readouts_v_t
states[t] = shared_states
# Start updating all environments with next_actions
self.runners.update_environments()
self.runners.wait_updated()
# Done updating all environments, have new states, rewards and is_over
episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)
for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):
total_episode_rewards[e] += actual_reward
actual_reward = self.rescale_reward(actual_reward)
rewards[t, e] = actual_reward
emulator_steps[e] += 1
self.global_step += 1
if episode_over:
total_rewards.append(total_episode_rewards[e])
episode_summary = tf.Summary(value=[
tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),
tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),
])
self.summary_writer.add_summary(episode_summary, self.global_step)
self.summary_writer.flush()
total_episode_rewards[e] = 0
emulator_steps[e] = 0
actions_sum[e] = np.zeros(self.num_actions)
            next_state_value = self.session.run(
                self.network.output_layer_v,
                feed_dict={self.network.input_ph: shared_states})
            estimated_return = np.copy(next_state_value)
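            # N-step returns, computed backwards through the rollout:
            #     R_t = r_t + gamma * R_{t+1} * (1 - done_t),
            # bootstrapped from the critic's value estimate of the state after
            # the final step; the mask zeroes the bootstrap across episode ends.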
for t in reversed(range(max_local_steps)):
estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]
y_batch[t] = np.copy(estimated_return)
adv_batch[t] = estimated_return - values[t]
flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])
flat_y_batch = y_batch.reshape(-1)
flat_adv_batch = adv_batch.reshape(-1)
flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)
lr = self.get_lr()
feed_dict = {self.network.input_ph: flat_states,
self.network.critic_target_ph: flat_y_batch,
self.network.selected_action_ph: flat_actions,
self.network.adv_actor_ph: flat_adv_batch,
self.learning_rate: lr}
_, summaries = self.session.run(
[self.train_step, summaries_op],
feed_dict=feed_dict)
self.summary_writer.add_summary(summaries, self.global_step)
self.summary_writer.flush()
counter += 1
if counter % (2048 / self.emulator_counts) == 0:
curr_time = time.time()
global_steps = self.global_step
last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])
logging.info("Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}"
.format(global_steps,
self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),
(global_steps - global_step_start) / (curr_time - start_time),
last_ten))
self.save_vars()
self.cleanup()
def cleanup(self):
super(PAACLearner, self).cleanup()
self.runners.stop()
| 8,122 | 42.207447 | 119 | py |
paac | paac-master/atari_emulator.py | import numpy as np
from ale_python_interface import ALEInterface
from scipy.misc import imresize
import random
from environment import BaseEnvironment, FramePool,ObservationPool
IMG_SIZE_X = 84
IMG_SIZE_Y = 84
NR_IMAGES = 4
ACTION_REPEAT = 4
MAX_START_WAIT = 30
FRAMES_IN_POOL = 2
class AtariEmulator(BaseEnvironment):
def __init__(self, actor_id, args):
self.ale = ALEInterface()
self.ale.setInt(b"random_seed", args.random_seed * (actor_id +1))
# For fuller control on explicit action repeat (>= ALE 0.5.0)
self.ale.setFloat(b"repeat_action_probability", 0.0)
# Disable frame_skip and color_averaging
# See: http://is.gd/tYzVpj
self.ale.setInt(b"frame_skip", 1)
self.ale.setBool(b"color_averaging", False)
full_rom_path = args.rom_path + "/" + args.game + ".bin"
self.ale.loadROM(str.encode(full_rom_path))
self.legal_actions = self.ale.getMinimalActionSet()
self.screen_width, self.screen_height = self.ale.getScreenDims()
self.lives = self.ale.lives()
self.random_start = args.random_start
self.single_life_episodes = args.single_life_episodes
self.call_on_new_frame = args.visualize
        # Processed historical frames that will be fed into the network
# (i.e., four 84x84 images)
self.observation_pool = ObservationPool(np.zeros((IMG_SIZE_X, IMG_SIZE_Y, NR_IMAGES), dtype=np.uint8))
self.rgb_screen = np.zeros((self.screen_height, self.screen_width, 3), dtype=np.uint8)
self.gray_screen = np.zeros((self.screen_height, self.screen_width,1), dtype=np.uint8)
self.frame_pool = FramePool(np.empty((2, self.screen_height,self.screen_width), dtype=np.uint8),
self.__process_frame_pool)
def get_legal_actions(self):
return self.legal_actions
def __get_screen_image(self):
"""
Get the current frame luminance
:return: the current frame
"""
self.ale.getScreenGrayscale(self.gray_screen)
if self.call_on_new_frame:
self.ale.getScreenRGB(self.rgb_screen)
self.on_new_frame(self.rgb_screen)
return np.squeeze(self.gray_screen)
def on_new_frame(self, frame):
pass
def __new_game(self):
""" Restart game """
self.ale.reset_game()
self.lives = self.ale.lives()
if self.random_start:
wait = random.randint(0, MAX_START_WAIT)
for _ in range(wait):
self.ale.act(self.legal_actions[0])
def __process_frame_pool(self, frame_pool):
""" Preprocess frame pool """
img = np.amax(frame_pool, axis=0)
        img = imresize(img, (IMG_SIZE_X, IMG_SIZE_Y), interp='nearest')
img = img.astype(np.uint8)
return img
def __action_repeat(self, a, times=ACTION_REPEAT):
""" Repeat action and grab screen into frame pool """
reward = 0
for i in range(times - FRAMES_IN_POOL):
reward += self.ale.act(self.legal_actions[a])
# Only need to add the last FRAMES_IN_POOL frames to the frame pool
for i in range(FRAMES_IN_POOL):
reward += self.ale.act(self.legal_actions[a])
self.frame_pool.new_frame(self.__get_screen_image())
return reward
def get_initial_state(self):
""" Get the initial state """
self.__new_game()
for step in range(NR_IMAGES):
_ = self.__action_repeat(0)
self.observation_pool.new_observation(self.frame_pool.get_processed_frame())
if self.__is_terminal():
raise Exception('This should never happen.')
return self.observation_pool.get_pooled_observations()
def next(self, action):
""" Get the next state, reward, and game over signal """
reward = self.__action_repeat(np.argmax(action))
self.observation_pool.new_observation(self.frame_pool.get_processed_frame())
terminal = self.__is_terminal()
self.lives = self.ale.lives()
observation = self.observation_pool.get_pooled_observations()
return observation, reward, terminal
def __is_terminal(self):
if self.single_life_episodes:
return self.__is_over() or (self.lives > self.ale.lives())
else:
return self.__is_over()
def __is_over(self):
return self.ale.game_over()
def get_noop(self):
return [1.0, 0.0]
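if __name__ == '__main__':
    # Illustrative sketch (not part of the original module) of the
    # preprocessing done in __process_frame_pool; note that scipy.misc.imresize
    # was removed in SciPy >= 1.3, so this requires an older SciPy.
    frames = np.random.randint(0, 255, size=(FRAMES_IN_POOL, 210, 160)).astype(np.uint8)
    img = np.amax(frames, axis=0)  # pixelwise max removes sprite flicker
    img = imresize(img, (IMG_SIZE_X, IMG_SIZE_Y), interp='nearest').astype(np.uint8)
    print(img.shape)  # -> (84, 84)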
| 4,492 | 36.756303 | 110 | py |
paac | paac-master/logger_utils.py | import os
import json
import tensorflow as tf
def load_args(path):
if path is None:
return {}
with open(path, 'r') as f:
return json.load(f)
def save_args(args, folder, file_name='args.json'):
args = vars(args)
if not os.path.exists(folder):
os.makedirs(folder)
with open(os.path.join(folder, file_name), 'w') as f:
return json.dump(args, f)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
with tf.name_scope(name):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
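if __name__ == '__main__':
    # Minimal sketch (not part of the original module), TensorFlow 1.x graph
    # mode: attach the summaries above to a variable and merge them.
    weights = tf.Variable(tf.random_uniform([4, 4]), name='example_weights')
    variable_summaries(weights, 'example_weights')
    merged = tf.summary.merge_all()
    print(merged)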
| 977 | 27.764706 | 80 | py |
paac | paac-master/networks.py | import tensorflow as tf
import logging
import numpy as np
def flatten(_input):
shape = _input.get_shape().as_list()
dim = shape[1]*shape[2]*shape[3]
return tf.reshape(_input, [-1,dim], name='_flattened')
def conv2d(name, _input, filters, size, channels, stride, padding = 'VALID', init = "torch"):
w = conv_weight_variable([size,size,channels,filters],
name + '_weights', init = init)
b = conv_bias_variable([filters], size, size, channels,
name + '_biases', init = init)
conv = tf.nn.conv2d(_input, w, strides=[1, stride, stride, 1],
padding=padding, name=name + '_convs')
    out = tf.nn.relu(tf.add(conv, b), name=name + '_activations')
return w, b, out
def conv_weight_variable(shape, name, init = "torch"):
if init == "glorot_uniform":
receptive_field_size = np.prod(shape[:2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
d = np.sqrt(6. / (fan_in + fan_out))
else:
w = shape[0]
h = shape[1]
input_channels = shape[2]
d = 1.0 / np.sqrt(input_channels * w * h)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def conv_bias_variable(shape, w, h, input_channels, name, init= "torch"):
if init == "glorot_uniform":
initial = tf.zeros(shape)
else:
d = 1.0 / np.sqrt(input_channels * w * h)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def fc(name, _input, output_dim, activation = "relu", init = "torch"):
input_dim = _input.get_shape().as_list()[1]
w = fc_weight_variable([input_dim, output_dim],
name + '_weights', init = init)
    b = fc_bias_variable([output_dim], input_dim, name + '_biases', init=init)
    out = tf.add(tf.matmul(_input, w), b, name=name + '_out')
    if activation == "relu":
        out = tf.nn.relu(out, name=name + '_relu')
return w, b, out
def fc_weight_variable(shape, name, init="torch"):
if init == "glorot_uniform":
fan_in = shape[0]
fan_out = shape[1]
d = np.sqrt(6. / (fan_in + fan_out))
else:
input_channels = shape[0]
d = 1.0 / np.sqrt(input_channels)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def fc_bias_variable(shape, input_channels, name, init= "torch"):
if init=="glorot_uniform":
initial = tf.zeros(shape, dtype='float32')
else:
d = 1.0 / np.sqrt(input_channels)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def softmax(name, _input, output_dim):
input_dim = _input.get_shape().as_list()[1]
w = fc_weight_variable([input_dim, output_dim], name + '_weights')
b = fc_bias_variable([output_dim], input_dim, name + '_biases')
out = tf.nn.softmax(tf.add(tf.matmul(_input, w), b), name= name + '_policy')
return w, b, out
def log_softmax(name, _input, output_dim):
input_dim = _input.get_shape().as_list()[1]
w = fc_weight_variable([input_dim, output_dim], name + '_weights')
b = fc_bias_variable([output_dim], input_dim, name + '_biases')
out = tf.nn.log_softmax(tf.add(tf.matmul(_input, w), b), name= name + '_policy')
return w, b, out
class Network(object):
def __init__(self, conf):
self.name = conf['name']
self.num_actions = conf['num_actions']
self.clip_norm = conf['clip_norm']
self.clip_norm_type = conf['clip_norm_type']
self.device = conf['device']
with tf.device(self.device):
with tf.name_scope(self.name):
self.loss_scaling = 5.0
self.input_ph = tf.placeholder(tf.uint8, [None, 84, 84, 4], name='input')
self.selected_action_ph = tf.placeholder("float32", [None, self.num_actions], name="selected_action")
self.input = tf.scalar_mul(1.0/255.0, tf.cast(self.input_ph, tf.float32))
# This class should never be used, must be subclassed
# The output layer
self.output = None
def init(self, checkpoint_folder, saver, session):
last_saving_step = 0
with tf.device('/cpu:0'):
# Initialize network parameters
path = tf.train.latest_checkpoint(checkpoint_folder)
if path is None:
logging.info('Initializing all variables')
session.run(tf.global_variables_initializer())
else:
logging.info('Restoring network variables from previous run')
saver.restore(session, path)
last_saving_step = int(path[path.rindex('-')+1:])
return last_saving_step
class NIPSNetwork(Network):
def __init__(self, conf):
super(NIPSNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope(self.name):
_, _, conv1 = conv2d('conv1', self.input, 16, 8, 4, 4)
_, _, conv2 = conv2d('conv2', conv1, 32, 4, 16, 2)
_, _, fc3 = fc('fc3', flatten(conv2), 256, activation="relu")
self.output = fc3
class NatureNetwork(Network):
def __init__(self, conf):
super(NatureNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope(self.name):
_, _, conv1 = conv2d('conv1', self.input, 32, 8, 4, 4)
_, _, conv2 = conv2d('conv2', conv1, 64, 4, 32, 2)
_, _, conv3 = conv2d('conv3', conv2, 64, 3, 64, 1)
_, _, fc4 = fc('fc4', flatten(conv3), 512, activation="relu")
self.output = fc4
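if __name__ == '__main__':
    # Illustrative sketch (not part of the original module) of the conf dict
    # the Network classes above expect; the specific values are assumptions.
    conf = {'name': 'demo', 'num_actions': 6, 'clip_norm': 3.0,
            'clip_norm_type': 'global', 'device': '/cpu:0'}
    net = NIPSNetwork(conf)
    print(net.output.get_shape().as_list())  # -> [None, 256]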
| 5,972 | 34.135294 | 117 | py |
paac | paac-master/environment_creator.py | class EnvironmentCreator(object):
def __init__(self, args):
"""
Creates an object from which new environments can be created
        :param args: parsed arguments; must provide rom_path and game (ROM name)
"""
from atari_emulator import AtariEmulator
from ale_python_interface import ALEInterface
filename = args.rom_path + "/" + args.game + ".bin"
ale_int = ALEInterface()
ale_int.loadROM(str.encode(filename))
self.num_actions = len(ale_int.getMinimalActionSet())
self.create_environment = lambda i: AtariEmulator(i, args)
| 554 | 28.210526 | 68 | py |
paac | paac-master/train.py | import argparse
import logging
import sys
import signal
import os
import copy
import environment_creator
from paac import PAACLearner
from policy_v_network import NaturePolicyVNetwork, NIPSPolicyVNetwork
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def bool_arg(string):
value = string.lower()
if value == 'true':
return True
elif value == 'false':
return False
else:
raise argparse.ArgumentTypeError("Expected True or False, but got {}".format(string))
def main(args):
logging.debug('Configuration: {}'.format(args))
network_creator, env_creator = get_network_and_environment_creator(args)
learner = PAACLearner(network_creator, env_creator, args)
setup_kill_signal_handler(learner)
logging.info('Starting training')
learner.train()
logging.info('Finished training')
def setup_kill_signal_handler(learner):
main_process_pid = os.getpid()
def signal_handler(signal, frame):
if os.getpid() == main_process_pid:
logging.info('Signal ' + str(signal) + ' detected, cleaning up.')
learner.cleanup()
logging.info('Cleanup completed, shutting down...')
sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def get_network_and_environment_creator(args, random_seed=3):
env_creator = environment_creator.EnvironmentCreator(args)
num_actions = env_creator.num_actions
args.num_actions = num_actions
args.random_seed = random_seed
network_conf = {'num_actions': num_actions,
'entropy_regularisation_strength': args.entropy_regularisation_strength,
'device': args.device,
'clip_norm': args.clip_norm,
'clip_norm_type': args.clip_norm_type}
if args.arch == 'NIPS':
network = NIPSPolicyVNetwork
else:
network = NaturePolicyVNetwork
def network_creator(name='local_learning'):
nonlocal network_conf
copied_network_conf = copy.copy(network_conf)
copied_network_conf['name'] = name
return network(copied_network_conf)
return network_creator, env_creator
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-g', default='pong', help='Name of game', dest='game')
parser.add_argument('-d', '--device', default='/gpu:0', type=str, help="Device to be used ('/cpu:0', '/gpu:0', '/gpu:1',...)", dest="device")
parser.add_argument('--rom_path', default='./atari_roms', help='Directory where the game roms are located (needed for ALE environment)', dest="rom_path")
parser.add_argument('-v', '--visualize', default=False, type=bool_arg, help="0: no visualization of emulator; 1: all emulators, for all actors, are visualized; 2: only 1 emulator (for one of the actors) is visualized", dest="visualize")
parser.add_argument('--e', default=0.1, type=float, help="Epsilon for the Rmsprop and Adam optimizers", dest="e")
parser.add_argument('--alpha', default=0.99, type=float, help="Discount factor for the history/coming gradient, for the Rmsprop optimizer", dest="alpha")
parser.add_argument('-lr', '--initial_lr', default=0.0224, type=float, help="Initial value for the learning rate. Default = 0.0224", dest="initial_lr")
parser.add_argument('-lra', '--lr_annealing_steps', default=80000000, type=int, help="Nr. of global steps during which the learning rate will be linearly annealed towards zero", dest="lr_annealing_steps")
parser.add_argument('--entropy', default=0.02, type=float, help="Strength of the entropy regularization term (needed for actor-critic)", dest="entropy_regularisation_strength")
    parser.add_argument('--clip_norm', default=3.0, type=float, help="If clip_norm_type is local/global, grads will be clipped at the specified maximum (average) L2-norm", dest="clip_norm")
parser.add_argument('--clip_norm_type', default="global", help="Whether to clip grads by their norm or not. Values: ignore (no clipping), local (layer-wise norm), global (global norm)", dest="clip_norm_type")
parser.add_argument('--gamma', default=0.99, type=float, help="Discount factor", dest="gamma")
parser.add_argument('--max_global_steps', default=80000000, type=int, help="Max. number of training steps", dest="max_global_steps")
parser.add_argument('--max_local_steps', default=5, type=int, help="Number of steps to gain experience from before every update.", dest="max_local_steps")
parser.add_argument('--arch', default='NIPS', help="Which network architecture to use: from the NIPS or NATURE paper", dest="arch")
parser.add_argument('--single_life_episodes', default=False, type=bool_arg, help="If True, training episodes will be terminated when a life is lost (for games)", dest="single_life_episodes")
parser.add_argument('-ec', '--emulator_counts', default=32, type=int, help="The amount of emulators per agent. Default is 32.", dest="emulator_counts")
parser.add_argument('-ew', '--emulator_workers', default=8, type=int, help="The amount of emulator workers per agent. Default is 8.", dest="emulator_workers")
parser.add_argument('-df', '--debugging_folder', default='logs/', type=str, help="Folder where to save the debugging information.", dest="debugging_folder")
parser.add_argument('-rs', '--random_start', default=True, type=bool_arg, help="Whether or not to start with 30 noops for each env. Default True", dest="random_start")
return parser
if __name__ == '__main__':
args = get_arg_parser().parse_args()
import logger_utils
logger_utils.save_args(args, args.debugging_folder)
logging.debug(args)
main(args)
| 5,725 | 51.054545 | 240 | py |
paac | paac-master/actor_learner.py | import numpy as np
from multiprocessing import Process
import tensorflow as tf
import logging
from logger_utils import variable_summaries
import os
CHECKPOINT_INTERVAL = 1000000
class ActorLearner(Process):
def __init__(self, network_creator, environment_creator, args):
super(ActorLearner, self).__init__()
self.global_step = 0
self.max_local_steps = args.max_local_steps
self.num_actions = args.num_actions
self.initial_lr = args.initial_lr
self.lr_annealing_steps = args.lr_annealing_steps
self.emulator_counts = args.emulator_counts
self.device = args.device
self.debugging_folder = args.debugging_folder
self.network_checkpoint_folder = os.path.join(self.debugging_folder, 'checkpoints/')
self.optimizer_checkpoint_folder = os.path.join(self.debugging_folder, 'optimizer_checkpoints/')
self.last_saving_step = 0
self.summary_writer = tf.summary.FileWriter(os.path.join(self.debugging_folder, 'tf'))
self.learning_rate = tf.placeholder(tf.float32, shape=[])
optimizer_variable_names = 'OptimizerVariables'
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, decay=args.alpha, epsilon=args.e,
name=optimizer_variable_names)
self.emulators = np.asarray([environment_creator.create_environment(i)
for i in range(self.emulator_counts)])
self.max_global_steps = args.max_global_steps
self.gamma = args.gamma
self.game = args.game
self.network = network_creator()
# Optimizer
grads_and_vars = self.optimizer.compute_gradients(self.network.loss)
self.flat_raw_gradients = tf.concat([tf.reshape(g, [-1]) for g, v in grads_and_vars], axis=0)
# This is not really an operation, but a list of gradient Tensors.
# When calling run() on it, the value of those Tensors
# (i.e., of the gradients) will be calculated
if args.clip_norm_type == 'ignore':
# Unclipped gradients
global_norm = tf.global_norm([g for g, v in grads_and_vars], name='global_norm')
elif args.clip_norm_type == 'global':
# Clip network grads by network norm
gradients_n_norm = tf.clip_by_global_norm(
[g for g, v in grads_and_vars], args.clip_norm)
global_norm = tf.identity(gradients_n_norm[1], name='global_norm')
grads_and_vars = list(zip(gradients_n_norm[0], [v for g, v in grads_and_vars]))
elif args.clip_norm_type == 'local':
# Clip layer grads by layer norm
            gradients = [tf.clip_by_norm(
                g, args.clip_norm) for g, v in grads_and_vars]
grads_and_vars = list(zip(gradients, [v for g, v in grads_and_vars]))
global_norm = tf.global_norm([g for g, v in grads_and_vars], name='global_norm')
else:
raise Exception('Norm type not recognized')
self.flat_clipped_gradients = tf.concat([tf.reshape(g, [-1]) for g, v in grads_and_vars], axis=0)
self.train_step = self.optimizer.apply_gradients(grads_and_vars)
config = tf.ConfigProto()
if 'gpu' in self.device:
logging.debug('Dynamic gpu mem allocation')
config.gpu_options.allow_growth = True
self.session = tf.Session(config=config)
self.network_saver = tf.train.Saver()
self.optimizer_variables = [var for var in tf.global_variables() if optimizer_variable_names in var.name]
self.optimizer_saver = tf.train.Saver(self.optimizer_variables, max_to_keep=1, name='OptimizerSaver')
# Summaries
variable_summaries(self.flat_raw_gradients, 'raw_gradients')
variable_summaries(self.flat_clipped_gradients, 'clipped_gradients')
tf.summary.scalar('global_norm', global_norm)
def save_vars(self, force=False):
if force or self.global_step - self.last_saving_step >= CHECKPOINT_INTERVAL:
self.last_saving_step = self.global_step
self.network_saver.save(self.session, self.network_checkpoint_folder, global_step=self.last_saving_step)
self.optimizer_saver.save(self.session, self.optimizer_checkpoint_folder, global_step=self.last_saving_step)
def rescale_reward(self, reward):
""" Clip immediate reward """
if reward > 1.0:
reward = 1.0
elif reward < -1.0:
reward = -1.0
return reward
def init_network(self):
import os
if not os.path.exists(self.network_checkpoint_folder):
os.makedirs(self.network_checkpoint_folder)
if not os.path.exists(self.optimizer_checkpoint_folder):
os.makedirs(self.optimizer_checkpoint_folder)
last_saving_step = self.network.init(self.network_checkpoint_folder, self.network_saver, self.session)
path = tf.train.latest_checkpoint(self.optimizer_checkpoint_folder)
if path is not None:
logging.info('Restoring optimizer variables from previous run')
self.optimizer_saver.restore(self.session, path)
return last_saving_step
def get_lr(self):
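        # Linear annealing: lr(step) = initial_lr * (1 - step / lr_annealing_steps)
        # until lr_annealing_steps, then 0. E.g., with the default
        # initial_lr=0.0224 and lr_annealing_steps=8e7, lr at step 4e7 is 0.0112.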
if self.global_step <= self.lr_annealing_steps:
return self.initial_lr - (self.global_step * self.initial_lr / self.lr_annealing_steps)
else:
return 0.0
def cleanup(self):
self.save_vars(True)
self.session.close()
| 5,534 | 41.906977 | 120 | py |
thefloorisdata | thefloorisdata-master/ground_template_from_modis.py | from pyhdf.SD import SD, SDC
import healpy as hp
import numpy as np
from scipy import interpolate
h = 6.62e-34
c = 3e8
k_b = 1.38e-23
T_cmb = 2.72548
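# Radiometric helpers. tb2b evaluates the Planck law,
#     B(nu, T) = (2 h nu^3 / c^2) / (exp(h nu / (k_B T)) - 1),
# the spectral radiance of a blackbody at temperature T, in
# W m^-2 Hz^-1 sr^-1. dBdT evaluates the derivative dB/dT at temperature tb,
# used below to convert band-integrated radiance into CMB thermodynamic
# temperature units.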
def tb2b(tb, nu):
    # Convert blackbody temperature to spectral radiance via the Planck law
x = h*nu/(k_b*tb)
return 2*h*nu**3/c**2/(np.exp(x) - 1)
def dBdT(tb, nu):
x = h*nu/(k_b*tb)
slope = 2*k_b*nu**2/c**2*((x/2)/np.sinh(x/2))**2
return slope
def modis_to_healpix(file, field, temp_scale=1., map_scale=0.05, nside=512):
"""
Changes modis data to a healpix map
Arguments:
----------
    file : pyhdf.SD
        A file with temperature data
    field : str
        Name of the data field to select from the file
    Keyword arguments:
    ----------
    temp_scale : float
        Factor the raw field values are divided by to obtain kelvin (default : 1.)
    map_scale : float
        Separation between two points in the hd5 data in degrees (default : 0.05)
    nside : int
        Healpix nside of the output map (default : 512)
"""
data = file.select(field).get()/temp_scale
    tht = np.radians(np.arange(0, 180, map_scale))
    phi = np.radians(np.arange(0, 360, map_scale))
    theta_modis, phi_modis = np.meshgrid(tht, phi)
    healp_map = np.zeros(hp.nside2npix(nside))
    healp_map[hp.ang2pix(nside, theta_modis, phi_modis)] = np.swapaxes(data, 1, 0)
return healp_map
def rotate_to_point(inmap, lat, lon):
"""
    Rotates a healpix map such that the North Pole is at the point defined by
(lat lon). Returns rotated map
Arguments:
----------
inmap : (12*N*N,) array of floats
Input map in the healpix RING format
lat : float
Latitude of point on the map [-90;90]
lon : float
        Longitude of point on the map [-180;180]
"""
nside = hp.npix2nside(inmap.shape[0])
rlon = np.radians(lon)-np.pi
rlat = np.pi/2.-np.radians(lat)
    x0, y0, z0 = hp.pix2vec(nside, np.arange(hp.nside2npix(nside)))
x1 = x0*np.cos(rlat)+z0*np.sin(rlat)
z = -x0*np.sin(rlat)+z0*np.cos(rlat)
x = x1*np.cos(rlon)-y0*np.sin(rlon)
y = x1*np.sin(rlon)+y0*np.cos(rlon)
    pix_prime = hp.vec2pix(nside, x, y, z)
return inmap[pix_prime]
def telescope_view_angles(nside, h, surf_h=0., R = 6.371e6):
"""
Calculates how the coordinates of a sphere of radius R with a given
nside project on the view of an outside observer located at a distance
h away from the north pole. Returns visible coordinates on the sphere
and corresponding coordinates for the observer.
Arguments:
----------
nside : int
Healpix nside of the input map
h : float
Altitude of observer above reference level in m
Keyword arguments:
----------
surf_h : float
Altitude of ground at pole above reference level (default : 0.)
R : float
Radius of sphere (default : Earth's Radius)
"""
r_ground = R+surf_h
h_abg = h - surf_h
theta_fov = np.arccos((r_ground) / (r_ground + h_abg))
theta, phi = hp.pix2ang(nside, np.arange(hp.nside2npix(nside)))
theta_visible = theta[theta<theta_fov]
phi_visible = phi[theta<theta_fov]
theta_from_tel = np.arctan2(r_ground*np.sin(theta_visible),
r_ground*(1 - np.cos(theta_visible)) + h_abg)
theta_from_tel = np.pi - theta_from_tel #Up is down
phi_from_tel = np.pi - phi_visible #Therefore left is right
return(theta_visible, phi_visible, theta_from_tel, phi_from_tel)
def ground_template(inmap, theta_visible, phi_visible, theta_from_tel,
phi_from_tel, nside_out=128, cmb=True, freq=95.,
frac_bwidth=.2):
"""
Creates a ground template given a world map and sets of coordinates
(see telescope_view_angles) Returns a filled-out ground template.
Arguments:
----------
inmap : (12*N*N,) array of floats
Input map in the healpix RING format
theta_visible : (M,) array of floats
Colatitude of the visible points of inmap in radians
phi_visible : (M,) array of floats
Longitude of the visible points of inmap in radians
theta_from_tel : (M,) array of floats
Correponding colatitude in other coordinate system
phi_from_tel : (M,) array of floats
Correponding longtitude in other coordinate system
Keyword arguments:
----------
nside_out : int
Healpix nside of the output ground template (default : 128)
cmb : bool
Convert the temperatures to CMB temperature units (default : True)
freq : float
Frequency at which temperature is measured, in GHz (default : 95)
frac_bwidth : float
Bandwidth of the measurement, as a fraction of freq (default : 0.2)
"""
nside_world = hp.npix2nside(inmap.shape[0])
ground_map = np.ones(hp.nside2npix(nside_out))*hp.UNSEEN
ground_pix = hp.ang2pix(nside_out, theta_from_tel, phi_from_tel)
ground_map[ground_pix] = inmap[hp.ang2pix(nside_world, theta_visible, phi_visible)]
if cmb:
freq_band=np.linspace(freq*(1-frac_bwidth/2.), freq*(1+frac_bwidth/2.),
201)
for i, tb in enumerate(ground_map):
if tb!=hp.UNSEEN:
bolo = np.trapz(tb2b(tb, freq_band), freq_band)
corr = np.trapz(dBdT(T_cmb, freq_band), freq_band)
ground_map[i] = bolo/corr*1e6
pix_pow = int(np.log2(nside_out))
map_low = hp.ud_grade(ground_map, 2)
for i in range(pix_pow+1):
nside = int(2**i)
map_normal = hp.ud_grade(ground_map, nside)
map_horizon = np.amin(hp.ang2pix(nside, theta_from_tel, phi_visible))
map_normal[map_horizon:] = np.where(
map_normal[map_horizon:]!=hp.UNSEEN, map_normal[map_horizon:],
hp.ud_grade(map_low, nside)[map_horizon:])
map_low = map_normal
return map_normal
def template_from_position(earth_map, lat, lon, h, nside_out=128,
cmb=True, freq=95., frac_bwidth=.2):
"""
Creates a ground template given a world map, a position and an altitude.
Returns a filled-out ground template.
Arguments:
----------
earth_map : (12*N*N,) array of floats
Input map in K, in the healpix RING format
lat : float
Latitude of point on the map [-90;90]
lon : float
        Longitude of point on the map [-180;180]
h : float
Altitude of observer above reference level in m
Keyword arguments:
----------
nside_out : int
Healpix nside of the output ground template (default : 128)
cmb : bool
Convert the temperatures to CMB temperature units (default : True)
freq : float
Frequency at which temperature is measured, in GHz (default : 95)
frac_bwidth : float
Bandwidth of the measurement, as a fraction of freq (default : 0.2)
"""
nside_world = hp.npix2nside(earth_map.shape[0])
earth_rot = rotate_to_point(earth_map, lat, lon)
theta_visible, phi_visible, theta_from_tel, phi_from_tel = telescope_view_angles(
nside_world, h, surf_h=0, R=6.371e6)
ground_temp = ground_template(earth_rot, theta_visible, phi_visible,
theta_from_tel, phi_from_tel,
nside_out=nside_out, cmb=cmb, freq=freq,
frac_bwidth=frac_bwidth)
return ground_temp
| 7,210 | 34.17561 | 87 | py |
brainiak | brainiak-master/setup.py | from distutils import sysconfig
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
import os
import site
import sys
import setuptools
from copy import deepcopy
assert sys.version_info >= (3, 5), (
"Please use Python version 3.5 or higher, "
"lower versions are not supported"
)
# https://github.com/pypa/pip/issues/7953#issuecomment-645133255
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
ext_modules = [
Extension(
'brainiak.factoranalysis.tfa_extension',
['brainiak/factoranalysis/tfa_extension.cpp'],
),
Extension(
'brainiak.fcma.fcma_extension',
['brainiak/fcma/src/fcma_extension.cc'],
),
Extension(
'brainiak.fcma.cython_blas',
['brainiak/fcma/cython_blas.pyx'],
),
Extension(
'brainiak.eventseg._utils',
['brainiak/eventseg/_utils.pyx'],
),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
    C++14 is preferred over C++11 (when it is available).
"""
if has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'unix': ['-g0', '-fopenmp'],
}
# FIXME Workaround for using the Intel compiler by setting the CC env var
# Other uses of ICC (e.g., cc binary linked to icc) are not supported
if (('CC' in os.environ and 'icc' in os.environ['CC'])
or (sysconfig.get_config_var('CC') and 'icc' in sysconfig.get_config_var('CC'))):
c_opts['unix'] += ['-lirc', '-lintlc']
if sys.platform == 'darwin':
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.9',
'-ftemplate-depth-1024']
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' %
self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args = deepcopy(opts)
ext.extra_link_args = deepcopy(opts)
lang = ext.language or self.compiler.detect_language(ext.sources)
if lang == 'c++':
ext.extra_compile_args.append(cpp_flag(self.compiler))
ext.extra_link_args.append(cpp_flag(self.compiler))
build_ext.build_extensions(self)
def finalize_options(self):
super().finalize_options()
import numpy
import pybind11
self.include_dirs.extend([
numpy.get_include(),
pybind11.get_include(user=True),
pybind11.get_include(),
])
setup(
name='brainiak',
use_scm_version=True,
setup_requires=[
'cython',
# https://github.com/numpy/numpy/issues/14189
# https://github.com/brainiak/brainiak/issues/493
'numpy!=1.17.*,<1.20',
'pybind11>=1.7',
'scipy!=1.0.0',
'setuptools_scm',
],
install_requires=[
'cython',
        # Previous versions of the Anaconda package fail on MacOS:
# https://travis-ci.org/brainiak/brainiak/jobs/545838666
'mpi4py>=3',
'nitime',
# https://github.com/numpy/numpy/issues/14189
# https://github.com/brainiak/brainiak/issues/493
'numpy!=1.17.*,<1.20',
'scikit-learn[alldeps]>=0.18',
# See https://github.com/scipy/scipy/pull/8082
'scipy!=1.0.0',
'statsmodels',
'pymanopt',
'theano>=1.0.4', # See https://github.com/Theano/Theano/pull/6671
'pybind11>=1.7',
'psutil',
'nibabel',
'joblib',
'wheel', # See https://github.com/astropy/astropy-helpers/issues/501
'pydicom',
],
extras_require={
'matnormal': [
'tensorflow',
'tensorflow_probability',
],
},
author='Princeton Neuroscience Institute and Intel Corporation',
author_email='mihai.capota@intel.com',
url='http://brainiak.org',
description='Brain Imaging Analysis Kit',
license='Apache 2',
keywords='neuroscience, algorithm, fMRI, distributed, scalable',
long_description=long_description,
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt},
packages=find_packages(),
include_package_data=True,
python_requires='>=3.5',
zip_safe=False,
)
| 5,433 | 30.593023 | 93 | py |
brainiak | brainiak-master/brainiak/image.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic image functionality."""
__all__ = [
"ConditionSpec",
"MaskedMultiSubjectData",
"mask_image",
"mask_images",
"multimask_images",
"SingleConditionSpec",
]
import itertools
from typing import Iterable, Sequence, Type, TypeVar
import numpy as np
from nibabel.spatialimages import SpatialImage
T = TypeVar("T", bound="MaskedMultiSubjectData")
class MaskedMultiSubjectData(np.ndarray):
"""Array with shape n_TRs, n_voxels, n_subjects."""
@classmethod
def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray],
n_subjects: int) -> T:
"""Create a new instance of MaskedMultiSubjecData from masked images.
Parameters
----------
masked_images : iterator
Images from multiple subjects to stack along 3rd dimension
n_subjects : int
Number of subjects; must match the number of images
Returns
-------
T
A new instance of MaskedMultiSubjectData
Raises
------
ValueError
Images have different shapes.
The number of images differs from n_subjects.
"""
images_iterator = iter(masked_images)
first_image = next(images_iterator)
first_image_shape = first_image.T.shape
result = np.empty((first_image_shape[0], first_image_shape[1],
n_subjects))
for n_images, image in enumerate(itertools.chain([first_image],
images_iterator)):
image = image.T
if image.shape != first_image_shape:
raise ValueError("Image {} has different shape from first "
"image: {} != {}".format(n_images,
image.shape,
first_image_shape))
result[:, :, n_images] = image
n_images += 1
if n_images != n_subjects:
raise ValueError("n_subjects != number of images: {} != {}"
.format(n_subjects, n_images))
return result.view(cls)
class ConditionSpec(np.ndarray):
"""One-hot representation of conditions across epochs and TRs.
The shape is (n_conditions, n_epochs, n_trs).
"""
class SingleConditionSpec(ConditionSpec):
"""ConditionSpec with a single condition applicable to an epoch."""
def extract_labels(self) -> np.ndarray:
"""Extract condition labels.
Returns
-------
np.ndarray
The condition label of each epoch.
"""
condition_idxs, epoch_idxs, _ = np.where(self)
_, unique_epoch_idxs = np.unique(epoch_idxs, return_index=True)
return condition_idxs[unique_epoch_idxs]
def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None
) -> np.ndarray:
"""Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes.
"""
image_data = image.get_data()
if image_data.shape[:3] != mask.shape:
raise ValueError("Image data and mask have different shapes.")
if data_type is not None:
cast_data = image_data.astype(data_type)
else:
cast_data = image_data
return cast_data[mask]
def multimask_images(images: Iterable[SpatialImage],
masks: Sequence[np.ndarray], image_type: type = None
) -> Iterable[Sequence[np.ndarray]]:
"""Mask images with multiple masks.
Parameters
----------
images:
Images to mask.
masks:
Masks to apply.
image_type:
Type to cast images to.
Yields
------
Sequence[np.ndarray]
For each mask, a masked image.
"""
for image in images:
yield [mask_image(image, mask, image_type) for mask in masks]
def mask_images(images: Iterable[SpatialImage], mask: np.ndarray,
image_type: type = None) -> Iterable[np.ndarray]:
"""Mask images.
Parameters
----------
images:
Images to mask.
mask:
Mask to apply.
image_type:
Type to cast images to.
Yields
------
np.ndarray
Masked image.
"""
for images in multimask_images(images, (mask,), image_type):
yield images[0]
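if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); requires a
    # nibabel version that still provides the get_data() accessor used above.
    import nibabel as nib
    data = np.random.randn(4, 4, 4, 10)
    img = nib.Nifti1Image(data, affine=np.eye(4))
    mask = np.zeros((4, 4, 4), dtype=bool)
    mask[1:3, 1:3, 1:3] = True
    masked = mask_image(img, mask)
    print(masked.shape)  # -> (8, 10): n_mask_voxels x n_TRs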
| 5,333 | 28.147541 | 77 | py |
brainiak | brainiak-master/brainiak/isc.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Intersubject correlation (ISC) analysis
Functions for computing intersubject correlation (ISC) and related
analyses (e.g., intersubject functional correlations; ISFC), as well
as statistical tests designed specifically for ISC analyses.
The implementation is based on the work in [Hasson2004]_, [Kauppi2014]_,
[Simony2016]_, [Chen2016]_, and [Nastase2019]_.
.. [Chen2016] "Untangling the relatedness among correlations, part I:
nonparametric approaches to inter-subject correlation analysis at the
group level.", G. Chen, Y. W. Shin, P. A. Taylor, D. R. Glen, R. C.
Reynolds, R. B. Israel, R. W. Cox, 2016, NeuroImage, 142, 248-259.
https://doi.org/10.1016/j.neuroimage.2016.05.023
.. [Hasson2004] "Intersubject synchronization of cortical activity
during natural vision.", U. Hasson, Y. Nir, I. Levy, G. Fuhrmann,
R. Malach, 2004, Science, 303, 1634-1640.
https://doi.org/10.1126/science.1089506
.. [Kauppi2014] "A versatile software package for inter-subject
correlation based analyses of fMRI.", J. P. Kauppi, J. Pajula,
J. Tohka, 2014, Frontiers in Neuroinformatics, 8, 2.
https://doi.org/10.3389/fninf.2014.00002
.. [Simony2016] "Dynamic reconfiguration of the default mode network
during narrative comprehension.", E. Simony, C. J. Honey, J. Chen, O.
Lositsky, Y. Yeshurun, A. Wiesel, U. Hasson, 2016, Nature Communications,
7, 12141. https://doi.org/10.1038/ncomms12141
.. [Nastase2019] "Measuring shared responses across subjects using
intersubject correlation." S. A. Nastase, V. Gazzola, U. Hasson,
C. Keysers, 2019, Social Cognitive and Affective Neuroscience, 14,
667-685. https://doi.org/10.1093/scan/nsz037
"""
# Authors: Sam Nastase, Christopher Baldassano, Qihong Lu,
# Mai Nguyen, and Mor Regev
# Princeton University, 2018
import numpy as np
import logging
from scipy.spatial.distance import squareform
from itertools import combinations, permutations, product
from brainiak.fcma.util import compute_correlation
from brainiak.utils.utils import (array_correlation,
phase_randomize,
p_from_null,
_check_timeseries_input)
logger = logging.getLogger(__name__)
__all__ = [
"bootstrap_isc",
"compute_summary_statistic",
"isfc",
"isc",
"permutation_isc",
"phaseshift_isc",
"squareform_isfc",
"timeshift_isc",
]
MAX_RANDOM_SEED = 2**32 - 1
def isc(data, pairwise=False, summary_statistic=None, tolerate_nans=True):
"""Intersubject correlation
For each voxel or ROI, compute the Pearson correlation between each
subject's response time series and other subjects' response time series.
If pairwise is False (default), use the leave-one-out approach, where
correlation is computed between each subject and the average of the other
subjects. If pairwise is True, compute correlations between all pairs of
subjects. If summary_statistic is None, return N ISC values for N subjects
(leave-one-out) or N(N-1)/2 ISC values for each pair of N subjects,
corresponding to the upper triangle of the pairwise correlation matrix
(see scipy.spatial.distance.squareform). Alternatively, use either
'mean' or 'median' to compute summary statistic of ISCs (Fisher Z will
be applied if using mean). Input data should be a n_TRs by n_voxels by
n_subjects array (e.g., brainiak.image.MaskedMultiSubjectData) or a list
where each item is a n_TRs by n_voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a 2D array is supplied,
the last dimension is assumed to correspond to subjects. If only two
subjects are supplied, simply compute Pearson correlation (precludes
averaging in leave-one-out approach, and does not apply summary statistic).
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Output is an ndarray where the first dimension is the number of
subjects or pairs and the second dimension is the number of voxels (or
ROIs). If only two subjects are supplied or a summary statistic is invoked,
the output is an ndarray n_voxels long.
The implementation is based on the work in [Hasson2004]_.
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : None or str, default: None
Return all ISCs or collapse using 'mean' or 'median'
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
Returns
-------
iscs : subjects or pairs by voxels ndarray
ISC for each subject or pair (or summary statistic) per voxel
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# No summary statistic if only two subjects
if n_subjects == 2:
logger.info("Only two subjects! Simply computing Pearson correlation.")
summary_statistic = None
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans:
mean = np.nanmean
else:
mean = np.mean
data, mask = _threshold_nans(data, tolerate_nans)
# Compute correlation for only two participants
if n_subjects == 2:
# Compute correlation for each corresponding voxel
iscs_stack = array_correlation(data[..., 0],
data[..., 1])[np.newaxis, :]
# Compute pairwise ISCs using voxel loop and corrcoef for speed
elif pairwise:
# Swap axes for np.corrcoef
data = np.swapaxes(data, 2, 0)
# Loop through voxels
voxel_iscs = []
for v in np.arange(data.shape[1]):
voxel_data = data[:, v, :]
# Correlation matrix for all pairs of subjects (triangle)
iscs = squareform(np.corrcoef(voxel_data), checks=False)
voxel_iscs.append(iscs)
iscs_stack = np.column_stack(voxel_iscs)
# Compute leave-one-out ISCs
elif not pairwise:
# Loop through left-out subjects
iscs_stack = []
for s in np.arange(n_subjects):
# Correlation between left-out subject and mean of others
iscs_stack.append(array_correlation(
data[..., s],
mean(np.delete(data, s, axis=2), axis=2)))
iscs_stack = np.array(iscs_stack)
# Get ISCs back into correct shape after masking out NaNs
iscs = np.full((iscs_stack.shape[0], n_voxels), np.nan)
iscs[:, np.where(mask)[0]] = iscs_stack
# Summarize results (if requested)
if summary_statistic:
iscs = compute_summary_statistic(iscs,
summary_statistic=summary_statistic,
axis=0)[np.newaxis, :]
# Throw away first dimension if singleton
if iscs.shape[0] == 1:
iscs = iscs[0]
return iscs
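# Minimal usage sketch for isc (synthetic data, not from the references above):
#
#     data = np.random.randn(300, 1000, 20)  # n_TRs x n_voxels x n_subjects
#     iscs = isc(data, pairwise=False, summary_statistic='median')
#     iscs.shape  # -> (1000,): one summary ISC value per voxel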
def isfc(data, targets=None, pairwise=False, summary_statistic=None,
vectorize_isfcs=True, tolerate_nans=True):
"""Intersubject functional correlation (ISFC)
For each input voxel or ROI, compute the Pearson correlation between each
subject's response time series and all input voxels or ROIs in other
subjects. If a targets array is provided, instead compute ISFCs between
each input voxel time series and each voxel time series in targets across
subjects (resulting in asymmetric ISFC values). The targets array must have
the same number TRs and subjects as the input data. If pairwise is False
(default), use the leave-one-out approach, where correlation is computed
between each subject and the average of the other subjects. If pairwise is
True, compute correlations between all pairs of subjects. If a targets
array is provided, only the leave-one-out approach is supported. If
summary_statistic is None, return N ISFC values for N subjects (leave-one-
out) or N(N-1)/2 ISFC values for each pair of N subjects, corresponding to
the triangle of the correlation matrix (scipy.spatial.distance.squareform).
Alternatively, use either 'mean' or 'median' to compute summary statistic
of ISFCs (Fisher Z is applied if using mean). Input should be n_TRs by
n_voxels by n_subjects array (e.g., brainiak.image.MaskedMultiSubjectData)
or a list where each item is a n_TRs by n_voxels ndarray per subject.
Multiple input ndarrays must be the same shape. If a 2D array is supplied,
the last dimension is assumed to correspond to subjects. If only two
subjects are supplied, simply compute ISFC between these two subjects
(precludes averaging in leave-one-out approach, and does not apply summary
statistic). Returns vectorized upper triangle of ISFC matrices for each
subject or pair when vectorized_isfcs=True, or full (redundant) 2D ISFC
matrices when vectorized_isfcs=False. When using leave-one-out approach,
NaNs are ignored when computing mean time series of N-1 subjects (default:
tolerate_nans=True). Alternatively, you may supply a float between 0 and
1 indicating a threshold proportion of N subjects with non-NaN values
required when computing the average time series for a given voxel. For
example, if tolerate_nans=.8, ISCs will be computed for any voxel where
>= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN
values will be assigned NaNs. If set to False, NaNs are not tolerated
and voxels with one or more NaNs among the N-1 subjects will be assigned
NaN. Setting tolerate_nans to True or False will not affect the pairwise
approach; however, if a threshold float is provided, voxels that do not
reach this threshold will be excluded. Note that accommodating NaNs may
be notably slower than setting tolerate_nans to False. Output is either
a tuple comprising condensed off-diagonal ISFC values and the diagonal
ISC values if vectorize_isfcs=True, or a single ndarray with shape
n_subjects (or n_pairs) by n_voxels by n_voxels 3D array if
vectorize_isfcs=False (see brainiak.isc.squareform_isfc). If targets array
is provided (yielding asymmetric ISFCs), output ISFCs are not vectorized,
resulting in an n_subjects by n_voxels by n_targets ISFC array. If
summary_statistic is supplied, output is collapsed along first dimension.
The implementation is based on the work in [Simony2016]_.
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
targets : list or ndarray (n_TRs x n_voxels x n_subjects), optional
fMRI data to use as targets for ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : None or str, default: None
Return all ISFCs or collapse using 'mean' or 'median'
vectorize_isfcs : bool, default: True
Return tuple of condensed ISFCs and ISCs (True) or square (redundant)
ISFCs (False)
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
Returns
-------
isfcs : ndarray or tuple of ndarrays
ISFCs for each subject or pair (or summary statistic) per voxel pair
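    Examples
    --------
    A minimal sketch on random data (shapes below are illustrative):
    >>> import numpy as np
    >>> data = np.random.randn(100, 10, 5)  # TRs x voxels x subjects
    >>> isfcs, iscs = isfc(data, pairwise=False, vectorize_isfcs=True)
    >>> isfcs.shape, iscs.shape
    ((5, 45), (5, 10))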
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Check for optional targets input array
    targets, t_n_TRs, t_n_voxels, t_n_subjects, symmetric = (
_check_targets_input(targets, data))
if not symmetric:
pairwise = False
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans:
mean = np.nanmean
else:
mean = np.mean
data, mask = _threshold_nans(data, tolerate_nans)
targets, targets_mask = _threshold_nans(targets, tolerate_nans)
# Handle just two subjects properly (for symmetric approach)
if symmetric and n_subjects == 2:
isfcs = compute_correlation(np.ascontiguousarray(data[..., 0].T),
np.ascontiguousarray(data[..., 1].T),
return_nans=True)
isfcs = (isfcs + isfcs.T) / 2
isfcs = isfcs[..., np.newaxis]
summary_statistic = None
logger.info("Only two subjects! Computing ISFC between them.")
# Compute all pairwise ISFCs (only for symmetric approach)
elif pairwise:
isfcs = []
for pair in combinations(np.arange(n_subjects), 2):
isfc_pair = compute_correlation(np.ascontiguousarray(
data[..., pair[0]].T),
np.ascontiguousarray(
targets[..., pair[1]].T),
return_nans=True)
if symmetric:
isfc_pair = (isfc_pair + isfc_pair.T) / 2
isfcs.append(isfc_pair)
isfcs = np.dstack(isfcs)
# Compute ISFCs using leave-one-out approach
else:
# Roll subject axis for loop
data = np.rollaxis(data, 2, 0)
targets = np.rollaxis(targets, 2, 0)
# Compute leave-one-out ISFCs
isfcs = [compute_correlation(np.ascontiguousarray(subject.T),
np.ascontiguousarray(mean(
np.delete(targets, s, axis=0),
axis=0).T),
return_nans=True)
for s, subject in enumerate(data)]
# Transpose and average ISFC matrices for both directions
isfcs = np.dstack([(isfc_matrix + isfc_matrix.T) / 2 if
symmetric else isfc_matrix for
isfc_matrix in isfcs])
# Get ISCs back into correct shape after masking out NaNs
isfcs_all = np.full((n_voxels, t_n_voxels, isfcs.shape[2]), np.nan)
isfcs_all[np.ix_(np.where(mask)[0], np.where(targets_mask)[0])] = isfcs
isfcs = np.moveaxis(isfcs_all, 2, 0)
# Summarize results (if requested)
if summary_statistic:
isfcs = compute_summary_statistic(isfcs,
summary_statistic=summary_statistic,
axis=0)
# Throw away first dimension if singleton
if isfcs.shape[0] == 1:
isfcs = isfcs[0]
# Optionally squareform to vectorize ISFC matrices (only if symmetric)
if vectorize_isfcs and symmetric:
isfcs, iscs = squareform_isfc(isfcs)
return isfcs, iscs
else:
return isfcs
def _check_isc_input(iscs, pairwise=False):
"""Checks ISC inputs for statistical tests
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array or a 1D
array (or list) of ISC values for a single voxel or ROI. This
function is only intended to be used internally by other
functions in this module (e.g., bootstrap_isc, permutation_isc).
Parameters
----------
iscs : ndarray or list
ISC values
Returns
-------
iscs : ndarray
Array of ISC values
n_subjects : int
Number of subjects
n_voxels : int
Number of voxels (or ROIs)
"""
# Standardize structure of input data
if type(iscs) == list:
iscs = np.array(iscs)[:, np.newaxis]
elif isinstance(iscs, np.ndarray):
if iscs.ndim == 1:
iscs = iscs[:, np.newaxis]
# Check if incoming pairwise matrix is vectorized triangle
if pairwise:
try:
test_square = squareform(iscs[:, 0], force='tomatrix')
n_subjects = test_square.shape[0]
except ValueError:
raise ValueError("For pairwise input, ISCs must be the "
"vectorized triangle of a square matrix.")
elif not pairwise:
n_subjects = iscs.shape[0]
# Infer subjects, voxels and print for user to check
n_voxels = iscs.shape[1]
logger.info("Assuming {0} subjects with and {1} "
"voxel(s) or ROI(s) in bootstrap ISC test.".format(n_subjects,
n_voxels))
return iscs, n_subjects, n_voxels
def _check_targets_input(targets, data):
"""Checks ISFC targets input array
For ISFC analysis, targets input array should either be a list
of n_TRs by n_targets arrays (where each array corresponds to
a subject), or an n_TRs by n_targets by n_subjects ndarray. This
function also checks the shape of the targets array against the
input data array.
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
targets : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data to use as targets for ISFC
Returns
-------
targets : ndarray (n_TRs x n_voxels x n_subjects)
        ISFC targets with standardized structure
n_TRs : int
Number of time points (TRs) for targets array
n_voxels : int
Number of voxels (or ROIs) for targets array
n_subjects : int
Number of subjects for targets array
symmetric : bool
Indicator for symmetric vs. asymmetric
"""
if isinstance(targets, np.ndarray) or isinstance(targets, list):
targets, n_TRs, n_voxels, n_subjects = (
_check_timeseries_input(targets))
if data.shape[0] != n_TRs:
raise ValueError("Targets array must have same number of "
"TRs as input data")
if data.shape[2] != n_subjects:
raise ValueError("Targets array must have same number of "
"subjects as input data")
symmetric = False
else:
targets = data
n_TRs, n_voxels, n_subjects = data.shape
symmetric = True
return targets, n_TRs, n_voxels, n_subjects, symmetric
def compute_summary_statistic(iscs, summary_statistic='mean', axis=None):
"""Computes summary statistics for ISCs
Computes either the 'mean' or 'median' across a set of ISCs. In the
case of the mean, ISC values are first Fisher Z transformed (arctanh),
averaged, then inverse Fisher Z transformed (tanh).
The implementation is based on the work in [SilverDunlap1987]_.
.. [SilverDunlap1987] "Averaging correlation coefficients: should
Fisher's z transformation be used?", N. C. Silver, W. P. Dunlap, 1987,
Journal of Applied Psychology, 72, 146-148.
https://doi.org/10.1037/0021-9010.72.1.146
Parameters
----------
iscs : list or ndarray
ISC values
summary_statistic : str, default: 'mean'
Summary statistic, 'mean' or 'median'
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
Returns
-------
statistic : float or ndarray
Summary statistic of ISC values
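    Examples
    --------
    Median across three subjects for two voxels:
    >>> import numpy as np
    >>> iscs = np.array([[.2, .4], [.3, .5], [.6, .1]])
    >>> compute_summary_statistic(iscs, summary_statistic='median', axis=0)
    array([0.3, 0.4])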
"""
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic
if summary_statistic == 'mean':
statistic = np.tanh(np.nanmean(np.arctanh(iscs), axis=axis))
elif summary_statistic == 'median':
statistic = np.nanmedian(iscs, axis=axis)
return statistic
def squareform_isfc(isfcs, iscs=None):
"""Converts square ISFCs to condensed ISFCs (and ISCs), and vice-versa
If input is a 2- or 3-dimensional array of square ISFC matrices, converts
this to the condensed off-diagonal ISFC values (i.e., the vectorized
triangle) and the diagonal ISC values. In this case, input must be a
single array of shape either n_voxels x n_voxels or n_subjects (or
n_pairs) x n_voxels x n_voxels. The condensed ISFC values are vectorized
according to scipy.spatial.distance.squareform, yielding n_voxels *
(n_voxels - 1) / 2 values comprising every voxel pair. Alternatively, if
input is an array of condensed off-diagonal ISFC values and an array of
diagonal ISC values, the square (redundant) ISFC values are returned.
This function mimics scipy.spatial.distance.squareform, but is intended
to retain the diagonal ISC values.
Parameters
----------
isfcs : ndarray
Either condensed or redundant ISFC values
iscs: ndarray, optional
Diagonal ISC values, required when input is condensed
Returns
-------
isfcs : ndarray or tuple of ndarrays
If condensed ISFCs are passed, a single redundant ISFC array is
returned; if redundant ISFCs are passed, both a condensed off-
diagonal ISFC array and the diagonal ISC values are returned
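    Examples
    --------
    Round trip between redundant and condensed forms (random symmetric
    matrices for illustration):
    >>> import numpy as np
    >>> sq = np.random.randn(5, 10, 10)
    >>> sq = (sq + np.swapaxes(sq, 1, 2)) / 2  # symmetrize
    >>> isfcs, iscs = squareform_isfc(sq)
    >>> isfcs.shape, iscs.shape
    ((5, 45), (5, 10))
    >>> np.allclose(squareform_isfc(isfcs, iscs), sq)
    True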
"""
# Check if incoming ISFCs are square (redundant)
if not type(iscs) == np.ndarray and isfcs.shape[-2] == isfcs.shape[-1]:
if isfcs.ndim == 2:
isfcs = isfcs[np.newaxis, ...]
if isfcs.ndim == 3:
iscs = np.diagonal(isfcs, axis1=1, axis2=2)
isfcs = np.vstack([squareform(isfc, checks=False)[np.newaxis, :]
for isfc in isfcs])
else:
raise ValueError("Square (redundant) ISFCs must be square "
"with multiple subjects or pairs of subjects "
"indexed by the first dimension")
if isfcs.shape[0] == iscs.shape[0] == 1:
isfcs, iscs = isfcs[0], iscs[0]
return isfcs, iscs
# Otherwise, convert from condensed to redundant
else:
if isfcs.ndim == iscs.ndim == 1:
isfcs, iscs = isfcs[np.newaxis, :], iscs[np.newaxis, :]
isfcs_stack = []
for isfc, isc in zip(isfcs, iscs):
isfc_sq = squareform(isfc, checks=False)
np.fill_diagonal(isfc_sq, isc)
isfcs_stack.append(isfc_sq[np.newaxis, ...])
isfcs = np.vstack(isfcs_stack)
if isfcs.shape[0] == 1:
isfcs = isfcs[0]
return isfcs
def _threshold_nans(data, tolerate_nans):
"""Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold
"""
nans = np.all(np.any(np.isnan(data), axis=0), axis=1)
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans is True:
logger.info("ISC computation will tolerate all NaNs when averaging")
elif type(tolerate_nans) is float:
if not 0.0 <= tolerate_nans <= 1.0:
raise ValueError("If threshold to tolerate NaNs is a float, "
"it must be between 0.0 and 1.0; got {0}".format(
tolerate_nans))
nans += ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1) >=
data.shape[-1] * tolerate_nans)
logger.info("ISC computation will tolerate voxels with at least "
"{0} non-NaN values: {1} voxels do not meet "
"threshold".format(tolerate_nans,
np.sum(nans)))
else:
logger.info("ISC computation will not tolerate NaNs when averaging")
mask = ~nans
data = data[:, mask, :]
return data, mask
def bootstrap_isc(iscs, pairwise=False, summary_statistic='median',
n_bootstraps=1000, ci_percentile=95, side='right',
random_state=None):
"""One-sample group-level bootstrap hypothesis test for ISCs
    For ISCs from one or more voxels or ROIs, resample subjects with replacement
to construct a bootstrap distribution. Input is a list or ndarray of
ISCs for a single voxel/ROI, or an ISCs-by-voxels ndarray. ISC values
    should be either N ISC values for N subjects in the leave-one-out approach
    (pairwise=False) or N(N-1)/2 ISC values for N subjects in the pairwise
approach (pairwise=True). In the pairwise approach, ISC values should
correspond to the vectorized upper triangle of a square correlation matrix
    (see scipy.spatial.distance.squareform). Shifts bootstrap distribution by
actual summary statistic (effectively to zero) for null hypothesis test
(Hall & Wilson, 1991). Uses subject-wise (not pair-wise) resampling in the
pairwise approach. Returns the observed ISC, the confidence interval, and
a p-value for the bootstrap hypothesis test, as well as the bootstrap
distribution of summary statistics. The p-value corresponds to either a
'two-sided', 'left'-, or 'right'-sided (default) test, as specified by
side. According to Chen et al., 2016, this is the preferred nonparametric
approach for controlling false positive rates (FPRs) for one-sample tests
in the pairwise approach. Note that the bootstrap hypothesis test may not
strictly control FPRs in the leave-one-out approach.
The implementation is based on the work in [Chen2016]_ and
[HallWilson1991]_.
.. [HallWilson1991] "Two guidelines for bootstrap hypothesis testing.",
       P. Hall, S. R. Wilson, 1991, Biometrics, 757-762.
https://doi.org/10.2307/2532163
Parameters
----------
iscs : list or ndarray, ISCs by voxels array
ISC values for one or more voxels
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs structure
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_bootstraps : int, default: 1000
Number of bootstrap samples (subject-level with replacement)
ci_percentile : int, default: 95
Percentile for computing confidence intervals
side : str
Perform one-sided ('left' or 'right') or 'two-sided' test
    random_state : int or None, default: None
Initial random seed
Returns
-------
observed : float, median (or mean) ISC value
Summary statistic for actual ISCs
ci : tuple, bootstrap confidence intervals
Confidence intervals generated from bootstrap distribution
p : float, p-value
p-value based on bootstrap hypothesis test
distribution : ndarray, n_bootstraps by voxels
Bootstrap distribution
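    Examples
    --------
    A sketch with leave-one-out ISCs for 20 subjects and 10 voxels (random
    data, so the resulting p-values are meaningless):
    >>> import numpy as np
    >>> iscs = np.random.default_rng(0).uniform(-.2, .8, (20, 10))
    >>> observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=False,
    ...                                               n_bootstraps=100)
    >>> distribution.shape
    (100, 10)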
"""
# Standardize structure of input data
iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise)
# Check for valid summary statistic
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic for observed ISCs
observed = compute_summary_statistic(iscs,
summary_statistic=summary_statistic,
axis=0)
# Set up an empty list to build our bootstrap distribution
distribution = []
# Loop through n bootstrap iterations and populate distribution
for i in np.arange(n_bootstraps):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Randomly sample subject IDs with replacement
subject_sample = sorted(prng.choice(np.arange(n_subjects),
size=n_subjects))
        # Squareform and shuffle rows/columns of pairwise ISC matrix
        # to retain correlation structure among ISCs, then get triangle
if pairwise:
# Loop through voxels
isc_sample = []
for voxel_iscs in iscs.T:
# Square the triangle and fill diagonal
voxel_iscs = squareform(voxel_iscs, force='tomatrix')
np.fill_diagonal(voxel_iscs, 1)
# Shuffle square correlation matrix and get triangle
voxel_sample = voxel_iscs[subject_sample, :][:, subject_sample]
voxel_sample = squareform(voxel_sample, checks=False)
# Censor off-diagonal 1s for same-subject pairs
                voxel_sample[voxel_sample == 1.] = np.nan
isc_sample.append(voxel_sample)
isc_sample = np.column_stack(isc_sample)
# Get simple bootstrap sample if not pairwise
elif not pairwise:
isc_sample = iscs[subject_sample, :]
# Compute summary statistic for bootstrap ISCs per voxel
# (alternatively could construct distribution for all voxels
# then compute statistics, but larger memory footprint)
distribution.append(compute_summary_statistic(
isc_sample,
summary_statistic=summary_statistic,
axis=0))
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.array(distribution)
# Compute CIs of median from bootstrap distribution (default: 95%)
ci = (np.percentile(distribution, (100 - ci_percentile)/2, axis=0),
np.percentile(distribution, ci_percentile + (100 - ci_percentile)/2,
axis=0))
# Shift bootstrap distribution to 0 for hypothesis test
shifted = distribution - observed
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, shifted,
side=side, exact=False,
axis=0)
return observed, ci, p, distribution
def _check_group_assignment(group_assignment, n_subjects):
if type(group_assignment) == list:
pass
elif type(group_assignment) == np.ndarray:
group_assignment = group_assignment.tolist()
else:
logger.info("No group assignment provided, "
"performing one-sample test.")
if group_assignment and len(group_assignment) != n_subjects:
raise ValueError("Group assignments ({0}) "
"do not match number of subjects ({1})!".format(
len(group_assignment), n_subjects))
return group_assignment
def _get_group_parameters(group_assignment, n_subjects, pairwise=False):
# Set up dictionary to contain group info
group_parameters = {'group_assignment': group_assignment,
'n_subjects': n_subjects,
'group_labels': None, 'groups': None,
'sorter': None, 'unsorter': None,
'group_matrix': None, 'group_selector': None}
# Set up group selectors for two-group scenario
if group_assignment and len(np.unique(group_assignment)) == 2:
group_parameters['n_groups'] = 2
# Get group labels and counts
group_labels = np.unique(group_assignment)
groups = {group_labels[0]: group_assignment.count(group_labels[0]),
group_labels[1]: group_assignment.count(group_labels[1])}
# For two-sample pairwise approach set up selector from matrix
if pairwise:
# Sort the group_assignment variable if it came in shuffled
# so it's easier to build group assignment matrix
sorter = np.array(group_assignment).argsort()
unsorter = np.array(group_assignment).argsort().argsort()
# Populate a matrix with group assignments
upper_left = np.full((groups[group_labels[0]],
groups[group_labels[0]]),
group_labels[0])
upper_right = np.full((groups[group_labels[0]],
groups[group_labels[1]]),
np.nan)
lower_left = np.full((groups[group_labels[1]],
groups[group_labels[0]]),
np.nan)
lower_right = np.full((groups[group_labels[1]],
groups[group_labels[1]]),
group_labels[1])
group_matrix = np.vstack((np.hstack((upper_left, upper_right)),
np.hstack((lower_left, lower_right))))
np.fill_diagonal(group_matrix, np.nan)
group_parameters['group_matrix'] = group_matrix
# Unsort matrix and squareform to create selector
group_parameters['group_selector'] = squareform(
group_matrix[unsorter, :][:, unsorter],
checks=False)
group_parameters['sorter'] = sorter
group_parameters['unsorter'] = unsorter
        # If leave-one-out approach, just use the group assignment as selector
else:
group_parameters['group_selector'] = group_assignment
# Save these parameters for later
group_parameters['groups'] = groups
group_parameters['group_labels'] = group_labels
# Manage one-sample and incorrect group assignments
elif not group_assignment or len(np.unique(group_assignment)) == 1:
group_parameters['n_groups'] = 1
# If pairwise initialize matrix of ones for sign-flipping
if pairwise:
group_parameters['group_matrix'] = np.ones((
group_parameters['n_subjects'],
group_parameters['n_subjects']))
elif len(np.unique(group_assignment)) > 2:
raise ValueError("This test is not valid for more than "
"2 groups! (got {0})".format(
len(np.unique(group_assignment))))
else:
raise ValueError("Invalid group assignments!")
return group_parameters
def _permute_one_sample_iscs(iscs, group_parameters, i, pairwise=False,
summary_statistic='median', group_matrix=None,
exact_permutations=None, prng=None):
"""Applies one-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
    prng : None or np.random.RandomState, default: None
Initial random seed
Returns
-------
isc_sample : ndarray
Array of permuted ISC values
"""
# Randomized sign-flips
if exact_permutations:
sign_flipper = np.array(exact_permutations[i])
else:
sign_flipper = prng.choice([-1, 1],
size=group_parameters['n_subjects'],
replace=True)
# If pairwise, apply sign-flips by rows and columns
if pairwise:
matrix_flipped = (group_parameters['group_matrix'] * sign_flipper
* sign_flipper[
:, np.newaxis])
sign_flipper = squareform(matrix_flipped, checks=False)
# Apply flips along ISC axis (same across voxels)
isc_flipped = iscs * sign_flipper[:, np.newaxis]
# Get summary statistics on sign-flipped ISCs
isc_sample = compute_summary_statistic(
isc_flipped,
summary_statistic=summary_statistic,
axis=0)
return isc_sample
def _permute_two_sample_iscs(iscs, group_parameters, i, pairwise=False,
summary_statistic='median',
exact_permutations=None, prng=None):
"""Applies two-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
    prng : None or np.random.RandomState, default: None
        Initial random seed
Returns
-------
isc_sample : ndarray
Array of permuted ISC values
"""
# Shuffle the group assignments
if exact_permutations:
group_shuffler = np.array(exact_permutations[i])
elif not exact_permutations and pairwise:
group_shuffler = prng.permutation(np.arange(
len(np.array(group_parameters['group_assignment'])[
group_parameters['sorter']])))
elif not exact_permutations and not pairwise:
group_shuffler = prng.permutation(np.arange(
len(group_parameters['group_assignment'])))
# If pairwise approach, convert group assignments to matrix
if pairwise:
# Apply shuffler to group matrix rows/columns
group_shuffled = group_parameters['group_matrix'][
group_shuffler, :][:, group_shuffler]
# Unsort shuffled matrix and squareform to create selector
group_selector = squareform(group_shuffled[
group_parameters['unsorter'], :]
[:, group_parameters['unsorter']],
checks=False)
# Shuffle group assignments in leave-one-out two sample test
elif not pairwise:
# Apply shuffler to group matrix rows/columns
group_selector = np.array(
group_parameters['group_assignment'])[group_shuffler]
# Get difference of within-group summary statistics
# with group permutation
isc_sample = (compute_summary_statistic(
iscs[group_selector == group_parameters[
'group_labels'][0], :],
summary_statistic=summary_statistic,
axis=0) -
compute_summary_statistic(
iscs[group_selector == group_parameters[
'group_labels'][1], :],
summary_statistic=summary_statistic,
axis=0))
return isc_sample
def permutation_isc(iscs, group_assignment=None, pairwise=False, # noqa: C901
summary_statistic='median', n_permutations=1000,
side='right', random_state=None):
"""Group-level permutation test for ISCs
For ISCs from one or more voxels or ROIs, permute group assignments to
construct a permutation distribution. Input is a list or ndarray of
ISCs for a single voxel/ROI, or an ISCs-by-voxels ndarray. In the
leave-one-out approach, ISC values for two groups should be stacked
along first dimension (vertically) and a group_assignment list (or 1d
array) of same length as the number of subjects should be provided to
indicate groups. In the pairwise approach, pairwise ISCs should have
been computed across both groups at once; i.e. the pairwise ISC matrix
should be shaped N x N where N is the total number of subjects across
both groups, and should contain between-group ISC pairs. Pairwise ISC
input should correspond to the vectorized upper triangle of the square
pairwise ISC correlation matrix containing both groups. In the pairwise
approach, group_assignment order should match the row/column order of the
subject-by-subject square ISC matrix even though the input ISCs should be
supplied as the vectorized upper triangle of the square ISC matrix. If no
group_assignment is provided, one-sample test is performed using a sign-
flipping procedure. Performs exact test if number of possible permutations
(2**N for one-sample sign-flipping, N! for two-sample shuffling) is less
    than or equal to the number of requested permutations; otherwise, performs
approximate permutation test using Monte Carlo resampling. ISC values
should either be N ISC values for N subjects in the leave-one-out approach
(pairwise=False) or N(N-1)/2 ISC values for N subjects in the pairwise
approach (pairwise=True). Returns the observed ISC and permutation-based
p-value as well as the permutation distribution of summary statistic.
The p-value corresponds to either a 'two-sided', 'left'-, or 'right'-sided
(default) test, as specified by side. According to Chen et al., 2016,
this is the preferred nonparametric approach for controlling false
positive rates (FPRs) for two-sample tests. Note that the permutation test
may not strictly control FPRs for one-sample tests.
The implementation is based on the work in [Chen2016]_.
Parameters
----------
    iscs : list or ndarray, ISCs by voxels array
ISC values for one or more voxels
group_assignment : list or ndarray, group labels
Group labels matching order of ISC input
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_permutations : int, default: 1000
        Number of permutation iterations (randomizing group assignment)
side : str
Perform one-sided ('left' or 'right') or 'two-sided' test
    random_state : int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, ISC summary statistic or difference
Actual ISC or group difference (excluding between-group ISCs)
p : float, p-value
p-value based on permutation test
distribution : ndarray, n_permutations by voxels
Permutation distribution
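    Examples
    --------
    A two-sample sketch in the leave-one-out approach (random data and
    illustrative group labels):
    >>> import numpy as np
    >>> iscs = np.random.randn(20, 5) * .1 + .3
    >>> group_assignment = [0] * 10 + [1] * 10
    >>> observed, p, distribution = permutation_isc(
    ...     iscs, group_assignment=group_assignment, n_permutations=200)
    >>> distribution.shape
    (200, 5)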
"""
# Standardize structure of input data
iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise)
# Check for valid summary statistic
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Check match between group labels and ISCs
group_assignment = _check_group_assignment(group_assignment,
n_subjects)
# Get group parameters
group_parameters = _get_group_parameters(group_assignment, n_subjects,
pairwise=pairwise)
# Set up permutation type (exact or Monte Carlo)
if group_parameters['n_groups'] == 1:
if n_permutations < 2**n_subjects:
logger.info("One-sample approximate permutation test using "
"sign-flipping procedure with Monte Carlo resampling.")
exact_permutations = None
else:
logger.info("One-sample exact permutation test using "
"sign-flipping procedure with 2**{0} "
"({1}) iterations.".format(n_subjects,
2**n_subjects))
exact_permutations = list(product([-1, 1], repeat=n_subjects))
n_permutations = 2**n_subjects
# Check for exact test for two groups
else:
if n_permutations < np.math.factorial(n_subjects):
logger.info("Two-sample approximate permutation test using "
"group randomization with Monte Carlo resampling.")
exact_permutations = None
else:
logger.info("Two-sample exact permutation test using group "
"randomization with {0}! "
"({1}) iterations.".format(
n_subjects,
np.math.factorial(n_subjects)))
exact_permutations = list(permutations(
np.arange(len(group_assignment))))
n_permutations = np.math.factorial(n_subjects)
# If one group, just get observed summary statistic
if group_parameters['n_groups'] == 1:
observed = compute_summary_statistic(
iscs,
summary_statistic=summary_statistic,
axis=0)[np.newaxis, :]
# If two groups, get the observed difference
else:
observed = (compute_summary_statistic(
iscs[group_parameters['group_selector'] ==
group_parameters['group_labels'][0], :],
summary_statistic=summary_statistic,
axis=0) -
compute_summary_statistic(
iscs[group_parameters['group_selector'] ==
group_parameters['group_labels'][1], :],
summary_statistic=summary_statistic,
axis=0))
observed = np.array(observed)
# Set up an empty list to build our permutation distribution
distribution = []
# Loop through n permutation iterations and populate distribution
for i in np.arange(n_permutations):
# Random seed to be deterministically re-randomized at each iteration
if exact_permutations:
prng = None
elif isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# If one group, apply sign-flipping procedure
if group_parameters['n_groups'] == 1:
isc_sample = _permute_one_sample_iscs(
iscs, group_parameters, i,
pairwise=pairwise,
summary_statistic=summary_statistic,
exact_permutations=exact_permutations,
prng=prng)
        # If two groups, permute group assignments and get the difference
else:
isc_sample = _permute_two_sample_iscs(
iscs, group_parameters, i,
pairwise=pairwise,
summary_statistic=summary_statistic,
exact_permutations=exact_permutations,
prng=prng)
# Tack our permuted ISCs onto the permutation distribution
distribution.append(isc_sample)
# Update random state for next iteration
if not exact_permutations:
random_state = np.random.RandomState(prng.randint(
0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.array(distribution)
# Get p-value for actual median from shifted distribution
if exact_permutations:
p = p_from_null(observed, distribution,
side=side, exact=True,
axis=0)
else:
p = p_from_null(observed, distribution,
side=side, exact=False,
axis=0)
return observed, p, distribution
def timeshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, side='right', tolerate_nans=True,
random_state=None):
"""Circular time-shift randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are first circularly shifted by random intervals. If pairwise,
    apply time-shift randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), apply
the random time-shift to only the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values, as well as the null
distribution of ISCs computed on randomly time-shifted data. The p-value
corresponds to either a 'two-sided', 'left'-, or 'right'-sided (default)
test, as specified by side. Note that circular time-shift randomization
may not strictly control false positive rates (FPRs).
The implementation is based on the work in [Kauppi2010]_ and
[Kauppi2014]_.
.. [Kauppi2010] "Inter-subject correlation of brain hemodynamic
responses during watching a movie: localization in space and
frequency.", J. P. Kauppi, I. P. Jääskeläinen, M. Sams, J. Tohka,
2010, Frontiers in Neuroinformatics, 4, 5.
https://doi.org/10.3389/fninf.2010.00005
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
        fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
side : str
Perform one-sided ('left' or 'right') or 'two-sided' test
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
    random_state : int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, n_shifts by voxels
Time-shifted null distribution
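    Examples
    --------
    A minimal sketch on random data (shapes are illustrative):
    >>> import numpy as np
    >>> data = np.random.randn(100, 10, 5)  # TRs x voxels x subjects
    >>> observed, p, distribution = timeshift_isc(data, n_shifts=100)
    >>> distribution.shape
    (100, 10)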
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Roll axis to get subjects in first dimension for loop
if pairwise:
data = np.rollaxis(data, 2, 0)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get a random set of shifts based on number of TRs
shifts = prng.choice(np.arange(n_TRs), size=n_subjects,
replace=True)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Apply circular shift to each subject's time series
shifted_data = []
for subject, shift in zip(data, shifts):
shifted_data.append(np.concatenate(
(subject[-shift:, :],
subject[:-shift, :])))
shifted_data = np.dstack(shifted_data)
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
shifted_isc = []
for s, shift in enumerate(shifts):
shifted_subject = np.concatenate((data[-shift:, :, s],
data[:-shift, :, s]))
nonshifted_mean = np.mean(np.delete(data, s, 2), axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False,
summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic,
axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side=side, exact=False,
axis=0)
return observed, p, distribution
def phaseshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, side='right', tolerate_nans=True,
random_state=None):
"""Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans
to False. Returns the observed ISC and p-values, as well as the null
distribution of ISCs computed on phase-randomized data. The p-value
corresponds to either a 'two-sided', 'left'-, or 'right'-sided (default)
test, as specified by side. Note that phase randomization may not
strictly control false positive rates (FPRs).
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
        fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
side : str
Perform one-sided ('left' or 'right') or 'two-sided' test
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
    random_state : int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
    observed : float, observed ISC (without phase randomization)
        Actual ISCs
    p : float, p-value
        p-value based on phase randomization test
distribution : ndarray, n_shifts by voxels
Phase-shifted null distribution
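    Examples
    --------
    A minimal sketch on random data (shapes are illustrative):
    >>> import numpy as np
    >>> data = np.random.randn(100, 10, 5)  # TRs x voxels x subjects
    >>> observed, p, distribution = phaseshift_isc(data, n_shifts=100)
    >>> distribution.shape
    (100, 10)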
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get shifted version of data
shifted_data = phase_randomize(data, random_state=prng)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=True,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
# Roll subject axis of phase-randomized data
shifted_data = np.rollaxis(shifted_data, 2, 0)
shifted_isc = []
for s, shifted_subject in enumerate(shifted_data):
# ISC of shifted left-out subject vs mean of N-1 subjects
nonshifted_mean = np.mean(np.delete(data, s, axis=2),
axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False, summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic, axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side=side, exact=False,
axis=0)
return observed, p, distribution
| 63,516 | 40.005165 | 79 | py |
brainiak | brainiak-master/brainiak/__init__.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Brain Imaging Analysis Kit."""
import sys
if sys.version_info < (3, 5):
raise Exception(
"Please use Python version 3.5 or higher, "
"lower versions are not supported"
)
| 791 | 33.434783 | 75 | py |
brainiak | brainiak-master/brainiak/io.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""I/O functionality."""
__all__ = [
"load_boolean_mask",
"load_images",
"load_images_from_dir",
"load_labels",
"save_as_nifti_file",
]
from pathlib import Path
from typing import Callable, Iterable, List, Union
import logging
import nibabel as nib
import numpy as np
from nibabel.nifti1 import Nifti1Pair
from nibabel.spatialimages import SpatialImage
from .image import SingleConditionSpec
logger = logging.getLogger(__name__)
def load_images_from_dir(in_dir: Union[str, Path], suffix: str = "nii.gz",
) -> Iterable[SpatialImage]:
"""Load images from directory.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images_from_dir must be
called again.
Parameters
----------
in_dir:
Path to directory.
suffix:
Only load images with names that end like this.
Yields
------
SpatialImage
Image.
Examples
--------
>>> images = list(io.load_images_from_dir("face_scene", "bet.nii.gz"))
"""
if isinstance(in_dir, str):
in_dir = Path(in_dir)
files = sorted(in_dir.glob("*" + suffix))
for f in files:
logger.debug(
'Starting to read file %s', f
)
yield nib.load(str(f))
def load_images(image_paths: Iterable[Union[str, Path]]
) -> Iterable[SpatialImage]:
"""Load images from paths.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images must be called
again.
Parameters
----------
image_paths:
Paths to images.
Yields
------
SpatialImage
Image.
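    Examples
    --------
    Paths below are illustrative:
    >>> images = list(io.load_images(["face_scene/sub1_bet.nii.gz",
    ...                               "face_scene/sub2_bet.nii.gz"]))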
"""
for image_path in image_paths:
if isinstance(image_path, Path):
string_path = str(image_path)
else:
string_path = image_path
logger.debug(
'Starting to read file %s', string_path
)
yield nib.load(string_path)
def load_boolean_mask(path: Union[str, Path],
predicate: Callable[[np.ndarray], np.ndarray] = None
) -> np.ndarray:
"""Load boolean nibabel.SpatialImage mask.
Parameters
----------
path
Mask path.
predicate
Callable used to create boolean values, e.g. a threshold function
``lambda x: x > 50``.
Returns
-------
np.ndarray
Boolean array corresponding to mask.
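    Examples
    --------
    Binarize a mask image with a threshold (path is illustrative):
    >>> mask = io.load_boolean_mask("face_scene/mask.nii.gz",
    ...                             lambda x: x > 50)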
"""
if not isinstance(path, str):
path = str(path)
    data = nib.load(path).get_fdata()
    if predicate is not None:
        mask = predicate(data)
    else:
        mask = data.astype(bool)
return mask
def load_labels(path: Union[str, Path]) -> List[SingleConditionSpec]:
"""Load labels files.
Parameters
----------
path
Path of labels file.
Returns
-------
List[SingleConditionSpec]
List of SingleConditionSpec stored in labels file.
"""
condition_specs = np.load(str(path))
return [c.view(SingleConditionSpec) for c in condition_specs]
def save_as_nifti_file(data: np.ndarray, affine: np.ndarray,
path: Union[str, Path]) -> None:
"""Create a Nifti file and save it.
Parameters
----------
data
Brain data.
affine
Affine of the image, usually inherited from an existing image.
path
Output filename.
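    Examples
    --------
    Reuse the affine of an existing image (paths are illustrative and
    ``result`` is an ndarray computed elsewhere):
    >>> img = nib.load("face_scene/sub1_bet.nii.gz")
    >>> io.save_as_nifti_file(result, img.affine, "face_scene/result.nii.gz")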
"""
if not isinstance(path, str):
path = str(path)
img = Nifti1Pair(data, affine)
nib.nifti1.save(img, path)
| 4,271 | 24.428571 | 78 | py |
brainiak | brainiak-master/brainiak/matnormal/utils.py | import tensorflow as tf
import tensorflow_probability as tfp
from scipy.stats import norm
from numpy.linalg import cholesky
import numpy as np
def rmn(rowcov, colcov):
"""
Generate random draws from a zero-mean matrix-normal distribution.
Parameters
-----------
rowcov : np.ndarray
Row covariance (assumed to be positive definite)
colcov : np.ndarray
Column covariance (assumed to be positive definite)
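    Returns
    -------
    np.ndarray
        Random draw of shape (rowcov.shape[0], colcov.shape[0])
    Examples
    --------
    Draw a single sample (shapes are illustrative):
    >>> X = rmn(np.eye(10), np.eye(5))
    >>> X.shape
    (10, 5)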
"""
Z = norm.rvs(size=(rowcov.shape[0], colcov.shape[0]))
return cholesky(rowcov).dot(Z).dot(cholesky(colcov))
def xx_t(x):
"""
Outer product
:math:`xx^T`
Parameters
-----------
x : tf.Tensor
"""
return tf.matmul(x, x, transpose_b=True)
def x_tx(x):
"""Inner product
:math:`x^T x`
Parameters
-----------
x : tf.Tensor
"""
return tf.matmul(x, x, transpose_a=True)
def scaled_I(x, size):
"""Scaled identity matrix
:math:`x I_{size}`
Parameters
------------
x: float or coercable to float
Scale to multiply the identity matrix by
size: int or otherwise coercable to a size
Dimension of the scaled identity matrix to return
"""
return tf.linalg.tensor_diag(tf.ones([size], dtype=tf.float64) * x)
def flatten_cholesky_unique(L):
"""
Flattens nonzero-elements Cholesky (triangular) factor
into a vector, and logs diagonal to make parameterization
unique. Inverse of unflatten_cholesky_unique.
"""
L_tf = tf.linalg.set_diag(L, tf.math.log(tf.linalg.diag_part(L)))
L_flat = tfp.math.fill_triangular_inverse(L_tf)
return L_flat
def unflatten_cholesky_unique(L_flat):
"""
Converts a vector of elements into a triangular matrix
(Cholesky factor). Exponentiates diagonal to make
parameterization unique. Inverse of flatten_cholesky_unique.
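    Examples
    --------
    Round trip with flatten_cholesky_unique (a minimal sketch):
    >>> L = tf.constant([[1., 0.], [2., 3.]], dtype=tf.float64)
    >>> L_flat = flatten_cholesky_unique(L)
    >>> L2 = unflatten_cholesky_unique(L_flat)
    >>> bool(tf.reduce_all(tf.abs(L - L2) < 1e-12))
    True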
"""
L = tfp.math.fill_triangular(L_flat)
# exp diag for unique parameterization
L = tf.linalg.set_diag(L, tf.exp(tf.linalg.diag_part(L)))
return L
def pack_trainable_vars(trainable_vars):
"""
Pack trainable vars in a model into a single
vector that can be passed to scipy.optimize
"""
return tf.concat([tf.reshape(tv, (-1,)) for tv in trainable_vars], axis=0)
def unpack_trainable_vars(x, trainable_vars):
"""
Unpack trainable vars from a single vector as
used/returned by scipy.optimize
"""
sizes = [tv.shape for tv in trainable_vars]
idxs = [np.prod(sz) for sz in sizes]
flatvars = tf.split(x, idxs)
return [tf.reshape(fv, tv.shape) for fv, tv in zip(flatvars,
trainable_vars)]
def make_val_and_grad(lossfn, train_vars):
"""
    Makes a function that outputs the loss and gradient in a format compatible
with scipy.optimize.minimize
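    Examples
    --------
    A hedged sketch of the intended optimization loop (``lossfn`` and
    ``train_vars`` come from a matnormal model; names are illustrative):
    >>> from scipy.optimize import minimize  # doctest: +SKIP
    >>> val_and_grad = make_val_and_grad(lossfn, train_vars)  # doctest: +SKIP
    >>> x0 = pack_trainable_vars(train_vars).numpy()  # doctest: +SKIP
    >>> result = minimize(val_and_grad, x0, jac=True,
    ...                   method='L-BFGS-B')  # doctest: +SKIP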
"""
def val_and_grad(theta):
with tf.GradientTape() as tape:
tape.watch(train_vars)
unpacked_theta = unpack_trainable_vars(theta, train_vars)
for var, val in zip(train_vars, unpacked_theta):
var.assign(val)
loss = lossfn(theta)
grad = tape.gradient(loss, train_vars)
packed_grad = pack_trainable_vars(grad)
return loss.numpy(), packed_grad.numpy()
return val_and_grad
| 3,313 | 25.512 | 78 | py |
brainiak | brainiak-master/brainiak/matnormal/covs.py | import tensorflow as tf
import numpy as np
import abc
import scipy.linalg
import scipy.sparse
import tensorflow_probability as tfp
from brainiak.matnormal.utils import (
x_tx,
xx_t,
unflatten_cholesky_unique,
flatten_cholesky_unique,
)
from brainiak.utils.kronecker_solvers import (
tf_solve_lower_triangular_kron,
tf_solve_upper_triangular_kron,
tf_solve_lower_triangular_masked_kron,
tf_solve_upper_triangular_masked_kron,
)
__all__ = [
"CovBase",
"CovIdentity",
"CovAR1",
"CovIsotropic",
"CovDiagonal",
"CovDiagonalGammaPrior",
"CovUnconstrainedCholesky",
"CovUnconstrainedCholeskyWishartReg",
"CovUnconstrainedInvCholesky",
"CovKroneckerFactored",
]
class CovBase(abc.ABC):
"""Base metaclass for residual covariances.
For more on abstract classes, see
https://docs.python.org/3/library/abc.html
Parameters
----------
size: int
The size of the covariance matrix.
"""
def __init__(self, size):
self.size = size
# Log-likelihood of this covariance (useful for regularization)
self.logp = tf.constant(0, dtype=tf.float64)
@abc.abstractmethod
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized to fit
this covariance
"""
pass
@property
def logdet(self):
""" log determinant of this covariance
"""
pass
@abc.abstractmethod
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x`
"""
pass
@property
def _prec(self):
"""Expose the precision explicitly (mostly for testing /
visualization, materializing large covariances may be intractable)
"""
return self.solve(tf.eye(self.size, dtype=tf.float64))
@property
def _cov(self):
"""Expose the covariance explicitly (mostly for testing /
visualization, materializing large covariances may be intractable)
"""
return tf.linalg.inv(self._prec)
class CovIdentity(CovBase):
"""Identity noise covariance.
"""
def __init__(self, size):
super(CovIdentity, self).__init__(size)
@property
def logdet(self):
return tf.constant(0.0, "float64")
def get_optimize_vars(self):
"""Returns a list of tf variables that need to get optimized to
fit this covariance
"""
return []
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x`
"""
return X
@property
def _prec(self):
"""Expose the precision explicitly (mostly for testing /
visualization, materializing large covariances may be intractable)
"""
return tf.eye(self.size, dtype=tf.float64)
@property
def _cov(self):
"""Expose the covariance explicitly (mostly for testing /
visualization, materializing large covariances may be intractable)
"""
return tf.eye(self.size, dtype=tf.float64)
class CovAR1(CovBase):
"""AR(1) covariance parameterized by autoregressive parameter rho
and new noise sigma.
Parameters
----------
size: int
size of covariance matrix
rho: float or None
initial value of autoregressive parameter (if None, initialize
randomly)
sigma: float or None
        initial value of new noise parameter (if None, initialize randomly)
    scan_onsets: np.ndarray or None
        TR indices marking the start of each scan/run; if provided, the
        AR(1) structure is applied within each run separately
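    Examples
    --------
    A minimal sketch (parameters are randomly initialized):
    >>> cov = CovAR1(size=100)
    >>> x = tf.ones([100, 2], dtype=tf.float64)
    >>> cov.solve(x).shape
    TensorShape([100, 2])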
"""
def __init__(self, size, rho=None, sigma=None, scan_onsets=None):
super(CovAR1, self).__init__(size)
        # Build block-diagonal precision templates (same trick as
        # reprsimil.BRSA)
if scan_onsets is None:
self.run_sizes = [size]
self.offdiag_template = tf.constant(
scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(size - 2)]),
dtype=tf.float64
)
self.diag_template = tf.constant(
np.diag(np.r_[0, np.ones(size - 2), 0]))
else:
self.run_sizes = np.ediff1d(np.r_[scan_onsets, size])
sub_offdiags = [
scipy.linalg.toeplitz(np.r_[0, 1, np.zeros(r - 2)])
for r in self.run_sizes
]
self.offdiag_template = tf.constant(
scipy.sparse.block_diag(sub_offdiags).toarray()
)
subdiags = [np.diag(np.r_[0, np.ones(r - 2), 0])
for r in self.run_sizes]
self.diag_template = tf.constant(
scipy.sparse.block_diag(subdiags).toarray()
)
self._identity_mat = tf.constant(np.eye(size))
if sigma is None:
self.log_sigma = tf.Variable(
tf.random.normal([1], dtype=tf.float64), name="log_sigma"
)
else:
self.log_sigma = tf.Variable(np.log(sigma), name="log_sigma")
if rho is None:
self.rho_unc = tf.Variable(
tf.random.normal([1], dtype=tf.float64), name="rho_unc"
)
else:
self.rho_unc = tf.Variable(
scipy.special.logit(rho / 2 + 0.5), name="rho_unc"
)
@property
def logdet(self):
""" log-determinant of this covariance
"""
# first, unconstrain rho and sigma
rho = 2 * tf.sigmoid(self.rho_unc) - 1
# now compute logdet
return tf.reduce_sum(
input_tensor=2
* tf.constant(self.run_sizes, dtype=tf.float64)
* self.log_sigma
- tf.math.log(1 - tf.square(rho))
)
@property
def _prec(self):
"""Precision matrix corresponding to this AR(1) covariance.
We assume stationarity within block so no special case
for first/last element of a block. This makes constructing this
matrix easier.
        Following reprsimil.BRSA, the precision is
        (I - rho * D + rho**2 * F) / sigma**2, where D has ones on the
        first off-diagonals and F has ones on the interior diagonal
"""
rho = 2 * tf.sigmoid(self.rho_unc) - 1
sigma = tf.exp(self.log_sigma)
return (
self._identity_mat
- rho * self.offdiag_template
+ rho ** 2 * self.diag_template
) / tf.square(sigma)
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized to
fit this covariance
"""
return [self.rho_unc, self.log_sigma]
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x`
"""
return tf.matmul(self._prec, X)
class CovIsotropic(CovBase):
"""Scaled identity (isotropic) noise covariance.
Parameters
----------
size: int
size of covariance matrix
var: float or None
initial value of new variance parameter (if None, initialize randomly)
"""
def __init__(self, size, var=None):
super(CovIsotropic, self).__init__(size)
if var is None:
self.log_var = tf.Variable(
tf.random.normal([1], dtype=tf.float64), name="sigma"
)
else:
self.log_var = tf.Variable(np.log(var), name="sigma")
self.var = tf.exp(self.log_var)
@property
def logdet(self):
return self.size * self.log_var
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized to fit
this covariance
"""
return [self.log_var]
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x`
Parameters
----------
X: tf.Tensor
Tensor to multiply by inverse of this covariance
"""
return X / self.var
class CovDiagonal(CovBase):
"""Uncorrelated (diagonal) noise covariance
Parameters
----------
size: int
size of covariance matrix
diag_var: float or None
initial value of (diagonal) variance vector (if None, initialize
randomly)
"""
def __init__(self, size, diag_var=None):
super(CovDiagonal, self).__init__(size)
if diag_var is None:
self.logprec = tf.Variable(
tf.random.normal([size], dtype=tf.float64), name="precisions"
)
else:
self.logprec = tf.Variable(
np.log(1 / diag_var), name="log-precisions")
@property
def logdet(self):
return -tf.reduce_sum(input_tensor=self.logprec)
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized to fit
this covariance
"""
return [self.logprec]
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x`
Parameters
----------
X: tf.Tensor
Tensor to multiply by inverse of this covariance
"""
prec = tf.exp(self.logprec)
prec_dimaugmented = tf.expand_dims(prec, -1)
return tf.multiply(prec_dimaugmented, X)
class CovDiagonalGammaPrior(CovDiagonal):
"""Uncorrelated (diagonal) noise covariance
"""
def __init__(self, size, sigma=None, alpha=1.5, beta=1e-10):
super(CovDiagonalGammaPrior, self).__init__(size, sigma)
self.ig = tfp.distributions.InverseGamma(
concentration=tf.constant(alpha, dtype=tf.float64),
scale=tf.constant(beta, dtype=tf.float64),
)
self.logp = tf.reduce_sum(
input_tensor=self.ig.log_prob(tf.exp(self.logprec)))
class CovUnconstrainedCholesky(CovBase):
    """Unconstrained noise covariance parameterized in terms of its
    cholesky factor. Pass exactly one of `size` or `Sigma`.
"""
def __init__(self, size=None, Sigma=None):
        if size is None and Sigma is None:
            raise RuntimeError("Must pass either Sigma or size")
if size is not None and Sigma is not None:
raise RuntimeError("Must pass either Sigma or size but not both")
if Sigma is not None:
size = Sigma.shape[0]
super(CovUnconstrainedCholesky, self).__init__(size)
# number of parameters in the triangular mat
npar = (size * (size + 1)) // 2
if Sigma is None:
self.L_flat = tf.Variable(
tf.random.normal([npar], dtype=tf.float64), name="L_flat"
)
else:
L = np.linalg.cholesky(Sigma)
self.L_flat = tf.Variable(
flatten_cholesky_unique(L), name="L_flat")
self.optimize_vars = [self.L_flat]
@property
def L(self):
"""
Cholesky factor of this covariance
"""
return unflatten_cholesky_unique(self.L_flat)
@property
def logdet(self):
return 2 * tf.reduce_sum(input_tensor=tf.math.log(
tf.linalg.diag_part(self.L)))
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized to fit
this covariance
"""
return [self.L_flat]
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x` (using cholesky solve)
Parameters
----------
X: tf.Tensor
Tensor to multiply by inverse of this covariance
"""
return tf.linalg.cholesky_solve(self.L, X)
class CovUnconstrainedCholeskyWishartReg(CovUnconstrainedCholesky):
"""Unconstrained noise covariance parameterized in terms of its
cholesky factor. Regularized using the trick from
Chung et al. 2015 such that as the covariance approaches
singularity, the likelihood goes to 0.
References
----------
Chung, Y., Gelman, A., Rabe-Hesketh, S., Liu, J., & Dorie, V. (2015).
Weakly Informative Prior for Point Estimation of Covariance Matrices
in Hierarchical Models. Journal of Educational and Behavioral Statistics,
40(2), 136–157. https://doi.org/10.3102/1076998615570945
"""
    def __init__(self, size, Sigma=None):
        # pass Sigma through so it can initialize the cholesky factor
        if Sigma is None:
            super(CovUnconstrainedCholeskyWishartReg, self).__init__(size)
        else:
            super(CovUnconstrainedCholeskyWishartReg,
                  self).__init__(Sigma=Sigma)
self.wishartReg = tfp.distributions.WishartTriL(
df=tf.constant(size + 2, dtype=tf.float64),
scale_tril=tf.constant(1e5 * np.eye(size), dtype=tf.float64),
)
        # evaluated at construction from the initial cholesky factor
        Sigma_init = xx_t(self.L)
        self.logp = self.wishartReg.log_prob(Sigma_init)
class CovUnconstrainedInvCholesky(CovBase):
"""Unconstrained noise covariance parameterized
in terms of its precision cholesky. Use this over the
regular cholesky unless you have a good reason not to, since
this saves a cholesky solve on every step of optimization
"""
def __init__(self, size=None, invSigma=None):
if size is None and invSigma is None:
            raise RuntimeError(
                "Must pass either invSigma or size")
if size is not None and invSigma is not None:
raise RuntimeError(
"Must pass either invSigma or size but not both")
if invSigma is not None:
size = invSigma.shape[0]
super(CovUnconstrainedInvCholesky, self).__init__(size)
# number of parameters in the triangular mat
npar = (size * (size + 1)) // 2
if invSigma is None:
self.Linv_flat = tf.Variable(
tf.random.normal([npar], dtype=tf.float64), name="Linv_flat"
)
else:
Linv = np.linalg.cholesky(invSigma)
self.Linv_flat = tf.Variable(
flatten_cholesky_unique(Linv), name="Linv_flat"
)
@property
def Linv(self):
"""
Inverse of Cholesky factor of this covariance
"""
return unflatten_cholesky_unique(self.Linv_flat)
@property
def logdet(self):
return -2 * tf.reduce_sum(
input_tensor=tf.math.log(tf.linalg.diag_part(self.Linv))
)
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized to fit
this covariance
"""
return [self.Linv_flat]
def solve(self, X):
"""Given this covariance :math:`\\Sigma` and some input :math:`X`,
compute :math:`\\Sigma^{-1}x` (using cholesky solve)
Parameters
----------
X: tf.Tensor
Tensor to multiply by inverse of this covariance
"""
return tf.matmul(x_tx(self.Linv), X)
class CovKroneckerFactored(CovBase):
""" Kronecker product noise covariance parameterized in terms
of its component cholesky factors
"""
def __init__(self, sizes, Sigmas=None, mask=None):
"""Initialize the kronecker factored covariance object.
        Parameters
        ----------
sizes : list
List of dimensions (int) of the factors
E.g. ``sizes = [2, 3]`` will create two factors of
sizes 2x2 and 3x3 giving us a 6x6 dimensional covariance
Sigmas : list (default : None)
Initial guess for the covariances. List of positive definite
covariance matrices the same sizes as sizes.
mask : int array (default : None)
            1-D tensor with length equal to the product of sizes, with 1
            for valid elements and 0 for don't-care elements
Returns
-------
None
Raises
------
TypeError
If sizes is not a list
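        Example (illustrative):
        >>> from brainiak.matnormal.covs import CovKroneckerFactored
        >>> cov = CovKroneckerFactored(sizes=[2, 3])  # 6x6 covariance
        >>> int(cov.size)
        6
        >>> len(cov.L)  # one cholesky factor per kronecker factor
        2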
"""
if not isinstance(sizes, list):
raise TypeError("sizes is not a list")
self.sizes = sizes
self.nfactors = len(sizes)
self.size = np.prod(np.array(sizes), dtype=np.int32)
npar = [(size * (size + 1)) // 2 for size in self.sizes]
if Sigmas is None:
self.Lflat = [
tf.Variable(
tf.random.normal([npar[i]], dtype=tf.float64),
name="L" + str(i) + "_flat",
)
for i in range(self.nfactors)
]
else:
self.Lflat = [
tf.Variable(
flatten_cholesky_unique(np.linalg.cholesky(Sigmas[i])),
name="L" + str(i) + "_flat",
)
for i in range(self.nfactors)
]
self.mask = mask
@property
def L(self):
return [unflatten_cholesky_unique(mat) for mat in self.Lflat]
def get_optimize_vars(self):
""" Returns a list of tf variables that need to get optimized
to fit this covariance
"""
return self.Lflat
@property
def logdet(self):
""" log|Sigma| using the diagonals of the cholesky factors.
"""
if self.mask is None:
n_list = tf.stack(
[tf.cast(tf.shape(input=mat)[0], dtype=tf.float64)
for mat in self.L]
)
n_prod = tf.reduce_prod(input_tensor=n_list)
logdet = tf.stack(
[
tf.reduce_sum(
input_tensor=tf.math.log(
tf.linalg.tensor_diag_part(mat))
)
for mat in self.L
]
)
logdetfinal = tf.reduce_sum(
input_tensor=(logdet * n_prod) / n_list)
else:
n_list = [tf.shape(input=mat)[0] for mat in self.L]
mask_reshaped = tf.reshape(self.mask, n_list)
logdet = 0.0
for i in range(self.nfactors):
indices = list(range(self.nfactors))
indices.remove(i)
logdet += (tf.math.log(tf.linalg.tensor_diag_part(self.L[i])) *
tf.cast(
tf.reduce_sum(
input_tensor=mask_reshaped, axis=indices),
dtype=tf.float64,
))
logdetfinal = tf.reduce_sum(input_tensor=logdet)
return 2.0 * logdetfinal
def solve(self, X):
""" Given this covariance :math:`\\Sigma` and some input :math:`X`,
        compute :math:`\\Sigma^{-1}x` using triangular solves with the
        cholesky factors.
Specifically, we solve :math:`L L^T x = y` by solving
:math:`L z = y` and :math:`L^T x = z`.
Parameters
----------
X: tf.Tensor
Tensor to multiply by inverse of this covariance
"""
if self.mask is None:
z = tf_solve_lower_triangular_kron(self.L, X)
x = tf_solve_upper_triangular_kron(self.L, z)
else:
z = tf_solve_lower_triangular_masked_kron(self.L, X, self.mask)
x = tf_solve_upper_triangular_masked_kron(self.L, z, self.mask)
return x
| 18,989 | 29.481541 | 79 | py |
brainiak | brainiak-master/brainiak/matnormal/regression.py | import tensorflow as tf
import numpy as np
from sklearn.base import BaseEstimator
from brainiak.matnormal.matnormal_likelihoods import matnorm_logp
from brainiak.matnormal.utils import (
pack_trainable_vars,
unpack_trainable_vars,
make_val_and_grad,
)
from scipy.optimize import minimize
__all__ = ["MatnormalRegression"]
class MatnormalRegression(BaseEstimator):
""" This analysis allows maximum likelihood estimation of regression models
in the presence of both spatial and temporal covariance.
    .. math::
        Y \\sim \\mathcal{MN}(X\\beta, \\Sigma_t, \\Sigma_s)
    where :math:`\\Sigma_t` is the temporal (TR by TR) noise covariance
    (`time_cov`) and :math:`\\Sigma_s` is the spatial (voxel by voxel)
    noise covariance (`space_cov`).
Parameters
----------
time_cov : subclass of CovBase
TR noise covariance class following CovBase interface.
space_cov : subclass of CovBase
Voxel noise covariance class following CovBase interface.
optimizer : string, default="L-BFGS-B"
Scipy optimizer to use. For other options, see "method" argument
of scipy.optimize.minimize
optCtrl: dict, default=None
Additional arguments to pass to scipy.optimize.minimize.
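    Examples
    --------
    Illustrative sketch (CovIdentity stands in for any CovBase subclass;
    sizes are arbitrary):
    >>> import numpy as np
    >>> from brainiak.matnormal.regression import MatnormalRegression
    >>> from brainiak.matnormal.covs import CovIdentity
    >>> X = np.random.randn(30, 4)  # design: TRs by conditions
    >>> Y = X @ np.random.randn(4, 10) + 0.1 * np.random.randn(30, 10)
    >>> model = MatnormalRegression(time_cov=CovIdentity(size=30),
    ...                             space_cov=CovIdentity(size=10))
    >>> model.fit(X, Y)
    >>> Y_hat = model.predict(X)  # TRs by voxels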
"""
def __init__(self, time_cov, space_cov, optimizer="L-BFGS-B",
optCtrl=None):
self.optMethod = optimizer
        self.optCtrl = {} if optCtrl is None else optCtrl
self.time_cov = time_cov
self.space_cov = space_cov
self.n_t = time_cov.size
self.n_v = space_cov.size
def logp(self, X, Y):
""" Log likelihood of model (internal)
"""
y_hat = tf.matmul(X, self.beta)
resid = Y - y_hat
return matnorm_logp(resid, self.time_cov, self.space_cov)
def fit(self, X, y, naive_init=True):
""" Compute the regression fit.
Parameters
----------
X : np.array, TRs by conditions.
Design matrix
y : np.array, TRs by voxels.
            fMRI data
        naive_init : bool, default=True
            If True, initialize beta at the generalized least squares
            solution; otherwise initialize randomly.
"""
self.n_c = X.shape[1]
if naive_init:
# initialize to the least squares solution (basically all
# we need now is the cov)
sigma_inv_x = self.time_cov.solve(X)
sigma_inv_y = self.time_cov.solve(y)
beta_init = np.linalg.solve(
(X.T).dot(sigma_inv_x), (X.T).dot(sigma_inv_y))
else:
beta_init = np.random.randn(self.n_c, self.n_v)
self.beta = tf.Variable(beta_init, name="beta")
self.train_variables = [self.beta]
self.train_variables.extend(self.time_cov.get_optimize_vars())
self.train_variables.extend(self.space_cov.get_optimize_vars())
def lossfn(theta):
return -self.logp(X, y)
val_and_grad = make_val_and_grad(lossfn, self.train_variables)
x0 = pack_trainable_vars(self.train_variables)
opt_results = minimize(
fun=val_and_grad, x0=x0, jac=True, method=self.optMethod,
**self.optCtrl
)
unpacked_theta = unpack_trainable_vars(
opt_results.x, self.train_variables)
for var, val in zip(self.train_variables, unpacked_theta):
var.assign(val)
self.beta_ = self.beta.numpy()
def predict(self, X):
""" Predict fMRI signal from design matrix.
Parameters
----------
X : np.array, TRs by conditions.
Design matrix
"""
return X.dot(self.beta_)
def calibrate(self, Y):
""" Decode design matrix from fMRI dataset, based on a previously
trained mapping. This method just does naive MLE:
.. math::
X = Y \\Sigma_s^{-1}B^T(B \\Sigma_s^{-1} B^T)^{-1}
Parameters
----------
Y : np.array, TRs by voxels.
fMRI dataset
"""
if Y.shape[1] <= self.n_c:
            raise RuntimeError(
                "More conditions than voxels! System is singular, "
                "cannot decode."
            )
# Sigma_s^{-1} B'
Sigma_s_btrp = self.space_cov.solve(tf.transpose(a=self.beta))
# Y Sigma_s^{-1} B'
Y_Sigma_Btrp = tf.matmul(Y, Sigma_s_btrp).numpy()
# (B Sigma_s^{-1} B')^{-1}
B_Sigma_Btrp = tf.matmul(self.beta, Sigma_s_btrp).numpy()
X_test = np.linalg.solve(B_Sigma_Btrp.T, Y_Sigma_Btrp.T).T
return X_test
| 4,301 | 28.265306 | 79 | py |
brainiak | brainiak-master/brainiak/matnormal/mnrsa.py | import tensorflow as tf
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression
from .covs import CovIdentity
from brainiak.utils.utils import cov2corr
import numpy as np
from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row
from brainiak.matnormal.utils import (
pack_trainable_vars,
unpack_trainable_vars,
make_val_and_grad,
unflatten_cholesky_unique,
flatten_cholesky_unique,
)
from scipy.optimize import minimize
__all__ = ["MNRSA"]
class MNRSA(BaseEstimator):
""" Matrix normal version of RSA.
The goal of this analysis is to find the covariance of the mapping from
some design matrix X to the fMRI signal Y. It does so by marginalizing over
the actual mapping (i.e. averaging over the uncertainty in it), which
happens to correct a bias imposed by structure in the design matrix on the
RSA estimate (see Cai et al., NIPS 2016).
This implementation makes different choices about residual covariance
relative to `brainiak.reprsimil.BRSA`: Here, the noise covariance is
assumed to be kronecker-separable. Informally, this means that all voxels
have the same temporal covariance, and all time points have the same
spatial covariance. This is in contrast to BRSA, which allows different
temporal covariance for each voxel. On the other hand, computational
efficiencies enabled by this choice allow MNRSA to support a richer class
of space and time covariances (anything in `brainiak.matnormal.covs`).
    For users: in general, if you are worried about voxels each having a
    different temporal noise structure, you should use
    `brainiak.reprsimil.BRSA`. If you are worried about between-voxel
    correlations or temporal covariance structures that BRSA does not
    support, you should use MNRSA.
.. math::
        Y &\\sim \\mathcal{MN}(0, \\Sigma_t + XLL^TX^T +
        X_0X_0^T, \\Sigma_s)\\\\
        U &= LL^T
Parameters
----------
time_cov : subclass of CovBase
Temporal noise covariance class following CovBase interface.
space_cov : subclass of CovBase
Spatial noise covariance class following CovBase interface.
    n_nureg : int, default: 5
        Number of nuisance regressors (columns of :math:`X_0`).
    optimizer : string, default: 'L-BFGS-B'
        Name of scipy optimizer to use.
    optCtrl : dict, default: None
        Additional arguments to pass to scipy.optimize.minimize.
"""
def __init__(
self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B",
optCtrl=None
):
self.n_T = time_cov.size
self.n_V = space_cov.size
self.n_nureg = n_nureg
self.optMethod = optimizer
        self.optCtrl = {} if optCtrl is None else optCtrl
self.X_0 = tf.Variable(
tf.random.normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0"
)
self.train_variables = [self.X_0]
self.time_cov = time_cov
self.space_cov = space_cov
self.train_variables.extend(self.time_cov.get_optimize_vars())
self.train_variables.extend(self.space_cov.get_optimize_vars())
@property
def L(self):
"""
Cholesky factor of the RSA matrix.
"""
return unflatten_cholesky_unique(self.L_flat)
    def fit(self, X, y, naive_init=True):
        """ Estimate the similarity structure U and covariance parameters
Parameters
----------
X: 2d array
Brain data matrix (TRs by voxels). Y in the math
y: 2d array or vector
            Behavior data matrix (TRs by behavioral observations). X in
            the math.
        naive_init : bool, default=True
            If True, initialize the cholesky factor of U from a naive RSA
            estimate (the covariance of least-squares regression
            coefficients); otherwise initialize randomly.
"""
# In the method signature we follow sklearn discriminative API
# where brain is X and behavior is y. Internally we are
# generative so we flip this here
X, Y = y, X
self.n_c = X.shape[1]
if naive_init:
# initialize from naive RSA
m = LinearRegression(fit_intercept=False)
m.fit(X=X, y=Y)
self.naive_U_ = np.cov(m.coef_.T)
naiveRSA_L = np.linalg.cholesky(self.naive_U_)
self.L_flat = tf.Variable(
flatten_cholesky_unique(naiveRSA_L), name="L_flat",
dtype="float64"
)
else:
chol_flat_size = (self.n_c * (self.n_c + 1)) // 2
self.L_flat = tf.Variable(
tf.random.normal([chol_flat_size], dtype="float64"),
name="L_flat",
dtype="float64",
)
self.train_variables.extend([self.L_flat])
        def lossfn(theta):
            return -self.logp(X, Y)
val_and_grad = make_val_and_grad(lossfn, self.train_variables)
x0 = pack_trainable_vars(self.train_variables)
opt_results = minimize(fun=val_and_grad, x0=x0,
jac=True, method=self.optMethod, **self.optCtrl)
unpacked_theta = unpack_trainable_vars(
opt_results.x, self.train_variables)
for var, val in zip(self.train_variables, unpacked_theta):
var.assign(val)
self.U_ = self.L.numpy().dot(self.L.numpy().T)
self.C_ = cov2corr(self.U_)
def logp(self, X, Y):
""" MNRSA Log-likelihood"""
rsa_cov = CovIdentity(size=self.n_c + self.n_nureg)
x_stack = tf.concat([tf.matmul(X, self.L), self.X_0], 1)
return (
self.time_cov.logp
+ self.space_cov.logp
+ rsa_cov.logp
+ matnorm_logp_marginal_row(
Y,
row_cov=self.time_cov,
col_cov=self.space_cov,
marg=x_stack,
marg_cov=rsa_cov,
)
)
| 6,106 | 33.698864 | 79 | py |
brainiak | brainiak-master/brainiak/matnormal/__init__.py | """
Some properties of the matrix-variate normal distribution
---------------------------------------------------------
.. math::
\\DeclareMathOperator{\\Tr}{Tr}
\\newcommand{\\trp}{^{T}} % transpose
\\newcommand{\\trace}{\\text{Trace}} % trace
\\newcommand{\\inv}{^{-1}}
\\newcommand{\\mb}{\\mathbf{b}}
\\newcommand{\\M}{\\mathbf{M}}
\\newcommand{\\C}{\\mathbf{C}}
\\newcommand{\\G}{\\mathbf{G}}
\\newcommand{\\A}{\\mathbf{A}}
\\newcommand{\\R}{\\mathbf{R}}
\\renewcommand{\\S}{\\mathbf{S}}
\\newcommand{\\B}{\\mathbf{B}}
\\newcommand{\\Q}{\\mathbf{Q}}
\\newcommand{\\mH}{\\mathbf{H}}
\\newcommand{\\U}{\\mathbf{U}}
\\newcommand{\\mL}{\\mathbf{L}}
\\newcommand{\\diag}{\\mathrm{diag}}
\\newcommand{\\etr}{\\mathrm{etr}}
\\renewcommand{\\H}{\\mathbf{H}}
\\newcommand{\\vecop}{\\mathrm{vec}}
\\newcommand{\\I}{\\mathbf{I}}
\\newcommand{\\X}{\\mathbf{X}}
\\newcommand{\\Y}{\\mathbf{Y}}
\\newcommand{\\Z}{\\mathbf{Z}}
\\renewcommand{\\L}{\\mathbf{L}}
The matrix-variate normal distribution is a generalization to matrices of the
normal distribution. Another name for it is the multivariate normal
distribution with kronecker separable covariance.
The distributional intuition is as follows: if
:math:`X \\sim \\mathcal{MN}(M,R,C)` then
:math:`\\mathrm{vec}(X)\\sim\\mathcal{N}(\\mathrm{vec}(M), C \\otimes R)`,
where :math:`\\mathrm{vec}(\\cdot)` is the vectorization operator and
:math:`\\otimes` is the Kronecker product. If we think of X as a matrix of TRs
by voxels in the fMRI setting, then this model assumes that each voxel has the
same TR-by-TR covariance structure (represented by the matrix R),
and each volume has the same spatial covariance (represented by the matrix C).
This assumption allows us to model both covariances separately.
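For intuition, the kronecker structure is easy to check numerically
(an illustrative sketch, not part of the package API)::
    import numpy as np
    m, n = 3, 4                  # e.g., TRs and voxels
    R = np.eye(m) + 0.1          # TR-by-TR covariance (symmetric PD)
    C = np.eye(n) + 0.2          # voxel-by-voxel covariance (symmetric PD)
    big_cov = np.kron(C, R)      # covariance of vec(X) under MN(M, R, C)
    assert big_cov.shape == (m * n, m * n)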
We can assume that the spatial covariance itself is kronecker-structured,
which implies that the spatial covariance of voxels is the same in the X,
Y and Z dimensions.
The log-likelihood for the matrix-normal density is:
.. math::
\\log p(X\\mid \\M,\\R, \\C) = -2\\log mn - m \\log|\\C| - n \\log|\\R| -
\\Tr\\left[\\C\\inv(\\X-\\M)\\trp\\R\\inv(\\X-\\M)\\right]
Here :math:`X` and :math:`M` are both :math:`m\\times n` matrices, :math:`\\R`
is :math:`m\\times m` and :math:`\\C` is :math:`n\\times n`.
The `brainiak.matnormal` package provides structure for inferring models
that can be stated in matrix-normal notation and are useful for fMRI
analysis. It provides a few interfaces. `MatnormModelBase` is intended as a
base class for matrix-variate models. It wraps the tensorflow optimizer
with convergence checks based on thresholds on the function value and
gradient, and with simple verbose outputs. It also provides an interface
for noise covariances (`CovBase`). Any class that follows the interface
can be used as a noise covariance in any of the matrix normal models. The
package includes a variety of noise covariances to work with.
Matrix normal marginals
-------------------------
Here we extend the multivariate gaussian marginalization identity to matrix
normals. This is used in a number of the models in the package. Below, we
use lowercase subscripts for sizes to make dimensionalities easier to track.
Uppercase subscripts for covariances help keep track of where they come from.
.. math::
\\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\mathbf{A}_{ij},
\\Sigma_{\\mathbf{X}i},\\Sigma_{\\mathbf{X}j})\\\\
\\mathbf{Y}_{jk} &\\sim \\mathcal{MN}(\\mathbf{B}_{jk},
\\Sigma_{\\mathbf{Y}j},\\Sigma_{\\mathbf{Y}k})\\\\
\\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} &\\sim
\\mathcal{MN}(\\mathbf{X}_{ij}\\mathbf{Y}_{jk} + \\mathbf{C}_{ik},
\\Sigma_{\\mathbf{Z}_i}, \\Sigma_{\\mathbf{Z}_k})\\\\
We vectorize, and convert to a form we recognize as
:math:`y \\sim \\mathcal{N}(Mx+b, \\Sigma)`.
.. math::
\\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} &\\sim
\\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{Y}_{jk}+\\mathbf{C}_{ik}),
\\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})\\\\
\\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk}
&\\sim \\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{Y}_{jk})
+ \\vecop(\\mathbf{C}_{ik}),
\\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i})
Now we can use our standard gaussian marginalization identity:
.. math::
\\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim
\\mathcal{N}((\\I_k\\otimes\\X_{ij})\\vecop(\\mathbf{B}_{jk}) +
\\vecop(\\mathbf{C}_{ik}),
\\Sigma_{\\mathbf{Z}_k}\\otimes\\Sigma_{\\mathbf{Z}_i} +
(\\I_k\\otimes\\X_{ij})(\\Sigma_{\\mathbf{Y}_k}\\otimes
\\Sigma_{\\mathbf{Y}_j})(\\I_k\\otimes\\X_{ij})\\trp )
Collect terms using the mixed-product property of kronecker products:
.. math::
\\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} \\sim
\\mathcal{N}(\\vecop(\\X_{ij}\\mathbf{B}_{jk}) +
\\vecop(\\mathbf{C}_{ik}), \\Sigma_{\\mathbf{Z}_k}\\otimes
\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{\\mathbf{Y}_k}\\otimes
\\X_{ij}\\Sigma_{\\mathbf{Y}_j}\\X_{ij}\\trp)
Now, we can see that the marginal density is a matrix-variate normal only if
:math:`\\Sigma_{\\mathbf{Z}_k}= \\Sigma_{\\mathbf{Y}_k}` -- that is, the
variable we're marginalizing over has the same covariance in the dimension
we're *not* marginalizing over as the marginal density. Otherwise the density
is well-defined but the covariance retains its kronecker structure. So we let
:math:`\\Sigma_k:=\\Sigma_{\\mathbf{Z}_k}= \\Sigma_{\\mathbf{Y}_k}`, factor,
and transform it back into a matrix normal:
.. math::
\\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} &\\sim
\\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}),
        \\Sigma_{k}\\otimes\\Sigma_{\\mathbf{Z}_i} + \\Sigma_{k}\\otimes
\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp)\\\\
\\vecop(\\mathbf{Z}_{ik})\\mid\\mathbf{X}_{ij} &\\sim
\\mathcal{N}(\\vecop(\\X\\mathbf{B}_{jk}) + \\vecop(\\mathbf{C}_{ik}),
\\Sigma_{k}\\otimes(\\Sigma_{\\mathbf{Z}_i}
+\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp))\\\\
\\mathbf{Z}_{ik}\\mid\\mathbf{X}_{ij} &\\sim
\\mathcal{MN}(\\X\\mathbf{B}_{jk} + \\mathbf{C}_{ik},
\\Sigma_{\\mathbf{Z}_i} +\\X\\Sigma_{\\mathbf{Y}_j}\\X\\trp,\\Sigma_{k})
We can do it in the other direction as well, because if
:math:`\\X \\sim \\mathcal{MN}(M, U, V)` then :math:`\\X\\trp \\sim
\\mathcal{MN}(M\\trp, V, U)`:
.. math::
\\mathbf{Z\\trp}_{ik}\\mid\\mathbf{X}_{ij},\\mathbf{Y}_{jk} &\\sim
\\mathcal{MN}(\\mathbf{Y}_{jk}\\trp\\mathbf{X}_{ij}\\trp +
\\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k},\\Sigma_{\\mathbf{Z}_i})\\\\
\\mbox{let } \\Sigma_i :=
\\Sigma_{\\mathbf{Z}_i}=\\Sigma_{\\mathbf{X}_i} \\\\
\\cdots\\\\
    \\mathbf{Z\\trp}_{ik}\\mid\\mathbf{Y}_{jk} &\\sim
    \\mathcal{MN}(\\mathbf{Y}_{jk}\\trp\\mathbf{A}_{ij}\\trp +
    \\mathbf{C}\\trp_{ik}, \\Sigma_{\\mathbf{Z}_k} +
    \\Y\\trp\\Sigma_{\\mathbf{X}_j}\\Y,\\Sigma_{\\mathbf{Z}_i})\\\\
    \\mathbf{Z}_{ik}\\mid\\mathbf{Y}_{jk} &\\sim
    \\mathcal{MN}(\\mathbf{A}_{ij}\\mathbf{Y}_{jk}+
    \\mathbf{C}_{ik},\\Sigma_{\\mathbf{Z}_i},\\Sigma_{\\mathbf{Z}_k} +
    \\Y\\trp\\Sigma_{\\mathbf{X}_j}\\Y)
These marginal likelihoods are implemented relatively efficiently in
`brainiak.matnormal.matnormal_likelihoods.matnorm_logp_marginal_row` and
`brainiak.matnormal.matnormal_likelihoods.matnorm_logp_marginal_col`.
Partitioned matrix normal conditionals
--------------------------------------
Here we extend the multivariate gaussian conditional identity to matrix
normals. This is used for prediction in some models. Below, we
use lowercase subscripts for sizes to make dimensionalities easier to track.
Uppercase subscripts for covariances help keep track of where they come
from. We apply the partitioned gaussian identity, starting with two
vectorized matrix-normals that form our partition:
.. math::
\\mathbf{X}_{ij} &\\sim \\mathcal{MN}(\\mathbf{A}_{ij}, \\Sigma_{i},
\\Sigma_{j}) \\rightarrow \\vecop[\\mathbf{X}_{ij}] \\sim
\\mathcal{N}(\\vecop[\\mathbf{A}_{ij}], \\Sigma_{j}\\otimes\\Sigma_{i})\\\\
\\mathbf{Y}_{ik} &\\sim \\mathcal{MN}(\\mathbf{B}_{ik}, \\Sigma_{i},
\\Sigma_{k}) \\rightarrow \\vecop[\\mathbf{Y}_{ik}] \\sim
\\mathcal{N}(\\vecop[\\mathbf{B}_{ik}], \\Sigma_{k}\\otimes\\Sigma_{i})\\\\
\\begin{bmatrix}\\vecop[\\mathbf{X}_{ij}] \\\\ \\vecop[\\mathbf{Y}_{ik}]
\\end{bmatrix}
& \\sim \\mathcal{N}\\left(\\vecop\\begin{bmatrix}\\mathbf{A}_{ij}
\\\\ \\mathbf{B}_{ik}
\\end{bmatrix}
, \\begin{bmatrix} \\Sigma_{j}\\otimes \\Sigma_i &
\\Sigma_{jk} \\otimes \\Sigma_i \\\\
\\Sigma_{kj}\\otimes \\Sigma_i & \\Sigma_{k} \\otimes
\\Sigma_i\\end{bmatrix}\\right)
We apply the standard partitioned Gaussian identity and simplify using the
properties of the :math:`\\vecop` operator and the mixed product property
of kronecker products:
.. math::
\\vecop[\\X_{ij}] \\mid \\vecop[\\Y_{ik}]\\sim
\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\otimes\\Sigma_i)
(\\Sigma_k\\inv\\otimes\\Sigma_i\\inv)(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]),\\\\
& \\Sigma_j\\otimes\\Sigma_i - (\\Sigma_{jk}\\otimes\\Sigma_i)
(\\Sigma_k\\inv\\otimes\\Sigma_i\\inv) (\\Sigma_{kj}\\otimes\\Sigma_i))\\\\
=\\mathcal{N}(&\\vecop[\\A_{ij}] +
(\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\Sigma_i\\Sigma_i\\inv)
(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\
& \\Sigma_j\\otimes\\Sigma_i -
(\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes
\\Sigma_i\\Sigma_i\\inv \\Sigma_i))\\\\
=\\mathcal{N}(&\\vecop[\\A_{ij}] + (\\Sigma_{jk}\\Sigma_k\\inv\\otimes\\I)
(\\vecop[\\Y_{ik}]-\\vecop[\\B_{ik}]), \\\\
& \\Sigma_j\\otimes\\Sigma_i -
(\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}\\otimes\\Sigma_i)\\\\
=\\mathcal{N}(&\\vecop[\\A_{ij}] +
\\vecop[\\Y_{ik}-\\B_{ik}\\Sigma_k\\inv\\Sigma_{kj}],
(\\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj})\\otimes\\Sigma_i)
Next, we recognize that this multivariate gaussian is equivalent to the
following matrix variate gaussian:
.. math::
\\X_{ij} \\mid \\Y_{ik}\\sim \\mathcal{MN}(\\A_{ij} +
(\\Y_{ik}-\\B_{ik})\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i,
\\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj})
The conditional in the other direction can be written by working through the
same algebra:
.. math::
\\Y_{ik} \\mid \\X_{ij}\\sim \\mathcal{MN}(\\B_{ik} +(\\X_{ij}-
\\A_{ij})\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i,
\\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk})
Finally, vertical rather than horizontal concatenation (yielding a partitioned
row rather than column covariance) can be written by recognizing the behavior
of the matrix normal under transposition:
.. math::
\\X\\trp_{ji} \\mid \\Y\\trp_{ki}\\sim \\mathcal{MN}(&\\A\\trp_{ji} +
\\Sigma_{jk}\\Sigma_k\\inv(\\Y\\trp_{ki}-\\B\\trp_{ki}),
\\Sigma_j-\\Sigma_{jk}\\Sigma_k\\inv\\Sigma_{kj}, \\Sigma_i)\\\\
\\Y\\trp_{ki} \\mid \\X\\trp_{ji}\\sim \\mathcal{MN}(&\\B\\trp_{ki} +
\\Sigma_{kj}\\Sigma_j\\inv(\\X\\trp_{ji}-\\A\\trp_{ji}),
\\Sigma_k-\\Sigma_{kj}\\Sigma_j\\inv\\Sigma_{jk}, \\Sigma_i)
These conditional likelihoods are implemented relatively efficiently
in `brainiak.matnormal.matnormal_likelihoods.matnorm_logp_conditional_row`
and `brainiak.matnormal.matnormal_likelihoods.matnorm_logp_conditional_col`.
"""
| 11,396 | 44.047431 | 84 | py |
brainiak | brainiak-master/brainiak/matnormal/matnormal_likelihoods.py | import tensorflow as tf
from tensorflow import linalg as tlinalg
from .utils import scaled_I
import logging
logger = logging.getLogger(__name__)
def _condition(X):
"""
Condition number (https://en.wikipedia.org/wiki/Condition_number)
used for diagnostics.
NOTE: this formulation is only defined for symmetric positive definite
matrices (which covariances should be, and what we're using this for)
Parameters
----------
X: tf.Tensor
Symmetric tensor to compute condition number of
"""
s = tf.linalg.svd(X, compute_uv=False)
return tf.reduce_max(input_tensor=s) / tf.reduce_min(input_tensor=s)
def solve_det_marginal(x, sigma, A, Q):
"""
Use matrix inversion lemma for the solve:
.. math::
(\\Sigma + AQA^T)^{-1} X =\\
(\\Sigma^{-1} - \\Sigma^{-1} A (Q^{-1} +
A^T \\Sigma^{-1} A)^{-1} A^T \\Sigma^{-1}) X
Use matrix determinant lemma for determinant:
.. math::
\\log|(\\Sigma + AQA^T)| = \\log|Q^{-1} + A^T \\Sigma^{-1} A|
+ \\log|Q| + \\log|\\Sigma|
Parameters
----------
x: tf.Tensor
Tensor to multiply the solve by
sigma: brainiak.matnormal.CovBase
Covariance object implementing solve and logdet
A: tf.Tensor
Factor multiplying the variable we marginalized out
Q: brainiak.matnormal.CovBase
Covariance object of marginalized variable,
implementing solve and logdet
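    Example (illustrative numpy check of the inversion lemma on small
    dense matrices; the covariance objects above expose the same
    quantities):
    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> A = rng.standard_normal((4, 2))
    >>> Sigma, Q = 2.0 * np.eye(4), 3.0 * np.eye(2)
    >>> Si = np.linalg.inv(Sigma)
    >>> lemma = Si - Si @ A @ np.linalg.inv(
    ...     np.linalg.inv(Q) + A.T @ Si @ A) @ A.T @ Si
    >>> bool(np.allclose(lemma, np.linalg.inv(Sigma + A @ Q @ A.T)))
    True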
"""
# For diagnostics, we want to check condition numbers
# of things we invert. This includes Q and Sigma, as well
    # as the "lemma factor", for lack of a better name
logging.log(logging.DEBUG, "Printing diagnostics for solve_det_marginal")
lemma_cond = _condition(
Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True))
logging.log(
logging.DEBUG,
f"lemma_factor condition={lemma_cond}",
)
logging.log(logging.DEBUG, f"Q condition={_condition(Q._cov)}")
logging.log(logging.DEBUG, f"sigma condition={_condition(sigma._cov)}")
    logging.log(
        logging.DEBUG,
        f"A max={tf.reduce_max(input_tensor=A)}, " +
        f"A min={tf.reduce_min(input_tensor=A)}",
    )
# cholesky of (Qinv + A^T Sigma^{-1} A), which looks sort of like
# a schur complement but isn't, so we call it the "lemma factor"
# since we use it in woodbury and matrix determinant lemmas
lemma_factor = tlinalg.cholesky(
Q._prec + tf.matmul(A, sigma.solve(A), transpose_a=True)
)
logdet = (
Q.logdet
+ sigma.logdet
+ 2 *
tf.reduce_sum(input_tensor=tf.math.log(
tlinalg.diag_part(lemma_factor)))
)
logging.log(logging.DEBUG, f"Log-determinant of Q={Q.logdet}")
logging.log(logging.DEBUG, f"sigma logdet={sigma.logdet}")
lemma_logdet = 2 * \
tf.reduce_sum(input_tensor=tf.math.log(
tlinalg.diag_part(lemma_factor)))
logging.log(
logging.DEBUG,
f"lemma factor logdet={lemma_logdet}",
)
# A^T Sigma^{-1}
Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True)
# (Qinv + A^T Sigma^{-1} A)^{-1} A^T Sigma^{-1}
prod_term = tlinalg.cholesky_solve(lemma_factor, Atrp_Sinv)
solve = tf.matmul(
sigma.solve(scaled_I(1.0, sigma.size) - tf.matmul(A, prod_term)), x
)
return solve, logdet
def solve_det_conditional(x, sigma, A, Q):
"""
Use matrix inversion lemma for the solve:
.. math::
(\\Sigma - AQ^{-1}A^T)^{-1} X =\\
(\\Sigma^{-1} + \\Sigma^{-1} A (Q -
A^T \\Sigma^{-1} A)^{-1} A^T \\Sigma^{-1}) X
Use matrix determinant lemma for determinant:
.. math::
\\log|(\\Sigma - AQ^{-1}A^T)| =
\\log|Q - A^T \\Sigma^{-1} A| - \\log|Q| + \\log|\\Sigma|
Parameters
----------
x: tf.Tensor
Tensor to multiply the solve by
sigma: brainiak.matnormal.CovBase
Covariance object implementing solve and logdet
A: tf.Tensor
Factor multiplying the variable we conditioned on
Q: brainiak.matnormal.CovBase
Covariance object of conditioning variable,
implementing solve and logdet
"""
# (Q - A^T Sigma^{-1} A)
lemma_factor = tlinalg.cholesky(
Q._cov - tf.matmul(A, sigma.solve(A), transpose_a=True)
)
logdet = (
-Q.logdet
+ sigma.logdet
+ 2 *
tf.reduce_sum(input_tensor=tf.math.log(
tlinalg.diag_part(lemma_factor)))
)
# A^T Sigma^{-1}
Atrp_Sinv = tf.matmul(A, sigma._prec, transpose_a=True)
# (Q - A^T Sigma^{-1} A)^{-1} A^T Sigma^{-1}
prod_term = tlinalg.cholesky_solve(lemma_factor, Atrp_Sinv)
solve = tf.matmul(
sigma.solve(scaled_I(1.0, sigma.size) + tf.matmul(A, prod_term)), x
)
return solve, logdet
def _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
):
"""Construct logp from the solves and determinants.
Parameters
----------------
colsize: int
        Column dimension of observation tensor
rowsize: int
Row dimension of observation tensor
logdet_row: tf.Tensor (scalar)
log-determinant of row covariance
logdet_col: tf.Tensor (scalar)
log-determinant of column covariance
solve_row: tf.Tensor
Inverse row covariance multiplying the observation tensor
    solve_col: tf.Tensor
Inverse column covariance multiplying the transpose of
the observation tensor
"""
    log2pi = 1.8378770664093453  # log(2 * pi)
logging.log(logging.DEBUG,
f"column precision trace ={tlinalg.trace(solve_col)}")
logging.log(logging.DEBUG,
f"row precision trace ={tlinalg.trace(solve_row)}")
logging.log(logging.DEBUG, f"row cov logdet ={logdet_row}")
logging.log(logging.DEBUG, f"col cov logdet ={logdet_col}")
denominator = (
-rowsize * colsize * log2pi - colsize * logdet_row -
rowsize * logdet_col
)
numerator = -tlinalg.trace(tf.matmul(solve_col, solve_row))
return 0.5 * (numerator + denominator)
def matnorm_logp(x, row_cov, col_cov):
"""Log likelihood for centered matrix-variate normal density.
Assumes that row_cov and col_cov follow the API defined in CovBase.
Parameters
----------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance implementing the CovBase API
col_cov: CovBase
Column Covariance implementing the CovBase API
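    Example (illustrative; with identity covariances the density reduces
    to an i.i.d. standard normal over the entries):
    >>> import numpy as np
    >>> import tensorflow as tf
    >>> from brainiak.matnormal.matnormal_likelihoods import matnorm_logp
    >>> from brainiak.matnormal.covs import CovIdentity
    >>> x = tf.constant(np.zeros((2, 3)))
    >>> logp = matnorm_logp(x, CovIdentity(size=2), CovIdentity(size=3))
    >>> bool(np.isclose(logp.numpy(), -3 * np.log(2 * np.pi)))
    True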
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
# precompute sigma_col^{-1} * x'
solve_col = col_cov.solve(tf.transpose(a=x))
logdet_col = col_cov.logdet
# precompute sigma_row^{-1} * x
solve_row = row_cov.solve(x)
logdet_row = row_cov.logdet
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
def matnorm_logp_marginal_row(x, row_cov, col_cov, marg, marg_cov):
"""
Log likelihood for marginal centered matrix-variate normal density.
.. math::
        X &\\sim \\mathcal{MN}(0, Q, C)\\\\
        Y \\mid X &\\sim \\mathcal{MN}(AX, R, C)\\\\
        Y &\\sim \\mathcal{MN}(0, R + AQA^T, C)
This function efficiently computes the marginals by unpacking some
info in the covariance classes and then dispatching to
`solve_det_marginal`.
Parameters
---------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance implementing the CovBase API (:math:`R` above).
col_cov: CovBase
Column Covariance implementing the CovBase API (:math:`C` above).
marg: tf.Tensor
Marginal factor (:math:`A` above).
marg_cov: CovBase
Prior covariance implementing the CovBase API (:math:`Q` above).
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
solve_col = col_cov.solve(tf.transpose(a=x))
logdet_col = col_cov.logdet
solve_row, logdet_row = solve_det_marginal(x, row_cov, marg, marg_cov)
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
def matnorm_logp_marginal_col(x, row_cov, col_cov, marg, marg_cov):
"""
Log likelihood for centered marginal matrix-variate normal density.
.. math::
        X &\\sim \\mathcal{MN}(0, R, Q)\\\\
        Y \\mid X &\\sim \\mathcal{MN}(XA, R, C)\\\\
        Y &\\sim \\mathcal{MN}(0, R, C + A^TQA)
This function efficiently computes the marginals by unpacking some
info in the covariance classes and then dispatching to
`solve_det_marginal`.
Parameters
---------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance implementing the CovBase API (:math:`R` above).
col_cov: CovBase
Column Covariance implementing the CovBase API (:math:`C` above).
marg: tf.Tensor
Marginal factor (:math:`A` above).
marg_cov: CovBase
Prior covariance implementing the CovBase API (:math:`Q` above).
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
solve_row = row_cov.solve(x)
logdet_row = row_cov.logdet
solve_col, logdet_col = solve_det_marginal(
tf.transpose(a=x), col_cov, tf.transpose(a=marg), marg_cov
)
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
def matnorm_logp_conditional_row(x, row_cov, col_cov, cond, cond_cov):
"""
Log likelihood for centered conditional matrix-variate normal density.
Consider the following partitioned matrix-normal density:
.. math::
\\begin{bmatrix}
\\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\\\
\\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right]\\end{bmatrix}
\\sim \\mathcal{N}\\left(0,\\begin{bmatrix} \\Sigma_{j} \\otimes
\\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\\\
\\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i}
\\end{bmatrix}\\right)
Then we can write the conditional:
.. math::
        \\mathbf{X}_{ji}^T \\mid \\mathbf{Y}_{ki}^T \\sim
        \\mathcal{MN}\\left(0, \\Sigma_{j}-\\Sigma_{j k} \\Sigma_{k}^{-1}
        \\Sigma_{k j}, \\Sigma_{i}\\right)
This function efficiently computes the conditionals by unpacking some
info in the covariance classes and then dispatching to
`solve_det_conditional`.
Parameters
---------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance (:math:`\\Sigma_{i}` in the notation above).
col_cov: CovBase
Column covariance (:math:`\\Sigma_{j}` in the notation above).
cond: tf.Tensor
Off-diagonal block of the partitioned covariance (:math:`\\Sigma_{jk}`
in the notation above).
cond_cov: CovBase
Covariance of conditioning variable (:math:`\\Sigma_{k}` in the
notation above).
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
solve_col = col_cov.solve(tf.transpose(a=x))
logdet_col = col_cov.logdet
solve_row, logdet_row = solve_det_conditional(x, row_cov, cond, cond_cov)
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov):
"""
Log likelihood for centered conditional matrix-variate normal density.
Consider the following partitioned matrix-normal density:
.. math::
\\begin{bmatrix}
\\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\\\
\\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right]\\end{bmatrix}
\\sim \\mathcal{N}\\left(0,\\begin{bmatrix} \\Sigma_{j} \\otimes
\\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\\\
\\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i}
\\end{bmatrix}\\right)
Then we can write the conditional:
.. math::
        \\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim
        \\mathcal{MN}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}
        \\Sigma_{k}^{-1} \\Sigma_{k j}\\right)
This function efficiently computes the conditionals by unpacking some
info in the covariance classes and then dispatching to
`solve_det_conditional`.
Parameters
---------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance (:math:`\\Sigma_{i}` in the notation above).
col_cov: CovBase
Column covariance (:math:`\\Sigma_{j}` in the notation above).
cond: tf.Tensor
Off-diagonal block of the partitioned covariance (:math:`\\Sigma_{jk}`
in the notation above).
cond_cov: CovBase
Covariance of conditioning variable (:math:`\\Sigma_{k}` in the
notation above).
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
solve_row = row_cov.solve(x)
logdet_row = row_cov.logdet
solve_col, logdet_col = solve_det_conditional(
tf.transpose(a=x), col_cov, tf.transpose(a=cond), cond_cov
)
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
| 13,482 | 30.355814 | 78 | py |
brainiak | brainiak-master/brainiak/factoranalysis/tfa.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Topographical Factor Analysis (TFA)
This implementation is based on the work in [Manning2014]_ and
[AndersonM2016]_.
.. [Manning2014] "Topographic factor analysis: a bayesian model for inferring
brain networks from neural data", J. R. Manning, R. Ranganath, K. A. Norman,
and D. M. Blei.PLoS One, vol. 9, no. 5, 2014.
.. [AndersonM2016] "Scaling Up Multi-Subject Neuroimaging Factor Analysis"
Michael J. Anderson, Mihai Capota, Javier S. Turek, Xia Zhu,
Theodore L. Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning,
Peter J. Ramadge, and Kenneth A. Norman
2016.
"""
# Authors: Xia Zhu (Intel Labs), Jeremy Manning (Dartmouth College) 2015~2016
from sklearn.base import BaseEstimator
from sklearn.metrics import mean_squared_error
from sklearn.cluster import KMeans
from scipy.optimize import least_squares
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance
from ..utils.utils import from_tri_2_sym, from_sym_2_tri
from . import tfa_extension # type: ignore
import numpy as np
import math
import gc
import logging
__all__ = [
"TFA",
]
logger = logging.getLogger(__name__)
class TFA(BaseEstimator):
"""Topographical Factor Analysis (TFA)
Given data from one subject, factorize it as a spatial factor F and
a weight matrix W.
Parameters
----------
max_iter : int, default: 10
Number of iterations to run the algorithm.
threshold : float, default: 1.0
Tolerance for terminating the parameter estimation
K : int, default: 50
Number of factors to compute
nlss_method : {'trf', 'dogbox', 'lm'}, default: 'trf'
        Non-Linear Least Square (NLSS) algorithm used by scipy.least_squares
        to perform minimization. More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
    nlss_loss: str or callable, default: 'soft_l1'
Loss function used by scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
jac : {'2-point', '3-point', 'cs', callable}, default: '2-point'
Method of computing the Jacobian matrix.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
    x_scale : float or array_like or 'jac', default: 'jac'
        Characteristic scale of each variable for scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
tr_solver: {None, 'exact', 'lsmr'}, default: None
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
weight_method : {'rr','ols'}, default: 'rr'
Method for estimating weight matrix W given X and F.
        'rr' means ridge regression, 'ols' means ordinary least squares.
upper_ratio : float, default: 1.8
The upper bound of the ratio between factor's width
and maximum sigma of scanner coordinates.
lower_ratio : float, default: 0.02
The lower bound of the ratio between factor's width
and maximum sigma of scanner coordinates.
max_num_voxel : int, default: 5000
The maximum number of voxels to subsample.
max_num_tr : int, default: 500
The maximum number of TRs to subsample.
seed : int, default: 100
Seed for subsampling voxels and trs.
verbose : boolean, default: False
Verbose mode flag.
Attributes
----------
local_posterior_ : 1D array
Local posterior on subject's centers and widths
F_ : 2D array, in shape [n_voxel, K]
Latent factors of the subject
W_ : 2D array, in shape [K, n_tr]
Weight matrix of the subject
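    Examples
    --------
    Illustrative sketch (toy sizes; fitting requires the compiled
    tfa_extension, hence the skip directive):
    >>> import numpy as np
    >>> from brainiak.factoranalysis.tfa import TFA
    >>> n_voxel, n_tr = 200, 40
    >>> X = np.random.randn(n_voxel, n_tr)
    >>> R = np.random.randint(0, 10, size=(n_voxel, 3))  # scanner coords
    >>> tfa = TFA(K=5, max_num_voxel=n_voxel, max_num_tr=n_tr)
    >>> tfa.fit(X, R)  # doctest: +SKIP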
"""
def __init__(
self,
max_iter=10,
threshold=1.0,
K=50,
nlss_method='trf',
nlss_loss='soft_l1',
jac='2-point',
x_scale='jac',
tr_solver=None,
weight_method='rr',
upper_ratio=1.8,
lower_ratio=0.02,
max_num_tr=500,
max_num_voxel=5000,
seed=100,
verbose=False):
self.miter = max_iter
self.threshold = threshold
self.K = K
self.nlss_method = nlss_method
self.nlss_loss = nlss_loss
self.jac = jac
self.x_scale = x_scale
self.tr_solver = tr_solver
self.weight_method = weight_method
self.upper_ratio = upper_ratio
self.lower_ratio = lower_ratio
self.max_num_tr = max_num_tr
self.max_num_voxel = max_num_voxel
self.seed = seed
self.verbose = verbose
def set_K(self, K):
"""set K for the subject
Parameters
----------
K : integer
            Number of latent factors.
Returns
-------
TFA
Returns the instance itself.
"""
self.K = K
return self
def set_prior(self, prior):
"""set prior for the subject
Parameters
----------
prior : 1D array, with K*(n_dim+1) elements
Subject prior of centers and widths.
Returns
-------
TFA
Returns the instance itself.
"""
self.local_prior = prior
return self
def set_seed(self, seed):
"""set seed for the subject
Parameters
----------
seed : int
Seed for subsampling voxels and trs
Returns
-------
TFA
Returns the instance itself.
"""
self.seed = seed
return self
def init_prior(self, R):
"""initialize prior for the subject
        Parameters
        ----------
        R : 2D array, in shape [n_voxel, n_dim]
            The voxel coordinate matrix of fMRI data
        Returns
-------
TFA
Returns the instance itself.
"""
centers, widths = self.init_centers_widths(R)
# update prior
prior = np.zeros(self.K * (self.n_dim + 1))
self.set_centers(prior, centers)
self.set_widths(prior, widths)
self.set_prior(prior)
return self
    def _assign_posterior(self):
        """align the posterior with the prior via the Hungarian algorithm
        (linear sum assignment on factor centers)
Returns
-------
TFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.local_prior)
posterior_centers = self.get_centers(self.local_posterior_)
posterior_widths = self.get_widths(self.local_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.local_posterior_, posterior_centers[col_ind])
self.set_widths(self.local_posterior_, posterior_widths[col_ind])
return self
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
diff = self.local_prior - self.local_posterior_
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(self.local_posterior_ ** 2)
logger.info(
'tfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
def _mse_converged(self):
"""Check convergence based on mean squared error
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
mse = mean_squared_error(self.local_prior, self.local_posterior_,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse
def get_map_offset(self):
"""Compute offset of prior/posterior
Returns
-------
        map_offset : 1D array
The offset to different fields in prior/posterior
"""
nfield = 4
self.map_offset = np.zeros(nfield).astype(int)
field_size = self.K * np.array([self.n_dim, 1, self.cov_vec_size, 1])
for i in np.arange(nfield - 1) + 1:
self.map_offset[i] = self.map_offset[i - 1] + field_size[i - 1]
return self.map_offset
def init_centers_widths(self, R):
"""Initialize prior of centers and widths
        Parameters
        ----------
        R : 2D array, with shape [n_voxel, n_dim]
            The scanner coordinate matrix of one subject's fMRI data
        Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
"""
kmeans = KMeans(
init='k-means++',
n_clusters=self.K,
n_init=10,
random_state=100)
kmeans.fit(R)
centers = kmeans.cluster_centers_
widths = self._get_max_sigma(R) * np.ones((self.K, 1))
return centers, widths
def get_template(self, R):
"""Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance
"""
centers, widths = self.init_centers_widths(R)
template_prior =\
np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size))
# template centers cov and widths var are const
template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
template_widths_var = self._get_max_sigma(R)
centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
widths_var_all = np.tile(template_widths_var, self.K)
# initial mean of centers' mean
self.set_centers(template_prior, centers)
self.set_widths(template_prior, widths)
self.set_centers_mean_cov(template_prior, centers_cov_all)
self.set_widths_mean_var(template_prior, widths_var_all)
return template_prior, template_centers_cov, template_widths_var
def set_centers(self, estimation, centers):
"""Set estimation on centers
Parameters
----------
        estimation : 1D array
Either prior or posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
estimation[0:self.map_offset[1]] = centers.ravel()
def set_widths(self, estimation, widths):
"""Set estimation on widths
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
widths : 2D array, in shape [K, 1]
Estimation on widths
"""
estimation[self.map_offset[1]:self.map_offset[2]] = widths.ravel()
    def set_centers_mean_cov(self, estimation, centers_mean_cov):
        """Set estimation on the covariance of centers' mean
        Parameters
        ----------
        estimation : 1D array
            Either prior or posterior estimation
        centers_mean_cov : array, with K * cov_vec_size elements
            Estimation on the covariance of centers' mean
"""
estimation[self.map_offset[2]:self.map_offset[3]] =\
centers_mean_cov.ravel()
    def set_widths_mean_var(self, estimation, widths_mean_var):
        """Set estimation on the variance of widths' mean
        Parameters
        ----------
        estimation : 1D array
            Either prior or posterior estimation
        widths_mean_var : array, with K elements
            Estimation on the variance of widths' mean
"""
estimation[self.map_offset[3]:] = widths_mean_var.ravel()
def get_centers(self, estimation):
"""Get estimation on centers
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
centers = estimation[0:self.map_offset[1]]\
.reshape(self.K, self.n_dim)
return centers
def get_widths(self, estimation):
"""Get estimation on widths
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
        widths : 2D array, in shape [K, 1]
            Estimation of widths
"""
widths = estimation[self.map_offset[1]:self.map_offset[2]]\
.reshape(self.K, 1)
return widths
def get_centers_mean_cov(self, estimation):
"""Get estimation on the covariance of centers' mean
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
centers_mean_cov : 2D array, in shape [K, cov_vec_size]
Estimation of the covariance of centers' mean
"""
centers_mean_cov = estimation[self.map_offset[2]:self.map_offset[3]]\
.reshape(self.K, self.cov_vec_size)
return centers_mean_cov
def get_widths_mean_var(self, estimation):
"""Get estimation on the variance of widths' mean
Parameters
----------
        estimation : 1D array
            Either prior or posterior estimation
Returns
-------
widths_mean_var : 2D array, in shape [K, 1]
Estimation on variance of widths' mean
"""
widths_mean_var = \
estimation[self.map_offset[3]:].reshape(self.K, 1)
return widths_mean_var
def get_factors(self, unique_R, inds, centers, widths):
"""Calculate factors based on centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
scanner coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of the original coordinate matrix from the unique
array.
centers : 2D array, with shape [K, n_dim]
The centers of factors.
widths : 1D array, with shape [K, 1]
The widths of factors.
Returns
-------
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
"""
F = np.zeros((len(inds[0]), self.K))
tfa_extension.factor(
F,
centers,
widths,
unique_R[0],
unique_R[1],
unique_R[2],
inds[0],
inds[1],
inds[2])
return F
def get_weights(self, data, F):
"""Calculate weight matrix based on fMRI data and factors
Parameters
----------
data : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
Returns
-------
W : 2D array, with shape [K, n_tr]
The weight matrix from fMRI data.
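        Notes
        -----
        For ``weight_method='rr'`` this solves the ridge system
        ``(F.T F + beta I) W = F.T X`` with penalty ``beta = Var(data)``;
        for ``'ols'`` the penalty term is dropped.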
"""
beta = np.var(data)
trans_F = F.T.copy()
if self.weight_method == 'rr':
W = np.linalg.solve(trans_F.dot(F) + beta * np.identity(self.K),
trans_F.dot(data))
else:
W = np.linalg.solve(trans_F.dot(F), trans_F.dot(data))
return W
def _get_max_sigma(self, R):
"""Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates.
"""
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
return max_sigma
def get_bounds(self, R):
"""Calculate lower and upper bounds for centers and widths
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
        bounds : 2-tuple of array_like
            The lower and upper bounds on factor's centers and widths.
"""
max_sigma = self._get_max_sigma(R)
final_lower = np.zeros(self.K * (self.n_dim + 1))
final_lower[0:self.K * self.n_dim] =\
np.tile(np.nanmin(R, axis=0), self.K)
final_lower[self.K * self.n_dim:] =\
np.repeat(self.lower_ratio * max_sigma, self.K)
final_upper = np.zeros(self.K * (self.n_dim + 1))
final_upper[0:self.K * self.n_dim] =\
np.tile(np.nanmax(R, axis=0), self.K)
final_upper[self.K * self.n_dim:] =\
np.repeat(self.upper_ratio * max_sigma, self.K)
bounds = (final_lower, final_upper)
return bounds
def _residual_multivariate(
self,
estimate,
unique_R,
inds,
X,
W,
template_centers,
template_centers_mean_cov,
template_widths,
template_widths_mean_var_reci,
data_sigma):
"""Residual function for estimating centers and widths
Parameters
----------
estimate : 1D array
Initial estimation on centers
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of the original coordinate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
template_centers: 2D array, with shape [K, n_dim]
The template prior on centers
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths: 1D array
The template prior on widths
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
data_sigma: float
The variance of X.
Returns
-------
final_err : 1D array
The residual function for estimating centers.
"""
centers = self.get_centers(estimate)
widths = self.get_widths(estimate)
recon = X.size
other_err = 0 if template_centers is None else (2 * self.K)
final_err = np.zeros(recon + other_err)
F = self.get_factors(unique_R, inds, centers, widths)
sigma = np.zeros((1,))
sigma[0] = data_sigma
tfa_extension.recon(final_err[0:recon], X, F, W, sigma)
if other_err > 0:
# center error
for k in np.arange(self.K):
diff = (centers[k] - template_centers[k])
cov = from_tri_2_sym(template_centers_mean_cov[k], self.n_dim)
final_err[recon + k] = math.sqrt(
self.sample_scaling *
diff.dot(np.linalg.solve(cov, diff.T)))
# width error
base = recon + self.K
dist = template_widths_mean_var_reci *\
(widths - template_widths) ** 2
final_err[base:] = np.sqrt(self.sample_scaling * dist).ravel()
return final_err
def _estimate_centers_widths(
self,
unique_R,
inds,
X,
W,
init_centers,
init_widths,
template_centers,
            template_centers_mean_cov,
            template_widths,
template_widths_mean_var_reci):
"""Estimate centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of the original coordinate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
init_centers : 2D array, with shape [K, n_dim]
The initial values of centers.
init_widths : 1D array
The initial values of widths.
template_centers: 1D array
The template prior on centers
        template_centers_mean_cov: 2D array, with shape [K, cov_size]
            The template prior on covariance of centers' mean
        template_widths: 1D array
            The template prior on widths
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
final_estimate.x: 1D array
The newly estimated centers and widths.
final_estimate.cost: float
The cost value.
"""
        # least_squares only accepts x in 1D format
        init_estimate = np.hstack(
            (init_centers.ravel(), init_widths.ravel()))
data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
final_estimate = least_squares(
self._residual_multivariate,
init_estimate,
args=(
unique_R,
inds,
X,
W,
template_centers,
                template_centers_mean_cov,
                template_widths,
template_widths_mean_var_reci,
data_sigma),
method=self.nlss_method,
loss=self.nlss_loss,
bounds=self.bounds,
verbose=0,
x_scale=self.x_scale,
tr_solver=self.tr_solver)
return final_estimate.x, final_estimate.cost
def _fit_tfa(self, data, R, template_prior=None):
"""TFA main algorithm
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data from one subject.
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : 1D array,
The template prior on centers and widths.
Returns
-------
TFA
Returns the instance itself.
"""
if template_prior is None:
template_centers = None
template_widths = None
template_centers_mean_cov = None
template_widths_mean_var_reci = None
else:
template_centers = self.get_centers(template_prior)
template_widths = self.get_widths(template_prior)
template_centers_mean_cov =\
self.get_centers_mean_cov(template_prior)
template_widths_mean_var_reci = 1.0 /\
self.get_widths_mean_var(template_prior)
inner_converged = False
np.random.seed(self.seed)
n = 0
while n < self.miter and not inner_converged:
self._fit_tfa_inner(
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci)
self._assign_posterior()
inner_converged, _ = self._converged()
if not inner_converged:
self.local_prior = self.local_posterior_
else:
logger.info("TFA converged at %d iteration." % (n))
n += 1
gc.collect()
return self
def get_unique_R(self, R):
"""Get unique vlaues from coordinate matrix
Parameters
----------
R : 2D array
The coordinate matrix of a subject's fMRI data
        Returns
        -------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
            dimension of original coordinate matrix from the unique
array.
"""
unique_R = []
inds = []
for d in np.arange(self.n_dim):
tmp_unique, tmp_inds = np.unique(R[:, d], return_inverse=True)
unique_R.append(tmp_unique)
inds.append(tmp_inds)
return unique_R, inds
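    # Illustration of the trick used above (hypothetical values):
    # np.unique with return_inverse=True returns, besides the sorted
    # unique values, the indices that rebuild the original column:
    #
    # >>> col = np.array([2, 5, 2, 7, 5])
    # >>> uniq, inds = np.unique(col, return_inverse=True)
    # >>> np.array_equal(uniq[inds], col)
    # True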
def _fit_tfa_inner(
self,
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
        template_centers: 2D array, with shape [K, n_dim]
            The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself.
"""
nfeature = data.shape[0]
nsample = data.shape[1]
feature_indices =\
np.random.choice(nfeature, self.max_num_voxel, replace=False)
sample_features = np.zeros(nfeature).astype(bool)
sample_features[feature_indices] = True
samples_indices =\
np.random.choice(nsample, self.max_num_tr, replace=False)
        curr_data = data[feature_indices]
        curr_data = curr_data[:, samples_indices].copy()
curr_R = R[feature_indices].copy()
centers = self.get_centers(self.local_prior)
widths = self.get_widths(self.local_prior)
unique_R, inds = self.get_unique_R(curr_R)
F = self.get_factors(unique_R, inds, centers, widths)
W = self.get_weights(curr_data, F)
self.local_posterior_, self.total_cost = self._estimate_centers_widths(
unique_R, inds, curr_data, W, centers, widths,
template_centers, template_centers_mean_cov,
template_widths, template_widths_mean_var_reci)
return self
def fit(self, X, R, template_prior=None):
""" Topographical Factor Analysis (TFA)[Manning2014]
Parameters
----------
X : 2D array, in shape [n_voxel, n_sample]
The fMRI data of one subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : None or 1D array
The template prior as an extra constraint
None when fitting TFA alone
"""
if self.verbose:
logger.info('Start to fit TFA ')
if not isinstance(X, np.ndarray):
raise TypeError("Input data should be an array")
if X.ndim != 2:
raise TypeError("Input data should be 2D array")
if not isinstance(R, np.ndarray):
raise TypeError("Input coordinate matrix should be an array")
if R.ndim != 2:
raise TypeError("Input coordinate matrix should be 2D array")
if X.shape[0] != R.shape[0]:
raise TypeError(
"The number of voxels should be the same in X and R!")
if self.weight_method != 'rr' and self.weight_method != 'ols':
raise ValueError(
"only 'rr' and 'ols' are accepted as weight_method!")
# main algorithm
self.n_dim = R.shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
self.map_offset = self.get_map_offset()
self.bounds = self.get_bounds(R)
n_voxel = X.shape[0]
n_tr = X.shape[1]
self.sample_scaling = 0.5 * float(
self.max_num_voxel * self.max_num_tr) / float(n_voxel * n_tr)
if template_prior is None:
self.init_prior(R)
else:
self.local_prior = template_prior[0: self.map_offset[2]]
self._fit_tfa(X, R, template_prior)
if template_prior is None:
centers = self.get_centers(self.local_posterior_)
widths = self.get_widths(self.local_posterior_)
unique_R, inds = self.get_unique_R(R)
self.F_ = self.get_factors(unique_R, inds, centers, widths)
self.W_ = self.get_weights(X, self.F_)
return self
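
# Minimal usage sketch (illustrative only; array sizes and variable names
# below are hypothetical):
#
# X = np.random.randn(1000, 200)                # 1000 voxels, 200 TRs
# R = np.random.randint(0, 50, size=(1000, 3))  # voxel coordinates
# tfa = TFA(K=5)
# tfa.fit(X, R)
# F, W = tfa.F_, tfa.W_  # spatial factors and per-TR weights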
| 30,560 | 28.81561 | 99 | py |
brainiak | brainiak-master/brainiak/factoranalysis/htfa.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hierarchical Topographical Factor Analysis (HTFA)
This implementation is based on the work in [Manning2014-1]_, [Manning2014-2]_,
[AndersonMJ2016]_, and [Manning2018]_.
.. [Manning2014-1] "Topographic factor analysis: a Bayesian model for
inferring brain networks from neural data", J. R. Manning,
R. Ranganath, K. A. Norman, and D. M. Blei. PLoS One, vol. 9, no. 5,
2014.
.. [Manning2014-2] "Hierarchical topographic factor analysis", J. R.
Manning, R. Ranganath, W. Keung, N. B. Turk-Browne, J. D.Cohen,
K. A. Norman, and D. M. Blei. Pattern Recognition in Neuroimaging,
2014 International Workshop on, June 2014.
.. [Manning2018] "A Probabilistic Approach to Discovering Dynamic Full-brain
   Functional Connectivity Patterns", J. R. Manning, X. Zhu, T.L. Willke,
R. Ranganath, K. Stachenfeld, U. Hasson, D. M. Blei and K. A. Norman.
Neuroimage, 2018.
https://doi.org/10.1016/j.neuroimage.2018.01.071
.. [AndersonMJ2016] "Enabling Factor Analysis on Thousand-Subject Neuroimaging
Datasets",
Michael J. Anderson, Mihai Capotă, Javier S. Turek, Xia Zhu, Theodore L.
Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning, Peter J. Ramadge,
Kenneth A. Norman,
IEEE International Conference on Big Data, 2016.
https://doi.org/10.1109/BigData.2016.7840719
"""
# Authors: Xia Zhu (Intel Labs), Jeremy Manning (Dartmouth College) 2015~2016
import numpy as np
from mpi4py import MPI
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import mean_squared_error
from scipy.spatial import distance
import logging
from .tfa import TFA
from ..utils.utils import from_tri_2_sym, from_sym_2_tri
__all__ = [
"HTFA",
]
logger = logging.getLogger(__name__)
class HTFA(TFA):
"""Hierarchical Topographical Factor Analysis (HTFA)
Given multi-subject data, HTFA factorizes data from each subject as a
    spatial factor F and a weight matrix W per subject. At the top
    level, it also estimates a global template across subjects.
Parameters
----------
K : int
Number of factors to compute.
n_subj : int
Total number of subjects in dataset.
max_global_iter : int, default: 10
Number of global iterations to run the algorithm.
max_local_iter : int, default: 10
Number of local iterations to run on each subject within each
        global iteration.
    threshold : float, default: 0.01
        Tolerance for terminating the parameter estimation
nlss_method : {'trf', 'dogbox', 'lm'}, default: 'trf'
        Non-Linear Least Squares (NLSS) algorithm used by
        scipy.least_squares to perform minimization. More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
nlss_loss: str or callable, default: 'linear'
Loss function used by scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
jac : {'2-point', '3-point', 'cs', callable}, default: '2-point'
Method of computing the Jacobian matrix.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
x_scale : float or array_like or 'jac', default: 1.0
        Characteristic scale of each variable for scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
tr_solver: {None, 'exact', 'lsmr'}, default: None
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
weight_method : {'rr','ols'}, default: 'rr'
Method for estimating weight matrix W given X and F.
'rr' means ridge regression, 'ols' means ordinary least square.
upper_ratio : float, default: 1.8
The upper bound of the ratio between factor's width and brain diameter.
lower_ratio : float, default: 0.02
The lower bound of the ratio between factor's width and brain diameter.
voxel_ratio : float, default: 0.25
The percentage of voxels to sample in each inner iteration.
tr_ratio : float, default: 0.1
        The percentage of TRs to sample in each inner iteration.
max_voxel : int, default: 5000
The maximum number of voxels to sample in each inner iteration.
max_tr : int, default: 500
        The maximum number of TRs to sample in each inner iteration.
comm : Intracomm
MPI communication group, default MPI.COMM_WORLD
verbose : boolean, default: False
Verbose mode flag.
Attributes
----------
global_prior_ : 1D array
The global prior on mean and variance of centers and widths.
global_posterior_ : 1D array
The global posterior on mean and variance of centers and widths.
local_posterior_ : 1D array
Local posterior on centers and widths of subjects allocated
to this process.
local_weights_ : 1D array
Local posterior on weights allocated to this process.
Notes
-----
    We recommend using data in MNI space to better interpret the global template
"""
def __init__(self, K, n_subj, max_global_iter=10, max_local_iter=10,
threshold=0.01, nlss_method='trf', nlss_loss='soft_l1',
jac='2-point', x_scale='jac', tr_solver=None,
weight_method='rr', upper_ratio=1.8, lower_ratio=0.02,
voxel_ratio=0.25, tr_ratio=0.1, max_voxel=5000, max_tr=500,
comm=MPI.COMM_WORLD, verbose=False):
self.K = K
self.n_subj = n_subj
self.max_global_iter = max_global_iter
self.max_local_iter = max_local_iter
self.threshold = threshold
self.nlss_method = nlss_method
self.nlss_loss = nlss_loss
self.jac = jac
self.x_scale = x_scale
self.tr_solver = tr_solver
self.weight_method = weight_method
self.upper_ratio = upper_ratio
self.lower_ratio = lower_ratio
self.voxel_ratio = voxel_ratio
self.tr_ratio = tr_ratio
self.max_voxel = max_voxel
self.max_tr = max_tr
self.comm = comm
self.verbose = verbose
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
diff = prior - posterior
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(posterior ** 2)
logger.info(
'htfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
def _mse_converged(self):
"""Check convergence based on mean squared difference between
prior and posterior
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
mse = mean_squared_error(prior, posterior,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse
def _map_update(
self,
prior_mean,
prior_cov,
global_cov_scaled,
new_observation):
"""Maximum A Posterior (MAP) update of a parameter
Parameters
----------
prior_mean : float or 1D array
Prior mean of parameters.
prior_cov : float or 1D array
Prior variance of scalar parameter, or
prior covariance of multivariate parameter
global_cov_scaled : float or 1D array
Global prior variance of scalar parameter, or
global prior covariance of multivariate parameter
new_observation : 1D or 2D array, with shape [n_dim, n_subj]
New observations on parameters.
Returns
-------
posterior_mean : float or 1D array
Posterior mean of parameters.
posterior_cov : float or 1D array
Posterior variance of scalar parameter, or
posterior covariance of multivariate parameter
"""
common = np.linalg.inv(prior_cov + global_cov_scaled)
observation_mean = np.mean(new_observation, axis=1)
posterior_mean = prior_cov.dot(common.dot(observation_mean)) +\
global_cov_scaled.dot(common.dot(prior_mean))
posterior_cov =\
prior_cov.dot(common.dot(global_cov_scaled))
return posterior_mean, posterior_cov
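    # Worked illustration of the update above (hypothetical scalar case;
    # the code itself expects matrices, since it calls np.linalg.inv):
    # with prior_mean = 0, prior_cov = 1, global_cov_scaled = 1 and an
    # observation mean of 2,
    #     common = 1 / (1 + 1) = 0.5
    #     posterior_mean = 1 * 0.5 * 2 + 1 * 0.5 * 0 = 1.0
    #     posterior_cov = 1 * 0.5 * 1 = 0.5
    # i.e. the posterior mean interpolates between the observed and prior
    # means, weighted by the two covariances, and the uncertainty shrinks.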
def _map_update_posterior(self):
"""Maximum A Posterior (MAP) update of HTFA parameters
Returns
-------
HTFA
Returns the instance itself.
"""
self.global_posterior_ = self.global_prior_.copy()
prior_centers = self.get_centers(self.global_prior_)
prior_widths = self.get_widths(self.global_prior_)
prior_centers_mean_cov = self.get_centers_mean_cov(self.global_prior_)
prior_widths_mean_var = self.get_widths_mean_var(self.global_prior_)
center_size = self.K * self.n_dim
posterior_size = center_size + self.K
for k in np.arange(self.K):
next_centers = np.zeros((self.n_dim, self.n_subj))
next_widths = np.zeros(self.n_subj)
for s in np.arange(self.n_subj):
center_start = s * posterior_size
width_start = center_start + center_size
start_idx = center_start + k * self.n_dim
end_idx = center_start + (k + 1) * self.n_dim
next_centers[:, s] = self.gather_posterior[start_idx:end_idx]\
.copy()
next_widths[s] = self.gather_posterior[width_start + k].copy()
# centers
posterior_mean, posterior_cov = self._map_update(
prior_centers[k].T.copy(),
from_tri_2_sym(prior_centers_mean_cov[k], self.n_dim),
self.global_centers_cov_scaled,
next_centers)
self.global_posterior_[k * self.n_dim:(k + 1) * self.n_dim] =\
posterior_mean.T
start_idx = self.map_offset[2] + k * self.cov_vec_size
end_idx = self.map_offset[2] + (k + 1) * self.cov_vec_size
self.global_posterior_[start_idx:end_idx] =\
from_sym_2_tri(posterior_cov)
# widths
common = 1.0 /\
(prior_widths_mean_var[k] + self.global_widths_var_scaled)
observation_mean = np.mean(next_widths)
tmp = common * self.global_widths_var_scaled
self.global_posterior_[self.map_offset[1] + k] = \
prior_widths_mean_var[k] * common * observation_mean +\
tmp * prior_widths[k]
self.global_posterior_[self.map_offset[3] + k] = \
prior_widths_mean_var[k] * tmp
return self
def _get_gather_offset(self, size):
"""Calculate the offset for gather result from this process
Parameters
----------
size : int
            The total number of processes.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id
"""
gather_size = np.zeros(size).astype(int)
gather_offset = np.zeros(size).astype(int)
num_local_subjs = np.zeros(size).astype(int)
subject_map = {}
        for idx in np.arange(self.n_subj):
cur_rank = idx % size
gather_size[cur_rank] += self.prior_size
subject_map[idx] = (cur_rank, num_local_subjs[cur_rank])
num_local_subjs[cur_rank] += 1
for idx in np.arange(size - 1) + 1:
gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1]
tuple_size = tuple(gather_size)
tuple_offset = tuple(gather_offset)
return tuple_size, tuple_offset, subject_map
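    # Illustration (hypothetical sizes): with n_subj = 5 subjects dealt
    # round-robin to size = 2 processes and prior_size = P, rank 0 holds
    # subjects 0, 2, 4 and rank 1 holds subjects 1, 3, giving
    # tuple_size = (3 * P, 2 * P), tuple_offset = (0, 3 * P) and
    # subject_map = {0: (0, 0), 1: (1, 0), 2: (0, 1), 3: (1, 1), 4: (0, 2)}.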
def _get_weight_size(self, data, n_local_subj):
"""Calculate the size of weight for this process
Parameters
----------
data : a list of 2D array, each in shape [n_voxel, n_tr]
            The fMRI data from multiple subjects.
n_local_subj : int
Number of subjects allocated to this process.
Returns
-------
weight_size : 1D array
The size of total subject weight on this process.
local_weight_offset : 1D array
Number of elements away from the first element
in the combined weight array at which to begin
the new, segmented array for a subject
"""
weight_size = np.zeros(1).astype(int)
local_weight_offset = np.zeros(n_local_subj).astype(int)
for idx, subj_data in enumerate(data):
if idx > 0:
local_weight_offset[idx] = weight_size[0]
weight_size[0] += self.K * subj_data.shape[1]
return weight_size, local_weight_offset
def _get_subject_info(self, n_local_subj, data):
"""Calculate metadata for subjects allocated to this process
Parameters
----------
n_local_subj : int
Number of subjects allocated to this process.
        data : list of 2D array, each in shape [n_voxel, n_tr]
            The fMRI data of the subjects allocated to this process.
Returns
-------
max_sample_tr : 1D array
Maximum number of TR to subsample for each subject
max_sample_voxel : 1D array
Maximum number of voxel to subsample for each subject
"""
max_sample_tr = np.zeros(n_local_subj).astype(int)
max_sample_voxel = np.zeros(n_local_subj).astype(int)
for idx in np.arange(n_local_subj):
nvoxel = data[idx].shape[0]
ntr = data[idx].shape[1]
max_sample_voxel[idx] =\
min(self.max_voxel, int(self.voxel_ratio * nvoxel))
max_sample_tr[idx] = min(self.max_tr, int(self.tr_ratio * ntr))
return max_sample_tr, max_sample_voxel
def _get_mpi_info(self):
"""get basic MPI info
Returns
-------
comm : Intracomm
Returns MPI communication group
rank : integer
Returns the rank of this process
size : integer
Returns total number of processes
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
return rank, size
def _init_prior_posterior(self, rank, R, n_local_subj):
"""set prior for this subject
Parameters
----------
rank : integer
The rank of this process
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
The number of subjects allocated to this process.
Returns
-------
HTFA
Returns the instance itself.
"""
if rank == 0:
idx = np.random.choice(n_local_subj, 1)
self.global_prior_, self.global_centers_cov,\
self.global_widths_var = self.get_template(R[idx[0]])
self.global_centers_cov_scaled =\
self.global_centers_cov / float(self.n_subj)
self.global_widths_var_scaled =\
self.global_widths_var / float(self.n_subj)
self.gather_posterior = np.zeros(self.n_subj * self.prior_size)
self.global_posterior_ = np.zeros(self.prior_size)
else:
self.global_prior_ = np.zeros(self.prior_bcast_size)
self.global_posterior_ = None
self.gather_posterior = None
return self
def _gather_local_posterior(self, use_gather,
gather_size, gather_offset):
"""Gather/Gatherv local posterior
Parameters
----------
use_gather : boolean
Whether to use Gather or Gatherv
gather_size : 1D array
The size of each local posterior
gather_offset : 1D array
The offset of each local posterior
Returns
-------
HTFA
Returns the instance itself.
Notes
-----
We use numpy array rather than generic Python objects for MPI
communication because Gatherv is only supported for the former.
https://pythonhosted.org/mpi4py/usrman/tutorial.html
"""
if use_gather:
self.comm.Gather(self.local_posterior_,
self.gather_posterior, root=0)
else:
target = [
self.gather_posterior,
gather_size,
gather_offset,
MPI.DOUBLE]
self.comm.Gatherv(self.local_posterior_, target)
return self
def _assign_posterior(self):
"""assign posterior to the right prior based on
Hungarian algorithm
Returns
-------
HTFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.global_prior_)
posterior_centers = self.get_centers(self.global_posterior_)
posterior_widths = self.get_widths(self.global_posterior_)
posterior_centers_mean_cov =\
self.get_centers_mean_cov(self.global_posterior_)
posterior_widths_mean_var =\
self.get_widths_mean_var(self.global_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
        self.set_centers(self.global_posterior_, posterior_centers[col_ind])
        self.set_widths(self.global_posterior_, posterior_widths[col_ind])
# reorder cov/var based on cost assignment
self.set_centers_mean_cov(
self.global_posterior_,
posterior_centers_mean_cov[col_ind])
self.set_widths_mean_var(
self.global_posterior_,
posterior_widths_mean_var[col_ind])
return self
def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged
def _update_weight(self, data, R, n_local_subj, local_weight_offset):
"""update local weight
Parameters
----------
data : list of 2D array, element i has shape=[n_voxel, n_tr]
Subjects' fMRI data.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
Number of subjects allocated to this process.
local_weight_offset : 1D array
Offset of each subject's weights on this process.
Returns
-------
HTFA
Returns the instance itself.
"""
for s, subj_data in enumerate(data):
base = s * self.prior_size
centers = self.local_posterior_[base:base + self.K * self.n_dim]\
.reshape((self.K, self.n_dim))
start_idx = base + self.K * self.n_dim
end_idx = base + self.prior_size
widths = self.local_posterior_[start_idx:end_idx]\
.reshape((self.K, 1))
unique_R, inds = self.get_unique_R(R[s])
F = self.get_factors(unique_R, inds, centers, widths)
start_idx = local_weight_offset[s]
if s == n_local_subj - 1:
self.local_weights_[start_idx:] =\
self.get_weights(subj_data, F).ravel()
else:
end_idx = local_weight_offset[s + 1]
self.local_weights_[start_idx:end_idx] =\
self.get_weights(subj_data, F).ravel()
return self
def _fit_htfa(self, data, R):
"""HTFA main algorithm
Parameters
----------
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
rank, size = self._get_mpi_info()
        use_gather = self.n_subj % size == 0
n_local_subj = len(R)
max_sample_tr, max_sample_voxel =\
self._get_subject_info(n_local_subj, data)
tfa = []
# init tfa for each subject
for s, subj_data in enumerate(data):
tfa.append(TFA(
max_iter=self.max_local_iter,
threshold=self.threshold,
K=self.K,
nlss_method=self.nlss_method,
nlss_loss=self.nlss_loss,
x_scale=self.x_scale,
tr_solver=self.tr_solver,
weight_method=self.weight_method,
upper_ratio=self.upper_ratio,
lower_ratio=self.lower_ratio,
verbose=self.verbose,
max_num_tr=max_sample_tr[s],
max_num_voxel=max_sample_voxel[s]))
# map data to processes
gather_size, gather_offset, subject_map =\
self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
node_weight_size, local_weight_offset =\
self._get_weight_size(data, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and not outer_converged[0]:
            if self.verbose:
logger.info("HTFA global iter %d " % (m))
# root broadcast first 4 fields of global_prior to all nodes
self.comm.Bcast(self.global_prior_, root=0)
# each node loop over its data
for s, subj_data in enumerate(data):
# update tfa with current local prior
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(
subj_data,
R=R[s],
template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] =\
tfa[s].local_posterior_
self._gather_local_posterior(
use_gather,
gather_size,
gather_offset)
# root updates global_posterior
outer_converged =\
self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
# update weight matrix for each subject
self._update_weight(
data,
R,
n_local_subj,
local_weight_offset)
return self
def _check_input(self, X, R):
"""Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
# Check data type
if not isinstance(X, list):
raise TypeError("Input data should be a list")
if not isinstance(R, list):
raise TypeError("Coordinates should be a list")
# Check the number of subjects
        if len(X) < 1:
            raise ValueError("Need at least one subject to train the "
                             "model. Got {0:d}".format(len(X)))
for idx, x in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError("Each subject data should be an array")
if x.ndim != 2:
raise TypeError("Each subject data should be 2D array")
if not isinstance(R[idx], np.ndarray):
raise TypeError(
"Each scanner coordinate matrix should be an array")
if R[idx].ndim != 2:
raise TypeError(
"Each scanner coordinate matrix should be 2D array")
if x.shape[0] != R[idx].shape[0]:
raise TypeError(
"n_voxel should be the same in X[idx] and R[idx]")
return self
def fit(self, X, R):
"""Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
self._check_input(X, R)
if self.verbose:
logger.info("Start to fit HTFA")
self.n_dim = R[0].shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
# centers,widths
self.prior_size = self.K * (self.n_dim + 1)
# centers,widths,centerCov,widthVar
self.prior_bcast_size =\
self.K * (self.n_dim + 2 + self.cov_vec_size)
self.get_map_offset()
self._fit_htfa(X, R)
return self
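
# Minimal usage sketch (illustrative only; sizes and names are
# hypothetical). Each MPI process passes only its local subjects, and the
# script would typically be launched as e.g. `mpirun -np 2 python run.py`:
#
# X = [np.random.randn(1000, 200) for _ in range(2)]  # local subjects
# R = [np.random.randint(0, 50, size=(1000, 3)) for _ in range(2)]
# htfa = HTFA(K=5, n_subj=4, max_global_iter=5)
# htfa.fit(X, R)
# template = htfa.global_posterior_  # global template (valid on rank 0)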
| 28,941 | 33.372922 | 100 | py |
brainiak | brainiak-master/brainiak/factoranalysis/__init__.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factor analysis."""
| 615 | 40.066667 | 75 | py |
brainiak | brainiak-master/brainiak/eventseg/event.py | # Copyright 2020 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation using a Hidden Markov Model
Given an ROI timeseries, this class uses an annealed fitting procedure to
segment the timeseries into events with stable activity patterns. After
learning the signature activity pattern of each event, the model can then be
applied to other datasets to identify a corresponding sequence of events.
Full details are available in:
Christopher Baldassano, Janice Chen, Asieh Zadbood,
Jonathan W Pillow, Uri Hasson, Kenneth A Norman
Discovering event structure in continuous narrative perception and memory
Neuron, Volume 95, Issue 3, 709 - 721.e5
https://doi.org/10.1016/j.neuron.2017.06.041
This class also extends the model described in the Neuron paper:
1) It allows transition matrices that are composed of multiple separate
chains of events rather than a single linear path. This allows a model to
contain patterns for multiple event sequences (e.g. narratives), and
fit probabilities along each of these chains on a new, unlabeled timeseries.
To use this option, pass in an event_chain vector labeling which events
belong to each chain, define event patterns using set_event_patterns(),
then fit to a new dataset with find_events.
2) To obtain better fits when the underlying event structure contains
events that vary substantially in length, the split_merge option allows
the fit() function to re-distribute events during fitting. The number of
merge/split proposals is controlled by split_merge_proposals, which
controls how thorough versus fast the fitting process is.
"""
# Authors: Chris Baldassano and Cătălin Iordan (Princeton University)
import numpy as np
from scipy import stats
import logging
import copy
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.exceptions import NotFittedError
import itertools
from . import _utils as utils # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"EventSegment",
]
class EventSegment(BaseEstimator):
"""Class for event segmentation of continuous fMRI data
Parameters
----------
n_events: int
Number of segments to learn
step_var: Callable[[int], float] : default 4 * (0.98 ** (step - 1))
The Gaussian variance to use during fitting, as a function of the
number of steps. Should decrease slowly over time.
n_iter: int, default: 500
Maximum number of steps to run during fitting
event_chains: ndarray with length = n_events
Array with unique value for each separate chain of events, each linked
in the order they appear in the array
split_merge: bool, default: False
Determines whether merge/split proposals are used during fitting with
fit(). This can improve fitting performance when events are highly
uneven in size, but requires additional time
split_merge_proposals: int, default: 1
Number of merges and splits to consider at each step. Computation time
scales as O(proposals^2) so this should usually be a small value
Attributes
----------
p_start, p_end: length n_events+1 ndarray
initial and final prior distributions over events
P: n_events+1 by n_events+1 ndarray
HMM transition matrix
ll_ : ndarray with length = number of training datasets
Log-likelihood for training datasets over the course of training
segments_: list of (time by event) ndarrays
Learned (soft) segmentation for training datasets
event_var_ : float
Gaussian variance at the end of learning
event_pat_ : voxel by event ndarray
Learned mean patterns for each event
"""
def _default_var_schedule(step):
return 4 * (0.98 ** (step - 1))
def __init__(self, n_events=2,
step_var=_default_var_schedule,
n_iter=500, event_chains=None,
split_merge=False, split_merge_proposals=1):
self.n_events = n_events
self.step_var = step_var
self.n_iter = n_iter
self.split_merge = split_merge
self.split_merge_proposals = split_merge_proposals
if event_chains is None:
self.event_chains = np.zeros(n_events)
else:
self.event_chains = event_chains
def _fit_validate(self, X):
"""Validate input to fit()
Validate data passed to fit(). Includes a transpose operation to
change the row/column order of X and z-scoring in time.
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented
Returns
-------
X: list of voxel by time ndarrays
"""
if len(np.unique(self.event_chains)) > 1:
raise RuntimeError("Cannot fit chains, use set_event_patterns")
# Copy X into a list and transpose
X = copy.deepcopy(X)
if type(X) is not list:
X = [X]
for i in range(len(X)):
X[i] = check_array(X[i])
X[i] = X[i].T
# Check that number of voxels is consistent across datasets
n_dim = X[0].shape[0]
for i in range(len(X)):
assert (X[i].shape[0] == n_dim)
        # Ensure data is z-scored in time (a no-op if already z-scored)
for i in range(len(X)):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
return X
def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = self._fit_validate(X)
n_train = len(X)
n_dim = X[0].shape[0]
self.classes_ = np.arange(self.n_events)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1], self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i], mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
if step > 1 and self.split_merge:
curr_ll = np.mean(self.ll_[-1, :])
self.ll_[-1, :], log_gamma, mean_pat = \
self._split_merge(X, log_gamma, iteration_var, curr_ll)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob
def _forward_backward(self, logprob):
"""Runs forward-backward algorithm on observation log probs
Given the log probability of each timepoint being generated by
each event, run the HMM forward-backward algorithm to find the
probability that each timepoint belongs to each event (based on the
transition priors in p_start, p_end, and P)
See https://en.wikipedia.org/wiki/Forward-backward_algorithm for
mathematical details
Parameters
----------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
Returns
-------
log_gamma : time by event ndarray
Log probability of each timepoint belonging to each event
ll : float
Log-likelihood of fit
"""
logprob = copy.copy(logprob)
t = logprob.shape[0]
logprob = np.hstack((logprob, float("-inf") * np.ones((t, 1))))
# Initialize variables
log_scale = np.zeros(t)
log_alpha = np.zeros((t, self.n_events + 1))
log_beta = np.zeros((t, self.n_events + 1))
# Set up transition matrix, with final sink state
self.p_start = np.zeros(self.n_events + 1)
self.p_end = np.zeros(self.n_events + 1)
self.P = np.zeros((self.n_events + 1, self.n_events + 1))
label_ind = np.unique(self.event_chains, return_inverse=True)[1]
n_chains = np.max(label_ind) + 1
# For each chain of events, link them together and then to sink state
for c in range(n_chains):
chain_ind = np.nonzero(label_ind == c)[0]
self.p_start[chain_ind[0]] = 1 / n_chains
self.p_end[chain_ind[-1]] = 1 / n_chains
p_trans = (len(chain_ind) - 1) / t
if p_trans >= 1:
raise ValueError('Too few timepoints')
for i in range(len(chain_ind)):
self.P[chain_ind[i], chain_ind[i]] = 1 - p_trans
if i < len(chain_ind) - 1:
self.P[chain_ind[i], chain_ind[i+1]] = p_trans
else:
self.P[chain_ind[i], -1] = p_trans
self.P[-1, -1] = 1
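        # Illustration (hypothetical sizes): for a single chain with
        # n_events = 2 and t = 10, p_trans = 1 / 10, so
        #     P = [[0.9, 0.1, 0.0],
        #          [0.0, 0.9, 0.1],
        #          [0.0, 0.0, 1.0]]
        # with p_start = [1, 0, 0] and p_end = [0, 1, 0]; the last row and
        # column belong to the absorbing sink state.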
# Forward pass
for i in range(t):
if i == 0:
log_alpha[0, :] = self._log(self.p_start) + logprob[0, :]
else:
log_alpha[i, :] = self._log(np.exp(log_alpha[i - 1, :])
.dot(self.P)) + logprob[i, :]
log_scale[i] = np.logaddexp.reduce(log_alpha[i, :])
log_alpha[i] -= log_scale[i]
# Backward pass
log_beta[-1, :] = self._log(self.p_end) - log_scale[-1]
for i in reversed(range(t - 1)):
obs_weighted = log_beta[i + 1, :] + logprob[i + 1, :]
offset = np.max(obs_weighted)
log_beta[i, :] = offset + self._log(
np.exp(obs_weighted - offset).dot(self.P.T)) - log_scale[i]
# Combine and normalize
log_gamma = log_alpha + log_beta
log_gamma -= np.logaddexp.reduce(log_gamma, axis=1, keepdims=True)
ll = np.sum(log_scale[:(t - 1)]) + np.logaddexp.reduce(
log_alpha[-1, :] + log_scale[-1] + self._log(self.p_end))
log_gamma = log_gamma[:, :-1]
return log_gamma, ll
def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape)
def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy()
def find_events(self, testing_data, var=None, scramble=False):
"""Applies learned event segmentation to new testing dataset
After fitting an event segmentation using fit() or setting event
patterns directly using set_event_patterns(), this function finds the
same sequence of event patterns in a new testing dataset.
Parameters
----------
testing_data: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
var: float or 1D ndarray of length equal to the number of events
default: uses variance that maximized training log-likelihood
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance. If fit() has not previously
            been run, this must be specified (cannot be None).
scramble: bool : default False
If true, the order of the learned events are shuffled before
fitting, to give a null distribution
Returns
-------
segments : time by event ndarray
The resulting soft segmentation. segments[t,e] = probability
that timepoint t is in event e
test_ll : float
Log-likelihood of model fit
"""
if var is None:
if not hasattr(self, 'event_var_'):
raise NotFittedError(("Event variance must be provided, if "
"not previously set by fit()"))
else:
var = self.event_var_
if not hasattr(self, 'event_pat_'):
raise NotFittedError(("The event patterns must first be set "
"by fit() or set_event_patterns()"))
if scramble:
mean_pat = self.event_pat_[:, np.random.permutation(self.n_events)]
else:
mean_pat = self.event_pat_
logprob = self._logprob_obs(testing_data.T, mean_pat, var)
lg, test_ll = self._forward_backward(logprob)
segments = np.exp(lg)
return segments, test_ll
def predict(self, X):
"""Applies learned event segmentation to new testing dataset
Alternative function for segmenting a new dataset after using
fit() to learn a sequence of events, to comply with the sklearn
Classifier interface
Parameters
----------
X: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
Returns
-------
Event label for each timepoint
"""
check_is_fitted(self, ["event_pat_", "event_var_"])
X = check_array(X)
segments, test_ll = self.find_events(X)
return np.argmax(segments, axis=1)
def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var
def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
segments = np.exp(lg)
return segments, test_ll
def _split_merge(self, X, log_gamma, iteration_var, curr_ll):
"""Attempt to improve log-likelihood with a merge/split
The simulated annealing used in fit() is susceptible to getting
stuck in a local minimum if there are some very short events. This
function attempts to find
a) pairs of neighboring events that are highly similar, to merge
b) events that can be split into two dissimilar events
It then tests to see whether simultaneously merging one of the
pairs from (a) and splitting one of the events from (b) can improve
the log-likelihood. The number of (a)/(b) pairs tested is determined
by the split_merge_proposals class attribute.
Parameters
----------
X: list of voxel by time ndarrays
fMRI datasets being fit
log_gamma : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset
iteration_var : float
Current variance in simulated annealing
curr_ll: float
Log-likelihood of current model
Returns
-------
return_ll : ndarray with length equal to length of X
Log-likelihood after merge/split (same as curr_ll if no
merge/split improved curr_ll)
return_lg : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset (same as log_gamma if no merge/split
improved curr_ll)
return_mp : voxel by event ndarray
Mean patterns of events (after possible merge/split)
"""
# Compute current probabilities and mean patterns
n_train = len(X)
n_dim = X[0].shape[0]
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# For each event, merge its probability distribution
# with the next event, and also split its probability
# distribution at its median into two separate events.
# Use these new event probability distributions to compute
# merged and split event patterns.
merge_pat = np.empty((n_train, n_dim, self.n_events))
split_pat = np.empty((n_train, n_dim, 2 * self.n_events))
for i, sp in enumerate(seg_prob): # Iterate over datasets
m_evprob = np.zeros((sp.shape[0], sp.shape[1]))
s_evprob = np.zeros((sp.shape[0], 2 * sp.shape[1]))
cs = np.cumsum(sp, axis=0)
for e in range(sp.shape[1]):
# Split distribution at midpoint and normalize each half
mid = np.where(cs[:, e] >= 0.5)[0][0]
cs_first = cs[mid, e] - sp[mid, e]
cs_second = 1 - cs_first
s_evprob[:mid, 2 * e] = sp[:mid, e] / cs_first
s_evprob[mid:, 2 * e + 1] = sp[mid:, e] / cs_second
# Merge distribution with next event distribution
m_evprob[:, e] = sp[:, e:(e + 2)].mean(1)
# Weight data by distribution to get event patterns
merge_pat[i, :, :] = X[i].dot(m_evprob)
split_pat[i, :, :] = X[i].dot(s_evprob)
# Average across datasets
merge_pat = np.mean(merge_pat, axis=0)
split_pat = np.mean(split_pat, axis=0)
# Correlate the current event patterns with the split and
# merged patterns
merge_corr = np.zeros(self.n_events)
split_corr = np.zeros(self.n_events)
for e in range(self.n_events):
split_corr[e] = np.corrcoef(mean_pat[:, e],
split_pat[:, (2 * e):(2 * e + 2)],
rowvar=False)[0, 1:3].max()
merge_corr[e] = np.corrcoef(merge_pat[:, e],
mean_pat[:, e:(e + 2)],
rowvar=False)[0, 1:3].min()
merge_corr = merge_corr[:-1]
# Find best merge/split candidates
# A high value of merge_corr indicates that a pair of events are
# very similar to their merged pattern, and are good candidates for
# being merged.
# A low value of split_corr indicates that an event's pattern is
# very dissimilar from the patterns in its first and second half,
# and is a good candidate for being split.
best_merge = np.flipud(np.argsort(merge_corr))
best_merge = best_merge[:self.split_merge_proposals]
best_split = np.argsort(split_corr)
best_split = best_split[:self.split_merge_proposals]
# For every pair of merge/split candidates, attempt the merge/split
# and measure the log-likelihood. If any are better than curr_ll,
# accept this best merge/split
mean_pat_last = mean_pat.copy()
return_ll = curr_ll
return_lg = copy.deepcopy(log_gamma)
return_mp = mean_pat.copy()
for m_e, s_e in itertools.product(best_merge, best_split):
if m_e == s_e or m_e+1 == s_e:
# Don't attempt to merge/split same event
continue
# Construct new set of patterns with merge/split
mean_pat_ms = np.delete(mean_pat_last, s_e, axis=1)
mean_pat_ms = np.insert(mean_pat_ms, [s_e, s_e],
split_pat[:, (2 * s_e):(2 * s_e + 2)],
axis=1)
mean_pat_ms = np.delete(mean_pat_ms,
[m_e + (s_e < m_e), m_e + (s_e < m_e) + 1],
axis=1)
mean_pat_ms = np.insert(mean_pat_ms, m_e + (s_e < m_e),
merge_pat[:, m_e], axis=1)
# Measure log-likelihood with these new patterns
ll_ms = np.zeros(n_train)
log_gamma_ms = list()
for i in range(n_train):
logprob = self._logprob_obs(X[i],
mean_pat_ms, iteration_var)
lg, ll_ms[i] = self._forward_backward(logprob)
log_gamma_ms.append(lg)
# If better than best ll so far, save to return to fit()
if ll_ms.mean() > return_ll:
return_mp = mean_pat_ms.copy()
return_ll = ll_ms
for i in range(n_train):
return_lg[i] = log_gamma_ms[i].copy()
logger.debug("Identified merge %d,%d and split %d",
m_e, m_e+1, s_e)
return return_ll, return_lg, return_mp
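
# Minimal usage sketch (illustrative only; the random data below is
# hypothetical, so the recovered segmentation is meaningless):
#
# D_train = np.random.randn(200, 50)  # 200 timepoints, 50 voxels
# D_test = np.random.randn(300, 50)
# es = EventSegment(n_events=4)
# es.fit(D_train)  # learn event patterns and a soft segmentation
# segments, test_ll = es.find_events(D_test)  # transfer to new data
# labels = np.argmax(segments, axis=1)  # hard event label per timepoint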
| 26,615 | 37.406926 | 79 | py |
brainiak | brainiak-master/brainiak/eventseg/__init__.py | # Copyright 2016 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation of continuous data + event transfer between datasets."""
| 674 | 44 | 78 | py |
brainiak | brainiak-master/brainiak/fcma/preprocessing.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FCMA preprocessing."""
# Authors: Yida Wang
# (Intel Labs), 2017
import math
import time
import numpy as np
import logging
from scipy.stats.mstats import zscore
from mpi4py import MPI
from enum import Enum
from ..image import mask_images, multimask_images
logger = logging.getLogger(__name__)
__all__ = [
"generate_epochs_info",
"prepare_fcma_data",
"prepare_mvpa_data",
"prepare_searchlight_mvpa_data",
"RandomType",
]
def _separate_epochs(activity_data, epoch_list):
""" create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
        and z-scored in preparation for correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
        len(labels) equals the number of epochs
"""
time1 = time.time()
raw_data = []
labels = []
for sid in range(len(epoch_list)):
epoch = epoch_list[sid]
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
# mat is row-major
                    # regardless of the order of activity_data[sid]
mat = activity_data[sid][:, sub_epoch[eid, :] == 1]
mat = np.ascontiguousarray(mat.T)
mat = zscore(mat, axis=0, ddof=0)
# if zscore fails (standard deviation is zero),
# set all values to be zero
mat = np.nan_to_num(mat)
mat = mat / math.sqrt(r)
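                    # After z-scoring along time and dividing by
                    # sqrt(epoch length), every voxel column has unit
                    # sum of squares, so the Pearson correlation between
                    # two voxels reduces to a dot product of their columns.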
raw_data.append(mat)
labels.append(cond)
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return raw_data, labels
def _randomize_single_subject(data, seed=None):
"""Randomly permute the voxels of the subject.
The subject is organized as Voxel x TR,
this method shuffles the voxel dimension in place.
Parameters
----------
data: 2D array in shape [nVoxels, nTRs]
Activity image data to be shuffled.
seed: Optional[int]
Seed for random state used implicitly for shuffling.
Returns
-------
None.
"""
if seed is not None:
np.random.seed(seed)
np.random.shuffle(data)
def _randomize_subject_list(data_list, random):
"""Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
    data_list: list of 2D array in shape [nVoxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
"""
if random == RandomType.REPRODUCIBLE:
for i in range(len(data_list)):
_randomize_single_subject(data_list[i], seed=i)
elif random == RandomType.UNREPRODUCIBLE:
for data in data_list:
_randomize_single_subject(data)
class RandomType(Enum):
"""Define the random types as enumeration
NORANDOM means do not randomize the image data;
REPRODUCIBLE means randomize the image data with a fixed seed so that the
permutation holds between different runs;
UNREPRODUCIBLE means truly randomize the image data which returns
different results in different runs.
"""
NORANDOM = 0
REPRODUCIBLE = 1
UNREPRODUCIBLE = 2
def prepare_fcma_data(images, conditions, mask1, mask2=None,
random=RandomType.NORANDOM, comm=MPI.COMM_WORLD):
"""Prepare data for correlation-based computation and analysis.
Generate epochs of interests, then broadcast to all workers.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask1: np.ndarray
Mask to apply to each image.
mask2: Optional[np.ndarray]
Mask to apply to each image.
If it is not specified, the method will assign None to the returning
variable raw_data2 and the self-correlation on raw_data1 will be
computed
random: Optional[RandomType]
Randomize the image data within subject or not.
comm: MPI.Comm
MPI communicator to use for MPI operations.
Returns
-------
raw_data1: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the first mask.
len(raw_data) equals the number of epochs
raw_data2: Optional, list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the second mask if any.
len(raw_data2) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
        len(labels) equals the number of epochs
"""
rank = comm.Get_rank()
labels = []
raw_data1 = []
raw_data2 = []
if rank == 0:
logger.info('start to apply masks and separate epochs')
if mask2 is not None:
masks = (mask1, mask2)
activity_data1, activity_data2 = zip(*multimask_images(images,
masks,
np.float32))
_randomize_subject_list(activity_data2, random)
raw_data2, _ = _separate_epochs(activity_data2, conditions)
else:
activity_data1 = list(mask_images(images, mask1, np.float32))
_randomize_subject_list(activity_data1, random)
raw_data1, labels = _separate_epochs(activity_data1, conditions)
time1 = time.time()
raw_data_length = len(raw_data1)
raw_data_length = comm.bcast(raw_data_length)
    # broadcast the data epoch by epoch to prevent size overflow
for i in range(raw_data_length):
if rank != 0:
raw_data1.append(None)
if mask2 is not None:
raw_data2.append(None)
raw_data1[i] = comm.bcast(raw_data1[i], root=0)
if mask2 is not None:
raw_data2[i] = comm.bcast(raw_data2[i], root=0)
if comm.Get_size() > 1:
labels = comm.bcast(labels, root=0)
if rank == 0:
time2 = time.time()
logger.info(
'data broadcasting done, takes %.2f s' %
(time2 - time1)
)
if mask2 is None:
raw_data2 = None
return raw_data1, raw_data2, labels
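
# Minimal usage sketch (illustrative only; file names are hypothetical and
# `conditions` is a list of per-subject epoch arrays as described above):
#
# import nibabel as nib
# images = (nib.load(f) for f in ('s1.nii.gz', 's2.nii.gz'))
# mask = nib.load('mask.nii.gz').get_fdata() > 0
# raw1, raw2, labels = prepare_fcma_data(images, conditions, mask)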
def generate_epochs_info(epoch_list):
""" use epoch_list to generate epoch_info defined below
Parameters
----------
epoch_list: list of 3D (binary) array in shape [condition, nEpochs, nTRs]
Contains specification of epochs and conditions, assuming
1. all subjects have the same number of epochs;
2. len(epoch_list) equals the number of subjects;
3. an epoch is always a continuous time course.
Returns
-------
epoch_info: list of tuple (label, sid, start, end).
label is the condition labels of the epochs;
sid is the subject id, corresponding to the index of raw_data;
start is the start TR of an epoch (inclusive);
end is the end TR of an epoch (exclusive).
Assuming len(labels) equals the number of epochs and
the epochs of the same sid are adjacent in epoch_info.
"""
time1 = time.time()
epoch_info = []
for sid, epoch in enumerate(epoch_list):
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
start = np.nonzero(sub_epoch[eid, :])[0][0]
epoch_info.append((cond, sid, start, start + r))
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return epoch_info
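# Illustrative sketch with a hypothetical toy input: one subject, one
# condition and two epochs marked by ones in a binary
# [condition, nEpochs, nTRs] array.
#
# import numpy as np
# epoch = np.zeros((1, 2, 6), dtype=np.int32)
# epoch[0, 0, 1:3] = 1  # epoch 0 covers TRs 1-2
# epoch[0, 1, 4:6] = 1  # epoch 1 covers TRs 4-5
# generate_epochs_info([epoch])  # -> [(0, 0, 1, 3), (0, 0, 4, 6)]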
def prepare_mvpa_data(images, conditions, mask):
"""Prepare data for activity-based model training and prediction.
Average the activity within epochs and z-score within subject.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask: np.ndarray
Mask to apply to each image.
Returns
-------
processed_data: 2D array in shape [num_voxels, num_epochs]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
activity_data = list(mask_images(images, mask, np.float32))
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
(d1, _) = activity_data[0].shape
processed_data = np.empty([d1, num_epochs])
labels = np.empty(num_epochs)
subject_count = [0] # counting the epochs per subject for z-scoring
cur_sid = -1
# averaging
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
if cur_sid != epoch[1]:
subject_count.append(0)
cur_sid = epoch[1]
subject_count[-1] += 1
processed_data[:, idx] = \
np.mean(activity_data[cur_sid][:, epoch[2]:epoch[3]],
axis=1)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, cur_epoch:cur_epoch + i],
axis=1, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
return processed_data, labels
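# Illustrative sketch (hypothetical paths; loaders assumed from
# brainiak.io):
#
# from brainiak import io
# images = io.load_images_from_dir('data/', suffix='bet.nii.gz')
# mask = io.load_boolean_mask('data/mask.nii.gz')
# conditions = io.load_labels('data/epoch_labels.npy')
# processed_data, labels = prepare_mvpa_data(images, conditions, mask)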
def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
random=RandomType.NORANDOM):
""" obtain the data for activity-based voxel selection using Searchlight
Average the activity within epochs and z-scoring within subject,
while maintaining the 3D brain structure. In order to save memory,
the data is processed subject by subject instead of reading all in before
processing. Assuming all subjects live in the identical cube.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
data_type
Type to cast image to.
random: Optional[RandomType]
Randomize the image data within subject or not.
Returns
-------
processed_data: 4D array in shape [brain 3D + epoch]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
time1 = time.time()
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
processed_data = None
logger.info(
'there are %d subjects, and in total %d epochs' %
(len(conditions), num_epochs)
)
labels = np.empty(num_epochs)
# assign labels
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
# counting the epochs per subject for z-scoring
subject_count = np.zeros(len(conditions), dtype=np.int32)
logger.info('start to apply masks and separate epochs')
for sid, f in enumerate(images):
data = f.get_data().astype(data_type)
[d1, d2, d3, d4] = data.shape
if random == RandomType.REPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data, seed=sid)
data = data.reshape((d1, d2, d3, d4))
elif random == RandomType.UNREPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data)
data = data.reshape((d1, d2, d3, d4))
if processed_data is None:
processed_data = np.empty([d1, d2, d3, num_epochs],
dtype=data_type)
# averaging
for idx, epoch in enumerate(epoch_info):
if sid == epoch[1]:
subject_count[sid] += 1
processed_data[:, :, :, idx] = \
np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)
logger.debug(
'file %s is loaded and processed, with data shape %s',
f.get_filename(), data.shape
)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, :, :, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, :, :, cur_epoch:cur_epoch + i],
axis=3, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
time2 = time.time()
logger.info(
'data processed for activity-based voxel selection, takes %.2f s' %
(time2 - time1)
)
return processed_data, labels
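# Illustrative sketch (hypothetical paths): note that, unlike
# prepare_mvpa_data, no mask is applied here, so the 3D brain structure is
# preserved for a subsequent Searchlight run.
#
# from brainiak import io
# images = io.load_images_from_dir('data/', suffix='bet.nii.gz')
# conditions = io.load_labels('data/epoch_labels.npy')
# processed_data, labels = prepare_searchlight_mvpa_data(images, conditions)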
| 14,358 | 33.6 | 79 | py |
brainiak | brainiak-master/brainiak/fcma/classifier.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full Correlation Matrix Analysis (FCMA)
Correlation-based training and prediction
"""
# Authors: Yida Wang
# (Intel Labs), 2016
import numpy as np
import time
from sklearn.base import BaseEstimator
import sklearn
from . import fcma_extension # type: ignore
from . import cython_blas as blas # type: ignore
import logging
logger = logging.getLogger(__name__)
__all__ = [
"Classifier",
]
class Classifier(BaseEstimator):
"""Correlation-based classification component of FCMA
The classifier first computes correlation of the input data,
and normalizes them if needed, then uses the given classifier
to train and/or predict the correlation data.
NOTE: if the classifier is sklearn.svm.SVC with precomputed kernel,
the test data may be provided in the fit method to compute
the kernel matrix together with the training data to save the memory usage,
but the test data will NEVER be seen in the model training.
Parameters
----------
clf: class
The classifier used, normally a classifier class of sklearn
num_processed_voxels: int, default 2000
Used for SVM with precomputed kernel,
every time it only computes correlation between num_processed_voxels and
the whole mask to aggregate the kernel matrices.
This is to save the memory
so as to handle correlations at a larger scale.
epochs_per_subj: int, default 0
The number of epochs of each subject
within-subject normalization will be performed during
classifier training if epochs_per_subj is specified
default 0 means no within-subject normalization
Attributes
----------
training_data_: 2D numpy array in shape [num_samples, num_features]
training_data\\_ is None unless clf is sklearn.svm.SVC with precomputed kernel,
in which case training data is needed to compute
the similarity vector for each sample to be classified.
However, if the test samples are also provided during the fit,
the similarity vectors can be precomputed too
and then training_data\\_ is None
test_raw_data_: a list of 2D array in shape [num_TRs, num_voxels]
default None
test_raw_data\\_ is set after a prediction is called,
if the new input data equals test_raw_data\\_,
test_data\\_ can be reused
test_data_: 2D numpy array in shape [num_samples, num_features]
default None
test_data\\_ is set after a prediction is called,
so that the test data does not need to be regenerated in the
subsequent operations, e.g. getting decision values of the prediction.
test_data\\_ may also be set in the fit method
if sklearn.svm.SVC with precomputed kernel
and the test samples are known.
NOTE: the test samples will never be used to fit the model.
num_voxels_: int
The number of voxels of the first brain region used in the classifier.
The first brain region is always the larger one. When training, this region may
be divided to compute the correlation portion by portion.
The brain regions are defined by the applied mask, e.g. the top voxels
selected by FCMA voxel selection
num_features_: int
The dimension of correlation data, normally is the product of
the number of voxels of brain region 1 and
the number of voxels of brain region 2.
num_features\\_ must be consistent in both training and classification
num_samples_: int
The number of samples
num_digits_: int
The number of digits of the first value of the kernel matrix,
for normalizing the kernel values accordingly
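Examples
--------
A minimal sketch with synthetic epochs; the shapes and labels below are
made up, and real inputs come from the FCMA data preparation routines:
>>> import numpy as np
>>> from sklearn.svm import SVC
>>> from brainiak.fcma.classifier import Classifier
>>> clf = Classifier(SVC(kernel='linear'), epochs_per_subj=4)
>>> X = [(np.random.rand(10, 20).astype(np.float32),
...       np.random.rand(10, 30).astype(np.float32)) for _ in range(8)]
>>> y = np.array([0, 1] * 4)
>>> y_pred = clf.fit(X, y).predict(X)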
"""
def __init__(self,
clf,
num_processed_voxels=2000,
epochs_per_subj=0):
self.clf = clf
self.num_processed_voxels = num_processed_voxels
self.epochs_per_subj = epochs_per_subj
self.num_digits_ = 0
return
def _prepare_correlation_data(self, X1, X2,
start_voxel=0,
num_processed_voxels=None):
"""Compute auto-correlation for the input data X1 and X2.
it will generate the correlation between some voxels and all voxels
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels1]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value.
X2: a list of numpy array in shape [num_TRs, num_voxels2]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit and/or self.predict).
start_voxel: int, default 0
the starting voxel id for correlation computation
num_processed_voxels: int, default None
the number of voxels it computes for correlation computation
if it is None, it is set to self.num_voxels
Returns
-------
corr_data: the correlation data
in shape [len(X), num_processed_voxels, num_voxels2]
"""
num_samples = len(X1)
assert num_samples > 0, \
'at least one sample is needed for correlation computation'
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
assert num_voxels1 * num_voxels2 == self.num_features_, \
'the number of features provided by the input data ' \
'does not match the number of features defined in the model'
assert X1[0].shape[0] == X2[0].shape[0], \
'the numbers of TRs of X1 and X2 are not identical'
if num_processed_voxels is None:
num_processed_voxels = num_voxels1
corr_data = np.zeros((num_samples, num_processed_voxels, num_voxels2),
np.float32, order='C')
# compute correlation
for idx, data in enumerate(X1):
data2 = X2[idx]
num_TRs = data.shape[0]
blas.compute_corr_vectors('N', 'T',
num_voxels2, num_processed_voxels,
num_TRs,
1.0, data2, num_voxels2,
data, num_voxels1,
0.0, corr_data, num_voxels2,
start_voxel, idx)
logger.debug(
'correlation computation done'
)
return corr_data
def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data
def _prepare_test_data(self, corr_data):
"""Prepare the data to be applied to the predict function.
if the classifier is SVM, do kernel precomputation,
otherwise the test data is the reshaped corr_data
Parameters
----------
corr_data: the (normalized) correlation data
in shape [num_samples, num_voxels, num_voxels2]
Returns
-------
data: the data to be predicted, in shape of [num_samples, num_dim]
"""
num_test_samples = corr_data.shape[0]
assert num_test_samples > 0, \
'at least one test sample is needed'
if isinstance(self.clf, sklearn.svm.SVC) \
and self.clf.kernel == 'precomputed':
assert self.training_data_ is not None, \
'when using precomputed kernel of SVM, ' \
'all training data must be provided'
num_training_samples = self.training_data_.shape[0]
data = np.zeros((num_test_samples, num_training_samples),
np.float32,
order='C')
num_features = self.num_features_
corr_data = corr_data.reshape(num_test_samples,
num_features)
# compute the similarity vectors using corr_data and training_data
blas.compute_single_matrix_multiplication('T', 'N',
num_training_samples,
num_test_samples,
num_features,
1.0,
self.training_data_,
num_features,
corr_data,
num_features,
0.0,
data,
num_training_samples)
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = self.num_digits_
if num_digits > 2:
proportion = 10**(2-num_digits)
data *= proportion
logger.debug(
'similarity vectors computation done'
)
else:
data = corr_data.reshape(num_test_samples,
self.num_features_)
return data
def _compute_kernel_matrix_in_portion(self, X1, X2):
"""Compute kernel matrix for sklearn.svm.SVC with precomputed kernel.
The method generates the kernel matrix (similarity matrix) for
sklearn.svm.SVC with precomputed kernel. It first computes
the correlation from X, then normalizes the correlation if needed,
and finally computes the kernel matrix. It is worth noting that if
the resulting correlation is large, the kernel matrix will be computed
portion by portion to save memory usage (the portion size is specified
in self.num_processed_voxels).
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value.
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 always has more voxels
than X2.
Returns
-------
kernel_matrix: 2D array in shape [num_samples, num_samples]
the kernel matrix to be used in sklearn.svm.SVC
normalized_corr_data: 2D array in shape [num_samples, num_features]
the training data to be used in self.predict() if
the kernel matrix is computed in one portion,
otherwise it will not be used.
"""
kernel_matrix = np.zeros((self.num_samples_, self.num_samples_),
np.float32,
order='C')
sr = 0
row_length = self.num_processed_voxels
num_voxels2 = X2[0].shape[1]
normalized_corr_data = None
while sr < self.num_voxels_:
if row_length >= self.num_voxels_ - sr:
row_length = self.num_voxels_ - sr
# compute sub-correlation
corr_data = self._prepare_correlation_data(X1, X2,
sr, row_length)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
self.epochs_per_subj)
# compute partial kernel matrices
# for using kernel matrix computation from voxel selection
normalized_corr_data = normalized_corr_data.reshape(
1,
self.num_samples_,
row_length * num_voxels2)
blas.compute_kernel_matrix('L', 'T',
self.num_samples_,
row_length * num_voxels2,
1.0, normalized_corr_data,
0, row_length * num_voxels2,
1.0, kernel_matrix, self.num_samples_)
sr += row_length
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = len(str(int(kernel_matrix[0, 0])))
self.num_digits_ = num_digits
if num_digits > 2:
proportion = 10**(2-num_digits)
kernel_matrix *= proportion
return kernel_matrix, normalized_corr_data
def _generate_training_data(self, X1, X2, num_training_samples):
"""Generate training data for the classifier.
Compute the correlation, do the normalization if necessary,
and compute the kernel matrix if the classifier is
sklearn.svm.SVC with precomputed kernel.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value.
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit).
num_training_samples: Optional[int]
Default None.
The number of samples used in the training,
which is set when the kernel matrix is constructed
portion by portion so the similarity vectors of the
test data have to be computed here.
This is ONLY set when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
data: 2D numpy array
If the classifier is sklearn.svm.SVC with precomputed kernel,
data is the kernel matrix in shape [num_samples, num_samples];
otherwise, data is in shape [num_samples, num_features] as
the training data.
"""
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
# correlation computation
corr_data = self._prepare_correlation_data(X1, X2)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
self.epochs_per_subj)
# training data prepare
data = normalized_corr_data.reshape(self.num_samples_,
self.num_features_)
self.training_data_ = None
else: # SVM with precomputed kernel
if self.num_processed_voxels < self.num_voxels_:
if num_training_samples is None:
raise RuntimeError('the kernel matrix will be '
'computed portion by portion, '
'the test samples must be predefined '
'by specifying '
'num_training_samples')
if num_training_samples >= self.num_samples_:
raise ValueError('the number of training samples '
'must be smaller than '
'the number of total samples')
data, normalized_corr_data = \
self._compute_kernel_matrix_in_portion(X1, X2)
if self.num_processed_voxels >= self.num_voxels_:
# training data is in shape
# [num_samples, num_voxels * num_voxels]
self.training_data_ = normalized_corr_data.reshape(
self.num_samples_,
self.num_features_)
else:
# do not store training data because it was partially computed
self.training_data_ = None
logger.debug(
'kernel computation done'
)
return data
def fit(self, X, y, num_training_samples=None):
"""Use correlation data to train a model.
First compute the correlation of the input data,
and then normalize within subject
if more than one sample in one subject,
and then fit to a model defined by self.clf.
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
They contain the activity data filtered by ROIs
and prepared for correlation computation.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
y: 1D numpy array
labels, len(X) equals len(y)
num_training_samples: Optional[int]
The number of samples used in the training.
Set it when the kernel matrix is constructed
portion by portion, in which case the similarity vectors of the
test data have to be computed here.
Only set num_training_samples when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
Classifier:
self.
"""
time1 = time.time()
assert len(X) == len(y), \
'the number of samples must be equal to the number of labels'
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
if num_training_samples is not None:
num_training_samples = None
logger.warning(
'num_training_samples should not be set for classifiers '
'other than SVM with precomputed kernels'
)
num_samples = len(X1)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
self.num_voxels_ = num_voxels1
self.num_features_ = num_voxels1 * num_voxels2
self.num_samples_ = num_samples
data = self._generate_training_data(X1, X2, num_training_samples)
if num_training_samples is not None:
self.test_raw_data_ = None
self.test_data_ = data[num_training_samples:,
0:num_training_samples]
# limit training to the data specified by num_training_samples
data = data[0:num_training_samples, 0:num_training_samples]
# training
self.clf = self.clf.fit(data, y[0:num_training_samples])
# set the test data
if num_training_samples is None:
self.test_raw_data_ = None
self.test_data_ = None
time2 = time.time()
logger.info(
'training done, takes %.2f s' %
(time2 - time1)
)
return self
def predict(self, X=None):
"""Use a trained model to predict correlation data.
first compute the correlation of the input data,
and then normalize across all samples in the list
if there are more than one sample,
and then predict via self.clf.
If X is None, use the similarity vectors produced in fit
to predict
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
y_pred: the predicted label of X, in shape [len(X),]
"""
time1 = time.time()
if X is not None:
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
assert self.num_features_ == num_voxels1 * num_voxels2, \
'the number of features does not match the model'
num_test_samples = len(X1)
self.test_raw_data_ = X
# correlation computation
corr_data = self._prepare_correlation_data(X1, X2)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
num_test_samples)
# test data generation
self.test_data_ = self._prepare_test_data(normalized_corr_data)
# prediction
y_pred = self.clf.predict(self.test_data_)
time2 = time.time()
logger.info(
'prediction done, takes %.2f s' %
(time2 - time1)
)
return y_pred
def _is_equal_to_test_raw_data(self, X):
"""Check if the new input data X is equal to the old one.
compare X and self.test_raw_data_ if it exists
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels],
the input data to be checked.
Returns
-------
a boolean value to indicate if X == self.test_raw_data_
"""
if self.test_raw_data_ is None or len(X) != len(self.test_raw_data_):
return False
X1, X2 = zip(*X)
c1, c2 = zip(*self.test_raw_data_)
# this for loop is faster than
# doing np.array_equal(X, self.test_raw_data_) directly
for new, old in zip(X1, c1):
if not np.array_equal(new, old):
return False
for new, old in zip(X2, c2):
if not np.array_equal(new, old):
return False
return True
def decision_function(self, X=None):
"""Output the decision value of the prediction.
if X is not equal to self.test_raw_data\\_, i.e. predict is not called,
first generate the test_data
after getting the test_data, get the decision value via self.clf.
if X is None, test_data\\_ is ready to be used
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
confidence: the predictions confidence values of X, in shape [len(X),]
"""
if X is not None and not self._is_equal_to_test_raw_data(X):
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
assert len(X1) == len(X2), \
'the list lengths do not match'
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
assert self.num_features_ == num_voxels1 * num_voxels2, \
'the number of features does not match the model'
num_test_samples = len(X1)
self.test_raw_data_ = X
# generate the test_data first
# correlation computation
corr_data = self._prepare_correlation_data(X1, X2)
# normalization
normalized_corr_data = \
self._normalize_correlation_data(corr_data,
num_test_samples)
# test data generation
self.test_data_ = self._prepare_test_data(normalized_corr_data)
confidence = self.clf.decision_function(self.test_data_)
return confidence
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
NOTE: In the condition of sklearn.svm.SVC with precomputed kernel
when the kernel matrix is computed portion by portion, the function
will ignore the first input argument X.
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
They are test samples.
They contain the activity data filtered by ROIs
and prepared for correlation computation.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
len(X) is the number of test samples.
y: 1D numpy array
labels, len(X) equals len(y), which is num_samples
sample_weight: 1D array in shape [num_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from sklearn.metrics import accuracy_score
if isinstance(self.clf, sklearn.svm.SVC) \
and self.clf.kernel == 'precomputed' \
and self.training_data_ is None:
result = accuracy_score(y, self.predict(),
sample_weight=sample_weight)
else:
result = accuracy_score(y, self.predict(X),
sample_weight=sample_weight)
return result
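# Illustrative sketch of the precomputed-kernel workflow with hypothetical
# shapes: when num_processed_voxels is smaller than the larger ROI, the
# kernel matrix is built portion by portion and the train/test split must
# be fixed up front via num_training_samples.
#
# import numpy as np
# from sklearn.svm import SVC
# clf = Classifier(SVC(kernel='precomputed'), num_processed_voxels=10,
#                  epochs_per_subj=4)
# X = [(np.random.rand(10, 20).astype(np.float32),
#       np.random.rand(10, 30).astype(np.float32)) for _ in range(12)]
# y = np.array([0, 1] * 6)
# clf.fit(X, y, num_training_samples=8)  # last 4 samples held out
# accuracy = clf.score(None, y[8:])      # X ignored, cached test data used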
| 29,451 | 41.622287 | 79 | py |
brainiak | brainiak-master/brainiak/fcma/mvpa_voxelselector.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full Correlation Matrix Analysis (FCMA)
Activity-based voxel selection
"""
# Authors: Yida Wang
# (Intel Labs), 2017
import numpy as np
from sklearn import model_selection
import logging
from mpi4py import MPI
logger = logging.getLogger(__name__)
__all__ = [
"MVPAVoxelSelector",
]
def _sfn(data, mask, myrad, bcast_var):
"""Score classifier on searchlight data using cross-validation.
The classifier is in `bcast_var[2]`. The labels are in `bcast_var[0]`. The
number of cross-validation folds is in `bcast_var[1]`.
"""
clf = bcast_var[2]
masked_data = data[0][mask, :].T
# print(l[0].shape, mask.shape, data.shape)
skf = model_selection.StratifiedKFold(n_splits=bcast_var[1],
shuffle=False)
accuracy = np.mean(model_selection.cross_val_score(clf, masked_data,
y=bcast_var[0],
cv=skf,
n_jobs=1))
return accuracy
class MVPAVoxelSelector:
"""Activity-based voxel selection component of FCMA
Parameters
----------
data: 4D array in shape [brain 3D + epoch]
contains the averaged and normalized brain data epoch by epoch.
It is generated by .io.prepare_searchlight_mvpa_data
mask: 3D array
labels: 1D array
contains the labels of the epochs.
It is generated by .io.prepare_searchlight_mvpa_data
num_folds: int
the number of folds to be conducted in the cross validation
sl: Searchlight
the distributed Searchlight object
"""
def __init__(self,
data,
mask,
labels,
num_folds,
sl
):
self.data = data
self.mask = mask.astype(bool)
self.labels = labels
self.num_folds = num_folds
self.sl = sl
num_voxels = np.sum(self.mask)
if num_voxels == 0:
raise ValueError('Zero processed voxels')
def run(self, clf):
""" run activity-based voxel selection
Sort the voxels based on the cross-validation accuracy
of their activity vectors within the searchlight
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
result_volume: 3D array of accuracy numbers
contains the voxelwise accuracy numbers obtained via Searchlight
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
logger.info(
'running activity-based voxel selection via Searchlight'
)
self.sl.distribute([self.data], self.mask)
self.sl.broadcast((self.labels, self.num_folds, clf))
if rank == 0:
logger.info(
'data preparation done'
)
# obtain a 3D array with accuracy numbers
result_volume = self.sl.run_searchlight(_sfn)
# get result tuple list from the volume
result_list = result_volume[self.mask]
results = []
if rank == 0:
for idx, value in enumerate(result_list):
if value is None:
value = 0
results.append((idx, value))
# Sort the voxels
results.sort(key=lambda tup: tup[1], reverse=True)
logger.info(
'activity-based voxel selection via Searchlight is done'
)
return result_volume, results
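# Illustrative sketch with hypothetical inputs (data, mask and labels as
# produced by the searchlight MVPA data preparation defined elsewhere in
# this package); Searchlight is assumed to run under MPI:
#
# from sklearn import svm
# from brainiak.searchlight.searchlight import Searchlight
# sl = Searchlight(sl_rad=2)
# mvs = MVPAVoxelSelector(data, mask, labels, num_folds=2, sl=sl)
# result_volume, results = mvs.run(svm.SVC(kernel='linear'))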
| 4,426 | 31.313869 | 77 | py |
brainiak | brainiak-master/brainiak/fcma/util.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full Correlation Matrix Analysis (FCMA)
Correlation related high performance routines
"""
# Authors: Yida Wang
# (Intel Labs), 2017
import numpy as np
from . import cython_blas as blas # type: ignore
from scipy.stats.mstats import zscore
import math
__all__ = [
"compute_correlation",
]
def _normalize_for_correlation(data, axis, return_nans=False):
"""normalize the data before computing correlation
The data will be z-scored and divided by sqrt(n)
along the assigned axis
Parameters
----------
data: 2D array
axis: int
specify which dimension of the data should be normalized
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
data: 2D array
the normalized data
"""
shape = data.shape
data = zscore(data, axis=axis, ddof=0)
# if zscore fails (standard deviation is zero),
# optionally set all values to be zero
if not return_nans:
data = np.nan_to_num(data)
data = data / math.sqrt(shape[axis])
return data
def compute_correlation(matrix1, matrix2, return_nans=False):
"""compute correlation between two sets of variables
Correlate the rows of matrix1 with the rows of matrix2.
If matrix1 == matrix2, it is auto-correlation computation
resulting in a symmetric correlation matrix.
The number of columns MUST agree between set1 and set2.
The correlation being computed here is
the Pearson's correlation coefficient, which can be expressed as
.. math:: corr(X, Y) = \\frac{cov(X, Y)}{\\sigma_X\\sigma_Y}
where cov(X, Y) is the covariance of variable X and Y, and
.. math:: \\sigma_X
is the standard deviation of variable X
Reducing the correlation computation to matrix multiplication
and using the BLAS GEMM API wrapped by SciPy can speed up the NumPy built-in
correlation computation (numpy.corrcoef) by one order of magnitude
.. math::
corr(X, Y)
&= \\frac{\\sum\\limits_{i=1}^n (x_i-\\bar{x})(y_i-\\bar{y})}{(n-1)
\\sqrt{\\frac{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}^2}{n-1}}
\\sqrt{\\frac{\\sum\\limits_{j=1}^{n} y_j^2-n\\bar{y}^2}{n-1}}}\\\\
&= \\sum\\limits_{i=1}^n(\\frac{(x_i-\\bar{x})}
{\\sqrt{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}^2}}
\\frac{(y_i-\\bar{y})}{\\sqrt{\\sum\\limits_{j=1}^n y_j^2-n\\bar{y}^2}})
By default (return_nans=False), returns zeros for vectors with NaNs.
If return_nans=True, convert zeros to NaNs (np.nan) in output.
Parameters
----------
matrix1: 2D array in shape [r1, c]
MUST be continuous and row-major
matrix2: 2D array in shape [r2, c]
MUST be continuous and row-major
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
corr_data: 2D array in shape [r1, r2]
continuous and row-major in np.float32
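Examples
--------
A small sanity check against numpy.corrcoef; the shapes are arbitrary:
>>> import numpy as np
>>> from brainiak.fcma.util import compute_correlation
>>> a = np.random.rand(3, 10).astype(np.float32)
>>> b = np.random.rand(5, 10).astype(np.float32)
>>> corr = compute_correlation(a, b)  # shape [3, 5]
>>> np.allclose(corr, np.corrcoef(a, b)[:3, 3:], atol=1e-4)
True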
"""
matrix1 = matrix1.astype(np.float32)
matrix2 = matrix2.astype(np.float32)
[r1, d1] = matrix1.shape
[r2, d2] = matrix2.shape
if d1 != d2:
raise ValueError('Dimension discrepancy')
# preprocess two components
matrix1 = _normalize_for_correlation(matrix1, 1,
return_nans=return_nans)
matrix2 = _normalize_for_correlation(matrix2, 1,
return_nans=return_nans)
corr_data = np.empty((r1, r2), dtype=np.float32, order='C')
# blas routine is column-major
blas.compute_single_matrix_multiplication('T', 'N',
r2, r1, d1,
1.0,
matrix2, d2,
matrix1, d1,
0.0,
corr_data,
r2)
return corr_data
| 4,594 | 33.037037 | 78 | py |
brainiak | brainiak-master/brainiak/fcma/voxelselector.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full Correlation Matrix Analysis (FCMA)
Correlation-based voxel selection
"""
# Authors: Yida Wang
# (Intel Labs), 2016
import numpy as np
import time
from mpi4py import MPI
from scipy.stats.mstats import zscore
from sklearn import model_selection
import sklearn
from . import fcma_extension # type: ignore
from . import cython_blas as blas # type: ignore
from ..utils.utils import usable_cpu_count
import logging
import multiprocessing
logger = logging.getLogger(__name__)
__all__ = [
"VoxelSelector",
]
def _cross_validation_for_one_voxel(clf, vid, num_folds, subject_data, labels):
"""Score classifier on data using cross validation."""
# no shuffling in cv
skf = model_selection.StratifiedKFold(n_splits=num_folds,
shuffle=False)
scores = model_selection.cross_val_score(clf, subject_data,
y=labels,
cv=skf, n_jobs=1)
logger.debug(
'cross validation for voxel %d is done' %
vid
)
return (vid, scores.mean())
class VoxelSelector:
"""Correlation-based voxel selection component of FCMA.
Parameters
----------
labels: list of 1D array
the condition labels of the epochs
len(labels) equals the number of epochs
epochs_per_subj: int
The number of epochs of each subject
num_folds: int
The number of folds to be conducted in the cross validation
raw_data: list of 2D array in shape [epoch length, nVoxels]
Assumption: 1. all activity data contains the same number of voxels
2. the activity data has been z-scored,
ready to compute correlation as matrix multiplication
3. all subjects have the same number of epochs
4. epochs belonging to the same subject are adjacent
in the list
5. if MPI jobs are running on multiple nodes, the path
used must be on a filesystem shared by all nodes
raw_data2: Optional, list of 2D array in shape [epoch length, nVoxels]
raw_data2 shares the data structure of the assumptions of raw_data
If raw_data2 is None, the correlation will be computed as
raw_data by raw_data.
If raw_data2 is specified, len(raw_data) MUST equal len(raw_data2),
the correlation will be computed as raw_data by raw_data2.
voxel_unit: int, default 64
The number of voxels assigned to a worker each time
process_num: Optional[int]
The maximum number of processes used in cross validation.
If None, the number of processes will equal
the number of available hardware threads, considering cpusets
restrictions.
If 0, cross validation will not use python multiprocessing.
master_rank: int, default 0
The process which serves as the master
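Examples
--------
Illustrative sketch; it requires at least two MPI processes (e.g.
``mpirun -np 2``) and labels/raw_data as produced by the FCMA data
preparation:
>>> from sklearn import svm  # doctest: +SKIP
>>> vs = VoxelSelector(labels, epochs_per_subj=4, num_folds=2,
...                    raw_data=raw_data)  # doctest: +SKIP
>>> results = vs.run(svm.SVC(kernel='precomputed'))  # doctest: +SKIP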
"""
def __init__(self,
labels,
epochs_per_subj,
num_folds,
raw_data,
raw_data2=None,
voxel_unit=64,
process_num=4,
master_rank=0):
self.labels = labels
self.epochs_per_subj = epochs_per_subj
self.num_folds = num_folds
self.raw_data = raw_data
self.num_voxels = raw_data[0].shape[1]
self.raw_data2 = raw_data2
self.num_voxels2 = raw_data2[0].shape[1] \
if raw_data2 is not None else self.num_voxels
self.voxel_unit = voxel_unit
usable_cpus = usable_cpu_count()
if process_num is None:
self.process_num = usable_cpus
else:
self.process_num = np.min((process_num, usable_cpus))
if self.process_num == 0:
self.use_multiprocessing = False
else:
self.use_multiprocessing = True
self.master_rank = master_rank
if self.raw_data2 is not None \
and len(self.raw_data) != len(self.raw_data2):
raise ValueError('The raw data lists must have the same number '
'of elements for computing the correlations '
'element by element')
if self.num_voxels == 0 or self.num_voxels2 == 0:
raise ValueError('Zero processed voxels')
if MPI.COMM_WORLD.Get_size() == 1:
raise RuntimeError('one process cannot run the '
'master-worker model')
if self.master_rank >= MPI.COMM_WORLD.Get_size():
logger.warning('Master rank exceeds the number of '
'launched processes, set to 0')
self.master_rank = 0
# tags for MPI messages
_WORKTAG = 0
_TERMINATETAG = 1
def run(self, clf):
"""Run correlation-based voxel selection in master-worker model.
Sort the voxels based on the cross-validation accuracy
of their correlation vectors
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
rank = MPI.COMM_WORLD.Get_rank()
if rank == self.master_rank:
results = self._master()
# Sort the voxels
results.sort(key=lambda tup: tup[1], reverse=True)
else:
self._worker(clf)
results = []
return results
def _master(self):
"""Master node's operation.
Assigning tasks to workers and collecting results from them
Parameters
----------
None
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
logger.info(
'Master at rank %d starts to allocate tasks',
MPI.COMM_WORLD.Get_rank()
)
results = []
comm = MPI.COMM_WORLD
size = comm.Get_size()
sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels \
else self.num_voxels
current_task = (0, sending_voxels)
status = MPI.Status()
# using_size is used when the number of tasks
# is smaller than the number of workers
using_size = size
for i in range(0, size):
if i == self.master_rank:
continue
if current_task[1] == 0:
using_size = i
break
logger.debug(
'master starts to send a task to worker %d' %
i
)
comm.send(current_task,
dest=i,
tag=self._WORKTAG)
next_start = current_task[0] + current_task[1]
sending_voxels = self.voxel_unit \
if self.voxel_unit < self.num_voxels - next_start \
else self.num_voxels - next_start
current_task = (next_start, sending_voxels)
while using_size == size:
if current_task[1] == 0:
break
result = comm.recv(source=MPI.ANY_SOURCE,
tag=MPI.ANY_TAG,
status=status)
results += result
comm.send(current_task,
dest=status.Get_source(),
tag=self._WORKTAG)
next_start = current_task[0] + current_task[1]
sending_voxels = self.voxel_unit \
if self.voxel_unit < self.num_voxels - next_start \
else self.num_voxels - next_start
current_task = (next_start, sending_voxels)
for i in range(0, using_size):
if i == self.master_rank:
continue
result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
results += result
for i in range(0, size):
if i == self.master_rank:
continue
comm.send(None,
dest=i,
tag=self._TERMINATETAG)
return results
def _worker(self, clf):
"""Worker node's operation.
Receiving tasks from the master to process and sending the result back
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
None
"""
logger.debug(
'worker %d is running, waiting for tasks from master at rank %d' %
(MPI.COMM_WORLD.Get_rank(), self.master_rank)
)
comm = MPI.COMM_WORLD
status = MPI.Status()
while True:
task = comm.recv(source=self.master_rank,
tag=MPI.ANY_TAG,
status=status)
if status.Get_tag():
break
comm.send(self._voxel_scoring(task, clf),
dest=self.master_rank)
def _correlation_computation(self, task):
"""Use BLAS API to do correlation computation (matrix multiplication).
Parameters
----------
task: tuple (start_voxel_id, num_processed_voxels)
depicting the voxels assigned to compute
Returns
-------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the correlation values of all subjects in all epochs
for the assigned values, in row-major
corr[i, e, s + j] = corr[j, e, s + i]
"""
time1 = time.time()
s = task[0]
nEpochs = len(self.raw_data)
logger.debug(
'start to compute the correlation: #epochs: %d, '
'#processed voxels: %d, #total voxels to compute against: %d' %
(nEpochs, task[1], self.num_voxels2)
)
corr = np.zeros((task[1], nEpochs, self.num_voxels2),
np.float32, order='C')
count = 0
for i in range(len(self.raw_data)):
mat = self.raw_data[i]
mat2 = self.raw_data2[i] if self.raw_data2 is not None else mat
no_trans = 'N'
trans = 'T'
blas.compute_self_corr_for_voxel_sel(no_trans, trans,
self.num_voxels2, task[1],
mat.shape[0], 1.0,
mat2, self.num_voxels2,
s, mat, self.num_voxels,
0.0, corr,
self.num_voxels2 * nEpochs,
count)
count += 1
time2 = time.time()
logger.debug(
'correlation computation for %d voxels, takes %.2f s' %
(task[1], (time2 - time1))
)
return corr
def _correlation_normalization(self, corr):
"""Do within-subject normalization.
This method uses scipy.zscore to normalize the data,
but is much slower than its C++ counterpart.
It is doing in-place z-score.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the correlation values of all subjects in all epochs
for the assigned values, in row-major
Returns
-------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
"""
time1 = time.time()
(sv, e, av) = corr.shape
for i in range(sv):
start = 0
while start < e:
cur_val = corr[i, start: start + self.epochs_per_subj, :]
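# Fisher z-transform: arctanh(r) = 0.5 * log((1 + r) / (1 - r))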
cur_val = .5 * np.log((cur_val + 1) / (1 - cur_val))
corr[i, start: start + self.epochs_per_subj, :] = \
zscore(cur_val, axis=0, ddof=0)
start += self.epochs_per_subj
# if zscore fails (standard deviation is zero),
# set all values to be zero
corr = np.nan_to_num(corr)
time2 = time.time()
logger.debug(
'within-subject normalization for %d voxels '
'using numpy zscore function, takes %.2f s' %
(sv, (time2 - time1))
)
return corr
def _prepare_for_cross_validation(self, corr, clf):
"""Prepare data for voxelwise cross validation.
If the classifier is sklearn.svm.SVC with precomputed kernel,
the kernel matrix of each voxel is computed, otherwise do nothing.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
clf: classification function
the classifier to be used in cross validation
Returns
-------
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels]
"""
time1 = time.time()
(num_processed_voxels, num_epochs, _) = corr.shape
if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
# kernel matrices should be computed
kernel_matrices = np.zeros((num_processed_voxels, num_epochs,
num_epochs),
np.float32, order='C')
for i in range(num_processed_voxels):
blas.compute_kernel_matrix('L', 'T',
num_epochs, self.num_voxels2,
1.0, corr,
i, self.num_voxels2,
0.0, kernel_matrices[i, :, :],
num_epochs)
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = len(str(int(kernel_matrices[i, 0, 0])))
if num_digits > 2:
proportion = 10**(2-num_digits)
kernel_matrices[i, :, :] *= proportion
data = kernel_matrices
else:
data = corr
time2 = time.time()
logger.debug(
'cross validation data preparation takes %.2f s' %
(time2 - time1)
)
return data
def _do_cross_validation(self, clf, data, task):
"""Run voxelwise cross validation based on correlation vectors.
Parameters
----------
clf: classification function
the classifier to be used in cross validation
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels]
task: tuple (start_voxel_id, num_processed_voxels)
depicting the voxels assigned to compute
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of assigned voxels
"""
time1 = time.time()
if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed'\
and self.use_multiprocessing:
inlist = [(clf, i + task[0], self.num_folds, data[i, :, :],
self.labels) for i in range(task[1])]
with multiprocessing.Pool(self.process_num) as pool:
results = list(pool.starmap(_cross_validation_for_one_voxel,
inlist))
else:
results = []
for i in range(task[1]):
result = _cross_validation_for_one_voxel(clf, i + task[0],
self.num_folds,
data[i, :, :],
self.labels)
results.append(result)
time2 = time.time()
logger.debug(
'cross validation for %d voxels, takes %.2f s' %
(task[1], (time2 - time1))
)
return results
def _voxel_scoring(self, task, clf):
"""The voxel selection process done in the worker node.
Take the task in,
do analysis on voxels specified by the task (voxel id, num_voxels)
It is a three-stage pipeline consisting of:
1. correlation computation
2. within-subject normalization
3. voxelwise cross validation
Parameters
----------
task: tuple (start_voxel_id, num_processed_voxels),
depicting the voxels assigned to compute
clf: classification function
the classifier to be used in cross validation
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of assigned voxels
"""
time1 = time.time()
# correlation computation
corr = self._correlation_computation(task)
# normalization
# corr = self._correlation_normalization(corr)
time3 = time.time()
fcma_extension.normalization(corr, self.epochs_per_subj)
time4 = time.time()
logger.debug(
'within-subject normalization for %d voxels '
'using C++, takes %.2f s' %
(task[1], (time4 - time3))
)
# cross validation
data = self._prepare_for_cross_validation(corr, clf)
if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
# to save memory so that the process can be forked
del corr
results = self._do_cross_validation(clf, data, task)
time2 = time.time()
logger.info(
'in rank %d, task %d takes %.2f s' %
(MPI.COMM_WORLD.Get_rank(),
(int(task[0] / self.voxel_unit)), (time2 - time1))
)
return results
| 19,579 | 36.87234 | 79 | py |
brainiak | brainiak-master/brainiak/fcma/__init__.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full correlation matrix analysis
The implementation is based on the work in [Wang2015-1]_ and [Wang2015-2]_.
.. [Wang2015-1] "Full correlation matrix analysis (FCMA): An unbiased method
for task-related functional connectivity",
Yida Wang, Jonathan D Cohen, Kai Li, Nicholas B Turk-Browne.
Journal of Neuroscience Methods, 2015.
.. [Wang2015-2] "Full correlation matrix analysis of fMRI data on Intel® Xeon
Phi™ coprocessors",
Yida Wang, Michael J. Anderson, Jonathan D. Cohen, Alexander Heinecke,
Kai Li, Nadathur Satish, Narayanan Sundaram, Nicholas B. Turk-Browne,
Theodore L. Willke.
In Proceedings of the International Conference for
High Performance Computing,
Networking, Storage and Analysis. 2015.
"""
| 1,338 | 40.84375 | 79 | py |
brainiak | brainiak-master/brainiak/funcalign/rsrm.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robust Shared Response Model (RSRM)
The implementation is based on the following publications:
.. [Turek2017] "Capturing Shared and Individual Information in fMRI Data",
J. Turek, C. Ellis, L. Skalaban, N. Turk-Browne, T. Willke
under review, 2017.
"""
# Authors: Javier Turek (Intel Labs), 2017
import logging
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
__all__ = [
"RSRM"
]
logger = logging.getLogger(__name__)
class RSRM(BaseEstimator, TransformerMixin):
"""Robust Shared Response Model (RSRM)
Given multi-subject data, factorize it as a shared response R among all
subjects, an orthogonal transform W per subject, and an individual
(outlying) sparse component S per subject:
.. math:: X_i \\approx W_i R + S_i, \\forall i=1 \\dots N
This unsupervised model allows to learn idiosyncratic information for
subjects and simultaneously improve the shared response estimation.
The model has similar properties to the Shared Response Model (SRM) with
the addition of the individual components.
The model is estimated solving the following optimization problem:
.. math::
\\min_{W_i, S_i, R}\\sum_i \\frac{1}{2}\\|X_i - W_i R - S_i\\|_F^2
.. math:: + \\gamma\\|S_i\\|_1
.. math:: s.t. \\qquad W_i^TW_i = I \\quad \\forall i=1 \\dots N
The solution to this problem is obtained by applying a Block-Coordinate
Descent procedure. More details can be found in [Turek2017]_.
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components. Higher values yield sparser individual components.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
r_ : array, shape=[features, timepoints]
The shared response.
s_ : list of array, element i has shape=[voxels_i, timepoints]
The individual components for each subject.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of timepoints for the alignment data must be the same across
subjects.
The Robust Shared Response Model is approximated using the
Block-Coordinate Descent (BCD) algorithm proposed in [Turek2017]_.
This is a single node version.
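Examples
--------
A minimal sketch with synthetic data; the sizes below are arbitrary:
>>> import numpy as np
>>> from brainiak.funcalign.rsrm import RSRM
>>> data = [np.random.rand(100, 60) for _ in range(3)]  # 3 subjects
>>> model = RSRM(n_iter=5, features=10)
>>> model = model.fit(data)
>>> r, s = model.transform(data)
>>> w_new, s_new = model.transform_subject(np.random.rand(80, 60))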
"""
def __init__(self, n_iter=10, features=50, gamma=1.0, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.gamma = gamma
self.rand_seed = rand_seed
def fit(self, X):
"""Compute the Robust Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data of one subject.
"""
logger.info('Starting RSRM')
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough timepoints to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment timepoints "
"between subjects.")
# Create a new random state
self.random_state_ = np.random.RandomState(self.rand_seed)
# Run RSRM
self.w_, self.r_, self.s_ = self._rsrm(X)
return self
def transform(self, X):
"""Use the model to transform new data to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Each element in the list contains the fMRI data of one subject.
Returns
-------
r : list of 2D arrays, element i has shape=[features_i, timepoints_i]
Shared responses from input data (X)
s : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Individual data obtained from fitting model to input data (X)
"""
# Check if the model exist
if not hasattr(self, 'w_'):
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
r = [None] * len(X)
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
r[subject], s[subject] = self._transform_new_data(X[subject],
subject)
return r, s
def _transform_new_data(self, X, subject):
"""Transform new data for a subjects by projecting to the shared subspace and
computing the individual information.
Parameters
----------
X : array, shape=[voxels, timepoints]
The fMRI data of the subject.
subject : int
The subject id.
Returns
-------
R : array, shape=[features, timepoints]
Shared response from input data (X)
S : array, shape=[voxels, timepoints]
Individual data obtained from fitting model to input data (X)
"""
S = np.zeros_like(X)
R = None
for i in range(self.n_iter):
R = self.w_[subject].T.dot(X - S)
S = self._shrink(X - self.w_[subject].dot(R), self.gamma)
return R, S
def transform_subject(self, X):
"""Transform a new subject using the existing model
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
s : 2D array, shape=[voxels, timepoints]
Individual term `S_{new}` for new subject
"""
# Check if the model exist
if not hasattr(self, 'w_'):
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.r_.shape[1]:
raise ValueError("The number of timepoints (TRs) does not match "
"the one in the model.")
s = np.zeros_like(X)
for i in range(self.n_iter):
w = self._update_transform_subject(X, s, self.r_)
s = self._shrink(X - w.dot(self.r_), self.gamma)
return w, s
def _rsrm(self, X):
"""Block-Coordinate Descent algorithm for fitting RSRM.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
"""
subjs = len(X)
voxels = [X[i].shape[0] for i in range(subjs)]
TRs = X[0].shape[1]
features = self.features
# Initialization
W = self._init_transforms(subjs, voxels, features, self.random_state_)
S = self._init_individual(subjs, voxels, TRs)
R = self._update_shared_response(X, S, W, features)
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(X, W, R, S, self.gamma)
logger.info('Objective function %f' % objective)
# Main loop
for i in range(self.n_iter):
W = self._update_transforms(X, S, R)
S = self._update_individual(X, W, R, self.gamma)
R = self._update_shared_response(X, S, W, features)
# Print objective function every iteration
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(X, W, R, S, self.gamma)
logger.info('Objective function %f' % objective)
return W, R, S
def _init_transforms(self, subjs, voxels, features, random_state):
"""Initialize the mappings (Wi) with random orthogonal matrices.
Parameters
----------
subjs : int
The number of subjects.
voxels : list of int
A list with the number of voxels per subject.
features : int
The number of features in the model.
random_state : `RandomState`
A random state to draw the mappings.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for
each subject.
Note
----
Not thread safe.
"""
# Init the Random seed generator
np.random.seed(self.rand_seed)
# Draw a random W for each subject
W = [random_state.random_sample((voxels[i], features))
for i in range(subjs)]
        # Make it orthogonal with QR decomposition
for i in range(subjs):
W[i], _ = np.linalg.qr(W[i])
return W
@staticmethod
def _objective_function(X, W, R, S, gamma):
"""Evaluate the objective function.
.. math:: \\sum_{i=1}^{N} 1/2 \\| X_i - W_i R - S_i \\|_F^2
        .. math:: + \\gamma * \\|S_i\\|_1
Parameters
----------
X : list of array, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
func : float
The RSRM objective function evaluated on the parameters to this
function.
"""
subjs = len(X)
func = .0
for i in range(subjs):
func += 0.5 * np.sum((X[i] - W[i].dot(R) - S[i])**2) \
+ gamma * np.sum(np.abs(S[i]))
return func
@staticmethod
def _update_individual(X, W, R, gamma):
"""Update the individual components `S_i`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
"""
subjs = len(X)
S = []
for i in range(subjs):
S.append(RSRM._shrink(X[i] - W[i].dot(R), gamma))
return S
@staticmethod
def _init_individual(subjs, voxels, TRs):
"""Initializes the individual components `S_i` to empty (all zeros).
Parameters
----------
subjs : int
The number of subjects.
voxels : list of int
A list with the number of voxels per subject.
TRs : int
The number of timepoints in the data.
Returns
-------
S : list of 2D array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject initialized
to zero.
"""
return [np.zeros((voxels[i], TRs)) for i in range(subjs)]
@staticmethod
def _update_shared_response(X, S, W, features):
"""Update the shared response `R`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
features : int
The number of features in the model.
Returns
-------
R : array, shape=[features, timepoints]
The updated shared response.
"""
subjs = len(X)
TRs = X[0].shape[1]
R = np.zeros((features, TRs))
# Project the subject data with the individual component removed into
# the shared subspace and average over all subjects.
for i in range(subjs):
R += W[i].T.dot(X[i]-S[i])
R /= subjs
return R
@staticmethod
def _update_transform_subject(Xi, Si, R):
"""Updates the mappings `W_i` for one subject.
Parameters
----------
Xi : array, shape=[voxels, timepoints]
The fMRI data :math:`X_i` for aligning the subject.
Si : array, shape=[voxels, timepoints]
The individual component :math:`S_i` for the subject.
R : array, shape=[features, timepoints]
The shared response.
Returns
-------
Wi : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for the subject.
"""
A = Xi.dot(R.T)
A -= Si.dot(R.T)
# Solve the Procrustes problem
U, _, V = np.linalg.svd(A, full_matrices=False)
return U.dot(V)
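    # A minimal sketch (outside the published API) checking the Procrustes
    # update above: for the reduced SVD of A, W = U.dot(V) always has
    # orthonormal columns, since V.T.dot(U.T.dot(U)).dot(V) is the identity.
    #
    #     import numpy as np
    #     rng = np.random.RandomState(0)
    #     A = rng.randn(100, 10)                # voxels x features
    #     U, _, V = np.linalg.svd(A, full_matrices=False)
    #     W = U.dot(V)
    #     assert np.allclose(W.T.dot(W), np.eye(10))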
@staticmethod
def _update_transforms(X, S, R):
"""Updates the mappings `W_i` for each subject.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
            one subject.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
"""
subjs = len(X)
W = []
for i in range(subjs):
W.append(RSRM._update_transform_subject(X[i], S[i], R))
return W
@staticmethod
def _shrink(v, gamma):
"""Soft-shrinkage of an array with parameter gamma.
Parameters
----------
v : array
Array containing the values to be applied to the shrinkage operator
gamma : float
Shrinkage parameter.
Returns
-------
v : array
The same input array after the shrinkage operator was applied.
"""
pos = v > gamma
neg = v < -gamma
v[pos] -= gamma
v[neg] += gamma
v[np.logical_and(~pos, ~neg)] = .0
return v
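    # A minimal sketch (hypothetical values) of the operator above: it is the
    # proximal operator of gamma * ||.||_1, i.e. sign(v) * max(|v| - gamma, 0).
    #
    #     import numpy as np
    #     v = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
    #     out = np.sign(v) * np.maximum(np.abs(v) - 1.0, 0.0)
    #     # out == [-2., 0., 0., 0., 2.], matching RSRM._shrink(v, 1.0)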
| 17,711 | 30.516014 | 85 | py |
brainiak | brainiak-master/brainiak/funcalign/sssrm.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semi-Supervised Shared Response Model (SS-SRM)
The implementations are based on the following publications:
.. [Turek2016] "A Semi-Supervised Method for Multi-Subject fMRI Functional
Alignment",
J. S. Turek, T. L. Willke, P.-H. Chen, P. J. Ramadge
IEEE International Conference on Acoustics, Speech and Signal Processing
(ICASSP), 2017, pp. 1098-1102.
https://doi.org/10.1109/ICASSP.2017.7952326
"""
# Authors: Javier Turek (Intel Labs), 2016
import logging
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
from sklearn.utils.multiclass import unique_labels
import theano
import theano.tensor as T
import theano.compile.sharedvalue as S
from pymanopt.manifolds import Euclidean
from pymanopt.manifolds import Product
from pymanopt.solvers import ConjugateGradient
from pymanopt import Problem
from pymanopt.manifolds import Stiefel
import pymanopt
import gc
from brainiak.utils import utils
from brainiak.funcalign import srm
__all__ = [
"SSSRM",
]
logger = logging.getLogger(__name__)
# FIXME workaround for Theano failure on macOS Conda builds
# https://travis-ci.org/github/brainiak/brainiak/jobs/689445834#L1414
# Inspired by workaround from PyMC3
# https://github.com/pymc-devs/pymc3/pull/3767
theano.config.gcc.cxxflags = "-Wno-c++11-narrowing"
# FIXME workaround for pymanopt only working with tensorflow 1.
# We don't use pymanopt+TF so we just let pymanopt pretend TF doesn't exist.
pymanopt.tools.autodiff._tensorflow.tf = None
class SSSRM(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Semi-Supervised Shared Response Model (SS-SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject, using also labeled
data to train a Multinomial Logistic Regression (MLR) classifier (with
l2 regularization) in a semi-supervised manner:
.. math::
(1-\\alpha) Loss_{SRM}(W_i,S;X_i)
        + \\alpha/\\gamma Loss_{MLR}(\\theta, bias; \\{(W_i^T \\times Z_i, y_i)\\})
+ R(\\theta)
:label: sssrm-eq
(see Equations (1) and (4) in [Turek2016]_).
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
gamma : float, default: 1.0
Regularization parameter for the classifier.
alpha : float, default: 0.5
Balance parameter between the SRM term and the MLR term.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
theta_ : array, shape=[classes, features]
The MLR class plane parameters.
bias_ : array, shape=[classes]
The MLR class biases.
classes_ : array of int, shape=[classes]
Mapping table for each classes to original class label.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples for the alignment data must be the same across
subjects. The number of labeled samples per subject can be different.
The Semi-Supervised Shared Response Model is approximated using the
Block-Coordinate Descent (BCD) algorithm proposed in [Turek2016]_.
This is a single node version.
"""
def __init__(self, n_iter=10, features=50, gamma=1.0, alpha=0.5,
rand_seed=0):
self.n_iter = n_iter
self.features = features
self.gamma = gamma
self.alpha = alpha
self.rand_seed = rand_seed
return
def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
        # voxels per subject. Also check that there are labels for all the
        # classification samples.
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self
def _init_classes(self, y):
"""Map all possible classes to the range [0,..,C-1]
Parameters
----------
y : list of arrays of int, each element has shape=[samples_i,]
Labels of the samples for each subject
Returns
-------
new_y : list of arrays of int, each element has shape=[samples_i,]
Mapped labels of the samples for each subject
Note
----
The mapping of the classes is saved in the attribute classes_.
"""
self.classes_ = unique_labels(utils.concatenate_not_none(y))
new_y = [None] * len(y)
for s in range(len(y)):
new_y[s] = np.digitize(y[s], self.classes_) - 1
return new_y
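    # A minimal sketch (hypothetical labels) of the mapping above:
    # unique_labels yields the sorted distinct labels, so np.digitize against
    # that sorted array sends each label to its index in [0, C-1].
    #
    #     import numpy as np
    #     classes = np.array([2, 5, 9])            # as found by unique_labels
    #     np.digitize(np.array([5, 2, 9, 9]), classes) - 1
    #     # -> array([1, 0, 2, 2])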
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects.
y : not used as it only applies the mappings
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def predict(self, X):
"""Classify the output for given data
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
The number of voxels should be according to each subject at
the moment of training the model.
Returns
-------
p: list of arrays, element i has shape=[samples_i]
Predictions for each data sample.
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
X_shared = self.transform(X)
p = [None] * len(X_shared)
for subject in range(len(X_shared)):
sumexp, _, exponents = utils.sumexp_stable(
self.theta_.T.dot(X_shared[subject]) + self.bias_)
p[subject] = self.classes_[
(exponents / sumexp[np.newaxis, :]).argmax(axis=0)]
return p
def _sssrm(self, data_align, data_sup, labels):
"""Block-Coordinate Descent algorithm for fitting SS-SRM.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
classes = self.classes_.size
# Initialization:
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2**32))
for i in range(len(data_align))]
        # Set each Wi to a random orthogonal voxels-by-features matrix
w, _ = srm._init_w_transforms(data_align, self.features, random_states)
# Initialize the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Initialize theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# calculate and print the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup, labels,
w, s, theta, bias)
logger.info('Objective function %f' % objective)
# Main loop:
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update the mappings Wi
w = self._update_w(data_align, data_sup, labels, w, s, theta, bias)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating Wi %f'
% objective)
# Update the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating S %f'
% objective)
# Update the MLR classifier, theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating MLR %f'
% objective)
return w, s, theta, bias
def _update_classifier(self, data, labels, w, classes):
"""Update the classifier parameters theta and bias
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of 2D array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
classes : int
The number of classes in the classifier.
Returns
-------
theta : array, shape=[features, classes]
The MLR parameter for the class planes.
bias : array shape=[classes,]
The MLR parameter for class biases.
"""
# Stack the data and labels for training the classifier
data_stacked, labels_stacked, weights = \
SSSRM._stack_list(data, labels, w)
features = w[0].shape[1]
total_samples = weights.size
data_th = S.shared(data_stacked.astype(theano.config.floatX))
val_ = S.shared(labels_stacked)
total_samples_S = S.shared(total_samples)
theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
bias_th = T.col(name='bias', dtype=theano.config.floatX)
constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
weights_th = S.shared(weights)
log_p_y_given_x = \
T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_])
/ weights_th) + 0.5 * T.sum(theta_th ** 2)
manifold = Product((Euclidean(features, classes),
Euclidean(classes, 1)))
problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th],
verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-6)
solution = solver.solve(problem)
theta = solution[0]
bias = solution[1]
del constf2
del theta_th
del bias_th
del data_th
del val_
del solver
del solution
return theta, bias
def _update_w(self, data_align, data_sup, labels, w, s, theta, bias):
"""
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
w : list of 2D array, element i has shape=[voxels_i, features]
The updated orthogonal transforms (mappings).
"""
subjects = len(data_align)
s_th = S.shared(s.astype(theano.config.floatX))
theta_th = S.shared(theta.T.astype(theano.config.floatX))
bias_th = S.shared(bias.T.astype(theano.config.floatX),
broadcastable=(True, False))
for subject in range(subjects):
logger.info('Subject Wi %d' % subject)
# Solve for subject i
# Create the theano function
w_th = T.matrix(name='W', dtype=theano.config.floatX)
data_srm_subject = \
S.shared(data_align[subject].astype(theano.config.floatX))
constf1 = \
S.shared((1 - self.alpha) * 0.5 / data_align[subject].shape[1],
allow_downcast=True)
f1 = constf1 * T.sum((data_srm_subject - w_th.dot(s_th))**2)
if data_sup[subject] is not None:
lr_samples_S = S.shared(data_sup[subject].shape[1])
data_sup_subject = \
S.shared(data_sup[subject].astype(theano.config.floatX))
labels_S = S.shared(labels[subject])
constf2 = S.shared(-self.alpha / self.gamma
/ data_sup[subject].shape[1],
allow_downcast=True)
log_p_y_given_x = T.log(T.nnet.softmax((theta_th.dot(
w_th.T.dot(data_sup_subject))).T + bias_th))
f2 = constf2 * T.sum(
log_p_y_given_x[T.arange(lr_samples_S), labels_S])
f = f1 + f2
else:
f = f1
# Define the problem and solve
f_subject = self._objective_function_subject(data_align[subject],
data_sup[subject],
labels[subject],
w[subject],
s, theta, bias)
minstep = np.amin(((10**-np.floor(np.log10(f_subject))), 1e-1))
manifold = Stiefel(w[subject].shape[0], w[subject].shape[1])
problem = Problem(manifold=manifold, cost=f, arg=w_th, verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-2, minstepsize=minstep)
w[subject] = np.array(solver.solve(
problem, x=w[subject].astype(theano.config.floatX)))
if data_sup[subject] is not None:
del f2
del log_p_y_given_x
del data_sup_subject
del labels_S
del solver
del problem
del manifold
del f
del f1
del data_srm_subject
del w_th
del theta_th
del bias_th
del s_th
# Run garbage collector to avoid filling up the memory
gc.collect()
return w
@staticmethod
def _compute_shared_response(data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
def _objective_function(self, data_align, data_sup, labels, w, s, theta,
bias):
"""Compute the objective function of the Semi-Supervised SRM
See :eq:`sssrm-eq`.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function evaluated based on the parameters to
this function.
"""
subjects = len(data_align)
# Compute the SRM loss
f_val = 0.0
for subject in range(subjects):
samples = data_align[subject].shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align[subject] - w[subject].dot(s),
'fro')**2
# Compute the MLR loss
f_val += self._loss_lr(data_sup, labels, w, theta, bias)
return f_val
def _objective_function_subject(self, data_align, data_sup, labels, w, s,
theta, bias):
"""Compute the objective function for one subject.
        .. math:: (1-\\alpha) Loss_{SRM,i}(W_i,S;X_i)
        .. math:: + \\alpha/\\gamma Loss_{MLR,i}(\\theta, bias; \\{(W_i^T Z_i, y_i)\\})
        .. math:: + R(\\theta)
Parameters
----------
data_align : 2D array, shape=[voxels_i, samples_align]
Contains the fMRI data for alignment of subject i.
data_sup : 2D array, shape=[voxels_i, samples_i]
Contains the fMRI data of one subject for the classification task.
labels : array of int, shape=[samples_i]
The labels for the data samples in data_sup.
w : array, shape=[voxels_i, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function for subject i evaluated on the
parameters to this function.
"""
# Compute the SRM loss
f_val = 0.0
samples = data_align.shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align - w.dot(s), 'fro')**2
# Compute the MLR loss
f_val += self._loss_lr_subject(data_sup, labels, w, theta, bias)
return f_val
def _loss_lr_subject(self, data, labels, w, theta, bias):
"""Compute the Loss MLR for a single subject (without regularization)
Parameters
----------
data : array, shape=[voxels, samples]
The fMRI data of subject i for the classification task.
labels : array of int, shape=[samples]
The labels for the data samples in data.
w : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the subject
"""
if data is None:
return 0.0
samples = data.shape[1]
thetaT_wi_zi_plus_bias = theta.T.dot(w.T.dot(data)) + bias
sum_exp, max_value, _ = utils.sumexp_stable(thetaT_wi_zi_plus_bias)
sum_exp_values = np.log(sum_exp) + max_value
aux = 0.0
for sample in range(samples):
label = labels[sample]
aux += thetaT_wi_zi_plus_bias[label, sample]
return self.alpha / samples / self.gamma * (sum_exp_values.sum() - aux)
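    # A minimal sketch of the max-shift ("log-sum-exp") trick that
    # utils.sumexp_stable is assumed to implement here: subtracting the
    # columnwise maximum keeps np.exp from overflowing while leaving
    # log(sum(exp(.))) unchanged.
    #
    #     import numpy as np
    #     x = np.array([[1000.0, 1.0], [999.0, 2.0]])   # classes x samples
    #     m = x.max(axis=0)
    #     log_sum = np.log(np.exp(x - m).sum(axis=0)) + m
    #     # finite, whereas np.exp(1000.0) alone overflows to inf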
def _loss_lr(self, data, labels, w, theta, bias):
"""Compute the Loss MLR (with the regularization)
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the SS-SRM model
"""
subjects = len(data)
loss = 0.0
for subject in range(subjects):
if labels[subject] is not None:
loss += self._loss_lr_subject(data[subject], labels[subject],
w[subject], theta, bias)
return loss + 0.5 * np.linalg.norm(theta, 'fro')**2
@staticmethod
def _stack_list(data, data_labels, w):
"""Construct a numpy array by stacking arrays in a list
        Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
data_labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
data_stacked : 2D array, shape=[samples, features]
The data samples from all subjects are stacked into a single
2D array, where "samples" is the sum of samples_i.
labels_stacked : array, shape=[samples,]
The labels from all subjects are stacked into a single
array, where "samples" is the sum of samples_i.
weights : array, shape=[samples,]
The number of samples of the subject that are related to that
sample. They become a weight per sample in the MLR loss.
"""
labels_stacked = utils.concatenate_not_none(data_labels)
weights = np.empty((labels_stacked.size,))
data_shared = [None] * len(data)
curr_samples = 0
for s in range(len(data)):
if data[s] is not None:
subject_samples = data[s].shape[1]
curr_samples_end = curr_samples + subject_samples
weights[curr_samples:curr_samples_end] = subject_samples
data_shared[s] = w[s].T.dot(data[s])
curr_samples += data[s].shape[1]
data_stacked = utils.concatenate_not_none(data_shared, axis=1).T
return data_stacked, labels_stacked, weights
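    # A minimal sketch (hypothetical sizes) of the stacking above for two
    # subjects with 2 and 3 labeled samples: each stacked sample is weighted
    # by the number of labeled samples of its own subject.
    #
    #     import numpy as np
    #     w = [np.eye(4)[:, :2], np.eye(5)[:, :2]]     # voxels_i x features
    #     data = [np.ones((4, 2)), np.ones((5, 3))]    # voxels_i x samples_i
    #     labels = [np.array([0, 1]), np.array([1, 0, 1])]
    #     # SSSRM._stack_list(data, labels, w) returns data_stacked of shape
    #     # (5, 2), labels_stacked of shape (5,), and
    #     # weights == array([2., 2., 3., 3., 3.])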
| 29,758 | 34.72509 | 79 | py |
brainiak | brainiak-master/brainiak/funcalign/srm.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Response Model (SRM)
The implementations are based on the following publications:
.. [Chen2015] "A Reduced-Dimension fMRI Shared Response Model",
P.-H. Chen, J. Chen, Y. Yeshurun-Dishon, U. Hasson, J. Haxby, P. Ramadge
Advances in Neural Information Processing Systems (NIPS), 2015.
http://papers.nips.cc/paper/5855-a-reduced-dimension-fmri-shared-response-model
.. [Anderson2016] "Enabling Factor Analysis on Thousand-Subject Neuroimaging
Datasets",
Michael J. Anderson, Mihai Capotă, Javier S. Turek, Xia Zhu, Theodore L.
Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning, Peter J. Ramadge,
Kenneth A. Norman,
IEEE International Conference on Big Data, 2016.
https://doi.org/10.1109/BigData.2016.7840719
"""
# Authors: Po-Hsuan Chen (Princeton Neuroscience Institute) and Javier Turek
# (Intel Labs), 2015
import logging
import numpy as np
import scipy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.exceptions import NotFittedError
from mpi4py import MPI
import sys
__all__ = [
"DetSRM",
"SRM",
]
logger = logging.getLogger(__name__)
def _init_w_transforms(data, features, random_states, comm=MPI.COMM_SELF):
"""Initialize the mappings (Wi) for the SRM with random orthogonal matrices.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
features : int
The number of features in the model.
random_states : list of `RandomState`s
One `RandomState` instance per subject.
comm : mpi4py.MPI.Intracomm
The MPI communicator containing the data
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for each
subject.
voxels : list of int
A list with the number of voxels per subject.
Note
----
This function assumes that the numpy random number generator was
initialized.
Not thread safe.
"""
w = []
subjects = len(data)
voxels = np.empty(subjects, dtype=int)
# Set Wi to a random orthogonal voxels by features matrix
for subject in range(subjects):
if data[subject] is not None:
voxels[subject] = data[subject].shape[0]
rnd_matrix = random_states[subject].random_sample((
voxels[subject], features))
q, r = np.linalg.qr(rnd_matrix)
w.append(q)
else:
voxels[subject] = 0
w.append(None)
voxels = comm.allreduce(voxels, op=MPI.SUM)
return w, voxels
def load(file):
"""Load fitted SRM from .npz file.
Parameters
----------
file : str, file-like object, or pathlib.Path
The .npz file to read containing fitted SRM saved using srm.save
Returns
--------
srm : fitted SRM model
"""
# Load file and extract SRM attributes
loaded = np.load(file)
w_ = [s for s in loaded['w_']]
s_ = loaded['s_']
sigma_s_ = loaded['sigma_s_']
mu_ = [s for s in loaded['mu_']]
rho2_ = loaded['rho2_']
features, n_iter, rand_seed = loaded['kwargs']
# Initialize new SRM object and attach loaded attributes
srm = SRM(n_iter=n_iter, features=features, rand_seed=rand_seed)
srm.w_ = w_
srm.s_ = s_
srm.sigma_s_ = sigma_s_
srm.mu_ = mu_
srm.rho2_ = rho2_
return srm
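# A minimal round-trip sketch for load (hypothetical shapes; writes
# srm_model.npz to the working directory):
#
#     import numpy as np
#     data = [np.random.RandomState(0).randn(100, 60) for _ in range(3)]
#     model = SRM(n_iter=5, features=10)
#     model.fit(data)
#     model.save('srm_model')            # np.savez_compressed appends .npz
#     restored = load('srm_model.npz')   # w_, s_, sigma_s_, mu_, rho2_ restored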
class SRM(BaseEstimator, TransformerMixin):
"""Probabilistic Shared Response Model (SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
comm : mpi4py.MPI.Intracomm
The MPI communicator containing the data
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
sigma_s_ : array, shape=[features, features]
The covariance of the shared response Normal distribution.
mu_ : list of array, element i has shape=[voxels_i]
The voxel means over the samples for each subject.
rho2_ : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
comm : mpi4py.MPI.Intracomm
The MPI communicator containing the data
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The probabilistic Shared Response Model is approximated using the
Expectation Maximization (EM) algorithm proposed in [Chen2015]_. The
implementation follows the optimizations published in [Anderson2016]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2 + K^3))` and the
memory complexity is :math:`O(V T)` with I - the number of iterations,
V - the sum of voxels from all subjects, T - the number of samples, and
K - the number of features (typically, :math:`V \\gg T \\gg K`).
"""
def __init__(self, n_iter=10, features=50, rand_seed=0,
comm=MPI.COMM_SELF):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
self.comm = comm
return
def fit(self, X, y=None):
"""Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Probabilistic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
number_subjects = len(X)
number_subjects_vec = self.comm.allgather(number_subjects)
for rank in range(self.comm.Get_size()):
if number_subjects_vec[rank] != number_subjects:
raise ValueError(
"Not all ranks have same number of subjects")
# Collect size information
        shape0 = np.zeros((number_subjects,), dtype=int)
        shape1 = np.zeros((number_subjects,), dtype=int)
for subject in range(number_subjects):
if X[subject] is not None:
assert_all_finite(X[subject])
shape0[subject] = X[subject].shape[0]
shape1[subject] = X[subject].shape[1]
shape0 = self.comm.allreduce(shape0, op=MPI.SUM)
shape1 = self.comm.allreduce(shape1, op=MPI.SUM)
# Check if all subjects have same number of TRs
number_trs = np.min(shape1)
for subject in range(number_subjects):
if shape1[subject] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
if shape1[subject] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _init_structures(self, data, subjects):
"""Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`.
"""
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
rho2[subject] = 1
if data[subject] is not None:
mu.append(np.mean(data[subject], 1))
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
else:
mu.append(None)
trace_xtx[subject] = 0
x.append(None)
return x, mu, rho2, trace_xtx
def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples):
"""Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
        wt_invpsi_x : array, shape=[features, samples]
            The sum over subjects of :math:`W_i^T \\Psi_i^{-1} X_i`.
samples : int
The total number of samples in the data.
Returns
-------
loglikehood : float
The log-likelihood value.
"""
log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
+ np.log(np.diag(chol_sigma_s) ** 2).sum())
loglikehood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
loglikehood += 0.5 * np.trace(
wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
# + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
return loglikehood
@staticmethod
def _update_transform_subject(Xi, S):
"""Updates the mappings `W_i` for one subject.
Parameters
----------
Xi : array, shape=[voxels, timepoints]
The fMRI data :math:`X_i` for aligning the subject.
S : array, shape=[features, timepoints]
The shared response.
Returns
-------
Wi : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for the subject.
"""
A = Xi.dot(S.T)
# Solve the Procrustes problem
U, _, V = np.linalg.svd(A, full_matrices=False)
return U.dot(V)
def transform_subject(self, X):
"""Transform a new subject using the existing model.
        The subject is assumed to have received equivalent stimulation.
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.s_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
w = self._update_transform_subject(X, self.s_)
return w
def save(self, file):
"""Save fitted SRM to .npz file.
Parameters
----------
file : str, file-like object, or pathlib.Path
Filename (string), open file (file-like object) or pathlib.Path
where the fitted SRM will be saved. If file is a string or a Path,
the .npz extension will be appended to the filename if it is not
already there.
Returns
-------
None
"""
# Check if the model has been estimated
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
np.savez_compressed(
file,
w_=self.w_,
s_=self.s_,
sigma_s_=self.sigma_s_,
mu_=self.mu_,
rho2_=self.rho2_,
kwargs=np.array([self.features, self.n_iter, self.rand_seed])
)
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
"""
local_min = min([d.shape[1] for d in data if d is not None],
default=sys.maxsize)
samples = self.comm.allreduce(local_min, op=MPI.MIN)
subjects = len(data)
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2 ** 32))
for i in range(len(data))]
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject, and trace_xtx with
# the ||X_i||_F^2 of each subject.
w, voxels = _init_w_transforms(data, self.features, random_states,
self.comm)
x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
shared_response = np.zeros((self.features, samples))
sigma_s = np.identity(self.features)
rank = self.comm.Get_rank()
        # Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# E-step:
            # Sum the inverted rho2 elements for computing W^T * Psi^-1 * W
if rank == 0:
rho0 = (1 / rho2).sum()
# Invert Sigma_s using Cholesky factorization
(chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve(
(chol_sigma_s, lower_sigma_s), np.identity(self.features),
check_finite=False)
# Invert (Sigma_s + rho_0 * I) using Cholesky factorization
sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
chol_sigma_s_rhos, lower_sigma_s_rhos = \
scipy.linalg.cho_factor(sigma_s_rhos,
check_finite=False)
inv_sigma_s_rhos = scipy.linalg.cho_solve(
(chol_sigma_s_rhos, lower_sigma_s_rhos),
np.identity(self.features), check_finite=False)
# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
# of X_i^T * rho_i^-2 * X_i
wt_invpsi_x = np.zeros((self.features, samples))
trace_xt_invsigma2_x = 0.0
for subject in range(subjects):
if data[subject] is not None:
wt_invpsi_x += (w[subject].T.dot(x[subject])) \
/ rho2[subject]
trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
wt_invpsi_x = self.comm.reduce(wt_invpsi_x, op=MPI.SUM)
trace_xt_invsigma2_x = self.comm.reduce(trace_xt_invsigma2_x,
op=MPI.SUM)
trace_sigma_s = None
if rank == 0:
log_det_psi = np.sum(np.log(rho2) * voxels)
# Update the shared response
shared_response = sigma_s.dot(
np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
wt_invpsi_x)
# M-step
# Update Sigma_s and compute its trace
sigma_s = (inv_sigma_s_rhos
+ shared_response.dot(shared_response.T) / samples)
trace_sigma_s = samples * np.trace(sigma_s)
shared_response = self.comm.bcast(shared_response)
trace_sigma_s = self.comm.bcast(trace_sigma_s)
# Update each subject's mapping transform W_i and error variance
# rho_i^2
for subject in range(subjects):
if x[subject] is not None:
a_subject = x[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, s_subject, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
rho2[subject] = trace_xtx[subject]
rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
rho2[subject] += trace_sigma_s
rho2[subject] /= samples * voxels[subject]
else:
rho2[subject] = 0
rho2 = self.comm.allreduce(rho2, op=MPI.SUM)
if rank == 0:
if logger.isEnabledFor(logging.INFO):
# Calculate and log the current log-likelihood for checking
# convergence
loglike = self._likelihood(
chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples)
logger.info('Objective function %f' % loglike)
sigma_s = self.comm.bcast(sigma_s)
return sigma_s, w, mu, rho2, shared_response
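    # The E-step above inverts Sigma_s through its Cholesky factorization
    # rather than np.linalg.inv; a minimal sketch of the equivalence for any
    # symmetric positive-definite matrix:
    #
    #     import numpy as np
    #     import scipy.linalg
    #     m = np.array([[4.0, 1.0], [1.0, 3.0]])
    #     c, low = scipy.linalg.cho_factor(m)
    #     inv_m = scipy.linalg.cho_solve((c, low), np.eye(2))
    #     assert np.allclose(inv_m, np.linalg.inv(m))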
class DetSRM(BaseEstimator, TransformerMixin):
"""Deterministic Shared Response Model (DetSRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
random_state_: `RandomState`
Random number generator initialized using rand_seed
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The Deterministic Shared Response Model is approximated using the
Block Coordinate Descent (BCD) algorithm proposed in [Chen2015]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2))` and the memory
complexity is :math:`O(V T)` with I - the number of iterations, V - the
sum of voxels from all subjects, T - the number of samples, K - the
number of features (typically, :math:`V \\gg T \\gg K`), and N - the
number of subjects.
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Deterministic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.w_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform data to the Shared Response subspace
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject.
y : not used
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _objective_function(self, data, w, s):
"""Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value.
"""
subjects = len(data)
objective = 0.0
for m in range(subjects):
objective += \
np.linalg.norm(data[m] - w[m].dot(s), 'fro') ** 2
return objective * 0.5 / data[0].shape[1]
def _compute_shared_response(self, data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
@staticmethod
def _update_transform_subject(Xi, S):
"""Updates the mappings `W_i` for one subject.
Parameters
----------
Xi : array, shape=[voxels, timepoints]
The fMRI data :math:`X_i` for aligning the subject.
S : array, shape=[features, timepoints]
The shared response.
Returns
-------
Wi : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for the subject.
"""
A = Xi.dot(S.T)
# Solve the Procrustes problem
U, _, V = np.linalg.svd(A, full_matrices=False)
return U.dot(V)
def transform_subject(self, X):
"""Transform a new subject using the existing model.
        The subject is assumed to have received equivalent stimulation.
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.s_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
w = self._update_transform_subject(X, self.s_)
return w
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
subjects = len(data)
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2 ** 32))
for i in range(len(data))]
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject.
w, _ = _init_w_transforms(data, self.features, random_states)
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
# Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update each subject's mapping transform W_i:
for subject in range(subjects):
a_subject = data[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, _, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
# Update the shared response:
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
return w, shared_response
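    # A minimal end-to-end sketch for DetSRM (synthetic data, hypothetical
    # sizes):
    #
    #     import numpy as np
    #     rng = np.random.RandomState(0)
    #     s_true = rng.randn(10, 60)                    # features x samples
    #     data = [np.linalg.qr(rng.randn(100, 10))[0].dot(s_true)
    #             for _ in range(3)]                    # one array per subject
    #     model = DetSRM(n_iter=5, features=10, rand_seed=0)
    #     shared = model.fit(data).transform(data)      # list of 10 x 60 arrays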
| 30,738 | 32.631291 | 82 | py |
brainiak | brainiak-master/brainiak/funcalign/__init__.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional alignment of volumes from different subjects."""
| 655 | 42.733333 | 75 | py |
brainiak | brainiak-master/brainiak/funcalign/fastsrm.py | """Fast Shared Response Model (FastSRM)
The implementation is based on the following publications:
.. [Richard2019] "Fast Shared Response Model for fMRI data"
H. Richard, L. Martin, A. Pinho, J. Pillow, B. Thirion, 2019
https://arxiv.org/pdf/1909.12537.pdf
"""
# Author: Hugo Richard
import hashlib
import logging
import os
import numpy as np
import scipy
from joblib import Parallel, delayed
from brainiak.funcalign.srm import DetSRM
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
import uuid
__all__ = [
"FastSRM",
]
logger = logging.getLogger(__name__)
def get_shape(path):
"""Get shape of saved np array
Parameters
----------
path: str
path to np array
"""
f = open(path, "rb")
version = np.lib.format.read_magic(f)
shape, fortran_order, dtype = np.lib.format._read_array_header(f, version)
f.close()
return shape
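# A minimal sketch exercising get_shape (writes a temporary .npy file):
#
#     import os
#     import tempfile
#     path = os.path.join(tempfile.mkdtemp(), "arr.npy")
#     np.save(path, np.zeros((7, 3)))
#     assert get_shape(path) == (7, 3)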
def safe_load(data):
"""If data is an array returns data else returns np.load(data)"""
if isinstance(data, np.ndarray):
return data
else:
return np.load(data)
def safe_encode(img):
if isinstance(img, np.ndarray):
        name = hashlib.md5(img.tobytes()).hexdigest()
else:
name = hashlib.md5(img.encode()).hexdigest()
return name
def assert_non_empty_list(input_list, list_name):
"""
Check that input list is not empty
Parameters
----------
input_list: list
list_name: str
Name of the list
"""
if len(input_list) == 0:
raise ValueError("%s is a list of length 0 which is not valid" %
list_name)
def assert_array_2axis(array, name_array):
"""Check that input is an np array with 2 axes
Parameters
----------
array: np array
name_array: str
Name of the array
"""
if not isinstance(array, np.ndarray):
raise ValueError("%s should be of type "
"np.ndarray but is of type %s" %
(name_array, type(array)))
if len(array.shape) != 2:
raise ValueError("%s must have exactly 2 axes "
"but has %i axes" % (name_array, len(array.shape)))
def assert_valid_index(indexes, max_value, name_indexes):
"""
Check that indexes are between 0 and max_value and number
of indexes is less than max_value
"""
for i, ind_i in enumerate(indexes):
if ind_i < 0 or ind_i >= max_value:
raise ValueError("Index %i of %s has value %i "
"whereas value should be between 0 and %i" %
(i, name_indexes, ind_i, max_value - 1))
def _check_imgs_list(imgs):
"""
Checks that imgs is a non empty list of elements of the same type
Parameters
----------
imgs : list
"""
# Check the list is non empty
assert_non_empty_list(imgs, "imgs")
# Check that all input have same type
for i in range(len(imgs)):
if not isinstance(imgs[i], type(imgs[0])):
raise ValueError("imgs[%i] has type %s whereas "
"imgs[%i] has type %s. "
"This is inconsistent." %
(i, type(imgs[i]), 0, type(imgs[0])))
def _check_imgs_list_list(imgs):
"""
Check input images if they are list of list of arrays
Parameters
----------
imgs : list of list of array of shape [n_voxels, n_components]
imgs is a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
shapes: array
Shape of input images
"""
n_subjects = len(imgs)
# Check that the number of session is not 0
assert_non_empty_list(imgs[0], "imgs[%i]" % 0)
# Check that the number of sessions is the same for all subjects
n_sessions = None
for i in range(len(imgs)):
if n_sessions is None:
n_sessions = len(imgs[i])
if n_sessions != len(imgs[i]):
raise ValueError("imgs[%i] has length %i whereas imgs[%i] "
"has length %i. All subjects should have "
"the same number of sessions." %
(i, len(imgs[i]), 0, len(imgs[0])))
shapes = np.zeros((n_subjects, n_sessions, 2))
# Run array-level checks
for i in range(len(imgs)):
for j in range(len(imgs[i])):
assert_array_2axis(imgs[i][j], "imgs[%i][%i]" % (i, j))
shapes[i, j, :] = imgs[i][j].shape
return shapes
def _check_imgs_list_array(imgs):
"""
Check input images if they are list of arrays.
In this case returned images are a list of list of arrays
where element i,j of the array is a numpy array of
shape [n_voxels, n_timeframes] that contains the data of subject i
collected during session j.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
imgs is a list of arrays where element i of the array is
a numpy array of shape [n_voxels, n_timeframes] that contains the
data of subject i (number of sessions is implicitly 1)
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
shapes: array
Shape of input images
new_imgs: list of list of array of shape [n_voxels, n_components]
"""
n_subjects = len(imgs)
n_sessions = 1
shapes = np.zeros((n_subjects, n_sessions, 2))
new_imgs = []
for i in range(len(imgs)):
assert_array_2axis(imgs[i], "imgs[%i]" % i)
shapes[i, 0, :] = imgs[i].shape
new_imgs.append([imgs[i]])
return new_imgs, shapes
def _check_imgs_array(imgs):
"""Check input image if it is an array
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_voxels, n_timeframes]
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
shapes : array
Shape of input images
"""
assert_array_2axis(imgs, "imgs")
n_subjects, n_sessions = imgs.shape
shapes = np.zeros((n_subjects, n_sessions, 2))
for i in range(n_subjects):
for j in range(n_sessions):
            if not isinstance(imgs[i, j], (str, np.str_)):
raise ValueError("imgs[%i, %i] is stored using "
"type %s which is not a str" %
(i, j, type(imgs[i, j])))
shapes[i, j, :] = get_shape(imgs[i, j])
return shapes
def _check_shapes_components(n_components, n_timeframes):
"""Check that n_timeframes is greater than number of components"""
def _check_shapes_atlas_compatibility(n_voxels,
n_timeframes,
n_components=None,
atlas_shape=None):
if n_components is not None:
if np.sum(n_timeframes) < n_components:
raise ValueError("Total number of timeframes is shorter than "
"number of components (%i < %i)" %
(np.sum(n_timeframes), n_components))
if atlas_shape is not None:
n_supervoxels, n_atlas_voxels = atlas_shape
if n_atlas_voxels != n_voxels:
raise ValueError(
"Number of voxels in the atlas is not the same "
"as the number of voxels in input data (%i != %i)" %
(n_atlas_voxels, n_voxels))
def _check_shapes(shapes,
n_components=None,
atlas_shape=None,
ignore_nsubjects=False):
"""Check that number of voxels is the same for each subjects. Number of
timeframes can vary between sessions but must be consistent across
subjects
Parameters
----------
shapes : array of shape (n_subjects, n_sessions, 2)
Array of shapes of input images
"""
n_subjects, n_sessions, _ = shapes.shape
if n_subjects <= 1 and not ignore_nsubjects:
raise ValueError("The number of subjects should be greater than 1")
n_timeframes_list = [None] * n_sessions
n_voxels = None
for n in range(n_subjects):
for m in range(n_sessions):
if n_timeframes_list[m] is None:
n_timeframes_list[m] = shapes[n, m, 1]
if n_voxels is None:
                n_voxels = shapes[n, m, 0]
if n_timeframes_list[m] != shapes[n, m, 1]:
raise ValueError("Subject %i Session %i does not have the "
"same number of timeframes "
"as Subject %i Session %i" % (n, m, 0, m))
if n_voxels != shapes[n, m, 0]:
raise ValueError("Subject %i Session %i"
" does not have the same number of voxels as "
"Subject %i Session %i." % (n, m, 0, 0))
_check_shapes_atlas_compatibility(n_voxels, np.sum(n_timeframes_list),
n_components, atlas_shape)
def check_atlas(atlas, n_components=None):
""" Check input atlas
Parameters
----------
atlas : array, shape=[n_supervoxels, n_voxels] or array, shape=[n_voxels]
or str or None
Probabilistic or deterministic atlas on which to project the data
Deterministic atlas is an array of shape [n_voxels,] where values
range from 1 to n_supervoxels. Voxels labelled 0 will be ignored.
If atlas is a str the corresponding array is loaded with numpy.load
and expected shape is (n_voxels,) for a deterministic atlas and
(n_supervoxels, n_voxels) for a probabilistic atlas.
n_components : int
Number of timecourses of the shared coordinates
Returns
-------
shape : array or None
atlas shape
"""
if atlas is None:
return None
    if not isinstance(atlas, (np.ndarray, str, np.str_)):
raise ValueError("Atlas is stored using "
"type %s which is neither np.ndarray or str" %
type(atlas))
if isinstance(atlas, np.ndarray):
shape = atlas.shape
else:
shape = get_shape(atlas)
if len(shape) == 1:
# We have a deterministic atlas
atlas_array = safe_load(atlas)
n_voxels = atlas_array.shape[0]
n_supervoxels = len(np.unique(atlas_array)) - 1
shape = (n_supervoxels, n_voxels)
elif len(shape) != 2:
raise ValueError(
"Atlas has %i axes. It should have either 1 or 2 axes." %
len(shape))
n_supervoxels, n_voxels = shape
if n_supervoxels > n_voxels:
raise ValueError("Number of regions in the atlas is bigger than "
"the number of voxels (%i > %i)" %
(n_supervoxels, n_voxels))
if n_components is not None:
if n_supervoxels < n_components:
raise ValueError("Number of regions in the atlas is "
"lower than the number of components "
"(%i < %i)" % (n_supervoxels, n_components))
return shape
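# Small sketch (not from the original source) of check_atlas on a
# deterministic atlas: labels run from 1 to n_supervoxels and 0 marks
# ignored voxels, so six voxels with labels {0, 1, 2} yield the shape
# (n_supervoxels=2, n_voxels=6).
def _check_atlas_sketch():
    atlas = np.array([0, 1, 1, 2, 2, 2])
    assert check_atlas(atlas) == (2, 6)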
def check_imgs(imgs,
n_components=None,
atlas_shape=None,
ignore_nsubjects=False):
"""
Check input images
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_voxels, n_timeframes]
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i of the array is
a numpy array of shape [n_voxels, n_timeframes] that contains the
data of subject i (number of sessions is implicitly 1)
Returns
-------
reshaped_input: bool
True if input had to be reshaped to match the
n_subjects, n_sessions input
new_imgs: list of list of array or np array
input imgs reshaped if it is a list of arrays so that it becomes a
list of list of arrays
shapes: array
Shape of input images
"""
reshaped_input = False
new_imgs = imgs
if isinstance(imgs, list):
_check_imgs_list(imgs)
if isinstance(imgs[0], list):
shapes = _check_imgs_list_list(imgs)
elif isinstance(imgs[0], np.ndarray):
new_imgs, shapes = _check_imgs_list_array(imgs)
reshaped_input = True
else:
raise ValueError(
"Since imgs is a list, it should be a list of list "
"of arrays or a list of arrays but imgs[0] has type %s" %
type(imgs[0]))
elif isinstance(imgs, np.ndarray):
shapes = _check_imgs_array(imgs)
else:
raise ValueError(
"Input imgs should either be a list or an array but has type %s" %
type(imgs))
_check_shapes(shapes, n_components, atlas_shape, ignore_nsubjects)
return reshaped_input, new_imgs, shapes
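# Sketch (not from the original source) of check_imgs input
# normalization: a plain list of per-subject arrays means one implicit
# session, so the data are reshaped into a list of lists and the
# reshaped_input flag records it. Sizes below are illustrative.
def _check_imgs_sketch():
    rng = np.random.RandomState(0)
    imgs = [rng.randn(30, 20) for _ in range(3)]  # 3 subjects, 1 session
    reshaped, new_imgs, shapes = check_imgs(imgs)
    assert reshaped is True
    assert len(new_imgs) == 3 and len(new_imgs[0]) == 1
    assert shapes.shape == (3, 1, 2)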
def check_indexes(indexes, name):
if not (indexes is None or isinstance(indexes, list)
or isinstance(indexes, np.ndarray)):
raise ValueError(
"%s should be either a list, an array or None but received type %s"
% (name, type(indexes)))
def _check_shared_response_list_of_list(shared_response, n_components,
input_shapes):
# Check that shared_response is indeed a list of list of arrays
n_subjects = len(shared_response)
n_sessions = None
for i in range(len(shared_response)):
if not isinstance(shared_response[i], list):
raise ValueError("shared_response[0] is a list but "
"shared_response[%i] is not a list "
"this is incompatible." % i)
assert_non_empty_list(shared_response[i], "shared_response[%i]" % i)
if n_sessions is None:
n_sessions = len(shared_response[i])
elif n_sessions != len(shared_response[i]):
raise ValueError(
"shared_response[%i] has len %i whereas "
"shared_response[0] has len %i. They should "
"have same length" %
(i, len(shared_response[i]), len(shared_response[0])))
for j in range(len(shared_response[i])):
assert_array_2axis(shared_response[i][j],
"shared_response[%i][%i]" % (i, j))
return _check_shared_response_list_sessions([
np.mean([shared_response[i][j] for i in range(n_subjects)], axis=0)
for j in range(n_sessions)
], n_components, input_shapes)
def _check_shared_response_list_sessions(shared_response, n_components,
input_shapes):
for j in range(len(shared_response)):
assert_array_2axis(shared_response[j], "shared_response[%i]" % j)
if input_shapes is not None:
if shared_response[j].shape[1] != input_shapes[0][j][1]:
raise ValueError(
"Number of timeframes in input images during "
"session %i does not match the number of "
"timeframes during session %i "
"of shared_response (%i != %i)" %
(j, j, shared_response[j].shape[1], input_shapes[0, j, 1]))
if n_components is not None:
if shared_response[j].shape[0] != n_components:
raise ValueError(
"Number of components in "
"shared_response during session %i is "
"different than "
"the number of components of the model (%i != %i)" %
(j, shared_response[j].shape[0], n_components))
return shared_response
def _check_shared_response_list_subjects(shared_response, n_components,
input_shapes):
for i in range(len(shared_response)):
assert_array_2axis(shared_response[i], "shared_response[%i]" % i)
return _check_shared_response_array(np.mean(shared_response, axis=0),
n_components, input_shapes)
def _check_shared_response_array(shared_response, n_components, input_shapes):
assert_array_2axis(shared_response, "shared_response")
if input_shapes is None:
new_input_shapes = None
else:
n_subjects, n_sessions, _ = input_shapes.shape
new_input_shapes = np.zeros((n_subjects, 1, 2))
new_input_shapes[:, 0, 0] = input_shapes[:, 0, 0]
new_input_shapes[:, 0, 1] = np.sum(input_shapes[:, :, 1], axis=1)
return _check_shared_response_list_sessions([shared_response],
n_components, new_input_shapes)
def check_shared_response(shared_response,
aggregate="mean",
n_components=None,
input_shapes=None):
"""
Check that shared response has valid input and turn it into
a session-wise shared response
Returns
-------
added_session: bool
True if an artificial sessions was added to match the list of
session input type for shared_response
reshaped_shared_response: list of arrays
shared response (reshaped to match the list of session input)
"""
# Depending on aggregate and shape of input we infer what to do
if isinstance(shared_response, list):
assert_non_empty_list(shared_response, "shared_response")
if isinstance(shared_response[0], list):
if aggregate == "mean":
raise ValueError("self.aggregate has value 'mean' but "
"shared response is a list of list. This is "
"incompatible")
return False, _check_shared_response_list_of_list(
shared_response, n_components, input_shapes)
elif isinstance(shared_response[0], np.ndarray):
if aggregate == "mean":
return False, _check_shared_response_list_sessions(
shared_response, n_components, input_shapes)
else:
return True, _check_shared_response_list_subjects(
shared_response, n_components, input_shapes)
else:
raise ValueError("shared_response is a list but "
"shared_response[0] is neither a list "
"or an array. This is invalid.")
elif isinstance(shared_response, np.ndarray):
return True, _check_shared_response_array(shared_response,
n_components, input_shapes)
else:
raise ValueError("shared_response should be either "
"a list or an array but is of type %s" %
type(shared_response))
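# Sketch (not from the original source) of check_shared_response with a
# bare array: the array is treated as a single session, so the function
# reports that a session axis was added and returns a one-element list.
def _check_shared_response_sketch():
    shared = np.zeros((10, 25))  # (n_components, n_timeframes)
    added_session, reshaped = check_shared_response(shared, n_components=10)
    assert added_session is True
    assert len(reshaped) == 1 and reshaped[0].shape == (10, 25)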
def create_temp_dir(temp_dir):
"""
    Creates temp_dir if it does not already exist; raises an error if it does
"""
if temp_dir is None:
return None
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
else:
raise ValueError("Path %s already exists. "
"When a model is used, filesystem should be cleaned "
"by using the .clean() method" % temp_dir)
def reduce_data_single(subject_index,
session_index,
img,
atlas=None,
inv_atlas=None,
low_ram=False,
temp_dir=None):
"""Reduce data using given atlas
Parameters
----------
subject_index : int
session_index : int
img : str or array
path to data.
Data are loaded with numpy.load and expected shape is
(n_voxels, n_timeframes)
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
img can also be an array of shape (n_voxels, n_timeframes)
atlas : array, shape=[n_supervoxels, n_voxels] or [n_voxels] or None
Probabilistic or deterministic atlas on which to project the data
Deterministic atlas is an array of shape [n_voxels,] where values
range from 1 to n_supervoxels. Voxels labelled 0 will be ignored.
inv_atlas : array, shape=[n_voxels, n_supervoxels] or None
Pseudo inverse of the atlas (only for probabilistic atlases)
temp_dir : str or None
path to dir where temporary results are stored
if None temporary results will be stored in memory. This
        can result in memory errors when the number of subjects
and / or sessions is large
low_ram : bool
        if True and temp_dir is not None, reduced_data will be saved on
        disk; this increases the number of IO operations but reduces memory
        usage when the number of subjects and/or sessions is large
Returns
-------
reduced_data : array, shape=[n_timeframes, n_supervoxels]
reduced data
"""
# Here we return to the conventions of the paper
data = safe_load(img).T
n_timeframes, n_voxels = data.shape
# Here we check that input is normalized
    if (np.max(np.abs(np.mean(data, axis=0))) > 1e-6
            or np.max(np.abs(np.var(data, axis=0) - 1)) > 1e-6):
        raise ValueError("Data in imgs[%i, %i] does not have 0 mean and unit "
                         "variance. If you are using NiftiMasker to mask "
                         "your data (nilearn) please use standardize=True." %
                         (subject_index, session_index))
if inv_atlas is None and atlas is not None:
atlas_values = np.unique(atlas)
if 0 in atlas_values:
atlas_values = atlas_values[1:]
reduced_data = np.array(
[np.mean(data[:, atlas == c], axis=1) for c in atlas_values]).T
elif inv_atlas is not None and atlas is None:
# this means that it is a probabilistic atlas
reduced_data = data.dot(inv_atlas)
else:
reduced_data = data
if low_ram:
name = safe_encode(img)
path = os.path.join(temp_dir, "reduced_data_" + name)
np.save(path, reduced_data)
return path + ".npy"
else:
return reduced_data
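# Illustrative sketch (not from the original source) of the
# deterministic-atlas branch above: voxels sharing a label are averaged,
# label 0 is dropped, and the result is (n_timeframes, n_supervoxels).
def _reduce_data_single_sketch():
    rng = np.random.RandomState(0)
    data = rng.randn(6, 100)  # (n_voxels, n_timeframes)
    # The function expects standardized timecourses (mean 0, variance 1)
    data = (data - data.mean(axis=1, keepdims=True)) \
        / data.std(axis=1, keepdims=True)
    atlas = np.array([0, 1, 1, 2, 2, 2])
    reduced = reduce_data_single(0, 0, data, atlas=atlas)
    assert reduced.shape == (100, 2)  # (n_timeframes, n_supervoxels)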
def reduce_data(imgs, atlas, n_jobs=1, low_ram=False, temp_dir=None):
"""Reduce data using given atlas.
Work done in parallel across subjects.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i of the array is
a numpy array of shape [n_voxels, n_timeframes] that contains the
data of subject i (number of sessions is implicitly 1)
atlas : array, shape=[n_supervoxels, n_voxels] or array, shape=[n_voxels]
or None
Probabilistic or deterministic atlas on which to project the data
Deterministic atlas is an array of shape [n_voxels,] where values
range from 1 to n_supervoxels. Voxels labelled 0 will be ignored.
n_jobs : integer, optional, default=1
The number of CPUs to use to do the computation.
-1 means all CPUs, -2 all CPUs but one, and so on.
temp_dir : str or None
path to dir where temporary results are stored
if None temporary results will be stored in memory. This
        can result in memory errors when the number of subjects
and / or sessions is large
low_ram : bool
        if True and temp_dir is not None, reduced_data will be saved on
        disk; this increases the number of IO operations but reduces memory
        usage when the number of subjects and/or sessions is large
Returns
-------
reduced_data_list : array of str, shape=[n_subjects, n_sessions]
or array, shape=[n_subjects, n_sessions, n_timeframes, n_supervoxels]
Element i, j of the array is a path to the data of subject i collected
during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_supervoxels]
or Element i, j of the array is the data in array of
shape=[n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels
are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
"""
if atlas is None:
A = None
A_inv = None
else:
loaded_atlas = safe_load(atlas)
if len(loaded_atlas.shape) == 2:
A = None
A_inv = loaded_atlas.T.dot(
np.linalg.inv(loaded_atlas.dot(loaded_atlas.T)))
else:
A = loaded_atlas
A_inv = None
n_subjects = len(imgs)
n_sessions = len(imgs[0])
reduced_data_list = Parallel(n_jobs=n_jobs)(
delayed(reduce_data_single)(i,
j,
imgs[i][j],
atlas=A,
inv_atlas=A_inv,
low_ram=low_ram,
temp_dir=temp_dir)
for i in range(n_subjects) for j in range(n_sessions))
if low_ram:
reduced_data_list = np.reshape(reduced_data_list,
(n_subjects, n_sessions))
else:
if len(np.array(reduced_data_list).shape) == 1:
reduced_data_list = np.reshape(reduced_data_list,
(n_subjects, n_sessions))
else:
n_timeframes, n_supervoxels = np.array(reduced_data_list).shape[1:]
reduced_data_list = np.reshape(
reduced_data_list,
(n_subjects, n_sessions, n_timeframes, n_supervoxels))
return reduced_data_list
def _reduced_space_compute_shared_response(reduced_data_list,
reduced_basis_list,
n_components=50):
"""Compute shared response with basis fixed in reduced space
Parameters
----------
reduced_data_list : array of str, shape=[n_subjects, n_sessions]
or array, shape=[n_subjects, n_sessions, n_timeframes, n_supervoxels]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_supervoxels]
or Element i, j of the array is the data in array of
shape=[n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels are
assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
reduced_basis_list : None or list of array, element i has
shape=[n_components, n_supervoxels]
each subject's reduced basis
if None the basis will be generated on the fly
n_components : int or None
number of components
Returns
-------
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
"""
n_subjects, n_sessions = reduced_data_list.shape[:2]
s = [None] * n_sessions
# This is just to check that all subjects have same number of
# timeframes in a given session
for n in range(n_subjects):
for m in range(n_sessions):
data_nm = safe_load(reduced_data_list[n][m])
n_timeframes, n_supervoxels = data_nm.shape
if reduced_basis_list is None:
reduced_basis_list = []
for subject in range(n_subjects):
q = np.eye(n_components, n_supervoxels)
reduced_basis_list.append(q)
basis_n = reduced_basis_list[n]
if s[m] is None:
s[m] = data_nm.dot(basis_n.T)
else:
s[m] = s[m] + data_nm.dot(basis_n.T)
for m in range(n_sessions):
s[m] = s[m] / float(n_subjects)
return s
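# Minimal sketch (not from the original source) of the computation
# above: with bases fixed, the shared response of a session is the
# average over subjects of each subject's reduced data projected through
# its basis. Sizes below are illustrative.
def _shared_response_sketch():
    rng = np.random.RandomState(0)
    # 2 subjects, 1 session, 20 timeframes, 5 supervoxels
    reduced = rng.randn(2, 1, 20, 5)
    bases = [np.eye(3, 5) for _ in range(2)]  # (n_components, n_supervoxels)
    s = _reduced_space_compute_shared_response(reduced, bases, n_components=3)
    expected = np.mean([reduced[n, 0].dot(bases[n].T) for n in range(2)],
                       axis=0)
    assert np.allclose(s[0], expected)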
def _compute_and_save_corr_mat(img, shared_response, temp_dir):
"""computes correlation matrix and stores it
Parameters
----------
img : str
path to data.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
shared_response : array, shape=[n_timeframes, n_components]
shared response
"""
data = safe_load(img).T
name = safe_encode(img)
path = os.path.join(temp_dir, "corr_mat_" + name)
np.save(path, shared_response.T.dot(data))
def _compute_and_save_subject_basis(subject_number, sessions, temp_dir):
"""computes correlation matrix for all sessions
Parameters
----------
subject_number: int
Number that identifies the subject. Basis will be stored in
[temp_dir]/basis_[subject_number].npy
sessions : array of str
Element i of the array is a path to the data collected during
session i.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
        Each voxel's timecourse is assumed to have mean 0 and variance 1
temp_dir : str or None
path to dir where temporary results are stored
if None temporary results will be stored in memory. This
        can result in memory errors when the number of subjects
and / or sessions is large
Returns
-------
basis: array, shape=[n_component, n_voxels] or str
basis of subject [subject_number] or path to this basis
"""
corr_mat = None
for session in sessions:
name = safe_encode(session)
path = os.path.join(temp_dir, "corr_mat_" + name + ".npy")
if corr_mat is None:
corr_mat = np.load(path)
else:
corr_mat += np.load(path)
os.remove(path)
basis_i = _compute_subject_basis(corr_mat)
path = os.path.join(temp_dir, "basis_%i" % subject_number)
np.save(path, basis_i)
return path + ".npy"
def _compute_subject_basis(corr_mat):
"""From correlation matrix between shared response and subject data,
    finds the subject's basis
Parameters
----------
corr_mat: array, shape=[n_component, n_voxels]
or shape=[n_components, n_supervoxels]
correlation matrix between shared response and subject data or
subject reduced data
element k, v is given by S.T.dot(X_i) where S is the shared response
and X_i the data of subject i.
Returns
-------
basis: array, shape=[n_components, n_voxels]
or shape=[n_components, n_supervoxels]
basis of subject or reduced_basis of subject
"""
# The perturbation is only here to be
# consistent with current implementation
# of DetSRM.
perturbation = np.zeros(corr_mat.shape)
np.fill_diagonal(perturbation, 0.001)
U, _, V = scipy.linalg.svd(corr_mat + perturbation, full_matrices=False)
return U.dot(V)
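# Sketch (not from the original source) checking the key property of
# _compute_subject_basis: the SVD construction above yields a
# row-orthonormal basis, i.e. W W^T is the identity. Sizes are
# illustrative.
def _compute_subject_basis_sketch():
    rng = np.random.RandomState(0)
    corr_mat = rng.randn(10, 200)  # (n_components, n_voxels)
    w = _compute_subject_basis(corr_mat)
    assert w.shape == (10, 200)
    assert np.allclose(w.dot(w.T), np.eye(10))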
def fast_srm(reduced_data_list,
n_iter=10,
n_components=None,
low_ram=False,
seed=0):
"""Computes shared response and basis in reduced space
Parameters
----------
reduced_data_list : array, shape=[n_subjects, n_sessions]
or array, shape=[n_subjects, n_sessions, n_timeframes, n_supervoxels]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_timeframes, n_supervoxels]
or Element i, j of the array is the data in array of
shape=[n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels are
assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
n_iter : int
Number of iterations performed
n_components : int or None
number of components
Returns
-------
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
"""
if low_ram:
return lowram_srm(reduced_data_list, n_iter, n_components)
else:
        # We need to switch data to DetSRM format:
        # in DetSRM all sessions are concatenated, whereas FastSRM
        # supports multiple sessions.
n_subjects, n_sessions = reduced_data_list.shape[:2]
# We store the correspondence between timeframes and session
timeframes_slices = []
current_j = 0
for j in range(n_sessions):
timeframes_slices.append(
slice(current_j, current_j + len(reduced_data_list[0, j])))
current_j += len(reduced_data_list[0][j])
# Now we can concatenate everything
X = [
np.concatenate(reduced_data_list[i], axis=0).T
for i in range(n_subjects)
]
srm = DetSRM(n_iter=n_iter, features=n_components, rand_seed=seed)
srm.fit(X)
# SRM gives a list of data projected in shared space
# we get the shared response by averaging those
concatenated_s = np.mean(srm.transform(X), axis=0).T
# Let us return the shared response sliced by sessions
return [concatenated_s[i] for i in timeframes_slices]
def lowram_srm(reduced_data_list, n_iter=10, n_components=None):
"""Computes shared response and basis in reduced space
Parameters
----------
reduced_data_list : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_timeframes, n_supervoxels]
n_timeframes and n_supervoxels are
assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
n_iter : int
Number of iterations performed
n_components : int or None
number of components
Returns
-------
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
"""
n_subjects, n_sessions = reduced_data_list.shape[:2]
shared_response = _reduced_space_compute_shared_response(
reduced_data_list, None, n_components)
reduced_basis = [None] * n_subjects
for _ in range(n_iter):
for n in range(n_subjects):
cov = None
for m in range(n_sessions):
data_nm = np.load(reduced_data_list[n, m])
if cov is None:
cov = shared_response[m].T.dot(data_nm)
else:
cov += shared_response[m].T.dot(data_nm)
reduced_basis[n] = _compute_subject_basis(cov)
shared_response = _reduced_space_compute_shared_response(
reduced_data_list, reduced_basis, n_components)
return shared_response
def _compute_basis_subject_online(sessions, shared_response_list):
"""Computes subject's basis with shared response fixed
Parameters
----------
sessions : array of str
Element i of the array is a path to the data
collected during session i.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
shared_response_list : list of array, element i has
shape=[n_timeframes, n_components]
shared response, element i is the shared response during session i
Returns
-------
basis: array, shape=[n_components, n_voxels]
basis
"""
basis_i = None
i = 0
for session in sessions:
data = safe_load(session).T
if basis_i is None:
basis_i = shared_response_list[i].T.dot(data)
else:
basis_i += shared_response_list[i].T.dot(data)
i += 1
del data
return _compute_subject_basis(basis_i)
def _compute_shared_response_online_single(subjects, basis_list, temp_dir,
subjects_indexes, aggregate):
"""Computes shared response during one session with basis fixed
Parameters
----------
subjects : array of str
Element i of the array is a path to the data of subject i.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
basis_list : None or list of array, element i has
shape=[n_components, n_voxels]
basis of all subjects, element i is the basis of subject i
temp_dir : None or str
path to basis folder where file basis_%i.npy contains the basis of
subject i
subjects_indexes : list of int or None
list of indexes corresponding to the subjects to use to compute
shared response
aggregate: str or None, default="mean"
if "mean": returns the mean shared response S from all subjects
if None: returns the subject-specific response in shared space S_i
Returns
-------
shared_response : array, shape=[n_timeframes, n_components] or list
shared response
"""
n = 0
if aggregate == "mean":
shared_response = None
if aggregate is None:
shared_response = []
for k, i in enumerate(subjects_indexes):
subject = subjects[k]
# Transpose to be consistent with paper
data = safe_load(subject).T
if temp_dir is None:
basis_i = basis_list[i]
else:
basis_i = np.load(os.path.join(temp_dir, "basis_%i.npy" % i))
if aggregate == "mean":
if shared_response is None:
shared_response = data.dot(basis_i.T)
else:
shared_response += data.dot(basis_i.T)
n += 1
if aggregate is None:
shared_response.append(data.dot(basis_i.T))
if aggregate is None:
return shared_response
if aggregate == "mean":
return shared_response / float(n)
def _compute_shared_response_online(imgs, basis_list, temp_dir, n_jobs,
subjects_indexes, aggregate):
"""Computes shared response with basis fixed
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected shape is
[n_timeframes, n_voxels]
n_timeframes and n_voxels are assumed to be the same across subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j of
the array is a numpy array of shape [n_voxels, n_timeframes] that
contains the data of subject i collected during session j.
basis_list : None or list of array, element i has
shape=[n_components, n_voxels]
basis of all subjects, element i is the basis of subject i
temp_dir : None or str
path to basis folder where file basis_%i.npy contains the basis of
subject i
n_jobs : integer, optional, default=1
The number of CPUs to use to do the computation.
-1 means all CPUs, -2 all CPUs but one, and so on.
subjects_indexes : list or None
list of indexes corresponding to the subjects to use to compute
shared response
aggregate: str or None, default="mean"
if "mean": returns the mean shared response S from all subjects
if None: returns the subject-specific response in shared space S_i
Returns
-------
shared_response_list : list of array or list of list of array
shared response, element i is the shared response during session i
or element i, j is the shared response of subject i during session j
"""
n_subjects = len(subjects_indexes)
n_sessions = len(imgs[0])
shared_response_list = Parallel(n_jobs=n_jobs)(
delayed(_compute_shared_response_online_single)
([imgs[i][j] for i in range(n_subjects)], basis_list, temp_dir,
subjects_indexes, aggregate) for j in range(n_sessions))
if aggregate is None:
shared_response_list = [[
shared_response_list[j][i].T for j in range(n_sessions)
] for i in range(n_subjects)]
if aggregate == "mean":
shared_response_list = [
shared_response_list[j].T for j in range(n_sessions)
]
return shared_response_list
class FastSRM(BaseEstimator, TransformerMixin):
"""SRM decomposition using a very low amount of memory and \
computational power thanks to the use of an atlas \
as described in [Richard2019]_.
Given multi-subject data, factorize it as a shared response S \
among all subjects and an orthogonal transform (basis) W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
atlas : array, shape=[n_supervoxels, n_voxels] or array,\
shape=[n_voxels] or str or None, default=None
Probabilistic or deterministic atlas on which to project the data. \
Deterministic atlas is an array of shape [n_voxels,] \
where values range from 1 \
to n_supervoxels. Voxels labelled 0 will be ignored. If atlas is a str the \
corresponding array is loaded with numpy.load and expected shape \
is (n_voxels,) for a deterministic atlas and \
(n_supervoxels, n_voxels) for a probabilistic atlas.
n_components : int
Number of timecourses of the shared coordinates
n_iter : int
Number of iterations to perform
temp_dir : str or None
Path to dir where temporary results are stored. If None \
temporary results will be stored in memory. This can result in memory \
errors when the number of subjects and/or sessions is large
low_ram : bool
If True and temp_dir is not None, reduced_data will be saved on \
disk. This increases the number of IO operations but reduces memory \
usage when the number of subjects and/or sessions is large
seed : int
Seed used for random sampling.
n_jobs : int, optional, default=1
The number of CPUs to use to do the computation. \
-1 means all CPUs, -2 all CPUs but one, and so on.
verbose : bool or "warn"
If True, logs are enabled. If False, logs are disabled. \
If "warn" only warnings are printed.
aggregate: str or None, default="mean"
If "mean", shared_response is the mean shared response \
from all subjects. If None, shared_response contains all \
subject-specific responses in shared space
Attributes
----------
`basis_list`: list of array, element i has \
shape=[n_components, n_voxels] or list of str
- if basis is a list of array, element i is the basis of subject i
- if basis is a list of str, element i is the path to the basis \
of subject i that is loaded with np.load yielding an array of \
shape [n_components, n_voxels].
Note that any call to the clean method erases this attribute
Note
-----
**References:**
H. Richard, L. Martin, A. Pinho, J. Pillow, B. Thirion, 2019: \
Fast shared response model for fMRI data (https://arxiv.org/pdf/1909.12537.pdf)
"""
def __init__(self,
atlas=None,
n_components=20,
n_iter=100,
temp_dir=None,
low_ram=False,
seed=None,
n_jobs=1,
verbose="warn",
aggregate="mean"):
self.seed = seed
self.n_jobs = n_jobs
self.verbose = verbose
self.n_components = n_components
self.n_iter = n_iter
self.atlas = atlas
if aggregate is not None and aggregate != "mean":
raise ValueError("aggregate can have only value mean or None")
self.aggregate = aggregate
self.basis_list = None
if temp_dir is None:
if self.verbose == "warn" or self.verbose is True:
logger.warning("temp_dir has value None. "
"All basis (spatial maps) and reconstructed "
"data will therefore be kept in memory."
"This can lead to memory errors when the "
"number of subjects "
"and/or sessions is large.")
self.temp_dir = None
self.low_ram = False
if temp_dir is not None:
self.temp_dir = os.path.join(temp_dir,
"fastsrm" + str(uuid.uuid4()))
self.low_ram = low_ram
def clean(self):
"""This erases temporary files and basis_list attribute to \
free memory. This method should be called when fitted model \
is not needed anymore.
"""
if self.temp_dir is not None:
if os.path.exists(self.temp_dir):
for root, dirs, files in os.walk(self.temp_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
os.rmdir(self.temp_dir)
if self.basis_list is not None:
            self.basis_list = None
def fit(self, imgs):
"""Computes basis across subjects from input imgs
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
Returns
-------
self : object
Returns the instance itself. Contains attributes listed \
at the object level.
"""
atlas_shape = check_atlas(self.atlas, self.n_components)
reshaped_input, imgs, shapes = check_imgs(
imgs, n_components=self.n_components, atlas_shape=atlas_shape)
self.clean()
create_temp_dir(self.temp_dir)
if self.verbose is True:
logger.info("[FastSRM.fit] Reducing data")
reduced_data = reduce_data(imgs,
atlas=self.atlas,
n_jobs=self.n_jobs,
low_ram=self.low_ram,
temp_dir=self.temp_dir)
if self.verbose is True:
logger.info("[FastSRM.fit] Finds shared "
"response using reduced data")
shared_response_list = fast_srm(reduced_data,
n_iter=self.n_iter,
n_components=self.n_components,
low_ram=self.low_ram,
seed=self.seed)
if self.verbose is True:
logger.info("[FastSRM.fit] Finds basis using "
"full data and shared response")
if self.n_jobs == 1:
basis = []
for i, sessions in enumerate(imgs):
basis_i = _compute_basis_subject_online(
sessions, shared_response_list)
if self.temp_dir is None:
basis.append(basis_i)
else:
path = os.path.join(self.temp_dir, "basis_%i" % i)
np.save(path, basis_i)
basis.append(path + ".npy")
del basis_i
else:
if self.temp_dir is None:
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_basis_subject_online)(
sessions, shared_response_list) for sessions in imgs)
else:
Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_corr_mat)(
imgs[i][j], shared_response_list[j], self.temp_dir)
for j in range(len(imgs[0])) for i in range(len(imgs)))
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_subject_basis)(i, sessions,
self.temp_dir)
for i, sessions in enumerate(imgs))
self.basis_list = basis
return self
def fit_transform(self, imgs, subjects_indexes=None):
"""Computes basis across subjects and shared response from input imgs
return shared response.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
subjects_indexes : list or None:
if None imgs[i] will be transformed using basis_list[i]. \
Otherwise imgs[i] will be transformed using basis_list[subjects_indexes[i]]
Returns
--------
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
"""
self.fit(imgs)
return self.transform(imgs, subjects_indexes=subjects_indexes)
def transform(self, imgs, subjects_indexes=None):
"""From data in imgs and basis from training data,
computes shared response.
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
subjects_indexes : list or None:
if None imgs[i] will be transformed using basis_list[i]. \
Otherwise imgs[i] will be transformed using basis_list[subjects_indexes[i]]
Returns
--------
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
"""
aggregate = self.aggregate
if self.basis_list is None:
raise NotFittedError("The model fit has not been run yet.")
atlas_shape = check_atlas(self.atlas, self.n_components)
reshaped_input, imgs, shapes = check_imgs(
imgs,
n_components=self.n_components,
atlas_shape=atlas_shape,
ignore_nsubjects=True)
check_indexes(subjects_indexes, "subjects_indexes")
if subjects_indexes is None:
subjects_indexes = np.arange(len(imgs))
else:
subjects_indexes = np.array(subjects_indexes)
# Transform specific checks
if len(subjects_indexes) < len(imgs):
raise ValueError("Input data imgs has len %i whereas "
"subject_indexes has len %i. "
"The number of basis used to compute "
"the shared response should be equal "
"to the number of subjects in imgs" %
(len(imgs), len(subjects_indexes)))
assert_valid_index(subjects_indexes, len(self.basis_list),
"subjects_indexes")
shared_response = _compute_shared_response_online(
imgs, self.basis_list, self.temp_dir, self.n_jobs,
subjects_indexes, aggregate)
# If shared response has only 1 session we need to reshape it
if reshaped_input:
if aggregate == "mean":
shared_response = shared_response[0]
if aggregate is None:
shared_response = [
shared_response[i][0] for i in range(len(subjects_indexes))
]
return shared_response
def inverse_transform(
self,
shared_response,
subjects_indexes=None,
sessions_indexes=None,
):
"""From shared response and basis from training data
reconstruct subject's data
Parameters
----------
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
subjects_indexes : list or None
if None reconstructs data of all subjects used during train. \
Otherwise reconstructs data of subjects specified by subjects_indexes.
sessions_indexes : list or None
if None reconstructs data of all sessions. \
Otherwise reconstructs data of sessions specified by sessions_indexes.
Returns
-------
reconstructed_data: list of list of arrays or list of arrays
- if reconstructed_data is a list of list : element i, j is \
the reconstructed data for subject subjects_indexes[i] and \
session sessions_indexes[j] as an np array of shape n_voxels, \
n_timeframes
- if reconstructed_data is a list : element i is the \
reconstructed data for subject \
subject_indexes[i] as an np array of shape n_voxels, n_timeframes
"""
added_session, shared = check_shared_response(
shared_response, self.aggregate, n_components=self.n_components)
n_subjects = len(self.basis_list)
n_sessions = len(shared)
for j in range(n_sessions):
assert_array_2axis(shared[j], "shared_response[%i]" % j)
check_indexes(subjects_indexes, "subjects_indexes")
check_indexes(sessions_indexes, "sessions_indexes")
if subjects_indexes is None:
subjects_indexes = np.arange(n_subjects)
else:
subjects_indexes = np.array(subjects_indexes)
assert_valid_index(subjects_indexes, n_subjects, "subjects_indexes")
if sessions_indexes is None:
sessions_indexes = np.arange(len(shared))
else:
sessions_indexes = np.array(sessions_indexes)
assert_valid_index(sessions_indexes, n_sessions, "sessions_indexes")
data = []
for i in subjects_indexes:
data_ = []
basis_i = safe_load(self.basis_list[i])
if added_session:
data.append(basis_i.T.dot(shared[0]))
else:
for j in sessions_indexes:
data_.append(basis_i.T.dot(shared[j]))
data.append(data_)
return data
def add_subjects(self, imgs, shared_response):
""" Add subjects to the current fit. Each new basis will be \
appended at the end of the list of basis (which can \
be accessed using self.basis)
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions] or \
list of list of arrays or list of arrays
Element i, j of the array is a path to the data of subject i \
collected during session j. Data are loaded with numpy.load and expected \
shape is [n_voxels, n_timeframes] n_timeframes and n_voxels are assumed \
to be the same across subjects n_timeframes can vary across sessions. \
Each voxel's timecourse is assumed to have mean 0 and variance 1
imgs can also be a list of list of arrays where element i, j \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i collected during session j.
imgs can also be a list of arrays where element i \
of the array is a numpy array of shape [n_voxels, n_timeframes] \
that contains the data of subject i (number of sessions is implicitly 1)
shared_response : list of arrays, list of list of arrays or array
- if imgs is a list of array and self.aggregate="mean": shared \
response is an array of shape (n_components, n_timeframes)
- if imgs is a list of array and self.aggregate=None: shared \
response is a list of array, element i is the projection of data of \
subject i in shared space.
- if imgs is an array or a list of list of array and \
self.aggregate="mean": shared response is a list of array, \
element j is the shared response during session j
- if imgs is an array or a list of list of array and \
self.aggregate=None: shared response is a list of list of array, \
element i, j is the projection of data of subject i collected \
during session j in shared space.
"""
atlas_shape = check_atlas(self.atlas, self.n_components)
reshaped_input, imgs, shapes = check_imgs(
imgs,
n_components=self.n_components,
atlas_shape=atlas_shape,
ignore_nsubjects=True)
_, shared_response_list = check_shared_response(
shared_response,
n_components=self.n_components,
aggregate=self.aggregate,
input_shapes=shapes)
# we need to transpose shared_response_list to be consistent with
# other functions
shared_response_list = [
shared_response_list[j].T for j in range(len(shared_response_list))
]
if self.n_jobs == 1:
basis = []
for i, sessions in enumerate(imgs):
basis_i = _compute_basis_subject_online(
sessions, shared_response_list)
if self.temp_dir is None:
basis.append(basis_i)
else:
path = os.path.join(
self.temp_dir, "basis_%i" % (len(self.basis_list) + i))
np.save(path, basis_i)
basis.append(path + ".npy")
del basis_i
else:
if self.temp_dir is None:
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_basis_subject_online)(
sessions, shared_response_list) for sessions in imgs)
else:
Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_corr_mat)(
imgs[i][j], shared_response_list[j], self.temp_dir)
for j in range(len(imgs[0])) for i in range(len(imgs)))
basis = Parallel(n_jobs=self.n_jobs)(
delayed(_compute_and_save_subject_basis)(
len(self.basis_list) + i, sessions, self.temp_dir)
for i, sessions in enumerate(imgs))
self.basis_list += basis
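# End-to-end usage sketch for FastSRM (illustrative, not from the
# original source): fit on standardized multi-subject data given as a
# list of arrays, project into the shared space, and reconstruct. The
# sizes and the standardize helper below are hypothetical.
def _fastsrm_usage_sketch():
    rng = np.random.RandomState(0)

    def standardize(x):
        return (x - x.mean(axis=1, keepdims=True)) \
            / x.std(axis=1, keepdims=True)

    # 3 subjects, 1 implicit session, 100 voxels, 50 timeframes
    imgs = [standardize(rng.randn(100, 50)) for _ in range(3)]
    model = FastSRM(n_components=5, n_iter=5, verbose=False)
    shared = model.fit_transform(imgs)
    # With aggregate="mean" and list-of-arrays input, shared is an array
    assert shared.shape == (5, 50)
    reconstructed = model.inverse_transform(shared)
    assert reconstructed[0].shape == (100, 50)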
| 65,510 | 36.413478 | 79 | py |
brainiak | brainiak-master/brainiak/searchlight/searchlight.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pool
import numpy as np
from mpi4py import MPI
from scipy.spatial.distance import cityblock, euclidean
from ..utils.utils import usable_cpu_count
"""Distributed Searchlight
"""
__all__ = [
"Ball",
"Cube",
"Diamond",
"Searchlight",
"Shape",
]
class Shape:
"""Shape
Searchlight shape which is contained in a cube sized
(2*rad+1,2*rad+1,2*rad+1)
Attributes
----------
mask_ : a 3D boolean numpy array of size (2*rad+1,2*rad+1,2*rad+1)
which is set to True within the boundaries of the desired shape
Parameters
----------
rad: radius, in voxels, of the sphere inscribed in the
searchlight cube, not counting the center voxel
"""
def __init__(self, rad):
self.rad = rad
class Cube(Shape):
"""Cube
Searchlight shape which is a cube of size (2*rad+1,2*rad+1,2*rad+1)
Parameters
----------
rad: radius, in voxels, of the sphere inscribed in the
searchlight cube, not counting the center voxel
"""
def __init__(self, rad):
super().__init__(rad)
        self.mask_ = np.ones((2*rad+1, 2*rad+1, 2*rad+1), dtype=bool)
class Diamond(Shape):
"""Diamond
Searchlight shape which is a diamond
inscribed in a cube of size (2*rad+1,2*rad+1,2*rad+1).
    Any location in the cube whose Manhattan distance from the center
    point is less than or equal to rad is set to True.
Parameters
----------
rad: radius, in voxels, of the sphere inscribed in the
searchlight cube, not counting the center voxel
"""
def __init__(self, rad):
super().__init__(rad)
        self.mask_ = np.zeros((2*rad+1, 2*rad+1, 2*rad+1), dtype=bool)
for r1 in range(2*self.rad+1):
for r2 in range(2*self.rad+1):
for r3 in range(2*self.rad+1):
if(cityblock((r1, r2, r3),
(self.rad, self.rad, self.rad)) <= self.rad):
self.mask_[r1, r2, r3] = True
class Ball(Shape):
"""Ball
Searchlight shape which is a ball
inscribed in a cube of size (2*rad+1,2*rad+1,2*rad+1).
    Any location in the cube whose Euclidean distance from the center
    point is less than or equal to rad is set to True.
Parameters
----------
rad: radius, in voxels, of the sphere inscribed in the
searchlight cube, not counting the center voxel
"""
def __init__(self, rad):
super().__init__(rad)
        self.mask_ = np.zeros((2*rad+1, 2*rad+1, 2*rad+1), dtype=bool)
for r1 in range(2*self.rad+1):
for r2 in range(2*self.rad+1):
for r3 in range(2*self.rad+1):
if(euclidean((r1, r2, r3),
(self.rad, self.rad, self.rad)) <= self.rad):
self.mask_[r1, r2, r3] = True
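# Quick illustrative comparison (not from the original source) of the
# three searchlight shapes: for a given radius the Cube mask contains
# the most voxels and the Diamond mask the fewest, with the Ball in
# between.
def _shape_comparison_sketch():
    rad = 2
    cube, ball, diamond = Cube(rad), Ball(rad), Diamond(rad)
    assert cube.mask_.sum() == (2 * rad + 1) ** 3  # 125 voxels
    assert diamond.mask_.sum() <= ball.mask_.sum() <= cube.mask_.sum()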
class Searchlight:
"""Distributed Searchlight
Run a user-defined function over each voxel in a multi-subject
dataset.
Optionally, users can define a block function which runs over
larger portions of the volume called blocks.
Parameters
----------
sl_rad: radius, in voxels, of the sphere inscribed in the
searchlight cube, not counting the center voxel
max_blk_edge: max edge length, in voxels, of the 3D block
shape: brainiak.searchlight.searchlight.Shape indicating the
shape in voxels of the searchlight region
min_active_voxels_proportion: float
If a searchlight region does not have more than this minimum
proportion of active voxels in the mask, it is not processed by the
searchlight function. The mask used for the test is the
intersection of the global (brain) mask and the `Shape` mask. The
seed (central) voxel of the searchlight region is taken into
consideration.
"""
def __init__(self, sl_rad=1, max_blk_edge=10, shape=Cube,
min_active_voxels_proportion=0):
assert sl_rad >= 0, 'sl_rad should not be negative'
assert max_blk_edge > 0, 'max_blk_edge should be positive'
self.sl_rad = sl_rad
self.max_blk_edge = max_blk_edge
self.min_active_voxels_proportion = min_active_voxels_proportion
self.comm = MPI.COMM_WORLD
self.shape = shape(sl_rad).mask_
self.bcast_var = None
def _get_ownership(self, data):
"""Determine on which rank each subject currently resides
Parameters
----------
data: list of 4D arrays with subject data
Returns
-------
list of ranks indicating the owner of each subject
"""
rank = self.comm.rank
B = [(rank, idx) for (idx, c) in enumerate(data) if c is not None]
C = self.comm.allreduce(B)
ownership = [None] * len(data)
for c in C:
ownership[c[1]] = c[0]
return ownership
def _get_blocks(self, mask):
"""Divide the volume into a set of blocks
Ignore blocks that have no active voxels in the mask
Parameters
----------
mask: a boolean 3D array which is true at every active voxel
Returns
-------
list of tuples containing block information:
- a triple containing top left point of the block and
- a triple containing the size in voxels of the block
"""
blocks = []
outerblk = self.max_blk_edge + 2*self.sl_rad
for i in range(0, mask.shape[0], self.max_blk_edge):
for j in range(0, mask.shape[1], self.max_blk_edge):
for k in range(0, mask.shape[2], self.max_blk_edge):
block_shape = mask[i:i+outerblk,
j:j+outerblk,
k:k+outerblk
].shape
if np.any(
mask[i+self.sl_rad:i+block_shape[0]-self.sl_rad,
j+self.sl_rad:j+block_shape[1]-self.sl_rad,
k+self.sl_rad:k+block_shape[2]-self.sl_rad]):
blocks.append(((i, j, k), block_shape))
return blocks
def _get_block_data(self, mat, block):
"""Retrieve a block from a 3D or 4D volume
Parameters
----------
mat: a 3D or 4D volume
block: a tuple containing block information:
- a triple containing the lowest-coordinate voxel in the block
- a triple containing the size in voxels of the block
Returns
-------
In the case of a 3D array, a 3D subarray at the block location
In the case of a 4D array, a 4D subarray at the block location,
including the entire fourth dimension.
"""
(pt, sz) = block
if len(mat.shape) == 3:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2]].copy()
elif len(mat.shape) == 4:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2],
:].copy()
def _split_volume(self, mat, blocks):
"""Convert a volume into a list of block data
Parameters
----------
mat: A 3D or 4D array to be split
blocks: a list of tuples containing block information:
- a triple containing the top left point of the block and
- a triple containing the size in voxels of the block
Returns
-------
A list of the subarrays corresponding to each block
"""
return [self._get_block_data(mat, block) for block in blocks]
def _scatter_list(self, data, owner):
"""Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks
"""
rank = self.comm.rank
size = self.comm.size
subject_submatrices = []
nblocks = self.comm.bcast(len(data)
if rank == owner else None, root=owner)
# For each submatrix
for idx in range(0, nblocks, size):
padded = None
extra = max(0, idx+size - nblocks)
# Pad with "None" so scatter can go to all processes
if data is not None:
padded = data[idx:idx+size]
if extra > 0:
padded = padded + [None]*extra
# Scatter submatrices to all processes
mytrans = self.comm.scatter(padded, root=owner)
# Contribute submatrix to subject list
if mytrans is not None:
subject_submatrices += [mytrans]
return subject_submatrices
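# Worked example (added; numbers are hypothetical): scattering 5 blocks
# across 2 ranks proceeds in rounds of `size` items, so rank 0 receives
# blocks 0, 2 and 4 while rank 1 receives blocks 1 and 3. The last round
# is padded with None, which the receiving rank simply discards.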
def distribute(self, subjects, mask):
"""Distribute data to MPI ranks
Parameters
----------
subjects : list of 4D arrays containing data for one or more subjects.
Each entry of the list must be present on at most one rank,
and the other ranks contain a "None" at this list location.
For example, for 3 ranks you may lay out the data in the
following manner:
Rank 0: [Subj0, None, None]
Rank 1: [None, Subj1, None]
Rank 2: [None, None, Subj2]
Or alternatively, you may lay out the data in this manner:
Rank 0: [Subj0, Subj1, Subj2]
Rank 1: [None, None, None]
Rank 2: [None, None, None]
mask: 3D array with "True" entries at active vertices
"""
if mask.ndim != 3:
raise ValueError('mask should be a 3D array')
for (idx, subj) in enumerate(subjects):
if subj is not None:
if subj.ndim != 4:
raise ValueError('subjects[{}] must be 4D'.format(idx))
self.mask = mask
rank = self.comm.rank
# Get/set ownership
ownership = self._get_ownership(subjects)
all_blocks = self._get_blocks(mask) if rank == 0 else None
all_blocks = self.comm.bcast(all_blocks)
# Divide data and mask
splitsubj = [self._split_volume(s, all_blocks)
if s is not None else None
for s in subjects]
submasks = self._split_volume(mask, all_blocks)
# Scatter points, data, and mask
self.blocks = self._scatter_list(all_blocks, 0)
self.submasks = self._scatter_list(submasks, 0)
self.subproblems = [self._scatter_list(s, ownership[s_idx])
for (s_idx, s) in enumerate(splitsubj)]
def broadcast(self, bcast_var):
"""Distribute data to processes
Parameters
----------
bcast_var: shared data which is broadcast to all processes
"""
self.bcast_var = self.comm.bcast(bcast_var)
def run_block_function(self, block_fn, extra_block_fn_params=None,
pool_size=None):
"""Perform a function for each block in a volume.
Parameters
----------
block_fn: function to apply to each block:
Parameters
data: list of 4D arrays containing subset of subject data,
which is padded with sl_rad voxels.
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
extra_params: extra parameters
Returns
3D array which is the same size as the mask
input with padding removed
extra_block_fn_params: tuple
Extra parameters to pass to the block function
pool_size: int
Maximum number of processes running the block function in parallel.
If None, number of available hardware threads, considering cpusets
restrictions.
"""
rank = self.comm.rank
results = []
usable_cpus = usable_cpu_count()
if pool_size is None:
processes = usable_cpus
else:
processes = min(pool_size, usable_cpus)
if processes > 1:
with Pool(processes) as pool:
for idx, block in enumerate(self.blocks):
result = pool.apply_async(
block_fn,
([subproblem[idx] for subproblem in self.subproblems],
self.submasks[idx],
self.sl_rad,
self.bcast_var,
extra_block_fn_params))
results.append((block[0], result))
local_outputs = [(result[0], result[1].get())
for result in results]
else:
# If we are only using one CPU core, there is no need to create a Pool,
# cause an underlying fork(), and send the data to that process.
# Just do it here in serial. This will save copying the memory
# and will stop a fork() which can cause problems in some MPI
# implementations.
for idx, block in enumerate(self.blocks):
subprob_list = [subproblem[idx]
for subproblem in self.subproblems]
result = block_fn(
subprob_list,
self.submasks[idx],
self.sl_rad,
self.bcast_var,
extra_block_fn_params)
results.append((block[0], result))
local_outputs = [(result[0], result[1]) for result in results]
# Collect results
global_outputs = self.comm.gather(local_outputs)
# Coalesce results
outmat = np.empty(self.mask.shape, dtype=object)
if rank == 0:
for go_rank in global_outputs:
for (pt, mat) in go_rank:
coords = np.s_[
pt[0]+self.sl_rad:pt[0]+self.sl_rad+mat.shape[0],
pt[1]+self.sl_rad:pt[1]+self.sl_rad+mat.shape[1],
pt[2]+self.sl_rad:pt[2]+self.sl_rad+mat.shape[2]
]
outmat[coords] = mat
return outmat
def run_searchlight(self, voxel_fn, pool_size=None):
"""Perform a function at each voxel which is set to True in the
user-provided mask. The mask passed to the searchlight function will be
further masked by the user-provided searchlight shape.
Parameters
----------
voxel_fn: function to apply at each voxel
Must be `serializable using pickle
<https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled>`_.
Parameters
subj: list of 4D arrays containing subset of subject data
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
Returns
Value of any pickle-able type
Returns
-------
A volume of the same size as the mask, except that a border of width
equal to the searchlight radius has been removed from each edge of
the volume. This volume contains the values returned from the
searchlight function at each voxel which was set to True in the mask,
and None elsewhere.
"""
extra_block_fn_params = (voxel_fn, self.shape,
self.min_active_voxels_proportion)
block_fn_result = self.run_block_function(_singlenode_searchlight,
extra_block_fn_params,
pool_size)
return block_fn_result
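# End-to-end usage sketch (added; `my_data` and `my_mask` are hypothetical
# variables). Every MPI rank runs the same driver; the assembled volume is
# returned on rank 0:
#
#     def voxel_fn(subjects, mask, sl_rad, bcast_var):
#         # e.g. mean activity of the first subject within the region
#         return subjects[0][mask].mean()
#
#     sl = Searchlight(sl_rad=1, max_blk_edge=5)
#     sl.distribute([my_data], my_mask)  # my_data: 4D array, my_mask: 3D bool
#     sl.broadcast(None)
#     result = sl.run_searchlight(voxel_fn)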
def _singlenode_searchlight(data, msk, mysl_rad, bcast_var, extra_params):
"""Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function.
"""
voxel_fn = extra_params[0]
shape_mask = extra_params[1]
min_active_voxels_proportion = extra_params[2]
outmat = np.empty(msk.shape, dtype=object)
if mysl_rad > 0:
outmat = outmat[mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad]
for i in range(0, outmat.shape[0]):
for j in range(0, outmat.shape[1]):
for k in range(0, outmat.shape[2]):
if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]:
searchlight_slice = np.s_[
i:i+2*mysl_rad+1,
j:j+2*mysl_rad+1,
k:k+2*mysl_rad+1]
voxel_fn_mask = msk[searchlight_slice] * shape_mask
if (min_active_voxels_proportion == 0
or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size
> min_active_voxels_proportion):
outmat[i, j, k] = voxel_fn(
[subject[searchlight_slice] for subject in data],
msk[searchlight_slice] * shape_mask,
mysl_rad,
bcast_var)
return outmat
| 18,584 | 32.069395 | 96 | py |
brainiak | brainiak-master/brainiak/searchlight/__init__.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed Searchlight"""
| 622 | 40.533333 | 75 | py |
brainiak | brainiak-master/brainiak/reprsimil/brsa.py | # Copyright 2016 Mingbo Cai, Princeton Neuroscience Instititute,
# Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bayesian Representational Similarity Analysis (BRSA)
This implementation is based on [Cai2016]_ and [Cai2019]_:
.. [Cai2016] "A Bayesian method for reducing bias in neural
representational similarity analysis",
M.B. Cai, N.W. Schuck, J.W. Pillow, Y. Niv,
Advances in Neural Information Processing Systems 29, 2016, 4952--4960
Available at:
http://papers.nips.cc/paper/6131-a-bayesian-method-for-reducing-bias-in-neural-representational-similarity-analysis.pdf
.. [Cai2019] "Representational structure or task structure?
Bias in neural representational similarity analysis and
a Bayesian method for reducing bias",
M.B. Cai, N.W. Schuck, J.W. Pillow, Y. Niv,
PLoS computational biology 15.5 (2019): e1006299.
https://doi.org/10.1371/journal.pcbi.1006299
`.BRSA` is based on [Cai2016] with additional consideration
of spatial noise correlation proposed in [Cai2019].
`.GBRSA` is based on [Cai2019].
`.GBRSA` may perform better than `.BRSA` due to marginalization of all
voxel-wise parameters. It can be use for single participant as well.
"""
# Authors: Mingbo Cai
# Princeton Neuroscience Institute, Princeton University, 2016
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
import scipy.special
import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite, check_random_state
from sklearn.decomposition import PCA, FactorAnalysis, SparsePCA, FastICA
import logging
import brainiak.utils.utils as utils
import scipy.spatial.distance as spdist
from nitime import algorithms as alg
import copy
logger = logging.getLogger(__name__)
__all__ = [
"BRSA",
"GBRSA",
"Ncomp_SVHT_MG_DLD_approx",
"prior_GP_var_inv_gamma",
"prior_GP_var_half_cauchy",
]
def prior_GP_var_inv_gamma(y_invK_y, n_y, tau_range):
""" Imposing an inverse-Gamma prior onto the variance (tau^2)
parameter of a Gaussian Process, which is in turn a prior
imposed over an unknown function y = f(x).
The inverse-Gamma prior of tau^2, tau^2 ~ invgamma(shape, scale)
is described by a shape parameter alpha=2 and a scale parameter
beta=tau_range^2. tau_range describes the reasonable range of
tau in the inverse-Gamma prior.
The data y's at locations x's are assumed to follow Gaussian Process:
f(x, x') ~ N(0, tau^2 * K(x, x')), where K is a kernel
function defined on x. For n observations, K(x1, x2, ..., xn) is
an n by n positive definite matrix.
Given the prior parameter tau_range, number of observations
n_y, and y_invK_y = y * inv(K) * y',
the function returns the MAP estimate of tau^2 and
the log posterior probability of tau^2 at the MAP value:
log(p(tau^2|tau_range)).
This function is written primarily for BRSA but can also
be used elsewhere. y in this case corresponds to the log of
SNR in each voxel. GBRSA does not rely on this function.
An alternative form of prior is half-Cauchy prior on tau.
Inverse-Gamma prior penalizes for both very small and very
large values of tau, while half-Cauchy prior only penalizes
for very large values of tau.
For more information on usage, see description in BRSA class:
`.BRSA`
See also: `.prior_GP_var_half_cauchy`
Parameters
----------
y_invK_y: float
y * inv(K) * y^T, where y=f(x) is a vector of observations
of unknown function f at different locations x.
K is correlation matrix of f between different locations, based
on a Gaussian Process (GP) describing the smoothness property
of f. K fully incorporates the form of the kernel
and the length scale of the GP, but not the variance of the GP
(the purpose of this function is to estimate the variance).
n_y: int, number of observations
tau_range: float,
The reasonable range of tau, the standard deviation of the
Gaussian Process imposed on y=f(x). tau_range is parameter
of the inverse-Gamma prior. Say, if you expect the standard
deviation of the Gaussian process to be around 3, tau_range
can be set to 3.
The smaller it is, the more penalization is imposed
on large variation of y.
Returns
-------
tau2: The MAP estimation of tau^2 based on the prior on tau
and y_invK_y.
log_ptau: log(p(tau)) of the returned tau^2 based on the
inverse-Gamma prior.
"""
alpha = 2
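# Derivation note (added): with the conjugate prior
# tau^2 ~ invgamma(alpha, scale=tau_range^2) and a likelihood whose
# exponent is -y_invK_y / (2 tau^2), i.e. y ~ N(0, tau^2 * K), the
# posterior is invgamma(alpha + n_y/2, scale=tau_range^2 + y_invK_y/2).
# Its mode, scale / (shape + 1), is the MAP estimate computed below.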
tau2 = (y_invK_y + 2 * tau_range**2) / (alpha * 2 + 2 + n_y)
log_ptau = scipy.stats.invgamma.logpdf(
tau2, scale=tau_range**2, a=2)
return tau2, log_ptau
def prior_GP_var_half_cauchy(y_invK_y, n_y, tau_range):
""" Imposing a half-Cauchy prior onto the standard deviation (tau)
of the Gaussian Process which is in turn a prior imposed over
a function y = f(x).
The scale parameter of the half-Cauchy prior is tau_range.
The function returns the MAP estimate of tau^2 and
log(p(tau|tau_range)) for the MAP value of tau^2,
where tau_range describes the reasonable range of tau
in the half-Cauchy prior.
An alternative form of prior is inverse-Gamma prior on tau^2.
Inverse-Gamma prior penalizes for both very small and very
large values of tau, while half-Cauchy prior only penalizes
for very large values of tau.
For more information on usage, see description in BRSA class:
`.BRSA`
"""
tau2 = (y_invK_y - n_y * tau_range**2
+ np.sqrt(n_y**2 * tau_range**4 + (2 * n_y + 8)
* tau_range**2 * y_invK_y + y_invK_y**2))\
/ 2 / (n_y + 2)
log_ptau = scipy.stats.halfcauchy.logpdf(
tau2**0.5, scale=tau_range)
return tau2, log_ptau
def Ncomp_SVHT_MG_DLD_approx(X, zscore=True):
""" This function implements the approximate calculation of the
optimal hard threshold for singular values, by Matan Gavish
and David L. Donoho:
"The optimal hard threshold for singular values is 4 / sqrt(3)"
http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=6846297
Parameters
----------
X: 2-D numpy array of size [n_T, n_V]
The data to estimate the optimal rank for selecting principal
components.
zscore: Boolean
Whether to z-score the data before calculating number of components.
Returns
-------
ncomp: integer
The optimal number of components determined by the method of MG
and DLD
"""
beta = X.shape[0] / X.shape[1]
if beta > 1:
beta = 1 / beta
omega = 0.56 * beta ** 3 - 0.95 * beta ** 2 + 1.82 * beta + 1.43
if zscore:
sing = np.linalg.svd(_zscore(X), False, False)
else:
sing = np.linalg.svd(X, False, False)
thresh = omega * np.median(sing)
ncomp = int(np.sum(np.logical_and(sing > thresh, np.logical_not(
np.isclose(sing, thresh)))))
# In the line above, we look for the singular values larger than
# the threshold but excluding those that happen to be "just" larger
# than the threshold by an amount close to the numerical precision.
# This is to prevent close-to-zero singular values to be included if
# the median of the singular values is close to 0 (which could happen
# when the input X has lower rank than its minimal size).
return ncomp
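# Usage sketch (added; the data below are hypothetical):
#
#     rng = np.random.RandomState(0)
#     low_rank = rng.randn(200, 5) @ rng.randn(5, 50)
#     noisy = low_rank + 0.1 * rng.randn(200, 50)
#     Ncomp_SVHT_MG_DLD_approx(noisy)  # expected to return a value near 5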
def _zscore(a):
""" Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
"""
assert a.ndim > 1, 'a must have more than one dimensions'
zscore = scipy.stats.zscore(a, axis=0)
zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
return zscore
class BRSA(BaseEstimator, TransformerMixin):
"""Bayesian representational Similarity Analysis (BRSA)
Given the time series of neural imaging data in a region of interest
(ROI) and the hypothetical neural response (design matrix) to
each experimental condition of interest,
calculate the shared covariance matrix U of
the voxels(recording unit)' response profiles \\beta_i to each condition,
and the relative SNR of each voxels.
The relative SNR could be considered as the degree of contribution
of each voxel to this shared covariance matrix.
A correlation matrix converted from the covariance matrix U
will be provided as a quantification of neural representational similarity.
.. math::
Y = X \\cdot \\beta + X_0 \\cdot \\beta_0 + \\epsilon
\\beta_i \\sim N(0,(s_{i} \\sigma_{i})^2 U)
\\epsilon_i \\sim AR(1)
Please note that the model assumes that the covariance matrix U which
all \\beta_i follow is zero-meaned. This assumption does not imply
there must be both positive and negative responses across voxels.
However, it means that Bayesian RSA treats the task-evoked activity
against baseline BOLD level as signal, while in other RSA tools
the deviation of task-evoked activity in each voxel from the average
task-evoked activity level across voxels may be considered as signal
of interest. Due to this assumption in BRSA, relatively high degree
of similarity may be expected when the activity patterns of two
task conditions both include strong sensory driven signals regardless
of their specific stimuli. When two task conditions elicit exactly
the same activity patterns but only differ in their global magnitudes,
under the assumption in BRSA, their similarity is 1; under the assumption
that only deviation of pattern from average patterns is signal of interest,
their similarity should be -1.
Parameters
----------
n_iter : int.
Number of maximum iterations to run the algorithm.
rank : int. Default: None
The rank of the covariance matrix.
If not provided, the covariance matrix will be assumed
to be full rank. When you have many conditions
(e.g., calculating the similarity matrix of responses to each event),
you might try specifying a lower rank.
auto_nuisance: boolean.
In order to model spatial correlation between voxels that cannot
be accounted for by common response captured in the design matrix,
we assume that a set of time courses not related to the task
conditions are shared across voxels with unknown amplitudes.
One approach is for users to provide time series which they consider
as nuisance but exist in the noise (such as head motion).
The other way is to take the first n_nureg principal components
in the residual after subtracting the response to the design matrix
from the data, and use these components as the nuisance regressor.
This flag is for the second approach. If turned on,
PCA or factor analysis will be applied to the residuals
to obtain new nuisance regressors in each round of fitting.
These two approaches can be combined. If the users provide nuisance
regressors and set this flag as True, then the first n_nureg
principal components of the residuals after subtracting
both the responses to design matrix and the user-supplied nuisance
regressors will be used in addition to the nuisance regressors
provided by the users.
Note that nuisance regressor is not required from user. If it is
not provided, DC components for each run will be included as nuisance
regressor regardless of the auto_nuisance parameter.
n_nureg: Optional[int].
Number of nuisance regressors to use in order to model signals
shared across voxels not captured by the design matrix.
This number is in addition to any nuisance regressor that the user
has already provided.
If set to None, the number of nuisance regressors will be
automatically determined based on M Gavish
and D Donoho's approximate estimation of optimal hard
threshold for singular values.
This only takes effect if auto_nuisance is True.
nureg_zscore: boolean.
A flag to tell the algorithm whether data is z-scored before
estimating the number of nuisance regressor components necessary to
account for spatial noise correlation. It also determines whether
the residual noise is z-scored before estimating the nuisance
regressors from the residuals.
This only takes effect if auto_nuisance is True.
nureg_method: string, naming a method from sklearn.decomposition.
'PCA', 'ICA', 'FA' or 'SPCA' are currently supported.
The method to estimate the shared component in noise across voxels.
This only takes effect if auto_nuisance is True.
baseline_single: boolean.
A time course of constant 1 will be included to the nuisance
regressor regardless of whether the user requests.
If baseline_single is set to False, one such regressor is included
for each fMRI run, but a single component in beta0\\_ will be
computed as the average of the weight maps corresponding to
these regressors. This might cause underestimation of noise variance.
If baseline_single is True, only one regressor of constant 1 will be
used for the whole dataset. This might be desirable if you
believe the average image intensity might not scale with the
same proportion for different voxels across scan. In other words,
it is possible that some part of the brain is more vulnerable to
change in baseline intensity due to facts such as
field inhomogeneity. Setting baseline_single to True will force the
nuisance regressors automatically estimated from residuals to
capture this. However, when each task condition only occurs in one
run and when the design matrix in each run sums together close to
a flat line, this option can cause the estimated similarity to be
extremely high between conditions occurring in the same run.
GP_space: boolean.
Whether to impose a Gaussian Process (GP) prior on the log(pseudo-SNR).
If true, the GP has a kernel defined over spatial coordinate
of each voxel. The idea behind this option is that
adjacent voxels should have similar SNRs.
This is relatively slow for big ROI. We find that when SNR
is generally low, smoothness can be overestimated.
But such regularization may reduce variance in the estimated
SNR map and similarity matrix.
GP_inten: boolean.
Whether to include a kernel defined over the intensity of image.
GP_space should be True as well if you want to use this,
because the smoothness should be primarily in space.
Smoothness in intensity is just complementary. The idea
behind this option is that voxels should have similar
SNRs when they are both adjacent (imposed by GP_space)
and are of the same tissue type (when their image intensities
are close). If you accept the second assumption, then
you can set GP_inten as True and provide an array to the `inten`
variable, expressing the intensities (brightness) for each voxel.
space_smooth_range: float.
The distance (in the same unit as the one used when supplying
the spatial coordinates of each voxel, typically millimeters)
which you believe is
the maximum range of the length scale parameter of
Gaussian Process defined over voxel location. This is
used to impose a half-Cauchy prior on the length scale.
If set to None, the program will default to half of the
maximum distance between all voxels.
inten_smooth_range: float.
The difference in image intensity which
you believe is the maximum range of plausible length
scale for the Gaussian Process defined over image
intensity. Length scales larger than this are allowed,
but will be penalized. If set to None, this parameter
will default to half of the maximal intensity difference.
tau_range: float.
The reasonable range of the standard deviation
of log(SNR). This range should not be too
large. 5 is a loose range.
When a Gaussian Process is imposed on the log(SNR),
this parameter is used in a half-Cauchy prior
on the standard deviation, or an inverse-Gamma prior
on the variance of the GP.
tau2_prior: Callable[[float, int, float], Tuple[float, float]],
Default: prior_GP_var_inv_gamma.
Can be prior_GP_var_inv_gamma or prior_GP_var_half_cauchy,
or a custom function.
The function which impose a prior for tau^2, the variance of the
GP prior on log(SNR), and returns the MAP estimate of tau^2.
It can be either prior_GP_var_inv_gamma for inverse-Gamma
or prior_GP_var_half_cauchy for half-Cauchy.
half-Cauchy prior is in fact imposed on tau.
But tau_range describes the range of tau in the prior in both cases.
Both functions are part of brsa module.
See also `.prior_GP_var_inv_gamma` and
`.prior_GP_var_half_cauchy`
To use the default inverse-Gamma prior, you can ignore this argument::
from brainiak.reprsimil.brsa import BRSA
brsa = BRSA()
If you want to try the alternative half-Cauchy prior,
then you need to import it in addition to BRSA::
from brainiak.reprsimil.brsa import BRSA, prior_GP_var_half_cauchy
brsa = BRSA(tau2_prior=prior_GP_var_half_cauchy)
eta: float.
A small number added to the diagonal element of the
covariance matrix in the Gaussian Process prior. This is
to ensure that the matrix is invertible.
init_iter: int.
How many initial iterations to fit the model
without introducing the GP prior before fitting with it,
if GP_space or GP_inten is requested. This initial
fitting is to give the parameters a good starting point.
optimizer: str or callable.
The optimizer to use for minimizing cost function which
scipy.optimize.minimize can accept.
We use 'L-BFGS-B' as a default. Users can try other strings
corresponding to optimizer provided by scipy.optimize.minimize,
or a custom optimizer, such as 'BFGS' or 'CG'.
Note that BRSA fits a lot of parameters. So a chosen optimizer
should accept gradient (Jacobian) of the cost function. Otherwise
the fitting is likely to be unbearably slow. We do not calculate
the Hessian of the objective function. So an optimizer which requires
Hessian cannot be used.
random_state : RandomState or an int seed.
A random number generator instance to define the state of
the random permutations generator whenever the module
needs to generate random number (e.g., initial parameter
of the Cholesky factor).
anneal_speed: float.
Annealing is introduced in fitting of the Cholesky
decomposition of the shared covariance matrix. The amount
of perturbation decays exponentially. This parameter sets
the ratio of the maximum number of iteration to the
time constant of the exponential.
anneal_speed=10 means by n_iter/10 iterations,
the amount of perturbation is reduced by a factor of e (about 2.718).
minimize_options: dictionary.
Default: {'gtol': 1e-4, 'disp': False, 'maxiter': 6}
This is the dictionary passed as the options argument to
scipy.optimize.minimize, which minimizes the cost function during
fitting. Notice that the minimization is performed for many times,
alternating between optimizing the covariance matrix U underlying
the pattern similarity matrix, and SNR. At most n_iter times
of this alternation is performed. So within each step of fitting,
the number of iterations performed by scipy.optimize.minimize does not
have to be very large. In other words, scipy.optimize.minimize does
not need to converge within each step of the alternating fitting
procedure.
tol: float.
Tolerance parameter passed to scipy.optimize.minimize. It is also
used for determining convergence of the alternating fitting
procedure.
Attributes
----------
U_ : numpy array, shape=[condition,condition].
The shared covariance matrix.
L_ : numpy array, shape=[condition,rank].
The Cholesky factor of the shared covariance matrix
(lower-triangular matrix).
C_: numpy array, shape=[condition,condition].
The correlation matrix derived from the shared covariance matrix.
This is the estimated similarity matrix between neural patterns
to your task conditions. Notice that it is recommended that
you also check U\\_, which is the covariance matrix underlying
this correlation matrix. In cases where there is almost no response
to your task conditions, the diagonal values of U\\_ would become
very small and C\\_ might contain many correlation coefficients
close to 1 or -1. This might not reflect true strong correlation
or strong negative correlation, but a result of lack of
task-related neural activity, design matrix that does not match
true neural response, or not enough data.
It is also recommended to check nSNR\\_ after mapping it back to
the brain. A "reasonable" map should at least have higher values
in gray matter than in white matter.
nSNR_ : numpy array, shape=[voxels,].
The normalized pseudo-SNR of all voxels.
They are normalized such that the geometric mean is 1.
Note that this attribute can not be interpreted as true SNR,
but the relative ratios between voxel indicates the contribution
of each voxel to the representational similarity structure.
sigma_ : numpy array, shape=[voxels,].
The estimated standard deviation of the noise in each voxel
Assuming AR(1) model, this means the standard deviation
of the innovation noise.
rho_ : numpy array, shape=[voxels,].
The estimated autoregressive coefficient of each voxel
bGP_ : float, only if GP_space or GP_inten is True.
The standard deviation of the GP prior
lGPspace_ : float, only if GP_space or GP_inten is True
The length scale of Gaussian Process prior of log(SNR)
lGPinten_: float, only if GP_inten is True
The length scale in fMRI intensity of the GP prior of log(SNR)
beta_: array, shape=[conditions, voxels]
The maximum a posterior estimation of the response amplitudes
of each voxel to each task condition.
beta0_: numpy array, shape=[n_nureg + n_base, voxels]
The loading weights of each voxel for the shared time courses
not captured by the design matrix. This helps capture the
structure of spatial covariance of task-unrelated signal.
n_base is the number of columns of the user-supplied nuisance
regressors plus one for DC component
X0_: numpy array, shape=[time_points, n_nureg + n_base]
The estimated time course that is shared across voxels but
unrelated to the events of interest (design matrix).
beta0_null_: numpy array, shape=[n_nureg + n_base, voxels]
The equivalent of beta0\\_ in a null model which does not
include the design matrix and response pattern beta.
X0_null_: numpy array, shape=[time_points, n_nureg + n_base]
The equivalent of X0\\_ in a null model which does not
include the design matrix and response pattern beta
n_nureg_: int
Number of nuisance regressor in addition to such
regressors provided by the user (if any), if auto_nuisance
is set to True. If n_nureg is set to 'opt',
this will be estimated from data. 'opt' will use M Gavish
and D Donoho's approximate estimation of optimal hard
threshold for singular values.
random_state_: `RandomState`
Random number generator initialized using random_state.
"""
def __init__(
self, n_iter=100, rank=None,
auto_nuisance=True, n_nureg=None, nureg_zscore=True,
nureg_method='PCA', baseline_single=False,
GP_space=False, GP_inten=False,
space_smooth_range=None, inten_smooth_range=None,
tau_range=5.0,
tau2_prior=prior_GP_var_inv_gamma,
eta=0.0001, init_iter=20, optimizer='L-BFGS-B',
random_state=None, anneal_speed=10, tol=1e-4,
minimize_options={'gtol': 1e-4, 'disp': False,
'maxiter': 6}):
self.n_iter = n_iter
self.rank = rank
self.GP_space = GP_space
self.GP_inten = GP_inten
self.tol = tol
self.auto_nuisance = auto_nuisance
self.n_nureg = n_nureg
self.nureg_zscore = nureg_zscore
if auto_nuisance:
assert (n_nureg is None) \
or (isinstance(n_nureg, int) and n_nureg > 0), \
'n_nureg should be a positive integer or None'\
' if auto_nuisance is True.'
if self.nureg_zscore:
self.preprocess_residual = lambda x: _zscore(x)
else:
self.preprocess_residual = lambda x: x
if nureg_method == 'FA':
self.nureg_method = lambda x: FactorAnalysis(n_components=x)
elif nureg_method == 'PCA':
self.nureg_method = lambda x: PCA(n_components=x, whiten=True)
elif nureg_method == 'SPCA':
self.nureg_method = lambda x: SparsePCA(n_components=x,
max_iter=20, tol=tol)
elif nureg_method == 'ICA':
self.nureg_method = lambda x: FastICA(n_components=x,
whiten=True)
else:
raise ValueError('nureg_method can only be FA, PCA, '
'SPCA (for sparse PCA) or ICA')
self.baseline_single = baseline_single
self.minimize_options = minimize_options
self.eta = eta
# This is a tiny ridge added to the Gaussian Process
# covariance matrix template to guarantee that it is invertible.
# Mathematically it means we assume that this proportion of the
# variance is always independent between voxels for the log(SNR2).
self.space_smooth_range = space_smooth_range
self.inten_smooth_range = inten_smooth_range
# The kernel of the Gaussian Process is the product of a kernel
# defined on spatial coordinate and a kernel defined on
# image intensity.
self.tau_range = tau_range
self.tau2_prior = tau2_prior
self.init_iter = init_iter
# When imposing smoothness prior, fit the model without this
# prior for this number of iterations.
self.optimizer = optimizer
self.random_state = random_state
self.anneal_speed = anneal_speed
return
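# Typical workflow (added sketch; `fmri_data` and `design_matrix` are
# hypothetical variables):
#
#     brsa = BRSA(rank=None, auto_nuisance=True)
#     brsa.fit(X=fmri_data, design=design_matrix,
#              scan_onsets=np.array([0, 200, 400]))
#     brsa.C_     # estimated similarity (correlation) matrix
#     brsa.nSNR_  # voxel-wise normalized pseudo-SNR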
def fit(self, X, design, nuisance=None, scan_onsets=None, coords=None,
inten=None):
"""Compute the Bayesian RSA
Parameters
----------
X: numpy array, shape=[time_points, voxels]
If you have multiple scans of the same participants that you
want to analyze together, you should concatenate them along
the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: numpy array, shape=[time_points, conditions]
This is the design matrix. It should only include the hypothetic
response for task conditions. You should not include
regressors for a DC component or motion parameters, unless you
want to estimate their pattern similarity with response patterns
to your task conditions. If you want to model head motion,
you should include them in nuisance regressors.
If you have multiple run, the design matrix
of all runs should be concatenated along the time dimension,
with every column for one condition across runs.
For example, if you have 3 runs of experiment of one participant,
with each run lasting 200 TR. And you have 4 conditions,
then design should be a 600 x 4 numpy array.
nuisance: optional, numpy array, shape=[time_points, nuisance_factors]
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors
and a constant baseline)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
Please do not include time course of constant baseline in nuisance.
scan_onsets: optional, numpy array, shape=[runs,]
This specifies the indices of X which correspond to the onset
of each scanning run. For example, if you have two experimental
runs of the same subject, each with 100 TRs, then scan_onsets
should be [0,100].
If you do not provide the argument, the program will
assume all data are from the same run.
The effect of them is to make the inverse matrix
of the temporal covariance matrix of noise block-diagonal.
coords: optional, numpy array, shape=[voxels,3]
This is the coordinate of each voxel,
used for implementing Gaussian Process prior.
inten: optional, numpy array, shape=[voxel,]
This is the average fMRI intensity in each voxel.
It should be calculated from your data without any preprocessing
such as z-scoring, because it should reflect
whether a voxel is bright (grey matter) or dark (white matter).
A Gaussian Process kernel defined on both coordinate and intensity
imposes a smoothness prior on adjacent voxels
but with the same tissue type. The Gaussian Process
is experimental and has shown good performance on
some visual datasets.
"""
logger.info('Running Bayesian RSA')
self.random_state_ = check_random_state(self.random_state)
# setting random seed
logger.debug('RandState set to {}'.format(self.random_state_))
assert not self.GP_inten or (self.GP_inten and self.GP_space),\
'You must set GP_space to True '\
'if you want to use GP_inten.'
# Check input data
assert_all_finite(X)
assert X.ndim == 2, 'The data should be 2-dimensional ndarray'
assert np.all(np.std(X, axis=0) > 0),\
'The time courses of some voxels do not change at all.'\
' Please make sure all voxels are within the brain'
# check design matrix
assert_all_finite(design)
assert design.ndim == 2,\
'The design matrix should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(design) == design.shape[1], \
'Your design matrix has rank smaller than the number of'\
' columns. Some columns can be explained by linear '\
'combination of other columns. Please check your design matrix.'
assert np.size(design, axis=0) == np.size(X, axis=0),\
'Design matrix and data do not '\
'have the same number of time points.'
assert self.rank is None or self.rank <= design.shape[1],\
'Your design matrix has fewer columns than the rank you set'
# Check the nuisance regressors.
if nuisance is not None:
assert_all_finite(nuisance)
assert nuisance.ndim == 2,\
'The nuisance regressor should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(nuisance) == nuisance.shape[1], \
'The nuisance regressor has rank smaller than the number of '\
'columns. Some columns can be explained by linear '\
'combination of other columns. Please check your nuisance '\
'regressors.'
assert np.size(nuisance, axis=0) == np.size(X, axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
# check scan_onsets validity
assert scan_onsets is None or\
(np.max(scan_onsets) <= X.shape[0] and np.min(scan_onsets) >= 0),\
'Some scan onsets provided are out of the range of time points.'
# check the size of coords and inten
if self.GP_space:
logger.info('Fitting with Gaussian Process prior on log(SNR)')
assert coords is not None and coords.shape[0] == X.shape[1],\
'Spatial smoothness was requested by setting GP_space. '\
'But the voxel number of coords does not match that of '\
'data X, or voxel coordinates are not provided. '\
'Please make sure that coords is in the shape of '\
'[n_voxel x 3].'
assert coords.ndim == 2,\
'The coordinate matrix should be a 2-d array'
if self.GP_inten:
assert inten is not None and inten.shape[0] == X.shape[1],\
'The voxel number of intensity does not '\
'match that of data X, or intensity not provided.'
assert np.var(inten) > 0,\
'All voxels have the same intensity.'
if (not self.GP_space and coords is not None) or\
(not self.GP_inten and inten is not None):
logger.warning('Coordinates or image intensity provided'
' but GP_space or GP_inten is not set '
'to True. The coordinates or intensity are'
' ignored.')
# Estimate the number of necessary nuisance regressors
if self.auto_nuisance:
if self.n_nureg is None:
logger.info('number of nuisance regressors is determined '
'automatically.')
run_TRs, n_runs = self._run_TR_from_scan_onsets(
X.shape[0], scan_onsets)
ts_dc = self._gen_legendre(run_TRs, [0])
_, ts_base, _ = self._merge_DC_to_base(
ts_dc, nuisance, False)
ts_reg = np.concatenate((ts_base, design), axis=1)
beta_hat = np.linalg.lstsq(ts_reg, X, rcond=None)[0]
residuals = X - np.dot(ts_reg, beta_hat)
self.n_nureg_ = np.max(
[1, Ncomp_SVHT_MG_DLD_approx(residuals,
self.nureg_zscore)])
logger.info('Use {} nuisance regressors to model the spatial '
'correlation in noise.'.format(self.n_nureg_))
self.n_nureg_ = np.int32(self.n_nureg_)
else:
self.n_nureg_ = self.n_nureg
self.n_nureg_ = np.int32(self.n_nureg_)
# Run Bayesian RSA
# Note that we have a change of notation here. Within _fit_RSA_UV,
# design matrix is named X and data is named Y, to reflect the
# generative model that data Y is generated by mixing the response
# X to experiment conditions and other neural activity.
# However, in fit(), we keep the tradition of scikit-learn that
# X is the input data to fit and y, a reserved name not used, is
# the label to map to from X.
if not self.GP_space:
# If GP_space is not requested, then the model is fitted
# without imposing any Gaussian Process prior on log(SNR^2)
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, _, _, _,\
self.X0_ = self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
elif not self.GP_inten:
# If GP_space is requested, but GP_inten is not, a GP prior
# based on spatial locations of voxels will be imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, _, \
self.X0_ = self._fit_RSA_UV(
X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets, coords=coords)
else:
# If both self.GP_space and self.GP_inten are True,
# a GP prior based on both location and intensity is imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, self.lGPinten_, self.X0_ = \
self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets,
coords=coords, inten=inten)
self.C_ = utils.cov2corr(self.U_)
self.design_ = design.copy()
self._rho_design_, self._sigma2_design_ = \
self._est_AR1(self.design_, same_para=True)
self._rho_X0_, self._sigma2_X0_ = self._est_AR1(self.X0_)
# AR(1) parameters of the design matrix and nuisance regressors,
# which will be used in transform or score.
# Finally, we fit a null model with the same setting except
# that there is no response to X
self.beta0_null_, self.sigma_null_, self.rho_null_, \
self.X0_null_ = self._fit_null(Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
self._rho_X0_null_, self._sigma2_X0_null_ =\
self._est_AR1(self.X0_null_)
return self
def transform(self, X, y=None, scan_onsets=None):
""" Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and
nuisance regressors from a new dataset different from the
training dataset on which fit() was applied. An AR(1) smooth
prior is imposed on the decoded ts and ts0 with the AR(1)
parameters learnt from the corresponding time courses in the
training data.
Notice: if you set the rank to be lower than the number of
experimental conditions (number of columns in the design
matrix), the recovered task-related activity will have
collinearity (the recovered time courses of some conditions
can be linearly explained by the recovered time courses
of other conditions).
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of a new dataset from the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ts : numpy arrays, shape = [time_points, condition]
The estimated response to the task conditions which have the
response amplitudes estimated during the fit step.
ts0: numpy array, shape = [time_points, n_nureg]
The estimated time course spread across the brain, with the
loading weights estimated during the fit step.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices. '\
'If it is given, it should include at least 0.'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ts, ts0, log_p = self._transform(
Y=X, scan_onsets=scan_onsets, beta=self.beta_,
beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_,
rho_X=self._rho_design_, sigma2_X=self._sigma2_design_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
return ts, ts0
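# Usage sketch for held-out data of the same participant (added; the
# variable names are hypothetical):
#
#     ts, ts0 = brsa.transform(new_data, scan_onsets=np.array([0]))
#     ll, ll_null = brsa.score(new_data, design=new_design,
#                              scan_onsets=np.array([0]))
#
# Comparing ll against ll_null indicates whether modeling the task
# conditions improves the fit on the held-out data.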
def score(self, X, design, scan_onsets=None):
""" Use the model and parameters estimated by fit function
from some data of a participant to evaluate the log
likelihood of some new data of the same participant.
Design matrix of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
to testing data and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of a new dataset from the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling score().
design : numpy array, shape=[time_points, conditions]
Design matrix expressing the hypothetical response of
the task conditions in data X.
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: float.
The log likelihood of the new data based on the model and its
parameters fit to the training data.
ll_null: float.
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except for that there is no response to any of the
task conditions.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices. '\
'If it is given, it should include at least 0.'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ll = self._score(Y=X, design=design, beta=self.beta_,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
ll_null = self._score(Y=X, design=None, beta=None,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_,
sigma2_X0=self._sigma2_X0_)
return ll, ll_null
# The following 2 functions _D_gen and _F_gen generate templates used
# for constructing the inverse of the covariance matrix of AR(1) noise.
# The inverse of the covariance matrix is
# (I - rho1 * D + rho1**2 * F) / sigma**2. D is a matrix where all the
# elements adjacent to the diagonal are 1 and all others are 0. F is
# a matrix which is 1 on all diagonal elements except for the first
# and last ones. We denote (I - rho1 * D + rho1**2 * F) with A.
# In the function calculating likelihood function,
# XTAX, YTAY_diag, YTAX all mean multiplying the inverse covariance matrix
# in between either the design matrix or the data.
# As one can see, even though rho1 and sigma2 might update as we keep
# fitting parameters, several terms stay unchanged and do not need to
# be re-calculated.
# For example, in X'AX = X'(I - rho1*D + rho1**2*F)X / sigma2,
# the products X'X, X'DX, X'FX, etc. can always be re-used if they
# are pre-calculated. Therefore, _D_gen and _F_gen constructs matrices
# D and F, and _prepare_data_* calculates these products that can be
# re-used. In principle, once parameters have been fitted for a
# dataset, they can be updated for new incoming data by adding the
# products X'X, X'DX, X'FX, X'Y etc. from new data to those from
# existing data, and refit the parameters starting from the ones
# fitted from existing data.
def _D_gen(self, TR):
if TR > 0:
return np.diag(np.ones(TR - 1), -1) \
+ np.diag(np.ones(TR - 1), 1)
else:
return np.empty([0, 0])
def _F_gen(self, TR):
if TR > 0:
F = np.eye(TR)
F[0, 0] = 0
F[TR - 1, TR - 1] = 0
return F
else:
return np.empty([0, 0])
def _run_TR_from_scan_onsets(self, n_T, scan_onsets=None):
if scan_onsets is None:
# assume that all data are acquired within the same scan.
n_run = 1
run_TRs = np.array([n_T], dtype=int)
else:
# Each value in the scan_onsets tells the index at which
# a new scan starts. For example, if n_T = 500, and
# scan_onsets = [0,100,200,400], this means that the time points
# of 0-99 are from the first scan, 100-199 are from the second,
# 200-399 are from the third and 400-499 are from the fourth
run_TRs = np.int32(np.diff(np.append(scan_onsets, n_T)))
run_TRs = np.delete(run_TRs, np.where(run_TRs == 0))
n_run = run_TRs.size
# delete run length of 0 in case of duplication in scan_onsets.
logger.info('I infer that the numbers of volumes'
' in each scan are: {}'.format(run_TRs))
return run_TRs, n_run
def _prepare_DF(self, n_T, scan_onsets=None):
""" Prepare the essential template matrices D and F for
pre-calculating some terms to be re-used.
The inverse covariance matrix of AR(1) noise is
sigma^-2 * (I - rho1*D + rho1**2 * F).
And we denote A = I - rho1*D + rho1**2 * F"""
run_TRs, n_run = self._run_TR_from_scan_onsets(n_T, scan_onsets)
D_ele = map(self._D_gen, run_TRs)
F_ele = map(self._F_gen, run_TRs)
D = scipy.linalg.block_diag(*D_ele)
F = scipy.linalg.block_diag(*F_ele)
# D and F above are templates for constructing
# the inverse of temporal covariance matrix of noise
return D, F, run_TRs, n_run
def _prepare_data_XY(self, X, Y, D, F):
"""Prepares different forms of products of design matrix X
and data Y, or between themselves.
These products are re-used a lot during fitting.
So we pre-calculate them. Because these are reused,
it is in principle possible to update the fitting
as new data come in, by just incrementally adding
the products of new data and their corresponding parts
of design matrix to these pre-calculated terms.
"""
XTY, XTDY, XTFY = self._make_templates(D, F, X, Y)
YTY_diag = np.sum(Y * Y, axis=0)
YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0)
YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0)
XTX, XTDX, XTFX = self._make_templates(D, F, X, X)
return XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \
XTDX, XTFX
def _gen_X_DC(self, run_TRs):
if self.baseline_single:
X_DC = np.ones((np.sum(run_TRs), 1))
else:
X_DC = scipy.linalg.block_diag(*map(np.ones, run_TRs)).T
return X_DC
def _gen_legendre(self, run_TRs, orders):
def reg(x):
return np.concatenate(
[scipy.special.legendre(o)(np.linspace(-1, 1, x))[None, :]
for o in orders], axis=0)
reg_poly = scipy.linalg.block_diag(
*map(reg, run_TRs)).T
return reg_poly
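# Worked example (added): run_TRs=[4, 4] with orders=[0, 1] yields an
# 8 x 4 matrix holding, for each run, a constant (Legendre order 0) and a
# linear trend (order 1), placed block-diagonally so every run gets its
# own drift regressors.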
def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs,
no_DC=False):
"""Prepares different forms of products between design matrix X or
data Y or nuisance regressors X0.
These products are re-used a lot during fitting.
So we pre-calculate them.
no_DC means not inserting regressors for DC components
into nuisance regressor.
It will only take effect if X_base is not None.
"""
X_DC = self._gen_X_DC(run_TRs)
reg_sol = np.linalg.lstsq(X_DC, X, rcond=None)
if np.any(np.isclose(reg_sol[1], 0)):
raise ValueError('Your design matrix appears to have '
'included baseline time series.'
'Either remove them, or move them to'
' nuisance regressors.')
X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base,
no_DC)
if X_res is None:
X0 = X_base
else:
X0 = np.concatenate((X_base, X_res), axis=1)
n_X0 = X0.shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0)
X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC
def _merge_DC_to_base(self, X_DC, X_base, no_DC):
""" Merge DC components X_DC to the baseline time series
X_base (By baseline, this means any fixed nuisance
regressors not updated during fitting, including DC
components and any nuisance regressors provided by
the user.
X_DC is always in the first few columns of X_base.
"""
if X_base is not None:
reg_sol = np.linalg.lstsq(X_DC, X_base, rcond=None)
if not no_DC:
if not np.any(np.isclose(reg_sol[1], 0)):
# No columns in X_base can be explained by the
# baseline regressors. So we insert them.
X_base = np.concatenate((X_DC, X_base), axis=1)
idx_DC = np.arange(0, X_DC.shape[1])
else:
logger.warning('Provided regressors for uninteresting '
'time series already include baseline. '
'No additional baseline is inserted.')
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
# If a set of regressors for non-interested signals is not
# provided, then we simply include one baseline for each run.
X_base = X_DC
idx_DC = np.arange(0, X_base.shape[1])
logger.info('You did not provide time series of no interest '
'such as DC component. Trivial regressors of'
' DC component are included for further modeling.'
" The final covariance matrix won't "
'reflect these components.')
return X_DC, X_base, idx_DC
def _make_ar1_quad_form(self, XTX, XTDX, XTFX, rho1):
# Calculate the matrix X'AX = X'X - rho1 * X'DX + rho1^2 * X'FX
# Here, rho1 is the AR(1) coefficient. X is a matrix of time series
# with each row corresponding to a vector at one
# time point. The forms of matrices D and F are defined in _prepare_DF
# function. sigma^-2 * A would be the inverse of covariance matrix
# of AR(1) process (precision matrix) with rho1 as the AR coefficient
# and sigma^2 as the variance of independent noise at each time point.
return XTX - rho1 * XTDX + rho1**2 * XTFX
def _make_ar1_quad_form_grad(self, XTDX, XTFX, rho1):
# Calculate the derivative of the quadratic form X'AX with respect to
# AR1 coefficient rho1, given precalculated terms X'DX and X'FX,
# and rho1.
return - XTDX + 2 * rho1 * XTFX
def _make_templates(self, D, F, X, Y):
XTY = np.dot(X.T, Y)
XTDY = np.dot(np.dot(X.T, D), Y)
XTFY = np.dot(np.dot(X.T, F), Y)
return XTY, XTDY, XTFY
def _precompute_ar1_quad_forms(self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag,
YTFY_diag, XTX, XTDX, XTFX, X0TX0, X0TDX0,
X0TFX0, XTX0, XTDX0, XTFX0, X0TY, X0TDY,
X0TFY, L, rho1, n_V, n_X0):
# Calculate the sandwich terms which put A between X, Y and X0
# These terms are used a lot in the likelihood. But in the _fitV
# step, they only need to be calculated once, since A is fixed.
# In _fitU step, they need to be calculated at each iteration,
# because rho1 changes.
XTAY = self._make_ar1_quad_form(XTY, XTDY, XTFY, rho1)
# dimension: feature*space
YTAY = self._make_ar1_quad_form(YTY_diag, YTDY_diag, YTFY_diag, rho1)
# dimension: space,
# A/sigma2 is the inverse of noise covariance matrix in each voxel.
# YTAY means Y'AY
XTAX = XTX[None, :, :] - rho1[:, None, None] \
* XTDX[None, :, :] \
+ rho1[:, None, None]**2 * XTFX[None, :, :]
# dimension: space*feature*feature
X0TAX0 = X0TX0[None, :, :] - rho1[:, None, None] \
* X0TDX0[None, :, :] \
+ rho1[:, None, None]**2 * X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
XTAX0 = XTX0[None, :, :] - rho1[:, None, None] \
* XTDX0[None, :, :] \
+ rho1[:, None, None]**2 * XTFX0[None, :, :]
# dimension: space*feature*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY, rho1)
# dimension: #baseline*space
X0TAX0_i = np.linalg.solve(X0TAX0, np.identity(n_X0)[None, :, :])
# dimension: space*#baseline*#baseline
XTAcorrX = XTAX
# dimension: space*feature*feature
XTAcorrY = XTAY
# dimension: feature*space
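# Note: XTAcorrX aliases XTAX and XTAcorrY aliases XTAY; both are
# modified in place in the loop below. XTAX and XTAY are not used
# separately afterwards, so the aliasing is harmless here.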
for i_v in range(n_V):
XTAcorrX[i_v, :, :] -= \
np.dot(np.dot(XTAX0[i_v, :, :], X0TAX0_i[i_v, :, :]),
XTAX0[i_v, :, :].T)
XTAcorrY[:, i_v] -= np.dot(np.dot(XTAX0[i_v, :, :],
X0TAX0_i[i_v, :, :]),
X0TAY[:, i_v])
XTAcorrXL = np.dot(XTAcorrX, L)
# dimension: space*feature*rank
LTXTAcorrXL = np.tensordot(XTAcorrXL, L, axes=(1, 0))
# dimension: rank*feature*rank
LTXTAcorrY = np.dot(L.T, XTAcorrY)
# dimension: rank*space
YTAcorrY = YTAY - np.sum(X0TAY * np.einsum('ijk,ki->ji',
X0TAX0_i, X0TAY), axis=0)
# dimension: space
return X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL
def _calc_LL(self, rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY, X0TAX0, SNR2,
n_V, n_T, n_run, rank, n_X0):
# Calculate the log likelihood (excluding the GP prior of log(SNR))
# for both _loglike_AR1_diagV_fitU and _loglike_AR1_diagV_fitV,
# in addition to a few other terms.
LAMBDA_i = LTXTAcorrXL * SNR2[:, None, None] + np.eye(rank)
# dimension: space*rank*rank
LAMBDA = np.linalg.solve(LAMBDA_i, np.identity(rank)[None, :, :])
# dimension: space*rank*rank
# LAMBDA is essentially the inverse covariance matrix of the
# posterior probability of alpha, which bears the relation with
# beta by beta = L * alpha. L is the Cholesky factor of the
# shared covariance matrix U. Refer to the explanation below
# Equation 5 in the NIPS paper.
YTAcorrXL_LAMBDA = np.einsum('ji,ijk->ik', LTXTAcorrY, LAMBDA)
# dimension: space*rank
sigma2 = (YTAcorrY - np.sum(LTXTAcorrY * YTAcorrXL_LAMBDA.T, axis=0)
* SNR2) / (n_T - n_X0)
# dimension: space
LL = - np.sum(np.log(sigma2)) * (n_T - n_X0) * 0.5 \
+ np.sum(np.log(1 - rho1**2)) * n_run * 0.5 \
- np.sum(self._half_log_det(X0TAX0)) \
- np.sum(self._half_log_det(LAMBDA_i)) \
- (n_T - n_X0) * n_V * (1 + np.log(2 * np.pi)) * 0.5
# Log likelihood
return LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2
def _calc_dist2_GP(self, coords=None, inten=None,
GP_space=False, GP_inten=False):
# Calculate the squared differences between each pair of voxels'
# location coordinates and image intensities.
if GP_space:
assert coords is not None, 'coordinate is not provided'
# square of spatial distance between every two voxels
dist2 = spdist.squareform(spdist.pdist(coords, 'sqeuclidean'))
# set the hyperparameter for the GP process:
if self.space_smooth_range is None:
space_smooth_range = np.max(dist2)**0.5 / 2.0
# By default, we assume the length scale should be
# within half the size of the ROI.
else:
space_smooth_range = self.space_smooth_range
if GP_inten:
assert inten is not None, 'intensity is not provided'
# square of difference between intensities of
# every two voxels
inten_diff2 = spdist.squareform(
spdist.pdist(inten[:, None], 'sqeuclidean'))
# set the hyperparameter for the GP process:
if self.inten_smooth_range is None:
inten_smooth_range = np.max(inten_diff2)**0.5 / 2.0
# By default, we assume the length scale should be
# within half the maximum difference of intensity.
else:
inten_smooth_range = self.inten_smooth_range
n_smooth = 2
else:
inten_diff2 = None
inten_smooth_range = None
n_smooth = 1
else:
n_smooth = 0
dist2 = None
inten_diff2 = None
GP_inten = False
space_smooth_range = None
inten_smooth_range = None
return dist2, inten_diff2, space_smooth_range, inten_smooth_range,\
n_smooth
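# Illustrative call with hypothetical coordinates for an ROI of three
# voxels (a sketch, not part of the original code):
#
#     coords = np.array([[0., 0., 0.], [1., 0., 0.], [0., 2., 0.]])
#     dist2, _, r_space, _, n_smooth = self._calc_dist2_GP(
#         coords=coords, GP_space=True)
#     # dist2[0, 1] == 1.0 and dist2[0, 2] == 4.0 (squared distances);
#     # n_smooth == 1 because GP_inten defaults to False.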
def _build_index_param(self, n_l, n_V, n_smooth):
""" Build dictionaries to retrieve each parameter
from the combined parameters.
"""
idx_param_sing = {'Cholesky': np.arange(n_l), 'a1': n_l}
# for simplified fitting
idx_param_fitU = {'Cholesky': np.arange(n_l),
'a1': np.arange(n_l, n_l + n_V)}
# for the likelihood function when we fit U (the shared covariance).
idx_param_fitV = {'log_SNR2': np.arange(n_V - 1),
'c_space': n_V - 1, 'c_inten': n_V,
'c_both': np.arange(n_V - 1, n_V - 1 + n_smooth)}
# for the likelihood function when we fit V (reflected by SNR of
# each voxel)
return idx_param_sing, idx_param_fitU, idx_param_fitV
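# Example of unpacking a combined parameter vector with these indices
# (hypothetical sizes: n_l=6 Cholesky entries, n_V=100 voxels):
#
#     idx_sing, idx_fitU, idx_fitV = self._build_index_param(6, 100, 2)
#     param_fitU = np.zeros(6 + 100)
#     chol_part = param_fitU[idx_fitU['Cholesky']]  # first 6 entries
#     a1_part = param_fitU[idx_fitU['a1']]          # remaining 100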
def _half_log_det(self, M):
""" Return log(|M|)*0.5. For positive definite matrix M
of more than 2 dimensions, calculate this for the
last two dimension and return a value corresponding
to each element in the first few dimensions.
"""
chol = np.linalg.cholesky(M)
if M.ndim == 2:
return np.sum(np.log(np.abs(np.diag(chol))))
else:
return np.sum(np.log(np.abs(np.diagonal(
chol, axis1=-2, axis2=-1))), axis=-1)
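# Quick check against np.linalg.slogdet (illustrative; M below is a
# hypothetical symmetric positive definite matrix):
#
#     A = np.random.randn(5, 5)
#     M = np.dot(A, A.T) + 5 * np.eye(5)
#     sign, logdet = np.linalg.slogdet(M)
#     assert np.isclose(self._half_log_det(M), logdet * 0.5)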
def _chol_idx(self, n_C, rank):
l_idx = np.tril_indices(n_C)
if rank is not None:
# The rank of covariance matrix is specified
idx_rank = np.where(l_idx[1] < rank)
l_idx = (l_idx[0][idx_rank], l_idx[1][idx_rank])
logger.info('Using the rank specified by the user: '
'{}'.format(rank))
else:
rank = n_C
# if not specified, we assume you want to
# estimate a full rank matrix
logger.warning('Please be aware that you did not specify the'
' rank of the covariance matrix to estimate.'
' I will assume that the covariance matrix '
'shared among voxels is of full rank.'
' Rank = {}'.format(rank))
logger.warning('Please be aware that estimating a matrix of '
'high rank can be very slow. '
'If you have a good reason to specify a rank '
'lower than the number of experiment conditions,'
' do so.')
return l_idx, rank
def _fit_RSA_UV(self, X, Y, X_base,
scan_onsets=None, coords=None, inten=None):
""" The major utility of fitting Bayesian RSA.
Note that there is a naming change of variable. X in fit()
is changed to Y here, and design in fit() is changed to X here.
This is because we follow the tradition that X expresses the
variable defined (controlled) by the experimenter, i.e., the
time course of experimental conditions convolved by an HRF,
and Y expresses data.
However, in wrapper function fit(), we follow the naming
routine of scikit-learn.
"""
GP_inten = self.GP_inten
GP_space = self.GP_space
rank = self.rank
n_V = np.size(Y, axis=1)
n_T = np.size(Y, axis=0)
n_C = np.size(X, axis=1)
l_idx, rank = self._chol_idx(n_C, rank)
n_l = np.size(l_idx[0]) # the number of parameters for L
t_start = time.time()
D, F, run_TRs, n_run = self._prepare_DF(
n_T, scan_onsets=scan_onsets)
XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \
XTDX, XTFX = self._prepare_data_XY(X, Y, D, F)
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC = \
self._prepare_data_XYX0(
X, Y, X_base, None, D, F, run_TRs, no_DC=False)
# Prepare the data for fitting. These pre-calculated matrices
# will be re-used a lot in evaluating likelihood function and
# gradient.
# DC component will be added to the nuisance regressors.
# In later steps, we do not need to add DC components again
dist2, inten_diff2, space_smooth_range, inten_smooth_range,\
n_smooth = self._calc_dist2_GP(
coords=coords, inten=inten,
GP_space=GP_space, GP_inten=GP_inten)
# Calculating the distances between voxel locations and between
# voxel intensities. These are used if a Gaussian Process prior
# is requested to regularize log(SNR^2)
idx_param_sing, idx_param_fitU, idx_param_fitV = \
self._build_index_param(n_l, n_V, n_smooth)
# Indexes to find each parameter in a combined parameter vector.
current_GP = np.zeros(n_smooth)
# We will perform the fitting in 2~3 steps:
# (1) A preliminary fitting assuming all voxels share
# exactly the same temporal covariance matrix for their noise.
# SNR is assumed to be 1 for all voxels in this fitting.
# Therefore, there are only n_l+2 free parameters.
# (2) (optional) A fitting which allows each voxel to have their
# own pseudo-SNR and AR(1) coefficients. But no Gaussian Process
# prior is imposed on log(SNR). This step is neglected if GP
# prior is not requested. This step allows the SNR parameters to
# move closer to their correct values before GP is introduced.
# This step alternately fits the shared covariance and voxel-
# specific variance. It fits for init_iter steps and the
# tolerance is also increased by a factor of 5 to speed up
# fitting.
# (3) Final fitting. If GP prior is requested, it will be
# introduced in this step. Otherwise, just fit as the previous
# step, but using un-altered tolerance setting, and n_iter
# as the number of iteration.
# Step 1 fitting, with a simplified model
current_vec_U_chlsk_l, current_a1, current_logSigma2 = \
self._initial_fit_singpara(
XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
X, Y, X0, idx_param_sing,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank)
current_logSNR2 = -current_logSigma2
norm_factor = np.mean(current_logSNR2)
current_logSNR2 = current_logSNR2 - norm_factor
X_res = None
# Step 2 fitting, which only happens if
# GP prior is requested
if GP_space:
current_vec_U_chlsk_l, current_a1, current_logSNR2, X_res\
= self._fit_diagV_noGP(
XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2,
idx_param_fitU, idx_param_fitV,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank)
current_GP[0] = np.log(np.min(
dist2[np.tril_indices_from(dist2, k=-1)]))
# We start fitting the model with GP prior with a small
# length scale: the size of voxels.
# Alternatively, initialize with a large distance.
# Further testing of initial parameters need to be done.
# current_GP[0] = np.log(np.max(dist2)/4.0)
logger.debug('current GP[0]:{}'.format(current_GP[0]))
if GP_inten:
current_GP[1] = np.log(np.maximum(
np.percentile(inten_diff2[np.tril_indices_from(
inten_diff2, k=-1)], 2), 0.5))
logger.debug(
'current GP[1]:{}'.format(current_GP[1]))
# We start the length scale for intensity with
# a small value. A heuristic is the 2nd percentile of
# all the squared differences. But it should not be
# smaller than 0.5. This limit is set in case
# many voxels have close to equal intensities,
# which might render the 2nd percentile 0.
# Step 3 fitting. GP prior is imposed if requested.
# In this step, unless auto_nuisance is set to False, X_res
# will be re-estimated from the residuals after each step
# of fitting. And X0 will be concatenation of X_base and X_res
logger.debug('indexing:{}'.format(idx_param_fitV))
logger.debug('initial GP parameters:{}'.format(current_GP))
current_vec_U_chlsk_l, current_a1, current_logSNR2,\
current_GP, X_res = self._fit_diagV_GP(
XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2, current_GP, n_smooth,
idx_param_fitU, idx_param_fitV,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank,
GP_space, GP_inten, dist2, inten_diff2,
space_smooth_range, inten_smooth_range)
estU_chlsk_l_AR1_UV = np.zeros([n_C, rank])
estU_chlsk_l_AR1_UV[l_idx] = current_vec_U_chlsk_l
est_cov_AR1_UV = np.dot(estU_chlsk_l_AR1_UV, estU_chlsk_l_AR1_UV.T)
est_rho1_AR1_UV = 2 / np.pi * np.arctan(current_a1)
est_SNR_AR1_UV = np.exp(current_logSNR2 / 2.0)
# Calculating est_sigma_AR1_UV, est_sigma_AR1_UV,
# est_beta_AR1_UV and est_beta0_AR1_UV
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, _ \
= self._prepare_data_XYX0(
X, Y, X_base, X_res, D, F, run_TRs, no_DC=True)
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, LTXTAcorrY, XTAcorrXL, LTXTAcorrXL\
= self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X0TX0, X0TDX0,
X0TFX0, XTX0, XTDX0, XTFX0, X0TY,
X0TDY, X0TFY,
estU_chlsk_l_AR1_UV,
est_rho1_AR1_UV, n_V, n_X0)
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
= self._calc_LL(est_rho1_AR1_UV, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, est_SNR_AR1_UV**2,
n_V, n_T, n_run, rank, n_X0)
est_sigma_AR1_UV = sigma2**0.5
est_beta_AR1_UV = est_SNR_AR1_UV**2 \
* np.dot(estU_chlsk_l_AR1_UV, YTAcorrXL_LAMBDA.T)
est_beta_AR1_UV_latent = \
est_SNR_AR1_UV**2 * YTAcorrXL_LAMBDA.T
# the latent term means that X*L multiplied by this term
# is the same as X*beta. This will be used for decoding
# and cross-validating, in case L is low-rank
est_beta0_AR1_UV = np.einsum(
'ijk,ki->ji', X0TAX0_i,
(X0TAY - np.einsum('ikj,ki->ji', XTAX0, est_beta_AR1_UV)))
# Now we want to collapse all beta0 corresponding to DC components
# of different runs to a single map, and preserve only one DC component
# across runs. This is because they should express the same component
# and the new data to transform do not necessarily have the same
# number of runs as the training data.
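# For example (illustrative): with 3 runs, idx_DC could be [0, 1, 2];
# the three run-wise DC columns of X0 are then summed into a single
# column placed first, and the corresponding three rows of beta0 are
# averaged into one row.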
if idx_DC.size > 1:
collapsed_DC = np.sum(X0[:, idx_DC], axis=1)
X0 = np.insert(np.delete(X0, idx_DC, axis=1), 0,
collapsed_DC, axis=1)
collapsed_beta0 = np.mean(est_beta0_AR1_UV[idx_DC, :], axis=0)
est_beta0_AR1_UV = np.insert(
np.delete(est_beta0_AR1_UV, idx_DC, axis=0),
0, collapsed_beta0, axis=0)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
logger.debug('final GP parameters:{}'.format(current_GP))
if GP_space:
est_space_smooth_r = np.exp(current_GP[0] / 2.0)
if GP_inten:
est_intensity_kernel_r = np.exp(current_GP[1] / 2.0)
K_major = np.exp(- (dist2 / est_space_smooth_r**2 +
inten_diff2 / est_intensity_kernel_r**2)
/ 2.0)
else:
est_intensity_kernel_r = None
K_major = np.exp(- dist2 / est_space_smooth_r**2 / 2.0)
K = K_major + np.diag(np.ones(n_V) * self.eta)
invK_tilde_log_SNR = np.linalg.solve(K, current_logSNR2) / 2
log_SNR_invK_tilde_log_SNR = np.dot(current_logSNR2,
invK_tilde_log_SNR) / 2
tau2, _ = self.tau2_prior(log_SNR_invK_tilde_log_SNR, n_V,
self.tau_range)
est_std_log_SNR = tau2 ** 0.5
else:
est_space_smooth_r = None
est_intensity_kernel_r = None
est_std_log_SNR = None
return est_cov_AR1_UV, estU_chlsk_l_AR1_UV, est_SNR_AR1_UV, \
est_beta_AR1_UV, est_beta0_AR1_UV, est_beta_AR1_UV_latent,\
est_sigma_AR1_UV, est_rho1_AR1_UV, est_space_smooth_r, \
est_std_log_SNR, est_intensity_kernel_r, X0
def _transform(self, Y, scan_onsets, beta, beta0,
rho_e, sigma_e, rho_X, sigma2_X, rho_X0, sigma2_X0):
""" Given the data Y and the response amplitudes beta and beta0
estimated in the fit step, estimate the corresponding X and X0.
It is done by a forward-backward algorithm.
We assume X and X0 both are vector autoregressive (VAR)
processes, to capture temporal smoothness. Their VAR
parameters are estimated from training data at the fit stage.
"""
logger.info('Transforming new data.')
# Constructing the transition matrix and the variance of
# innovation noise as prior for the latent variable X and X0
# in new data.
n_C = beta.shape[0]
n_T = Y.shape[0]
weight = np.concatenate((beta, beta0), axis=0)
T_X = np.diag(np.concatenate((rho_X, rho_X0)))
Var_X = np.concatenate((sigma2_X / (1 - rho_X**2),
sigma2_X0 / (1 - rho_X0**2)))
Var_dX = np.concatenate((sigma2_X, sigma2_X0))
sigma2_e = sigma_e ** 2
scan_onsets = np.setdiff1d(scan_onsets, n_T)
n_scan = scan_onsets.size
X = [None] * scan_onsets.size
X0 = [None] * scan_onsets.size
total_log_p = 0
for scan, onset in enumerate(scan_onsets):
# Forward step
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, \
Lambda_1, H, deltaY, deltaY_sigma2inv_rho_weightT = \
self._forward_step(Y[onset:offset, :],
T_X, Var_X, Var_dX, rho_e, sigma2_e,
weight)
total_log_p += log_p_data
# Backward step
mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat \
= self._backward_step(
deltaY, deltaY_sigma2inv_rho_weightT, sigma2_e,
weight, mu, mu_Gamma_inv, Gamma_inv,
Lambda_0, Lambda_1, H)
X[scan] = np.concatenate(
[mu_t[None, :n_C] for mu_t in mu_hat])
X0[scan] = np.concatenate(
[mu_t[None, n_C:] for mu_t in mu_hat])
X = np.concatenate(X)
X0 = np.concatenate(X0)
return X, X0, total_log_p
def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e,
rho_X0, sigma2_X0):
""" Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating the model on new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetical response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized.
"""
logger.info('Estimating cross-validated score for new data.')
n_T = Y.shape[0]
if design is not None:
Y = Y - np.dot(design, beta)
# The function works for both full model and null model.
# If design matrix is not provided, the whole data is
# used as input for _forward_step. If design matrix is provided,
# residual after subtracting design * beta is fed to _forward_step
T_X = np.diag(rho_X0)
Var_X = sigma2_X0 / (1 - rho_X0**2)
Var_dX = sigma2_X0
# Prior parameters for X0: T_X is the transition matrix, Var_X
# is the marginal variance of the first time point. Var_dX is the
# variance of the updating noise.
sigma2_e = sigma_e ** 2
# variance of voxel-specific updating noise component
scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
n_scan = scan_onsets.size
total_log_p = 0
for scan, onset in enumerate(scan_onsets):
# Forward step
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
_, _, _, log_p_data, _, _, _, _, _ = \
self._forward_step(
Y[onset:offset, :], T_X, Var_X, Var_dX, rho_e, sigma2_e,
beta0)
total_log_p += log_p_data
return total_log_p
def _est_AR1(self, x, same_para=False):
""" Estimate the AR(1) parameters of input x.
Each column of x is assumed as independent from other columns,
and each column is treated as an AR(1) process.
If same_para is set as True, then all columns of x
are concatenated and a single set of AR(1) parameters
is estimated. Strictly speaking, the breaking points
between concatenated columns should be considered.
But for long time series, this is ignored.
"""
if same_para:
n_c = x.shape[1]
x = np.reshape(x, x.size, order='F')
rho, sigma2 = alg.AR_est_YW(x, 1)
# We concatenate all the design matrix to estimate common AR(1)
# parameters. This creates some bias because the end of one column
# and the beginning of the next column of the design matrix are
# treated as consecutive samples.
rho = np.ones(n_c) * rho
sigma2 = np.ones(n_c) * sigma2
else:
rho = np.zeros(np.shape(x)[1])
sigma2 = np.zeros(np.shape(x)[1])
for c in np.arange(np.shape(x)[1]):
rho[c], sigma2[c] = alg.AR_est_YW(x[:, c], 1)
return rho, sigma2
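# Illustrative usage with simulated data (a sketch; assumes alg refers
# to nitime's algorithms module as imported at the top of this file):
#
#     T, rho_true = 2000, 0.5
#     x = np.zeros((T, 2))
#     innovation = np.random.randn(T, 2)
#     for t in range(1, T):
#         x[t] = rho_true * x[t - 1] + innovation[t]
#     rho_hat, sigma2_hat = self._est_AR1(x)
#     # rho_hat should be close to [0.5, 0.5], sigma2_hat close to 1.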
def _forward_step(self, Y, T_X, Var_X, Var_dX, rho_e, sigma2_e, weight):
""" forward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value.
"""
# We currently only implement diagonal form
# of covariance matrix for Var_X, Var_dX and T_X, which means
# each dimension of X is independent and their innovation noise
# are also independent. Note that log_p_data takes this assumption.
if Var_X.ndim == 1:
inv_Var_X = np.diag(1 / Var_X)
half_log_det_Var_X = np.sum(np.log(Var_X)) / 2.0
Var_X = np.diag(Var_X)
# the marginal variance of X
else:
half_log_det_Var_X = self._half_log_det(Var_X)
inv_Var_X = np.linalg.inv(Var_X)
if Var_dX.ndim == 1:
inv_Var_dX = np.diag(1 / Var_dX)
half_log_det_Var_dX = np.sum(np.log(Var_dX)) / 2.0
Var_dX = np.diag(Var_dX)
# the marginal variance of Delta X (the change of X from
# previous time point)
else:
inv_Var_dX = np.linalg.inv(Var_dX)
half_log_det_Var_dX = self._half_log_det(Var_dX)
if T_X.ndim == 1:
T_X = np.diag(T_X)
# Transfer function of X: the expected mean of X at t+1
# time point is T_x * X
[n_T, n_V] = np.shape(Y)
# numbers of time points and voxels
mu = [None] * n_T
# posterior mean of X, conditioned on all data up till the current
# time point
Gamma_inv = [None] * n_T
# inverse of posterior Gamma.
mu_Gamma_inv = [None] * n_T
# mu * inv(Gamma)
log_p_data = - np.log(np.pi * 2) * (n_T * n_V) / 2 \
- half_log_det_Var_X - np.sum(np.log(sigma2_e)) * n_T / 2.0\
+ np.sum(np.log(1 - rho_e**2)) / 2.0 - half_log_det_Var_dX \
* (n_T - 1)
# This is the term to be incremented by c_n at each time step.
# We first add all the fixed terms to it.
# The following are a few fixed terms.
Lambda_0 = np.dot(T_X, np.dot(inv_Var_dX, T_X.T)) \
+ np.dot(weight * rho_e**2 / sigma2_e, weight.T)
H = np.dot(inv_Var_dX, T_X.T) + np.dot(weight * rho_e / sigma2_e,
weight.T)
Lambda_1 = inv_Var_dX + np.dot(weight / sigma2_e, weight.T)
Gamma_inv[0] = inv_Var_X + np.dot(
weight * (1 - rho_e**2) / sigma2_e, weight.T)
# We might not need this and only use linalg.solve for related terms.
mu_Gamma_inv[0] = np.dot(
Y[0, :] * (1 - rho_e**2) / sigma2_e, weight.T)
mu[0] = np.linalg.solve(Gamma_inv[0], mu_Gamma_inv[0])
log_p_data -= 0.5 * np.sum(Y[0, :]**2 * (1 - rho_e**2) / sigma2_e)
# This is the term added for the first time point.
deltaY = Y[1:, :] - rho_e * Y[:-1, :]
deltaY_sigma2inv_rho_weightT = np.dot(
deltaY / sigma2_e * rho_e, weight.T)
for t in np.arange(1, n_T):
Gamma_tilde_inv = Lambda_0 + Gamma_inv[t - 1]
tmp = np.linalg.solve(Gamma_tilde_inv, H.T)
Gamma_inv[t] = Lambda_1 - np.dot(H, tmp)
mu_Gamma_inv[t] = np.dot(deltaY[t - 1, :] / sigma2_e, weight.T) \
+ np.dot(mu_Gamma_inv[t - 1]
- deltaY_sigma2inv_rho_weightT[t - 1, :], tmp)
mu[t] = np.linalg.solve(Gamma_inv[t], mu_Gamma_inv[t])
tmp2 = mu_Gamma_inv[t - 1] - deltaY_sigma2inv_rho_weightT[t - 1, :]
log_p_data += -self._half_log_det(Gamma_tilde_inv) \
+ np.dot(tmp2, np.linalg.solve(Gamma_tilde_inv, tmp2)) / 2.0
log_p_data += -self._half_log_det(Gamma_inv[-1]) \
+ np.dot(mu_Gamma_inv[-1], mu[-1]) / 2.0 \
- np.sum(deltaY**2 / sigma2_e) / 2.0
return mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, \
Lambda_1, H, deltaY, deltaY_sigma2inv_rho_weightT
def _backward_step(self, deltaY, deltaY_sigma2inv_rho_weightT,
sigma2_e, weight, mu, mu_Gamma_inv, Gamma_inv,
Lambda_0, Lambda_1, H):
""" backward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value.
"""
n_T = len(Gamma_inv)
# All the terms with hat before are parameters of posterior
# distributions of X conditioned on data from all time points,
# whereas the ones without hat, calculated by _forward_step,
# are the mean and covariance of the posterior of X conditioned
# on data up to the current time point.
Gamma_inv_hat = [None] * n_T
mu_Gamma_inv_hat = [None] * n_T
mu_hat = [None] * n_T
mu_hat[-1] = mu[-1].copy()
mu_Gamma_inv_hat[-1] = mu_Gamma_inv[-1].copy()
Gamma_inv_hat[-1] = Gamma_inv[-1].copy()
for t in np.arange(n_T - 2, -1, -1):
tmp = np.linalg.solve(Gamma_inv_hat[t + 1] - Gamma_inv[t + 1]
+ Lambda_1, H)
Gamma_inv_hat[t] = Gamma_inv[t] + Lambda_0 - np.dot(H.T, tmp)
mu_Gamma_inv_hat[t] = mu_Gamma_inv[t] \
- deltaY_sigma2inv_rho_weightT[t, :] + np.dot(
mu_Gamma_inv_hat[t + 1] - mu_Gamma_inv[t + 1]
+ np.dot(deltaY[t, :] / sigma2_e, weight.T), tmp)
mu_hat[t] = np.linalg.solve(Gamma_inv_hat[t],
mu_Gamma_inv_hat[t])
return mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat
def _initial_fit_singpara(self, XTX, XTDX, XTFX,
YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
X, Y, X0, idx_param_sing, l_idx,
n_C, n_T, n_V, n_l, n_run, n_X0, rank):
""" Perform initial fitting of a simplified model, which assumes
that all voxels share exactly the same temporal covariance
matrix for their noise (the same noise variance and
auto-correlation). The SNR is implicitly assumed to be 1
for all voxels.
"""
logger.info('Initial fitting assuming a single set of noise '
'parameters for all voxels')
X_joint = np.concatenate((X0, X), axis=1)
beta_hat = np.linalg.lstsq(X_joint, Y, rcond=None)[0]
residual = Y - np.dot(X_joint, beta_hat)
# point estimates of betas and fitting residuals without assuming
# the Bayesian model underlying RSA.
# There are several possible ways of initializing the covariance.
# (1) start from the point estimation of covariance
cov_point_est = np.cov(beta_hat[n_X0:, :]) / np.var(residual)
current_vec_U_chlsk_l = \
np.linalg.cholesky((cov_point_est + np.eye(n_C)) / 2)[l_idx]
# We use the average of covariance of point estimation and an identity
# matrix as the initial value of the covariance matrix, just in case
# the user provides data in which n_V is smaller than n_C.
# (2) start from identity matrix
# current_vec_U_chlsk_l = np.eye(n_C)[l_idx]
# (3) random initialization
# current_vec_U_chlsk_l = self.random_state_.randn(n_l)
# vectorized version of L, Cholesky factor of U, the shared
# covariance matrix of betas across voxels.
rho1 = np.sum(
residual[0:-1, :] * residual[1:, :], axis=0) / \
np.sum(residual[0:-1, :] * residual[0:-1, :], axis=0)
# Estimate of auto correlation assuming data includes pure noise.
log_sigma2 = np.log(np.var(
residual[1:, :] - residual[0:-1, :] * rho1, axis=0))
# log of estimates of the variance of the "innovation" noise
# of AR(1) process at each time point.
param0 = np.empty(np.sum(np.fromiter(
(np.size(v) for v in idx_param_sing.values()), int)))
# Initial parameter
# Then we fill each part of the original guess of parameters
param0[idx_param_sing['Cholesky']] = current_vec_U_chlsk_l
param0[idx_param_sing['a1']] = np.median(np.tan(rho1 * np.pi / 2))
# Fit it.
res = scipy.optimize.minimize(
self._loglike_AR1_singpara, param0,
args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_sing, rank),
method=self.optimizer, jac=True, tol=self.tol,
options={'disp': self.minimize_options['disp'],
'maxiter': 100})
current_vec_U_chlsk_l = res.x[idx_param_sing['Cholesky']]
current_a1 = res.x[idx_param_sing['a1']] * np.ones(n_V)
# log(sigma^2) assuming the data include no signal is returned,
# as a starting point for the iteration in the next step.
# Although it should overestimate the variance,
# setting it this way might allow it to track log(sigma^2)
# more closely for each voxel.
return current_vec_U_chlsk_l, current_a1, log_sigma2
def _fit_diagV_noGP(
self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2,
idx_param_fitU, idx_param_fitV,
l_idx, n_C, n_T, n_V, n_l, n_run, n_X0, rank):
""" (optional) second step of fitting, full model but without
GP prior on log(SNR). This step is only done if GP prior
is requested.
"""
init_iter = self.init_iter
logger.info('Second fitting without GP prior'
' for {} iterations'.format(init_iter))
# Initial parameters
param0_fitU = np.empty(np.sum(np.fromiter(
(np.size(v) for v in idx_param_fitU.values()), int)))
param0_fitV = np.empty(np.size(idx_param_fitV['log_SNR2']))
# We cannot use the same logic as the line above because
# idx_param_fitV also includes entries for GP parameters.
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l.copy()
param0_fitU[idx_param_fitU['a1']] = current_a1.copy()
param0_fitV[idx_param_fitV['log_SNR2']] = \
current_logSNR2[:-1].copy()
L = np.zeros((n_C, rank))
tol = self.tol * 5
for it in range(0, init_iter):
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, _ \
= self._prepare_data_XYX0(
X, Y, X_base, X_res, D, F, run_TRs, no_DC=True)
# fit U, the covariance matrix, together with AR(1) param
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l \
+ self.random_state_.randn(n_l) \
* np.linalg.norm(current_vec_U_chlsk_l) \
/ n_l**0.5 * np.exp(-it / init_iter * self.anneal_speed - 1)
param0_fitU[idx_param_fitU['a1']] = current_a1
res_fitU = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitU, param0_fitU,
args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
current_logSNR2, l_idx, n_C,
n_T, n_V, n_run, n_X0, idx_param_fitU, rank),
method=self.optimizer, jac=True, tol=tol,
options=self.minimize_options)
current_vec_U_chlsk_l = \
res_fitU.x[idx_param_fitU['Cholesky']]
current_a1 = res_fitU.x[idx_param_fitU['a1']]
norm_fitUchange = np.linalg.norm(res_fitU.x - param0_fitU)
logger.debug('norm of parameter change after fitting U: '
'{}'.format(norm_fitUchange))
param0_fitU = res_fitU.x.copy()
# fit V, reflected in the log(SNR^2) of each voxel
rho1 = np.arctan(current_a1) * 2 / np.pi
L[l_idx] = current_vec_U_chlsk_l
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, \
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0,
X0TY, X0TDY, X0TFY,
L, rho1, n_V, n_X0)
res_fitV = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitV, param0_fitV,
args=(X0TAX0, XTAX0, X0TAY,
X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY,
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL,
current_vec_U_chlsk_l,
current_a1, l_idx, n_C, n_T, n_V, n_run,
n_X0, idx_param_fitV, rank,
False, False),
method=self.optimizer, jac=True, tol=tol,
options=self.minimize_options)
current_logSNR2[0:n_V - 1] = res_fitV.x
current_logSNR2[-1] = - np.sum(current_logSNR2[0:n_V - 1])
norm_fitVchange = np.linalg.norm(res_fitV.x - param0_fitV)
logger.debug('norm of parameter change after fitting V: '
'{}'.format(norm_fitVchange))
logger.debug('E[log(SNR2)^2]: {}'.format(
np.mean(current_logSNR2**2)))
# The lines below are for debugging purpose.
# If any voxel's log(SNR^2) gets to non-finite number,
# something might be wrong -- could be that the data has
# nothing to do with the design matrix.
if np.any(np.logical_not(np.isfinite(current_logSNR2))):
logger.warning('Initial fitting: iteration {}'.format(it))
logger.warning('current log(SNR^2): '
'{}'.format(current_logSNR2))
logger.warning('log(sigma^2) has non-finite number')
param0_fitV = res_fitV.x.copy()
# Re-estimating X_res from residuals
current_SNR2 = np.exp(current_logSNR2)
if self.auto_nuisance:
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, current_sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, current_SNR2,
n_V, n_T, n_run, rank, n_X0)
betas = current_SNR2 * np.dot(L, YTAcorrXL_LAMBDA.T)
beta0s = np.einsum(
'ijk,ki->ji', X0TAX0_i,
(X0TAY - np.einsum('ikj,ki->ji', XTAX0, betas)))
residuals = Y - np.dot(X, betas) - np.dot(
X_base, beta0s[:np.shape(X_base)[1], :])
X_res = self.nureg_method(
self.n_nureg_).fit_transform(
self.preprocess_residual(residuals))
if norm_fitVchange / np.sqrt(param0_fitV.size) < tol \
and norm_fitUchange / np.sqrt(param0_fitU.size) \
< tol:
break
return current_vec_U_chlsk_l, current_a1, current_logSNR2, X_res
def _fit_diagV_GP(
self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X, Y, X_base, X_res, D, F, run_TRs,
current_vec_U_chlsk_l,
current_a1, current_logSNR2, current_GP, n_smooth,
idx_param_fitU, idx_param_fitV, l_idx,
n_C, n_T, n_V, n_l, n_run, n_X0, rank, GP_space, GP_inten,
dist2, inten_diff2, space_smooth_range, inten_smooth_range):
""" Last step of fitting. If GP is not requested, this step will
still be done, just without GP prior on log(SNR).
"""
tol = self.tol
n_iter = self.n_iter
logger.info('Last step of fitting,'
' for a maximum of {} iterations'.format(n_iter))
# Initial parameters
param0_fitU = np.empty(np.sum(np.fromiter(
(np.size(v) for v in idx_param_fitU.values()), int)))
param0_fitV = np.empty(np.size(idx_param_fitV['log_SNR2'])
+ np.size(idx_param_fitV['c_both']))
# We cannot use the same logic as the line above because
# idx_param_fitV also includes entries for GP parameters.
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l.copy()
param0_fitU[idx_param_fitU['a1']] = current_a1.copy()
param0_fitV[idx_param_fitV['log_SNR2']] = \
current_logSNR2[:-1].copy()
L = np.zeros((n_C, rank))
L[l_idx] = current_vec_U_chlsk_l
if self.GP_space:
param0_fitV[idx_param_fitV['c_both']] = current_GP.copy()
for it in range(0, n_iter):
X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, _ = \
self._prepare_data_XYX0(
X, Y, X_base, X_res, D, F, run_TRs, no_DC=True)
# fit U
param0_fitU[idx_param_fitU['Cholesky']] = \
current_vec_U_chlsk_l \
+ self.random_state_.randn(n_l) \
* np.linalg.norm(current_vec_U_chlsk_l) \
/ n_l**0.5 * np.exp(-it / n_iter * self.anneal_speed - 1)
param0_fitU[idx_param_fitU['a1']] = current_a1
res_fitU = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitU, param0_fitU,
args=(XTX, XTDX, XTFX, YTY_diag, YTDY_diag, YTFY_diag,
XTY, XTDY, XTFY, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
current_logSNR2, l_idx, n_C, n_T, n_V,
n_run, n_X0, idx_param_fitU, rank),
method=self.optimizer, jac=True,
tol=tol,
options=self.minimize_options)
current_vec_U_chlsk_l = \
res_fitU.x[idx_param_fitU['Cholesky']]
current_a1 = res_fitU.x[idx_param_fitU['a1']]
L[l_idx] = current_vec_U_chlsk_l
fitUchange = res_fitU.x - param0_fitU
norm_fitUchange = np.linalg.norm(fitUchange)
logger.debug('norm of parameter change after fitting U: '
'{}'.format(norm_fitUchange))
param0_fitU = res_fitU.x.copy()
# fit V
rho1 = np.arctan(current_a1) * 2 / np.pi
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, \
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0,
X0TY, X0TDY, X0TFY,
L, rho1, n_V, n_X0)
res_fitV = scipy.optimize.minimize(
self._loglike_AR1_diagV_fitV, param0_fitV, args=(
X0TAX0, XTAX0, X0TAY, X0TAX0_i,
XTAcorrX, XTAcorrY, YTAcorrY,
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL,
current_vec_U_chlsk_l, current_a1,
l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_fitV, rank,
GP_space, GP_inten, dist2, inten_diff2,
space_smooth_range, inten_smooth_range),
method=self.optimizer, jac=True,
tol=tol,
options=self.minimize_options)
current_logSNR2[0:n_V - 1] = \
res_fitV.x[idx_param_fitV['log_SNR2']]
current_logSNR2[n_V - 1] = -np.sum(current_logSNR2[0:n_V - 1])
current_GP = res_fitV.x[idx_param_fitV['c_both']]
fitVchange = res_fitV.x - param0_fitV
norm_fitVchange = np.linalg.norm(fitVchange)
param0_fitV = res_fitV.x.copy()
logger.debug('norm of parameter change after fitting V: '
'{}'.format(norm_fitVchange))
logger.debug('E[log(SNR2)^2]: {}'.format(
np.mean(current_logSNR2**2)))
# Re-estimating X_res from residuals
current_SNR2 = np.exp(current_logSNR2)
if self.auto_nuisance:
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, current_sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, current_SNR2,
n_V, n_T, n_run, rank, n_X0)
betas = current_SNR2 \
* np.dot(L, YTAcorrXL_LAMBDA.T)
beta0s = np.einsum(
'ijk,ki->ji', X0TAX0_i,
(X0TAY - np.einsum('ikj,ki->ji', XTAX0, betas)))
residuals = Y - np.dot(X, betas) - np.dot(
X_base, beta0s[:np.shape(X_base)[1], :])
X_res = self.nureg_method(self.n_nureg_).fit_transform(
self.preprocess_residual(residuals))
if GP_space:
logger.debug('current GP[0]: {}'.format(current_GP[0]))
logger.debug('gradient for GP[0]: {}'.format(
res_fitV.jac[idx_param_fitV['c_space']]))
if GP_inten:
logger.debug('current GP[1]: {}'.format(current_GP[1]))
logger.debug('gradient for GP[1]: {}'.format(
res_fitV.jac[idx_param_fitV['c_inten']]))
if np.max(np.abs(fitVchange)) < tol and \
np.max(np.abs(fitUchange)) < tol:
break
return current_vec_U_chlsk_l, current_a1, current_logSNR2,\
current_GP, X_res
def _fit_null(self, Y, X_base, scan_onsets=None):
""" Fit a null model.
"""
n_V = np.size(Y, axis=1)
n_T = np.size(Y, axis=0)
t_start = time.time()
D, F, run_TRs, n_run = self._prepare_DF(
n_T, scan_onsets=scan_onsets)
YTY_diag = np.sum(Y * Y, axis=0)
YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0)
YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0)
tol = self.tol
n_iter = self.n_iter
logger.info('Fitting null model'
' for a maximum of {} iterations'.format(n_iter))
# Add DC components capturing run-specific baselines.
X_DC = self._gen_X_DC(run_TRs)
X_DC, X_base, idx_DC = self._merge_DC_to_base(
X_DC, X_base, no_DC=False)
X_res = None
param0 = np.zeros(n_V)
for it in range(0, n_iter):
if X_res is None:
X0 = X_base
else:
X0 = np.concatenate((X_base, X_res), axis=1)
n_X0 = X0.shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
res_null = scipy.optimize.minimize(
self._loglike_AR1_null, param0, args=(
YTY_diag, YTDY_diag, YTFY_diag,
X0TX0, X0TDX0, X0TFX0, X0TY, X0TDY, X0TFY,
n_T, n_V, n_run, n_X0),
method=self.optimizer, jac=True, tol=tol,
options=self.minimize_options)
param_change = res_null.x - param0
param0 = res_null.x.copy()
est_rho1_AR1_null = 2.0 / np.pi * np.arctan(param0)
if self.auto_nuisance:
X0TAX0 = X0TX0[None, :, :] \
- est_rho1_AR1_null[:, None, None] \
* X0TDX0[None, :, :] \
+ est_rho1_AR1_null[:, None, None]**2 \
* X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY,
est_rho1_AR1_null)
# dimension: #baseline*space
beta0s = np.linalg.solve(X0TAX0, X0TAY.T).T
residuals = Y - np.dot(X_base, beta0s[:np.shape(X_base)[1], :])
X_res = self.nureg_method(self.n_nureg_).fit_transform(
self.preprocess_residual(residuals))
if np.max(np.abs(param_change)) < self.tol:
logger.info('The change of parameters is smaller than '
'the tolerance value {}. Fitting is finished '
'after {} iterations'.format(self.tol, it + 1))
break
X0TAX0 = X0TX0[None, :, :] \
- est_rho1_AR1_null[:, None, None] \
* X0TDX0[None, :, :] \
+ est_rho1_AR1_null[:, None, None]**2 \
* X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY,
est_rho1_AR1_null)
# dimension: #baseline*space
est_beta0_AR1_null = np.linalg.solve(X0TAX0, X0TAY.T).T
YTAY = self._make_ar1_quad_form(YTY_diag, YTDY_diag, YTFY_diag,
est_rho1_AR1_null)
# dimension: space,
YTAcorrY = YTAY - np.sum(X0TAY * est_beta0_AR1_null, axis=0)
# dimension: space,
est_sigma_AR1_null = (YTAcorrY / (n_T - n_X0)) ** 0.5
if idx_DC.size > 1:
collapsed_DC = np.sum(X0[:, idx_DC], axis=1)
X0 = np.insert(np.delete(X0, idx_DC, axis=1), 0,
collapsed_DC, axis=1)
collapsed_beta0 = np.mean(est_beta0_AR1_null[idx_DC, :], axis=0)
est_beta0_AR1_null = np.insert(
np.delete(est_beta0_AR1_null, idx_DC, axis=0),
0, collapsed_beta0, axis=0)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
return est_beta0_AR1_null, est_sigma_AR1_null, est_rho1_AR1_null, X0
# We fit two parts of the parameters iteratively.
# The following are the corresponding negative log likelihood functions.
def _loglike_AR1_diagV_fitU(self, param, XTX, XTDX, XTFX, YTY_diag,
YTDY_diag, YTFY_diag, XTY, XTDY, XTFY,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
log_SNR2, l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_fitU, rank):
# This function calculates the log likelihood of data given cholesky
# decomposition of U and AR(1) parameters of noise as free parameters.
# Free parameters are in param.
# The log of the square of signal to noise level in each voxel
# (the ratio of the diagonal elements in V and
# the noise variance) are fixed. This likelihood is iteratively
# optimized with the one with suffix _fitV.
#
# The meaning of U and V follows this wiki page on the matrix normal
# distribution:
# https://en.wikipedia.org/wiki/Matrix_normal_distribution
#
# We assume the betas of all voxels, as a matrix, follow this
# distribution. U describes the covariance between conditions.
# V describes the covariance between voxels.
#
# In this version, we assume that beta is independent between voxels
# and noise is also independent.
# By the assumption that noise is independent, we only need to pass
# the products X'X, X'Y and Y'Y, instead of X and Y
# Y'Y is passed in the form of its diagonal elements.
# DiagV means we assume that the variance of beta can be different
# between voxels. This means that V is a diagonal matrix instead of
# an identity matrix. The parameter includes the lower triangular
# part of the cholesky decomposition
# of U (flattened), then tan(rho1*pi/2) where rho1 is
# each voxel's autoregressive coefficient (assuming an AR(1) model).
# Such a parametrization avoids the need for bound constraints
# on the parameters.
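# For example, a1 = 0 maps to rho1 = 0, and a1 -> +/-inf maps to
# rho1 -> +/-1, so any real-valued a1 yields a valid AR(1) coefficient.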
L = np.zeros([n_C, rank])
# lower triangular matrix L, Cholesky factor of U
L[l_idx] = param[idx_param_fitU['Cholesky']]
a1 = param[idx_param_fitU['a1']]
rho1 = 2.0 / np.pi * np.arctan(a1) # auto-regressive coefficients
SNR2 = np.exp(log_SNR2)
# each element of SNR2 is the ratio of the diagonal element on V
# to the variance of the fresh noise in that voxel
X0TAX0, XTAX0, X0TAY, X0TAX0_i, \
XTAcorrX, XTAcorrY, YTAcorrY, \
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL = \
self._precompute_ar1_quad_forms(XTY, XTDY, XTFY,
YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X0TX0, X0TDX0,
X0TFX0, XTX0, XTDX0, XTFX0,
X0TY, X0TDY, X0TFY,
L, rho1, n_V, n_X0)
# Only starting from this point, SNR2 is involved
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY,
X0TAX0, SNR2, n_V, n_T, n_run, rank, n_X0)
if not np.isfinite(LL):
logger.warning('NaN detected!')
logger.warning('LL: {}'.format(LL))
logger.warning('sigma2: {}'.format(sigma2))
logger.warning('YTAcorrY: {}'.format(YTAcorrY))
logger.warning('LTXTAcorrY: {}'.format(LTXTAcorrY))
logger.warning('YTAcorrXL_LAMBDA: {}'.format(YTAcorrXL_LAMBDA))
logger.warning('SNR2: {}'.format(SNR2))
YTAcorrXL_LAMBDA_LT = np.dot(YTAcorrXL_LAMBDA, L.T)
# dimension: space*feature (feature can be larger than rank)
deriv_L = -np.einsum('ijk,ikl,i', XTAcorrXL, LAMBDA, SNR2) \
- np.dot(np.einsum('ijk,ik->ji', XTAcorrXL, YTAcorrXL_LAMBDA)
* SNR2**2 / sigma2, YTAcorrXL_LAMBDA) \
+ np.dot(XTAcorrY / sigma2 * SNR2, YTAcorrXL_LAMBDA)
# dimension: feature*rank
# The following are for calculating the derivative to a1
deriv_a1 = np.empty(n_V)
dXTAX_drho1 = -XTDX + 2 * rho1[:, None, None] * XTFX
# dimension: space*feature*feature
dXTAY_drho1 = self._make_ar1_quad_form_grad(XTDY, XTFY, rho1)
# dimension: feature*space
dYTAY_drho1 = self._make_ar1_quad_form_grad(YTDY_diag, YTFY_diag, rho1)
# dimension: space,
dX0TAX0_drho1 = - X0TDX0 \
+ 2 * rho1[:, None, None] * X0TFX0
# dimension: space*rank*rank
dXTAX0_drho1 = - XTDX0 \
+ 2 * rho1[:, None, None] * XTFX0
# dimension: space*feature*rank
dX0TAY_drho1 = self._make_ar1_quad_form_grad(X0TDY, X0TFY, rho1)
# dimension: rank*space
# The following are executed for each voxel.
for i_v in range(n_V):
# All variables with _ele as suffix are for data of just one voxel
invX0TAX0_X0TAX_ele = np.dot(X0TAX0_i[i_v, :, :],
XTAX0[i_v, :, :].T)
invX0TAX0_X0TAY_ele = np.dot(X0TAX0_i[i_v, :, :], X0TAY[:, i_v])
dXTAX0_drho1_invX0TAX0_X0TAX_ele = np.dot(dXTAX0_drho1[i_v, :, :],
invX0TAX0_X0TAX_ele)
# preparation for the variable below
dXTAcorrX_drho1_ele = dXTAX_drho1[i_v, :, :] \
- dXTAX0_drho1_invX0TAX0_X0TAX_ele \
- dXTAX0_drho1_invX0TAX0_X0TAX_ele.T \
+ np.dot(np.dot(invX0TAX0_X0TAX_ele.T,
dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAX_ele)
dXTAcorrY_drho1_ele = dXTAY_drho1[:, i_v] \
- np.dot(invX0TAX0_X0TAX_ele.T, dX0TAY_drho1[:, i_v]) \
- np.dot(dXTAX0_drho1[i_v, :, :], invX0TAX0_X0TAY_ele) \
+ np.dot(np.dot(invX0TAX0_X0TAX_ele.T,
dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAY_ele)
dYTAcorrY_drho1_ele = dYTAY_drho1[i_v] \
- np.dot(dX0TAY_drho1[:, i_v], invX0TAX0_X0TAY_ele) * 2\
+ np.dot(np.dot(invX0TAX0_X0TAY_ele, dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAY_ele)
deriv_a1[i_v] = 2 / np.pi / (1 + a1[i_v]**2) \
* (- n_run * rho1[i_v] / (1 - rho1[i_v]**2)
- np.einsum('ij,ij', X0TAX0_i[i_v, :, :],
dX0TAX0_drho1[i_v, :, :]) * 0.5
- np.einsum('ij,ij', LAMBDA[i_v, :, :],
np.dot(np.dot(
L.T, dXTAcorrX_drho1_ele), L))
* (SNR2[i_v] * 0.5)
- dYTAcorrY_drho1_ele * 0.5 / sigma2[i_v]
+ SNR2[i_v] / sigma2[i_v]
* np.dot(dXTAcorrY_drho1_ele,
YTAcorrXL_LAMBDA_LT[i_v, :])
- (0.5 * SNR2[i_v]**2 / sigma2[i_v])
* np.dot(np.dot(YTAcorrXL_LAMBDA_LT[i_v, :],
dXTAcorrX_drho1_ele),
YTAcorrXL_LAMBDA_LT[i_v, :]))
deriv = np.empty(np.size(param))
deriv[idx_param_fitU['Cholesky']] = deriv_L[l_idx]
deriv[idx_param_fitU['a1']] = deriv_a1
return -LL, -deriv
def _loglike_AR1_diagV_fitV(self, param,
X0TAX0, XTAX0, X0TAY, X0TAX0_i,
XTAcorrX, XTAcorrY, YTAcorrY,
LTXTAcorrY, XTAcorrXL, LTXTAcorrXL,
L_l, a1, l_idx, n_C, n_T, n_V, n_run,
n_X0, idx_param_fitV, rank=None,
GP_space=False, GP_inten=False,
dist2=None, inten_dist2=None,
space_smooth_range=None,
inten_smooth_range=None):
# This function calculates the log likelihood of data given
# the log of the square of pseudo signal to noise ratio in each voxel.
# The free parameter log(SNR^2) is in param
# This likelihood is iteratively optimized with the one with _fitU.
# The cholesky factor of U and autoregressive coefficient
# in temporal AR(1) model for noise are fixed.
# Because the ML estimate of the variance of noise in each voxel
# (sigma^2) given other parameters has analytic form,
# we do not need to explicitly parametrize it.
# Just set it to the ML value.
#
# L_l is the lower triangular part of L, a1 is tan(rho1*pi/2),
# where rho1 is the autoregressive coefficient in each voxel
# We can optionally include a Gaussian Process prior on log(SNR).
# This term is not included in _fitU, because log(SNR)
# are fixed in _fitU.
# GP_space and GP_inten are Boolean, indicating whether we want to
# include GP kernels either on voxel coordinates or intensity.
# dist2 and inten_dist2 are the squares of spatial distances and
# intensity differences (both of shape [n_voxel x n_voxel]).
# space_smooth_range and inten_smooth_range are the ranges we believe
# the GP length scales should reside in. They are used in additional
# half-Cauchy priors to constrain these length scales.
n_l = np.size(l_idx[0])
# the number of parameters in the index of lower-triangular matrix
if rank is None:
rank = int((2 * n_C + 1 -
np.sqrt(n_C**2 * 4 + n_C * 4 + 1 - 8 * n_l)) / 2)
L = np.zeros([n_C, rank])
L[l_idx] = L_l
log_SNR2 = np.empty(n_V)
log_SNR2[0:n_V - 1] = param[idx_param_fitV['log_SNR2']]
log_SNR2[-1] = -np.sum(log_SNR2[0:n_V - 1])
# This follows the restriction that the SNRs have a geometric mean
# of 1. That is why they are called pseudo-SNR. This restriction
# is imposed because SNR and L are determined only up to a scale
# factor. Be cautious that during simulation, when there is
# absolutely no signal in the data, the fitting sometimes diverges,
# presumably because we have created correlation between log_SNR2
# entries due to the constraint. But I have not reproduced this often.
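# E.g., with n_V = 3 and free parameters [0.2, -0.5], the last entry
# becomes 0.3, so that log_SNR2 sums to zero and exp(log_SNR2) has a
# geometric mean of exactly 1.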
SNR2 = np.exp(log_SNR2)
# If requested, a GP prior is imposed on log(SNR).
rho1 = 2.0 / np.pi * np.arctan(a1)
# AR(1) coefficient, dimension: space
LL, LAMBDA_i, LAMBDA, YTAcorrXL_LAMBDA, sigma2 \
= self._calc_LL(rho1, LTXTAcorrXL, LTXTAcorrY, YTAcorrY, X0TAX0,
SNR2, n_V, n_T, n_run, rank, n_X0)
# Log likelihood of data given parameters, without the GP prior.
deriv_log_SNR2 = (-rank + np.trace(LAMBDA, axis1=1, axis2=2)) * 0.5\
+ np.sum(YTAcorrXL_LAMBDA**2, axis=1) * SNR2 / sigma2 / 2
# Partial derivative of log likelihood over log(SNR^2)
# dimension: space,
# The second term above is due to the equation for calculating
# sigma2
if GP_space:
# Imposing GP prior on log(SNR) at least over
# spatial coordinates
c_space = param[idx_param_fitV['c_space']]
l2_space = np.exp(c_space)
# The square of the length scale of the GP kernel defined on
# the spatial coordinates of voxels
dl2_dc_space = l2_space
# partial derivative of l^2 with respect to c_space
if GP_inten:
c_inten = param[idx_param_fitV['c_inten']]
l2_inten = np.exp(c_inten)
# The square of the length scale of the GP kernel defined
# on the image intensity of voxels
dl2_dc_inten = l2_inten
# partial derivative of l^2 with respect to c_inten
K_major = np.exp(- (dist2 / l2_space
+ inten_dist2 / l2_inten)
/ 2.0)
else:
K_major = np.exp(- dist2 / l2_space / 2.0)
# The kernel defined over the spatial coordinates of voxels.
# This is a template: the diagonal values are all 1, meaning
# the variance of log(SNR) has not been multiplied in yet.
K_tilde = K_major + np.diag(np.ones(n_V) * self.eta)
# We add a small number to the diagonal to make sure the matrix
# is invertible.
# Note that K_tilde here is still a template:
# it is the correct K divided by the variance tau^2,
# so it does not depend on the variance of the GP.
L_K_tilde = np.linalg.cholesky(K_tilde)
inv_L_K_tilde = np.linalg.solve(L_K_tilde, np.identity(n_V))
inv_K_tilde = np.dot(inv_L_K_tilde.T, inv_L_K_tilde)
log_det_K_tilde = np.sum(np.log(np.diag(L_K_tilde)**2))
invK_tilde_log_SNR = np.dot(inv_K_tilde, log_SNR2) / 2
log_SNR_invK_tilde_log_SNR = np.dot(log_SNR2,
invK_tilde_log_SNR) / 2
# MAP estimate of the variance of the Gaussian Process given
# other parameters.
tau2, log_ptau = self.tau2_prior(log_SNR_invK_tilde_log_SNR, n_V,
self.tau_range)
# log_ptau is log(p(tau)) given the form of prior for tau
LL += log_ptau
# GP prior terms added to the log likelihood
LL = LL - log_det_K_tilde / 2.0 - n_V / 2.0 * np.log(tau2) \
- np.log(2 * np.pi) * n_V / 2.0 \
- log_SNR_invK_tilde_log_SNR / tau2 / 2
deriv_log_SNR2 -= invK_tilde_log_SNR / tau2 / 2.0
# Note that the derivative to log(SNR) is
# invK_tilde_log_SNR / tau2, but we are calculating the
# derivative to log(SNR^2)
dK_tilde_dl2_space = dist2 * (K_major) / 2.0 \
/ l2_space**2
deriv_c_space = \
(np.dot(np.dot(invK_tilde_log_SNR, dK_tilde_dl2_space),
invK_tilde_log_SNR) / tau2 / 2.0
- np.sum(inv_K_tilde * dK_tilde_dl2_space) / 2.0)\
* dl2_dc_space
# Prior on the length scales
LL += scipy.stats.halfcauchy.logpdf(
l2_space**0.5, scale=space_smooth_range)
deriv_c_space -= 1 / (l2_space + space_smooth_range**2)\
* dl2_dc_space
if GP_inten:
dK_tilde_dl2_inten = inten_dist2 * K_major \
/ 2.0 / l2_inten**2
deriv_c_inten = \
(np.dot(np.dot(invK_tilde_log_SNR, dK_tilde_dl2_inten),
invK_tilde_log_SNR) / tau2 / 2.0
- np.sum(inv_K_tilde * dK_tilde_dl2_inten) / 2.0)\
* dl2_dc_inten
# Prior on the length scale
LL += scipy.stats.halfcauchy.logpdf(
l2_inten**0.5, scale=inten_smooth_range)
deriv_c_inten -= 1 / (l2_inten + inten_smooth_range**2)\
* dl2_dc_inten
else:
LL += np.sum(scipy.stats.norm.logpdf(log_SNR2 / 2.0,
scale=self.tau_range))
# If GP prior is not requested, we still want to regularize on
# the magnitude of log(SNR).
deriv_log_SNR2 += - log_SNR2 / self.tau_range**2 / 4.0
deriv = np.empty(np.size(param))
deriv[idx_param_fitV['log_SNR2']] = \
deriv_log_SNR2[0:n_V - 1] - deriv_log_SNR2[n_V - 1]
if GP_space:
deriv[idx_param_fitV['c_space']] = deriv_c_space
if GP_inten:
deriv[idx_param_fitV['c_inten']] = deriv_c_inten
return -LL, -deriv
def _loglike_AR1_singpara(self, param, XTX, XTDX, XTFX, YTY_diag,
YTDY_diag, YTFY_diag, XTY, XTDY, XTFY,
X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
l_idx, n_C, n_T, n_V, n_run, n_X0,
idx_param_sing, rank=None):
# In this version, we assume that beta is independent
# between voxels and noise is also independent.
# The singpara version uses a single set of sigma^2 and rho1
# for all voxels. This serves as the initial fitting to get
# estimates of L, sigma^2 and rho1. The SNR is implicitly
# assumed to be 1.
n_l = np.size(l_idx[0])
# the number of parameters in the index of lower-triangular matrix
if rank is None:
rank = int((2 * n_C + 1
- np.sqrt(n_C**2 * 4 + n_C * 4 + 1 - 8 * n_l)) / 2)
L = np.zeros([n_C, rank])
L[l_idx] = param[idx_param_sing['Cholesky']]
a1 = param[idx_param_sing['a1']]
rho1 = 2.0 / np.pi * np.arctan(a1)
XTAX = XTX - rho1 * XTDX + rho1**2 * XTFX
X0TAX0 = X0TX0 - rho1 * X0TDX0 + rho1**2 * X0TFX0
XTAX0 = XTX0 - rho1 * XTDX0 + rho1**2 * XTFX0
XTAcorrX = XTAX - np.dot(XTAX0, np.linalg.solve(X0TAX0, XTAX0.T))
XTAcorrXL = np.dot(XTAcorrX, L)
LAMBDA_i = np.dot(np.dot(L.T, XTAcorrX), L) + np.eye(rank)
XTAY = XTY - rho1 * XTDY + rho1**2 * XTFY
X0TAY = X0TY - rho1 * X0TDY + rho1**2 * X0TFY
XTAcorrY = XTAY - np.dot(XTAX0, np.linalg.solve(X0TAX0, X0TAY))
LTXTAcorrY = np.dot(L.T, XTAcorrY)
YTAY = YTY_diag - rho1 * YTDY_diag + rho1**2 * YTFY_diag
YTAcorrY = YTAY \
- np.sum(X0TAY * np.linalg.solve(X0TAX0, X0TAY), axis=0)
LAMBDA_LTXTAcorrY = np.linalg.solve(LAMBDA_i, LTXTAcorrY)
L_LAMBDA_LTXTAcorrY = np.dot(L, LAMBDA_LTXTAcorrY)
sigma2 = np.mean(YTAcorrY -
np.sum(LTXTAcorrY * LAMBDA_LTXTAcorrY, axis=0))\
/ (n_T - n_X0)
LL = n_V * (-np.log(sigma2) * (n_T - n_X0) * 0.5
+ np.log(1 - rho1**2) * n_run * 0.5
- self._half_log_det(X0TAX0)
- self._half_log_det(LAMBDA_i))
deriv_L = np.dot(XTAcorrY, LAMBDA_LTXTAcorrY.T) / sigma2 \
- np.dot(np.dot(XTAcorrXL, LAMBDA_LTXTAcorrY),
LAMBDA_LTXTAcorrY.T) / sigma2 \
- np.linalg.solve(LAMBDA_i, XTAcorrXL.T).T * n_V
# These terms are used to construct derivative to a1.
dXTAX_drho1 = - XTDX + 2 * rho1 * XTFX
dX0TAX0_drho1 = - X0TDX0 + 2 * rho1 * X0TFX0
dXTAX0_drho1 = - XTDX0 + 2 * rho1 * XTFX0
invX0TAX0_X0TAX = np.linalg.solve(X0TAX0, XTAX0.T)
dXTAX0_drho1_invX0TAX0_X0TAX = np.dot(dXTAX0_drho1, invX0TAX0_X0TAX)
dXTAcorrX_drho1 = dXTAX_drho1 - dXTAX0_drho1_invX0TAX0_X0TAX \
- dXTAX0_drho1_invX0TAX0_X0TAX.T \
+ np.dot(np.dot(invX0TAX0_X0TAX.T, dX0TAX0_drho1),
invX0TAX0_X0TAX)
dLTXTAcorrXL_drho1 = np.dot(np.dot(L.T, dXTAcorrX_drho1), L)
dYTAY_drho1 = - YTDY_diag + 2 * rho1 * YTFY_diag
dX0TAY_drho1 = - X0TDY + 2 * rho1 * X0TFY
invX0TAX0_X0TAY = np.linalg.solve(X0TAX0, X0TAY)
dYTAX0_drho1_invX0TAX0_X0TAY = np.sum(dX0TAY_drho1
* invX0TAX0_X0TAY, axis=0)
dYTAcorrY_drho1 = dYTAY_drho1 - dYTAX0_drho1_invX0TAX0_X0TAY * 2\
+ np.sum(invX0TAX0_X0TAY *
np.dot(dX0TAX0_drho1, invX0TAX0_X0TAY), axis=0)
dXTAY_drho1 = - XTDY + 2 * rho1 * XTFY
dXTAcorrY_drho1 = dXTAY_drho1 \
- np.dot(dXTAX0_drho1, invX0TAX0_X0TAY) \
- np.dot(invX0TAX0_X0TAX.T, dX0TAY_drho1) \
+ np.dot(np.dot(invX0TAX0_X0TAX.T, dX0TAX0_drho1),
invX0TAX0_X0TAY)
deriv_a1 = 2.0 / (np.pi * (1 + a1**2)) \
* (n_V * (- n_run * rho1 / (1 - rho1**2)
- 0.5 * np.trace(np.linalg.solve(
X0TAX0, dX0TAX0_drho1))
- 0.5 * np.trace(np.linalg.solve(
LAMBDA_i, dLTXTAcorrXL_drho1)))
- 0.5 * np.sum(dYTAcorrY_drho1) / sigma2
+ np.sum(dXTAcorrY_drho1 * L_LAMBDA_LTXTAcorrY) / sigma2
- 0.5 * np.sum(np.dot(dXTAcorrX_drho1, L_LAMBDA_LTXTAcorrY)
* L_LAMBDA_LTXTAcorrY) / sigma2)
deriv = np.empty(np.size(param))
deriv[idx_param_sing['Cholesky']] = deriv_L[l_idx]
deriv[idx_param_sing['a1']] = deriv_a1
return -LL, -deriv
def _loglike_AR1_null(self, param, YTY_diag, YTDY_diag, YTFY_diag,
X0TX0, X0TDX0, X0TFX0, X0TY, X0TDY, X0TFY,
n_T, n_V, n_run, n_X0):
# This function calculates the log likelihood of data given AR(1)
# parameters of noise as free parameters.
# Free parameters are in param.
# It serves as a null model which assumes no response to design
# matrix.
a1 = param
rho1 = 2.0 / np.pi * np.arctan(a1) # auto-regressive coefficients
YTAY = self._make_ar1_quad_form(YTY_diag, YTDY_diag, YTFY_diag, rho1)
# dimension: space,
# A/sigma2 is the inverse of noise covariance matrix in each voxel.
# YTAY means Y'AY
X0TAX0 = X0TX0[None, :, :] - rho1[:, None, None] \
* X0TDX0[None, :, :] \
+ rho1[:, None, None]**2 * X0TFX0[None, :, :]
# dimension: space*#baseline*#baseline
X0TAY = self._make_ar1_quad_form(X0TY, X0TDY, X0TFY, rho1)
# dimension: #baseline*space
# X0TAX0_i = np.linalg.solve(X0TAX0, np.identity(n_X0)[None, :, :])
X0TAX0_i = np.linalg.inv(X0TAX0)
# dimension: space*#baseline*#baseline
YTAcorrY = YTAY - np.sum(X0TAY * np.einsum('ijk,ki->ji',
X0TAX0_i, X0TAY), axis=0)
# dimension: space,
sigma2 = YTAcorrY / (n_T - n_X0)
# dimension: space,
LL = - np.sum(np.log(sigma2)) * (n_T - n_X0) * 0.5 \
+ np.sum(np.log(1 - rho1**2)) * n_run * 0.5 \
- np.sum(self._half_log_det(X0TAX0)) \
- (n_T - n_X0) * n_V * (1 + np.log(2 * np.pi)) * 0.5
# The following are for calculating the derivative to a1
deriv_a1 = np.empty(n_V)
dYTAY_drho1 = self._make_ar1_quad_form_grad(YTDY_diag, YTFY_diag, rho1)
# dimension: space,
dX0TAX0_drho1 = - X0TDX0 \
+ 2 * rho1[:, None, None] * X0TFX0
# dimension: space*rank*rank
dX0TAY_drho1 = self._make_ar1_quad_form_grad(X0TDY, X0TFY, rho1)
# dimension: rank*space
# The following are executed for each voxel.
for i_v in range(n_V):
# All variables with _ele as suffix are for data of just one voxel
invX0TAX0_X0TAY_ele = np.dot(X0TAX0_i[i_v, :, :], X0TAY[:, i_v])
# preparation for the variable below
dYTAcorrY_drho1_ele = dYTAY_drho1[i_v] \
- np.dot(dX0TAY_drho1[:, i_v], invX0TAX0_X0TAY_ele) * 2\
+ np.dot(np.dot(invX0TAX0_X0TAY_ele, dX0TAX0_drho1[i_v, :, :]),
invX0TAX0_X0TAY_ele)
deriv_a1[i_v] = 2 / np.pi / (1 + a1[i_v]**2) \
* (- n_run * rho1[i_v] / (1 - rho1[i_v]**2)
- np.einsum('ij,ij', X0TAX0_i[i_v, :, :],
dX0TAX0_drho1[i_v, :, :]) * 0.5
- dYTAcorrY_drho1_ele * 0.5 / sigma2[i_v])
deriv = deriv_a1
return -LL, -deriv
class GBRSA(BRSA):
"""Group Bayesian representational Similarity Analysis (GBRSA)
Given the time series of neural imaging data in a region of interest
(ROI) and the hypothetical neural response (design matrix) to
each experimental condition of interest,
calculate the shared covariance matrix of
the voxels' (recording units') responses to each condition,
and the relative SNR of each voxel.
The relative SNR could be considered as the degree of contribution
of each voxel to this shared covariance matrix.
A correlation matrix converted from the covariance matrix
will be provided as a quantification of neural representational similarity.
Both tools provide estimates of SNR and noise parameters at the end,
and both tools provide empirical Bayesian estimates of activity patterns
beta, together with the weight map of nuisance signals beta0.
The differences of this tool from BRSA are:
(1) It allows fitting a shared covariance matrix (which can be converted
to similarity matrix) across multiple subjects.
This is analogous to SRM under funcalign submodule. Because of using
multiple subjects, the result is less noisy.
(2) In the fitting process, the SNR and noise parameters are marginalized
for each voxel. Therefore, this tool should be faster than BRSA
when analyzing an ROI of hundreds to thousands of voxels. It does not
provide a spatial smoothness prior on SNR though.
(3) The voxel-wise pseudo-SNR and noise parameters estimated are
posterior mean estimates, while those estimated by BRSA are
maximum-a-posterior estimates.
If your goal is to perform searchlight RSA with relatively few voxels
on a single subject, BRSA should be faster. However, GBRSA can in principle
be used together with searchlight in a template space such as MNI.
.. math::
Y = X \\cdot \\beta + X_0 \\cdot \\beta_0 + \\epsilon
\\beta_i \\sim N(0,(s_{i} \\sigma_{i})^2 U)
See also `.BRSA`.
Please note that the model assumes that the covariance matrix U which
all \\beta_i follow is zero-meaned. For more details of its implication,
see documentation of `.BRSA`
Parameters
----------
n_iter : int.
Number of maximum iterations to run the algorithm.
rank : int.
The rank of the covariance matrix.
If not provided, the covariance matrix will be assumed
to be full rank. When you have many conditions
(e.g., calculating the similarity matrix of responses to each event),
you might want to start with specifying a lower rank and use metrics
such as AIC or BIC to decide the optimal rank. The log likelihood
for the fitted data can be retrieved through private attributes
_LL_train\\_. Note that this log likelihood score is only used
here for selecting hyperparameters such as rank. For any formal
model comparison, we recommend using score() function on left-out
data.
auto_nuisance: Boolean.
In order to model spatial correlation between voxels that cannot
be accounted for by common response captured in the design matrix,
we assume that a set of time courses not related to the task
conditions are shared across voxels with unknown amplitudes.
One approach is for users to provide time series which they consider
as nuisance but exist in the noise (such as head motion).
The other way is to take the first n_nureg principal components
in the residual after subtracting the response to the design matrix
from the data, and use these components as the nuisance regressor.
This flag is for the second approach. If turned on,
PCA or factor analysis will be applied to the residuals
to obtain new nuisance regressors in each round of fitting.
These two approaches can be combined. If the users provide nuisance
regressors and set this flag as True, then the first n_nureg
principal components of the residuals after subtracting
both the responses to design matrix and the user-supplied nuisance
regressors will be used in addition to the nuisance regressors
provided by the users.
Note that nuisance regressors are not required from the user. If they are
not provided, DC components for each run will be included as nuisance
regressors regardless of the auto_nuisance parameter.
n_nureg: Optional[int].
Number of nuisance regressors to use in order to model signals
shared across voxels not captured by the design matrix.
This number is in addition to any nuisance regressor that the user
has already provided.
If set to None, the number of nuisance regressors will be
automatically determined based on M Gavish
and D Donoho's approximate estimation of optimal hard
threshold for singular values. (Gavish & Donoho,
IEEE Transactions on Information Theory 60.8 (2014): 5040-5053.)
This only takes effect if auto_nuisance is True.
nureg_zscore: Boolean.
A flag to tell the algorithm whether data is z-scored before
estimating the number of nuisance regressor components necessary to
account for spatial noise correlation. It also determines whether
the residual noise is z-scored before estimating the nuisance
regressors from residual.
This only takes effect if auto_nuisance is True.
nureg_method: string, naming a method from sklearn.decomposition.
'PCA', 'ICA', 'FA' or 'SPCA' are currently supported.
The method to estimate the shared component in noise across voxels.
This only takes effect if auto_nuisance is True.
baseline_single: Boolean.
A time course of constant 1 will be included to the nuisance
regressor for each participant. If baseline_single is set to False,
one such regressor is included for each fMRI run, but at the end of
fitting, a single component in beta0\\_ will be computed as the average
of the weight maps corresponding to these regressors. This might
cause underestimation of noise variance.
If baseline_single is True, only one regressor of constant 1 will be
used for the whole dataset. This might be desirable if you
believe the average image intensity might not scale with the
same proportion for different voxels across scan. In other words,
it is possible that some part of the brain is more vulnerable to
change in baseline intensity due to facts such as
field inhomogeneity. Setting baseline_single to True will force the
nuisance regressors automatically estimated from residuals to
capture this. However, when each task condition only occurs in one
run and when the design matrix in each run sums together close to
a flat line, this option can cause the estimated similarity to be
extremely high between conditions occurring in the same run.
SNR_prior: string.
The type of prior for pseudo-SNR.
If set to 'exp', truncated exponential distribution with scale
parameter of 1 is imposed on pseudo-SNR.
If set to 'lognorm', a truncated log normal prior is imposed.
In this case, the standard deviation of log(SNR) is set
by the parameter logS_range.
If set to 'unif', a uniform prior in [0,1] is imposed.
In all above cases, SNR is numerically
marginalized on a grid of parameters. So the parameter SNR_bins
determines how accurate the numerical integration is: the more
bins are used, the more accurate the numerical
integration becomes.
If set to 'equal', all voxels are assumed to have the same fixed
SNR. Pseudo-SNR is 1.0 for all voxels.
In all the cases, the grids used for pseudo-SNR do not really
set an upper bound for SNR, because the real SNR is determined
by both pseudo-SNR and U, the shared covariance structure.
logS_range: float.
The reasonable range of the spread of SNR in log scale.
This parameter only takes effect if SNR_prior is set to 'lognorm'.
It is effectively the `s` parameter of `scipy.stats.lognorm`,
or the standard deviation of the distribution in log scale.
logS_range specifies how much you believe the SNRs
vary across voxels in log scale.
This range should not be set too large, otherwise the fitting
may encounter numerical issue.
If it is set too small, the estimated SNR will turn to be too
close to each other and the estimated similarity matrix might
overfit to voxels of low SNR.
If you increase logS_range, it is recommended to increase
SNR_bins accordingly, otherwise the pseudo-SNR values evaluated might
be too sparse, causing the posterior pseudo-SNR estimations
to be clustered around the bins.
SNR_bins: integer.
The number of bins used to numerically marginalize the pseudo-SNR
parameter. In general, you should try to choose a large number
to the degree that decreasing SNR_bins does not change the fitting
result. However, a very large number of bins also causes
slower computation and larger memory consumption.
For SNR_prior='lognorm', the default value 21 is based on
the default value of logS_range=1.0 and bin width of 0.3 on log scale.
But it is also a reasonable choice for the other two options
for SNR_prior.
rho_bins: integer.
The number of bins to divide the region of (-1, 1) for rho.
This only takes effect for fitting the marginalized version.
If set to 20, discrete numbers of {-0.95, -0.85, ..., 0.95} will
be used to numerically integrate rho from -1 to 1.
optimizer: str or callable.
The optimizer to use for minimizing cost function which
scipy.optimize.minimize can accept.
We use 'L-BFGS-B' as a default. Users can try other strings
corresponding to optimizer provided by scipy.optimize.minimize,
or a custom optimizer, such as 'BFGS' or 'CG'.
Note that BRSA fits a lot of parameters. So a chosen optimizer
should accept gradient (Jacobian) of the cost function. Otherwise
the fitting is likely to be unbearably slow. We do not calculate
Hessian of the objective function. So an optimizer which requires
Hessian cannot be used.
minimize_options: dictionary.
This is the dictionary passed as the options argument to
scipy.optimize.minimize which minimizes the cost function during
fitting. Notice that the minimization is performed for up to
n_iter times, with the nuisance regressor re-estimated each time.
So within each of the n_iter steps of fitting,
scipy.optimize.minimize does not need to fully converge. The key
'maxiter' in this dictionary determines the maximum number of
iteration done by scipy.optimize.minimize within each of the n_iter
steps of fitting.
tol: float.
Tolerance parameter passed to scipy.optimize.minimize. It is also
used for determining convergence of the alternating fitting
procedure.
random_state : RandomState or an int seed.
A random number generator instance to define the state of
the random permutations generator whenever the module
needs to generate random number (e.g., initial parameter
of the Cholesky factor).
anneal_speed: float.
Annealing is introduced in fitting of the Cholesky
decomposition of the shared covariance matrix. The amount
of perturbation decays exponentially. This parameter sets
the ratio of the maximum number of iteration to the
time constant of the exponential.
anneal_speed=10 means that by n_iter/10 iterations,
the amount of perturbation is reduced by a factor of e (about 2.718).
Attributes
----------
U_ : numpy array, shape=[condition,condition].
The shared covariance matrix
L_ : numpy array, shape=[condition,condition].
The Cholesky factor of the shared covariance matrix
(lower-triangular matrix).
C_: numpy array, shape=[condition,condition].
The correlation matrix derived from the shared covariance matrix.
This is the estimated similarity matrix between neural patterns
to your task conditions. Notice that it is recommended that
you also check U\\_, which is the covariance matrix underlying
this correlation matrix. In cases there is almost no response
to your task conditions, the diagonal values of U\\_ would become
very small and C\\_ might contain many correlation coefficients
close to 1 or -1. This might not reflect true strong correlation
or strong negative correlation, but a result of lack of
task-related neural activity, design matrix that does not match
true neural response, or not enough data.
It is also recommended to check nSNR\\_ after mapping it back to
the brain. A "reasonable" map should at least have higher values
in gray matter than in white matter.
nSNR_ : list of numpy arrays, shape=[voxels,] for each subject in the list.
The pseudo-SNR of all voxels. If SNR_prior='lognorm',
the geometric mean of nSNR\\_ would be approximately 1.
If SNR_prior='unif', all nSNR\\_ would be in the range of (0,1).
If SNR_prior='exp' (default), the range of values would vary
depending on the data and SNR_bins, but many should have low
values with few voxels with high values.
Note that this attribute can not be interpreted as true SNR,
but the relative ratios between voxels indicate the contribution
of each voxel to the representational similarity structure.
sigma_ : list of numpy arrays, shape=[voxels,] for each subject.
The estimated standard deviation of the noise in each voxel.
Assuming an AR(1) model, this means the standard deviation
of the innovation noise.
rho_ : list of numpy arrays, shape=[voxels,] for each subject.
The estimated autoregressive coefficient of each voxel.
beta_: list of numpy arrays, shape=[conditions, voxels] for each subject.
The posterior mean estimation of the response amplitudes
of each voxel to each task condition.
beta0_: list of numpy arrays, shape=[n_nureg + n_base, voxels]
for each subject.
The loading weights of each voxel for the shared time courses
not captured by the design matrix.
n_base is the number of columns of the user-supplied nuisance
regressors plus one for DC component.
X0_: list of numpy arrays, shape=[time_points, n_nureg + n_base]
for each subject.
The estimated time course that is shared across voxels but
unrelated to the events of interest (design matrix).
beta0_null_: list of numpy arrays, shape=[n_nureg + n_base, voxels]
for each subject.
The equivalent of beta0\\_ in a null model which does not
include the design matrix and response pattern beta
X0_null_: list of numpy arrays, shape=[time_points, n_nureg + n_base]
for each subject.
The equivalent of X0\\_ in a null model which does not
include the design matrix and response pattern beta
n_nureg_: 1-d numpy array
Number of nuisance regressors used to model the spatial noise
correlation of each participant.
random_state_: `RandomState`
Random number generator initialized using random_state.
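Examples
--------
An illustrative sketch only: the data below are random numbers, and
the array shapes and variable names are hypothetical, not a
recommended analysis.
>>> import numpy as np
>>> from brainiak.reprsimil.brsa import GBRSA
>>> data = [np.random.randn(200, 50) for _ in range(2)]
>>> designs = [np.random.randn(200, 4) for _ in range(2)]
>>> gbrsa = GBRSA(SNR_prior='exp')
>>> gbrsa.fit(X=data, design=designs)  # doctest: +SKIP
>>> similarity_matrix = gbrsa.C_  # doctest: +SKIP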
"""
def __init__(
self, n_iter=100, rank=None,
auto_nuisance=True, n_nureg=None, nureg_zscore=True,
nureg_method='PCA',
baseline_single=False, logS_range=1.0, SNR_prior='exp',
SNR_bins=21, rho_bins=20, tol=1e-4, optimizer='L-BFGS-B',
minimize_options={'gtol': 1e-4, 'disp': False,
'maxiter': 20}, random_state=None,
anneal_speed=10):
self.n_iter = n_iter
self.rank = rank
self.auto_nuisance = auto_nuisance
self.n_nureg = n_nureg
self.nureg_zscore = nureg_zscore
if auto_nuisance:
assert (n_nureg is None) \
or (isinstance(n_nureg, int) and n_nureg > 0), \
'n_nureg should be a positive integer or None'\
' if auto_nuisance is True.'
if self.nureg_zscore:
self.preprocess_residual = lambda x: _zscore(x)
else:
self.preprocess_residual = lambda x: x
if nureg_method == 'FA':
self.nureg_method = lambda x: FactorAnalysis(n_components=x)
elif nureg_method == 'PCA':
self.nureg_method = lambda x: PCA(n_components=x, whiten=True)
elif nureg_method == 'SPCA':
self.nureg_method = lambda x: SparsePCA(n_components=x,
max_iter=20, tol=tol)
elif nureg_method == 'ICA':
self.nureg_method = lambda x: FastICA(n_components=x,
whiten=True)
else:
raise ValueError('nureg_method can only be FA, PCA, '
'SPCA (for sparse PCA) or ICA')
self.baseline_single = baseline_single
if type(logS_range) is int:
logS_range = float(logS_range)
self.logS_range = logS_range
assert SNR_prior in ['unif', 'lognorm', 'exp', 'equal'], \
"SNR_prior can only be chosen from 'unif', 'lognorm'," \
" 'exp' and 'equal'"
self.SNR_prior = SNR_prior
if self.SNR_prior == 'equal':
self.SNR_bins = 1
else:
self.SNR_bins = SNR_bins
self.rho_bins = rho_bins
self.tol = tol
self.optimizer = optimizer
self.minimize_options = minimize_options
self.random_state = random_state
self.anneal_speed = anneal_speed
return
def fit(self, X, design, nuisance=None, scan_onsets=None):
""" Fit the model to data of all participants jointly.
Parameters
----------
X: list of numpy arrays, shape=[time_points, voxels] for each entry.
Data to be fitted. Each participant corresponds to one item in
the list. If you have multiple scans of the same participants
that you want to analyze together, you should concatenate them
along the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: list of numpy arrays, shape=[time_points, conditions] for each.
This is the design matrix of each participant.
It should only include the hypothetical response to task conditions.
You should not include regressors for a DC component or
motion parameters, unless with a strong reason.
If you want to model head motion, you should include them
in nuisance regressors.
If you have multiple runs, the design matrix
of all runs should be concatenated along the time dimension for
each participant, with every column for one condition across runs.
If the design matrix is the same for all subjects,
either provide a list as required, or provide a single numpy array.
nuisance: optional, list of numpy arrays,
shape=[time_points, nuisance_factors] for each subject in the list.
Nuisance regressors of each participant.
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
scan_onsets: optional, list of numpy arrays, shape=[runs,] for each.
Each item in the list specifies the indices of X which correspond
to the onset of each scanning run for one participant.
For example, if you have two experimental runs of
the first participant, each with 100 TRs, and one run of the
second participant, with 150 TRs, then scan_onsets should be
[ndarray([0, 100]), ndarray([0])].
The effect of this argument is to make the inverse matrix
of the temporal covariance matrix of noise block-diagonal.
If you do not provide the argument, the program will
assume all data are from the same run for each participant.
"""
logger.info('Running Group Bayesian RSA (which can also analyze'
' data of a single participant). Voxel-specific parameters'
' are all marginalized.')
self.random_state_ = check_random_state(self.random_state)
# setting random seed
logger.debug('RandState set to {}'.format(self.random_state_))
# Checking all inputs.
X = self._check_data_GBRSA(X)
design = self._check_design_GBRSA(design, X)
nuisance = self._check_nuisance_GBRSA(
copy.deepcopy(nuisance), X)
# The reason that we use copy of nuisance is because they
# may be modified inside our code.
scan_onsets = self._check_scan_onsets_GBRSA(scan_onsets, X)
# Run Marginalized Bayesian RSA
# Note that we have a change of notation here.
# Within _fit_RSA_marginalized, design matrix is named X
# and data is named Y, to reflect the
# generative model that data Y is generated by mixing the response
# X to experiment conditions and other neural activity.
# However, in fit(), we keep the scikit-learn API that
# X is the input data to fit and y, a reserved name not used, is
# the label to map to from X.
assert (self.SNR_bins >= 10 and self.SNR_prior != 'equal') or \
(self.SNR_bins == 1 and self.SNR_prior == 'equal'), \
'At least 10 bins are required to perform the numerical'\
" integration over SNR, unless choosing SNR_prior='equal',"\
' in which case SNR_bins should be 1.'
assert self.rho_bins >= 10, \
'At least 10 bins are required to perform the numerical'\
' integration over rho'
assert self.logS_range * 6 / self.SNR_bins < 0.5 \
or self.SNR_prior != 'lognorm', \
'The grid spacing of log(SNR) should not be larger than 0.5 '\
'if log normal prior is chosen for SNR.' \
' Please consider increasing SNR_bins or reducing logS_range'
self.n_subj_ = len(X)
self.n_V_ = [None] * self.n_subj_
for subj, x in enumerate(X):
self.n_V_[subj] = x.shape[1]
if self.auto_nuisance:
if self.n_nureg is None:
logger.info('numbers of nuisance regressors are determined '
'automatically.')
n_runs = np.zeros(self.n_subj_)
n_comps = np.ones(self.n_subj_)
for s_id in np.arange(self.n_subj_):
# For each subject, determine the number of nuisance
# regressors needed to account for the covariance
# in residuals.
# Residual is calculated by regressing
# out the design matrix and DC component and linear trend
# from data of each run.
run_TRs, n_runs[s_id] = self._run_TR_from_scan_onsets(
X[s_id].shape[0], scan_onsets[s_id])
ts_dc = self._gen_legendre(run_TRs, [0])
_, ts_base, _ = self._merge_DC_to_base(
ts_dc, nuisance[s_id], False)
ts_reg = np.concatenate((ts_base, design[s_id]), axis=1)
beta_hat = np.linalg.lstsq(ts_reg, X[s_id], rcond=None)[0]
residuals = X[s_id] - np.dot(ts_reg, beta_hat)
n_comps[s_id] = np.min(
[np.max([Ncomp_SVHT_MG_DLD_approx(
residuals, self.nureg_zscore), 1]),
np.linalg.matrix_rank(residuals) - 1])
# n_nureg_ should not exceed the rank of
# residual minus 1.
self.n_nureg_ = n_comps
logger.info('Use {} nuisance regressors to model the spatial '
'correlation in noise.'.format(self.n_nureg_))
else:
self.n_nureg_ = self.n_nureg * np.ones(self.n_subj_)
self.n_nureg_ = np.int32(self.n_nureg_)
self.beta0_null_, self.sigma_null_, self.rho_null_, self.X0_null_,\
self._LL_null_train_ = self._fit_RSA_marginalized_null(
Y=X, X_base=nuisance, scan_onsets=scan_onsets)
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self.sigma_, self.rho_, self.X0_, self._LL_train_ = \
self._fit_RSA_marginalized(
X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
self.C_ = utils.cov2corr(self.U_)
self.design_ = design.copy()
self._rho_design_ = [None] * self.n_subj_
self._sigma2_design_ = [None] * self.n_subj_
self._rho_X0_ = [None] * self.n_subj_
self._sigma2_X0_ = [None] * self.n_subj_
self._rho_X0_null_ = [None] * self.n_subj_
self._sigma2_X0_null_ = [None] * self.n_subj_
for subj in np.arange(self.n_subj_):
self._rho_design_[subj], self._sigma2_design_[subj] = \
self._est_AR1(self.design_[subj], same_para=True)
self._rho_X0_[subj], self._sigma2_X0_[subj] = \
self._est_AR1(self.X0_[subj])
self._rho_X0_null_[subj], self._sigma2_X0_null_[subj] =\
self._est_AR1(self.X0_null_[subj])
# AR(1) parameters of the design matrix and nuisance regressors,
# which will be used in transform or score.
return self
def transform(self, X, y=None, scan_onsets=None):
""" Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and
nuisance regressors from a new dataset different from the
training dataset on which fit() was applied. An AR(1) smooth
prior is imposed on the decoded ts and ts0 with the AR(1)
parameters learnt from the corresponding time courses in the
training data.
Parameters
----------
X : list of 2-D arrays. For each item, shape=[time_points, voxels]
New fMRI data of the same subjects. The voxels should
match those used in the fit() function.
The size of the list should match the size of the list X fed
to fit(), with each item in the list corresponding to data
from the same subject in the X fed to fit(). If you do not
need to transform some subjects' data, leave the entry
corresponding to that subject as None.
If data are z-scored when fitting the model,
data should be z-scored as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : list of 1-D numpy arrays,
Each array corresponds to the onsets of
scans in the data X for the particular subject.
If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ts : list of 2-D arrays. For each, shape = [time_points, condition]
The estimated response to the cognitive dimensions
(task dimensions) whose response amplitudes were estimated
during the fit step.
One item for each subject. If some subjects' data are
not provided, None will be returned.
ts0: list of 2-D array. For each, shape = [time_points, n_nureg]
The estimated time courses spread across the brain, with the
loading weights estimated during the fit step.
One item for each subject. If some subjects' data are
not provided, None will be returned.
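Examples
--------
A hypothetical call (assuming `gbrsa` was fitted on matching data;
`new_data` and `onsets` are placeholders, not part of the API):
>>> ts, ts0 = gbrsa.transform(new_data, scan_onsets=onsets)  # doctest: +SKIP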
"""
X = self._check_data_GBRSA(X, for_fit=False)
scan_onsets = self._check_scan_onsets_GBRSA(scan_onsets, X)
assert len(X) == self.n_subj_
ts = [None] * self.n_subj_
ts0 = [None] * self.n_subj_
log_p = [None] * self.n_subj_
for i, x in enumerate(X):
if x is not None:
s = scan_onsets[i]
ts[i], ts0[i], log_p[i] = self._transform(
Y=x, scan_onsets=s, beta=self.beta_[i],
beta0=self.beta0_[i], rho_e=self.rho_[i],
sigma_e=self.sigma_[i], rho_X=self._rho_design_[i],
sigma2_X=self._sigma2_design_[i],
rho_X0=self._rho_X0_[i], sigma2_X0=self._sigma2_X0_[i])
return ts, ts0
def score(self, X, design, scan_onsets=None):
""" After fit() is applied to the data of a group of participants,
use the parameters estimated by the fit() function to evaluate
the log likelihood of new data from the same participants
given these estimated parameters.
Design matrices of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For the null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
in testing data; X is assumed given by the user,
and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data. This is done
at the cost of using point estimation for beta and beta0.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : List of 2-D arrays. For each item, shape=[time_points, voxels]
fMRI data of new data of the same participants.
The voxels of each participants should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling score().
design : List of 2-D arrays. shape=[time_points, conditions] for each
Each corresponds to one participant.
Design matrices expressing the hypothetical response of
the task conditions in data X.
scan_onsets : List of 1-D arrays, shape=[#fMRI runs] for each
Each array corresponds to one participant.
Lists of indices corresponding to the onsets of
scans in the data X.
If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: list, shape=[number of participants]
The log likelihoods of the new data based on the model and its
parameters fit to the training data.
If data of some participants are not provided, the corresponding
entry will be None.
ll_null: list, shape=[number of participants]
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except that there is no response to any of the
task conditions.
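Examples
--------
A hypothetical call (assuming the model was fitted beforehand;
`test_data`, `test_designs` and `onsets` are placeholders):
>>> ll, ll_null = gbrsa.score(X=test_data, design=test_designs,
...                           scan_onsets=onsets)  # doctest: +SKIP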
"""
X = self._check_data_GBRSA(X, for_fit=False)
scan_onsets = self._check_scan_onsets_GBRSA(scan_onsets, X)
design = self._check_design_GBRSA(design, X)
assert len(X) == self.n_subj_
ll = [None] * self.n_subj_
ll_null = [None] * self.n_subj_
for subj in np.arange(self.n_subj_):
if X[subj] is not None:
ll[subj] = self._score(
Y=X[subj], design=design[subj], beta=self.beta_[subj],
scan_onsets=scan_onsets[subj], beta0=self.beta0_[subj],
rho_e=self.rho_[subj], sigma_e=self.sigma_[subj],
rho_X0=self._rho_X0_[subj],
sigma2_X0=self._sigma2_X0_[subj])
ll_null[subj] = self._score(
Y=X[subj], design=None, beta=None,
scan_onsets=scan_onsets[subj], beta0=self.beta0_[subj],
rho_e=self.rho_[subj], sigma_e=self.sigma_[subj],
rho_X0=self._rho_X0_[subj],
sigma2_X0=self._sigma2_X0_[subj])
return ll, ll_null
def _precompute_ar1_quad_forms_marginalized(
self, XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag,
XTX, XTDX, XTFX, X0TX0, X0TDX0, X0TFX0,
XTX0, XTDX0, XTFX0, X0TY, X0TDY, X0TFY,
rho1, n_V, n_X0):
# Calculate the sandwich terms which put Acorr between X, Y and X0
# These terms are used a lot in the likelihood. This function
# is used for the marginalized version.
XTAY = XTY - rho1[:, None, None] * XTDY \
+ rho1[:, None, None]**2 * XTFY
# dimension: #rho*feature*space
YTAY_diag = YTY_diag - rho1[:, None] * YTDY_diag \
+ rho1[:, None]**2 * YTFY_diag
# dimension: #rho*space,
# A/sigma2 is the inverse of noise covariance matrix in each voxel.
# YTAY means Y'AY
XTAX = XTX - rho1[:, None, None] * XTDX \
+ rho1[:, None, None]**2 * XTFX
# dimension: n_rho*feature*feature
X0TAX0 = X0TX0[None, :, :] - rho1[:, None, None] \
* X0TDX0[None, :, :] \
+ rho1[:, None, None]**2 * X0TFX0[None, :, :]
# dimension: #rho*#baseline*#baseline
XTAX0 = XTX0[None, :, :] - rho1[:, None, None] \
* XTDX0[None, :, :] \
+ rho1[:, None, None]**2 * XTFX0[None, :, :]
# dimension: n_rho*feature*#baseline
X0TAY = X0TY - rho1[:, None, None] * X0TDY \
+ rho1[:, None, None]**2 * X0TFY
# dimension: #rho*#baseline*space
X0TAX0_i = np.linalg.solve(X0TAX0, np.identity(n_X0)[None, :, :])
# dimension: #rho*#baseline*#baseline
XTAcorrX = XTAX
# dimension: #rho*feature*feature
XTAcorrY = XTAY
# dimension: #rho*feature*space
YTAcorrY_diag = YTAY_diag
for i_r in range(np.size(rho1)):
XTAcorrX[i_r, :, :] -= \
np.dot(np.dot(XTAX0[i_r, :, :], X0TAX0_i[i_r, :, :]),
XTAX0[i_r, :, :].T)
XTAcorrY[i_r, :, :] -= np.dot(np.dot(XTAX0[i_r, :, :],
X0TAX0_i[i_r, :, :]),
X0TAY[i_r, :, :])
YTAcorrY_diag[i_r, :] -= np.sum(
X0TAY[i_r, :, :] * np.dot(X0TAX0_i[i_r, :, :],
X0TAY[i_r, :, :]), axis=0)
return X0TAX0, X0TAX0_i, XTAcorrX, XTAcorrY, YTAcorrY_diag, \
X0TAY, XTAX0
def _fit_RSA_marginalized(self, X, Y, X_base,
scan_onsets=None):
""" The major utility of fitting Bayesian RSA
(marginalized version).
Note that there is a naming change of variable. X in fit()
is changed to Y here, and design in fit() is changed to X here.
This is because we follow the tradition that X expresses the
variable defined (controlled) by the experimenter, i.e., the
time course of experimental conditions convolved by an HRF,
and Y expresses data.
However, in wrapper function fit(), we follow the naming
routine of scikit-learn.
"""
rank = self.rank
n_subj = len(Y)
n_V = [np.size(y, axis=1) for y in Y]
n_T = [np.size(y, axis=0) for y in Y]
n_C = np.size(X[0], axis=1)
l_idx, rank = self._chol_idx(n_C, rank)
n_l = np.size(l_idx[0]) # the number of parameters for L
t_start = time.time()
logger.info('Starting to fit the model. Maximum iteration: '
'{}.'.format(self.n_iter))
# log_SNR_grids, SNR_weights \
# = np.polynomial.hermite.hermgauss(SNR_bins)
# SNR_weights = SNR_weights / np.pi**0.5
# SNR_grids = np.exp(log_SNR_grids * self.logS_range * 2**.5)
SNR_grids, SNR_weights = self._set_SNR_grids()
logger.info('The grids of pseudo-SNR used for numerical integration '
'are {}.'.format(SNR_grids))
assert np.max(SNR_grids) < 1e10, \
'ATTENTION!! The range of grids of pseudo-SNR' \
' to be marginalized is too large. Please ' \
'consider reducing logS_range to 1 or 2'
rho_grids, rho_weights = self._set_rho_grids()
logger.info('The grids of rho used to do numerical integration '
'are {}.'.format(rho_grids))
n_grid = self.SNR_bins * self.rho_bins
log_weights = np.reshape(
np.log(SNR_weights[:, None]) + np.log(rho_weights), n_grid)
all_rho_grids = np.reshape(np.repeat(
rho_grids[None, :], self.SNR_bins, axis=0), n_grid)
all_SNR_grids = np.reshape(np.repeat(
SNR_grids[:, None], self.rho_bins, axis=1), n_grid)
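# The 2-D grid over (SNR, rho) is flattened into one dimension of
# length n_grid = SNR_bins * rho_bins; all_SNR_grids and
# all_rho_grids hold the (SNR, rho) coordinates of each flattened
# grid point.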
# Prepare the data for fitting. These pre-calculated matrices
# will be re-used a lot in evaluating likelihood function and
# gradient.
D = [None] * n_subj
F = [None] * n_subj
run_TRs = [None] * n_subj
n_run = [None] * n_subj
XTY = [None] * n_subj
XTDY = [None] * n_subj
XTFY = [None] * n_subj
YTY_diag = [None] * n_subj
YTDY_diag = [None] * n_subj
YTFY_diag = [None] * n_subj
XTX = [None] * n_subj
XTDX = [None] * n_subj
XTFX = [None] * n_subj
X0TX0 = [None] * n_subj
X0TDX0 = [None] * n_subj
X0TFX0 = [None] * n_subj
XTX0 = [None] * n_subj
XTDX0 = [None] * n_subj
XTFX0 = [None] * n_subj
X0TY = [None] * n_subj
X0TDY = [None] * n_subj
X0TFY = [None] * n_subj
X0 = [None] * n_subj
X_res = [None] * n_subj
n_X0 = [None] * n_subj
idx_DC = [None] * n_subj
log_fixed_terms = [None] * n_subj
# Initialization for L.
# There are several possible ways of initializing the covariance.
# (1) start from the point estimation of covariance
cov_point_est = np.zeros((n_C, n_C))
for subj in range(n_subj):
D[subj], F[subj], run_TRs[subj], n_run[subj] = self._prepare_DF(
n_T[subj], scan_onsets=scan_onsets[subj])
XTY[subj], XTDY[subj], XTFY[subj], YTY_diag[subj], \
YTDY_diag[subj], YTFY_diag[subj], XTX[subj], XTDX[subj], \
XTFX[subj] = self._prepare_data_XY(
X[subj], Y[subj], D[subj], F[subj])
# The contents above stay fixed during fitting.
# Initializing X0 as DC baseline
# DC component will be added to the nuisance regressors.
# In later steps, we do not need to add DC components again
X0TX0[subj], X0TDX0[subj], X0TFX0[subj], XTX0[subj], XTDX0[subj], \
XTFX0[subj], X0TY[subj], X0TDY[subj], X0TFY[subj], X0[subj], \
X_base[subj], n_X0[subj], idx_DC[subj] = \
self._prepare_data_XYX0(
X[subj], Y[subj], X_base[subj], None, D[subj], F[subj],
run_TRs[subj], no_DC=False)
X_joint = np.concatenate((X0[subj], X[subj]), axis=1)
beta_hat = np.linalg.lstsq(X_joint, Y[subj], rcond=None)[0]
residual = Y[subj] - np.dot(X_joint, beta_hat)
# point estimates of betas and fitting residuals without assuming
# the Bayesian model underlying RSA.
cov_point_est += np.cov(beta_hat[n_X0[subj]:, :]
/ np.std(residual, axis=0))
log_fixed_terms[subj] = - (n_T[subj] - n_X0[subj]) \
/ 2 * np.log(2 * np.pi) + n_run[subj] \
/ 2 * np.log(1 - all_rho_grids**2) \
+ scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 2) / 2) \
+ (n_T[subj] - n_X0[subj] - 2) / 2 * np.log(2)
# These are terms in the log likelihood that do not
# depend on L. Notice that the last term comes from
# the term from marginalizing sigma. We take the 2 in
# the denominator out. Accordingly, the "denominator"
# variable in the _raw_loglike_grids() function is not
# divided by 2
cov_point_est = cov_point_est / n_subj
current_vec_U_chlsk_l = np.linalg.cholesky(
(cov_point_est + np.eye(n_C)) / 2)[l_idx]
# We use the average of covariance of point estimation and an identity
# matrix as the initial value of the covariance matrix, just in case
# the user provides data in which n_V is smaller than n_C.
# (2) start from identity matrix
# current_vec_U_chlsk_l = np.eye(n_C)[l_idx]
# (3) random initialization
# current_vec_U_chlsk_l = self.random_state_.randn(n_l)
# vectorized version of L, Cholesky factor of U, the shared
# covariance matrix of betas across voxels.
L = np.zeros((n_C, rank))
L[l_idx] = current_vec_U_chlsk_l
X0TAX0 = [None] * n_subj
X0TAX0_i = [None] * n_subj
XTAcorrX = [None] * n_subj
s2XTAcorrX = [None] * n_subj
YTAcorrY_diag = [None] * n_subj
XTAcorrY = [None] * n_subj
sXTAcorrY = [None] * n_subj
X0TAY = [None] * n_subj
XTAX0 = [None] * n_subj
half_log_det_X0TAX0 = [None] * n_subj
s_post = [None] * n_subj
rho_post = [None] * n_subj
sigma_post = [None] * n_subj
beta_post = [None] * n_subj
beta0_post = [None] * n_subj
# The contents below can be updated during fitting.
# e.g., X0 will be re-estimated
logger.info('start real fitting')
LL = np.zeros(n_subj)
for it in range(self.n_iter):
logger.info('Iteration {}'.format(it))
# Re-estimate part of X0: X_res
for subj in range(n_subj):
if self.auto_nuisance and it > 0:
residuals = Y[subj] - np.dot(X[subj], beta_post[subj]) \
- np.dot(
X_base[subj],
beta0_post[subj][:np.shape(X_base[subj])[1], :])
X_res[subj] = self.nureg_method(
self.n_nureg_[subj]).fit_transform(
self.preprocess_residual(residuals))
X0TX0[subj], X0TDX0[subj], X0TFX0[subj], XTX0[subj],\
XTDX0[subj], XTFX0[subj], X0TY[subj], X0TDY[subj], \
X0TFY[subj], X0[subj], X_base[subj], n_X0[subj], _ = \
self._prepare_data_XYX0(
X[subj], Y[subj], X_base[subj], X_res[subj],
D[subj], F[subj], run_TRs[subj], no_DC=True)
X0TAX0[subj], X0TAX0_i[subj], XTAcorrX[subj], XTAcorrY[subj],\
YTAcorrY_diag[subj], X0TAY[subj], XTAX0[subj] \
= self._precompute_ar1_quad_forms_marginalized(
XTY[subj], XTDY[subj], XTFY[subj], YTY_diag[subj],
YTDY_diag[subj], YTFY_diag[subj], XTX[subj],
XTDX[subj], XTFX[subj], X0TX0[subj], X0TDX0[subj],
X0TFX0[subj], XTX0[subj], XTDX0[subj], XTFX0[subj],
X0TY[subj], X0TDY[subj], X0TFY[subj], rho_grids,
n_V[subj], n_X0[subj])
# Now we expand to another dimension including SNR
# and collapse the dimension again.
half_log_det_X0TAX0[subj], X0TAX0[subj], X0TAX0_i[subj], \
s2XTAcorrX[subj], YTAcorrY_diag[subj], sXTAcorrY[subj], \
X0TAY[subj], XTAX0[subj] = self._matrix_flattened_grid(
X0TAX0[subj], X0TAX0_i[subj], SNR_grids,
XTAcorrX[subj], YTAcorrY_diag[subj], XTAcorrY[subj],
X0TAY[subj], XTAX0[subj], n_C, n_V[subj], n_X0[subj],
n_grid)
res = scipy.optimize.minimize(
self._sum_loglike_marginalized, current_vec_U_chlsk_l
+ self.random_state_.randn(n_l) *
np.linalg.norm(current_vec_U_chlsk_l)
/ n_l**0.5 * np.exp(-it / self.n_iter
* self.anneal_speed - 1),
args=(s2XTAcorrX, YTAcorrY_diag, sXTAcorrY,
half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank),
method=self.optimizer, jac=True, tol=self.tol,
options=self.minimize_options)
param_change = res.x - current_vec_U_chlsk_l
current_vec_U_chlsk_l = res.x.copy()
# Estimating a few parameters.
L[l_idx] = current_vec_U_chlsk_l
for subj in range(n_subj):
LL_raw, denominator, L_LAMBDA, L_LAMBDA_LT = \
self._raw_loglike_grids(
L, s2XTAcorrX[subj], YTAcorrY_diag[subj],
sXTAcorrY[subj], half_log_det_X0TAX0[subj],
log_weights, log_fixed_terms[subj], n_C, n_T[subj],
n_V[subj], n_X0[subj], n_grid, rank)
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
LL[subj] = np.sum(np.log(result_sum) + max_value)
weight_post = result_exp / result_sum
s_post[subj] = np.sum(all_SNR_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of SNR.
rho_post[subj] = np.sum(all_rho_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of rho.
sigma_means = denominator ** 0.5 \
* (np.exp(scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 3) / 2)
- scipy.special.gammaln(
(n_T[subj] - n_X0[subj] - 2) / 2)) / 2**0.5)
sigma_post[subj] = np.sum(sigma_means * weight_post, axis=0)
# The mean of inverse-Gamma distribution is beta/(alpha-1)
# The mode is beta/(alpha+1). Notice that beta here does not
# refer to the brain activation, but the scale parameter of
# inverse-Gamma distribution. In the _UV version, we use the
# maximum likelihood estimate of sigma^2. So we divide by
# (alpha+1), which is (n_T - n_X0).
beta_post[subj] = np.zeros((n_C, n_V[subj]))
beta0_post[subj] = np.zeros((n_X0[subj], n_V[subj]))
for grid in range(n_grid):
beta_post[subj] += np.dot(L_LAMBDA_LT[grid, :, :],
sXTAcorrY[subj][grid, :, :])\
* all_SNR_grids[grid] \
* weight_post[grid, :]
beta0_post[subj] += weight_post[grid, :] * np.dot(
X0TAX0_i[subj][grid, :, :],
(X0TAY[subj][grid, :, :]
- np.dot(np.dot(XTAX0[subj][grid, :, :].T,
L_LAMBDA_LT[grid, :, :]),
sXTAcorrY[subj][grid, :, :])
* all_SNR_grids[grid]))
if np.max(np.abs(param_change)) < self.tol:
logger.info('The change of parameters is smaller than '
'the tolerance value {}. Fitting is finished '
'after {} iterations'.format(self.tol, it + 1))
break
for subj in range(n_subj):
if idx_DC[subj].size > 1:
collapsed_DC = np.sum(X0[subj][:, idx_DC[subj]], axis=1)
X0[subj] = np.insert(np.delete(X0[subj], idx_DC[subj], axis=1),
0, collapsed_DC, axis=1)
collapsed_beta0 = np.mean(beta0_post[subj][idx_DC[subj], :],
axis=0)
beta0_post[subj] = np.insert(
np.delete(beta0_post[subj], idx_DC[subj], axis=0),
0, collapsed_beta0, axis=0)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
return np.dot(L, L.T), L, s_post, \
beta_post, beta0_post, sigma_post, \
rho_post, X0, LL
def _fit_RSA_marginalized_null(self, Y, X_base,
scan_onsets):
""" The marginalized version of the null model for Bayesian RSA.
The null model assumes no task-related response to the
design matrix.
Note that there is a naming change of variable. X in fit()
is changed to Y here.
This is because we follow the tradition that Y corresponds
to data.
However, in wrapper function fit(), we follow the naming
routine of scikit-learn.
"""
# Because there is nothing to learn that is shared across
# participants, we can run each subject in serial.
# The only fitting required is to re-estimate X0 after
# each iteration
n_subj = len(Y)
t_start = time.time()
logger.info('Starting to fit the model. Maximum iteration: '
'{}.'.format(self.n_iter))
rho_grids, rho_weights = self._set_rho_grids()
logger.info('The grids of rho used to do numerical integration '
'are {}.'.format(rho_grids))
n_grid = self.rho_bins
log_weights = np.log(rho_weights)
rho_post = [None] * n_subj
sigma_post = [None] * n_subj
beta0_post = [None] * n_subj
X0 = [None] * n_subj
LL_null = np.zeros(n_subj)
for subj in range(n_subj):
logger.debug('Running on subject {}.'.format(subj))
[n_T, n_V] = np.shape(Y[subj])
D, F, run_TRs, n_run = self._prepare_DF(
n_T, scan_onsets=scan_onsets[subj])
YTY_diag = np.sum(Y[subj] * Y[subj], axis=0)
YTDY_diag = np.sum(Y[subj] * np.dot(D, Y[subj]), axis=0)
YTFY_diag = np.sum(Y[subj] * np.dot(F, Y[subj]), axis=0)
# Add DC components capturing run-specific baselines.
X_DC = self._gen_X_DC(run_TRs)
X_DC, X_base[subj], idx_DC = self._merge_DC_to_base(
X_DC, X_base[subj], no_DC=False)
X_res = np.empty((n_T, 0))
for it in range(0, self.n_iter):
X0[subj] = np.concatenate(
(X_base[subj], X_res), axis=1)
n_X0 = X0[subj].shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(
D, F, X0[subj], X0[subj])
X0TY, X0TDY, X0TFY = self._make_templates(
D, F, X0[subj], Y[subj])
YTAY_diag = YTY_diag - rho_grids[:, None] * YTDY_diag \
+ rho_grids[:, None]**2 * YTFY_diag
# dimension: #rho*space,
# A/sigma2 is the inverse of noise covariance matrix.
# YTAY means Y'AY
X0TAX0 = X0TX0[None, :, :] \
- rho_grids[:, None, None] \
* X0TDX0[None, :, :] \
+ rho_grids[:, None, None]**2 \
* X0TFX0[None, :, :]
# dimension: #rho*#baseline*#baseline
X0TAY = X0TY - rho_grids[:, None, None] * X0TDY \
+ rho_grids[:, None, None]**2 * X0TFY
# dimension: #rho*#baseline*space
X0TAX0_i = np.linalg.solve(
X0TAX0, np.identity(n_X0)[None, :, :])
# dimension: #rho*#baseline*#baseline
YTAcorrY_diag = np.empty(np.shape(YTAY_diag))
for i_r in range(np.size(rho_grids)):
YTAcorrY_diag[i_r, :] = YTAY_diag[i_r, :] \
- np.sum(X0TAY[i_r, :, :] * np.dot(
X0TAX0_i[i_r, :, :], X0TAY[i_r, :, :]),
axis=0)
log_fixed_terms = - (n_T - n_X0) / 2 * np.log(2 * np.pi)\
+ n_run / 2 * np.log(1 - rho_grids**2) \
+ scipy.special.gammaln((n_T - n_X0 - 2) / 2) \
+ (n_T - n_X0 - 2) / 2 * np.log(2)
# These are terms in the log likelihood that do not
# depend on L. Notice that the last term comes from
# the term from marginalizing sigma. We take the 2 in
# the denominator out. Accordingly, the "denominator"
# variable in the _raw_loglike_grids() function is not
# divided by 2
half_log_det_X0TAX0 = self._half_log_det(X0TAX0)
LL_raw = -half_log_det_X0TAX0[:, None] \
- (n_T - n_X0 - 2) / 2 * np.log(YTAcorrY_diag) \
+ log_weights[:, None] + log_fixed_terms[:, None]
# dimension: n_grid * space
# The log likelihood at each value of rho1.
# half_log_det_X0TAX0 is 0.5*log(det(X0TAX0)) with the size of
# number of parameter grids. So is the size of log_weights
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
weight_post = result_exp / result_sum
rho_post[subj] = np.sum(rho_grids[:, None] * weight_post,
axis=0)
# Mean-posterior estimate of rho.
sigma_means = YTAcorrY_diag ** 0.5 \
* (np.exp(scipy.special.gammaln((n_T - n_X0 - 3) / 2)
- scipy.special.gammaln((n_T - n_X0 - 2) / 2))
/ 2**0.5)
sigma_post[subj] = np.sum(sigma_means * weight_post, axis=0)
beta0_post[subj] = np.zeros((n_X0, n_V))
for grid in range(n_grid):
beta0_post[subj] += weight_post[grid, :] * np.dot(
X0TAX0_i[grid, :, :], X0TAY[grid, :, :])
if self.auto_nuisance:
residuals = Y[subj] - np.dot(
X_base[subj],
beta0_post[subj][:np.size(X_base[subj], 1), :])
X_res_new = self.nureg_method(
self.n_nureg_[subj]).fit_transform(
self.preprocess_residual(residuals))
if it >= 1:
if np.max(np.abs(X_res_new - X_res)) <= self.tol:
logger.info('The change of X_res is '
'smaller than the tolerance value {}. '
'Fitting is finished after {} '
'iterations'.format(self.tol, it + 1))
break
X_res = X_res_new
if idx_DC.size > 1:
collapsed_DC = np.sum(X0[subj][:, idx_DC], axis=1)
X0[subj] = np.insert(np.delete(X0[subj], idx_DC, axis=1), 0,
collapsed_DC, axis=1)
collapsed_beta0 = np.mean(beta0_post[subj][idx_DC, :], axis=0)
beta0_post[subj] = np.insert(
np.delete(beta0_post[subj], idx_DC, axis=0),
0, collapsed_beta0, axis=0)
LL_null[subj] = np.sum(np.log(result_sum) + max_value)
t_finish = time.time()
logger.info(
'total time of fitting: {} seconds'.format(t_finish - t_start))
return beta0_post, sigma_post, rho_post, X0, LL_null
def _raw_loglike_grids(self, L, s2XTAcorrX, YTAcorrY_diag,
sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms,
n_C, n_T, n_V, n_X0,
n_grid, rank):
# LAMBDA_i = np.dot(np.einsum('ijk,jl->ilk', s2XTAcorrX, L), L) \
# + np.identity(rank)
LAMBDA_i = np.empty((n_grid, rank, rank))
for grid in np.arange(n_grid):
LAMBDA_i[grid, :, :] = np.dot(np.dot(L.T,
s2XTAcorrX[grid, :, :]), L)
LAMBDA_i += np.identity(rank)
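# LAMBDA_i = I + L' (s^2 X'AcorrX) L; its Cholesky factor below
# yields 0.5 * log(det(LAMBDA_i)) for the log likelihood.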
# dimension: n_grid * rank * rank
Chol_LAMBDA_i = np.linalg.cholesky(LAMBDA_i)
# dimension: n_grid * rank * rank
half_log_det_LAMBDA_i = np.sum(
np.log(np.abs(np.diagonal(Chol_LAMBDA_i, axis1=1, axis2=2))),
axis=1)
# dimension: n_grid
L_LAMBDA = np.empty((n_grid, n_C, rank))
L_LAMBDA_LT = np.empty((n_grid, n_C, n_C))
s2YTAcorrXL_LAMBDA_LTXTAcorrY = np.empty((n_grid, n_V))
# dimension: space * n_grid
for grid in np.arange(n_grid):
L_LAMBDA[grid, :, :] = scipy.linalg.cho_solve(
(Chol_LAMBDA_i[grid, :, :], True), L.T).T
L_LAMBDA_LT[grid, :, :] = np.dot(L_LAMBDA[grid, :, :], L.T)
s2YTAcorrXL_LAMBDA_LTXTAcorrY[grid, :] = np.sum(
sXTAcorrY[grid, :, :] * np.dot(L_LAMBDA_LT[grid, :, :],
sXTAcorrY[grid, :, :]),
axis=0)
denominator = (YTAcorrY_diag - s2YTAcorrXL_LAMBDA_LTXTAcorrY)
# dimension: n_grid * space
# Not necessarily the best name for it. But this term appears
# as the denominator within the gradient wrt L
# In the equation of the log likelihood, this "denominator"
# term is in fact divided by 2. But we absorb that into the
# log fixed terms.
LL_raw = -half_log_det_X0TAX0[:, None] \
- half_log_det_LAMBDA_i[:, None] \
- (n_T - n_X0 - 2) / 2 * np.log(denominator) \
+ log_weights[:, None] + log_fixed_terms[:, None]
# dimension: n_grid * space
# The log likelihood at each pair of values of SNR and rho1.
# half_log_det_X0TAX0 is 0.5*log(det(X0TAX0)) with the size of
# number of parameter grids. So is the size of log_weights
return LL_raw, denominator, L_LAMBDA, L_LAMBDA_LT
def _sum_loglike_marginalized(self, L_vec, s2XTAcorrX, YTAcorrY_diag,
sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank=None):
sum_LL_total = 0
sum_grad_L = np.zeros(np.size(l_idx[0]))
for subj in range(len(YTAcorrY_diag)):
LL_total, grad_L = self._loglike_marginalized(
L_vec, s2XTAcorrX[subj], YTAcorrY_diag[subj],
sXTAcorrY[subj], half_log_det_X0TAX0[subj], log_weights,
log_fixed_terms[subj], l_idx, n_C, n_T[subj],
n_V[subj], n_X0[subj], n_grid, rank)
sum_LL_total += LL_total
sum_grad_L += grad_L
return sum_LL_total, sum_grad_L
def _loglike_marginalized(self, L_vec, s2XTAcorrX, YTAcorrY_diag,
sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms,
l_idx, n_C, n_T, n_V, n_X0,
n_grid, rank=None):
# In this version, we assume that beta is independent
# between voxels and noise is also independent. X0 captures the
# co-fluctuation between voxels that is
# not captured by design matrix X.
# The marginalized version marginalizes sigma^2, s and rho1
# for all voxels. The likelihood, as a function of sigma^2, takes
# the form of an inverse-Gamma distribution, so sigma^2 can be
# marginalized analytically assuming a uniform prior.
# s and rho1 are marginalized numerically: n_grid is the number
# of grid points in the parameter space of (s, rho1) used for
# the numerical integration.
n_l = np.size(l_idx[0])
# the number of parameters in the index of lower-triangular matrix
if rank is None:
rank = int((2 * n_C + 1
- np.sqrt(n_C**2 * 4 + n_C * 4 + 1 - 8 * n_l)) / 2)
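# The lower-triangular pattern gives
# n_l = n_C * rank - rank * (rank - 1) / 2, so rank is recovered
# as the smaller root of rank**2 - (2 * n_C + 1) * rank + 2 * n_l = 0.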
L = np.zeros([n_C, rank])
L[l_idx] = L_vec
LL_raw, denominator, L_LAMBDA, _ = self._raw_loglike_grids(
L, s2XTAcorrX, YTAcorrY_diag, sXTAcorrY, half_log_det_X0TAX0,
log_weights, log_fixed_terms, n_C, n_T, n_V, n_X0, n_grid, rank)
result_sum, max_value, result_exp = utils.sumexp_stable(LL_raw)
LL_total = np.sum(np.log(result_sum) + max_value)
# Now we start the gradient with respect to L
# s2XTAcorrXL_LAMBDA = np.einsum('ijk,ikl->ijl',
# s2XTAcorrX, L_LAMBDA)
s2XTAcorrXL_LAMBDA = np.empty((n_grid, n_C, rank))
for grid in range(n_grid):
s2XTAcorrXL_LAMBDA[grid, :, :] = np.dot(s2XTAcorrX[grid, :, :],
L_LAMBDA[grid, :, :])
# dimension: n_grid * condition * rank
I_minus_s2XTAcorrXL_LAMBDA_LT = np.identity(n_C) \
- np.dot(s2XTAcorrXL_LAMBDA, L.T)
# dimension: n_grid * condition * condition
# The step above may be calculated by einsum. Not sure
# which is faster.
weight_grad = result_exp / result_sum
weight_grad_over_denominator = weight_grad / denominator
# dimension: n_grid * space
weighted_sXTAcorrY = sXTAcorrY \
* weight_grad_over_denominator[:, None, :]
# dimension: n_grid * condition * space
# sYTAcorrXL_LAMBDA = np.einsum('ijk,ijl->ikl', sXTAcorrY, L_LAMBDA)
# dimension: n_grid * space * rank
grad_L = np.zeros([n_C, rank])
for grid in range(n_grid):
grad_L += np.dot(
np.dot(I_minus_s2XTAcorrXL_LAMBDA_LT[grid, :, :],
sXTAcorrY[grid, :, :]),
np.dot(weighted_sXTAcorrY[grid, :, :].T,
L_LAMBDA[grid, :, :])) * (n_T - n_X0 - 2)
grad_L -= np.sum(s2XTAcorrXL_LAMBDA
* np.sum(weight_grad, axis=1)[:, None, None],
axis=0)
# dimension: condition * rank
return -LL_total, -grad_L[l_idx]
def _check_data_GBRSA(self, X, for_fit=True):
# Check input data
if type(X) is np.ndarray:
X = [X]
assert type(X) is list, 'Input data X must be either a list '\
'with each entry for one participant, or a numpy array '\
'for a single participant.'
if for_fit:
for i, x in enumerate(X):
assert_all_finite(x)
assert x.ndim == 2, "Each participant's data should be " \
'a 2-dimensional ndarray'
assert np.all(np.std(x, axis=0) > 0),\
'The time courses of some voxels in participant {} '\
'do not change at all. Please make sure all voxels '\
'are within the brain'.format(i)
else:
for i, x in enumerate(X):
if x is not None:
assert x.ndim == 2, "Each participant's data should be " \
'a 2-dimensional ndarray'
assert x.shape[1] == self.n_V_[i], 'Number of voxels '\
'does not match that in the data used for fitting: '\
'subject {}'.format(i)
# This program allows fitting a single subject. But to have a consistent
# data structure, we make sure X and design are both lists.
return X
def _check_design_GBRSA(self, design, X):
# check design matrix
if type(design) is np.ndarray:
design = [design] * len(X)
if len(X) > 1:
logger.warning('There are multiple subjects while '
'there is only one design matrix. '
'I assume that the design matrix '
'is shared across all subjects.')
assert type(design) is list, 'design matrix must be either a list '\
'with each entry for one participant, or a numpy array '\
'for a single participant.'
for i, d in enumerate(design):
if X[i] is not None:
assert_all_finite(d)
assert d.ndim == 2,\
'The design matrix should be 2 dimension ndarray'
assert np.linalg.matrix_rank(d) == d.shape[1], \
'Your design matrix of subject {} has rank ' \
'smaller than the number of columns. Some columns '\
'can be explained by linear combination of other columns. '\
'Please check your design matrix.'.format(i)
assert np.size(d, axis=0) == np.size(X[i], axis=0),\
'Design matrix and data of subject {} do not '\
'have the same number of time points.'.format(i)
assert self.rank is None or self.rank <= d.shape[1],\
'Your design matrix of subject {} '\
'has fewer columns than the rank you set'.format(i)
if i == 0:
n_C = np.shape(d)[1]
else:
assert n_C == np.shape(d)[1], \
'In Group Bayesian RSA, all subjects should have'\
' the same set of experiment conditions, thus'\
' the same number of columns in the design matrix'
if X[i].shape[1] <= d.shape[1]:
logger.warning('Your data have fewer voxels than the '
'number of task conditions. This might '
'cause problem in fitting. Please consider '
'increasing the size of your ROI, or set '
'the rank parameter to a lower number to '
'estimate a low-rank representational '
'structure.')
return design
def _check_nuisance_GBRSA(self, nuisance, X):
# Check the nuisance regressors.
if nuisance is not None:
if type(nuisance) is np.ndarray:
nuisance = [nuisance] * len(X)
if len(X) > 1:
logger.warning('ATTENTION! There are multiple subjects '
'while there is only one nuisance matrix. '
'I assume that the nuisance matrix '
'is shared across all subjects. '
'Please double check.')
assert type(nuisance) is list, \
'nuisance matrix must be either a list '\
'with each entry for one participant, or a numpy array '\
'for a single participant.'
for i, n in enumerate(nuisance):
if n is not None:
assert_all_finite(n)
assert n.ndim == 2,\
'The nuisance regressor should be '\
'2 dimension ndarray or None'
assert np.linalg.matrix_rank(n) == n.shape[1], \
'The nuisance regressor of subject {} has rank '\
'smaller than the number of columns. '\
'Some columns can be explained by linear '\
'combination of other columns. Please check your' \
' nuisance regressors.'.format(i)
assert np.size(n, axis=0) == np.size(X[i], axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
else:
nuisance = [None] * len(X)
logger.info('None was provided for nuisance matrix. Replicating '
'it for all subjects.')
return nuisance
def _check_scan_onsets_GBRSA(self, scan_onsets, X):
# check scan_onsets validity
if scan_onsets is None or type(scan_onsets) is np.ndarray:
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
scan_onsets = [scan_onsets] * len(X)
if len(X) > 1:
logger.warning('There are multiple subjects while '
'there is only one set of scan_onsets. '
'I assume that it is the same for all'
' subjects. Please double check')
for i in np.arange(len(scan_onsets)):
if X[i] is not None:
if scan_onsets[i] is None:
scan_onsets[i] = np.array([0], dtype=int)
logger.warning('No scan onsets were provided for subject'
' {}. Treating all data of this subject as'
' coming from the same run.'.format(i))
else:
scan_onsets[i] = np.int32(scan_onsets[i])
assert (np.max(scan_onsets[i]) <= X[i].shape[0]
and np.min(scan_onsets[i]) >= 0
and 0 in scan_onsets[i]
and scan_onsets[i].ndim == 1), \
'Scan onsets of subject {} has formatting ' \
'issues: {}'.format(i, scan_onsets[i])
return scan_onsets
def _bin_exp(self, n_bin, scale=1.0):
""" Calculate the bin locations to approximate exponential distribution.
It breaks the cumulative probability of the exponential distribution
into n_bin equal bins, each covering 1 / n_bin probability. Then it
calculates the center of mass in each bin and returns the
centers of mass. So, it approximates the exponential distribution
with n_bin Delta functions, each weighted by 1 / n_bin, at the
locations of these centers of mass.
Parameters
----------
n_bin: int
The number of bins to approximate the exponential distribution
scale: float.
The scale parameter of the exponential distribution, defined in
the same way as scipy.stats. It does not influence the ratios
between the bins, but just controls the spacing between the bins.
So generally users should not change its default.
Returns
-------
bins: numpy array of size [n_bin,]
The centers of mass for each segment of the
exponential distribution.
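For example (a worked sketch, assuming scale=1.0): with n_bin=2,
the boundaries are 0, ln(2) and infinity, and the returned bins
are approximately [0.31, 1.69], each carrying probability 1/2.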
"""
boundaries = np.flip(scipy.stats.expon.isf(
np.linspace(0, 1, n_bin + 1),
scale=scale), axis=0)
bins = np.empty(n_bin)
for i in np.arange(n_bin):
bins[i] = utils.center_mass_exp(
(boundaries[i], boundaries[i + 1]), scale=scale)
return bins
def _set_SNR_grids(self):
""" Set the grids and weights for SNR used in numerical integration
of SNR parameters.
"""
if self.SNR_prior == 'unif':
SNR_grids = np.linspace(0, 1, self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1)
SNR_weights[0] = SNR_weights[0] / 2.0
SNR_weights[-1] = SNR_weights[-1] / 2.0
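# Trapezoidal quadrature on [0, 1]: interior grid points carry
# weight 1 / (SNR_bins - 1); the two endpoints carry half of that.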
elif self.SNR_prior == 'lognorm':
dist = scipy.stats.lognorm
alphas = np.arange(np.mod(self.SNR_bins, 2),
self.SNR_bins + 2, 2) / self.SNR_bins
# The goal here is to divide the area under the pdf curve
# to segments representing equal probabilities.
bounds = dist.interval(alphas, (self.logS_range,))
bounds = np.unique(bounds)
# bounds contain the boundaries which equally separate
# the probability mass of the distribution
SNR_grids = np.zeros(self.SNR_bins)
for i in np.arange(self.SNR_bins):
SNR_grids[i] = dist.expect(
lambda x: x, args=(self.logS_range,),
lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins
# Center of mass of each segment between consecutive
# bounds are set as the grids for SNR.
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
elif self.SNR_prior == 'exp':
SNR_grids = self._bin_exp(self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
else:
SNR_grids = np.ones(1)
SNR_weights = np.ones(1)
SNR_weights = SNR_weights / np.sum(SNR_weights)
return SNR_grids, SNR_weights
def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights
def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX,
YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0,
n_C, n_V, n_X0, n_grid):
""" We need to integrate parameters SNR and rho on 2-d discrete grids.
This function generates matrices which have only one dimension for
these two parameters, with each slice in that dimension
corresponding to each combination of the discrete grids of SNR
and discrete grids of rho.
"""
half_log_det_X0TAX0 = np.reshape(
np.repeat(self._half_log_det(X0TAX0)[None, :],
self.SNR_bins, axis=0), n_grid)
X0TAX0 = np.reshape(
np.repeat(X0TAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
X0TAX0_i = np.reshape(np.repeat(
X0TAX0_i[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
s2XTAcorrX = np.reshape(
SNR_grids[:, None, None, None]**2 * XTAcorrX,
(n_grid, n_C, n_C))
YTAcorrY_diag = np.reshape(np.repeat(
YTAcorrY_diag[None, :, :],
self.SNR_bins, axis=0), (n_grid, n_V))
sXTAcorrY = np.reshape(SNR_grids[:, None, None, None]
* XTAcorrY, (n_grid, n_C, n_V))
X0TAY = np.reshape(np.repeat(X0TAY[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_V))
XTAX0 = np.reshape(np.repeat(XTAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_C, n_X0))
return half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, \
YTAcorrY_diag, sXTAcorrY, X0TAY, XTAX0
| 211,990 | 49.534207 | 123 | py |
brainiak | brainiak-master/brainiak/reprsimil/__init__.py | # Copyright 2016 Mingbo Cai, Princeton Neuroscience Institute,
# Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Bayesian method to perform Representational Similarity Analysis"""
| 718 | 43.9375 | 75 | py |
brainiak | brainiak-master/brainiak/reconstruct/__init__.py | # Copyright 2018 David Huberdeau & Peter Kok
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inverted encoding model for recreating continuous representations."""
| 675 | 44.066667 | 75 | py |
brainiak | brainiak-master/brainiak/reconstruct/iem.py | # Copyright 2018 David Huberdeau & Peter Kok
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Inverted Encoding Model (IEM)
Method to decode and reconstruct features from data.
The implementation is roughly based on the following publications:
[Kok2013] "1.Kok, P., Brouwer, G. J., Gerven, M. A. J. van &
Lange, F. P. de. Prior Expectations Bias Sensory Representations
in Visual Cortex. J. Neurosci. 33, 16275–16284 (2013).
[Brouwer2011] "2.Brouwer, G. J. & Heeger, D. J. Cross-orientation
suppression in human visual cortex. J. Neurophysiol. 106(5):
2108-2119 (2011).
[Brouwer2009] "3.Brouwer, G. J. & Heeger, D. J.
Decoding and Reconstructing Color from Responses in Human Visual
Cortex. J. Neurosci. 29, 13992–14003 (2009).
This implementation uses a set of sinusoidal
basis functions to represent the set of possible feature values.
A feature value is some characteristic of a stimulus, e.g. the
angular location of a target along a horizontal line. This code was
written to give some flexibility compared to the specific instances
in Kok, 2013 & in Brouwer, 2009. Users can set the number of basis
functions, or channels, and the range of possible feature values.
There are separate classes for reconstructing feature values in a
1-dimensional (1D) space or in a 2-dimensional (2D) space.
"""
# Authors: David Huberdeau (Yale University) &
# Peter Kok (Yale University), 2018 &
# Vy Vo (Intel Corp., UCSD), 2019
import logging
import warnings
import numpy as np
import scipy.stats
from sklearn.base import BaseEstimator
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from ..utils.utils import circ_dist
__all__ = ["InvertedEncoding1D",
"InvertedEncoding2D"]
logger = logging.getLogger(__name__)
MAX_CONDITION_CHECK = 9000
class InvertedEncoding1D(BaseEstimator):
"""Basis function-based reconstruction method
Inverted encoding models (alternatively known as forward models) are used
to reconstruct a feature represented in some N-dimensional space, here 1D,
(e.g. color of a stimulus) from patterns across voxels in functional data.
The model uses n_channels number of idealized basis functions and assumes
that the transformation from stimulus feature (e.g. color) to basis
    function is one-to-one and invertible. The response of a voxel is
expressed as the weighted sum of basis functions. In this implementation,
basis functions were half-wave rectified sinusoid functions raised to a
power set by the user (e.g. 6).
The model:
Inverted encoding models reconstruct a stimulus feature from
patterns of BOLD activity by relating the activity in each
voxel, B, to the values of hypothetical channels (or basis
functions), C, according to Equation 1 below.
(1) B = W*C
where W is a weight matrix that represents the relationship
between BOLD activity and Channels. W must be estimated from
training data; this implementation (and most described in the
literature) uses linear regression to estimate W as in Equation
2 below [note: inv() represents matrix inverse or
pseudo-inverse].
(2) W_est = B_train*inv(C_train)
The weights in W_est (short for "estimated") represent the
contributions of each channel to the response of each voxel.
Estimated channel responses can be computed given W_est and
new voxel activity represented in matrix B_exp (short for
"experiment") through inversion of Equation 1:
(3) C_est = inv(W_est)*B_exp
Given estimated channel responses, C_est, it is straightforward
to obtain the reconstructed feature value by summing over
channels multiplied by their channel responses and taking the
argmax (i.e. the feature associated with the maximum value).
Using this model:
Use fit() to estimate the weights of the basis functions given
input data (e.g. beta values from fMRI data). This function
will execute equation 2 above.
Use predict() to compute predicted stimulus values
from new functional data. This function computes estimated
channel responses, as in equation 3, then computes summed
channel output and finds the argmax (within the stimulus
feature space) associated with those responses.
Use score() to compute a measure of the error of the prediction
based on known stimuli.
This implementation assumes a circular (or half-
circular) feature domain. Future implementations might
generalize the feature input space, and increase the
possible dimensionality.
Parameters
----------
    n_channels: int, default 6. Number of channels
The number of channels, or basis functions, to be used in
the inverted encoding model.
    channel_exp: int, default 5. Basis function exponent.
        The exponent of the sinusoidal basis functions, which
establishes the width of the functions.
stimulus_mode: str, default 'halfcircular' (other option is
'circular'). Describes the feature domain.
range_start: double, default 0. Lowest value of domain.
Beginning value of range of independent variable
(usually degrees).
range_stop: double, default 180. Highest value of domain.
Ending value of range of independent variable
(usually degrees).
channel_density: int, default 180. Number of points in the
feature domain.
stimulus_resolution: double, default None will set the stimulus
resolution to be identical to the channel density. This sets
the resolution at which the stimuli were presented (e.g. a
spatial position with some width has a lower stimulus
resolution).
Attributes
----------
channels_: [n_channels, channel density] NumPy 2D array
matrix defining channel values
W_: sklearn.linear_model model containing weight matrix that
relates estimated channel responses to response amplitude
data
See get_params() for the rest of the attributes.
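    Examples
    --------
    A minimal sketch of the intended workflow (the shapes and random
    data below are hypothetical, chosen only to satisfy the fitting
    requirements):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> betas_train = rng.randn(40, 50)         # 40 trials, 50 voxels
    >>> angles_train = rng.rand(40) * 180.      # features in [0, 180)
    >>> iem = InvertedEncoding1D(n_channels=6, channel_exp=5)
    >>> _ = iem.fit(betas_train, angles_train)
    >>> preds = iem.predict(rng.randn(10, 50))  # 10 predicted angles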
"""
def __init__(self, n_channels=6, channel_exp=5,
stimulus_mode='halfcircular', range_start=0., range_stop=180.,
channel_density=180, stimulus_resolution=None):
self.n_channels = n_channels
self.channel_exp = channel_exp
self.stimulus_mode = stimulus_mode
self.range_start = range_start
self.range_stop = range_stop
self.channel_density = channel_density
self.channel_domain = np.linspace(range_start, range_stop - 1,
channel_density)
if stimulus_resolution is None:
self.stim_res = channel_density
else:
self.stim_res = stimulus_resolution
self._check_params()
def _check_params(self):
if self.range_start >= self.range_stop:
raise ValueError("range_start {} must be less than "
"{} range_stop.".format(self.range_start,
self.range_stop))
if self.stimulus_mode == 'halfcircular':
if (self.range_stop - self.range_start) != 180.:
raise ValueError("For half-circular feature spaces,"
"the range must be 180 degrees, "
"not {}".
format(self.range_stop - self.range_start))
elif self.stimulus_mode == 'circular':
if (self.range_stop - self.range_start) != 360.:
raise ValueError("For circular feature spaces, the"
" range must be 360 degrees"
"not {}".
format(self.range_stop - self.range_start))
if self.n_channels < 2:
raise ValueError("Insufficient number of channels.")
if not np.isin(self.stimulus_mode, ['circular', 'halfcircular']):
raise ValueError("Stimulus mode must be one of these: "
"'circular', 'halfcircular'")
def fit(self, X, y):
"""Use data and feature variable labels to fit an IEM
Parameters
----------
X: numpy matrix of voxel activation data. [observations, voxels]
Should contain the beta values for each observation or
trial and each voxel of training data.
y: numpy array of response variable. [observations]
Should contain the feature for each observation in X.
"""
# Check that data matrix is well conditioned:
if np.linalg.cond(X) > MAX_CONDITION_CHECK:
logger.error("Data is singular.")
raise ValueError("Data matrix is nearly singular.")
if X.shape[0] < self.n_channels:
logger.error("Not enough observations. Cannot calculate "
"pseudoinverse.")
raise ValueError("Fewer observations (trials) than "
"channels. Cannot compute pseudoinverse.")
# Check that the data matrix is the right size
shape_data = np.shape(X)
shape_labels = np.shape(y)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
else:
if shape_data[0] != shape_labels[0]:
raise ValueError("Mismatched data samples and label samples")
# Define the channels (or basis set)
self.channels_, channel_centers = self._define_channels()
logger.info("Defined channels centered at {} degrees.".format(
np.rad2deg(channel_centers)))
# Create a matrix of channel activations for every observation.
# (i.e., C1 in Brouwer & Heeger 2009.)
C = self._define_trial_activations(y)
# Solve for W in B = WC
self.W_ = X.transpose() @ np.linalg.pinv(C.transpose())
if np.linalg.cond(self.W_) > MAX_CONDITION_CHECK:
logger.error("Weight matrix is nearly singular.")
raise ValueError("Weight matrix is nearly singular.")
return self
def predict(self, X):
"""Use test data to predict the feature
Parameters
----------
X: numpy matrix of voxel activation from test trials
[observations, voxels]. Used to predict feature
associated with the given observation.
Returns
-------
model_prediction: numpy array of estimated feature values.
"""
# Check that the data matrix is the right size
shape_data = np.shape(X)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
model_prediction = self._predict_features(X)
return model_prediction
def score(self, X, y):
"""Calculate error measure of prediction. Default measurement
is R^2, the coefficient of determination.
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations,voxels]
y: numpy array of responses. [observations]
Returns
-------
score_value: the error measurement between the actual
feature and predicted features.
"""
pred_features = self.predict(X)
if self.stimulus_mode == 'halfcircular':
# multiply features by 2. otherwise doesn't wrap properly
pred_features = pred_features * 2
y = y * 2
ssres = (circ_dist(np.deg2rad(y),
np.deg2rad(pred_features)) ** 2).sum()
sstot = (circ_dist(np.deg2rad(y),
np.ones(y.size) * scipy.stats.circmean(
np.deg2rad(y))) ** 2).sum()
score_value = (1 - ssres / sstot)
return score_value
def get_params(self, deep: bool = True):
"""Returns model parameters.
Returns
-------
        params: the parameters of this object
"""
return {"n_channels": self.n_channels, "channel_exp": self.channel_exp,
"stimulus_mode": self.stimulus_mode,
"range_start": self.range_start, "range_stop": self.range_stop,
"channel_domain": self.channel_domain,
"stim_res": self.stim_res}
def set_params(self, **parameters):
"""Sets model parameters after initialization.
Parameters
----------
parameters: structure with parameters and change values
"""
for parameter, value in parameters.items():
setattr(self, parameter, value)
setattr(self, "channel_domain",
np.linspace(self.range_start, self.range_stop - 1,
self.channel_density))
self._check_params()
return self
def _define_channels(self):
"""Define basis functions (aka channels).
Returns
-------
channels: numpy matrix of basis functions. dimensions are
[n_channels, function resolution].
channel_centers: numpy array of the centers of each channel
"""
channel_centers = np.linspace(np.deg2rad(self.range_start),
np.deg2rad(self.range_stop),
self.n_channels + 1)
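        # Drop the endpoint: in a (half-)circular space the last center
        # coincides with the first one modulo the period.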
channel_centers = channel_centers[0:-1]
# make sure channels are not bimodal if using 360 deg space
if self.stimulus_mode == 'circular':
domain = self.channel_domain * 0.5
centers = channel_centers * 0.5
elif self.stimulus_mode == 'halfcircular':
domain = self.channel_domain
centers = channel_centers
# define exponentiated function
channels = np.asarray(
[np.cos(np.deg2rad(domain) - cx) ** self.channel_exp for cx in
centers])
        # rectification (absolute value) preserving circularity
channels = abs(channels)
return channels, channel_centers
def _define_trial_activations(self, stimuli):
"""Defines a numpy matrix of predicted channel responses for
each trial/observation.
Parameters
stimuli: numpy array of the feature values for each
observation
Returns
-------
C: matrix of predicted channel responses. dimensions are
number of observations by stimulus resolution
"""
stim_axis = np.linspace(self.range_start, self.range_stop - 1,
self.stim_res)
if self.range_start > 0:
stimuli = stimuli + self.range_start
elif self.range_start < 0:
stimuli = stimuli - self.range_start
one_hot = np.eye(self.stim_res)
indices = [np.argmin(abs(stim_axis - x)) for x in stimuli]
stimulus_mask = one_hot[indices, :]
if self.channel_density != self.stim_res:
if self.channel_density % self.stim_res == 0:
                # np.repeat needs an integer count and an explicit axis
                # to upsample the mask without flattening it to 1D
                stimulus_mask = np.repeat(stimulus_mask,
                                          self.channel_density
                                          // self.stim_res, axis=1)
else:
raise NotImplementedError("This code doesn't currently support"
" stimuli which are not square "
"functions in the feature domain, or"
" stimulus widths that are not even"
"divisors of the number of points in"
" the feature domain.")
C = stimulus_mask @ self.channels_.transpose()
# Check that C is full rank
if np.linalg.matrix_rank(C) < self.n_channels:
warnings.warn("Stimulus matrix is {}, not full rank. May cause "
"issues with stimulus prediction/reconstruction.".
format(np.linalg.matrix_rank(C)), RuntimeWarning)
return C
def _predict_channel_responses(self, X):
"""Computes predicted channel responses from data
(e.g. C2 in Brouwer & Heeger 2009)
Parameters
----------
X: numpy data matrix. [observations, voxels]
Returns
-------
channel_response: numpy matrix of channel responses
"""
channel_response = np.matmul(np.linalg.pinv(self.W_), X.transpose())
return channel_response
def _predict_feature_responses(self, X):
"""Takes channel weights and transforms them into continuous
functions defined in the feature domain.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_response: predict response from all channels. Used
to predict feature (e.g. direction).
"""
pred_response = np.matmul(self.channels_.transpose(),
self._predict_channel_responses(X))
return pred_response
def _predict_features(self, X):
"""Predicts feature value (e.g. direction) from data in X.
Takes the maximum of the 'reconstructed' or predicted response
function.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_features: predicted feature from response across all
channels.
"""
pred_response = self._predict_feature_responses(X)
feature_ind = np.argmax(pred_response, 0)
pred_features = self.channel_domain[feature_ind]
return pred_features
class InvertedEncoding2D(BaseEstimator):
"""Basis function-based reconstruction method
Inverted encoding models (alternatively known as forward models) are used
to reconstruct a feature represented in a N-dimensional space, here 2D,
(e.g. position on a projector screen) from patterns across voxels in
functional data. The model uses some number of idealized basis functions
that cover the 2D space, and assumes that the transformation from
stimulus feature (e.g. 2D spatial position) to basis function is one-
to-one and invertible. The response of a voxel is expressed as the
weighted sum of basis functions. In this implementation, basis functions
were half-wave rectified sinusoid functions raised to some power (set by
the user).
The documentation will refer to the 'stimulus space' or 'stimulus domain',
which should be a 2D space in consistent units (e.g. screen pixels,
or degrees visual angle). The stimulus space is the domain in which the
    stimulus is reconstructed. We will refer to each point in this 2D
stimulus domain as a 'pixel'.
The model:
Inverted encoding models reconstruct a stimulus feature from
patterns of BOLD activity by relating the activity in each
voxel, B, to the values of hypothetical channels (or basis
functions), C, according to Equation 1 below.
(1) B = W*C
where W is a weight matrix that represents the relationship
between BOLD activity and Channels. W must be estimated from
training data; this implementation (and most described in the
literature) uses linear regression to estimate W as in Equation
2 below [note: inv() represents matrix inverse or
pseudo-inverse].
(2) W_est = B_train*inv(C_train)
The weights in W_est (short for "estimated") represent the
contributions of each channel to the response of each voxel.
Estimated channel responses can be computed given W_est and
new voxel activity represented in matrix B_exp (short for
"experiment") through inversion of Equation 1:
(3) C_est = inv(W_est)*B_exp
Given estimated channel responses, C_est, it is straightforward
to obtain the reconstructed feature value by summing over
channels multiplied by their channel responses and taking the
argmax (i.e. the feature associated with the maximum value).
Using this model:
Use fit() to estimate the weights of the basis functions given
input data (e.g. beta values from fMRI data). This function
will execute equation 2 above.
Use predict() to compute predicted stimulus values
from new functional data. This function computes estimated
channel responses, as in equation 3, then computes summed
channel output and finds the argmax (within the stimulus
feature space) associated with those responses.
Use score() to compute a measure of the error of the prediction
based on known stimuli.
Parameters
----------
stim_xlim: list of 2 floats Specifies the minimum and maximum x-values
of the area to be reconstructed. In order to be estimated properly, a
stimulus must appear at these limits. Specifying limits outside the
range of the stimuli can lead to spurious estimates.
stim_ylim: list of 2 floats Specifies the minimum and maximum y-values
of the area to be reconstructed. In order to be estimated properly, a
stimulus must appear at these limits. Specifying limits outside the
range of the stimuli can lead to spurious estimates.
stimulus_resolution: float or list of 2 floats. If a single float is
given, it will be expanded to a list (i.e. we will assume that the
reconstructed area is composed of square pixels).
stim_radius: float, or sequence of floats [n_stim], default None. If the
user does not define the design matrix of the encoding model (e.g. C
in B = W*C), it will be defined automatically on the assumption that
each observation was for a 2D circular stimulus of some radius.
chan_xlim: list of 2 floats, default None. Specifies the minimum and
maximum x-values of the channels, or basis functions.
chan_ylim: list of 2 floats, default None. Specifies the minimum and
maximum y-values of the channels, or basis functions.
channels: [n_channels, n_pixels] NumPy 2D array, default None. If None at
initialization, it can be defined with
either define_basis_functions_sqgrid() or
define_basis_functions_trigrid(), each of which tiles the given 2D
space with some grid (square or triangular/hexagonal, respectively).
Alternatively, the user can specify their own channels.
channel_exp: int, default 7. Basis function exponent. The exponent of the
        sinusoidal basis functions, which helps control their width.
Attributes
----------
channels: [n_channels, n_pixels] NumPy 2D array defining channels
W_: sklearn.linear_model containing weight matrix that relates estimated
channel responses to response data
See get_params() for the rest of the attributes.
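    Examples
    --------
    A minimal sketch of the intended workflow (all shapes, limits and
    random data below are hypothetical):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> iem2 = InvertedEncoding2D(stim_xlim=[0, 100], stim_ylim=[0, 100],
    ...                           stimulus_resolution=100, stim_radius=5)
    >>> _ = iem2.define_basis_functions_sqgrid(4)  # 4 x 4 channel grid
    >>> betas = rng.randn(40, 60)                  # 40 trials, 60 voxels
    >>> centers = rng.rand(40, 2) * 100            # 2D stimulus centers
    >>> _ = iem2.fit(betas, centers)
    >>> positions = iem2.predict(rng.randn(5, 60))  # [5, 2] (x, y)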
"""
def __init__(self, stim_xlim, stim_ylim, stimulus_resolution,
stim_radius=None, chan_xlim=None, chan_ylim=None,
channels=None, channel_exp=7):
"""Defines a 2D inverted encoding model object.
While the parameters defining the domain in which to reconstruct
the stimuli are required (e.g. all `stim_*` inputs), the parameters
to define the channels (`chan*`) are optional, in case the user
wishes to define their own channels (a.k.a basis functions).
Parameters
----------
stim_xlim: sequence of 2 float values, specifying the lower & upper
limits on the horizontal axis, respectively.
stim_ylim: sequence of 2 float values, specifying the lower & upper
limits on the vertical axis, respectively.
stimulus_resolution: a float or sequence of 2 floats, specifying the
number of pixels that exist in the x- and y- directions.
stim_radius: float, default None. The radius in pixels, assuming that
the stimulus is circular. If None, the user must either define it
before running fit(), or pass in a custom C in B = W*C.
chan_xlim: sequence of 2 float values, default None. Specifies the
lower & upper limits of the channels in the horizontal axis. If
None, the user must define this before using the class functions
to create basis functions, or pass in custom-defined channels.
chan_ylim: sequence of 2 float values, default None. Specifies the
lower & upper limits of the channels in the vertical axis. If
None, the user must define this before using the class functions
to create basis functions, or pass in custom-defined channels.
        channel_exp: float or int, default 7. The exponent of the
            sinusoidal basis functions. It must be set before the
            channels are defined, unless custom-defined channels are
            passed in.
"""
# Automatically expand stimulus_resolution if only one value is given.
# This will create a square field of view (FOV) for the
# reconstruction.
if not isinstance(stimulus_resolution, list): # make FOV square
stimulus_resolution = [stimulus_resolution, stimulus_resolution]
if (len(stim_xlim) != 2) or (len(stim_ylim) != 2):
raise ValueError("Stimulus limits should be a sequence, 2 values")
self.stim_fov = [stim_xlim, stim_ylim]
self.stim_pixels = [np.linspace(stim_xlim[0], stim_xlim[1],
stimulus_resolution[0]),
np.linspace(stim_ylim[0], stim_ylim[1],
stimulus_resolution[1])]
self.xp, self.yp = np.meshgrid(self.stim_pixels[0],
self.stim_pixels[1])
self.stim_radius_px = stim_radius
self.channels = channels
if self.channels is None:
self.n_channels = None
else:
self.n_channels = self.channels.shape[0]
if chan_xlim is None:
chan_xlim = stim_xlim
logger.info("Set channel x-limits to stimulus x-limits", stim_xlim)
if chan_ylim is None:
chan_ylim = stim_ylim
logger.info("Set channel y-limits to stimulus y-limits", stim_ylim)
self.channel_limits = [chan_xlim, chan_ylim]
self.channel_exp = channel_exp
self._check_params()
def _check_params(self):
if len(self.stim_fov) != 2:
raise ValueError("Stim FOV needs to have an x-list and a y-list")
elif len(self.stim_fov[0]) != 2 or len(self.stim_fov[1]) != 2:
raise ValueError("Stimulus limits should be a sequence, 2 values")
else:
if (self.stim_fov[0][0] >= self.stim_fov[0][1]) or \
(self.stim_fov[1][0] >= self.stim_fov[1][1]):
raise ValueError("Stimulus x or y limits should be ascending "
"values")
if self.xp.size != self.yp.size:
raise ValueError("xpixel grid and ypixel grid do not have same "
"number of elements")
        if self.n_channels is not None and self.channels is not None:
if self.n_channels != self.channels.shape[0]:
raise ValueError("Number of channels {} does not match the "
"defined channels: {}".
format(self.n_channels,
self.channels.shape[0]))
if self.channels.shape[1] != self.xp.size:
raise ValueError("Defined {} channels over {} pixels, but "
"stimuli are represented over {} pixels. "
"Pixels should match.".
format(self.n_channels,
self.channels.shape[1],
self.xp.size))
def fit(self, X, y, C=None):
"""Use data and feature variable labels to fit an IEM
Parameters
----------
X: numpy matrix of voxel activation data. [observations, voxels]
Should contain the beta values for each observation or
trial and each voxel of training data.
y: numpy array of response variable. [observations]
Should contain the feature for each observation in X.
C: numpy matrix of channel activations for every observation (e.g.
the design matrix C in the linear equation B = W*C), matrix size
[observations, pixels]. If None (default), this assumes that each
observation contains a 2D circular stimulus and will define the
activations with self._define_trial_activations(y).
"""
# Check that data matrix is well conditioned:
if np.linalg.cond(X) > MAX_CONDITION_CHECK:
logger.error("Data is singular.")
raise ValueError("Data matrix is nearly singular.")
if self.channels is None:
raise ValueError("Must define channels (set of basis functions).")
if X.shape[0] < self.n_channels:
logger.error("Not enough observations. Cannot calculate "
"pseudoinverse.")
raise ValueError("Fewer observations (trials) than "
"channels. Cannot compute pseudoinverse.")
# Check that the data matrix is the right size
shape_data = np.shape(X)
shape_labels = np.shape(y)
if shape_data[0] != shape_labels[0]:
raise ValueError("Mismatched data samples and label samples")
if C is None:
# Create a matrix of channel activations for every observation.
# (i.e., C1 in Brouwer & Heeger 2009.)
C = self._define_trial_activations(y)
# Solve for W in B = WC
self.W_ = X.transpose() @ np.linalg.pinv(C.transpose())
if np.linalg.cond(self.W_) > MAX_CONDITION_CHECK:
logger.error("Weight matrix is nearly singular.")
raise ValueError("Weight matrix is nearly singular.")
return self
def predict(self, X):
"""Use test data to predict the feature
Parameters
----------
X: numpy matrix of voxel activation from test trials [observations,
voxels]. Used to predict feature associated with the given
observation.
Returns
-------
model_prediction: numpy array of estimated feature values.
"""
# Check that the data matrix is the right size
shape_data = np.shape(X)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
model_prediction = self._predict_features(X)
return model_prediction
def score(self, X, y):
"""Calculate error measure of prediction, assuming that the predicted
feature is at the maximum of the reconstructed values.
To score the reconstructions against expected features defined in the
stimulus domain (i.e. in pixels), see score_against_reconstructed().
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations,voxels]
y: numpy array of stimulus features. [observations, 2]
Returns
-------
score_value: the error measurement between the actual
feature and predicted features, [observations].
"""
pred_features = self.predict(X)
ssres = np.sum((pred_features - y) ** 2, axis=1)
sstot = np.sum((y - np.mean(y)) ** 2, axis=1)
score_value = 1 - (ssres / sstot)
return score_value
def score_against_reconstructed(self, X, y, metric="euclidean"):
"""Calculates a distance metric between reconstructed features in
the 2D stimulus domain (i.e. reconstructions in pixels) given
some observations X, and expected features y. Expected features must
also be in the pixel stimulus domain.
To score the reconstructions against the expected maxima, see score().
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations, voxels]
y: numpy array of the expected stimulus reconstruction values [pixels,
observations].
metric: string specifying the distance metric, either "euclidean" or
"cosine".
Returns
-------
score_value: the error measurement between the reconstructed feature
values as the expected values, [observations].
"""
yhat = self.predict_feature_responses(X)
if metric == "euclidean":
score_value = euclidean_distances(y.T, yhat.T)
elif metric == "cosine":
score_value = cosine_distances(y.T, yhat.T)
return score_value[0, :]
def get_params(self, deep: bool = True):
"""Returns model parameters.
Returns
-------
        params: the parameters of this object
"""
return {"n_channels": self.n_channels, "channel_exp": self.channel_exp,
"stim_fov": self.stim_fov, "stim_pixels": self.stim_pixels,
"stim_radius_px": self.stim_radius_px, "xp": self.xp,
"yp": self.yp, "channels": self.channels, "channel_limits":
self.channel_limits}
def set_params(self, **parameters):
"""Sets model parameters after initialization.
Parameters
----------
parameters: structure with parameters and change values
"""
for parameter, value in parameters.items():
setattr(self, parameter, value)
self._check_params()
return self
def _make_2d_cosine(self, x, y, x_center, y_center, s):
"""Defines a 2D exponentiated cosine (isometric, e.g. constant width
in x & y) for use as a basis function. Function goes to zero at the
given size constant s. That is, the function is given by if r <= s:
f(r) = (0.5 + 0.5*cos(r*pi/s)))**channel_exp else: 0 where r is
the Euclidean distance from the center of the function. This will
yield a Gaussian-like function, centered at (x_center, y_center).
Parameters
----------
x: x-coordinates of the stimulus space, [npixels, 1] matrix
y: y-coordinates of the stimulus space, [npixels, 1] matrix
x_center: x-coordinate of basis function centers (sequence, nchannels)
y_center: y-coordinate of basis function centers (sequence, nchannels)
s: size constant of the 2D cosine function. This is the radius where
the function is non-zero.
Returns
-------
cos_functions: basis functions defined in the 2D stimulus space.
returns a [nchannels, npixels] matrix.
"""
cos_functions = np.zeros((len(x_center), len(x)))
for i in range(len(x_center)):
myr = np.sqrt((x - x_center[i]) ** 2 + (y - y_center[i]) ** 2). \
squeeze()
qq = (myr <= s) * 1
zp = ((0.5 * (1 + np.cos(myr * np.pi / s))) ** self.channel_exp)
cos_functions[i, :] = zp * qq
return cos_functions
def _2d_cosine_sz_to_fwhm(self, size_constant):
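        """Converts the size constant of an exponentiated 2D cosine basis
        function to its full-width half-maximum (FWHM) in stimulus space.
        Inverse of _2d_cosine_fwhm_to_sz().
        """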
fwhm = 2 * size_constant \
* np.arccos((0.5 ** (1 / self.channel_exp) - 0.5) / 0.5) / np.pi
return fwhm
def _2d_cosine_fwhm_to_sz(self, fwhm):
"""For an exponentiated 2D cosine basis function, converts the
full-width half-maximum (FWHM) of that function to the function's
size constant. The size constant is the variable s in the function
below:
        f(r) = (0.5 + 0.5*cos(r*pi/s))**channel_exp if r <= s, else 0,
        where r is the Euclidean distance from the center of the
        function.
Parameters
----------
fwhm: a float indicating the full-width half-maximum in stimulus space
Returns
-------
sz: the size constant of the exponentiated cosine
"""
sz = (0.5 * np.pi * fwhm) / \
(np.arccos((0.5 ** (1 / self.channel_exp) - 0.5) / 0.5))
return sz
def define_basis_functions_sqgrid(self, nchannels, channel_size=None):
"""Define basis functions (aka channels) arrange in a square grid.
Sets the self.channels parameter.
Parameters
----------
nchannels: number of channels in the x (horizontal) direction
channel_size: the desired full-width half-maximum (FWHM) of the
channel, in stimulus space.
Returns
-------
self.channels: defines channels, a [nchannels, npixels] matrix.
channel_centers: numpy array of the centers of each channel, given as
[nchannels x 2] matrix
"""
# If given a single value for nchannels, expand to make a square
if not isinstance(nchannels, list):
nchannels = [nchannels, nchannels]
chan_xcenters = np.linspace(self.channel_limits[0][0],
self.channel_limits[0][1], nchannels[0])
chan_ycenters = np.linspace(self.channel_limits[1][0],
self.channel_limits[1][1], nchannels[1])
cx, cy = np.meshgrid(chan_xcenters, chan_ycenters)
cx = cx.reshape(-1, 1)
cy = cy.reshape(-1, 1)
if channel_size is None:
# To get even coverage, setting the channel FWHM to ~1.1x-1.2x the
# spacing between the channels might work. (See Sprague et al. 2013
# Methods & Supplementary Figure 3 -- this is for cosine exp = 7,
# your mileage may vary for other exponents!).
channel_size = 1.2 * (chan_xcenters[1] - chan_xcenters[0])
cos_width = self._2d_cosine_fwhm_to_sz(channel_size)
# define exponentiated function
self.channels = self._make_2d_cosine(self.xp.reshape(-1, 1),
self.yp.reshape(-1, 1), cx, cy,
cos_width)
self.n_channels = self.channels.shape[0]
return self.channels, np.hstack([cx, cy])
def define_basis_functions_trigrid(self, grid_radius, channel_size=None):
"""Define basis functions (aka channels) arranged in a triangular grid.
Returns
-------
self.channels: defines channels, [nchannels, npixels] matrix.
channel_centers: numpy array of the centers of each channel
"""
x_dist = np.diff(self.channel_limits[0]) / (grid_radius * 2)
y_dist = x_dist * np.sqrt(3) * 0.5
trigrid = np.zeros((0, 2))
xbase = np.expand_dims(np.arange(self.channel_limits[0][0],
self.channel_limits[0][1],
x_dist), 1)
for yi, y in enumerate(np.arange(self.channel_limits[1][0],
self.channel_limits[1][1], y_dist)):
if (yi % 2) == 0:
xx = xbase.copy()
yy = np.ones((xx.size, 1)) * y
else:
xx = xbase.copy() + x_dist / 2
yy = np.ones((xx.size, 1)) * y
trigrid = np.vstack(
(trigrid, np.hstack((xx, yy))))
if channel_size is None:
# To get even coverage, setting the channel FWHM to ~1.1x-1.2x the
# spacing between the channels might work. (See Sprague et al. 2013
# Methods & Supplementary Figure 3 -- this is for cosine exp = 7,
# your mileage may vary for other exponents!).
channel_size = 1.1 * x_dist
cos_width = self._2d_cosine_fwhm_to_sz(channel_size)
self.channels = self._make_2d_cosine(self.xp.reshape(-1, 1),
self.yp.reshape(-1, 1),
trigrid[:, 0],
trigrid[:, 1], cos_width)
self.n_channels = self.channels.shape[0]
return self.channels, trigrid
def _define_trial_activations(self, stim_centers, stim_radius=None):
"""Defines a numpy matrix of predicted channel responses for each
trial/observation. Assumes that the presented stimulus is circular in
the 2D stimulus space. This can effectively be a single circular
pixel if stim_radius=0.5.
Parameters
-------
stim_centers: numpy array of 2D stimulus features for each observation,
expected dimensions are [observations, 2].
stim_radius: scalar value or array-like specifying the radius of the
circular stimulus for each observation, [observations]. While
this can be read-out from the property self.stim_radius_px,
here the user can specify it in case they are retraining the
model with new observations.
Returns
-------
C: numpy array of predicted channel responses [observations, pixels]
"""
nstim = stim_centers.shape[0]
if self.stim_radius_px is None:
if stim_radius is None:
raise ValueError("No defined stimulus radius. Please set.")
else:
self.stim_radius_px = stim_radius
        # Expand a scalar radius into one radius per observation
        if not isinstance(self.stim_radius_px, (np.ndarray, list)):
self.stim_radius_px = np.ones(nstim) * self.stim_radius_px
# Create a mask for every stimulus observation in the stimulus domain
stimulus_mask = np.zeros((self.xp.size, nstim))
for i in range(nstim):
rad_vals = ((self.xp.reshape(-1, 1) - stim_centers[i, 0]) ** 2 +
(self.yp.reshape(-1, 1) - stim_centers[i, 1]) ** 2)
            # rad_vals holds squared distances, so compare against the
            # squared radius
            inds = np.where(rad_vals < self.stim_radius_px[i] ** 2)[0]
stimulus_mask[inds, i] = 1
# Go from the stimulus domain to the channel domain
C = self.channels.squeeze() @ stimulus_mask
C = C.transpose()
# Check that C is full rank
if np.linalg.matrix_rank(C) < self.n_channels:
warnings.warn("Stimulus matrix is {}, not full rank. May cause "
"issues with stimulus prediction/reconstruction.".
format(np.linalg.matrix_rank(C)), RuntimeWarning)
return C
def _predict_channel_responses(self, X):
"""Computes predicted channel responses from data
(e.g. C2 in Brouwer & Heeger 2009)
Parameters
----------
X: numpy data matrix. [observations, voxels]
Returns
-------
channel_response: numpy matrix of channel responses. [channels,
observations]
"""
channel_response = np.matmul(np.linalg.pinv(self.W_), X.transpose())
return channel_response
def predict_feature_responses(self, X):
"""Takes channel weights and transforms them into continuous
functions defined in the feature domain.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_response: predict response from all channels. This is the stimulus
reconstruction in the channel domain. [pixels, observations]
"""
pred_response = np.matmul(self.channels.transpose(),
self._predict_channel_responses(X))
return pred_response
def _predict_features(self, X):
"""Predicts feature value from data in X.
Takes the maximum of the reconstructed, i.e. predicted response
function.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_features: numpy matrix of predicted stimulus features.
[observations, 2]
"""
pred_response = self.predict_feature_responses(X)
feature_ind = np.argmax(pred_response, 0)
pred_features = np.hstack((self.xp.reshape(-1, 1)[feature_ind],
self.yp.reshape(-1, 1)[feature_ind]))
return pred_features
| 45,189 | 42.038095 | 79 | py |
brainiak | brainiak-master/brainiak/utils/utils.py | # Copyright 2016 Intel Corporation, Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import re
import warnings
import os.path
import psutil
from .fmrisim import generate_stimfunction, _double_gamma_hrf, convolve_hrf
from scipy.fftpack import fft, ifft
import logging
logger = logging.getLogger(__name__)
"""
Some utility functions that can be used by different algorithms
"""
__all__ = [
"array_correlation",
"center_mass_exp",
"circ_dist",
"concatenate_not_none",
"cov2corr",
"from_tri_2_sym",
"from_sym_2_tri",
"gen_design",
"phase_randomize",
"p_from_null",
"ReadDesign",
"sumexp_stable",
"usable_cpu_count",
]
def circ_dist(x, y):
"""
Computes the pairwise circular distance between two arrays of
points (in radians).
Parameters
----------
x: numpy vector of positions on a circle, in radians.
y: numpy vector of positions on a circle, in radians.
Returns
-------
r: numpy vector of distances between inputs.
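    Examples
    --------
    A minimal sketch:
    >>> import numpy as np
    >>> d = circ_dist(np.deg2rad(np.array([350.])),
    ...               np.deg2rad(np.array([10.])))
    >>> # d is about -0.349 rad (-20 degrees): the signed, wrapped
    >>> # difference x - y rather than the 340-degree raw difference.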
"""
if x.size != y.size:
raise ValueError("Input sizes must match to compute pairwise "
"comparisons.")
r = np.angle(np.exp(x*1j) / np.exp(y*1j))
return r
def from_tri_2_sym(tri, dim):
"""convert a upper triangular matrix in 1D format
to 2D symmetric matrix
Parameters
----------
tri: 1D array
Contains elements of upper triangular matrix
dim : int
The dimension of target matrix.
Returns
-------
symm : 2D array
Symmetric matrix in shape=[dim, dim]
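    Examples
    --------
    A minimal sketch:
    >>> import numpy as np
    >>> m = from_tri_2_sym(np.array([1., 2., 3.]), 2)
    >>> # upper triangle filled row-wise: m == [[1., 2.], [0., 3.]]
    >>> # (note this implementation leaves the lower triangle at zero)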
"""
symm = np.zeros((dim, dim))
symm[np.triu_indices(dim)] = tri
return symm
def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri
def sumexp_stable(data):
"""Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
    It is useful for computing the softmax_i(v) = exp(v_i) / sum(exp(v))
    function.
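    Examples
    --------
    A minimal sketch:
    >>> import numpy as np
    >>> data = np.array([[1000., 1.],
    ...                  [1001., 2.]])
    >>> s, m, e = sumexp_stable(data)
    >>> # m == [1001., 2.]; s == 1 + exp(-1) for each sample; a naive
    >>> # np.exp(data).sum(axis=0) would overflow for the first sample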
"""
max_value = data.max(axis=0)
result_exp = np.exp(data - max_value)
result_sum = np.sum(result_exp, axis=0)
return result_sum, max_value, result_exp
def concatenate_not_none(data, axis=0):
"""Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array.
"""
# Get the indexes of the arrays in the list
mask = []
for i in range(len(data)):
if data[i] is not None:
mask.append(i)
# Concatenate them
stacked = np.concatenate([data[i] for i in mask], axis=axis)
return stacked
def cov2corr(cov):
"""Calculate the correlation matrix based on a
covariance matrix
Parameters
----------
cov: 2D array
Returns
-------
corr: 2D array
        correlation converted from the covariance matrix
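    Examples
    --------
    A minimal sketch:
    >>> import numpy as np
    >>> corr = cov2corr(np.array([[4., 2.],
    ...                           [2., 9.]]))
    >>> # corr == [[1., 1/3], [1/3, 1.]] since 2 / (2 * 3) == 1/3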
"""
assert cov.ndim == 2, 'covariance matrix should be 2D array'
inv_sd = 1 / np.sqrt(np.diag(cov))
corr = cov * inv_sd[None, :] * inv_sd[:, None]
return corr
class ReadDesign:
"""A class which has the ability of reading in design matrix in .1D file,
generated by AFNI's 3dDeconvolve.
Parameters
----------
fname: string, the address of the file to read.
    include_orth: Boolean, whether to include "orthogonal" regressors in
        the nuisance regressors, which are usually head motion parameters.
        All the columns of the design matrix are still going to be read in,
        but the attribute cols_used will reflect whether these orthogonal
        regressors are to be included for further analysis.
        Note that these are not entered into the design_task attribute,
        which includes only regressors related to task conditions.
    include_pols: Boolean, whether to include polynomial regressors in
the nuisance regressors which are used to capture slow drift of
signals.
Attributes
----------
design: 2d array. The design matrix read in from the csv file.
design_task: 2d array. The part of design matrix corresponding to
task conditions.
n_col: number of total columns in the design matrix.
column_types: 1d array. the types of each column in the design matrix.
0 for orthogonal regressors (usually head motion parameters),
-1 for polynomial basis (capturing slow drift of signals),
values > 0 for stimulus conditions
    n_basis: scalar. The number of polynomial bases in the design matrix.
n_stim: scalar. The number of stimulus conditions.
    n_orth: scalar. The number of orthogonal regressors (usually head
        motion)
StimLabels: list. The names of each column in the design matrix.
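    Examples
    --------
    A hypothetical sketch (the file name is illustrative only):
    >>> # design = ReadDesign(fname='my_design.1D')
    >>> # design.design_task   # [n_TR, n_task_conditions] regressors
    >>> # design.reg_nuisance  # head motion and drift regressors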
"""
def __init__(self, fname=None, include_orth=True, include_pols=True):
if fname is None:
# fname is the name of the file to read in the design matrix
self.design = np.zeros([0, 0])
self.n_col = 0
# number of columns (conditions) in the design matrix
self.column_types = np.ones(0)
self.n_basis = 0
self.n_stim = 0
self.n_orth = 0
self.StimLabels = []
else:
# isAFNI = re.match(r'.+[.](1D|1d|txt)$', fname)
filename, ext = os.path.splitext(fname)
# We assume all AFNI 1D files have extension of 1D or 1d or txt
if ext in ['.1D', '.1d', '.txt']:
self.read_afni(fname=fname)
self.include_orth = include_orth
self.include_pols = include_pols
        # The two flags above dictate whether columns corresponding to
# baseline drift modeled by polynomial functions of time and
# columns corresponding to other orthogonal signals (usually motion)
# are included in nuisance regressors.
self.cols_task = np.where(self.column_types == 1)[0]
self.design_task = self.design[:, self.cols_task]
if np.ndim(self.design_task) == 1:
self.design_task = self.design_task[:, None]
# part of the design matrix related to task conditions.
self.n_TR = np.size(self.design_task, axis=0)
self.cols_nuisance = np.array([])
if self.include_orth:
self.cols_nuisance = np.int0(
np.sort(np.append(self.cols_nuisance,
np.where(self.column_types == 0)[0])))
if self.include_pols:
self.cols_nuisance = np.int0(
np.sort(np.append(self.cols_nuisance,
np.where(self.column_types == -1)[0])))
if np.size(self.cols_nuisance) > 0:
self.reg_nuisance = self.design[:, self.cols_nuisance]
if np.ndim(self.reg_nuisance) == 1:
self.reg_nuisance = self.reg_nuisance[:, None]
else:
self.reg_nuisance = None
# Nuisance regressors for motion, baseline, etc.
def read_afni(self, fname):
# Read design file written by AFNI
self.n_basis = 0
self.n_stim = 0
self.n_orth = 0
self.StimLabels = []
self.design = np.loadtxt(fname, ndmin=2)
with open(fname) as f:
all_text = f.read()
find_n_column = re.compile(
r'^#[ ]+ni_type[ ]+=[ ]+"(?P<n_col>\d+)[*]', re.MULTILINE)
n_col_found = find_n_column.search(all_text)
if n_col_found:
self.n_col = int(n_col_found.group('n_col'))
if self.n_col != np.size(self.design, axis=1):
warnings.warn(
'The number of columns in the design matrix'
+ 'does not match the header information')
self.n_col = np.size(self.design, axis=1)
else:
self.n_col = np.size(self.design, axis=1)
self.column_types = np.ones(self.n_col)
# default that all columns are conditions of interest
find_ColumnGroups = re.compile(
r'^#[ ]+ColumnGroups[ ]+=[ ]+"(?P<CGtext>.+)"', re.MULTILINE)
CG_found = find_ColumnGroups.search(all_text)
if CG_found:
CG_text = re.split(',', CG_found.group('CGtext'))
curr_idx = 0
for CG in CG_text:
split_by_at = re.split('@', CG)
if len(split_by_at) == 2:
# the first tells the number of columns in this condition
# the second tells the condition type
n_this_cond = int(split_by_at[0])
self.column_types[curr_idx:curr_idx + n_this_cond] = \
int(split_by_at[1])
curr_idx += n_this_cond
elif len(split_by_at) == 1 and \
not re.search(r'\..', split_by_at[0]):
# Just a number, and not the type like '1..4'
self.column_types[curr_idx] = int(split_by_at[0])
curr_idx += 1
else: # must be a single stimulus condition
split_by_dots = re.split(r'\..', CG)
n_this_cond = int(split_by_dots[1])
self.column_types[curr_idx:curr_idx + n_this_cond] = 1
curr_idx += n_this_cond
self.n_basis = np.sum(self.column_types == -1)
self.n_stim = np.sum(self.column_types > 0)
self.n_orth = np.sum(self.column_types == 0)
find_StimLabels = re.compile(
r'^#[ ]+StimLabels[ ]+=[ ]+"(?P<SLtext>.+)"', re.MULTILINE)
StimLabels_found = find_StimLabels.search(all_text)
if StimLabels_found:
self.StimLabels = \
re.split(r'[ ;]+', StimLabels_found.group('SLtext'))
else:
self.StimLabels = []
def gen_design(stimtime_files, scan_duration, TR, style='FSL',
temp_res=0.01,
hrf_para={'response_delay': 6, 'undershoot_delay': 12,
'response_dispersion': 0.9,
'undershoot_dispersion': 0.9,
'undershoot_scale': 0.035}):
""" Generate design matrix based on a list of names of stimulus
timing files. The function will read each file, and generate
a numpy array of size [time_points \\* condition], where
time_points equals duration / TR, and condition is the size of
stimtime_filenames. Each column is the hypothetical fMRI response
based on the stimulus timing in the corresponding file
of stimtime_files.
This function uses generate_stimfunction and double_gamma_hrf
of brainiak.utils.fmrisim.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing
the stimulus timing information of one task condition.
The contents in the files will be interpretated
based on the style parameter.
Details are explained under the style parameter.
scan_duration: float or a list (or a 1D numpy array) of numbers.
Total duration of each fMRI scan, in unit of seconds.
If there are multiple runs, the duration should be
a list (or 1-d numpy array) of numbers.
If it is a list, then each number in the list
represents the duration of the corresponding scan
in the stimtime_files.
If only a number is provided, it is assumed that
there is only one fMRI scan lasting for scan_duration.
TR: float.
The sampling period of fMRI, in unit of seconds.
style: string, default: 'FSL'
Acceptable inputs: 'FSL', 'AFNI'
The formating style of the stimtime_files.
'FSL' style has one line for each event of the same condition.
Each line contains three numbers. The first number is the onset
of the event relative to the onset of the first scan,
in units of seconds.
(Multiple scans should be treated as a concatenated long scan
for the purpose of calculating onsets.
However, the design matrix from one scan won't leak into the next).
The second number is the duration of the event,
in unit of seconds.
The third number is the amplitude modulation (or weight)
of the response.
It is acceptable to not provide the weight,
or not provide both duration and weight.
In such cases, these parameters will default to 1.0.
This code will accept timing files with only 1 or 2 columns for
convenience but please note that the FSL package does not allow this
'AFNI' style has one line for each scan (run).
Each line has a few triplets in the format of
stim_onsets*weight:duration
(or simpler, see below), separated by spaces.
For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s,
modulated by weight of 2.0 and lasts for 1.5s.
If some run does not include a single event
of a condition (stimulus type), then you can put \\*,
or a negative number, or a very large number in that line.
Either duration or weight can be neglected. In such
cases, they will default to 1.0.
For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all
means an event starting at 3.0s, lasting for 1.0s, with
amplitude modulation of 1.0.
temp_res: float, default: 0.01
Temporal resolution of fMRI, in second.
hrf_para: dictionary
The parameters of the double-Gamma hemodynamic response function.
To set different parameters, supply a dictionary with
the same set of keys as the default, and replace the corresponding
values with the new values.
Returns
-------
design: 2D numpy array
design matrix. Each time row represents one TR
(fMRI sampling time point) and each column represents
one experiment condition, in the order in stimtime_files
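    Examples
    --------
    A hypothetical sketch (the file contents and path are illustrative
    only):
    >>> # 'cond1.txt' in FSL style, one event per line
    >>> # (onset, duration, weight):
    >>> #     2.0   1.0  1.0
    >>> #     10.0  1.5  1.0
    >>> # design = gen_design(['cond1.txt'], scan_duration=30., TR=2.)
    >>> # design.shape == (15, 1)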
"""
if np.ndim(scan_duration) == 0:
scan_duration = [scan_duration]
scan_duration = np.array(scan_duration)
assert np.all(scan_duration > TR), \
'scan duration should be longer than a TR'
if type(stimtime_files) is str:
stimtime_files = [stimtime_files]
assert TR > 0, 'TR should be positive'
assert style == 'FSL' or style == 'AFNI', 'style can only be FSL or AFNI'
n_C = len(stimtime_files) # number of conditions
n_S = np.size(scan_duration) # number of scans
if n_S > 1:
design = [np.empty([int(np.round(duration / TR)), n_C])
for duration in scan_duration]
else:
design = [np.empty([int(np.round(scan_duration / TR)), n_C])]
scan_onoff = np.insert(np.cumsum(scan_duration), 0, 0)
if style == 'FSL':
design_info = _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff)
elif style == 'AFNI':
design_info = _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff)
response_delay = hrf_para['response_delay']
undershoot_delay = hrf_para['undershoot_delay']
response_disp = hrf_para['response_dispersion']
undershoot_disp = hrf_para['undershoot_dispersion']
undershoot_scale = hrf_para['undershoot_scale']
# generate design matrix
for i_s in range(n_S):
for i_c in range(n_C):
if len(design_info[i_s][i_c]['onset']) > 0:
stimfunction = generate_stimfunction(
onsets=design_info[i_s][i_c]['onset'],
event_durations=design_info[i_s][i_c]['duration'],
total_time=scan_duration[i_s],
weights=design_info[i_s][i_c]['weight'],
temporal_resolution=1.0/temp_res)
hrf = _double_gamma_hrf(response_delay=response_delay,
undershoot_delay=undershoot_delay,
response_dispersion=response_disp,
undershoot_dispersion=undershoot_disp,
undershoot_scale=undershoot_scale,
temporal_resolution=1.0/temp_res)
design[i_s][:, i_c] = convolve_hrf(
stimfunction, TR, hrf_type=hrf, scale_function=False,
temporal_resolution=1.0 / temp_res).transpose() * temp_res
else:
design[i_s][:, i_c] = 0.0
# We multiply the resulting design matrix with
# the temporal resolution to normalize it.
# We do not use the internal normalization
# in double_gamma_hrf because it does not guarantee
# normalizing with the same constant.
return np.concatenate(design, axis=0)
def _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff):
""" Utility called by gen_design. It reads in one or more
stimulus timing file comforming to FSL style,
and return a list (size of [#run \\* #condition])
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of FSL
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
        The first level of the list corresponds to different scans.
        The second level of the list corresponds to different conditions.
        Each item in the list is a dictionary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design
"""
design_info = [[{'onset': [], 'duration': [], 'weight': []}
for i_c in range(n_C)] for i_s in range(n_S)]
# Read stimulus timing files
for i_c in range(n_C):
with open(stimtime_files[i_c]) as f:
for line in f.readlines():
tmp = line.strip().split()
i_s = np.where(
np.logical_and(scan_onoff[:-1] <= float(tmp[0]),
scan_onoff[1:] > float(tmp[0])))[0]
if len(i_s) == 1:
i_s = i_s[0]
design_info[i_s][i_c]['onset'].append(float(tmp[0])
- scan_onoff[i_s])
if len(tmp) >= 2:
design_info[i_s][i_c]['duration'].append(float(tmp[1]))
else:
design_info[i_s][i_c]['duration'].append(1.0)
if len(tmp) >= 3:
design_info[i_s][i_c]['weight'].append(float(tmp[2]))
else:
design_info[i_s][i_c]['weight'].append(1.0)
return design_info
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff):
""" Utility called by gen_design. It reads in one or more stimulus timing
file comforming to AFNI style, and return a list
(size of ``[number of runs \\* number of conditions]``)
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of AFNI
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
    design_info: list of stimulus information
        The first level of the list corresponds to different scans.
        The second level of the list corresponds to different conditions.
        Each item in the list is a dictionary with keys "onset",
        "duration" and "weight". If one condition includes no event
        in a scan, the values of these keys in that scan of the condition
        are empty lists.
See also
--------
gen_design
"""
design_info = [[{'onset': [], 'duration': [], 'weight': []}
for i_c in range(n_C)] for i_s in range(n_S)]
# Read stimulus timing files
for i_c in range(n_C):
with open(stimtime_files[i_c]) as f:
text = f.readlines()
assert len(text) == n_S, \
'Number of lines does not match number of runs!'
for i_s, line in enumerate(text):
events = line.strip().split()
if events[0] == '*':
continue
for event in events:
assert event != '*'
tmp = str.split(event, ':')
if len(tmp) == 2:
duration = float(tmp[1])
else:
duration = 1.0
tmp = str.split(tmp[0], '*')
if len(tmp) == 2:
weight = float(tmp[1])
else:
weight = 1.0
if (float(tmp[0]) >= 0
and float(tmp[0])
< scan_onoff[i_s + 1] - scan_onoff[i_s]):
design_info[i_s][i_c]['onset'].append(float(tmp[0]))
design_info[i_s][i_c]['duration'].append(duration)
design_info[i_s][i_c]['weight'].append(weight)
return design_info
def center_mass_exp(interval, scale=1.0):
""" Calculate the center of mass of negative exponential distribution
p(x) = exp(-x / scale) / scale
in the interval of (interval_left, interval_right).
scale is the same scale parameter as scipy.stats.expon.pdf
Parameters
----------
interval: size 2 tuple, float
interval must be in the form of (interval_left, interval_right),
where interval_left/interval_right is the starting/end point of the
interval in which the center of mass is calculated for exponential
distribution.
Note that interval_left must be non-negative, since exponential is
not supported in the negative domain, and interval_right must be
bigger than interval_left (thus positive) to form a well-defined
interval.
scale: float, positive
The scale parameter of the exponential distribution. See above.
Returns
-------
m: float
The center of mass in the interval of (interval_left,
interval_right) for exponential distribution.
"""
assert isinstance(interval, tuple), 'interval must be a tuple'
assert len(interval) == 2, 'interval must be length two'
(interval_left, interval_right) = interval
assert interval_left >= 0, 'interval_left must be non-negative'
assert interval_right > interval_left, \
'interval_right must be bigger than interval_left'
assert scale > 0, 'scale must be positive'
if interval_right < np.inf:
return ((interval_left + scale) * np.exp(-interval_left / scale) - (
scale + interval_right) * np.exp(-interval_right / scale)) / (
np.exp(-interval_left / scale) - np.exp(-interval_right / scale))
else:
return interval_left + scale
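# A quick worked example for center_mass_exp; the values follow directly
# from the closed-form expression above (the session below is illustrative):
#
# >>> import numpy as np
# >>> center_mass_exp((0, np.inf), scale=2.0)   # mean of Exp(scale=2)
# 2.0
# >>> abs(center_mass_exp((0.0, 1.0)) - 0.4180) < 1e-3
# True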
def usable_cpu_count():
"""Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int
"""
try:
result = len(os.sched_getaffinity(0))
except AttributeError:
try:
result = len(psutil.Process().cpu_affinity())
except AttributeError:
result = os.cpu_count()
return result
def phase_randomize(data, voxelwise=False, random_state=None):
"""Randomize phase of time series across subjects
For each subject, apply Fourier transform to voxel time series
and then randomly shift the phase of each frequency before inverting
back into the time domain. This yields time series with the same power
spectrum (and thus the same autocorrelation) as the original time series
but will remove any meaningful temporal relationships among time series
across subjects. By default (voxelwise=False), the same phase shift is
applied across all voxels; however if voxelwise=True, different random
phase shifts are applied to each voxel. The typical input is a time by
voxels by subjects ndarray. The first dimension is assumed to be the
time dimension and will be phase randomized. If a 2-dimensional ndarray
is provided, the last dimension is assumed to be subjects, and different
phase randomizations will be applied to each subject.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
Data to be phase randomized (per subject)
voxelwise : bool, default: False
Apply same (False) or different (True) randomizations across voxels
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
Returns
----------
shifted_data : ndarray (n_TRs x n_voxels x n_subjects)
Phase-randomized time series
"""
# Check if input is 2-dimensional
data_ndim = data.ndim
# Get basic shape of data
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get randomized phase shifts
if n_TRs % 2 == 0:
        # Start from index 1 to exclude the DC component; for an even
        # number of TRs, the Nyquist frequency (index n_TRs // 2) is also
        # excluded, since both must stay unshifted for the inverse FFT to
        # return a real signal
pos_freq = np.arange(1, data.shape[0] // 2)
neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1)
else:
pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1)
neg_freq = np.arange(data.shape[0] - 1,
(data.shape[0] - 1) // 2, -1)
if not voxelwise:
        phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
                        * 2 * np.pi)
    else:
        phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
                        * 2 * np.pi)
# Fast Fourier transform along time dimension of data
fft_data = fft(data, axis=0)
# Shift pos and neg frequencies symmetrically, to keep signal real
fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts)
fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts)
# Inverse FFT to put data back in time domain
shifted_data = np.real(ifft(fft_data, axis=0))
# Go back to 2-dimensions if input was 2-dimensional
if data_ndim == 2:
shifted_data = shifted_data[:, 0, :]
return shifted_data
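# Illustrative usage sketch for phase_randomize (the shapes below are
# hypothetical); randomizing phases leaves each series' power spectrum,
# and hence its autocorrelation, unchanged:
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> data = rng.randn(60, 10, 3)        # n_TRs x n_voxels x n_subjects
# >>> shifted = phase_randomize(data, random_state=0)
# >>> orig_power = np.abs(np.fft.fft(data, axis=0)) ** 2
# >>> rand_power = np.abs(np.fft.fft(shifted, axis=0)) ** 2
# >>> np.allclose(orig_power, rand_power)
# True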
def p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=None):
"""Compute p-value from null distribution
Returns the p-value for an observed test statistic given a null
distribution. Performs either a 'two-sided' (i.e., two-tailed)
test (default) or a one-sided (i.e., one-tailed) test for either the
'left' or 'right' side. For an exact test (exact=True), does not adjust
for the observed test statistic; otherwise, adjusts for observed
test statistic (prevents p-values of zero). If a multidimensional
distribution is provided, use axis argument to specify which axis indexes
resampling iterations.
The implementation is based on the work in [PhipsonSmyth2010]_.
.. [PhipsonSmyth2010] "Permutation p-values should never be zero:
calculating exact p-values when permutations are randomly drawn.",
B. Phipson, G. K., Smyth, 2010, Statistical Applications in Genetics
and Molecular Biology, 9, 1544-6115.
https://doi.org/10.2202/1544-6115.1585
Parameters
----------
observed : float
Observed test statistic
distribution : ndarray
Null distribution of test statistic
side : str, default: 'two-sided'
Perform one-sided ('left' or 'right') or 'two-sided' test
    exact : bool, default: False
        If True, assume distribution contains all possible permutations
        and do not adjust for the observed test statistic
    axis : None or int, default: None
        Axis indicating resampling iterations in input distribution
Returns
-------
p : float
p-value for observed test statistic based on null distribution
"""
if side not in ('two-sided', 'left', 'right'):
raise ValueError("The value for 'side' must be either "
"'two-sided', 'left', or 'right', got {0}".
format(side))
n_samples = len(distribution)
logger.info("Assuming {0} resampling iterations".format(n_samples))
if side == 'two-sided':
# Numerator for two-sided test
numerator = np.sum(np.abs(distribution) >= np.abs(observed), axis=axis)
elif side == 'left':
# Numerator for one-sided test in left tail
numerator = np.sum(distribution <= observed, axis=axis)
elif side == 'right':
# Numerator for one-sided test in right tail
numerator = np.sum(distribution >= observed, axis=axis)
# If exact test all possible permutations and do not adjust
if exact:
p = numerator / n_samples
# If not exact test, adjust number of samples to account for
# observed statistic; prevents p-value from being zero
else:
p = (numerator + 1) / (n_samples + 1)
return p
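# A small illustrative check of the adjustment (the null distribution
# below is hypothetical): with 99 permutations, 4 of which are at least
# as extreme as the observed value, the adjusted right-tailed p-value is
# (4 + 1) / (99 + 1), while the exact version is 4 / 99:
#
# >>> import numpy as np
# >>> null = np.concatenate([np.zeros(95), np.full(4, 5.0)])
# >>> float(p_from_null(3.0, null, side='right'))
# 0.05
# >>> float(p_from_null(3.0, null, side='right', exact=True))
# 0.04040404040404041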
def _check_timeseries_input(data):
"""Checks response time series input data (e.g., for ISC analysis)
Input data should be a n_TRs by n_voxels by n_subjects ndarray
(e.g., brainiak.image.MaskedMultiSubjectData) or a list where each
item is a n_TRs by n_voxels ndarray for a given subject. Multiple
input ndarrays must be the same shape. If a 2D array is supplied,
the last dimension is assumed to correspond to subjects. This
function is generally intended to be used internally by other
functions module (e.g., isc, isfc in brainiak.isc).
Parameters
----------
data : ndarray or list
Time series data
Returns
-------
data : ndarray
Input time series data with standardized structure
n_TRs : int
Number of time points (TRs)
n_voxels : int
Number of voxels (or ROIs)
n_subjects : int
Number of subjects
"""
# Convert list input to 3d and check shapes
    if isinstance(data, list):
data_shape = data[0].shape
for i, d in enumerate(data):
if d.shape != data_shape:
raise ValueError("All ndarrays in input list "
"must be the same shape!")
if d.ndim == 1:
data[i] = d[:, np.newaxis]
data = np.dstack(data)
# Convert input ndarray to 3d and check shape
elif isinstance(data, np.ndarray):
if data.ndim == 2:
data = data[:, np.newaxis, :]
elif data.ndim == 3:
pass
else:
raise ValueError("Input ndarray should have 2 "
"or 3 dimensions (got {0})!".format(data.ndim))
# Infer subjects, TRs, voxels and log for user to check
n_TRs, n_voxels, n_subjects = data.shape
logger.info("Assuming {0} subjects with {1} time points "
"and {2} voxel(s) or ROI(s) for ISC analysis.".format(
n_subjects, n_TRs, n_voxels))
return data, n_TRs, n_voxels, n_subjects
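# Illustrative sketch (the shapes below are hypothetical): a list of
# per-subject 2D arrays is stacked into a single 3D array:
#
# >>> import numpy as np
# >>> data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(
# ...     [np.zeros((10, 5)) for _ in range(3)])
# >>> data.shape, n_TRs, n_voxels, n_subjects
# ((10, 5, 3), 10, 5, 3)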
def array_correlation(x, y, axis=0):
"""Column- or row-wise Pearson correlation between two arrays
Computes sample Pearson correlation between two 1D or 2D arrays (e.g.,
two n_TRs by n_voxels arrays). For 2D arrays, computes correlation
between each corresponding column (axis=0) or row (axis=1) where axis
indexes observations. If axis=0 (default), each column is considered to
be a variable and each row is an observation; if axis=1, each row is a
variable and each column is an observation (equivalent to transposing
the input arrays). Input arrays must be the same shape with corresponding
variables and observations. This is intended to be an efficient method
for computing correlations between two corresponding arrays with many
variables (e.g., many voxels).
Parameters
----------
x : 1D or 2D ndarray
Array of observations for one or more variables
y : 1D or 2D ndarray
Array of observations for one or more variables (same shape as x)
axis : int (0 or 1), default: 0
Correlation between columns (axis=0) or rows (axis=1)
Returns
-------
r : float or 1D ndarray
Pearson correlation values for input variables
"""
# Accommodate array-like inputs
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
# Check that inputs are same shape
if x.shape != y.shape:
raise ValueError("Input arrays must be the same shape")
# Transpose if axis=1 requested (to avoid broadcasting
# issues introduced by switching axis in mean and sum)
if axis == 1:
x, y = x.T, y.T
# Center (de-mean) input variables
x_demean = x - np.mean(x, axis=0)
y_demean = y - np.mean(y, axis=0)
# Compute summed product of centered variables
numerator = np.sum(x_demean * y_demean, axis=0)
# Compute sum squared error
denominator = np.sqrt(np.sum(x_demean ** 2, axis=0) *
np.sum(y_demean ** 2, axis=0))
return numerator / denominator
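# Illustrative check against numpy's corrcoef (the data are hypothetical):
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> x, y = rng.randn(2, 100)
# >>> bool(np.isclose(array_correlation(x, y), np.corrcoef(x, y)[0, 1]))
# True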
| 36,244 | 35.390562 | 79 | py |
brainiak | brainiak-master/brainiak/utils/kronecker_solvers.py | import tensorflow as tf
__all__ = ["tf_kron_mult", "tf_masked_triangular_solve"]
def tf_solve_lower_triangular_kron(L, y):
""" Tensorflow function to solve L x = y
where L = kron(L[0], L[1] .. L[n-1])
and L[i] are the lower triangular matrices
Arguments
---------
L : list of 2-D tensors
Each element of the list must be a tensorflow tensor and
must be a lower triangular matrix of dimension n_i x n_i
y : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p
Returns
-------
x : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p
"""
n = len(L)
if n == 1:
return tf.linalg.triangular_solve(L[0], y)
else:
x = y
na = L[0].get_shape().as_list()[0]
n_list = tf.stack(
[tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]
)
n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32)
nb = tf.cast(n_prod / na, dtype=tf.int32)
col = tf.shape(input=x)[1]
for i in range(na):
xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0)
t = xinb / L[0][i, i]
xinb = tf_solve_lower_triangular_kron(L[1:], t)
xina = xina - tf.reshape(
tf.tile(tf.slice(L[0], [i + 1, i],
[na - i - 1, 1]), [1, nb * col]),
[(na - i - 1) * nb, col],
) * tf.reshape(
tf.tile(tf.reshape(t, [-1, 1]), [na - i - 1, 1]),
[(na - i - 1) * nb, col],
)
x = tf.concat(axis=0, values=[xt, xinb, xina])
return x
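# Illustrative correctness sketch (assumes TF 2.x eager execution; the
# matrices and shapes below are hypothetical):
#
# >>> import numpy as np
# >>> A = np.tril(np.random.rand(2, 2)) + np.eye(2)
# >>> B = np.tril(np.random.rand(3, 3)) + np.eye(3)
# >>> y = np.random.rand(6, 1)
# >>> x = tf_solve_lower_triangular_kron([tf.constant(A), tf.constant(B)],
# ...                                    tf.constant(y))
# >>> np.allclose(np.kron(A, B) @ x.numpy(), y)
# True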
def tf_solve_upper_triangular_kron(L, y):
""" Tensorflow function to solve L^T x = y
where L = kron(L[0], L[1] .. L[n-1])
and L[i] are the lower triangular matrices
Arguments
---------
L : list of 2-D tensors
Each element of the list must be a tensorflow tensor and
must be a lower triangular matrix of dimension n_i x n_i
y : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p
Returns
-------
x : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p
"""
n = len(L)
if n == 1:
return tf.linalg.triangular_solve(L[0], y, adjoint=True)
else:
x = y
na = L[0].get_shape().as_list()[0]
n_list = tf.stack(
[tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]
)
n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32)
nb = tf.cast(n_prod / na, dtype=tf.int32)
col = tf.shape(input=x)[1]
for i in range(na - 1, -1, -1):
xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0)
t = xinb / L[0][i, i]
xinb = tf_solve_upper_triangular_kron(L[1:], t)
xt = xt - tf.reshape(
tf.tile(tf.transpose(a=tf.slice(
L[0], [i, 0], [1, i])), [1, nb * col]),
[i * nb, col],
) * tf.reshape(tf.tile(tf.reshape(t, [-1, 1]), [i, 1]),
[i * nb, col])
x = tf.concat(axis=0, values=[xt, xinb, xina])
return x
def tf_kron_mult(L, x):
""" Tensorflow multiply with kronecker product matrix
Returns kron(L[0], L[1] ...) * x
Arguments
---------
L : list of 2-D tensors
Each element of the list must be a tensorflow tensor and
must be a square matrix of dimension n_i x n_i
x : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p
Returns
-------
y : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p
"""
n = len(L)
if n == 1:
return tf.matmul(L[0], x)
else:
na = L[0].get_shape().as_list()[0]
n_list = tf.stack(
[tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]
)
n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32)
nb = tf.cast(n_prod / na, dtype=tf.int32)
col = tf.shape(input=x)[1]
xt = tf_kron_mult(
L[1:], tf.transpose(a=tf.reshape(tf.transpose(a=x), [-1, nb]))
)
y = tf.zeros_like(x)
for i in range(na):
ya, yb, yc = tf.split(y, [i * nb, nb, (na - i - 1) * nb], 0)
yb = tf.reshape(
tf.matmul(
tf.reshape(xt, [nb * col, na]),
tf.transpose(a=tf.slice(L[0], [i, 0], [1, na])),
),
[nb, col],
)
y = tf.concat(axis=0, values=[ya, yb, yc])
return y
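# Illustrative correctness sketch (assumes TF 2.x eager execution; the
# matrices below are hypothetical):
#
# >>> import numpy as np
# >>> A, B = np.random.rand(2, 2), np.random.rand(3, 3)
# >>> x = np.random.rand(6, 2)
# >>> y = tf_kron_mult([tf.constant(A), tf.constant(B)], tf.constant(x))
# >>> np.allclose(y.numpy(), np.kron(A, B) @ x)
# True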
def tf_masked_triangular_solve(L, y, mask, lower=True, adjoint=False):
""" Tensorflow function to solve L x = y
where L is a lower triangular matrix with a mask
Arguments
---------
L : 2-D tensor
Must be a tensorflow tensor and
must be a triangular matrix of dimension n x n
y : 1-D or 2-D tensor
Dimension n x p
mask : 1-D tensor
Dimension n x 1, should be 1 if element is valid, 0 if invalid
lower : boolean (default : True)
True if L is lower triangular, False if upper triangular
adjoint : boolean (default : False)
True if solving for L^T x = y, False if solving for Lx = y
Returns
-------
x : 1-D or 2-D tensor
Dimension n x p, values at rows for which mask == 0 are set to zero
"""
zero = tf.constant(0, dtype=tf.int32)
mask_mat = tf.compat.v1.where(
tf.not_equal(
tf.matmul(tf.reshape(mask, [-1, 1]),
tf.reshape(mask, [1, -1])), zero
)
)
q = tf.cast(
tf.sqrt(tf.cast(tf.shape(input=mask_mat)[0], dtype=tf.float64)),
dtype=tf.int32
)
L_masked = tf.reshape(tf.gather_nd(L, mask_mat), [q, q])
maskindex = tf.compat.v1.where(tf.not_equal(mask, zero))
y_masked = tf.gather_nd(y, maskindex)
x_s1 = tf.linalg.triangular_solve(
L_masked, y_masked, lower=lower, adjoint=adjoint)
x = tf.scatter_nd(maskindex, x_s1, tf.cast(
tf.shape(input=y), dtype=tf.int64))
return x
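# Illustrative usage sketch (assumes TF 2.x eager execution; the values
# below are hypothetical). Rows with mask == 0 are dropped from the solve
# and set to zero in the output:
#
# >>> import numpy as np
# >>> L_full = tf.constant(np.tril(np.ones((3, 3))))
# >>> y = tf.constant(np.ones((3, 1)))
# >>> mask = tf.constant(np.array([1, 0, 1], dtype=np.int32))
# >>> x = tf_masked_triangular_solve(L_full, y, mask)
# >>> x.numpy().ravel()
# array([1., 0., 0.])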
def tf_solve_lower_triangular_masked_kron(L, y, mask):
""" Tensorflow function to solve L x = y
where L = kron(L[0], L[1] .. L[n-1]),
L[i] are the lower triangular matrices,
and mask is a binary elementwise mask on the full L
Arguments
---------
L : list of 2-D tensors
Each element of the list must be a tensorflow tensor and
must be a lower triangular matrix of dimension n_i x n_i
y : 1-D or 2-D tensor
Dimension [n_0*n_1*..n_(m-1)), p]
mask: 1-D tensor
Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows and 0
for don't care
Returns
-------
x : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p, values at rows
for which mask == 0 are set to zero
"""
n = len(L)
if n == 1:
return tf_masked_triangular_solve(L[0], y, mask, lower=True,
adjoint=False)
else:
x = y
na = L[0].get_shape().as_list()[0]
n_list = tf.stack(
[tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]
)
n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32)
nb = tf.cast(n_prod / na, dtype=tf.int32)
col = tf.shape(input=x)[1]
for i in range(na):
mask_b = tf.slice(mask, [i * nb], [nb])
xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0)
t = xinb / L[0][i, i]
if tf.reduce_sum(input_tensor=mask_b) != nb:
xinb = tf_solve_lower_triangular_masked_kron(L[1:], t, mask_b)
t_masked = tf_kron_mult(L[1:], xinb)
else:
# all valid - same as no mask
xinb = tf_solve_lower_triangular_kron(L[1:], t)
t_masked = t
xina = xina - tf.reshape(
tf.tile(tf.slice(L[0], [i + 1, i],
[na - i - 1, 1]), [1, nb * col]),
[(na - i - 1) * nb, col],
) * tf.reshape(
tf.tile(tf.reshape(t_masked, [-1, 1]), [na - i - 1, 1]),
[(na - i - 1) * nb, col],
)
x = tf.concat(axis=0, values=[xt, xinb, xina])
return x
def tf_solve_upper_triangular_masked_kron(L, y, mask):
""" Tensorflow function to solve L^T x = y
where L = kron(L[0], L[1] .. L[n-1])
and L[i] are the lower triangular matrices
Arguments
---------
L : list of 2-D tensors
Each element of the list must be a tensorflow tensor and
must be a lower triangular matrix of dimension n_i x n_i
y : 1-D or 2-D tensor
Dimension [n_0*n_1*..n_(m-1)), p]
mask: 1-D tensor
Dimension [n_0*n_1*...n_(m-1)] with 1 for valid rows
and 0 for don't care
Returns
-------
x : 1-D or 2-D tensor
Dimension (n_0*n_1*..n_(m-1)) x p, values at rows
for which mask == 0 are set to zero
"""
n = len(L)
if n == 1:
return tf_masked_triangular_solve(L[0], y, mask, lower=True,
adjoint=True)
else:
x = y
na = L[0].get_shape().as_list()[0]
n_list = tf.stack(
[tf.cast(tf.shape(input=mat)[0], dtype=tf.float64) for mat in L]
)
n_prod = tf.cast(tf.reduce_prod(input_tensor=n_list), dtype=tf.int32)
nb = tf.cast(n_prod / na, dtype=tf.int32)
col = tf.shape(input=x)[1]
L1_end_tr = [tf.transpose(a=x) for x in L[1:]]
for i in range(na - 1, -1, -1):
mask_b = tf.slice(mask, [i * nb], [nb])
xt, xinb, xina = tf.split(x, [i * nb, nb, (na - i - 1) * nb], 0)
t = xinb / L[0][i, i]
if tf.reduce_sum(input_tensor=mask_b) != nb:
xinb = tf_solve_upper_triangular_masked_kron(L[1:], t, mask_b)
t_masked = tf_kron_mult(L1_end_tr, xinb)
else:
xinb = tf_solve_upper_triangular_kron(L[1:], t)
t_masked = t
xt = xt - tf.reshape(
tf.tile(tf.transpose(a=tf.slice(
L[0], [i, 0], [1, i])), [1, nb * col]),
[i * nb, col],
) * tf.reshape(
tf.tile(tf.reshape(t_masked, [-1, 1]), [i, 1]), [i * nb, col]
)
x = tf.concat(axis=0, values=[xt, xinb, xina])
return x
| 10,546 | 30.864048 | 78 | py |
brainiak | brainiak-master/brainiak/utils/__init__.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities used by multiple subpackages."""
| 638 | 41.6 | 75 | py |
brainiak | brainiak-master/brainiak/utils/fmrisim_real_time_generator.py | # Generate simulated fMRI data with a few parameters that might be relevant
# for real time analysis
"""
This code can be run as a function in python or from the command line:
python fmrisim_real_time_generator.py --output-dir data
The input arguments are:
Required:
outputDir - Specify output data dir where the data should be saved
Optional (can be modified by flags from the command line):
data_dict contains:
numTRs - Specify the number of time points
multivariate_pattern - Is the difference between conditions univariate (0)
or multivariate (1)
different_ROIs - Are there different ROIs for each condition (1) or is
it in the same ROI (0). If it is the same ROI and you are using univariate
differences, the second condition will have a smaller evoked response than
the other.
event_duration - How long, in seconds, is each event
scale_percentage - What is the percent signal change
trDuration - How many seconds per volume
save_dicom - Do you want to save data as a dicom (1) or numpy (0)
save_realtime - Do you want to save the data in real time (1) or as fast as
possible (0)?
isi - What is the time between each event (in seconds)
burn_in - How long before the first event (in seconds)
"""
import os
import time
import argparse
import datetime
import nibabel # type: ignore
import numpy as np # type: ignore
import pydicom as dicom
from brainiak.utils import fmrisim as sim # type: ignore
import logging
from pkg_resources import resource_stream
from nibabel.nifti1 import Nifti1Image
import gzip
__all__ = ["generate_data"]
logger = logging.getLogger(__name__)
script_datetime = datetime.datetime.now()
default_settings = {
'ROI_A_file': None,
'ROI_B_file': None,
'template_path': None,
'noise_dict_file': None,
'numTRs': 200,
'trDuration': 2,
'isi': 6,
'burn_in': 6,
'event_duration': 10,
'scale_percentage': 0.5,
'multivariate_pattern': False,
'different_ROIs': False,
'save_dicom': False,
'save_realtime': False,
}
def _generate_ROIs(ROI_file,
stimfunc,
noise,
scale_percentage,
data_dict):
"""Make signal activity for an ROI of data
Creates the specified evoked response time course, calibrated to the
expected signal change, for a given ROI
Parameters
----------
ROI_file : str
Path to the file of the ROI being loaded in
stimfunc : 1 dimensional array
Time course of evoked response. Output from
fmrisim.generate_stimfunction
noise : 4 dimensional array
Volume of noise generated from fmrisim.generate_noise. Although this
is needed as an input, this is only so that the percent signal change
can be calibrated. This is not combined with the signal generated.
scale_percentage : float
What is the percent signal change for the evoked response
data_dict : dict
A dictionary to specify the parameters used for making data,
specifying the following keys
numTRs - int - Specify the number of time points
        multivariate_pattern - bool - Is the difference between conditions
univariate (0) or multivariate (1)
different_ROIs - bool - Are there different ROIs for each condition (
1) or is it in the same ROI (0). If it is the same ROI and you are
using univariate differences, the second condition will have a
smaller evoked response than the other.
event_duration - int - How long, in seconds, is each event
scale_percentage - float - What is the percent signal change
trDuration - float - How many seconds per volume
        save_dicom - bool - Save the data as a dicom (1) or numpy (0)
save_realtime - bool - Do you want to save the data in real time (1)
or as fast as possible (0)?
isi - float - What is the time between each event (in seconds)
burn_in - int - How long before the first event (in seconds)
Returns
----------
signal : 4 dimensional array
Volume of signal in the specified ROI (noise has not yet been added)
"""
# Create the signal in the ROI as specified.
    # Load in the ROI data (it may already be loaded if doing a test)
if isinstance(ROI_file, str):
        logger.info('Loading %s', ROI_file)
nii = nibabel.load(ROI_file)
ROI = nii.get_data()
else:
ROI = ROI_file
# Find all the indices that contain signal
idx_list = np.where(ROI == 1)
idxs = np.zeros([len(idx_list[0]), 3])
for idx_counter in list(range(0, len(idx_list[0]))):
idxs[idx_counter, 0] = int(idx_list[0][idx_counter])
idxs[idx_counter, 1] = int(idx_list[1][idx_counter])
idxs[idx_counter, 2] = int(idx_list[2][idx_counter])
idxs = idxs.astype('int8')
# How many voxels per ROI
voxels = int(ROI.sum())
# Create a pattern of activity across the two voxels
if data_dict['multivariate_pattern'] is True:
pattern = np.random.rand(voxels).reshape((voxels, 1))
else: # Just make a univariate increase
pattern = np.tile(1, voxels).reshape((voxels, 1))
# Multiply each pattern by each voxel time course
weights = np.tile(stimfunc, voxels) * pattern.T
# Convolve the onsets with the HRF
temporal_res = 1 / data_dict['trDuration']
signal_func = sim.convolve_hrf(stimfunction=weights,
tr_duration=data_dict['trDuration'],
temporal_resolution=temporal_res,
scale_function=1,
)
# Change the type of noise
noise = noise.astype('double')
# Create a noise function (same voxels for signal function as for noise)
noise_function = noise[idxs[:, 0], idxs[:, 1], idxs[:, 2], :].T
# Compute the signal magnitude for the data
sf_scaled = sim.compute_signal_change(signal_function=signal_func,
noise_function=noise_function,
noise_dict=data_dict['noise_dict'],
magnitude=[scale_percentage],
method='PSC',
)
# Combine the signal time course with the signal volume
signal = sim.apply_signal(sf_scaled,
ROI,
)
# Return signal needed
return signal
def _write_dicom(output_name,
data,
image_number=0):
"""Write the data to a dicom file
Saves the data for one TR to a dicom.
Dicom files are difficult to set up correctly, this file will likely
crash when trying to open it using dcm2nii. However, if it is loaded in
python (e.g., dicom.dcmread) then pixel_array contains the relevant
voxel data
Parameters
----------
output_name : str
Output name for volume being created
data : 3 dimensional array
Volume of data to be saved
image_number : int
Number dicom to be saved. This is critical for setting up dicom file
header information.
"""
    # Convert data from float to int
dataInts = data.astype(np.int16)
# Populate required values for file meta information
file_meta = dicom.Dataset()
file_meta.MediaStorageSOPClassUID = '1.2' # '1.2.840.10008.5.1.4.1.1.2'
file_meta.MediaStorageSOPInstanceUID = "1.2.3"
file_meta.ImplementationClassUID = "1.2.3.4"
file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'
# Create the FileDataset
ds = dicom.FileDataset(output_name,
{},
file_meta=file_meta,
preamble=b"\0" * 128)
# Set image dimensions
frames, rows, cols = dataInts.shape
ds.Rows = rows
ds.Columns = cols
ds.NumberOfFrames = frames
ds.SamplesPerPixel = 1
ds.BitsAllocated = 16
ds.BitsStored = 16
ds.PixelRepresentation = 0
ds.InstanceNumber = image_number
ds.ImagePositionPatient = [0, 0, 0]
ds.ImageOrientationPatient = [.01, 0, 0, 0, 0, 0]
ds.PhotometricInterpretation = 'MONOCHROME1'
# Add the data elements -- not trying to set all required here. Check DICOM
# standard
ds.PatientName = "sim"
ds.PatientID = "sim"
# Set the transfer syntax
ds.is_little_endian = True
ds.is_implicit_VR = True
# Set creation date/time
image_datetime = script_datetime + datetime.timedelta(seconds=image_number)
timeStr = image_datetime.strftime('%H%M%S')
ds.ContentDate = image_datetime.strftime('%Y%m%d')
ds.ContentTime = timeStr
# Add the data
ds.PixelData = dataInts.tobytes()
ds.save_as(output_name)
def _get_input_names(data_dict):
"""Get names from dict
Read in the data_dict to return the relevant file names. Will also
add the default values if trDuration, isi, or burn_in haven't been set
Parameters
----------
data_dict : dict
A dictionary to specify the parameters used for making data,
specifying the following keys
numTRs - int - Specify the number of time points
        multivariate_pattern - bool - Is the difference between conditions
univariate (0) or multivariate (1)
different_ROIs - bool - Are there different ROIs for each condition (
1) or is it in the same ROI (0). If it is the same ROI and you are
using univariate differences, the second condition will have a
smaller evoked response than the other.
event_duration - int - How long, in seconds, is each event
scale_percentage - float - What is the percent signal change
trDuration - float - How many seconds per volume
        save_dicom - bool - Save the data as a dicom (1) or numpy (0)
save_realtime - bool - Do you want to save the data in real time (1)
or as fast as possible (0)?
isi - float - What is the time between each event (in seconds)
burn_in - int - How long before the first event (in seconds)
Returns
----------
ROI_A_file : str
Path to ROI for condition A
ROI_B_file : str
Path to ROI for condition B
template_path : str
Path to template file for data
noise_dict_file : str
Path to file containing parameters for noise simulation
"""
# Load in the ROIs
if data_dict.get('ROI_A_file') is None:
vol = resource_stream(__name__, "sim_parameters/ROI_A.nii.gz").read()
ROI_A_file = Nifti1Image.from_bytes(gzip.decompress(vol)).get_data()
else:
ROI_A_file = data_dict['ROI_A_file']
if data_dict.get('ROI_B_file') is None:
vol = resource_stream(__name__, "sim_parameters/ROI_B.nii.gz").read()
ROI_B_file = Nifti1Image.from_bytes(gzip.decompress(vol)).get_data()
else:
ROI_B_file = data_dict['ROI_B_file']
# Get the path to the template
if data_dict.get('template_path') is None:
vol = resource_stream(__name__,
"sim_parameters/sub_template.nii.gz").read()
template_path = Nifti1Image.from_bytes(gzip.decompress(vol)).get_data()
else:
template_path = data_dict['template_path']
# Load in the noise dict if supplied
if data_dict.get('noise_dict_file') is None:
file = resource_stream(__name__,
'sim_parameters/sub_noise_dict.txt').read()
noise_dict_file = file
else:
noise_dict_file = data_dict['noise_dict_file']
# Return the paths
return ROI_A_file, ROI_B_file, template_path, noise_dict_file
def generate_data(outputDir,
user_settings):
"""Generate simulated fMRI data
Use a few parameters that might be relevant for real time analysis
Parameters
----------
outputDir : str
Specify output data dir where the data should be saved
user_settings : dict
A dictionary to specify the parameters used for making data,
specifying the following keys
numTRs - int - Specify the number of time points
        multivariate_pattern - bool - Is the difference between conditions
univariate (0) or multivariate (1)
different_ROIs - bool - Are there different ROIs for each condition (
1) or is it in the same ROI (0). If it is the same ROI and you are
using univariate differences, the second condition will have a
smaller evoked response than the other.
event_duration - int - How long, in seconds, is each event
scale_percentage - float - What is the percent signal change
trDuration - float - How many seconds per volume
        save_dicom - bool - Save the data as a dicom (1) or numpy (0)
save_realtime - bool - Do you want to save the data in real time (1)
or as fast as possible (0)?
isi - float - What is the time between each event (in seconds)
burn_in - int - How long before the first event (in seconds)
"""
data_dict = default_settings.copy()
data_dict.update(user_settings)
# If the folder doesn't exist then make it
    os.makedirs(outputDir, exist_ok=True)
logger.info('Load template of average voxel value')
# Get the file names needed for loading in the data
ROI_A_file, ROI_B_file, template_path, noise_dict_file = \
_get_input_names(data_dict)
# Load in the template data (it may already be loaded if doing a test)
if isinstance(template_path, str):
template_nii = nibabel.load(template_path)
template = template_nii.get_data()
else:
template = template_path
dimensions = np.array(template.shape[0:3])
logger.info('Create binary mask and normalize the template range')
mask, template = sim.mask_brain(volume=template,
mask_self=True,
)
# Write out the mask as a numpy file
outFile = os.path.join(outputDir, 'mask.npy')
np.save(outFile, mask.astype(np.uint8))
# Load the noise dictionary
logger.info('Loading noise parameters')
# If this isn't a string, assume it is a resource stream file
if type(noise_dict_file) is str:
with open(noise_dict_file, 'r') as f:
noise_dict = f.read()
else:
# Read the resource stream object
noise_dict = noise_dict_file.decode()
noise_dict = eval(noise_dict)
    noise_dict['matched'] = 0  # Noise matching would increase processing time
# Add it here for easy access
data_dict['noise_dict'] = noise_dict
logger.info('Generating noise')
temp_stimfunction = np.zeros((data_dict['numTRs'], 1))
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=temp_stimfunction,
tr_duration=int(data_dict['trDuration']),
template=template,
mask=mask,
noise_dict=noise_dict,
)
# Create the stimulus time course of the conditions
total_time = int(data_dict['numTRs'] * data_dict['trDuration'])
onsets_A = []
onsets_B = []
curr_time = data_dict['burn_in']
while curr_time < (total_time - data_dict['event_duration']):
# Flip a coin for each epoch to determine whether it is A or B
if np.random.randint(0, 2) == 1:
onsets_A.append(curr_time)
else:
onsets_B.append(curr_time)
# Increment the current time
curr_time += data_dict['event_duration'] + data_dict['isi']
# How many timepoints per second of the stim function are to be generated?
temporal_res = 1 / data_dict['trDuration']
# Create a time course of events
event_durations = [data_dict['event_duration']]
stimfunc_A = sim.generate_stimfunction(onsets=onsets_A,
event_durations=event_durations,
total_time=total_time,
temporal_resolution=temporal_res,
)
stimfunc_B = sim.generate_stimfunction(onsets=onsets_B,
event_durations=event_durations,
total_time=total_time,
temporal_resolution=temporal_res,
)
# Create a labels timecourse
outFile = os.path.join(outputDir, 'labels.npy')
np.save(outFile, (stimfunc_A + (stimfunc_B * 2)))
# How is the signal implemented in the different ROIs
signal_A = _generate_ROIs(ROI_A_file,
stimfunc_A,
noise,
data_dict['scale_percentage'],
data_dict)
if data_dict['different_ROIs'] is True:
signal_B = _generate_ROIs(ROI_B_file,
stimfunc_B,
noise,
data_dict['scale_percentage'],
data_dict)
else:
# Halve the evoked response if these effects are both expected in the
# same ROI
if data_dict['multivariate_pattern'] is False:
signal_B = _generate_ROIs(ROI_A_file,
stimfunc_B,
noise,
data_dict['scale_percentage'] * 0.5,
data_dict)
else:
signal_B = _generate_ROIs(ROI_A_file,
stimfunc_B,
noise,
data_dict['scale_percentage'],
data_dict)
# Combine the two signal timecourses
signal = signal_A + signal_B
logger.info('Generating TRs in real time')
for idx in range(data_dict['numTRs']):
# Create the brain volume on this TR
brain = noise[:, :, :, idx] + signal[:, :, :, idx]
# Convert file to integers to mimic what you get from MR
brain_int32 = brain.astype(np.int32)
# Store as dicom or nifti?
if data_dict['save_dicom'] is True:
# Save the volume as a DICOM file, with each TR as its own file
output_file = os.path.join(outputDir, 'rt_' + format(idx, '03d')
+ '.dcm')
_write_dicom(output_file, brain_int32, idx+1)
else:
# Save the volume as a numpy file, with each TR as its own file
output_file = os.path.join(outputDir, 'rt_' + format(idx, '03d')
+ '.npy')
np.save(output_file, brain_int32)
logger.info("Generate {}".format(output_file))
# Sleep until next TR
if data_dict['save_realtime'] == 1:
time.sleep(data_dict['trDuration'])
if __name__ == '__main__':
# Receive the inputs
argParser = argparse.ArgumentParser(
'Specify input arguments. Some arguments are parameters that require '
'an input is provided (noted by "Param"), others are flags that when '
'provided will change according to the flag (noted by "Flag")')
argParser.add_argument('--output-dir', '-o', default=None, type=str,
help='Param. Output directory for simulated data')
argParser.add_argument('--ROI-A-file', default=None, type=str,
help='Param. Full path to file for cond. A ROI')
argParser.add_argument('--ROI-B-file', default=None, type=str,
help='Param. Full path to file for cond. B ROI')
argParser.add_argument('--template-path', default=None, type=str,
help='Param. Full path to file for brain template')
argParser.add_argument('--noise-dict-file', default=None, type=str,
help='Param. Full path to file setting noise '
'params')
argParser.add_argument('--numTRs', '-n', default=200, type=int,
help='Param. Number of time points')
argParser.add_argument('--event-duration', '-d', default=10, type=int,
help='Param. Number of seconds per event')
    argParser.add_argument('--trDuration', default=2, type=int,
                           help='Param. How many seconds per volume')
argParser.add_argument('--isi', default=6, type=int,
help='Param. How long in seconds between events')
argParser.add_argument('--burn-in', default=6, type=int,
help='Param. How long before the first event '
'begins after the run onset')
argParser.add_argument('--scale-percentage', '-s', default=0.5, type=float,
help='Param. Percent signal change')
argParser.add_argument('--multivariate-pattern', '-m', default=False,
action='store_true',
help='Flag. Signal is different between conditions '
'in a multivariate, versus univariate, way')
argParser.add_argument('--different-ROIs', '-r', default=False,
action='store_true', help='Flag. Use different '
'ROIs for each condition')
argParser.add_argument('--save-dicom', default=False,
action='store_true', help='Flag. Output files in '
'DICOM format rather '
'than numpy')
argParser.add_argument('--save-realtime', default=False,
action='store_true', help='Flag. Save data as if '
'it was coming in at '
'the acquisition rate')
args = argParser.parse_args()
# Essential arguments
outputDir = args.output_dir
if outputDir is None:
logger.info("Must specify an output directory using -o")
exit(-1)
data_dict = default_settings.copy()
# User controlled settings
# Specify the path to the files used for defining ROIs.
data_dict['ROI_A_file'] = args.ROI_A_file
data_dict['ROI_B_file'] = args.ROI_B_file
# Specify where the template
data_dict['template_path'] = args.template_path
data_dict['noise_dict_file'] = args.noise_dict_file
# Specify the number of time points
data_dict['numTRs'] = args.numTRs
# How long is each event/block you are modelling (assumes 6s rest between)
data_dict['event_duration'] = float(args.event_duration)
# How long does each acquisition take
data_dict['trDuration'] = args.trDuration
# What is the time between each event (in seconds)
data_dict['isi'] = args.isi
# How long before the first event (in seconds)
data_dict['burn_in'] = args.burn_in
# What is the percent signal change being simulated
data_dict['scale_percentage'] = args.scale_percentage
# Are there different ROIs for each condition (True) or is it in the same
# ROI (False).
# If it is the same ROI and you are using univariate differences,
# the second condition will have a smaller evoked response than the other.
data_dict['different_ROIs'] = args.different_ROIs
    # Is this a multivariate pattern (1) or a univariate pattern (0)
data_dict['multivariate_pattern'] = args.multivariate_pattern
# Do you want to save data as a dicom (True) or numpy (False)
data_dict['save_dicom'] = args.save_dicom
# Do you want to save the data in real time (1) or as fast as possible (0)?
data_dict['save_realtime'] = args.save_realtime
# Run the function if running from command line
generate_data(outputDir,
data_dict)
| 24,201 | 37.599681 | 79 | py |
brainiak | brainiak-master/brainiak/utils/fmrisim.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fMRI Simulator
Simulate fMRI data for a single subject.
This code provides a set of functions necessary to produce realistic
simulations of fMRI data. There are two main steps: characterizing the
signal and generating the noise model, which are then combined to simulate
brain data. Tools are included to support the creation of different types
of signal, such as region specific differences in univariate
activity. To create the noise model the parameters can either be set
manually or can be estimated from real fMRI data with reasonable accuracy (
works best when fMRI data has not been preprocessed)
Functions:
generate_signal
Create a volume with activity, of a specified shape and either multivariate
or univariate pattern, in a specific region to represent the signal in the
neural data.
generate_stimfunction
Create a timecourse of the signal activation. This can be specified using event
onsets and durations, or read in from a timing file. This is the time course before
convolution and therefore can be at any temporal precision.
export_3_column:
Generate a three column timing file that can be used with software like FSL
to represent event onsets and durations
export_epoch_file:
Generate an epoch file from the time course which can be used as an input to
brainiak functions
convolve_hrf
Convolve the signal timecourse with the HRF to model the expected evoked
activity
apply_signal
Combine the signal volume with the HRF, thus giving the signal the temporal
properties of the HRF (such as smoothing and lag)
calc_noise
Estimate the noise properties of a given fMRI volume. Prominently, estimate
the smoothing and SFNR of the data
generate_noise
Create the noise for this run. This creates temporal, spatial, task and white
noise. Various parameters can be tuned depending on need.
mask_brain
Create a mask volume that has similar contrast as an fMRI image. Defaults to
use an MNI grey matter atlas but any image can be supplied to create an
estimate.
compute_signal_change
Convert the signal function into useful metric units according to metrics
used by others (Welvaert & Rosseel, 2013)
Authors:
Cameron Ellis (Princeton & Yale) 2016-2018
Chris Baldassano (Princeton) 2016-2017
Mingbo Cai (Princeton) 2017
"""
import logging
from itertools import product
from statsmodels.tsa.arima_model import ARMA
import math
import numpy as np
# See pyflakes issue #248
# https://github.com/PyCQA/pyflakes/issues/248
import numpy.matlib # noqa: F401
from numpy.linalg import LinAlgError
from pkg_resources import resource_stream
from scipy import stats
from scipy import signal
import scipy.ndimage as ndimage
import copy
from scipy import optimize
__all__ = [
"apply_signal",
"calc_noise",
"compute_signal_change",
"convolve_hrf",
"export_3_column",
"export_epoch_file",
"generate_signal",
"generate_stimfunction",
"generate_noise",
"mask_brain",
"generate_1d_gaussian_rfs",
"generate_1d_rf_responses",
]
logger = logging.getLogger(__name__)
def _generate_feature(feature_type,
feature_size,
signal_magnitude,
thickness=1):
"""Generate features corresponding to signal
Generate a single feature, that can be inserted into the signal volume.
A feature is a region of activation with a specific shape such as cube
or ring
Parameters
----------
feature_type : str
What shape signal is being inserted? Options are 'cube',
'loop' (aka ring), 'cavity' (aka hollow sphere), 'sphere'.
feature_size : int
How big is the signal in diameter?
signal_magnitude : float
Set the signal size, a value of 1 means the signal is one standard
deviation of the noise
thickness : int
How thick is the surface of the loop/cavity
Returns
----------
signal : 3 dimensional array
The volume representing the signal
"""
# If the size is equal to or less than 2 then all features are the same
if feature_size <= 2:
feature_type = 'cube'
# What kind of signal is it?
if feature_type == 'cube':
# Preset the size of the signal
signal = np.ones((feature_size, feature_size, feature_size))
elif feature_type == 'loop':
# First make a cube of zeros
signal = np.zeros((feature_size, feature_size, feature_size))
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy = np.meshgrid(seq, seq)
# Make a disk corresponding to the whole mesh grid
xxmesh = (xx - ((feature_size - 1) / 2)) ** 2
yymesh = (yy - ((feature_size - 1) / 2)) ** 2
disk = xxmesh + yymesh
# What are the limits of the rings being made
outer_lim = disk[int((feature_size - 1) / 2), 0]
inner_lim = disk[int((feature_size - 1) / 2), thickness]
# What is the outer disk
outer = disk <= outer_lim
# What is the inner disk
inner = disk <= inner_lim
# Subtract the two disks to get a loop
loop = outer != inner
# Check if the loop is a disk
        if not inner.any():
logger.warning('Loop feature reduces to a disk because the loop '
'is too thick')
# If there is complete overlap then make the signal just the
# outer one
        if not loop.any():
loop = outer
# store the loop
signal[0:feature_size, 0:feature_size, int(np.round(feature_size /
2))] = loop
elif feature_type == 'sphere' or feature_type == 'cavity':
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy, zz = np.meshgrid(seq, seq, seq)
# Make a disk corresponding to the whole mesh grid
signal = ((xx - ((feature_size - 1) / 2)) ** 2 +
(yy - ((feature_size - 1) / 2)) ** 2 +
(zz - ((feature_size - 1) / 2)) ** 2)
# What are the limits of the rings being made
outer_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2), 0]
inner_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2),
thickness]
# Is the signal a sphere or a cavity?
if feature_type == 'sphere':
signal = signal <= outer_lim
else:
# Get the inner and outer sphere
outer = signal <= outer_lim
inner = signal <= inner_lim
# Subtract the two disks to get a loop
signal = outer != inner
# Check if the cavity is a sphere
            if not inner.any():
logger.warning('Cavity feature reduces to a sphere because '
'the cavity is too thick')
# If there is complete overlap then make the signal just the
# outer one
            if not signal.any():
signal = outer
# Assign the signal magnitude
signal = signal * signal_magnitude
# Return the signal
return signal
def _insert_idxs(feature_centre, feature_size, dimensions):
"""Returns the indices of where to put the signal into the signal volume
Parameters
----------
feature_centre : list, int
List of coordinates for the centre location of the signal
feature_size : list, int
How big is the signal's diameter.
dimensions : 3 length array, int
What are the dimensions of the volume you wish to create
Returns
----------
x_idxs : tuple
The x coordinates of where the signal is to be inserted
y_idxs : tuple
The y coordinates of where the signal is to be inserted
z_idxs : tuple
The z coordinates of where the signal is to be inserted
"""
# Set up the indexes within which to insert the signal
x_idx = [int(feature_centre[0] - (feature_size / 2)) + 1,
int(feature_centre[0] - (feature_size / 2) +
feature_size) + 1]
y_idx = [int(feature_centre[1] - (feature_size / 2)) + 1,
int(feature_centre[1] - (feature_size / 2) +
feature_size) + 1]
z_idx = [int(feature_centre[2] - (feature_size / 2)) + 1,
int(feature_centre[2] - (feature_size / 2) +
feature_size) + 1]
# Check for out of bounds
# Min Boundary
if 0 > x_idx[0]:
x_idx[0] = 0
if 0 > y_idx[0]:
y_idx[0] = 0
if 0 > z_idx[0]:
z_idx[0] = 0
# Max Boundary
if dimensions[0] < x_idx[1]:
x_idx[1] = dimensions[0]
if dimensions[1] < y_idx[1]:
y_idx[1] = dimensions[1]
if dimensions[2] < z_idx[1]:
z_idx[1] = dimensions[2]
# Return the idxs for data
return x_idx, y_idx, z_idx
def generate_signal(dimensions,
feature_coordinates,
feature_size,
feature_type,
signal_magnitude=[1],
signal_constant=1,
):
"""Generate volume containing signal
Generate signal, of a specific shape in specific regions, for a single
volume. This will then be convolved with the HRF across time
Parameters
----------
dimensions : 1d array, ndarray
What are the dimensions of the volume you wish to create
feature_coordinates : multidimensional array
What are the feature_coordinates of the signal being created.
Be aware of clipping: features far from the centre of the
brain will be clipped. If you wish to have multiple features
then list these as a features x 3 array. To create a feature of
a unique shape then supply all the individual
feature_coordinates of the shape and set the feature_size to 1.
feature_size : list, int
How big is the signal. If feature_coordinates=1 then only one value is
accepted, if feature_coordinates>1 then either one value must be
supplied or m values
feature_type : list, string
What feature_type of signal is being inserted? Options are cube,
loop, cavity, sphere. If feature_coordinates = 1 then
only one value is accepted, if feature_coordinates > 1 then either
one value must be supplied or m values
signal_magnitude : list, float
What is the (average) magnitude of the signal being generated? A
value of 1 means that the signal is one standard deviation from the
noise
signal_constant : list, bool
Is the signal constant across the feature (for univariate activity)
or is it a random pattern of a given magnitude across the feature (for
multivariate activity)
Returns
----------
volume_signal : 3 dimensional array, float
Creates a single volume containing the signal
"""
# Preset the volume
volume_signal = np.zeros(dimensions)
feature_quantity = round(feature_coordinates.shape[0])
# If there is only one feature_size value then make sure to duplicate it
# for all signals
if len(feature_size) == 1:
feature_size = feature_size * feature_quantity
# Do the same for feature_type
if len(feature_type) == 1:
feature_type = feature_type * feature_quantity
if len(signal_magnitude) == 1:
signal_magnitude = signal_magnitude * feature_quantity
# Iterate through the signals and insert in the data
for signal_counter in range(feature_quantity):
# What is the centre of this signal
if len(feature_size) > 1:
feature_centre = np.asarray(feature_coordinates[signal_counter, ])
else:
feature_centre = np.asarray(feature_coordinates)[0]
# Generate the feature to be inserted in the volume
signal = _generate_feature(feature_type[signal_counter],
feature_size[signal_counter],
signal_magnitude[signal_counter],
)
# If the signal is a random noise pattern then multiply these ones by
# a noise mask
if signal_constant == 0:
signal = signal * np.random.random([feature_size[signal_counter],
feature_size[signal_counter],
feature_size[signal_counter]])
# Pull out the idxs for where to insert the data
x_idx, y_idx, z_idx = _insert_idxs(feature_centre,
feature_size[signal_counter],
dimensions)
# Insert the signal into the Volume
volume_signal[x_idx[0]:x_idx[1], y_idx[0]:y_idx[1], z_idx[0]:z_idx[
1]] = signal
return volume_signal
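# Illustrative usage sketch for generate_signal (the coordinates and sizes
# below are hypothetical): a cube of width 3 and unit magnitude fills
# 27 voxels of the volume.
#
# >>> import numpy as np
# >>> vol = generate_signal(dimensions=np.array([10, 10, 10]),
# ...                       feature_coordinates=np.array([[5, 5, 5]]),
# ...                       feature_size=[3],
# ...                       feature_type=['cube'])
# >>> float(vol.sum())
# 27.0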
def generate_stimfunction(onsets,
event_durations,
total_time,
weights=[1],
timing_file=None,
temporal_resolution=100.0,
):
"""Return the function for the timecourse events
When do stimuli onset, how long for and to what extent should you
resolve the fMRI time course. There are two ways to create this, either
by supplying onset, duration and weight information or by supplying a
timing file (in the three column format used by FSL).
Parameters
----------
onsets : list, int
What are the timestamps (in s) for when an event you want to
generate onsets?
event_durations : list, int
What are the durations (in s) of the events you want to
generate? If there is only one value then this will be assigned
to all onsets
total_time : int
How long (in s) is the experiment in total.
weights : list, float
What is the weight for each event (how high is the box car)? If
there is only one value then this will be assigned to all onsets
timing_file : string
The filename (with path) to a three column timing file (FSL) to
make the events. Still requires total_time to work
temporal_resolution : float
How many elements per second are you modeling for the
timecourse. This is useful when you want to model the HRF at an
arbitrarily high resolution (and then downsample to your TR later).
Returns
----------
stim_function : 1 by timepoint array, float
The time course of stimulus evoked activation. This has a temporal
resolution of temporal resolution / 1.0 elements per second
"""
# If the timing file is supplied then use this to acquire the
if timing_file is not None:
# Read in text file line by line
with open(timing_file) as f:
text = f.readlines() # Pull out file as a an array
# Preset
onsets = list()
event_durations = list()
weights = list()
# Pull out the onsets, weights and durations, set as a float
for line in text:
onset, duration, weight = line.strip().split()
# Check if the onset is more precise than the temporal resolution
upsampled_onset = float(onset) * temporal_resolution
# Because of float precision, the upsampled values might
# not round as expected .
# E.g. float('1.001') * 1000 = 1000.99
            if not np.allclose(upsampled_onset, np.round(upsampled_onset)):
warning = 'Your onset: ' + str(onset) + ' has more decimal ' \
'points than the ' \
'specified temporal ' \
'resolution can ' \
'resolve. This means' \
' that events might' \
' be missed. ' \
'Consider increasing' \
' the temporal ' \
'resolution.'
logger.warning(warning)
onsets.append(float(onset))
event_durations.append(float(duration))
weights.append(float(weight))
# If only one duration is supplied then duplicate it for the length of
# the onset variable
if len(event_durations) == 1:
event_durations = event_durations * len(onsets)
if len(weights) == 1:
weights = weights * len(onsets)
# Check files
if np.max(onsets) > total_time:
raise ValueError('Onsets outside of range of total time.')
# Generate the time course as empty, each element is a millisecond by
# default
stimfunction = np.zeros((int(round(total_time * temporal_resolution)), 1))
# Cycle through the onsets
for onset_counter in list(range(len(onsets))):
# Adjust for the resolution
onset_idx = int(np.floor(onsets[onset_counter] * temporal_resolution))
# Adjust for the resolution
offset_idx = int(np.floor((onsets[onset_counter] + event_durations[
onset_counter]) * temporal_resolution))
# Store the weights
stimfunction[onset_idx:offset_idx, 0] = [weights[onset_counter]]
return stimfunction
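# Illustrative usage sketch (the onsets and durations below are
# hypothetical): with the default temporal_resolution of 100 samples per
# second, a 40 s run yields a 4000-element boxcar time course.
#
# >>> sf = generate_stimfunction(onsets=[0, 20], event_durations=[5],
# ...                            total_time=40)
# >>> sf.shape
# (4000, 1)
# >>> float(sf.sum() / 100)   # two 5 s events -> 10 s of "on" time
# 10.0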
def export_3_column(stimfunction,
filename,
temporal_resolution=100.0
):
""" Output a tab separated three column timing file
This produces a three column tab separated text file, with the three
columns representing onset time (s), event duration (s) and weight,
respectively. Useful if you want to run the simulated data through FEAT
analyses. In a way, this is the reverse of generate_stimfunction
Parameters
----------
stimfunction : timepoint by 1 array
The stimulus function describing the time course of events. For
instance output from generate_stimfunction.
filename : str
The name of the three column text file to be output
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Iterate through the stim function
stim_counter = 0
event_counter = 0
while stim_counter < stimfunction.shape[0]:
# Is it an event?
if stimfunction[stim_counter, 0] != 0:
# When did the event start?
event_onset = str(stim_counter / temporal_resolution)
# The weight of the stimulus
weight = str(stimfunction[stim_counter, 0])
# Reset
event_duration = 0
# Is the event still ongoing?
            while stim_counter < stimfunction.shape[0] and \
                    stimfunction[stim_counter, 0] != 0:
                # Add one element (1 / temporal_resolution s) per step
                event_duration = event_duration + 1
# Increment
stim_counter = stim_counter + 1
# How long was the event in seconds
event_duration = str(event_duration / temporal_resolution)
# Append this row to the data file
with open(filename, "a") as file:
file.write(event_onset + '\t' + event_duration + '\t' +
weight + '\n')
# Increment the number of events
event_counter = event_counter + 1
# Increment
stim_counter = stim_counter + 1
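# Illustrative usage sketch, not part of the original module: round-trip a
# stimulus function into a three column timing file. The helper name and
# output file name are hypothetical.
def _example_export_3_column():
    stimfunction = generate_stimfunction(onsets=[10, 30],
                                         event_durations=[1],
                                         total_time=100,
                                         weights=[1],
                                         )
    # Appends one row per event: onset (s), duration (s) and weight
    export_3_column(stimfunction, 'events_3col.txt')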
def export_epoch_file(stimfunction,
filename,
tr_duration,
temporal_resolution=100.0
):
""" Output an epoch file, necessary for some inputs into brainiak
This takes in the time course of stimulus events and outputs the epoch
file used in Brainiak. The epoch file is a way to structure the timing
information in fMRI that allows you to flexibly input different stimulus
sequences. This is a list with each entry a 3d matrix corresponding to a
participant. The dimensions of the 3d matrix are condition by epoch by
time. For the i-th condition, if its k-th epoch spans time points t_m to
t_n-1, then [i, k, t_m:t_n] are 1 in the epoch file.
Parameters
----------
stimfunction : list of timepoint by condition arrays
The stimulus function describing the time course of events. Each
list entry is from a different participant, each row is a different
timepoint (with the given temporal precision), each column is a
different condition. export_epoch_file is looking for differences in
the value of stimfunction to identify the start and end of an
epoch. If epochs in stimfunction are coded with the same weight and
there is no time between blocks then export_epoch_file won't be able to
label them as different epochs
filename : str
The name of the epoch file to be output
tr_duration : float
How long is each TR in seconds
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Cycle through the participants, different entries in the list
epoch_file = [0] * len(stimfunction)
for ppt_counter in range(len(stimfunction)):
# What is the time course for the participant (binarized)
stimfunction_ppt = np.abs(stimfunction[ppt_counter]) > 0
# Down sample the stim function
stride = tr_duration * temporal_resolution
stimfunction_downsampled = stimfunction_ppt[::int(stride), :]
# Calculates the number of event onsets. This uses changes in value
# to reflect different epochs. This might be false in some cases (the
# weight is non-uniform over an epoch or there is no break between
# identically weighted epochs).
epochs = 0 # Preset
conditions = stimfunction_ppt.shape[1]
for condition_counter in range(conditions):
weight_change = (np.diff(stimfunction_downsampled[:,
condition_counter], 1, 0) != 0)
# If the first or last events are 'on' then make these
            # represent an epoch change
if stimfunction_downsampled[0, condition_counter] == 1:
weight_change[0] = True
if stimfunction_downsampled[-1, condition_counter] == 1:
weight_change[-1] = True
epochs += int(np.max(np.sum(weight_change, 0)) / 2)
# Get other information
trs = stimfunction_downsampled.shape[0]
# Make a timing file for this participant
epoch_file[ppt_counter] = np.zeros((conditions, epochs, trs))
# Cycle through conditions
epoch_counter = 0 # Reset and count across conditions
tr_counter = 0
while tr_counter < stimfunction_downsampled.shape[0]:
for condition_counter in range(conditions):
# Is it an event?
if tr_counter < stimfunction_downsampled.shape[0] and \
stimfunction_downsampled[
tr_counter, condition_counter] == 1:
# Add a one for this TR
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_counter] = 1
# Find the next non event value
end_idx = np.where(stimfunction_downsampled[tr_counter:,
condition_counter] == 0)[
0][0]
tr_idxs = list(range(tr_counter, tr_counter + end_idx))
# Add ones to all the trs within this event time frame
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_idxs] = 1
# Start from this index
tr_counter += end_idx
# Increment
epoch_counter += 1
# Increment the counter
tr_counter += 1
# Convert to boolean
epoch_file[ppt_counter] = epoch_file[ppt_counter].astype('bool')
# Save the file
np.save(filename, epoch_file)
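# Illustrative usage sketch, not part of the original module: write an
# epoch file for one participant with a single condition. The helper name,
# file name and TR duration are hypothetical.
def _example_export_epoch_file():
    stimfunction = generate_stimfunction(onsets=[10, 30],
                                         event_durations=[1],
                                         total_time=100,
                                         weights=[1],
                                         )
    # export_epoch_file expects a list with one entry per participant
    export_epoch_file([stimfunction], 'epoch_file.npy', tr_duration=2.0)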
def _double_gamma_hrf(response_delay=6,
undershoot_delay=12,
response_dispersion=0.9,
undershoot_dispersion=0.9,
response_scale=1,
undershoot_scale=0.035,
temporal_resolution=100.0,
):
"""Create the double gamma HRF with the timecourse evoked activity.
Default values are based on Glover, 1999 and Walvaert, Durnez,
Moerkerke, Verdoolaege and Rosseel, 2011
Parameters
----------
response_delay : float
How many seconds until the peak of the HRF
undershoot_delay : float
How many seconds until the trough of the HRF
response_dispersion : float
How wide is the rising peak dispersion
undershoot_dispersion : float
How wide is the undershoot dispersion
    response_scale : float
        How large is the response (the height of the rising peak)
    undershoot_scale : float
        How large is the undershoot (the depth of the trough)
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
    hrf : one dimensional array, float
        A double gamma HRF to be used for convolution.
"""
    hrf_length = 30  # How many seconds of the HRF will you model?
    hrf = [0] * int(hrf_length * temporal_resolution)
# When is the peak of the two aspects of the HRF
response_peak = response_delay * response_dispersion
undershoot_peak = undershoot_delay * undershoot_dispersion
for hrf_counter in list(range(len(hrf) - 1)):
# Specify the elements of the HRF for both the response and undershoot
resp_pow = math.pow((hrf_counter / temporal_resolution) /
response_peak, response_delay)
resp_exp = math.exp(-((hrf_counter / temporal_resolution) -
response_peak) /
response_dispersion)
response_model = response_scale * resp_pow * resp_exp
undershoot_pow = math.pow((hrf_counter / temporal_resolution) /
undershoot_peak,
undershoot_delay)
        undershoot_exp = math.exp(-((hrf_counter / temporal_resolution) -
                                    undershoot_peak) /
                                  undershoot_dispersion)
undershoot_model = undershoot_scale * undershoot_pow * undershoot_exp
# For this time point find the value of the HRF
hrf[hrf_counter] = response_model - undershoot_model
return hrf
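# Illustrative sanity check, not part of the original module: with the
# default parameters the response term peaks at approximately
# response_delay * response_dispersion = 5.4 s into the kernel.
def _example_double_gamma_hrf():
    hrf = _double_gamma_hrf()
    peak_time = np.argmax(hrf) / 100.0  # Default temporal_resolution
    return hrf, peak_time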
def convolve_hrf(stimfunction,
tr_duration,
hrf_type='double_gamma',
scale_function=True,
temporal_resolution=100.0,
):
""" Convolve the specified hrf with the timecourse.
The output of this is a downsampled convolution of the stimfunction and
the HRF function. If temporal_resolution is 1 / tr_duration then the
output will be the same length as stimfunction. This time course assumes
that slice time correction has occurred and all slices have been aligned
to the middle time point in the TR.
Be aware that if scaling is on and event durations are less than the
duration of a TR then the hrf may or may not come out as anticipated.
This is because very short events would evoke a small absolute response
after convolution but if there are only short events and you scale then
this will look similar to a convolution with longer events. In general
scaling is useful, which is why it is the default, but be aware of this
edge case and if it is a concern, set the scale_function to false.
Parameters
----------
stimfunction : timepoint by feature array
What is the time course of events to be modelled in this
experiment. This can specify one or more timecourses of events.
The events can be weighted or binary
tr_duration : float
How long (in s) between each volume onset
hrf_type : str or list
Takes in a string describing the hrf that ought to be created.
Can instead take in a vector describing the HRF as it was
specified by any function. The default is 'double_gamma' in which
an initial rise and an undershoot are modelled.
scale_function : bool
Do you want to scale the function to a range of 1
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
signal_function : timepoint by timecourse array
The time course of the HRF convolved with the stimulus function.
This can have multiple time courses specified as different
columns in this array.
"""
# Check if it is timepoint by feature
if stimfunction.shape[0] < stimfunction.shape[1]:
logger.warning('Stimfunction may be the wrong shape')
    if np.any(np.sum(abs(stimfunction), 0) == 0):
        logger.warning('stimfunction contains timecourses of all zeros; '
                       'scaling will produce NaNs')
# How will stimfunction be resized
stride = int(temporal_resolution * tr_duration)
duration = int(stimfunction.shape[0] / stride)
# Generate the hrf to use in the convolution
    if hrf_type == 'double_gamma':
        hrf = _double_gamma_hrf(temporal_resolution=temporal_resolution)
    elif isinstance(hrf_type, list):
        hrf = hrf_type
    else:
        raise ValueError('Unknown hrf_type supplied: ' + str(hrf_type))
# How many timecourses are there
list_num = stimfunction.shape[1]
# Create signal functions for each list in the stimfunction
for list_counter in range(list_num):
# Perform the convolution
signal_temp = np.convolve(stimfunction[:, list_counter], hrf)
# Down sample the signal function so that it only has one element per
# TR. This assumes that all slices are collected at the same time,
# which is often the result of slice time correction. In other
# words, the output assumes slice time correction
signal_temp = signal_temp[:duration * stride]
signal_vox = signal_temp[int(stride / 2)::stride]
# Scale the function so that the peak response is 1
if scale_function:
signal_vox = signal_vox / np.max(signal_vox)
# Add this function to the stack
if list_counter == 0:
signal_function = np.zeros((len(signal_vox), list_num))
signal_function[:, list_counter] = signal_vox
return signal_function
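# Illustrative usage sketch, not part of the original module: convolve a
# stimulus function with the default double gamma HRF. The helper name is
# hypothetical; a temporal resolution of 100 elements per second and a 2 s
# TR give one output row per TR (50 rows for a 100 s run).
def _example_convolve_hrf():
    stimfunction = generate_stimfunction(onsets=[10, 30],
                                         event_durations=[1],
                                         total_time=100,
                                         weights=[1],
                                         )
    signal_function = convolve_hrf(stimfunction, tr_duration=2.0)
    return signal_function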
def apply_signal(signal_function,
volume_signal,
):
"""Combine the signal volume with its timecourse
Apply the convolution of the HRF and stimulus time course to the
volume.
Parameters
----------
signal_function : timepoint by timecourse array, float
The timecourse of the signal over time. If there is only one column
then the same timecourse is applied to all non-zero voxels in
volume_signal. If there is more than one column then each column is
paired with a non-zero voxel in the volume_signal (a 3d numpy array
generated in generate_signal).
volume_signal : multi dimensional array, float
The volume containing the signal to be convolved with the same
dimensions as the output volume. The elements in volume_signal
indicate how strong each signal in signal_function are modulated by
in the output volume
Returns
----------
    signal : multidimensional array, float
        The convolved signal volume with the same first three dimensions
        as volume_signal and the same 4th dimension as signal_function
"""
# How many timecourses are there within the signal_function
timepoints = signal_function.shape[0]
timecourses = signal_function.shape[1]
# Preset volume
signal = np.zeros([volume_signal.shape[0], volume_signal.shape[
1], volume_signal.shape[2], timepoints])
# Find all the non-zero voxels in the brain
idxs = np.where(volume_signal != 0)
    if timecourses == 1:
        # If there is only one time course supplied then duplicate it for
        # every voxel (np.tile avoids the deprecated numpy.matlib)
        signal_function = np.tile(signal_function, (1, len(idxs[0])))
    elif len(idxs[0]) != timecourses:
        raise IndexError('The number of non-zero voxels in the volume and '
                         'the number of timecourses do not match. Aborting')
# For each coordinate with a non zero voxel, fill in the timecourse for
# that voxel
for idx_counter in range(len(idxs[0])):
x = idxs[0][idx_counter]
y = idxs[1][idx_counter]
z = idxs[2][idx_counter]
# Pull out the function for this voxel
signal_function_temp = signal_function[:, idx_counter]
# Multiply the voxel value by the function timecourse
signal[x, y, z, :] = volume_signal[x, y, z] * signal_function_temp
return signal
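# Illustrative usage sketch, not part of the original module: place one
# timecourse into a single voxel of a hypothetical 10 x 10 x 10 volume.
def _example_apply_signal(signal_function):
    volume_signal = np.zeros((10, 10, 10))
    volume_signal[5, 5, 5] = 1  # The one non-zero voxel carries the signal
    # Output is 10 x 10 x 10 x timepoints
    return apply_signal(signal_function, volume_signal)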
def _calc_fwhm(volume,
mask,
voxel_size=[1.0, 1.0, 1.0],
):
""" Calculate the FWHM of a volume
Estimates the FWHM (mm) of a volume's non-masked voxels
Parameters
----------
volume : 3 dimensional array
Functional data to have the FWHM measured.
mask : 3 dimensional array
A binary mask of the brain voxels in volume
voxel_size : length 3 list, float
Millimeters per voxel for x, y and z.
Returns
-------
    fwhm : float
        The FWHM of the volume in mm
"""
# What are the dimensions of the volume
dimensions = volume.shape
    # Preset the accumulators for the sums and counts
v_count = 0
v_sum = 0
v_sq = 0
d_sum = [0.0, 0.0, 0.0]
d_sq = [0.0, 0.0, 0.0]
d_count = [0, 0, 0]
# Pull out all the voxel coordinates
coordinates = list(product(range(dimensions[0]),
range(dimensions[1]),
range(dimensions[2])))
# Find the sum of squared error for the non-masked voxels in the brain
for i in list(range(len(coordinates))):
# Pull out this coordinate
x, y, z = coordinates[i]
# Is this within the mask?
if mask[x, y, z] > 0:
            # Find the volume sum and squared values
v_count += 1
v_sum += np.abs(volume[x, y, z])
v_sq += volume[x, y, z] ** 2
# Get the volume variance
v_var = (v_sq - ((v_sum ** 2) / v_count)) / (v_count - 1)
for i in list(range(len(coordinates))):
# Pull out this coordinate
x, y, z = coordinates[i]
# Is this within the mask?
if mask[x, y, z] > 0:
# For each xyz dimension calculate the squared
# difference of this voxel and the next
in_range = (x < dimensions[0] - 1)
in_mask = in_range and (mask[x + 1, y, z] > 0)
included = in_mask and (~np.isnan(volume[x + 1, y, z]))
if included:
d_sum[0] += volume[x, y, z] - volume[x + 1, y, z]
d_sq[0] += (volume[x, y, z] - volume[x + 1, y, z]) ** 2
d_count[0] += 1
in_range = (y < dimensions[1] - 1)
in_mask = in_range and (mask[x, y + 1, z] > 0)
included = in_mask and (~np.isnan(volume[x, y + 1, z]))
if included:
d_sum[1] += volume[x, y, z] - volume[x, y + 1, z]
d_sq[1] += (volume[x, y, z] - volume[x, y + 1, z]) ** 2
d_count[1] += 1
in_range = (z < dimensions[2] - 1)
in_mask = in_range and (mask[x, y, z + 1] > 0)
included = in_mask and (~np.isnan(volume[x, y, z + 1]))
if included:
d_sum[2] += volume[x, y, z] - volume[x, y, z + 1]
d_sq[2] += (volume[x, y, z] - volume[x, y, z + 1]) ** 2
d_count[2] += 1
# Find the variance
d_var = np.divide((d_sq - np.divide(np.power(d_sum, 2),
d_count)), (np.add(d_count, -1)))
o_var = np.divide(-1, (4 * np.log(1 - (0.5 * d_var / v_var))))
fwhm3 = np.sqrt(o_var) * 2 * np.sqrt(2 * np.log(2))
fwhm = np.prod(np.multiply(fwhm3, voxel_size)) ** (1 / 3)
return fwhm
def _calc_sfnr(volume,
mask,
):
""" Calculate the the SFNR of a volume
Calculates the Signal to Fluctuation Noise Ratio, the mean divided
by the detrended standard deviation of each brain voxel. Based on
Friedman and Glover, 2006
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
Returns
-------
    sfnr : float
        The SFNR of the volume
"""
# Make a matrix of brain voxels by time
brain_voxels = volume[mask > 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels, 1)
# Detrend (second order polynomial) the voxels over time and then
# calculate the standard deviation.
order = 2
seq = np.linspace(1, brain_voxels.shape[1], brain_voxels.shape[1])
detrend_poly = np.polyfit(seq, brain_voxels.transpose(), order)
# Detrend for each voxel
detrend_voxels = np.zeros(brain_voxels.shape)
for voxel in range(brain_voxels.shape[0]):
trend = detrend_poly[0, voxel] * seq ** 2 + detrend_poly[1, voxel] * \
seq + detrend_poly[2, voxel]
detrend_voxels[voxel, :] = brain_voxels[voxel, :] - trend
std_voxels = np.nanstd(detrend_voxels, 1)
# Calculate the sfnr of all voxels across the brain
sfnr_voxels = mean_voxels / std_voxels
# Return the average sfnr
return np.mean(sfnr_voxels)
def _calc_snr(volume,
mask,
dilation=5,
reference_tr=None,
):
""" Calculate the the SNR of a volume
Calculates the Signal to Noise Ratio, the mean of brain voxels
divided by the standard deviation across non-brain voxels. Specify a TR
value to calculate the mean and standard deviation for that TR. To
calculate the standard deviation of non-brain voxels we can subtract
any baseline structure away first, hence getting at deviations due to the
system noise and not something like high baseline values in non-brain
parts of the body.
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
dilation : int
How many binary dilations do you want to perform on the mask to
determine the non-brain voxels. If you increase this the SNR
increases and the non-brain voxels (after baseline subtraction) more
closely resemble a gaussian
reference_tr : int or list
Specifies the TR to calculate the SNR for. If multiple are supplied
then it will use the average of them.
Returns
-------
snr : float
The SNR of the volume
"""
# If no TR is specified then take all of them
if reference_tr is None:
reference_tr = list(range(volume.shape[3]))
# Dilate the mask in order to ensure that non-brain voxels are far from
# the brain
if dilation > 0:
mask_dilated = ndimage.morphology.binary_dilation(mask,
iterations=dilation)
else:
mask_dilated = mask
# Make a matrix of brain and non_brain voxels, selecting the timepoint/s
brain_voxels = volume[mask > 0][:, reference_tr]
nonbrain_voxels = (volume[:, :, :, reference_tr]).astype('float64')
# If you have multiple TRs
if len(brain_voxels.shape) > 1:
brain_voxels = np.mean(brain_voxels, 1)
nonbrain_voxels = np.mean(nonbrain_voxels, 3)
nonbrain_voxels = nonbrain_voxels[mask_dilated == 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels)
# Find the standard deviation of the voxels
std_voxels = np.nanstd(nonbrain_voxels)
# Return the snr
return mean_voxels / std_voxels
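# Illustrative usage sketch, not part of the original module: compute both
# quality metrics above for a 4d volume and a binary brain mask. The
# helper name is hypothetical.
def _example_quality_metrics(volume, mask):
    sfnr = _calc_sfnr(volume, mask)  # Brain mean over detrended temporal sd
    snr = _calc_snr(volume, mask)  # Brain mean over non-brain spatial sd
    return sfnr, snr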
def _calc_ARMA_noise(volume,
mask,
auto_reg_order=1,
ma_order=1,
sample_num=100,
):
""" Calculate the the ARMA noise of a volume
This calculates the autoregressive and moving average noise of the volume
over time by sampling brain voxels and averaging them.
Parameters
----------
    volume : 4d array or 1d array, float
        Take a volume time series from which brain voxels will be
        sampled. Can also accept a one dimensional time course (the mask
        input is then ignored).
mask : 3d array, binary
A binary mask the same size as the volume
    auto_reg_order : int
        What order of the autoregression do you want to estimate
    ma_order : int
        What order of the moving average do you want to estimate
sample_num : int
How many voxels would you like to sample to calculate the AR values.
The AR distribution of real data is approximately exponential maxing
at 1. From analyses across a number of participants, to get less
than 3% standard deviation of error from the true mean it is
necessary to sample at least 100 voxels.
Returns
-------
auto_reg_rho : list of floats
Rho of a specific order for the autoregression noise in the data
    ma_rho : list of floats
        Moving average coefficients of a specific order for the data
"""
# Pull out the non masked voxels
if len(volume.shape) > 1:
brain_timecourse = volume[mask > 0]
else:
# If a 1 dimensional input is supplied then reshape it to make the
# timecourse
brain_timecourse = volume.reshape(1, len(volume))
# Identify some brain voxels to assess
voxel_idxs = list(range(brain_timecourse.shape[0]))
np.random.shuffle(voxel_idxs)
# If there are more samples than voxels, take all of the voxels
if len(voxel_idxs) < sample_num:
sample_num = len(voxel_idxs)
auto_reg_rho_all = np.zeros((sample_num, auto_reg_order))
ma_all = np.zeros((sample_num, ma_order))
for voxel_counter in range(sample_num):
# Get the timecourse and demean it
timecourse = brain_timecourse[voxel_idxs[voxel_counter], :]
demeaned_timecourse = timecourse - timecourse.mean()
# Pull out the ARMA values (depends on order)
try:
model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order])
model_fit = model.fit(disp=False)
params = model_fit.params
except (ValueError, LinAlgError):
params = np.ones(auto_reg_order + ma_order + 1) * np.nan
# Add to the list
auto_reg_rho_all[voxel_counter, :] = params[1:auto_reg_order + 1]
ma_all[voxel_counter, :] = params[auto_reg_order + 1:]
# Average all of the values and then convert them to a list
auto_reg_rho = np.nanmean(auto_reg_rho_all, 0).tolist()
ma_rho = np.nanmean(ma_all, 0).tolist()
# Return the coefficients
return auto_reg_rho, ma_rho
def calc_noise(volume,
mask,
template,
noise_dict=None,
):
""" Calculates the noise properties of the volume supplied.
This estimates what noise properties the volume has. For instance it
determines the spatial smoothness, the autoregressive noise, system
noise etc. Read the doc string for generate_noise to understand how
these different types of noise interact.
Parameters
----------
    volume : 4d numpy array, float
        Take in a functional volume (as a numpy array) to be used to
        estimate its noise properties
mask : 3d numpy array, binary
A binary mask of the brain, the same size as the volume
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
noise_dict : dict
The initialized dictionary of the calculated noise parameters of the
provided dataset (usually it is only the voxel size)
Returns
-------
noise_dict : dict
Return a dictionary of the calculated noise parameters of the provided
dataset
"""
# Check the inputs
if template.max() > 1.1:
raise ValueError('Template out of range')
# Create the mask if not supplied and set the mask size
if mask is None:
raise ValueError('Mask not supplied')
# Update noise dict if it is not yet created
if noise_dict is None:
noise_dict = {'voxel_size': [1.0, 1.0, 1.0]}
elif 'voxel_size' not in noise_dict:
noise_dict['voxel_size'] = [1.0, 1.0, 1.0]
# What is the max activation of the mean of this voxel (allows you to
# convert between the mask and the mean of the brain volume)
noise_dict['max_activity'] = np.nanmax(np.mean(volume, 3))
# Calculate the temporal variability of the volume
noise_dict['auto_reg_rho'], noise_dict['ma_rho'] = _calc_ARMA_noise(
volume, mask)
# Set it such that all of the temporal variability will be accounted for
# by the AR component
noise_dict['auto_reg_sigma'] = 1
# Preset these values to be zero, as in you are not attempting to
# simulate them
noise_dict['physiological_sigma'] = 0
noise_dict['task_sigma'] = 0
noise_dict['drift_sigma'] = 0
# Calculate the sfnr
noise_dict['sfnr'] = _calc_sfnr(volume,
mask,
)
# Calculate the fwhm on a subset of volumes
if volume.shape[3] > 100:
# Take only 100 shuffled TRs
trs = np.random.choice(volume.shape[3], size=100, replace=False)
else:
trs = list(range(0, volume.shape[3]))
# Go through the trs and pull out the fwhm
fwhm = [0] * len(trs)
for tr in range(len(trs)):
fwhm[tr] = _calc_fwhm(volume[:, :, :, trs[tr]],
mask,
noise_dict['voxel_size'],
)
# Keep only the mean
noise_dict['fwhm'] = np.mean(fwhm)
noise_dict['snr'] = _calc_snr(volume,
mask,
)
# Return the noise dictionary
return noise_dict
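# Illustrative usage sketch, not part of the original module: estimate the
# noise parameters of a 4d volume, deriving the mask and template from the
# volume itself with mask_brain (defined later in this module). The helper
# name and the 3 mm voxel size are hypothetical.
def _example_calc_noise(volume):
    mask, template = mask_brain(volume, mask_self=True)
    noise_dict = calc_noise(volume, mask, template,
                            noise_dict={'voxel_size': [3.0, 3.0, 3.0]})
    return noise_dict  # Includes 'snr', 'sfnr', 'fwhm', 'auto_reg_rho'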
def _generate_noise_system(dimensions_tr,
spatial_sd,
temporal_sd,
spatial_noise_type='gaussian',
temporal_noise_type='gaussian',
):
"""Generate the scanner noise
Generate system noise, either rician, gaussian or exponential, for the
scanner. Generates a distribution with a SD of 1. If you look at the
distribution of non-brain voxel intensity in modern scans you will see
it is rician. However, depending on how you have calculated the SNR and
whether the template is being used you will want to use this function
differently: the voxels outside the brain tend to be stable over time and
usually reflect structure in the MR signal (e.g. the
baseline MR of the head coil or skull). Hence the template captures this
rician noise structure. If you are adding the machine noise to the
template, as is done in generate_noise, then you are likely doubling up
on the addition of machine noise. In such cases, machine noise seems to
be better modelled by gaussian noise on top of this rician structure.
Parameters
----------
dimensions_tr : n length array, int
What are the dimensions of the volume you wish to insert
noise into. This can be a volume of any size
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
    spatial_noise_type : str
        String specifying the noise type for the spatial noise volume. If
        you aren't specifying the noise template then rician is the
        appropriate model of noise. However, if you are subtracting the
        template, as is default, then you should use gaussian. (If the
        dilation parameter of _calc_snr is <10 then gaussian is only an
        approximation)
    temporal_noise_type : str
        String specifying the noise type for the temporal noise, with the
        same options as above
Returns
----------
system_noise : multidimensional array, float
Create a volume with system noise
"""
def noise_volume(dimensions,
noise_type,
):
if noise_type == 'rician':
# Generate the Rician noise (has an SD of 1)
noise = stats.rice.rvs(b=0, loc=0, scale=1.527, size=dimensions)
elif noise_type == 'exponential':
# Make an exponential distribution (has an SD of 1)
noise = stats.expon.rvs(0, scale=1, size=dimensions)
        elif noise_type == 'gaussian':
            noise = np.random.randn(np.prod(dimensions)).reshape(dimensions)
        else:
            raise ValueError('Unknown noise type: ' + str(noise_type))
        # Return the noise
        return noise
# Get just the xyz coordinates
dimensions = np.asarray([dimensions_tr[0],
dimensions_tr[1],
dimensions_tr[2],
1])
# Generate noise
spatial_noise = noise_volume(dimensions, spatial_noise_type)
temporal_noise = noise_volume(dimensions_tr, temporal_noise_type)
# Make the system noise have a specific spatial variability
spatial_noise *= spatial_sd
# Set the size of the noise
temporal_noise *= temporal_sd
# The mean in time of system noise needs to be zero, so subtract the
# means of the temporal noise in time
temporal_noise_mean = np.mean(temporal_noise, 3).reshape(dimensions[0],
dimensions[1],
dimensions[2],
1)
temporal_noise = temporal_noise - temporal_noise_mean
# Save the combination
system_noise = spatial_noise + temporal_noise
return system_noise
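# Illustrative usage sketch, not part of the original module: gaussian
# system noise for a hypothetical 10 x 10 x 10 volume over 20 TRs, with
# hypothetical spatial and temporal standard deviations.
def _example_generate_noise_system():
    dimensions_tr = np.array([10, 10, 10, 20])
    return _generate_noise_system(dimensions_tr,
                                  spatial_sd=4.0,
                                  temporal_sd=5.0,
                                  )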
def _generate_noise_temporal_task(stimfunction_tr,
motion_noise='gaussian',
):
"""Generate the signal dependent noise
    Create noise specific to the signal; for instance, there is variability
    in how the signal manifests on each event
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
motion_noise : str
What type of noise will you generate? Can be gaussian or rician
Returns
----------
noise_task : one dimensional array, float
Generates the temporal task noise timecourse
"""
# Make the noise to be added
stimfunction_tr = stimfunction_tr != 0
if motion_noise == 'gaussian':
noise = stimfunction_tr * np.random.normal(0, 1,
size=stimfunction_tr.shape)
elif motion_noise == 'rician':
noise = stimfunction_tr * stats.rice.rvs(0, 1,
size=stimfunction_tr.shape)
noise_task = stimfunction_tr + noise
# Normalize
noise_task = stats.zscore(noise_task).flatten()
return noise_task
def _generate_noise_temporal_drift(trs,
tr_duration,
basis="cos_power_drop",
period=150,
):
"""Generate the drift noise
    Create a trend (sine, discrete_cos or cos_power_drop), of a given
    period and random phase, to represent the drift of the signal over time
Parameters
----------
trs : int
How many volumes (aka TRs) are there
tr_duration : float
        How long in seconds is each volume acquisition
basis : str
What is the basis function for the drift. Could be made of discrete
cosines (for longer run durations, more basis functions are
created) that either have equal power ('discrete_cos') or the power
diminishes such that 99% of the power is below a specified frequency
('cos_power_drop'). Alternatively, this drift could simply be a sine
wave ('sine')
period : int
When the basis function is 'cos_power_drop' this is the period over
which no power of the drift exceeds (i.e. the power of the drift
asymptotes at this period). However for the other basis functions,
this is simply how many seconds is the period of oscillation of the
drift
Returns
----------
noise_drift : one dimensional array, float
The drift timecourse of activity
"""
# Calculate drift differently depending on the basis function
if basis == 'discrete_cos':
# Specify each tr in terms of its phase with the given period
timepoints = np.linspace(0, trs - 1, trs)
timepoints = ((timepoints * tr_duration) / period) * 2 * np.pi
# Specify the other timing information
duration = trs * tr_duration
        # How many basis functions do you have?
        basis_funcs = int(np.floor(duration / period))
if basis_funcs == 0:
err_msg = 'Too few timepoints (' + str(trs) + ') to accurately ' \
'model drift'
logger.warning(err_msg)
basis_funcs = 1
noise_drift = np.zeros((timepoints.shape[0], basis_funcs))
for basis_counter in list(range(1, basis_funcs + 1)):
# What steps do you want to take for this basis function
timepoints_basis = (timepoints/basis_counter) + (np.random.rand()
* np.pi * 2)
# Store the drift from this basis func
noise_drift[:, basis_counter - 1] = np.cos(timepoints_basis)
# Average the drift
noise_drift = np.mean(noise_drift, 1)
elif basis == 'sine':
# Calculate the cycles of the drift for a given function.
cycles = trs * tr_duration / period
# Create a sine wave with a given number of cycles and random phase
timepoints = np.linspace(0, trs - 1, trs)
phaseshift = np.pi * 2 * np.random.random()
phase = (timepoints / (trs - 1) * cycles * 2 * np.pi) + phaseshift
noise_drift = np.sin(phase)
elif basis == 'cos_power_drop':
# Make a vector counting each TR
timepoints = np.linspace(0, trs - 1, trs) * tr_duration
# Specify the other timing information
duration = trs * tr_duration
        # How many basis functions do you have? One per TR, the most
        # allowed by the Nyquist limit
        basis_funcs = int(trs)
noise_drift = np.zeros((timepoints.shape[0], basis_funcs))
for basis_counter in list(range(1, basis_funcs + 1)):
# What steps do you want to take for this basis function
random_phase = np.random.rand() * np.pi * 2
timepoint_phase = (timepoints / duration * np.pi * basis_counter)
# In radians, what is the value for each time point
timepoints_basis = timepoint_phase + random_phase
# Store the drift from this basis func
noise_drift[:, basis_counter - 1] = np.cos(timepoints_basis)
def power_drop(r, L, F, tr_duration):
# Function to return the drop rate for the power of basis functions
# In other words, how much should the weight of each basis function
# reduce in order to make the power you retain of the period's
# frequency be 99% of the total power of the highest frequency, as
# defined by the DCT.
# For an example where there are 20 time points, there will be 20
# basis functions in the DCT. If the period of the signal you wish
# to simulate is such that 99% of the power should drop off after
# the equivalent of 5 of these basis functions, then the way this
# code works is it finds the rate at which power must drop off for
# all of the 20 basis functions such that by the 5th one, there is
# only 1% of the power remaining.
# r is the power reduction rate which should be between 0 and 1
# L is the duration of the run in seconds
# F is period of the cycle in seconds It is assumed that this will
# be greater than the tr_duration, or else this will not work
# tr_duration is the duration of each TR in seconds
# Check the TR duration
            if F < tr_duration:
                msg = ('Period %0.0f is shorter than the TR duration '
                       '%0.0f' % (F, tr_duration))
                raise ValueError(msg)
percent_retained = 0.99 # What is the percentage of power retained
# Compare the power at the period frequency (in the numerator) with
# the power at the frequency of the DCT, AKA the highest possible
# frequency in the data (in the denominator)
numerator = 1 - r ** (2 * L / F) # Power of this period
denominator = 1 - r ** (2 * L / tr_duration) # Power of DCT freq.
# Calculate the retained power
power_drop = abs((numerator / denominator) - percent_retained)
return power_drop
# Solve for power reduction rate.
# This assumes that r is between 0 and 1
# Takes the duration and period as arguments
sol = optimize.minimize_scalar(power_drop,
bounds=(0, 1),
method='Bounded',
args=(duration, period, tr_duration))
# Pull out the solution
r = sol.x
# Weight the basis functions based on the power drop off
basis_weights = r ** np.arange(basis_funcs)
# Weigh the basis functions
weighted_basis_funcs = np.multiply(noise_drift, basis_weights)
# Average the drift
noise_drift = np.mean(weighted_basis_funcs, 1)
# Normalize so the sigma is 1
noise_drift = stats.zscore(noise_drift)
# Return noise
return noise_drift
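# Illustrative usage sketch, not part of the original module: a z-scored
# drift timecourse for a hypothetical 200 TR run with a 2 s TR, using the
# default cos_power_drop basis and 150 s period.
def _example_generate_noise_temporal_drift():
    return _generate_noise_temporal_drift(trs=200, tr_duration=2.0)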
def _generate_noise_temporal_autoregression(timepoints,
noise_dict,
dimensions,
mask,
):
"""Generate the autoregression noise
Make a slowly drifting timecourse with the given autoregression
parameters. This can take in both AR and MA components
Parameters
----------
timepoints : 1 Dimensional array
What time points are sampled by a TR
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
        ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
dimensions : 3 length array, int
What is the shape of the volume to be generated
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
Returns
----------
noise_autoregression : one dimensional array, float
Generates the autoregression noise timecourse
"""
# Pull out the relevant noise parameters
auto_reg_rho = noise_dict['auto_reg_rho']
ma_rho = noise_dict['ma_rho']
# Specify the order based on the number of rho supplied
auto_reg_order = len(auto_reg_rho)
ma_order = len(ma_rho)
# This code assumes that the AR order is higher than the MA order
if ma_order > auto_reg_order:
msg = 'MA order (%d) is greater than AR order (%d). Cannot run.' % (
ma_order, auto_reg_order)
raise ValueError(msg)
# Generate a random variable at each time point that is a decayed value
# of the previous time points
noise_autoregression = np.zeros((dimensions[0], dimensions[1],
dimensions[2], len(timepoints)))
err_vols = np.zeros((dimensions[0], dimensions[1], dimensions[2],
len(timepoints)))
for tr_counter in range(len(timepoints)):
# Create a brain shaped volume with appropriate smoothing properties
noise = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Store all of the noise volumes
err_vols[:, :, :, tr_counter] = noise
if tr_counter == 0:
noise_autoregression[:, :, :, tr_counter] = noise
else:
# Preset the volume to collect the AR estimated process
AR_vol = np.zeros((dimensions[0], dimensions[1], dimensions[2]))
# Iterate through both the AR and MA values
for pCounter in list(range(1, auto_reg_order + 1)):
past_TR = int(tr_counter - pCounter)
if tr_counter - pCounter >= 0:
# Pull out a previous TR
past_vols = noise_autoregression[:, :, :, past_TR]
# Add the discounted previous volume
AR_vol += past_vols * auto_reg_rho[pCounter - 1]
# If the MA order has at least this many coefficients
# then consider the error terms
if ma_order >= pCounter:
# Pull out a previous TR
past_noise = err_vols[:, :, :, past_TR]
# Add the discounted previous noise
AR_vol += past_noise * ma_rho[pCounter - 1]
noise_autoregression[:, :, :, tr_counter] = AR_vol + noise
# Z score the data so that all of the standard deviations of the voxels
# are one (but the ARMA coefs are unchanged)
noise_autoregression = stats.zscore(noise_autoregression, 3)
return noise_autoregression
def _generate_noise_temporal_phys(timepoints,
resp_freq=0.2,
heart_freq=1.17,
):
"""Generate the physiological noise.
Create noise representing the heart rate and respiration of the data.
Default values based on Walvaert, Durnez, Moerkerke, Verdoolaege and
Rosseel, 2011
Parameters
----------
timepoints : 1 Dimensional array
What time points, in seconds, are sampled by a TR
resp_freq : float
What is the frequency of respiration (in Hz)
heart_freq : float
What is the frequency of heart beat (in Hz)
Returns
----------
noise_phys : one dimensional array, float
Generates the physiological temporal noise timecourse
"""
resp_phase = (np.random.rand(1) * 2 * np.pi)[0]
heart_phase = (np.random.rand(1) * 2 * np.pi)[0]
# Find the rate for each timepoint
resp_rate = (resp_freq * 2 * np.pi)
heart_rate = (heart_freq * 2 * np.pi)
# Calculate the radians for each variable at this
# given TR
resp_radians = np.multiply(timepoints, resp_rate) + resp_phase
heart_radians = np.multiply(timepoints, heart_rate) + heart_phase
# Combine the two types of noise and append
noise_phys = np.cos(resp_radians) + np.sin(heart_radians)
# Normalize
noise_phys = stats.zscore(noise_phys)
return noise_phys
def _generate_noise_spatial(dimensions,
mask=None,
fwhm=4.0,
):
"""Generate code for Gaussian Random Fields.
Adapted from code found here:
http://andrewwalker.github.io/statefultransitions/post/gaussian-fields/
with permission from the author:
https://twitter.com/walkera101/status/785578499440377858. Original code
comes from http://mathematica.stackexchange.com/questions/4829
/efficiently-generating-n-d-gaussian-random-fields with a WTFPL (
http://www.wtfpl.net).
Parameters
----------
    dimensions : 3 length array, int
        What is the shape of the volume to be generated. The algorithm
        compresses the range if the dimensions are not all equal, so this
        is fixed by upsampling to a cube and then downsampling the volume.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
fwhm : float
What is the full width half max of the gaussian fields being created.
This is converted into a sigma which is used in this function.
However, this conversion was found empirically by testing values of
sigma and how it relates to fwhm values. The relationship that would be
found in such a test depends on the size of the brain (bigger brains
can have bigger fwhm).
However, small errors shouldn't matter too much since the fwhm
generated here can only be approximate anyway: firstly, although the
distribution that is being drawn from is set to this value,
this will manifest differently on every draw. Secondly, because of
the masking and dimensions of the generated volume, this does not
        behave simply: wrapping effects matter (the outputs are
closer to the input value if you have no mask).
Use _calc_fwhm on this volume alone if you have concerns about the
accuracy of the fwhm.
Returns
----------
noise_spatial : 3d array, float
Generates the spatial noise volume for these parameters
"""
# Check the input is correct
if len(dimensions) == 4:
logger.warning('4 dimensions have been supplied, only using 3')
dimensions = dimensions[0:3]
# If the dimensions are wrong then upsample now
if dimensions[0] != dimensions[1] or dimensions[1] != dimensions[2]:
max_dim = np.max(dimensions)
new_dim = (max_dim, max_dim, max_dim)
else:
new_dim = dimensions
def _logfunc(x, a, b, c):
"""Solve for y given x for log function.
Parameters
----------
x : float
x value of log function
a : float
x shift of function
b : float
rate of change
c : float
y shift of function
Returns
----------
float
y value of log function
"""
return (np.log(x + a) / np.log(b)) + c
def _fftIndgen(n):
"""# Specify the fft coefficents
Parameters
----------
n : int
Dim size to estimate over
Returns
----------
array of ints
fft indexes
"""
# Pull out the ascending and descending indexes
ascending = np.linspace(0, int(n / 2), int(n / 2 + 1))
elements = int(np.ceil(n / 2 - 1)) # Round up so that len(output)==n
descending = np.linspace(-elements, -1, elements)
return np.concatenate((ascending, descending))
def _Pk2(idxs, sigma):
"""# Specify the amplitude given the fft coefficents
Parameters
----------
idxs : 3 by voxel array int
fft indexes
sigma : float
spatial sigma
Returns
----------
amplitude : 3 by voxel array
amplitude of the fft coefficients
"""
# The first set of idxs ought to be zero so make the first value
# zero to avoid a divide by zero error
amp_start = np.array((0))
# Compute the amplitude of the function for a series of indices
amp_end = np.sqrt(np.sqrt(np.sum(idxs[:, 1:] ** 2, 0)) ** (-1 * sigma))
amplitude = np.append(amp_start, amp_end)
# Return the output
return amplitude
    # Convert from fwhm to sigma (relationship discovered empirically;
    # only an approximation up to sigma = 0 -> 5, which corresponds to
    # fwhm = 0 -> 8, and relies on an assumption of brain size).
spatial_sigma = _logfunc(fwhm, -0.36778719, 2.10601011, 2.15439247)
noise = np.fft.fftn(np.random.normal(size=new_dim))
# Create a meshgrid of the object
fft_vol = np.meshgrid(_fftIndgen(new_dim[0]), _fftIndgen(new_dim[1]),
_fftIndgen(new_dim[2]))
# Reshape the data into a vector
fft_vec = np.asarray((fft_vol[0].flatten(), fft_vol[1].flatten(), fft_vol[
2].flatten()))
# Compute the amplitude for each element in the grid
amp_vec = _Pk2(fft_vec, spatial_sigma)
# Reshape to be a brain volume
amplitude = amp_vec.reshape(new_dim)
# The output
noise_fft = (np.fft.ifftn(noise * amplitude)).real
# Fix the dimensionality of the data (if necessary)
noise_spatial = noise_fft[:dimensions[0], :dimensions[1], :dimensions[2]]
# Mask or not, then z score
if mask is not None:
# Mask the output
noise_spatial *= mask
# Z score the specific to the brain
noise_spatial[mask > 0] = stats.zscore(noise_spatial[mask > 0])
else:
# Take the grand mean/std and use for z scoring
grand_mean = (noise_spatial).mean()
grand_std = (noise_spatial).std()
noise_spatial = (noise_spatial - grand_mean) / grand_std
return noise_spatial
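# Illustrative usage sketch, not part of the original module: a smoothed
# gaussian random field for a hypothetical 12 x 12 x 12 volume, z scored
# across the whole volume because no mask is supplied.
def _example_generate_noise_spatial():
    return _generate_noise_spatial(dimensions=np.array([12, 12, 12]),
                                   fwhm=4.0,
                                   )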
def _generate_noise_temporal(stimfunction_tr,
tr_duration,
dimensions,
template,
mask,
noise_dict
):
"""Generate the temporal noise
Generate the time course of the average brain voxel. To change the
relative mixing of the noise components, change the sigma's specified
below.
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
tr_duration : int
How long is a TR, in seconds
dimensions : 3 length array, int
What is the shape of the volume to be generated
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
        ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
----------
noise_temporal : one dimensional array, float
Generates the temporal noise timecourse for these parameters
"""
# Set up common parameters
# How many TRs are there
trs = len(stimfunction_tr)
# What time points are sampled by a TR?
timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs))
# Preset the volume
noise_volume = np.zeros((dimensions[0], dimensions[1], dimensions[2], trs))
# Generate the physiological noise
if noise_dict['physiological_sigma'] != 0:
# Calculate the physiological time course
noise = _generate_noise_temporal_phys(timepoints,
)
# Create a brain shaped volume with similar smoothing properties
volume = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'physiological_sigma']
# Generate the AR noise
if noise_dict['auto_reg_sigma'] != 0:
# Calculate the AR time course volume
noise = _generate_noise_temporal_autoregression(timepoints,
noise_dict,
dimensions,
mask,
)
# Combine the volume and noise
noise_volume += noise * noise_dict['auto_reg_sigma']
# Generate the task related noise
if noise_dict['task_sigma'] != 0 and np.sum(stimfunction_tr) > 0:
# Calculate the task based noise time course
noise = _generate_noise_temporal_task(stimfunction_tr,
)
# Create a brain shaped volume with similar smoothing properties
volume = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'task_sigma']
# Finally, z score each voxel so things mix nicely
noise_volume = stats.zscore(noise_volume, 3)
# If it is a nan it is because you just divided by zero (since some
# voxels are zeros in the template)
noise_volume[np.isnan(noise_volume)] = 0
return noise_volume
def mask_brain(volume,
template_name=None,
mask_threshold=None,
mask_self=True,
):
""" Mask the simulated volume
This creates a mask specifying the approximate likelihood that a voxel is
part of the brain. All values are bounded to the range of 0 to 1. An
appropriate threshold to isolate brain voxels is >0.2. Critically,
    the data that should be used to create a template shouldn't already be
    masked/skull stripped. If it is then it will give inaccurate estimates
    of non-brain noise and corrupt estimates of SNR.
Parameters
----------
volume : multidimensional array
Either numpy array of a volume or a tuple describing the dimensions
of the mask to be created
template_name : str
What is the path to the template to be loaded? If empty then it
defaults to an MNI152 grey matter mask. This is ignored if mask_self
is True.
mask_threshold : float
What is the threshold (0 -> 1) for including a voxel in the mask? If
        None then the program will try to identify the last wide peak in a
histogram of the template (assumed to be the brain voxels) and takes
the minima before that peak as the threshold. Won't work when the
data is not bimodal.
mask_self : bool or None
If set to true then it makes a mask from the volume supplied (by
averaging across time points and changing the range). If it is set
to false then it will use the template_name as an input.
Returns
----------
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
template : 3 dimensional array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
"""
# If the volume supplied is a 1d array then output a volume of the
# supplied dimensions
if len(volume.shape) == 1:
volume = np.ones(volume)
# Load in the mask
if mask_self is True:
mask_raw = volume
elif template_name is None:
mfn = resource_stream(__name__, "sim_parameters/grey_matter_mask.npy")
mask_raw = np.load(mfn)
else:
mask_raw = np.load(template_name)
    # Make the mask 3d
if len(mask_raw.shape) == 3:
mask_raw = np.array(mask_raw)
elif len(mask_raw.shape) == 4 and mask_raw.shape[3] == 1:
mask_raw = np.array(mask_raw[:, :, :, 0])
else:
mask_raw = np.mean(mask_raw, 3)
    # Find the max value (so you can calculate these as proportions)
mask_max = mask_raw.max()
# Make sure the mask values range from 0 to 1 (make out of max of volume
# so that this is invertible later)
mask_raw = mask_raw / mask_max
    # If there is only one brain volume then make this a fourth dimension
if len(volume.shape) == 3:
temp = np.zeros([volume.shape[0], volume.shape[1], volume.shape[2], 1])
temp[:, :, :, 0] = volume
volume = temp
# Reshape the mask to be the size as the brain
brain_dim = volume.shape
mask_dim = mask_raw.shape
zoom_factor = (brain_dim[0] / mask_dim[0],
brain_dim[1] / mask_dim[1],
brain_dim[2] / mask_dim[2],
)
# Scale the mask according to the input brain
# You might get a warning if the zoom_factor is not an integer but you
# can safely ignore that.
template = ndimage.zoom(mask_raw, zoom_factor, order=2)
template[template < 0] = 0
# If the mask threshold is not supplied then guess it is a minima
# between the two peaks of the bimodal distribution of voxel activity
if mask_threshold is None:
# How many bins on either side of a peak will be compared
order = 5
# Make the histogram
template_vector = template.reshape(brain_dim[0] * brain_dim[1] *
brain_dim[2])
template_hist = np.histogram(template_vector, 100)
# Zero pad the values
binval = np.concatenate([np.zeros((order,)), template_hist[0]])
bins = np.concatenate([np.zeros((order,)), template_hist[1]])
# Identify the first two peaks
peaks = signal.argrelmax(binval, order=order)[0][0:2]
# What is the minima between peaks
minima = binval[peaks[0]:peaks[1]].min()
# What is the index of the last idx with this min value (since if
# zero, there may be many)
minima_idx = (np.where(binval[peaks[0]:peaks[1]] == minima) + peaks[
0])[-1]
# Convert the minima into a threshold
mask_threshold = bins[minima_idx][0]
# Mask the template based on the threshold
mask = np.zeros(template.shape)
mask[template > mask_threshold] = 1
return mask, template
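# Illustrative usage sketch, not part of the original module: build a mask
# and template from the volume itself and binarize at the automatically
# estimated threshold. The helper name is hypothetical.
def _example_mask_brain(volume):
    mask, template = mask_brain(volume)
    return mask, template  # mask is binary; template is continuous 0 -> 1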
def _noise_dict_update(noise_dict):
"""
Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
        ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary
"""
# Create the default dictionary
default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1,
'auto_reg_rho': [0.5], 'ma_rho': [0.0],
'physiological_sigma': 0, 'sfnr': 90, 'snr': 50,
'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0],
'fwhm': 4, 'matched': 1}
# Check what noise is in the dictionary and add if necessary. Numbers
# determine relative proportion of noise
for default_key in default_dict:
if default_key not in noise_dict:
noise_dict[default_key] = default_dict[default_key]
return noise_dict
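# Illustrative usage sketch, not part of the original module: missing keys
# are filled in with the defaults above while supplied values are kept.
def _example_noise_dict_update():
    noise_dict = _noise_dict_update({'snr': 30})
    return noise_dict  # noise_dict['snr'] == 30, noise_dict['sfnr'] == 90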
def _fit_spatial(noise,
noise_temporal,
drift_noise,
mask,
template,
spatial_sd,
temporal_sd,
noise_dict,
fit_thresh,
fit_delta,
iterations,
):
"""
Fit the noise model to match the SNR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
noise_temporal : multidimensional array, float
The temporal noise that was generated by _generate_temporal_noise
drift_noise : multidimensional array, float
The drift noise generated by _generate_noise_temporal_drift
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
    ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
fit_thresh : float
What proportion of the target parameter value is sufficient error to
warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target parameter
and the actual parameter
    iterations : int
        How many steps of fitting the SNR value will be performed. Usually
        converges after < 5 iterations.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters
"""
# Pull out information that is needed
dim_tr = noise.shape
base = template * noise_dict['max_activity']
base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1)
mean_signal = (base[mask > 0]).mean()
target_snr = noise_dict['snr']
# Iterate through different parameters to fit SNR and SFNR
spat_sd_orig = np.copy(spatial_sd)
iteration = 0
for iteration in list(range(iterations)):
# Calculate the new metrics
new_snr = _calc_snr(noise, mask)
# Calculate the difference between the real and simulated data
diff_snr = abs(new_snr - target_snr) / target_snr
# If the AR is sufficiently close then break the loop
if diff_snr < fit_thresh:
logger.info('Terminated SNR fit after ' + str(
iteration) + ' iterations.')
break
        # Convert the SNR into a spatial standard deviation
spat_sd_new = mean_signal / new_snr
# Update the variable
spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta)
# Prevent these going out of range
if spatial_sd < 0 or np.isnan(spatial_sd):
spatial_sd = 10e-3
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dim_tr,
spatial_sd=spatial_sd,
temporal_sd=temporal_sd,
)
# Sum up the noise of the brain
noise = base + drift_noise + noise_system
noise += (noise_temporal * temporal_sd) # Add the brain specific noise
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Failed to converge
if iterations == 0:
logger.info('No fitting iterations were run')
    elif iteration == iterations - 1:  # loop index is zero-based
logger.warning('SNR failed to converge.')
# Return the updated noise
return noise, spatial_sd
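# A worked numeric sketch of the update rule used in _fit_spatial (all
# numbers are hypothetical): the spatial standard deviation is nudged by a
# proportion fit_delta of the gap between the sd implied by the measured SNR
# and the sd the volume was generated with.
def _example_snr_update_step():
    """Sketch: one iteration of the spatial noise fitting rule."""
    mean_signal = 1000.0  # hypothetical mean brain signal
    spatial_sd = 20.0  # sd the current noise volume was generated with
    measured_snr = 40.0  # SNR measured on the current noise volume
    fit_delta = 0.5
    sd_implied = mean_signal / measured_snr  # 25.0: sd implied by the SNR
    spatial_sd -= (sd_implied - spatial_sd) * fit_delta
    return spatial_sd  # 17.5: less noise, so the next volume has higher SNR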
def _fit_temporal(noise,
mask,
template,
stimfunction_tr,
tr_duration,
spatial_sd,
temporal_proportion,
temporal_sd,
drift_noise,
noise_dict,
fit_thresh,
fit_delta,
iterations,
):
"""
Fit the noise model to match the SFNR and AR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
stimfunction_tr : Iterable, list
When do the stimuli events occur. Each element is a TR
tr_duration : float
What is the duration, in seconds, of each TR?
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
    temporal_proportion : float
What is the proportion of the temporal variance (as specified by the
SFNR noise parameter) that is accounted for by the system noise. If
this number is high then all of the temporal variability is due to
system noise, if it is low then all of the temporal variability is
due to brain variability.
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
drift_noise : multidimensional array, float
The drift noise generated by _generate_noise_temporal_drift
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
    ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
fit_thresh : float
What proportion of the target parameter value is sufficient error to
warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target parameter
and the actual parameter
    iterations : int
        The number of iterations for the SFNR and AR fitting. This is time
        consuming (a new timecourse has to be made on each iteration) so be
        careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters
"""
    # Pull out the information that is needed
dim_tr = noise.shape
dim = dim_tr[0:3]
base = template * noise_dict['max_activity']
base = base.reshape(dim[0], dim[1], dim[2], 1)
mean_signal = (base[mask > 0]).mean()
# Iterate through different parameters to fit SNR and SFNR
temp_sd_orig = np.copy(temporal_sd)
# Make a copy of the dictionary so it can be modified
new_nd = copy.deepcopy(noise_dict)
# What SFNR do you want
target_sfnr = noise_dict['sfnr']
# What AR do you want?
target_ar = noise_dict['auto_reg_rho'][0]
# Iterate through different MA parameters to fit AR
for iteration in list(range(iterations)):
# If there are iterations left to perform then recalculate the
# metrics and try again
# Calculate the new SFNR
new_sfnr = _calc_sfnr(noise, mask)
# Calculate the AR
new_ar, _ = _calc_ARMA_noise(noise,
mask,
len(noise_dict['auto_reg_rho']),
len(noise_dict['ma_rho']),
)
# Calculate the difference between the real and simulated data
sfnr_diff = abs(new_sfnr - target_sfnr) / target_sfnr
# Calculate the difference in the first AR component
ar_diff = new_ar[0] - target_ar
# If the SFNR and AR is sufficiently close then break the loop
if (abs(ar_diff) / target_ar) < fit_thresh and sfnr_diff < fit_thresh:
msg = 'Terminated AR fit after ' + str(iteration) + ' iterations.'
logger.info(msg)
break
# Otherwise update the noise metrics. Get the new temporal noise value
temp_sd_new = mean_signal / new_sfnr
temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta)
# Prevent these going out of range
if temporal_sd < 0 or np.isnan(temporal_sd):
temporal_sd = 10e-3
# Set the new system noise
temp_sd_system_new = np.sqrt((temporal_sd ** 2) * temporal_proportion)
# Get the new AR value
new_nd['auto_reg_rho'][0] -= (ar_diff * fit_delta)
# Don't let the AR coefficient exceed 1
if new_nd['auto_reg_rho'][0] >= 1:
new_nd['auto_reg_rho'][0] = 0.99
        # Generate the temporal noise with the updated parameters
noise_temporal = _generate_noise_temporal(stimfunction_tr,
tr_duration,
dim,
template,
mask,
new_nd,
)
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dim_tr,
spatial_sd=spatial_sd,
temporal_sd=temp_sd_system_new,
)
# Sum up the noise of the brain
noise = base + drift_noise + noise_system
noise += (noise_temporal * temporal_sd) # Add the brain specific noise
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Failed to converge
if iterations == 0:
logger.info('No fitting iterations were run')
    elif iteration == iterations - 1:  # loop index is zero-based
logger.warning('AR failed to converge.')
# Return the updated noise
return noise
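# A small numeric sketch of the system-noise share computed in _fit_temporal
# (values are hypothetical): the system component is assigned
# temporal_proportion of the total temporal variance, so its standard
# deviation scales with the square root of that proportion.
def _example_temporal_system_share():
    """Sketch: sd of the system share of the temporal variance."""
    import numpy as np
    temporal_sd = 10.0  # total temporal sd, e.g. mean_signal / sfnr
    temporal_proportion = 0.5
    sd_system = np.sqrt(temporal_sd ** 2 * temporal_proportion)
    return sd_system  # ~7.07: half the variance is 1/sqrt(2) of the sd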
def generate_noise(dimensions,
stimfunction_tr,
tr_duration,
template,
mask=None,
noise_dict=None,
temporal_proportion=0.5,
iterations=None,
fit_thresh=0.05,
fit_delta=0.5,
):
""" Generate the noise to be added to the signal.
Default noise parameters will create a noise volume with a standard
deviation of 0.1 (where the signal defaults to a value of 1). This has
    built-in estimates of how different types of noise mix. All noise
values can be set by the user or estimated with calc_noise.
Parameters
----------
dimensions : nd array
What is the shape of the volume to be generated
stimfunction_tr : Iterable, list
When do the stimuli events occur. Each element is a TR
tr_duration : float
What is the duration, in seconds, of each TR?
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
    ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
    temporal_proportion : float
What is the proportion of the temporal variance (as specified by the
SFNR noise parameter) that is accounted for by the system noise. If
this number is high then all of the temporal variability is due to
system noise, if it is low then all of the temporal variability is
due to brain variability.
    iterations : list of int
The first element is how many steps of fitting the SFNR and SNR values
will be performed. Usually converges after < 5. The second element
is the number of iterations for the AR fitting. This is much more
time consuming (has to make a new timecourse on each iteration) so
be careful about setting this appropriately.
fit_thresh : float
What proportion of the target parameter value is sufficient error to
warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
Returns
----------
noise : multidimensional array, float
Generates the noise volume for these parameters
"""
# Check the input data
if template.max() > 1.1:
raise ValueError('Template out of range')
# Change to be an empty dictionary if it is None
if noise_dict is None:
noise_dict = {}
# Take in the noise dictionary and add any missing information
noise_dict = _noise_dict_update(noise_dict)
# How many iterations will you perform? If unspecified it will set
# values based on whether you are trying to match noise specifically to
# this participant or just get in the ball park
if iterations is None:
if noise_dict['matched'] == 1:
iterations = [20, 20]
else:
iterations = [0, 0]
    ar0 = abs(noise_dict['auto_reg_rho'][0])
    ma0 = abs(noise_dict['ma_rho'][0])
    if abs(ar0 - ma0) < 0.1:
        logger.warning('ARMA coefs are close, may have trouble fitting')
# What are the dimensions of the volume, including time
dimensions_tr = (dimensions[0],
dimensions[1],
dimensions[2],
len(stimfunction_tr))
# Get the mask of the brain and set it to be 3d
if mask is None:
mask = np.ones(dimensions)
# Create the base (this inverts the process to make the template)
base = template * noise_dict['max_activity']
# Reshape the base (to be the same size as the volume to be created)
base = base.reshape(dimensions[0], dimensions[1], dimensions[2], 1)
base = np.ones(dimensions_tr) * base
# What is the mean signal of the non masked voxels in this template?
mean_signal = (base[mask > 0]).mean()
# Generate the temporal noise
noise_temporal = _generate_noise_temporal(stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
dimensions=dimensions,
template=template,
mask=mask,
noise_dict=noise_dict,
)
# Generate the drift noise
if noise_dict['drift_sigma'] != 0:
# Calculate the drift time course
noise = _generate_noise_temporal_drift(len(stimfunction_tr),
tr_duration,
)
# Create a volume with the drift properties
volume = np.ones(dimensions[:3])
# Combine the volume and noise
drift_noise = np.multiply.outer(volume, noise) * noise_dict[
'drift_sigma']
else:
# If there is no drift, then just make this zeros (in 4d)
drift_noise = np.zeros(dimensions_tr)
# Convert SFNR into the size of the standard deviation of temporal
# variability
temporal_sd = (mean_signal / noise_dict['sfnr'])
# Calculate the temporal sd of the system noise (as opposed to the noise
# attributed to the functional variability).
temporal_sd_system = np.sqrt((temporal_sd ** 2) * temporal_proportion)
# What is the standard deviation of the background activity
spat_sd = mean_signal / noise_dict['snr']
spatial_sd = np.sqrt((spat_sd ** 2) * (1 - temporal_proportion))
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dimensions_tr,
spatial_sd=spatial_sd,
temporal_sd=temporal_sd_system,
)
# Sum up the noise of the brain
noise = base + drift_noise + noise_system
noise += (noise_temporal * temporal_sd) # Add the brain specific noise
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Fit the SNR
noise, spatial_sd = _fit_spatial(noise,
noise_temporal,
drift_noise,
mask,
template,
spatial_sd,
temporal_sd_system,
noise_dict,
fit_thresh,
fit_delta,
iterations[0],
)
# Fit the SFNR and AR noise
noise = _fit_temporal(noise,
mask,
template,
stimfunction_tr,
tr_duration,
spatial_sd,
temporal_proportion,
temporal_sd,
drift_noise,
noise_dict,
fit_thresh,
fit_delta,
iterations[1],
)
# Return the noise
return noise
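# A minimal end-to-end sketch of calling generate_noise. The template and
# mask here are toy volumes and 'matched' is turned off so that no fitting
# iterations run; real use would pass a template and mask estimated from
# data.
def _example_generate_noise():
    """Sketch: generate a small noise volume with default parameters."""
    import numpy as np
    dimensions = np.array([8, 8, 8])
    stimfunction_tr = [0] * 50  # 50 TRs with no task events
    template = np.ones((8, 8, 8)) * 0.8  # toy 'brain likelihood' volume
    mask = np.ones((8, 8, 8))
    noise = generate_noise(dimensions=dimensions,
                           stimfunction_tr=stimfunction_tr,
                           tr_duration=2.0,
                           template=template,
                           mask=mask,
                           noise_dict={'matched': 0},
                           )
    return noise  # shape (8, 8, 8, 50)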
def compute_signal_change(signal_function,
noise_function,
noise_dict,
magnitude,
method='PSC',
):
""" Rescale the signal to be a given magnitude, based on a specified
metric (e.g. percent signal change). Metrics are heavily inspired by
Welvaert & Rosseel (2013). The rescaling is based on the maximal
activity in the timecourse. Importantly, all values within the
signal_function are scaled to have a min of -1 or max of 1, meaning that
the voxel value will be the same as the magnitude.
Parameters
----------
signal_function : timepoint by voxel array
The signal time course to be altered. This can have
multiple time courses specified as different columns in this
array. Conceivably you could use the output of
generate_stimfunction as the input but the temporal variance
will be incorrect. Critically, different values across voxels are
considered relative to each other, not independently. E.g., if the
voxel has a peak signal twice as high as another voxel's, then this
means that the signal after these transformations will still be
twice as high (according to the metric) in the first voxel relative
to the second
noise_function : timepoint by voxel numpy array
The time course of noise (a voxel created from generate_noise)
for each voxel specified in signal_function. This is necessary
for computing the mean evoked activity and the noise variability
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, autoregressive,
physiological and task-based noise types are mixed together in
_generate_temporal_noise. The parameter values for 'auto_reg_sigma',
'physiological_sigma' and 'task_sigma' describe the proportion of
mixing of these elements, respectively. However critically, 'SFNR' is
the parameter that controls how much noise these components contribute
to the brain. 'auto_reg_rho' and 'ma_rho' set parameters for the
autoregressive noise being simulated. Second, drift noise is added to
this, according to the size of 'drift_sigma'. Thirdly, system noise is
added based on the 'SNR' parameter. Finally, 'fwhm' is used to estimate
the smoothness of the noise being inserted. If 'matched' is set to
true, then it will fit the parameters to match the participant as best
as possible.
Variables defined as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
    ma_rho [list]: The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
    magnitude : list of floats
        This specifies the size, in terms of the metric chosen below,
        of the signal being generated. This can be a single number,
        and thus apply to all signal timecourses, or it can be an array
        and thus different for each voxel.
method : str
Select the procedure used to calculate the signal magnitude,
some of which are based on the definitions outlined in Welvaert &
Rosseel (2013):
- 'SFNR': Change proportional to the temporal variability,
as represented by the (desired) SFNR
- 'CNR_Amp/Noise-SD': Signal magnitude relative to the temporal
noise
- 'CNR_Amp2/Noise-Var_dB': Same as above but converted to decibels
- 'CNR_Signal-SD/Noise-SD': Standard deviation in signal
relative to standard deviation in noise
- 'CNR_Signal-Var/Noise-Var_dB': Same as above but converted to
decibels
- 'PSC': Calculate the percent signal change based on the
average activity of the noise (mean / 100 * magnitude)
Returns
----------
signal_function_scaled : 4d numpy array
The new signal volume with the appropriately set signal change
"""
# If you have only one magnitude value, duplicate the magnitude for each
# timecourse you have
assert type(magnitude) is list, '"magnitude" should be a list of floats'
if len(magnitude) == 1:
magnitude *= signal_function.shape[1]
# Check that the signal_function and noise_function are the same size
if signal_function.shape != noise_function.shape:
msg = 'noise_function is not the same size as signal_function'
raise ValueError(msg)
    # Scale all signals to have a range of -1 to 1, so that any values
    # less than this will be scaled appropriately
signal_function /= np.max(np.abs(signal_function))
# Iterate through the timecourses and calculate the metric
signal_function_scaled = np.zeros(signal_function.shape)
for voxel_counter in range(signal_function.shape[1]):
# Pull out the values for this voxel
sig_voxel = signal_function[:, voxel_counter]
noise_voxel = noise_function[:, voxel_counter]
magnitude_voxel = magnitude[voxel_counter]
# Calculate the maximum signal amplitude (likely to be 1,
# but not necessarily)
max_amp = np.max(np.abs(sig_voxel))
# Calculate the scaled time course using the specified method
if method == 'SFNR':
# How much temporal variation is there, relative to the mean
# activity
temporal_var = noise_voxel.mean() / noise_dict['sfnr']
# Multiply the timecourse by the variability metric
new_sig = sig_voxel * (temporal_var * magnitude_voxel)
elif method == 'CNR_Amp/Noise-SD':
# What is the standard deviation of the noise
noise_std = np.std(noise_voxel)
            # Multiply the signal timecourse by the CNR and noise
            # (rearranging the equation)
new_sig = sig_voxel * (magnitude_voxel * noise_std)
elif method == 'CNR_Amp2/Noise-Var_dB':
# What is the standard deviation of the noise
noise_std = np.std(noise_voxel)
# Rearrange the equation to compute the size of signal change in
# decibels
scale = (10 ** (magnitude_voxel / 20)) * noise_std / max_amp
new_sig = sig_voxel * scale
elif method == 'CNR_Signal-SD/Noise-SD':
# What is the standard deviation of the signal and noise
sig_std = np.std(sig_voxel)
noise_std = np.std(noise_voxel)
            # Multiply the signal timecourse by the CNR and noise
            # (rearranging the equation)
new_sig = sig_voxel * ((magnitude_voxel / max_amp) * noise_std
/ sig_std)
elif method == 'CNR_Signal-Var/Noise-Var_dB':
# What is the standard deviation of the signal and noise
sig_std = np.std(sig_voxel)
noise_std = np.std(noise_voxel)
# Rearrange the equation to compute the size of signal change in
# decibels
scale = (10 ** (magnitude_voxel / 20)) * noise_std / (max_amp *
sig_std)
new_sig = sig_voxel * scale
elif method == 'PSC':
# What is the average activity divided by percentage
scale = ((noise_voxel.mean() / 100) * magnitude_voxel)
new_sig = sig_voxel * scale
signal_function_scaled[:, voxel_counter] = new_sig
# Return the scaled time course
return signal_function_scaled
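# A short sketch of compute_signal_change with the 'PSC' method, using toy
# timecourses (names and values are illustrative): with PSC, each scaled
# signal peaks at mean(noise) / 100 * magnitude.
def _example_compute_signal_change():
    """Sketch: rescale one signal timecourse to a 2% signal change."""
    import numpy as np
    sig = np.sin(np.linspace(0, 2 * np.pi, 100)).reshape(100, 1)
    noise = np.random.randn(100, 1) + 100  # toy noise with a mean near 100
    nd = {'sfnr': 90}  # only consulted by the 'SFNR' method
    scaled = compute_signal_change(sig, noise, nd, [2.0], method='PSC')
    return scaled  # peak is approximately mean(noise) / 100 * 2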
def generate_1d_gaussian_rfs(n_voxels, feature_resolution, feature_range,
rf_size=15, random_tuning=True, rf_noise=0.):
"""
Creates a numpy matrix of Gaussian-shaped voxel receptive fields (RFs)
along one dimension. Can specify whether they are evenly tiled or randomly
tuned along the axis. RF range will be between 0 and 1.
Parameters
----------
n_voxels : int
Number of voxel RFs to create.
feature_resolution : int
Number of points along the feature axis.
feature_range : tuple (numeric)
A tuple indicating the start and end values of the feature range. e.g.
(0, 359) for motion directions.
rf_size : numeric
Width of the Gaussian receptive field. Should be given in units of the
feature dimension. e.g., 15 degrees wide in motion direction space.
random_tuning : boolean [default True]
Indicates whether or not the voxels are randomly tuned along the 1D
feature axis or whether tuning is evenly spaced.
rf_noise : float [default 0.]
Amount of uniform noise to add to the Gaussian RF. This will cause the
generated responses to be distorted by the same uniform noise for a
given voxel.
Returns
----------
voxel_rfs : 2d numpy array (float)
The receptive fields in feature space. Dimensions are n_voxels by
feature_resolution.
voxel_tuning : 1d numpy array (float)
The centers of the voxel RFs, in feature space.
"""
range_start, range_stop = feature_range
if random_tuning:
# Voxel selectivity is random
        # Uniform over [range_start, range_stop) rather than offset from 0
        voxel_tuning = np.floor(np.random.rand(n_voxels)
                                * (range_stop - range_start)
                                + range_start).astype(int)
else:
# Voxel selectivity is evenly spaced along the feature axis
voxel_tuning = np.linspace(range_start, range_stop, n_voxels + 1)
voxel_tuning = voxel_tuning[0:-1]
voxel_tuning = np.floor(voxel_tuning).astype(int)
gaussian = signal.gaussian(feature_resolution, rf_size)
voxel_rfs = np.zeros((n_voxels, feature_resolution))
for i in range(0, n_voxels):
voxel_rfs[i, :] = np.roll(gaussian, voxel_tuning[i] -
((feature_resolution // 2) - 1))
voxel_rfs += np.random.rand(n_voxels, feature_resolution) * rf_noise
voxel_rfs = voxel_rfs / np.max(voxel_rfs, axis=1)[:, None]
return voxel_rfs, voxel_tuning
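# A quick sketch of building a bank of evenly spaced receptive fields for a
# circular feature such as motion direction (parameter values are
# illustrative only).
def _example_generate_rfs():
    """Sketch: 50 voxel RFs tiling 0-359 degrees."""
    rfs, tuning = generate_1d_gaussian_rfs(n_voxels=50,
                                           feature_resolution=360,
                                           feature_range=(0, 359),
                                           rf_size=15,
                                           random_tuning=False)
    return rfs, tuning  # rfs is 50 x 360; tuning holds the RF centers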
def generate_1d_rf_responses(rfs, trial_list, feature_resolution,
feature_range, trial_noise=0.25):
"""
Generates trial-wise data for a given set of receptive fields (RFs) and
a 1d array of features presented across trials.
Parameters
----------
    rfs : 2d numpy array (float)
The receptive fields in feature space. Dimensions must be n_voxels
by feature_resolution.
trial_list : 1d numpy array (numeric)
The feature value of the stimulus presented on individual trials.
        Array size should be n_trials.
feature_resolution : int
Number of points along the feature axis.
feature_range : tuple (numeric)
A tuple indicating the start and end values of the feature range. e.g.
(0, 359) for motion directions.
trial_noise : float [default 0.25]
Amount of uniform noise to inject into the synthetic data. This is
generated independently for every trial and voxel.
Returns
----------
trial_data : 2d numpy array (float)
The synthetic data for each voxel and trial. Dimensions are n_voxels by
n_trials.
"""
range_start, range_stop = feature_range
stim_axis = np.linspace(range_start, range_stop,
feature_resolution)
if range_start > 0:
trial_list = trial_list + range_start
elif range_start < 0:
trial_list = trial_list - range_start
one_hot = np.eye(feature_resolution)
indices = [np.argmin(abs(stim_axis - x)) for x in trial_list]
stimulus_mask = one_hot[:, indices]
trial_data = rfs @ stimulus_mask
trial_data += np.random.rand(rfs.shape[0], trial_list.size) * \
(trial_noise * np.max(trial_data))
return trial_data
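# A follow-on sketch: generate synthetic trial responses from a bank of RFs
# for a hypothetical list of presented directions.
def _example_generate_trial_data():
    """Sketch: simulate voxel responses for 20 random direction trials."""
    import numpy as np
    rfs, _ = generate_1d_gaussian_rfs(50, 360, (0, 359), random_tuning=False)
    trials = np.random.randint(0, 360, size=20)  # presented directions
    data = generate_1d_rf_responses(rfs, trials, 360, (0, 359),
                                    trial_noise=0.25)
    return data  # 50 voxels by 20 trials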
| 125,482 | 36.04842 | 79 | py |
brainiak | brainiak-master/brainiak/hyperparamopt/__init__.py | """ Hyper parameter optimization package """
| 45 | 22 | 44 | py |
brainiak | brainiak-master/brainiak/hyperparamopt/hpo.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyper Parameter Optimization (HPO)
This implementation is based on the work in [Bergstra2011]_ and
[Bergstra2013]_.
.. [Bergstra2011] "Algorithms for Hyper-Parameter Optimization",
James S. Bergstra and Bardenet, Rémi and Bengio, Yoshua
and Kégl, Balázs. NIPS 2011
.. [Bergstra2013] "Making a Science of Model Search:
Hyperparameter Optimization in Hundreds of Dimensions for
Vision Architectures", James Bergstra, Daniel Yamins, David Cox.
JMLR W&CP 28 (1) : 115–123, 2013
"""
# Authors: Narayanan Sundaram (Intel Labs)
import logging
import math
import numpy as np
from scipy.special import erf
import scipy.stats as st
logger = logging.getLogger(__name__)
__all__ = [
"fmin",
]
def get_sigma(x, min_limit=-np.inf, max_limit=np.inf):
"""Compute the standard deviations around the points for a 1D GMM.
We take the distance from the nearest left and right neighbors
for each point, then use the max as the estimate of standard
deviation for the gaussian mixture around that point.
Arguments
---------
x : 1D array
Set of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
        Maximum limit for the distribution
Returns
-------
1D array
Array of standard deviations
"""
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
# Calculate the nearest left neighbor of x[i]
# Find the minimum of (x[i] - k) for k < x[i]
xleft = z[np.argmin([(x[i] - k) if k < x[i] else np.inf for k in z])]
# Calculate the nearest right neighbor of x[i]
# Find the minimum of (k - x[i]) for k > x[i]
xright = z[np.argmin([(k - x[i]) if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if (sigma[i] == -np.inf): # should never happen
sigma[i] = 1.0
return sigma
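# A small sketch of get_sigma on a handful of points: each sigma is the
# larger of the gaps to the nearest left and right neighbours, with finite
# limits also counting as neighbours.
def _example_get_sigma():
    """Sketch: per-point widths for a 1D GMM."""
    x = np.array([0.1, 0.4, 0.9])
    sigma = get_sigma(x, min_limit=0.0, max_limit=1.0)
    # For 0.4 the left gap is 0.3 and the right gap is 0.5, so sigma is 0.5
    return sigma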
class gmm_1d_distribution:
"""GMM 1D distribution.
Given a set of points, we create this object so that we
can calculate likelihoods and generate samples from this
1D Gaussian mixture model.
Attributes
----------
points : 1D array
Set of points to create the GMM
N : int
Number of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
Maximum limit for the distribution
weights : Optional[1D array], default : array of ones
Used to weight the points non-uniformly if required
"""
def __init__(self, x, min_limit=-np.inf, max_limit=np.inf, weights=1.0):
self.points = x
self.N = x.size
self.min_limit = min_limit
self.max_limit = max_limit
self.sigma = get_sigma(x, min_limit=min_limit, max_limit=max_limit)
self.weights = (2
/ (erf((max_limit - x) / (np.sqrt(2.) * self.sigma))
- erf((min_limit - x) / (np.sqrt(2.) * self.sigma)))
* weights)
self.W_sum = np.sum(self.weights)
def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y
def __call__(self, x):
"""Return the GMM likelihood for given point(s).
See :eq:`gmm-likelihood`.
Arguments
---------
x : scalar (or) 1D array of reals
Point(s) at which likelihood needs to be computed
Returns
-------
scalar (or) 1D array
Likelihood values at the given point(s)
"""
if np.isscalar(x):
return self.get_gmm_pdf(x)
else:
return np.array([self.get_gmm_pdf(t) for t in x])
def get_samples(self, n):
"""Sample the GMM distribution.
Arguments
---------
n : int
Number of samples needed
Returns
-------
1D array
Samples from the distribution
"""
normalized_w = self.weights / np.sum(self.weights)
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
samples = np.zeros(n)
k = 0
j = 0
while (k < n):
i = get_rand_index[j]
j = j + 1
if (j == n):
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
j = 0
v = np.random.normal(loc=self.points[i], scale=self.sigma[i])
if (v > self.max_limit or v < self.min_limit):
continue
else:
samples[k] = v
k = k + 1
if (k == n):
break
return samples
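# A brief usage sketch for gmm_1d_distribution: build the mixture from a few
# points, evaluate its likelihood, and draw samples within the limits.
def _example_gmm_1d():
    """Sketch: likelihoods and samples from a small 1D GMM."""
    x = np.array([0.2, 0.5, 0.8])
    gmm = gmm_1d_distribution(x, min_limit=0.0, max_limit=1.0)
    densities = gmm(np.array([0.0, 0.5, 1.0]))  # likelihood at three points
    samples = gmm.get_samples(n=100)  # all samples fall inside [0, 1]
    return densities, samples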
def get_next_sample(x, y, min_limit=-np.inf, max_limit=np.inf):
"""Get the next point to try, given the previous samples.
We use [Bergstra2013]_ to compute the point that gives the largest
Expected improvement (EI) in the optimization function. This model fits 2
different GMMs - one for points that have loss values in the bottom 15%
and another for the rest. Then we sample from the former distribution
and estimate EI as the ratio of the likelihoods of the 2 distributions.
We pick the point with the best EI among the samples that is also not
very close to a point we have sampled earlier.
Arguments
---------
x : 1D array
Samples generated from the distribution so far
y : 1D array
Loss values at the corresponding samples
min_limit : float, default : -inf
Minimum limit for the distribution
max_limit : float, default : +inf
Maximum limit for the distribution
Returns
-------
float
Next value to use for HPO
"""
z = np.array(list(zip(x, y)), dtype=np.dtype([('x', float), ('y', float)]))
z = np.sort(z, order='y')
n = y.shape[0]
    g = int(np.ceil(0.15 * n))
ldata = z[0:g]
gdata = z[g:n]
lymin = ldata['y'].min()
lymax = ldata['y'].max()
weights = (lymax - ldata['y']) / (lymax - lymin)
lx = gmm_1d_distribution(ldata['x'], min_limit=min_limit,
max_limit=max_limit, weights=weights)
gx = gmm_1d_distribution(gdata['x'], min_limit=min_limit,
max_limit=max_limit)
samples = lx.get_samples(n=1000)
ei = lx(samples) / gx(samples)
h = (x.max() - x.min()) / (10 * x.size)
# TODO
# assumes prior of x is uniform; should ideally change for other priors
# d = np.abs(x - samples[ei.argmax()]).min()
# CDF(x+d/2) - CDF(x-d/2) < 1/(10*x.size) then reject else accept
s = 0
while (np.abs(x - samples[ei.argmax()]).min() < h):
ei[ei.argmax()] = 0
s = s + 1
if (s == samples.size):
break
xnext = samples[ei.argmax()]
return xnext
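# A sketch of proposing the next sample for a toy quadratic loss: points
# with low loss pull the proposal towards themselves.
def _example_get_next_sample():
    """Sketch: propose the next x for a loss minimized at 0.3."""
    x = np.random.rand(30)
    y = (x - 0.3) ** 2
    xnext = get_next_sample(x, y, min_limit=0.0, max_limit=1.0)
    return xnext  # typically lands near 0.3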
def fmin(loss_fn,
space,
max_evals,
trials,
init_random_evals=30,
explore_prob=0.2):
"""Find the minimum of function through hyper parameter optimization.
Arguments
---------
loss_fn : ``function(*args) -> float``
Function that takes in a dictionary and returns a real value.
This is the function to be minimized.
space : dictionary
Custom dictionary specifying the range and distribution of
        the hyperparameters.
E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1),
'lo':0, 'hi':1}}``
for a 1-dimensional space with variable x in range [0,1]
max_evals : int
Maximum number of evaluations of loss_fn allowed
trials : list
Holds the output of the optimization trials.
Need not be empty to begin with, new trials are appended
at the end.
init_random_evals : Optional[int], default 30
Number of random trials to initialize the
optimization.
explore_prob : Optional[float], default 0.2
Controls the exploration-vs-exploitation ratio. Value should
        be in [0,1]. By default, 20% of trials are random samples.
Returns
-------
trial entry (dictionary of hyperparameters)
Best hyperparameter setting found.
        E.g. {'x': 5.6, 'loss': 0.5} where x is the best hyperparameter
value found and loss is the value of the function for the
best hyperparameter value(s).
Raises
------
ValueError
If the distribution specified in space does not support a ``rvs()``
method to generate random numbers, a ValueError is raised.
"""
for s in space:
if not hasattr(space[s]['dist'], 'rvs'):
            raise ValueError('Unknown distribution type for variable ' + s)
if 'lo' not in space[s]:
space[s]['lo'] = -np.inf
if 'hi' not in space[s]:
space[s]['hi'] = np.inf
if len(trials) > init_random_evals:
init_random_evals = 0
for t in range(max_evals):
sdict = {}
if t >= init_random_evals and np.random.random() > explore_prob:
use_random_sampling = False
else:
use_random_sampling = True
yarray = np.array([tr['loss'] for tr in trials])
for s in space:
sarray = np.array([tr[s] for tr in trials])
if use_random_sampling:
sdict[s] = space[s]['dist'].rvs()
else:
sdict[s] = get_next_sample(sarray, yarray,
min_limit=space[s]['lo'],
max_limit=space[s]['hi'])
logger.debug('Explore' if use_random_sampling else 'Exploit')
        logger.info('Next point %d = %s', t, sdict)
y = loss_fn(sdict)
sdict['loss'] = y
trials.append(sdict)
yarray = np.array([tr['loss'] for tr in trials])
yargmin = yarray.argmin()
    logger.info('Best point so far = %s', trials[yargmin])
return trials[yargmin]
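# A minimal sketch of driving fmin with a 1D quadratic loss and a uniform
# prior, mirroring the space format documented above.
def _example_fmin():
    """Sketch: minimize (x - 0.4)^2 over [0, 1]."""
    space = {'x': {'dist': st.uniform(0, 1), 'lo': 0., 'hi': 1.}}
    trials = []
    best = fmin(loss_fn=lambda s: (s['x'] - 0.4) ** 2,
                space=space,
                max_evals=60,
                trials=trials,
                init_random_evals=20)
    return best  # a dict such as {'x': 0.41, 'loss': 0.0001}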
| 11,678 | 30.06117 | 79 | py |
brainiak | brainiak-master/examples/isc/isfc.py | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of intersubject correlation (ISC) analysis
Computes ISC for all voxels within a brain mask, and computes
ISFC for voxels with high ISC.
First download the example dataset by running the download_data.sh
script locally (e.g., ./download_data.sh). This download includes
functional data for 5 subjects and a gray-matter anatomical mask.
"""
# Authors: Christopher Baldassano, Sam Nastase, and Mor Regev
# Princeton University, 2018
from os.path import abspath, dirname, join
from brainiak.isc import isc, isfc
import numpy as np
import nibabel as nib
from brainiak import image, io
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import fcluster, linkage
curr_dir = dirname(abspath("__file__"))
mask_fn = join(curr_dir, 'avg152T1_gray_3mm.nii.gz')
func_fns = [join(curr_dir,
'sub-{0:03d}-task-intact1.nii.gz'.format(sub))
for sub in np.arange(1, 6)]
print('Loading data from {0} subjects...'.format(len(func_fns)))
mask_image = io.load_boolean_mask(mask_fn, lambda x: x > 50)
masked_images = image.mask_images(io.load_images(func_fns),
mask_image)
coords = np.where(mask_image)
data = image.MaskedMultiSubjectData.from_masked_images(masked_images,
len(func_fns))
print('Calculating mean ISC on {0} voxels'.format(data.shape[1]))
iscs = isc(data, pairwise=False, summary_statistic='mean')
iscs = np.nan_to_num(iscs)
print('Writing ISC map to file...')
nii_template = nib.load(mask_fn)
isc_vol = np.zeros(nii_template.shape)
isc_vol[coords] = iscs
isc_image = nib.Nifti1Image(isc_vol, nii_template.affine,
nii_template.header)
nib.save(isc_image, 'example_isc.nii.gz')
isc_mask = (iscs > 0.2)[0, :]
print('Calculating mean ISFC on {0} voxels...'.format(np.sum(isc_mask)))
data_masked = data[:, isc_mask, :]
isfcs = isfc(data_masked, pairwise=False, summary_statistic='mean')
print('Clustering ISFC...')
Z = linkage(isfcs, 'ward')
z = fcluster(Z, 2, criterion='maxclust')
clust_inds = np.argsort(z)
# Show the ISFC matrix, sorted to show the two main clusters
plt.imshow(isfcs[np.ix_(clust_inds, clust_inds)])
plt.show()
| 2,791 | 35.25974 | 75 | py |
brainiak | brainiak-master/examples/factoranalysis/get_tfa_input_from_nifti.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.io
from scipy.stats import stats
import numpy as np
import nibabel as nib
from nilearn.input_data import NiftiMasker
import os
import logging
import sys
import click
@click.command()
@click.argument('nifti-file', type=click.File('rb'))
@click.argument('out-file', type=click.File('wb'))
@click.option('--mask-file', default=None, type=click.File('rb'), help='The mask file to get ROI')
@click.option('--zscore', is_flag=True, help='to zscore fMRI data')
@click.option('--detrend', is_flag=True, help='to detrend fMRI data')
@click.option('--smoothing-fwhm', default=None, type=float, help='the spatial smoothing kernel size (mm)')
def extract_data(nifti_file, mask_file, out_file, zscore, detrend, smoothing_fwhm):
if mask_file is None:
#whole brain, get coordinate info from nifti_file itself
mask = nib.load(nifti_file.name)
else:
mask = nib.load(mask_file.name)
    affine = mask.affine
if mask_file is None:
        mask_data = mask.get_fdata()
if mask_data.ndim == 4:
#get mask in 3D
img_data_type = mask.header.get_data_dtype()
n_tr = mask_data.shape[3]
mask_data = mask_data[:,:,:,n_tr//2].astype(bool)
mask = nib.Nifti1Image(mask_data.astype(img_data_type), affine)
else:
mask_data = mask_data.astype(bool)
else:
        mask_data = mask.get_fdata().astype(bool)
#get voxel coordinates
R = np.float64(np.argwhere(mask_data))
#get scanner RAS coordinates based on voxel coordinates
    if affine is not None:
R = (np.dot(affine[:3,:3], R.T) + affine[:3,3:4]).T
#get ROI data, and run preprocessing
    nifti_masker = NiftiMasker(mask_img=mask, standardize=zscore, detrend=detrend, smoothing_fwhm=smoothing_fwhm)
img = nib.load(nifti_file.name)
all_images = np.float64(nifti_masker.fit_transform(img))
data = all_images.T.copy()
#save data
subj_data = {'data': data, 'R': R}
scipy.io.savemat(out_file.name, subj_data)
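# Example invocation (file names are hypothetical): extract a z-scored,
# detrended ROI time series and its coordinates into a .mat file:
#
#     python get_tfa_input_from_nifti.py bold.nii.gz s0.mat \
#         --mask-file roi_mask.nii.gz --zscore --detrend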
if __name__ == '__main__':
extract_data()
| 2,657 | 34.44 | 113 | py |
brainiak | brainiak-master/examples/factoranalysis/latent_factor_from_tfa.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
import scipy.io
import numpy as np
import nibabel as nib
from subprocess import call
from scipy.stats import stats
from nilearn.input_data import NiftiMasker
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
#download data
data_dir = os.path.join(os.getcwd(), 'data')
file_name = os.path.join(data_dir, 's0.mat')
url = ' https://www.dropbox.com/s/r5s9tg4ekxzbrco/s0.mat?dl=0'
cmd = 'curl --location --create-dirs -o ' + file_name + url
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("File download was terminated by signal", -retcode, file=sys.stderr)
else:
print("File download returned", retcode, file=sys.stderr)
except OSError as e:
print("File download failed:", e, file=sys.stderr)
#get fMRI data and scanner RAS coordinates
all_data = scipy.io.loadmat(file_name)
data = all_data['data']
R = all_data['R']
# Z-score the data
data = stats.zscore(data, axis=1, ddof=1)
n_voxel, n_tr = data.shape
# Run TFA with downloaded data
from brainiak.factoranalysis.tfa import TFA
# uncomment below line to get help message on TFA
#help(TFA)
tfa = TFA(K=5,
max_num_voxel=int(n_voxel*0.5),
max_num_tr=int(n_tr*0.5),
verbose=True)
tfa.fit(data, R)
print("\n centers of latent factors are:")
print(tfa.get_centers(tfa.local_posterior_))
print("\n widths of latent factors are:")
widths = tfa.get_widths(tfa.local_posterior_)
print(widths)
print("\n stds of latent RBF factors are:")
rbf_std = np.sqrt(widths/(2.0))
print(rbf_std)
| 2,134 | 29.942029 | 82 | py |
brainiak | brainiak-master/examples/factoranalysis/latent_factor_from_htfa.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import scipy.io
import numpy as np
import nibabel as nib
from mpi4py import MPI
from subprocess import call
from scipy.stats import stats
from nilearn.input_data import NiftiMasker
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if rank == 0:
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
n_subj = 2
data_dir = os.path.join(os.getcwd(), 'data')
if rank == 0 and not os.path.exists(data_dir):
os.makedirs(data_dir)
url = []
url.append(' https://www.dropbox.com/s/r5s9tg4ekxzbrco/s0.mat?dl=0')
url.append(' https://www.dropbox.com/s/39tr01m76vxwaqa/s1.mat?dl=0')
#get fMRI data and scanner RAS coordinates
data = []
R = []
for idx in range(n_subj):
if idx % size == rank:
#download data
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
cmd = 'curl --location -o ' + file_name + url[idx]
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("File download was terminated by signal", -retcode, file=sys.stderr)
else:
print("File download returned", retcode, file=sys.stderr)
except OSError as e:
print("File download failed:", e, file=sys.stderr)
all_data = scipy.io.loadmat(file_name)
bold = all_data['data']
# z-score the data
bold = stats.zscore(bold, axis=1, ddof=1)
data.append(bold)
R.append(all_data['R'])
n_voxel, n_tr = data[0].shape
# Run HTFA with downloaded data
from brainiak.factoranalysis.htfa import HTFA
# uncomment below line to get help message on HTFA
#help(HTFA)
K = 5
htfa = HTFA(K=K,
n_subj=n_subj,
max_global_iter=5,
max_local_iter=2,
voxel_ratio=0.5,
tr_ratio=0.5,
max_voxel=n_voxel,
max_tr=n_tr,
verbose=True)
htfa.fit(data, R)
if rank == 0:
print("\n centers of global latent factors are:")
print(htfa.get_centers(htfa.global_posterior_))
print("\n widths of global latent factors are:")
widths = htfa.get_widths(htfa.global_posterior_)
print(widths)
print("\n stds of global latent RBF factors are:")
rbf_std = np.sqrt(widths/(2.0))
print(rbf_std)
| 2,838 | 30.197802 | 90 | py |
brainiak | brainiak-master/examples/factoranalysis/htfa_cv_example.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import math
import requests
import scipy.io
import numpy as np
from mpi4py import MPI
from subprocess import call
from scipy.stats import stats
from sklearn import model_selection
from sklearn.metrics import mean_squared_error
def recon_err(data, F, W):
"""Calcuate reconstruction error
Parameters
----------
data : 2D array
True data to recover.
F : 2D array
HTFA factor matrix.
W : 2D array
HTFA weight matrix.
Returns
-------
float
Returns root mean squared reconstruction error.
"""
recon = F.dot(W).ravel()
err = mean_squared_error(
data.ravel(),
recon,
multioutput='uniform_average')
return math.sqrt(err)
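# A tiny sketch of recon_err on toy matrices: with W solving the system
# exactly the error is zero, and it grows as the data are perturbed.
def _example_recon_err():
    """Sketch: RMSE between data and a factor reconstruction."""
    F = np.random.rand(100, 5)  # 100 voxels by 5 factors
    W = np.random.rand(5, 20)  # 5 factors by 20 TRs
    data = F.dot(W)
    assert recon_err(data, F, W) < 1e-9  # exact reconstruction
    return recon_err(data + 0.1, F, W)  # perturbed data gives ~0.1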
def get_train_err(htfa, data, F):
"""Calcuate training error
Parameters
----------
htfa : HTFA
        An instance of HTFA, the factor analysis class in BrainIAK.
data : 2D array
Input data to HTFA.
F : 2D array
HTFA factor matrix.
Returns
-------
float
Returns root mean squared error on training.
"""
W = htfa.get_weights(data, F)
return recon_err(data, F, W)
def get_test_err(htfa, test_weight_data, test_recon_data,
test_weight_R, test_recon_R, centers, widths):
"""Calcuate test error
Parameters
----------
htfa : HTFA
        An instance of HTFA, the factor analysis class in BrainIAK.
    test_weight_data : 2D array
Data used for testing weights.
test_recon_data : 2D array
Data used for testing reconstruction error.
    test_weight_R : 2D array
Coordinate matrix used for testing weights.
test_recon_R : 2D array
Coordinate matrix used for testing reconstruction error.
centers : 2D array
Center matrix of HTFA factors.
widths : 1D array
Width matrix of HTFA factors.
Returns
-------
float
Returns root mean squared error on test.
"""
# calculate F on test_weight_R, based on trained centers/widths
unique_R, inds = htfa.get_unique_R(test_weight_R)
F = htfa.get_factors(unique_R,
inds,
centers,
widths)
# calculate weights on test_weight_data
W = htfa.get_weights(test_weight_data, F)
# calculate F on final test_recon_data
unique_R, inds = htfa.get_unique_R(test_recon_R)
F = htfa.get_factors(unique_R,
inds,
centers,
widths)
return recon_err(test_recon_data, F, W)
n_subj = 2
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
group_id = int(rank/n_subj)
n_group = math.ceil(size/n_subj)
htfa_comm = comm.Split(group_id, rank)
htfa_rank = htfa_comm.Get_rank()
htfa_size = htfa_comm.Get_size()
if rank == 0:
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
data_dir = os.path.join(os.getcwd(), 'data')
if rank == 0 and not os.path.exists(data_dir):
os.makedirs(data_dir)
url = []
url.append(' https://www.dropbox.com/s/r5s9tg4ekxzbrco/s0.mat?dl=0')
url.append(' https://www.dropbox.com/s/39tr01m76vxwaqa/s1.mat?dl=0')
for idx in range(n_subj):
if idx % size == rank:
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
#check if file has already been downloaded
if not os.path.exists(file_name):
#check if URL exists
ret = requests.head(url[idx])
if ret.status_code == 200:
#download data
cmd = 'curl --location -o ' + file_name + url[idx]
try:
retcode = call(cmd, shell=True)
if retcode < 0:
print("File download was terminated by signal", -retcode, file=sys.stderr)
else:
print("File download returned", retcode, file=sys.stderr)
except OSError as e:
print("File download failed:", e, file=sys.stderr)
else:
print("File s%d.mat does not exist!\n"%idx)
comm.Barrier()
#get fMRI data and scanner RAS coordinates
data = []
R = []
mapping = {}
n_local_subj = 0
for idx in range(n_subj):
if idx % htfa_size == htfa_rank:
file_name = os.path.join(data_dir, 's' + str(idx) + '.mat')
all_data = scipy.io.loadmat(file_name)
bold = all_data['data']
# z-score the data
bold = stats.zscore(bold, axis=1, ddof=1)
data.append(bold)
R.append(all_data['R'])
mapping[str(n_local_subj)] = idx
n_local_subj += 1
min_K = 3
max_K = 6
n_K = 2
Ks = np.linspace(min_K, max_K, n_K, endpoint=True).astype(int)
n_splits = 3
# recon_err in shape n_splits*n_K
test_recon_errs = np.zeros((n_subj, n_splits, n_K))
tmp_test_recon_errs = np.zeros((n_subj, n_splits, n_K))
train_recon_errs = np.zeros((n_subj, n_splits, n_K))
tmp_train_recon_errs = np.zeros((n_subj, n_splits, n_K))
local_size = math.ceil(n_subj/size)
if n_local_subj > 0:
from brainiak.factoranalysis.htfa import HTFA
n_voxel, n_tr = data[0].shape
n_dim = R[0].shape[1]
test_size = 0.3
rnd_seed_voxel = 30000
rnd_seed_tr = 3000
tr_solver = 'exact'
nlss_method = 'dogbox'
nlss_loss = 'linear'
upper_ratio = 1.8
lower_ratio = 0.1
voxel_ratio = 0.25
tr_ratio = 0.1
max_voxel = 2000
max_tr = 200
max_sample_voxel = min(max_voxel,
int(voxel_ratio * n_voxel))
max_sample_tr = min(max_tr, int(tr_ratio * n_tr))
#split voxel and TR for two-level cross validation
ss_voxel = model_selection.ShuffleSplit(
n_splits=n_splits,
test_size=test_size,
random_state=rnd_seed_voxel)
voxel_indices = np.arange(n_voxel)
ss_voxel.get_n_splits(voxel_indices)
ss_tr = model_selection.ShuffleSplit(
n_splits=n_splits,
test_size=test_size,
random_state=rnd_seed_tr)
tr_indices = np.arange(n_tr)
ss_tr.get_n_splits(tr_indices)
train_voxels = []
test_voxels = []
train_trs = []
test_trs = []
for train_index, test_index in ss_voxel.split(voxel_indices):
train_voxels.append(train_index)
test_voxels.append(test_index)
for train_index, test_index in ss_tr.split(tr_indices):
train_trs.append(train_index)
test_trs.append(test_index)
for p in range(n_splits):
for idx in range(n_K):
index = p*n_K + idx
if index % n_group == group_id:
#split data and R
train_voxel_indices = train_voxels[p]
test_voxel_indices = test_voxels[p]
train_tr_indices = train_trs[p]
test_tr_indices = test_trs[p]
train_data = []
total_test_data = []
test_weight_data = []
test_recon_data = []
test_weight_R = []
test_recon_R = []
for s in range(n_local_subj):
train_data.append(data[s][:, train_tr_indices])
total_test_data.append(data[s][:, test_tr_indices])
test_weight_data.append(
total_test_data[s][train_voxel_indices, :])
test_recon_data.append(
total_test_data[s][test_voxel_indices, :])
test_weight_R.append(R[s][train_voxel_indices])
test_recon_R.append(R[s][test_voxel_indices])
htfa = HTFA(K=Ks[idx],
max_global_iter=5,
max_local_iter=2,
n_subj=n_subj,
nlss_method=nlss_method,
nlss_loss=nlss_loss,
tr_solver=tr_solver,
upper_ratio=upper_ratio,
lower_ratio=lower_ratio,
max_tr=max_sample_tr,
max_voxel=max_sample_voxel,
comm=htfa_comm,
verbose=True)
htfa.fit(train_data, R)
for s in range(n_local_subj):
#get posterior for each subject
subj_idx = mapping[str(s)]
start_idx = s * htfa.prior_size
end_idx = (s + 1) * htfa.prior_size
                    local_posterior = htfa.local_posterior_[start_idx:end_idx]
                    local_centers = htfa.get_centers(local_posterior)
                    local_widths = htfa.get_widths(local_posterior)
htfa.n_dim = n_dim
htfa.cov_vec_size = np.sum(np.arange(htfa.n_dim) + 1)
htfa.map_offset = htfa.get_map_offset()
#training happens on all voxels, but part of TRs
unique_R_all, inds_all = htfa.get_unique_R(R[s])
train_F = htfa.get_factors(unique_R_all,
inds_all,
local_centers,
local_widths)
#calculate train_recon_err
tmp_train_recon_errs[subj_idx, p,idx] = get_train_err(htfa,
train_data[s],
train_F)
#calculate weights on test_weight_data, test_recon_err on test_recon_data
tmp_test_recon_errs[subj_idx, p,idx] = get_test_err(htfa,
test_weight_data[s],
test_recon_data[s],
test_weight_R[s],
test_recon_R[s],
local_centers,
local_widths)
comm.Reduce(tmp_test_recon_errs, test_recon_errs, op=MPI.SUM)
comm.Reduce(tmp_train_recon_errs, train_recon_errs, op=MPI.SUM)
if rank == 0:
errs = train_recon_errs.reshape(n_subj * n_splits, n_K)
mean_errs = np.average(errs, axis=0)
print("train error on each K is\n")
print(mean_errs)
errs = test_recon_errs.reshape(n_subj * n_splits, n_K)
mean_errs = np.average(errs, axis=0)
print("test error on each K is\n")
print(mean_errs)
best_idx = np.argmin(mean_errs)
print("best K for test recon is %d " % (Ks[best_idx]))
| 11,484 | 32.289855 | 98 | py |
brainiak | brainiak-master/examples/eventseg/simulated_data.py | """Example of finding event segmentations on simulated data
This code generates simulated datasets that have temporally-clustered
structure (with the same series of latent event patterns). An event
segmentation is learned on the first dataset, and then we try to find the same
series of events in other datasets. We measure how well we find the latent
boundaries and the log-likelihood of the fits, and compare to a null model
in which the event order is randomly shuffled.
"""
import brainiak.eventseg.event
import numpy as np
from scipy import stats
import logging
import matplotlib.pyplot as plt
logging.basicConfig(level=logging.DEBUG)
def generate_event_labels(T, K, length_std):
event_labels = np.zeros(T, dtype=int)
start_TR = 0
for e in range(K - 1):
length = round(
((T - start_TR) / (K - e)) * (1 + length_std * np.random.randn()))
length = min(max(length, 1), T - start_TR - (K - e))
event_labels[start_TR:(start_TR + length)] = e
start_TR = start_TR + length
event_labels[start_TR:] = K - 1
return event_labels
def generate_data(V, T, event_labels, event_means, noise_std):
simul_data = np.empty((V, T))
for t in range(T):
simul_data[:, t] = stats.multivariate_normal.rvs(
event_means[:, event_labels[t]], cov=noise_std, size=1)
simul_data = stats.zscore(simul_data, axis=1, ddof=1)
return simul_data
# Parameters for creating small simulated datasets
V = 10
K = 10
T = 500
T2 = 300
# Generate the first dataset
np.random.seed(1)
event_means = np.random.randn(V, K)
event_labels = generate_event_labels(T, K, 0.1)
simul_data = generate_data(V, T, event_labels, event_means, 1)
# Find the events in this dataset
simul_seg = brainiak.eventseg.event.EventSegment(K)
simul_seg.fit(simul_data.T)
# Generate other datasets with the same underlying sequence of event
# patterns, and try to find matching events
test_loops = 10
bound_match = np.empty((2, test_loops))
LL = np.empty((2, test_loops))
for test_i in range(test_loops):
# Generate data
event_labels2 = generate_event_labels(T2, K, 0.5)
simul_data2 = generate_data(V, T2, event_labels2, event_means, 0.1)
# Find events matching previously-learned events
gamma, LL[0, test_i] = simul_seg.find_events(simul_data2.T)
est_events2 = np.argmax(gamma, axis=1)
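    # Score boundary agreement: np.diff is nonzero exactly at event
    # transitions, so the summed absolute difference counts boundary
    # mismatches; dividing by 2*K normalizes the score to [0, 1]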
bound_match[0, test_i] = 1 - np.sum(abs(np.diff(event_labels2) -
np.diff(est_events2))) / (2 * K)
# Run again, but with the order of events shuffled so that it no longer
# corresponds to the training data
gamma, LL[1, test_i] = simul_seg.find_events(simul_data2.T, scramble=True)
est_events2 = np.argmax(gamma, axis=1)
bound_match[1, test_i] = 1 - np.sum(abs(np.diff(event_labels2) -
np.diff(est_events2))) / (2 * K)
# Across the testing datasets, print how well we identify the true event
# boundaries and the log-likelihoods in real vs. shuffled data
print("Boundary match: {:.2} (null: {:.2})".format(
np.mean(bound_match[0, :]), np.mean(bound_match[1, :])))
print("Log-likelihood: {:.3} (null: {:.3})".format(
np.mean(LL[0, :]), np.mean(LL[1, :])))
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(simul_data2, interpolation='nearest', cmap=plt.cm.bone,
aspect='auto')
plt.xlabel('Timepoints')
plt.ylabel('Voxels')
plt.subplot(2, 1, 2)
gamma, LL[0, test_i] = simul_seg.find_events(simul_data2.T)
est_events2 = np.argmax(gamma, axis=1)
plt.plot(est_events2)
plt.xlabel('Timepoints')
plt.ylabel('Event label')
plt.show()
| 3,607 | 34.372549 | 78 | py |
brainiak | brainiak-master/examples/fcma/corr_comp.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import numpy as np
from brainiak.fcma.util import compute_correlation
from brainiak.fcma.preprocessing import generate_epochs_info
from brainiak import image, io
import scipy.io
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
# python3 corr_comp.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
if len(sys.argv) != 5:
        logger.error('the number of input arguments is not correct')
sys.exit(1)
data_dir = sys.argv[1]
extension = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
images = io.load_images_from_dir(data_dir, extension)
mask = io.load_boolean_mask(mask_file)
conditions = io.load_labels(epoch_file)
raw_data = list(image.multimask_images(images, (mask,)))
epoch_info = generate_epochs_info(conditions)
for idx, epoch in enumerate(epoch_info):
label = epoch[0]
sid = epoch[1]
start = epoch[2]
end = epoch[3]
mat = raw_data[sid][0][:, start:end]
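        # compute_correlation expects float32, C-contiguous input, so
        # convert the matrix before computing correlations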
mat = np.ascontiguousarray(mat, dtype=np.float32)
logger.info(
'start to compute correlation for subject %d epoch %d with label %d' %
(sid, idx, label)
)
corr = compute_correlation(mat, mat)
mdict = {}
mdict['corr'] = corr
filename = str(label) + '_' + str(sid) + '_' + str(idx)
logger.info(
'start to write the correlation matrix to disk as %s' %
filename
)
scipy.io.savemat(filename, mdict)
| 2,441 | 36 | 113 | py |
brainiak | brainiak-master/examples/fcma/classification.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.classifier import Classifier
from brainiak.fcma.preprocessing import prepare_fcma_data
from brainiak import io
from sklearn import svm
#from sklearn.linear_model import LogisticRegression
import sys
import logging
import numpy as np
from scipy.spatial.distance import hamming
from sklearn import model_selection
#from sklearn.externals import joblib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
def example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
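    # rotate the first subject's epochs to the end of the list so they
    # serve as the test set; fit() expects the training samples to come first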
rearranged_data = raw_data[num_epochs_per_subj:] + raw_data[0:num_epochs_per_subj]
rearranged_labels = labels[num_epochs_per_subj:] + labels[0:num_epochs_per_subj]
clf.fit(list(zip(rearranged_data, rearranged_data)), rearranged_labels,
num_training_samples=num_epochs_per_subj*(num_subjects-1))
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[0:num_epochs_per_subj]
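    # scipy's hamming returns the fraction of mismatched entries, so
    # multiplying by the number of epochs gives a count of errors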
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
    # when the kernel matrix is computed in portions, the test data is
    # already included in the classifier
print(clf.score(None, test_labels))
def example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj):
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
for i in range(num_subjects):
leave_start = i * num_epochs_per_subj
leave_end = (i+1) * num_epochs_per_subj
training_data = raw_data[0:leave_start] + raw_data[leave_end:]
test_data = raw_data[leave_start:leave_end]
training_labels = labels[0:leave_start] + labels[leave_end:]
test_labels = labels[leave_start:leave_end]
clf.fit(list(zip(training_data, training_data)), training_labels)
# joblib can be used for saving and loading models
#joblib.dump(clf, 'model/logistic.pkl')
#clf = joblib.load('model/svm.pkl')
predict = clf.predict(list(zip(test_data, test_data)))
print(predict)
print(clf.decision_function(list(zip(test_data, test_data))))
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when leaving subject %d out for testing, the accuracy is %d / %d = %.2f' %
(i, num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
print(clf.score(list(zip(test_data, test_data)), test_labels))
def example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj):
# NOTE: this method does not work for sklearn.svm.SVC with precomputed kernel
# when the kernel matrix is computed in portions; also, this method only works
# for self-correlation, i.e. correlation between the same data matrix.
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
# no shuffling in cv
skf = model_selection.StratifiedKFold(n_splits=num_subjects,
shuffle=False)
scores = model_selection.cross_val_score(clf, list(zip(raw_data, raw_data)),
y=labels,
cv=skf)
print(scores)
logger.info(
'the overall cross validation accuracy is %.2f' %
np.mean(scores)
)
def example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
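    # the last subject's epochs are held out for testing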
clf.fit(list(zip(raw_data[0:num_training_samples], raw_data2[0:num_training_samples])),
labels[0:num_training_samples])
X = list(zip(raw_data[num_training_samples:], raw_data2[num_training_samples:]))
predict = clf.predict(X)
print(predict)
print(clf.decision_function(X))
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
    # when the kernel matrix is computed in portions, the test data is
    # already included in the classifier
print(clf.score(X, test_labels))
def example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels,
num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data, raw_data2)), labels,
num_training_samples=num_training_samples)
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
    # when the kernel matrix is computed in portions, the test data is
    # already included in the classifier
print(clf.score(None, test_labels))
# python3 classification.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
if len(sys.argv) != 5:
        logger.error('the number of input arguments is not correct')
sys.exit(1)
data_dir = sys.argv[1]
extension = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
epoch_list = np.load(epoch_file)
num_subjects = len(epoch_list)
num_epochs_per_subj = epoch_list[0].shape[1]
images = io.load_images_from_dir(data_dir, extension)
mask = io.load_boolean_mask(mask_file)
conditions = io.load_labels(epoch_file)
raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj)
# test of two different components for correlation computation
# images = io.load_images_from_dir(data_dir, extension)
# mask2 = io.load_boolean_mask('face_scene/visual_top_mask.nii.gz')
# raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
# mask2)
#example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
#example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
| 9,178 | 49.434066 | 129 | py |
brainiak | brainiak-master/examples/fcma/mvpa_voxel_selection.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.mvpa_voxelselector import MVPAVoxelSelector
from brainiak.fcma.preprocessing import prepare_searchlight_mvpa_data
from brainiak import io
from sklearn import svm
import sys
from mpi4py import MPI
import logging
import nibabel as nib
import numpy as np
from brainiak.searchlight.searchlight import Searchlight
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
"""
example running command in run_mvpa_voxel_selection.sh
"""
if __name__ == '__main__':
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
            'program starts in %d process(es)' %
MPI.COMM_WORLD.Get_size()
)
if len(sys.argv) != 6:
        logger.error('the number of input arguments is not correct')
sys.exit(1)
data_dir = sys.argv[1]
suffix = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
# all MPI processes read the mask; the mask file is small
mask_image = nib.load(mask_file)
mask = io.load_boolean_mask(mask_file)
data = None
labels = None
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
'mask size: %d' %
np.sum(mask)
)
images = io.load_images_from_dir(data_dir, suffix=suffix)
conditions = io.load_labels(epoch_file)
data, labels = prepare_searchlight_mvpa_data(images, conditions)
# setting the random argument produces random voxel selection results
# for non-parametric statistical analysis.
# There are three random options:
# RandomType.NORANDOM is the default
# RandomType.REPRODUCIBLE permutes the voxels in the same way every run
# RandomType.UNREPRODUCIBLE permutes the voxels differently across runs
# example
#from brainiak.fcma.preprocessing import RandomType
#data, labels = prepare_searchlight_mvpa_data(images, conditions,
# random=RandomType.UNREPRODUCIBLE)
    # the following line is an example of leaving a subject out
#epoch_info = [x for x in epoch_info if x[1] != 0]
num_subjs = int(sys.argv[5])
# create a Searchlight object
sl = Searchlight(sl_rad=1)
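    # sl_rad=1 gives a cubic searchlight of edge 2 * sl_rad + 1 = 3 voxels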
mvs = MVPAVoxelSelector(data, mask, labels, num_subjs, sl)
clf = svm.SVC(kernel='linear', shrinking=False, C=1, gamma='auto')
# only rank 0 has meaningful return values
score_volume, results = mvs.run(clf)
# this output is just for result checking
if MPI.COMM_WORLD.Get_rank()==0:
        score_volume = np.nan_to_num(score_volume.astype(float))
io.save_as_nifti_file(score_volume, mask_image.affine,
'result_score.nii.gz')
        seq_volume = np.zeros(mask.shape, dtype=int)
        seq = np.zeros(len(results), dtype=int)
with open('result_list.txt', 'w') as fp:
            for idx, tup in enumerate(results):
                fp.write(str(tup[0]) + ' ' + str(tup[1]) + '\n')
                seq[tup[0]] = idx
seq_volume[mask] = seq
io.save_as_nifti_file(seq_volume, mask_image.affine,
'result_seq.nii.gz')
| 3,996 | 39.373737 | 94 | py |
brainiak | brainiak-master/examples/fcma/generate_fcma_data.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate simulated data for FCMA example
Generate example data for FCMA analyses using fmrisim.
This creates two conditions, a and b, with 5 trials for each condition and
5 participants total. Each trial is 10 TRs long with 7 TRs of stimulation.
Brains are extremely downsampled (10 voxels cubed) in order to help
processing speed.
The signal that discriminates these conditions is such that one region
responds to both conditions whereas another region responds differently to
the two conditions. For instance, imagine voxel X responds to common
activation but voxel A only responds in condition A and voxel B only
responds in condition B.
Authors: Cameron Ellis (Princeton) 2017
"""
import logging
import numpy as np
from brainiak.utils import fmrisim as sim
import nibabel
import os
logger = logging.getLogger(__name__)
# Default experimental parameters (these can all be changed, but changing
# them will affect processing time)
participants = 5 # How many participants are being created
epochs = 5 # How many trials
dimensions = np.array([10, 10, 10]) # What is the size of the brain
stim_dur = 7 # How long is each stimulation period
rest_dur = 3 # How long is the rest between stimulation
conds = 2 # How many conditions are there
fcma_better = 1 # this data is made so fcma will succeed and mvpa will fail
# Where will the data be stored?
directory = 'simulated/'
# Make the directory if it hasn't been made yet
if os.path.isdir(directory) is False:
os.mkdir(directory)
# Prepare the feature attributes
feature_size = [1]
feature_type = ['cube']
coordinates=[]
coordinates += [np.array(
[[3, 3, 3], [3, 5, 4]])]
coordinates += [np.array(
[[3, 3, 3], [6, 5, 4]])]
coordinates += [np.array(
[[3, 3, 3], [5, 6, 4]])]
signal_magnitude = [1] # How big is the signal (in SD)
# Inputs for generate_stimfunction
onsets = list(range(conds))
weights = list(range(conds))
tr_duration = 2
event_durations = [tr_duration]
trial_dur = stim_dur + rest_dur
duration = epochs * trial_dur * conds
# Create the epoch cube
epoch = np.zeros([conds, epochs * conds, int(duration / tr_duration)], np.int8,
order='C')
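# epoch has shape (conditions, total epochs, TRs); an entry of 1 marks
# the samples during which a given epoch is being stimulated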
# Iterate through the epochs and conditions
for cond_counter in list(range(conds)):
onsets[cond_counter] = [] # Add a list for this condition
weights[cond_counter] = []
for idx in list(range(0, epochs)):
# When does each epoch start and end
start_idx = (idx * trial_dur * conds) + (trial_dur * cond_counter)
end_idx = start_idx + stim_dur
# Store these start and end times
onsets[cond_counter] += list(range(start_idx, end_idx, tr_duration))
epoch[cond_counter, idx * conds + cond_counter, start_idx:end_idx] = 1
# The pattern of activity for each trial
weight = ([1] * int(np.floor(stim_dur / 2))) + ([-1]*int(np.ceil(
stim_dur / 2)))
weights[cond_counter] += weight
# Iterate through the conditions to make the necessary functions
for cond in list(range(conds)):
# Generate a volume representing the location and quality of the signal
volume_signal = sim.generate_signal(dimensions=dimensions,
feature_coordinates=coordinates[cond],
feature_type=feature_type,
feature_size=feature_size,
signal_magnitude=signal_magnitude,
)
# Create the time course for the signal to be generated
stimfunction_cond = sim.generate_stimfunction(onsets=onsets[cond],
event_durations=
event_durations,
total_time=duration,
weights=weights[cond],
)
# Convolve the HRF with the stimulus sequence
signal_function = sim.double_gamma_hrf(stimfunction=stimfunction_cond,
tr_duration=tr_duration,
)
# Multiply the HRF timecourse with the signal
signal_cond = sim.apply_signal(signal_function=signal_function,
volume_signal=volume_signal,
)
# Concatenate all the signal and function files
if cond == 0:
stimfunction = stimfunction_cond
signal = signal_cond
else:
stimfunction = list(np.add(stimfunction, stimfunction_cond))
signal += signal_cond
# Generate the mask of the signal
mask, template = sim.mask_brain(signal)
# Mask the signal to the shape of a brain (does not attenuate signal according
# to grey matter likelihood)
signal *= mask.reshape(dimensions[0], dimensions[1], dimensions[2], 1)
# Downsample the stimulus function to generate it in TR time
stimfunction_tr = stimfunction[::int(tr_duration * 1000)]
# Iterate through the participants and store each participant's data
epochs = []
for participantcounter in range(1, participants + 1):
# Add the epoch cube
epochs += [epoch]
    # Construct the output file name
savename = directory + 'p' + str(participantcounter) + '.nii'
    # Create the noise volumes (using the default parameters)
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=stimfunction_tr,
tr_duration=tr_duration,
template=template,
mask=mask,
)
# Combine the signal and the noise
brain = signal + noise
# Save the volume
affine_matrix = np.diag([-1, 1, 1, 1]) # LR gets flipped
brain_nifti = nibabel.Nifti1Image(brain, affine_matrix)
nibabel.save(brain_nifti, savename)
# Save the epochs
np.save(directory + 'epoch_labels.npy', epochs)
# Store the mask
brain_nifti = nibabel.Nifti1Image(mask, affine_matrix)
nibabel.save(brain_nifti, directory + 'mask.nii')
| 6,688 | 35.752747 | 79 | py |
brainiak | brainiak-master/examples/fcma/mvpa_classification.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import svm
#from sklearn.linear_model import LogisticRegression
import sys
import logging
from brainiak.fcma.preprocessing import prepare_mvpa_data
from brainiak import io
import numpy as np
from scipy.spatial.distance import hamming
from sklearn import model_selection
#from sklearn.externals import joblib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
# python3 mvpa_classification.py face_scene bet.nii.gz face_scene/visual_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
if len(sys.argv) != 5:
        logger.error('the number of input arguments is not correct')
sys.exit(1)
data_dir = sys.argv[1]
extension = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
epoch_list = np.load(epoch_file)
num_subjects = len(epoch_list)
num_epochs_per_subj = epoch_list[0].shape[1]
logger.info(
'doing MVPA training and classification on %d subjects, each of which has %d epochs' %
(num_subjects, num_epochs_per_subj)
)
images = io.load_images_from_dir(data_dir, extension)
mask = io.load_boolean_mask(mask_file)
conditions = io.load_labels(epoch_file)
processed_data, labels = prepare_mvpa_data(images, conditions, mask)
# transpose data to facilitate training and prediction
processed_data = processed_data.T
clf = svm.SVC(kernel='linear', shrinking=False, C=1, gamma='auto')
# doing leave-one-subject-out cross validation
for i in range(num_subjects):
leave_start = i * num_epochs_per_subj
leave_end = (i+1) * num_epochs_per_subj
training_data = np.concatenate((processed_data[0:leave_start], processed_data[leave_end:]), axis=0)
test_data = processed_data[leave_start:leave_end]
training_labels = np.concatenate((labels[0:leave_start], labels[leave_end:]), axis=0)
test_labels = labels[leave_start:leave_end]
clf.fit(training_data, training_labels)
# joblib can be used for saving and loading models
#joblib.dump(clf, 'model/logistic.pkl')
#clf = joblib.load('model/svm.pkl')
predict = clf.predict(test_data)
print(predict)
print(clf.decision_function(test_data))
print(np.asanyarray(test_labels))
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when leaving subject %d out for testing, the accuracy is %d / %d = %.2f' %
(i, num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# use model selection
# no shuffling in cv
skf = model_selection.StratifiedKFold(n_splits=num_subjects,
shuffle=False)
scores = model_selection.cross_val_score(clf, processed_data,
y=labels,
cv=skf)
print(scores)
logger.info(
'the overall cross validation accuracy is %.2f' %
np.mean(scores)
)
logger.info('MVPA training and classification done')
| 4,017 | 40.42268 | 119 | py |
brainiak | brainiak-master/examples/fcma/voxel_selection.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.voxelselector import VoxelSelector
from brainiak.fcma.preprocessing import prepare_fcma_data
from brainiak import io
from sklearn import svm
import sys
from mpi4py import MPI
import logging
import numpy as np
import nibabel as nib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
"""
example running command in run_voxel_selection.sh
"""
if __name__ == '__main__':
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
            'program starts in %d process(es)' %
MPI.COMM_WORLD.Get_size()
)
if len(sys.argv) != 7:
        logger.error('the number of input arguments is not correct')
sys.exit(1)
data_dir = sys.argv[1]
suffix = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
images = io.load_images_from_dir(data_dir, suffix=suffix)
mask = io.load_boolean_mask(mask_file)
conditions = io.load_labels(epoch_file)
raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
# setting the random argument produces random voxel selection results
# for non-parametric statistical analysis.
# There are three random options:
# RandomType.NORANDOM is the default
# RandomType.REPRODUCIBLE permutes the voxels in the same way every run
# RandomType.UNREPRODUCIBLE permutes the voxels differently across runs
# example:
# from brainiak.fcma.preprocessing import RandomType
# raw_data, _, labels = prepare_fcma_data(images, conditions, mask,
# random=RandomType.REPRODUCIBLE)
# if providing two masks, just append the second mask as the last input argument
# and specify raw_data2
# example:
# images = io.load_images_from_dir(data_dir, extension)
# mask2 = io.load_boolean_mask('face_scene/mask.nii.gz')
# raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
# mask2)
epochs_per_subj = int(sys.argv[5])
num_subjs = int(sys.argv[6])
    # the following line is an example of leaving a subject out
#vs = VoxelSelector(labels[0:204], epochs_per_subj, num_subjs-1, raw_data[0:204])
# if using all subjects
vs = VoxelSelector(labels, epochs_per_subj, num_subjs, raw_data)
# if providing two masks, just append raw_data2 as the last input argument
#vs = VoxelSelector(labels, epochs_per_subj, num_subjs, raw_data, raw_data2=raw_data2)
# for cross validation, use SVM with precomputed kernel
clf = svm.SVC(kernel='precomputed', shrinking=False, C=10, gamma='auto')
results = vs.run(clf)
# this output is just for result checking
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
'correlation-based voxel selection is done'
)
#print(results[0:100])
mask_img = nib.load(mask_file)
        mask = mask_img.get_data().astype(bool)
score_volume = np.zeros(mask.shape, dtype=np.float32)
score = np.zeros(len(results), dtype=np.float32)
        seq_volume = np.zeros(mask.shape, dtype=int)
        seq = np.zeros(len(results), dtype=int)
with open('result_list.txt', 'w') as fp:
            for idx, tup in enumerate(results):
                fp.write(str(tup[0]) + ' ' + str(tup[1]) + '\n')
                score[tup[0]] = tup[1]
                seq[tup[0]] = idx
score_volume[mask] = score
seq_volume[mask] = seq
io.save_as_nifti_file(score_volume, mask_img.affine,
'result_score.nii.gz')
io.save_as_nifti_file(seq_volume, mask_img.affine,
'result_seq.nii.gz')
| 4,518 | 42.038095 | 90 | py |
brainiak | brainiak-master/examples/funcalign/searchlight_srm_example.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed Searchlight SRM Example.
This example runs searchlight SRM on a time segment matching experiment
using the Sherlock dataset.
Example Usage
-------
To run with 4 ranks:
$ mpirun -n 4 python3 searchlight_srm_example.py
Author
-------
Hejia Zhang (Princeton University ELE Department)
Notes
-------
It's an implementation of:
Zhang, Hejia, et al. "A Searchlight Factor Model Approach for Locating Shared
Information in Multi-Subject fMRI Analysis." arXiv preprint arXiv:1609.09432 (2016).
https://arxiv.org/abs/1609.09432
"""
import numpy as np
from mpi4py import MPI
import sys
import scipy.io as sio
from scipy import stats
from brainiak.searchlight.searchlight import Searchlight
from brainiak.funcalign.srm import SRM
from brainiak.fcma.util import compute_correlation
import warnings
# parameters
sl_rad = 1 #edge length of each searchlight will be 1+2*sl_rad
nfeature = 10 #number of features in SRM for each searchlight
niter = 10 #number of iterations in SRM
# sanity check
if sl_rad <= 0:
raise ValueError('sl_rad must be positive')
if nfeature > (1+2*sl_rad)**3:
    print('nfeature truncated')
nfeature = int((1+2*sl_rad)**3)
# MPI parameters, do not need to change
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# load data
movie_file = sio.loadmat('data/sl_movie_data.mat')
movie_data = movie_file['data']
# Dataset size parameters
dim1,dim2,dim3,ntr,nsubj = movie_data.shape
# preprocess data, zscore and set NaN to 0
all_data = [] # first half train, second half test
for s in range(nsubj):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# To ignore "RuntimeWarning: invalid value encountered in true_divide"
# There are some 0 voxels in the data which I have to keep, so there will be a warning
# when z-scoring the data. It should be safe to ignore this warning. If your data does
# not contain 0 voxel, you can remove the 2 lines above
train_tmp = np.nan_to_num(stats.zscore(movie_data[:,:,:,:int(ntr/2),s],axis=3,ddof=1))
test_tmp = np.nan_to_num(stats.zscore(movie_data[:,:,:,int(ntr/2):,s],axis=3,ddof=1))
all_data.append(np.concatenate((train_tmp,test_tmp),axis=3))
# print information
if rank == 0:
    print('searchlight length is {}'.format(sl_rad))
    print('number of features in SRM: {}'.format(nfeature))
    print('number of subjects is: {}'.format(len(all_data)))
    print('number of TRs is: {}'.format(ntr))
    print('brain data dimension is {}-by-{}-by-{}'.format(dim1,dim2,dim3))
# Generate mask: mask is a 3D binary array, with active voxels being 1. I simply set
# all voxels to be active in this example, but you should set the mask to fit your ROI
# in practice.
mask = np.ones((dim1,dim2,dim3), dtype=bool)
# Create searchlight object
sl = Searchlight(sl_rad=sl_rad)
# Distribute data to processes
# the first argument of "distribute" is a list of 4D arrays, and each 4D array is data
# from a single subject
sl.distribute(all_data, mask)
# broadcast something that should be shared by all ranks
sl.broadcast([niter,nfeature])
# time segment matching experiment. Define your own experiment function here
def time_segment_matching_accuracy(data, win_size=6):
nsubjs = len(data)
(ndim, nsample) = data[0].shape
accu = np.zeros(shape=nsubjs)
nseg = nsample - win_size
    # mysseg prediction
trn_data = np.zeros((ndim*win_size, nseg),order='f')
    # the training data also includes the test data; it is subtracted
    # out when calculating A
for m in range(nsubjs):
for w in range(win_size):
trn_data[w*ndim:(w+1)*ndim,:] += data[m][:,w:(w+nseg)]
for tst_subj in range(nsubjs):
tst_data = np.zeros((ndim*win_size, nseg),order='f')
for w in range(win_size):
tst_data[w*ndim:(w+1)*ndim,:] = data[tst_subj][:,w:(w+nseg)]
A = np.nan_to_num(stats.zscore((trn_data - tst_data),axis=0, ddof=1))
B = np.nan_to_num(stats.zscore(tst_data,axis=0, ddof=1))
# compute correlation matrix
corr_mtx = compute_correlation(B.T,A.T)
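        # disallow matches to temporally overlapping segments (except the
        # segment itself) so trivial overlap cannot inflate accuracy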
for i in range(nseg):
for j in range(nseg):
if abs(i-j)<win_size and i != j :
corr_mtx[i,j] = -np.inf
max_idx = np.argmax(corr_mtx, axis=1)
accu[tst_subj] = sum(max_idx == range(nseg)) / float(nseg)
return accu
# Define voxel function: The function to be applied on each searchlight
def sfn(l, msk, myrad, bcast_var):
# Arguments:
# l -- a list of 4D arrays, containing data from a single searchlight
# msk -- a 3D binary array, mask of this searchlight
# myrad -- an integer, sl_rad
# bcast_var -- whatever is broadcasted
# extract training and testing data
train_data = []
test_data = []
d1,d2,d3,ntr = l[0].shape
nvx = d1*d2*d3
for s in l:
train_data.append(np.reshape(s[:,:,:,:int(ntr/2)],(nvx,int(ntr/2))))
test_data.append(np.reshape(s[:,:,:,int(ntr/2):],(nvx,ntr-int(ntr/2))))
# train an srm model
srm = SRM(bcast_var[0],bcast_var[1])
srm.fit(train_data)
# transform test data
shared_data = srm.transform(test_data)
for s in range(len(l)):
shared_data[s] = np.nan_to_num(stats.zscore(shared_data[s],axis=1,ddof=1))
# run experiment
accu = time_segment_matching_accuracy(shared_data)
# return: can also return several values. In that case, the final output will be
# a 3D array of tuples
return np.mean(accu)
# Run searchlight
acc = sl.run_searchlight(sfn) # output is a 3D array in shape (dim1,dim2,dim3)
# save result
if rank == 0:
    print(acc)
np.savez_compressed('data/searchlight_srm_tsm_acc.npz',acc=acc)
| 6,342 | 33.851648 | 95 | py |
brainiak | brainiak-master/examples/funcalign/sssrm_image_prediction_example.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.io
from scipy import stats
import numpy as np
# Define the Theano flags to use cpu and float64 before theano is imported in brainiak
import os
os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'
import brainiak.funcalign.sssrm
# Load the input data that contains the movie stimuli for unsupervised training with SS-SRM
movie_file = scipy.io.loadmat('data/movie_data.mat')
movie_data_left = movie_file['movie_data_lh']
movie_data_right = movie_file['movie_data_rh']
subjects = movie_data_left.shape[2]
# Load the input data that contains the image stimuli and its labels for training a classifier
image_file = scipy.io.loadmat('data/image_data.mat')
image_data_left = image_file['image_data_lh']
image_data_right = image_file['image_data_rh']
# Merge the two hemispheres into one piece of data and
# convert data to a list of arrays matching SS-SRM input.
# Each element is a matrix of voxels by TRs_i.
image_data = []
movie_data = []
for s in range(subjects):
image_data.append(np.concatenate([image_data_left[:, :, s], image_data_right[:, :, s]], axis=0))
movie_data.append(np.concatenate([movie_data_left[:, :, s], movie_data_right[:, :, s]], axis=0))
# Read the labels of the image data for training the classifier.
labels = scipy.io.loadmat('data/label.mat')
labels = np.squeeze(labels['label'])
image_samples = labels.size
# Z-score the data
for subject in range(subjects):
image_data[subject] = stats.zscore(image_data[subject], axis=1, ddof=1)
movie_data[subject] = stats.zscore(movie_data[subject], axis=1, ddof=1)
# Run cross validation on the blocks of image stimuli (leave one block out)
# Note: There are 8 blocks of 7 samples (TRs) each
print("Running cross-validation with SS-SRM... (this may take a while)")
accuracy = np.zeros((8,))
for block in range(8):
print("Block ", block)
# Create masks with the train and validation samples
idx_validation = np.zeros((image_samples,), dtype=bool)
idx_validation[block*7:(block+1)*7] = True
idx_train = np.ones((image_samples,), dtype=bool)
idx_train[block*7:(block+1)*7] = False
# Divide the samples and labels in train and validation sets
image_data_train = [None] * subjects
labels_train = [None] * subjects
image_data_validation = [None] * subjects
labels_validation = [None] * subjects
for s in range(subjects):
image_data_train[s] = image_data[s][:, idx_train]
labels_train[s] = labels[idx_train]
image_data_validation[s] = image_data[s][:, idx_validation]
labels_validation[s] = labels[idx_validation]
# Run SS-SRM with the movie data and training image data
model = brainiak.funcalign.sssrm.SSSRM(n_iter=10, features=50, gamma=1.0, alpha=0.2)
model.fit(movie_data, labels_train, image_data_train)
# Predict on the validation samples and check results
prediction = model.predict(image_data_validation)
predicted = 0
total_predicted = 0
for s in range(subjects):
predicted += sum(prediction[s] == labels_validation[s])
total_predicted += prediction[s].size
accuracy[block] = predicted/total_predicted
print("Accuracy for this block: ",accuracy[block])
print("SS-SRM: The average accuracy among all subjects is {0:f} +/- {1:f}".format(np.mean(accuracy), np.std(accuracy)))
| 3,919 | 39.833333 | 119 | py |
brainiak | brainiak-master/examples/funcalign/srm_image_prediction_example.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.io
from scipy import stats
from sklearn.metrics import confusion_matrix
from sklearn.svm import NuSVC
import numpy as np
import brainiak.funcalign.srm
# Load the input data that contains the movie stimuli for unsupervised training with SRM
movie_file = scipy.io.loadmat('data/movie_data.mat')
movie_data_left = movie_file['movie_data_lh']
movie_data_right = movie_file['movie_data_rh']
subjects = movie_data_left.shape[2]
# Convert data to a list of arrays matching SRM input.
# Each element is a matrix of voxels by TRs.
# Also, concatenate data from both hemispheres in the brain.
movie_data = []
for s in range(subjects):
movie_data.append(np.concatenate([movie_data_left[:, :, s], movie_data_right[:, :, s]], axis=0))
# Z-score the data
for subject in range(subjects):
movie_data[subject] = stats.zscore(movie_data[subject], axis=1, ddof=1)
# Run SRM with the movie data
help(brainiak.funcalign.srm.SRM)
srm = brainiak.funcalign.srm.SRM(n_iter=10, features=50)
srm.fit(movie_data)
# We define a function to present the output of the experiment.
def plot_confusion_matrix(cm, title="Confusion Matrix"):
"""Plots a confusion matrix for each subject
"""
import matplotlib.pyplot as plt
import math
plt.figure()
subjects = len(cm)
root_subjects = math.sqrt(subjects)
cols = math.ceil(root_subjects)
rows = math.ceil(subjects/cols)
classes = cm[0].shape[0]
for subject in range(subjects):
plt.subplot(rows, cols, subject+1)
plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone)
plt.xticks(np.arange(classes), range(1, classes+1))
plt.yticks(np.arange(classes), range(1, classes+1))
cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
cbar.set_clim(0.0, 1.0)
plt.xlabel("Predicted")
plt.ylabel("True label")
plt.title("{0:d}".format(subject + 1))
plt.suptitle(title)
plt.tight_layout()
plt.show()
# Load the input data that contains the image stimuli and its labels for training a classifier
image_file = scipy.io.loadmat('data/image_data.mat')
image_data_left = image_file['image_data_lh']
image_data_right = image_file['image_data_rh']
# Convert data to a list of arrays matching SRM input.
# Each element is a matrix of voxels by TRs.
# Also, concatenate data from both hemispheres in the brain.
image_data = []
for s in range(subjects):
image_data.append(np.concatenate([image_data_left[:, :, s], image_data_right[:, :, s]], axis=0))
assert image_data[0].shape[0] == movie_data[0].shape[0], "Number of voxels in movie data and image data do not match!"
# Z-score the image data
for subject in range(subjects):
image_data[subject] = stats.zscore(image_data[subject], axis=1, ddof=1)
# Z-score the shared response data
image_data_shared = srm.transform(image_data)
for subject in range(subjects):
image_data_shared[subject] = stats.zscore(image_data_shared[subject], axis=1, ddof=1)
# Read the labels of the image data for training the classifier.
labels = scipy.io.loadmat('data/label.mat')
labels = np.squeeze(labels['label'])
# Run a leave-one-out cross validation with the subjects
train_labels = np.tile(labels, subjects-1)
test_labels = labels
accuracy = np.zeros((subjects,))
cm = [None] * subjects
for subject in range(subjects):
# Concatenate the subjects' data for training into one matrix
train_subjects = list(range(subjects))
train_subjects.remove(subject)
TRs = image_data_shared[0].shape[1]
train_data = np.zeros((image_data_shared[0].shape[0], len(train_labels)))
for train_subject in range(len(train_subjects)):
start_index = train_subject*TRs
end_index = start_index+TRs
train_data[:, start_index:end_index] = image_data_shared[train_subjects[train_subject]]
# Train a Nu-SVM classifier using scikit learn
classifier = NuSVC(nu=0.5, kernel='linear', gamma='auto')
classifier = classifier.fit(train_data.T, train_labels)
# Predict on the test data
predicted_labels = classifier.predict(image_data_shared[subject].T)
accuracy[subject] = sum(predicted_labels == test_labels)/float(len(predicted_labels))
# Create a confusion matrix to see the accuracy of each class
cm[subject] = confusion_matrix(test_labels, predicted_labels)
# Normalize the confusion matrix
cm[subject] = cm[subject].astype('float') / cm[subject].sum(axis=1)[:, np.newaxis]
# Plot and print the results
plot_confusion_matrix(cm, title="Confusion matrices for different test subjects with Probabilistic SRM")
print("SRM: The average accuracy among all subjects is {0:f} +/- {1:f}".format(np.mean(accuracy), np.std(accuracy)))
# Now we repeat the experiment with the Deterministic SRM
# Fit the model
srm = brainiak.funcalign.srm.DetSRM(n_iter=10, features=50)
srm.fit(movie_data)
# Transform the image stimuli data
image_data_shared = srm.transform(image_data)
for subject in range(subjects):
image_data_shared[subject] = stats.zscore(image_data_shared[subject], axis=1, ddof=1)
# Run a leave-one-out cross validation with the subjects
accuracy = np.zeros((subjects,))
cm = [None] * subjects
for subject in range(subjects):
# Concatenate the subjects' data for training into one matrix
train_subjects = list(range(subjects))
train_subjects.remove(subject)
TRs = image_data_shared[0].shape[1]
train_data = np.zeros((image_data_shared[0].shape[0], len(train_labels)))
for train_subject in range(len(train_subjects)):
start_index = train_subject*TRs
end_index = start_index+TRs
train_data[:, start_index:end_index] = image_data_shared[train_subjects[train_subject]]
# Train a Nu-SVM classifier using scikit learn
classifier = NuSVC(nu=0.5, kernel='linear', gamma='auto')
classifier = classifier.fit(train_data.T, train_labels)
# Predict on the test data
predicted_labels = classifier.predict(image_data_shared[subject].T)
accuracy[subject] = sum(predicted_labels == test_labels)/float(len(predicted_labels))
# Create a confusion matrix to see the accuracy of each class
cm[subject] = confusion_matrix(test_labels, predicted_labels)
# Normalize the confusion matrix
cm[subject] = cm[subject].astype('float') / cm[subject].sum(axis=1)[:, np.newaxis]
# Plot and print the results
plot_confusion_matrix(cm, title="Confusion matrices for different test subjects with Deterministic SRM")
print("Det. SRM: The average accuracy among all subjects is {0:f} +/- {1:f}".format(np.mean(accuracy), np.std(accuracy)))
| 7,161 | 40.639535 | 121 | py |
brainiak | brainiak-master/examples/funcalign/srm_image_prediction_example_distributed.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.io
from scipy import stats
from sklearn.metrics import confusion_matrix
from sklearn.svm import NuSVC
import numpy as np
import brainiak.funcalign.srm
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# Load the input data that contains the movie stimuli for unsupervised training with SRM
movie_file = scipy.io.loadmat('data/movie_data.mat')
movie_data_left = movie_file['movie_data_lh']
movie_data_right = movie_file['movie_data_rh']
subjects = movie_data_left.shape[2]
# Convert data to a list of arrays matching SRM input.
# Each element is a matrix of voxels by TRs.
# Also, concatenate data from both hemispheres in the brain.
movie_data = []
for s in range(subjects):
if s % size == rank:
movie_data.append(np.concatenate([movie_data_left[:, :, s], movie_data_right[:, :, s]], axis=0))
else:
movie_data.append(None)
# Z-score the data
for subject in range(subjects):
if movie_data[subject] is not None:
movie_data[subject] = stats.zscore(movie_data[subject], axis=1, ddof=1)
# Run SRM with the movie data
srm = brainiak.funcalign.srm.SRM(n_iter=10, features=50, comm=comm)
srm.fit(movie_data)
# We define a function to present the output of the experiment.
def plot_confusion_matrix(cm, title="Confusion Matrix"):
"""Plots a confusion matrix for each subject
"""
import matplotlib.pyplot as plt
import math
plt.figure()
subjects = len(cm)
root_subjects = math.sqrt(subjects)
cols = math.ceil(root_subjects)
rows = math.ceil(subjects/cols)
classes = cm[0].shape[0]
for subject in range(subjects):
plt.subplot(rows, cols, subject+1)
plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone)
plt.xticks(np.arange(classes), range(1, classes+1))
plt.yticks(np.arange(classes), range(1, classes+1))
cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
cbar.set_clim(0.0, 1.0)
plt.xlabel("Predicted")
plt.ylabel("True label")
plt.title("{0:d}".format(subject + 1))
plt.suptitle(title)
plt.tight_layout()
plt.show()
# Load the input data that contains the image stimuli and its labels for training a classifier
image_file = scipy.io.loadmat('data/image_data.mat')
image_data_left = image_file['image_data_lh']
image_data_right = image_file['image_data_rh']
# Convert data to a list of arrays matching SRM input.
# Each element is a matrix of voxels by TRs.
# Also, concatenate data from both hemispheres in the brain.
image_data = []
for s in range(subjects):
if s % size == rank:
image_data.append(np.concatenate([image_data_left[:, :, s], image_data_right[:, :, s]], axis=0))
else:
image_data.append(None)
# Z-score the image data
for subject in range(subjects):
if subject % size == rank:
image_data[subject] = stats.zscore(image_data[subject], axis=1, ddof=1)
else:
image_data[subject] = None
# Z-score the shared response data
image_data_shared = srm.transform(image_data)
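# each rank transformed only the subjects it owns, so broadcast each
# subject's shared response from its owning rank to all ranks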
for subject in range(subjects):
image_data_shared[subject] = comm.bcast(image_data_shared[subject],
root=(subject % size))
if rank == 0:
for subject in range(subjects):
image_data_shared[subject] = stats.zscore(image_data_shared[subject], axis=1, ddof=1)
# Read the labels of the image data for training the classifier.
labels = scipy.io.loadmat('data/label.mat')
labels = np.squeeze(labels['label'])
# Run a leave-one-out cross validation with the subjects
train_labels = np.tile(labels, subjects-1)
test_labels = labels
accuracy = np.zeros((subjects,))
cm = [None] * subjects
for subject in range(subjects):
# Concatenate the subjects' data for training into one matrix
train_subjects = list(range(subjects))
train_subjects.remove(subject)
TRs = image_data_shared[0].shape[1]
train_data = np.zeros((image_data_shared[0].shape[0], len(train_labels)))
for train_subject in range(len(train_subjects)):
start_index = train_subject*TRs
end_index = start_index+TRs
train_data[:, start_index:end_index] = image_data_shared[train_subjects[train_subject]]
# Train a Nu-SVM classifier using scikit learn
classifier = NuSVC(nu=0.5, kernel='linear', gamma='auto')
classifier = classifier.fit(train_data.T, train_labels)
# Predict on the test data
predicted_labels = classifier.predict(image_data_shared[subject].T)
accuracy[subject] = sum(predicted_labels == test_labels)/float(len(predicted_labels))
# Create a confusion matrix to see the accuracy of each class
cm[subject] = confusion_matrix(test_labels, predicted_labels)
# Normalize the confusion matrix
cm[subject] = cm[subject].astype('float') / cm[subject].sum(axis=1)[:, np.newaxis]
# Plot and print the results
plot_confusion_matrix(cm, title="Confusion matrices for different test subjects with Probabilistic SRM")
print("SRM: The average accuracy among all subjects is {0:f} +/- {1:f}".format(np.mean(accuracy), np.std(accuracy)))
| 5,821 | 38.337838 | 120 | py |
brainiak | brainiak-master/examples/searchlight/example_searchlight.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mpi4py import MPI
import sys
from brainiak.searchlight.searchlight import Searchlight
from brainiak.searchlight.searchlight import Diamond
"""Distributed Searchlight Example
example usage: mpirun -n 4 python3 example_searchlight.py
"""
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# Dataset size parameters
dim = 40
ntr = 400
maskrad = 15
# Predictive point parameters
pt = (23,23,23)
kernel_dim = 5
weight = 1
# Generate data
data = np.random.random((dim,dim,dim,ntr)) if rank == 0 else None
mask = np.zeros((dim,dim,dim), dtype=bool)
for i in range(dim):
for j in range(dim):
for k in range(dim):
dist = np.sqrt(((dim/2)-i)**2 + ((dim/2)-j)**2 + ((dim/2)-k)**2)
if(dist < maskrad):
mask[i,j,k] = 1
# Generate labels
labels = np.random.choice([True, False], (ntr,)) if rank == 0 else None
# Inject predictive region in random data
if rank == 0:
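    # build a 3D Gaussian kernel, exp(-||x||^2) around the block center,
    # then normalize it to sum to 1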
kernel = np.zeros((kernel_dim,kernel_dim,kernel_dim))
for i in range(kernel_dim):
for j in range(kernel_dim):
for k in range(kernel_dim):
arr = np.array([i-(kernel_dim/2),j-(kernel_dim/2),k-(kernel_dim/2)])
kernel [i,j,k] = np.exp(-np.dot(arr.T,arr))
kernel = kernel / np.sum(kernel)
for (idx, l) in enumerate(labels):
if l:
data[pt[0]:pt[0]+kernel_dim,pt[1]:pt[1]+kernel_dim,pt[2]:pt[2]+kernel_dim,idx] += kernel * weight
else:
data[pt[0]:pt[0]+kernel_dim,pt[1]:pt[1]+kernel_dim,pt[2]:pt[2]+kernel_dim,idx] -= kernel * weight
# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5, shape=Diamond,
min_active_voxels_proportion=0)
# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)
# Define voxel function
def sfn(l, msk, myrad, bcast_var):
import sklearn.svm
import sklearn.model_selection
classifier = sklearn.svm.SVC(gamma='auto')
data = l[0][msk,:].T
return np.mean(sklearn.model_selection.cross_val_score(classifier, data, bcast_var,n_jobs=1))
# Run searchlight
global_outputs = sl.run_searchlight(sfn)
# Visualize result
if rank == 0:
print(global_outputs)
    global_outputs = np.array(global_outputs, dtype=float)
import matplotlib.pyplot as plt
for (cnt, img) in enumerate(global_outputs):
plt.imshow(img,cmap='hot',vmin=0,vmax=1)
plt.savefig('img' + str(cnt) + '.png')
plt.clf()
| 2,954 | 27.970588 | 103 | py |
brainiak | brainiak-master/examples/searchlight/genre_searchlight_example.py | # The following code performs a searchlight at every voxel in the brain,
# looking at the difference in pattern similarity between musical genres
# (i.e., classical and jazz). In the study where the data were obtained,
# subjects listened to a set of 16 songs twice (two runs) in an fMRI
# scanner. The 16 songs consisted of 8 jazz songs and 8 classical songs.
# The goal of this searchlight is to find voxels that represent distinct
# information about these musical genres. Presumably, these voxels would
# be found in the auditory cortex, the brain system most organized for
# processing sound information.
import numpy as np
import time
from mpi4py import MPI
from nilearn.image import load_img
import sys
from brainiak.searchlight.searchlight import Searchlight
from scipy import stats
from scipy.sparse import random
import os
# MPI variables
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# Generate random data
if rank == 0:
np.random.seed(0)
data1_rand = np.random.rand(91,109,91,16)
data2_rand = np.random.rand(91,109,91,16)
classical = np.random.rand(2600)
jazz = np.random.rand(2600)
d1_reshape = np.reshape(data1_rand,(91*109*91,16))
d2_reshape = np.reshape(data2_rand,(91*109*91,16))
a1 = load_img('a1plus_2mm.nii.gz')
a1_vec = np.reshape(a1.get_data(),(91*109*91))
a1_idx = np.nonzero(a1_vec)
for i in range(8):
d1_reshape[a1_idx[0],i] += classical
d1_reshape[a1_idx[0],i+8] += jazz
d2_reshape[a1_idx[0],i] += classical
d2_reshape[a1_idx[0],i+8] += jazz
data1 = np.reshape(d1_reshape,(91,109,91,16))
data2 = np.reshape(d2_reshape,(91,109,91,16))
# Flatten data, then zscore data, then reshape data back into MNI coordinate space
data1 = stats.zscore(np.reshape(data1,(91*109*91,16)))
data1 = np.reshape(data1,(91,109,91,16))
data2 = stats.zscore(np.reshape(data2,(91*109*91,16)))
data2 = np.reshape(data2,(91,109,91,16))
else:
data1 = None
data2 = None
# Load mask
mask_img = load_img('MNI152_T1_2mm_brain_mask.nii')
mask_img = mask_img.get_data()
# Define function that computes the difference between within-genre and between-genre comparisons
def corr2_coeff(AB,msk,myrad,bcast_var):
if not np.all(msk):
return None
A,B = (AB[0], AB[1])
A = A.reshape((-1,A.shape[-1]))
B = B.reshape((-1,B.shape[-1]))
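    # corrcoef of the stacked song patterns is 32x32; the [16:, :16] block
    # holds the cross-run correlations (run-2 songs x run-1 songs)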
corrAB = np.corrcoef(A.T,B.T)[16:,:16]
classical_within = np.mean(corrAB[0:8,0:8])
jazz_within = np.mean(corrAB[8:16,8:16])
classJazz_between = np.mean(corrAB[8:16,0:8])
jazzClass_between = np.mean(corrAB[0:8,8:16])
within_genre = np.mean([classical_within,jazz_within])
between_genre = np.mean([classJazz_between,jazzClass_between])
diff = within_genre - between_genre
return diff
comm.Barrier()
begin_time = time.time()
comm.Barrier()
# Create and run searchlight
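# Both runs are distributed together; inside each searchlight sphere the
# kernel receives them as AB[0] and AB[1].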
sl = Searchlight(sl_rad=1,max_blk_edge=5)
sl.distribute([data1,data2],mask_img)
sl.broadcast(None)
global_outputs = sl.run_searchlight(corr2_coeff)
comm.Barrier()
end_time = time.time()
comm.Barrier()
# Plot searchlight results
if rank == 0:
print('Searchlight Done: ', end_time - begin_time)
maxval = np.max(global_outputs[np.not_equal(global_outputs,None)])
minval = np.min(global_outputs[np.not_equal(global_outputs,None)])
    global_outputs = np.array(global_outputs, dtype=float)
print(global_outputs)
# Save searchlight images
out_dir = "searchlight_images"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
import matplotlib.pyplot as plt
for (cnt, img) in enumerate(global_outputs):
plt.imshow(img,vmin=minval,vmax=maxval)
plt.colorbar()
plt.savefig('searchlight_images/' + 'img' + str(cnt) + '.png')
plt.clf()
| 3,829 | 36.54902 | 664 | py |
brainiak | brainiak-master/examples/hyperparamopt/hpo_example.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example for using hyperparameter optimization (hpo) package.
In this example, we will try to optimize a function of
2 variables (branin) using both hpo and grid search.
"""
import brainiak.hyperparamopt.hpo as hpo
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
# Branin is the function we want to minimize.
# It is a function of 2 variables.
# In the range x1 in [-5, 10] and x2 in [0, 15],
# this function has 2 local minima and 1 global minimum.
# Global minimum of -16.6 at (-3.7, 13.7).
# This is the modified version (Branin-Hoo) of the standard branin function.
# If you want the standard version (which has 3 global minima),
# you can omit the "+5*x1" term at the end
# For more details, see http://www.sfu.ca/~ssurjano/branin.html
def branin(x1, x2):
a = 1.0
b = 5.1/(4*np.pi*np.pi)
c = 5.0/np.pi
r = 6.0
s = 10.0
t = 1.0/(8*np.pi)
return a*((x2 - b*x1*x1 + c*x1 - r)**2) + s*(1-t)*np.cos(x1) + s + 5*x1
# This is a wrapper around branin that takes in a dictionary
def branin_wrapper(args):
x1 = args['x1']
x2 = args['x2']
return branin(x1,x2)
# Define ranges for the two variables
x1lo = -5
x1hi = 10
x2lo = 0
x2hi = 15
##############################
# Optimization through hpo
##############################
# Define a space for hpo to use
# The space needs to define
# 1. Name of the variables
# 2. Default samplers for the variables (use scipy.stats objects)
# 3. lo and hi ranges for the variables (will use -inf, inf if not specified)
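# Note: scipy.stats.uniform is parameterized as (loc, scale), so
# st.uniform(lo, hi - lo) samples uniformly on [lo, hi].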
space = {'x1':{'dist': st.uniform(x1lo, x1hi-x1lo), 'lo':x1lo, 'hi':x1hi},
'x2':{'dist': st.uniform(x2lo, x2hi-x2lo), 'lo':x2lo, 'hi':x2hi}}
# The trials object is just a list that stores the samples generated and the
# corresponding function values at those sample points.
trials = []
# Maximum number of samples that will be generated.
# This is the maximum number of function evaluations that will be performed.
n_hpo_samples = 100
# Call the fmin function that does the optimization.
# The function to be optimized should take in a dictionary. You will probably
# need to wrap your function to do this (see branin() and branin_wrapper()).
# You can pass in a non-empty trials object as well e.g. from a previous
# fmin run. We just append to the trials object and will use existing data
# in our optimization.
print("Starting optimization through hpo")
best = hpo.fmin(loss_fn=branin_wrapper, space=space,
max_evals=n_hpo_samples, trials=trials)
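# best is a dict containing the best sample found, e.g. best['x1'],
# best['x2'], and the corresponding loss best['loss'].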
# Print out the best value obtained through HPO
print("Best obtained through HPO (", n_hpo_samples, " samples) = ",
best['x1'], best['x2'], "; min value = ", best['loss'])
#####################################
# Optimization through grid search
#####################################
# Divide the space into a uniform grid (meshgrid)
n = 200
x1 = np.linspace(x1lo, x1hi, n)
x2 = np.linspace(x2lo, x2hi, n)
x1_grid, x2_grid = np.meshgrid(x1, x2)
# Calculate the function values along the grid
print("Starting optimization through grid search")
z = branin(x1_grid, x2_grid)
# Print out the best value obtained through grid search
print("Best obtained through grid search (", n*n, " samples) = ",
x1_grid.flatten()[z.argmin()], x2_grid.flatten()[z.argmin()],
"; min value = ", z.min())
########
# Plots
########
# Convert trials object data into numpy arrays
x1 = np.array([tr['x1'] for tr in trials])
x2 = np.array([tr['x2'] for tr in trials])
y = np.array([tr['loss'] for tr in trials])
# Plot the function contour using the grid search data
h = (z.max()-z.min())/25
plt.contour(x1_grid, x2_grid, z, levels=np.linspace(z.min()-h, z.max(), 26))
# Mark the points that were sampled through HPO
plt.scatter(x1, x2, s=10, color='r', label='HPO Samples')
# Mark the best points obtained through both methods
plt.scatter(best['x1'], best['x2'], s=30, color='b', label='Best HPO')
plt.scatter(x1_grid.flatten()[z.argmin()], x2_grid.flatten()[z.argmin()],
s=30, color='g', label='Best grid search')
# Labels
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('Hyperparameter optimization using HPO (Branin function)')
plt.legend()
plt.show()
| 4,770 | 33.572464 | 77 | py |
brainiak | brainiak-master/tests/conftest.py | import multiprocessing
from mpi4py import MPI
import pytest
import numpy
import random
import tensorflow
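# Write a separate JUnit XML report per MPI rank so that ranks running the
# test suite concurrently do not overwrite each other's results.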
def pytest_configure(config):
config.option.xmlpath = "junit-{}.xml".format(MPI.COMM_WORLD.Get_rank())
@pytest.fixture
def seeded_rng():
random.seed(0)
numpy.random.seed(0)
tensorflow.random.set_seed(0)
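# Marker that skips multiprocessing-based tests when running under MPI
# without the "fork" start method, which is the only one known to work.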
skip_non_fork = pytest.mark.skipif(
multiprocessing.get_start_method() != "fork"
and MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is not None,
reason="MPI only works with multiprocessing fork start method.",
)
| 544 | 19.961538 | 76 | py |
brainiak | brainiak-master/tests/image/test_image.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, Sequence
import numpy as np
import pytest
from nibabel.nifti1 import Nifti1Pair
from nibabel.spatialimages import SpatialImage
from brainiak.image import (mask_image, mask_images, MaskedMultiSubjectData,
multimask_images, SingleConditionSpec)
@pytest.fixture
def masked_multi_subject_data(masked_images):
return np.stack(masked_images, axis=-1)
class TestMaskedMultiSubjectData:
def test_from_masked_images(self, masked_images,
masked_multi_subject_data):
result = MaskedMultiSubjectData.from_masked_images(masked_images,
len(masked_images))
assert np.array_equal(np.moveaxis(result, 1, 0),
masked_multi_subject_data)
@pytest.fixture
def condition_spec() -> SingleConditionSpec:
return np.array([[[1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0]]],
dtype=np.int8).view(SingleConditionSpec)
class TestUniqueLabelConditionSpec:
def test_extract_labels(self, condition_spec: SingleConditionSpec
) -> None:
assert np.array_equal(condition_spec.extract_labels(),
np.array([0, 1]))
@pytest.fixture
def spatial_image() -> SpatialImage:
return Nifti1Pair(np.array([[[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]]).reshape(4, 4, 4, 1),
np.eye(4))
@pytest.fixture
def mask() -> np.ndarray:
return np.array([[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
                      [0, 0, 0, 0]]], dtype=bool)
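# Expected result of masking spatial_image: the 8 in-mask voxel values,
# flattened in C order, as an 8x1 column.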
@pytest.fixture
def masked_data() -> np.ndarray:
return np.array([[1, 0, 0, 1, 0, 1, 1, 0]]).reshape(8, 1)
@pytest.fixture
def images(spatial_image: SpatialImage) -> Iterable[SpatialImage]:
images = [spatial_image]
image_data = spatial_image.get_data().copy()
image_data[1, 1, 1, 0] = 2
images.append(Nifti1Pair(image_data, np.eye(4)))
return images
@pytest.fixture
def masks(mask: np.ndarray) -> Sequence[np.ndarray]:
masks = [mask]
mask2 = mask.copy()
mask2[0, 0, 0] = 1
masks.append(mask2)
mask3 = mask.copy()
mask3[2, 2, 2] = 0
masks.append(mask3)
return masks
@pytest.fixture
def multimasked_images(masked_data) -> Iterable[Iterable[np.ndarray]]:
masked_data_2 = np.concatenate(([[2]], masked_data[1:, :]))
return [[masked_data, np.concatenate(([[0]], masked_data)),
masked_data[:-1, :]],
[masked_data_2, np.concatenate(([[0]], masked_data_2)),
masked_data_2[:-1, :]]]
@pytest.fixture
def masked_images(multimasked_images) -> Iterable[np.ndarray]:
return [multimasked_image[0] for multimasked_image in multimasked_images]
def test_mask_image(spatial_image: SpatialImage, mask: np.ndarray,
masked_data: np.ndarray) -> None:
result = mask_image(spatial_image, mask)
assert np.array_equal(result, masked_data)
def test_mask_image_with_type(spatial_image: SpatialImage, mask: np.ndarray,
masked_data: np.ndarray) -> None:
masked_data_type = np.float32
result = mask_image(spatial_image, mask, masked_data_type)
assert result.dtype == masked_data_type
assert np.allclose(result, masked_data)
def test_multimask_images(
images: Iterable[SpatialImage],
masks: Sequence[np.ndarray],
multimasked_images: Iterable[Iterable[np.ndarray]]
) -> None:
result = multimask_images(images, masks)
for result_images, expected_images in zip(result,
multimasked_images):
for result_image, expected_image in zip(result_images,
expected_images):
assert np.array_equal(result_image, expected_image)
def test_mask_images(
images: Iterable[SpatialImage],
mask: np.ndarray,
masked_images: Iterable[np.ndarray]
) -> None:
result = mask_images(images, mask)
for result_image, expected_image in zip(result, masked_images):
assert np.array_equal(result_image, expected_image)
| 6,093 | 34.225434 | 78 | py |
brainiak | brainiak-master/tests/io/test_io.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Iterable, Sequence
import nibabel as nib
import numpy as np
import pytest
from brainiak import io
@pytest.fixture
def in_dir() -> Path:
return Path(__file__).parent / "data"
@pytest.fixture
def expected_image_data_shape() -> Sequence[int]:
return (64, 64, 26, 10)
@pytest.fixture
def mask_path(in_dir: Path) -> Path:
return in_dir / "mask.nii.gz"
@pytest.fixture
def labels_path(in_dir: Path) -> Path:
return in_dir / "epoch_labels.npy"
@pytest.fixture
def expected_condition_spec_shape() -> Sequence[int]:
return (2, 2, 10)
@pytest.fixture
def expected_n_subjects() -> int:
return 2
@pytest.fixture
def image_paths(in_dir: Path) -> Iterable[Path]:
return (in_dir / "subject1_bet.nii.gz", in_dir / "subject2_bet.nii.gz")
def test_load_images_from_dir_data_shape(
in_dir: Path,
expected_image_data_shape: Sequence[int],
expected_n_subjects: int
) -> None:
for i, image in enumerate(io.load_images_from_dir(in_dir, "bet.nii.gz")):
        assert image.get_data().shape == expected_image_data_shape
assert i + 1 == expected_n_subjects
def test_load_images_data_shape(
image_paths: Iterable[Path],
expected_image_data_shape: Sequence[int],
expected_n_subjects: int
) -> None:
for i, image in enumerate(io.load_images(image_paths)):
        assert image.get_data().shape == expected_image_data_shape
assert i + 1 == expected_n_subjects
def test_load_boolean_mask(mask_path: Path) -> None:
mask = io.load_boolean_mask(mask_path)
    assert mask.dtype == bool
def test_load_boolean_mask_predicate(mask_path: Path) -> None:
mask = io.load_boolean_mask(mask_path, lambda x: np.logical_not(x))
expected_mask = np.logical_not(io.load_boolean_mask(mask_path))
assert np.array_equal(mask, expected_mask)
def test_load_labels(labels_path: Path,
expected_condition_spec_shape: Sequence[int],
expected_n_subjects: int) -> None:
condition_specs = io.load_labels(labels_path)
i = 0
for condition_spec in condition_specs:
assert condition_spec.shape == expected_condition_spec_shape
i += 1
assert i == expected_n_subjects
def test_save_as_nifti_file(tmpdir) -> None:
out_file = str(tmpdir / "nifti.nii")
shape = (4, 4, 4)
io.save_as_nifti_file(np.ones(shape), np.eye(4), out_file)
assert nib.load(out_file).get_data().shape == shape
| 3,078 | 27.775701 | 77 | py |
brainiak | brainiak-master/tests/isc/test_isc.py | import numpy as np
import logging
import pytest
from brainiak.isc import (isc, isfc, bootstrap_isc, permutation_isc,
squareform_isfc, timeshift_isc,
phaseshift_isc)
from scipy.spatial.distance import squareform
logger = logging.getLogger(__name__)
# Create simple simulated data with high intersubject correlation
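# Each subject's time series is a shared signal plus subject-specific
# Gaussian noise; the noise argument scales the noise and thus controls
# the expected ISC level.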
def simulated_timeseries(n_subjects, n_TRs, n_voxels=30,
noise=1, data_type='array',
random_state=None):
prng = np.random.RandomState(random_state)
if n_voxels:
signal = prng.randn(n_TRs, n_voxels)
prng = np.random.RandomState(prng.randint(0, 2**32 - 1))
data = [signal + prng.randn(n_TRs, n_voxels) * noise
for subject in np.arange(n_subjects)]
elif not n_voxels:
signal = prng.randn(n_TRs)
prng = np.random.RandomState(prng.randint(0, 2**32 - 1))
data = [signal + prng.randn(n_TRs) * noise
for subject in np.arange(n_subjects)]
if data_type == 'array':
if n_voxels:
data = np.dstack(data)
elif not n_voxels:
data = np.column_stack(data)
return data
# Create 3 voxel simulated data with correlated time series
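# Voxels 0 and 1 carry an identical signal in every subject (ISC near 1),
# while voxel 2 is independent noise (ISC near 0); the p-value checks in
# the tests below rely on this layout.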
def correlated_timeseries(n_subjects, n_TRs, noise=0,
random_state=None):
prng = np.random.RandomState(random_state)
signal = prng.randn(n_TRs)
correlated = True
while correlated:
uncorrelated = np.random.randn(n_TRs,
n_subjects)[:, np.newaxis, :]
unc_max = np.amax(squareform(np.corrcoef(
uncorrelated[:, 0, :].T), checks=False))
unc_mean = np.mean(squareform(np.corrcoef(
uncorrelated[:, 0, :].T), checks=False))
if unc_max < .25 and np.abs(unc_mean) < .001:
correlated = False
data = np.repeat(np.column_stack((signal, signal))[..., np.newaxis],
n_subjects, axis=2)
data = np.concatenate((data, uncorrelated), axis=1)
data = data + np.random.randn(n_TRs, 3, n_subjects) * noise
return data
# Compute ISCs using different input types
# List of subjects with one voxel/ROI
def test_isc_input():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info("Testing ISC inputs")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=None, data_type='list',
random_state=random_state)
iscs_list = isc(data, pairwise=False, summary_statistic=None)
# Array of subjects with one voxel/ROI
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=None, data_type='array',
random_state=random_state)
iscs_array = isc(data, pairwise=False, summary_statistic=None)
# Check they're the same
assert np.array_equal(iscs_list, iscs_array)
# List of subjects with multiple voxels/ROIs
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='list',
random_state=random_state)
iscs_list = isc(data, pairwise=False, summary_statistic=None)
# Array of subjects with multiple voxels/ROIs
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
iscs_array = isc(data, pairwise=False, summary_statistic=None)
# Check they're the same
assert np.array_equal(iscs_list, iscs_array)
logger.info("Finished testing ISC inputs")
# Check pairwise and leave-one-out, and summary statistics for ISC
def test_isc_options():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info("Testing ISC options")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
iscs_loo = isc(data, pairwise=False, summary_statistic=None)
assert iscs_loo.shape == (n_subjects, n_voxels)
# Just two subjects
iscs_loo = isc(data[..., :2], pairwise=False, summary_statistic=None)
assert iscs_loo.shape == (n_voxels,)
iscs_pw = isc(data, pairwise=True, summary_statistic=None)
assert iscs_pw.shape == (n_subjects*(n_subjects-1)/2, n_voxels)
# Check summary statistics
isc_mean = isc(data, pairwise=False, summary_statistic='mean')
assert isc_mean.shape == (n_voxels,)
isc_median = isc(data, pairwise=False, summary_statistic='median')
assert isc_median.shape == (n_voxels,)
with pytest.raises(ValueError):
isc(data, pairwise=False, summary_statistic='min')
logger.info("Finished testing ISC options")
# Make sure ISC recovers correlations of 1 and less than 1
def test_isc_output():
logger.info("Testing ISC outputs")
data = correlated_timeseries(20, 60, noise=0,
random_state=42)
iscs = isc(data, pairwise=False)
assert np.allclose(iscs[:, :2], 1., rtol=1e-05)
assert np.all(iscs[:, -1] < 1.)
iscs = isc(data, pairwise=True)
assert np.allclose(iscs[:, :2], 1., rtol=1e-05)
assert np.all(iscs[:, -1] < 1.)
logger.info("Finished testing ISC outputs")
# Check for proper handling of NaNs in ISC
def test_isc_nans():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info("Testing ISC options")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
# Inject NaNs into data
data[0, 0, 0] = np.nan
    # Don't tolerate NaNs, should lose zeroth voxel
iscs_loo = isc(data, pairwise=False, tolerate_nans=False)
assert np.sum(np.isnan(iscs_loo)) == n_subjects
# Tolerate all NaNs, only subject with NaNs yields NaN
iscs_loo = isc(data, pairwise=False, tolerate_nans=True)
assert np.sum(np.isnan(iscs_loo)) == 1
# Pairwise approach shouldn't care
iscs_pw_T = isc(data, pairwise=True, tolerate_nans=True)
iscs_pw_F = isc(data, pairwise=True, tolerate_nans=False)
assert np.allclose(iscs_pw_T, iscs_pw_F, equal_nan=True)
assert (np.sum(np.isnan(iscs_pw_T)) ==
np.sum(np.isnan(iscs_pw_F)) ==
n_subjects - 1)
# Set proportion of nans to reject (70% and 90% non-NaN)
data[0, 0, :] = np.nan
data[0, 1, :n_subjects - int(n_subjects * .7)] = np.nan
data[0, 2, :n_subjects - int(n_subjects * .9)] = np.nan
iscs_loo_T = isc(data, pairwise=False, tolerate_nans=True)
iscs_loo_F = isc(data, pairwise=False, tolerate_nans=False)
iscs_loo_95 = isc(data, pairwise=False, tolerate_nans=.95)
iscs_loo_90 = isc(data, pairwise=False, tolerate_nans=.90)
iscs_loo_80 = isc(data, pairwise=False, tolerate_nans=.8)
iscs_loo_70 = isc(data, pairwise=False, tolerate_nans=.7)
iscs_loo_60 = isc(data, pairwise=False, tolerate_nans=.6)
assert (np.sum(np.isnan(iscs_loo_F)) ==
np.sum(np.isnan(iscs_loo_95)) == 60)
assert (np.sum(np.isnan(iscs_loo_80)) ==
np.sum(np.isnan(iscs_loo_90)) == 42)
assert (np.sum(np.isnan(iscs_loo_T)) ==
np.sum(np.isnan(iscs_loo_60)) ==
np.sum(np.isnan(iscs_loo_70)) == 28)
assert np.array_equal(np.sum(np.isnan(iscs_loo_F), axis=0),
np.sum(np.isnan(iscs_loo_95), axis=0))
assert np.array_equal(np.sum(np.isnan(iscs_loo_80), axis=0),
np.sum(np.isnan(iscs_loo_90), axis=0))
assert np.all((np.array_equal(
np.sum(np.isnan(iscs_loo_T), axis=0),
np.sum(np.isnan(iscs_loo_60), axis=0)),
np.array_equal(
np.sum(np.isnan(iscs_loo_T), axis=0),
np.sum(np.isnan(iscs_loo_70), axis=0)),
np.array_equal(
np.sum(np.isnan(iscs_loo_60), axis=0),
np.sum(np.isnan(iscs_loo_70), axis=0))))
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
# Make sure voxel with NaNs across all subjects is always removed
data[0, 0, :] = np.nan
iscs_loo_T = isc(data, pairwise=False, tolerate_nans=True)
iscs_loo_F = isc(data, pairwise=False, tolerate_nans=False)
assert np.allclose(iscs_loo_T, iscs_loo_F, equal_nan=True)
assert (np.sum(np.isnan(iscs_loo_T)) ==
np.sum(np.isnan(iscs_loo_F)) ==
n_subjects)
iscs_pw_T = isc(data, pairwise=True, tolerate_nans=True)
iscs_pw_F = isc(data, pairwise=True, tolerate_nans=False)
assert np.allclose(iscs_pw_T, iscs_pw_F, equal_nan=True)
assert (np.sum(np.isnan(iscs_pw_T)) ==
np.sum(np.isnan(iscs_pw_F)) ==
n_subjects * (n_subjects - 1) / 2)
# Test one-sample bootstrap test
def test_bootstrap_isc():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
n_bootstraps = 10
logger.info("Testing bootstrap hypothesis test")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
iscs = isc(data, pairwise=False, summary_statistic=None)
observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=False,
summary_statistic='median',
n_bootstraps=n_bootstraps,
ci_percentile=95)
assert distribution.shape == (n_bootstraps, n_voxels)
# Test one-sample bootstrap test with pairwise approach
n_bootstraps = 10
iscs = isc(data, pairwise=True, summary_statistic=None)
observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=True,
summary_statistic='median',
n_bootstraps=n_bootstraps,
ci_percentile=95)
assert distribution.shape == (n_bootstraps, n_voxels)
# Check random seeds
iscs = isc(data, pairwise=False, summary_statistic=None)
distributions = []
for random_state in [42, 42, None]:
observed, ci, p, distribution = bootstrap_isc(
iscs, pairwise=False,
summary_statistic='median',
n_bootstraps=n_bootstraps,
ci_percentile=95,
random_state=random_state)
distributions.append(distribution)
assert np.array_equal(distributions[0], distributions[1])
assert not np.array_equal(distributions[1], distributions[2])
# Check output p-values
data = correlated_timeseries(20, 60, noise=.5,
random_state=42)
iscs = isc(data, pairwise=False)
observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=False)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
iscs = isc(data, pairwise=True)
observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=True)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
# Check that ISC computation and bootstrap observed are same
iscs = isc(data, pairwise=False)
observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=False,
summary_statistic='median')
assert np.array_equal(observed, isc(data, pairwise=False,
summary_statistic='median'))
# Check that ISC computation and bootstrap observed are same
iscs = isc(data, pairwise=True)
observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=True,
summary_statistic='median')
assert np.array_equal(observed, isc(data, pairwise=True,
summary_statistic='median'))
logger.info("Finished testing bootstrap hypothesis test")
# Test permutation test with group assignments
def test_permutation_isc():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
group_assignment = [1] * 10 + [2] * 10
logger.info("Testing permutation test")
# Create dataset with two groups in pairwise approach
data = np.dstack((simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
noise=1, data_type='array',
random_state=3),
simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
noise=5, data_type='array',
random_state=4)))
iscs = isc(data, pairwise=True, summary_statistic=None)
observed, p, distribution = permutation_isc(
iscs,
group_assignment=group_assignment,
pairwise=True,
summary_statistic='mean',
n_permutations=200)
# Create data with two groups in leave-one-out approach
data_1 = simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
noise=1, data_type='array',
random_state=3)
data_2 = simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
noise=10, data_type='array',
random_state=4)
iscs = np.vstack((isc(data_1, pairwise=False, summary_statistic=None),
isc(data_2, pairwise=False, summary_statistic=None)))
observed, p, distribution = permutation_isc(
iscs,
group_assignment=group_assignment,
pairwise=False,
summary_statistic='mean',
n_permutations=200)
# One-sample leave-one-out permutation test
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
iscs = isc(data, pairwise=False, summary_statistic=None)
observed, p, distribution = permutation_isc(iscs,
pairwise=False,
summary_statistic='median',
n_permutations=200)
# One-sample pairwise permutation test
iscs = isc(data, pairwise=True, summary_statistic=None)
observed, p, distribution = permutation_isc(iscs,
pairwise=True,
summary_statistic='median',
n_permutations=200)
    # Small one-sample leave-one-out exact test
data = simulated_timeseries(12, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
iscs = isc(data, pairwise=False, summary_statistic=None)
observed, p, distribution = permutation_isc(iscs, pairwise=False,
summary_statistic='median',
n_permutations=10000)
# Small two-sample pairwise exact test (and unequal groups)
data = np.dstack((simulated_timeseries(3, n_TRs, n_voxels=n_voxels,
noise=1, data_type='array',
random_state=3),
simulated_timeseries(4, n_TRs, n_voxels=n_voxels,
noise=50, data_type='array',
random_state=4)))
iscs = isc(data, pairwise=True, summary_statistic=None)
group_assignment = [1, 1, 1, 2, 2, 2, 2]
observed, p, distribution = permutation_isc(
iscs,
group_assignment=group_assignment,
pairwise=True,
summary_statistic='mean',
n_permutations=10000)
# Small two-sample leave-one-out exact test (and unequal groups)
data_1 = simulated_timeseries(3, n_TRs, n_voxels=n_voxels,
noise=1, data_type='array',
random_state=3)
data_2 = simulated_timeseries(4, n_TRs, n_voxels=n_voxels,
noise=50, data_type='array',
random_state=4)
iscs = np.vstack((isc(data_1, pairwise=False, summary_statistic=None),
isc(data_2, pairwise=False, summary_statistic=None)))
group_assignment = [1, 1, 1, 2, 2, 2, 2]
observed, p, distribution = permutation_isc(
iscs,
group_assignment=group_assignment,
pairwise=False,
summary_statistic='mean',
n_permutations=10000)
# Check output p-values
data = correlated_timeseries(20, 60, noise=.5,
random_state=42)
iscs = isc(data, pairwise=False)
observed, p, distribution = permutation_isc(iscs, pairwise=False)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
iscs = isc(data, pairwise=True)
observed, p, distribution = permutation_isc(iscs, pairwise=True)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
# Check that ISC computation and permutation observed are same
iscs = isc(data, pairwise=False)
observed, p, distribution = permutation_isc(iscs, pairwise=False,
summary_statistic='median')
assert np.allclose(observed, isc(data, pairwise=False,
summary_statistic='median'),
rtol=1e-03)
    # Check that ISC computation and permutation observed are same
iscs = isc(data, pairwise=True)
observed, p, distribution = permutation_isc(iscs, pairwise=True,
summary_statistic='mean')
assert np.allclose(observed, isc(data, pairwise=True,
summary_statistic='mean'),
rtol=1e-03)
logger.info("Finished testing permutaton test")
def test_timeshift_isc():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
logger.info("Testing circular time-shift")
# Circular time-shift on one sample, leave-one-out
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
observed, p, distribution = timeshift_isc(data, pairwise=False,
summary_statistic='median',
n_shifts=200)
# Circular time-shift on one sample, pairwise
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
observed, p, distribution = timeshift_isc(data, pairwise=True,
summary_statistic='median',
n_shifts=200)
# Circular time-shift on one sample, leave-one-out
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
observed, p, distribution = timeshift_isc(data, pairwise=False,
summary_statistic='mean',
n_shifts=200)
# Check output p-values
data = correlated_timeseries(20, 60, noise=.5,
random_state=42)
iscs = isc(data, pairwise=False)
observed, p, distribution = timeshift_isc(data, pairwise=False)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
iscs = isc(data, pairwise=True)
observed, p, distribution = timeshift_isc(data, pairwise=True)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
# Check that ISC computation and permutation observed are same
iscs = isc(data, pairwise=False)
observed, p, distribution = timeshift_isc(data, pairwise=False,
summary_statistic='median')
assert np.allclose(observed, isc(data, pairwise=False,
summary_statistic='median'),
rtol=1e-03)
    # Check that ISC computation and permutation observed are same
iscs = isc(data, pairwise=True)
observed, p, distribution = timeshift_isc(data, pairwise=True,
summary_statistic='mean')
assert np.allclose(observed, isc(data, pairwise=True,
summary_statistic='mean'),
rtol=1e-03)
logger.info("Finished testing circular time-shift")
# Phase randomization test
def test_phaseshift_isc():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
logger.info("Testing phase randomization")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
observed, p, distribution = phaseshift_isc(data, pairwise=True,
summary_statistic='median',
n_shifts=200)
# Phase randomization one-sample test, leave-one-out
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
observed, p, distribution = phaseshift_isc(data, pairwise=False,
summary_statistic='mean',
n_shifts=200)
# Check output p-values
data = correlated_timeseries(20, 60, noise=.5,
random_state=42)
iscs = isc(data, pairwise=False)
observed, p, distribution = phaseshift_isc(data, pairwise=False)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
iscs = isc(data, pairwise=True)
observed, p, distribution = phaseshift_isc(data, pairwise=True)
assert np.all(iscs[:, :2] > .5)
assert np.all(iscs[:, -1] < .5)
assert p[0] < .05 and p[1] < .05
assert p[2] > .01
# Check that ISC computation and permutation observed are same
iscs = isc(data, pairwise=False)
observed, p, distribution = phaseshift_isc(data, pairwise=False,
summary_statistic='median')
assert np.allclose(observed, isc(data, pairwise=False,
summary_statistic='median'),
rtol=1e-03)
    # Check that ISC computation and permutation observed are same
iscs = isc(data, pairwise=True)
observed, p, distribution = phaseshift_isc(data, pairwise=True,
summary_statistic='mean')
assert np.allclose(observed, isc(data, pairwise=True,
summary_statistic='mean'),
rtol=1e-03)
logger.info("Finished testing phase randomization")
# Test ISFC
def test_isfc_options():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
logger.info("Testing ISFC options")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
isfcs, iscs = isfc(data, pairwise=False, summary_statistic=None)
assert isfcs.shape == (n_subjects, n_voxels * (n_voxels - 1) / 2)
assert iscs.shape == (n_subjects, n_voxels)
# Without vectorized upper triangle
isfcs = isfc(data, pairwise=False, summary_statistic=None,
vectorize_isfcs=False)
assert isfcs.shape == (n_subjects, n_voxels, n_voxels)
# Just two subjects
isfcs, iscs = isfc(data[..., :2], pairwise=False, summary_statistic=None)
assert isfcs.shape == (n_voxels * (n_voxels - 1) / 2,)
assert iscs.shape == (n_voxels,)
isfcs = isfc(data[..., :2], pairwise=False, summary_statistic=None,
vectorize_isfcs=False)
assert isfcs.shape == (n_voxels, n_voxels)
# ISFC with pairwise approach
isfcs, iscs = isfc(data, pairwise=True, summary_statistic=None)
assert isfcs.shape == (n_subjects * (n_subjects - 1) / 2,
n_voxels * (n_voxels - 1) / 2)
assert iscs.shape == (n_subjects * (n_subjects - 1) / 2,
n_voxels)
isfcs = isfc(data, pairwise=True, summary_statistic=None,
vectorize_isfcs=False)
assert isfcs.shape == (n_subjects * (n_subjects - 1) / 2,
n_voxels, n_voxels)
# ISFC with summary statistics
isfcs, iscs = isfc(data, pairwise=True, summary_statistic='mean')
isfcs, iscs = isfc(data, pairwise=True, summary_statistic='median')
# Check output p-values
data = correlated_timeseries(20, 60, noise=.5,
random_state=42)
isfcs = isfc(data, pairwise=False, vectorize_isfcs=False)
assert np.all(isfcs[:, 0, 1] > .5) and np.all(isfcs[:, 1, 0] > .5)
assert np.all(isfcs[:, :2, 2] < .5) and np.all(isfcs[:, 2, :2] < .5)
isfcs = isfc(data, pairwise=True, vectorize_isfcs=False)
assert np.all(isfcs[:, 0, 1] > .5) and np.all(isfcs[:, 1, 0] > .5)
assert np.all(isfcs[:, :2, 2] < .5) and np.all(isfcs[:, 2, :2] < .5)
# Check that ISC and ISFC diagonal are identical
iscs = isc(data, pairwise=False)
isfcs = isfc(data, pairwise=False, vectorize_isfcs=False)
for s in np.arange(len(iscs)):
assert np.allclose(isfcs[s, ...].diagonal(), iscs[s, :], rtol=1e-03)
isfcs, iscs_v = isfc(data, pairwise=False)
assert np.allclose(iscs, iscs_v, rtol=1e-03)
# Check that ISC and ISFC diagonal are identical (pairwise)
iscs = isc(data, pairwise=True)
isfcs = isfc(data, pairwise=True, vectorize_isfcs=False)
for s in np.arange(len(iscs)):
assert np.allclose(isfcs[s, ...].diagonal(), iscs[s, :], rtol=1e-03)
isfcs, iscs_v = isfc(data, pairwise=True)
assert np.allclose(iscs, iscs_v, rtol=1e-03)
# Generate 'targets' data and use for ISFC
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array')
n_targets = 15
targets_data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_targets,
data_type='array')
isfcs = isfc(data, targets=targets_data, pairwise=False,
vectorize_isfcs=False)
assert isfcs.shape == (n_subjects, n_voxels, n_targets)
# Ensure 'square' output enforced
isfcs = isfc(data, targets=targets_data, pairwise=False,
vectorize_isfcs=True)
assert isfcs.shape == (n_subjects, n_voxels, n_targets)
# Check list input for targets
targets_data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_targets,
data_type='list')
isfcs = isfc(data, targets=targets_data, pairwise=False,
vectorize_isfcs=False)
assert isfcs.shape == (n_subjects, n_voxels, n_targets)
# Check that mismatching subjects / TRs breaks targets
targets_data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_targets,
data_type='array')
with pytest.raises(ValueError):
isfcs = isfc(data, targets=targets_data[..., :-1],
pairwise=False, vectorize_isfcs=False)
with pytest.raises(ValueError):
isfcs = isfc(data, targets=targets_data[:-1, ...],
pairwise=False, vectorize_isfcs=False)
# Check targets for only 2 subjects
isfcs = isfc(data[..., :2], targets=targets_data[..., :2],
pairwise=False, summary_statistic=None)
assert isfcs.shape == (2, n_voxels, n_targets)
isfcs = isfc(data[..., :2], targets=targets_data[..., :2],
pairwise=True, summary_statistic=None)
assert isfcs.shape == (2, n_voxels, n_targets)
# Check that supplying targets enforces leave-one-out
isfcs_pw = isfc(data, targets=targets_data, pairwise=True,
vectorize_isfcs=False, tolerate_nans=False)
assert isfcs_pw.shape == (n_subjects, n_voxels, n_targets)
logger.info("Finished testing ISFC options")
# Check for proper handling of NaNs in ISFC
def test_isfc_nans():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info("Testing ISC options")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
# Inject NaNs into data
data[0, 0, 0] = np.nan
    # Don't tolerate NaNs, should lose zeroth voxel
isfcs_loo = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=False)
assert np.sum(np.isnan(isfcs_loo)) == n_subjects * (n_voxels * 2 - 1)
# With vectorized ISFCs
isfcs_loo, iscs_loo = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=False)
assert np.sum(np.isnan(isfcs_loo)) == n_subjects * (n_voxels - 1)
# Tolerate all NaNs, only subject with NaNs yields NaN
isfcs_loo = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=True)
assert np.sum(np.isnan(isfcs_loo)) == n_voxels * 2 - 1
isfcs_loo, iscs_loo = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=True)
assert np.sum(np.isnan(isfcs_loo)) == n_voxels - 1
# Pairwise approach shouldn't care
isfcs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=False,
tolerate_nans=True)
isfcs_pw_F = isfc(data, pairwise=True, vectorize_isfcs=False,
tolerate_nans=False)
assert np.allclose(isfcs_pw_T, isfcs_pw_F, equal_nan=True)
assert (np.sum(np.isnan(isfcs_pw_T)) ==
np.sum(np.isnan(isfcs_pw_F)) ==
(n_voxels * 2 - 1) * (n_subjects - 1))
isfcs_pw_T, iscs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=True,
tolerate_nans=True)
isfcs_pw_F, iscs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=True,
tolerate_nans=False)
assert np.allclose(isfcs_pw_T, isfcs_pw_F, equal_nan=True)
assert (np.sum(np.isnan(isfcs_pw_T)) ==
np.sum(np.isnan(isfcs_pw_F)) ==
(n_voxels - 1) * (n_subjects - 1))
# Set proportion of nans to reject (70% and 90% non-NaN)
data[0, 0, :] = np.nan
data[0, 1, :n_subjects - int(n_subjects * .7)] = np.nan
data[0, 2, :n_subjects - int(n_subjects * .9)] = np.nan
isfcs_loo_T = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=True)
isfcs_loo_F = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=False)
isfcs_loo_95 = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=.95)
isfcs_loo_90 = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=.90)
isfcs_loo_80 = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=.8)
isfcs_loo_70 = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=.7)
isfcs_loo_60 = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=.6)
assert (np.sum(np.isnan(isfcs_loo_F)) ==
np.sum(np.isnan(isfcs_loo_95)) == 3420)
assert (np.sum(np.isnan(isfcs_loo_80)) ==
np.sum(np.isnan(isfcs_loo_90)) == 2430)
assert (np.sum(np.isnan(isfcs_loo_T)) ==
np.sum(np.isnan(isfcs_loo_60)) ==
np.sum(np.isnan(isfcs_loo_70)) == 1632)
assert np.array_equal(np.sum(np.isnan(isfcs_loo_F), axis=0),
np.sum(np.isnan(isfcs_loo_95), axis=0))
assert np.array_equal(np.sum(np.isnan(isfcs_loo_80), axis=0),
np.sum(np.isnan(isfcs_loo_90), axis=0))
assert np.all((np.array_equal(
np.sum(np.isnan(isfcs_loo_T), axis=0),
np.sum(np.isnan(isfcs_loo_60), axis=0)),
np.array_equal(
np.sum(np.isnan(isfcs_loo_T), axis=0),
np.sum(np.isnan(isfcs_loo_70), axis=0)),
np.array_equal(
np.sum(np.isnan(isfcs_loo_60), axis=0),
np.sum(np.isnan(isfcs_loo_70), axis=0))))
isfcs_loo_T, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=True)
isfcs_loo_F, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=False)
isfcs_loo_95, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=.95)
isfcs_loo_90, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=.90)
isfcs_loo_80, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=.8)
isfcs_loo_70, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=.7)
isfcs_loo_60, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
tolerate_nans=.6)
assert (np.sum(np.isnan(isfcs_loo_F)) ==
np.sum(np.isnan(isfcs_loo_95)) == 1680)
assert (np.sum(np.isnan(isfcs_loo_80)) ==
np.sum(np.isnan(isfcs_loo_90)) == 1194)
assert (np.sum(np.isnan(isfcs_loo_T)) ==
np.sum(np.isnan(isfcs_loo_60)) ==
np.sum(np.isnan(isfcs_loo_70)) == 802)
assert np.array_equal(np.sum(np.isnan(isfcs_loo_F), axis=0),
np.sum(np.isnan(isfcs_loo_95), axis=0))
assert np.array_equal(np.sum(np.isnan(isfcs_loo_80), axis=0),
np.sum(np.isnan(isfcs_loo_90), axis=0))
assert np.all((np.array_equal(
np.sum(np.isnan(isfcs_loo_T), axis=0),
np.sum(np.isnan(isfcs_loo_60), axis=0)),
np.array_equal(
np.sum(np.isnan(isfcs_loo_T), axis=0),
np.sum(np.isnan(isfcs_loo_70), axis=0)),
np.array_equal(
np.sum(np.isnan(isfcs_loo_60), axis=0),
np.sum(np.isnan(isfcs_loo_70), axis=0))))
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
# Make sure voxel with NaNs across all subjects is always removed
data[0, 0, :] = np.nan
isfcs_loo_T = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=True)
isfcs_loo_F = isfc(data, pairwise=False, vectorize_isfcs=False,
tolerate_nans=False)
assert np.allclose(isfcs_loo_T, isfcs_loo_F, equal_nan=True)
assert (np.sum(np.isnan(isfcs_loo_T)) ==
np.sum(np.isnan(isfcs_loo_F)) ==
1180)
isfcs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=False,
tolerate_nans=True)
isfcs_pw_F = isfc(data, pairwise=True, vectorize_isfcs=False,
tolerate_nans=False)
assert np.allclose(isfcs_pw_T, isfcs_pw_F, equal_nan=True)
assert (np.sum(np.isnan(isfcs_pw_T)) ==
            np.sum(np.isnan(isfcs_pw_F)) ==
11210)
# Check for NaN-handling in targets
n_targets = 15
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
targets_data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_targets,
data_type='array')
# Inject NaNs into targets_data
targets_data[0, 0, 0] = np.nan
    # Don't tolerate NaNs, should lose zeroth voxel
isfcs_loo = isfc(data, targets=targets_data, pairwise=False,
vectorize_isfcs=False, tolerate_nans=False)
assert np.sum(np.isnan(isfcs_loo)) == (n_subjects - 1) * (n_targets * 2)
# Single NaN in targets will get averaged out with tolerate
isfcs_loo = isfc(data, targets=targets_data, pairwise=False,
vectorize_isfcs=False, tolerate_nans=True)
assert np.sum(np.isnan(isfcs_loo)) == 0
def test_squareform_isfc():
# Set parameters for toy time series data
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info("Testing ISC options")
data = simulated_timeseries(n_subjects, n_TRs,
n_voxels=n_voxels, data_type='array',
random_state=random_state)
# Generate square redundant ISFCs
isfcs_r = isfc(data, vectorize_isfcs=False)
assert isfcs_r.shape == (n_subjects, n_voxels, n_voxels)
# Squareform these into condensed ISFCs and ISCs
isfcs_c, iscs_c = squareform_isfc(isfcs_r)
assert isfcs_c.shape == (n_subjects, n_voxels * (n_voxels - 1) / 2)
assert iscs_c.shape == (n_subjects, n_voxels)
# Go back the other way and check it's the same
isfcs_new = squareform_isfc(isfcs_c, iscs_c)
assert np.array_equal(isfcs_r, isfcs_new)
# Check against ISC function
assert np.allclose(isc(data), iscs_c, rtol=1e-03)
# Check for two subjects
isfcs_r = isfc(data[..., :2], vectorize_isfcs=False)
assert isfcs_r.shape == (n_voxels, n_voxels)
isfcs_c, iscs_c = squareform_isfc(isfcs_r)
assert isfcs_c.shape == (n_voxels * (n_voxels - 1) / 2,)
assert iscs_c.shape == (n_voxels,)
assert np.array_equal(isfcs_r, squareform_isfc(isfcs_c, iscs_c))
if __name__ == '__main__':
test_isc_input()
test_isc_options()
test_isc_output()
test_isc_nans()
test_bootstrap_isc()
test_permutation_isc()
test_timeshift_isc()
test_phaseshift_isc()
test_isfc_options()
test_isfc_nans()
test_squareform_isfc()
logger.info("Finished all ISC tests")
| 40,279 | 41.045929 | 78 | py |
brainiak | brainiak-master/tests/matnormal/test_matnormal_logp.py | import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import multivariate_normal
import tensorflow as tf
from brainiak.matnormal.utils import rmn
from brainiak.matnormal.matnormal_likelihoods import matnorm_logp
from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky
# X is m x n, so A should be m x p
m = 5
n = 4
p = 3
rtol = 1e-7
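# Strategy: when one axis has an identity covariance, the matrix-normal
# density factors into i.i.d. multivariate normals along the other axis,
# so each test sums scipy's MVN log-pdf over rows/columns as a reference.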
def test_against_scipy_mvn_row(seeded_rng):
rowcov = CovUnconstrainedCholesky(size=m)
colcov = CovIdentity(size=n)
X = rmn(np.eye(m), np.eye(n))
X_tf = tf.constant(X, "float64")
rowcov_np = rowcov._cov
scipy_answer = np.sum(multivariate_normal.logpdf(
X.T, np.zeros([m]), rowcov_np))
tf_answer = matnorm_logp(X_tf, rowcov, colcov)
assert_allclose(scipy_answer, tf_answer, rtol=rtol)
def test_against_scipy_mvn_col(seeded_rng):
rowcov = CovIdentity(size=m)
colcov = CovUnconstrainedCholesky(size=n)
X = rmn(np.eye(m), np.eye(n))
X_tf = tf.constant(X, "float64")
colcov_np = colcov._cov
scipy_answer = np.sum(multivariate_normal.logpdf(
X, np.zeros([n]), colcov_np))
tf_answer = matnorm_logp(X_tf, rowcov, colcov)
assert_allclose(scipy_answer, tf_answer, rtol=rtol)
| 1,232 | 25.234043 | 73 | py |
brainiak | brainiak-master/tests/matnormal/test_cov.py | import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import norm, wishart, invgamma, invwishart
import tensorflow as tf
from brainiak.matnormal.covs import (
CovIdentity,
CovAR1,
CovIsotropic,
CovDiagonal,
CovDiagonalGammaPrior,
CovUnconstrainedCholesky,
CovUnconstrainedCholeskyWishartReg,
CovUnconstrainedInvCholesky,
CovKroneckerFactored,
)
# X is m x n, so A should be m x p
m = 8
n = 4
p = 3
rtol = 1e-7
atol = 1e-7
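# Reference helper: computes log|Sigma|, Sigma^{-1}, and Sigma^{-1} X with
# dense NumPy linear algebra to validate the structured covariance classes.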
def logdet_sinv_np(X, sigma):
# logdet
sign, logdet = np.linalg.slogdet(sigma)
logdet_np = sign * logdet
# sigma-inv
sinv_np = np.linalg.inv(sigma)
# solve
sinvx_np = np.linalg.solve(sigma, X)
return logdet_np, sinv_np, sinvx_np
def logdet_sinv_np_mask(X, sigma, mask):
mask_indices = np.nonzero(mask)[0]
# logdet
_, logdet_np = np.linalg.slogdet(sigma[np.ix_(mask_indices, mask_indices)])
# sigma-inv
sinv_np_ = np.linalg.inv(sigma[np.ix_(mask_indices, mask_indices)])
# sigma-inverse *
sinvx_np_ = sinv_np_.dot(X[mask_indices, :])
sinv_np = np.zeros_like(sigma)
sinv_np[np.ix_(mask_indices, mask_indices)] = sinv_np_
sinvx_np = np.zeros_like(X)
sinvx_np[mask_indices, :] = sinvx_np_
return logdet_np, sinv_np, sinvx_np
X = norm.rvs(size=(m, n))
X_tf = tf.constant(X)
A = norm.rvs(size=(m, p))
A_tf = tf.constant(A)
eye = tf.eye(m, dtype=tf.float64)
def test_CovConstant(seeded_rng):
cov_np = wishart.rvs(df=m + 2, scale=np.eye(m))
cov = CovUnconstrainedCholesky(Sigma=cov_np)
# verify what we pass is what we get
cov_tf = cov._cov
assert_allclose(cov_tf, cov_np)
# compute the naive version
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_CovIdentity(seeded_rng):
cov = CovIdentity(size=m)
# compute the naive version
cov_np = np.eye(m)
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_CovIsotropic(seeded_rng):
cov = CovIsotropic(size=m)
# compute the naive version
cov_np = cov._cov * np.eye(cov.size)
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
# test initialization
cov = CovIsotropic(var=0.123, size=3)
assert_allclose(np.exp(cov.log_var.numpy()), 0.123)
def test_CovDiagonal(seeded_rng):
cov = CovDiagonal(size=m)
# compute the naive version
cov_np = cov._cov
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_CovDiagonal_initialized(seeded_rng):
cov_np = np.diag(np.exp(np.random.normal(size=m)))
cov = CovDiagonal(size=m, diag_var=np.diag(cov_np))
# compute the naive version
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_CovDiagonalGammaPrior(seeded_rng):
cov_np = np.diag(np.exp(np.random.normal(size=m)))
cov = CovDiagonalGammaPrior(size=m, sigma=np.diag(cov_np), alpha=1.5,
beta=1e-10)
ig = invgamma(1.5, scale=1e-10)
# compute the naive version
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
penalty_np = np.sum(ig.logpdf(1 / np.diag(cov_np)))
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
assert_allclose(penalty_np, cov.logp, rtol=rtol)
def test_CovUnconstrainedCholesky(seeded_rng):
cov = CovUnconstrainedCholesky(size=m)
L = cov.L.numpy()
cov_np = L @ L.T
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_CovUnconstrainedCholeskyWishartReg(seeded_rng):
cov = CovUnconstrainedCholeskyWishartReg(size=m)
L = cov.L.numpy()
cov_np = L @ L.T
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
# now compute the regularizer
reg = wishart.logpdf(cov_np, df=m + 2, scale=1e10 * np.eye(m))
assert_allclose(reg, cov.logp, rtol=rtol)
def test_CovUnconstrainedInvCholesky(seeded_rng):
init = invwishart.rvs(scale=np.eye(m), df=m + 2)
cov = CovUnconstrainedInvCholesky(invSigma=init)
Linv = cov.Linv
L = np.linalg.inv(Linv)
cov_np = L @ L.T
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_Cov2FactorKron(seeded_rng):
assert m % 2 == 0
dim1 = int(m / 2)
dim2 = 2
with pytest.raises(TypeError) as excinfo:
cov = CovKroneckerFactored(sizes=dim1)
assert "sizes is not a list" in str(excinfo.value)
cov = CovKroneckerFactored(sizes=[dim1, dim2])
L1 = (cov.L[0]).numpy()
L2 = (cov.L[1]).numpy()
cov_np = np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose()))
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_Cov3FactorKron(seeded_rng):
assert m % 4 == 0
dim1 = int(m / 4)
dim2 = 2
dim3 = 2
cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3])
L1 = (cov.L[0]).numpy()
L2 = (cov.L[1]).numpy()
L3 = (cov.L[2]).numpy()
cov_np = np.kron(
np.kron(np.dot(L1, L1.transpose()), np.dot(L2, L2.transpose())),
np.dot(L3, L3.transpose()),
)
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_Cov3FactorMaskedKron(seeded_rng):
assert m % 4 == 0
dim1 = int(m / 4)
dim2 = 2
dim3 = 2
mask = np.random.binomial(1, 0.5, m).astype(np.int32)
    if sum(mask) == 0:  # guarantee at least one unmasked element
mask[0] = 1
mask_indices = np.nonzero(mask)[0]
cov = CovKroneckerFactored(sizes=[dim1, dim2, dim3], mask=mask)
L1 = (cov.L[0]).numpy()
L2 = (cov.L[1]).numpy()
L3 = (cov.L[2]).numpy()
cov_np_factor = np.kron(L1, np.kron(L2, L3))[
np.ix_(mask_indices, mask_indices)]
cov_np = np.dot(cov_np_factor, cov_np_factor.transpose())
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X[mask_indices, :], cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol, atol=atol)
assert_allclose(
sinv_np,
cov.solve(eye).numpy()[np.ix_(mask_indices, mask_indices)],
rtol=rtol,
atol=atol,
)
assert_allclose(
sinvx_np, cov.solve(X_tf).numpy()[
mask_indices, :], rtol=rtol, atol=atol
)
def test_CovAR1(seeded_rng):
cov = CovAR1(size=m)
cov_np = np.linalg.inv(cov.solve(eye))
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
# test initialization
cov = CovAR1(rho=0.3, sigma=1.3, size=3)
assert_allclose(np.exp(cov.log_sigma.numpy()), 1.3)
assert_allclose((2 * tf.sigmoid(cov.rho_unc) - 1).numpy(), 0.3)
def test_CovAR1_scan_onsets(seeded_rng):
cov = CovAR1(size=m, scan_onsets=[0, m // 2])
# compute the naive version
cov_np = np.linalg.inv(cov.solve(eye))
logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
assert_allclose(logdet_np, cov.logdet, rtol=rtol)
assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
def test_raises(seeded_rng):
with pytest.raises(RuntimeError):
CovUnconstrainedCholesky(Sigma=np.eye(3), size=4)
with pytest.raises(RuntimeError):
CovUnconstrainedCholesky()
with pytest.raises(RuntimeError):
CovUnconstrainedInvCholesky(invSigma=np.eye(3), size=4)
with pytest.raises(RuntimeError):
CovUnconstrainedInvCholesky()
| 9,042 | 28.07717 | 79 | py |
brainiak | brainiak-master/tests/matnormal/test_matnormal_logp_marginal.py | import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import multivariate_normal
import tensorflow as tf
from brainiak.matnormal.utils import rmn
from brainiak.matnormal.matnormal_likelihoods import (
matnorm_logp_marginal_col,
matnorm_logp_marginal_row,
)
from brainiak.matnormal.covs import CovIdentity, CovUnconstrainedCholesky
# X is m x n, so A should be m x p
m = 5
n = 4
p = 3
rtol = 1e-7
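# Marginalizing a latent factor with prior covariance Q adds A Q A^T (row
# case) or A^T Q A (column case) to the corresponding covariance, so the
# marginal stays matrix normal and can be checked against scipy's MVN.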
def test_against_scipy_mvn_row_marginal(seeded_rng):
rowcov = CovUnconstrainedCholesky(size=m)
colcov = CovIdentity(size=n)
Q = CovUnconstrainedCholesky(size=p)
X = rmn(np.eye(m), np.eye(n))
A = rmn(np.eye(m), np.eye(p))
A_tf = tf.constant(A, "float64")
X_tf = tf.constant(X, "float64")
Q_np = Q._cov
rowcov_np = rowcov._cov + A.dot(Q_np).dot(A.T)
scipy_answer = np.sum(multivariate_normal.logpdf(
X.T, np.zeros([m]), rowcov_np))
tf_answer = matnorm_logp_marginal_row(X_tf, rowcov, colcov, A_tf, Q)
assert_allclose(scipy_answer, tf_answer, rtol=rtol)
def test_against_scipy_mvn_col_marginal(seeded_rng):
rowcov = CovIdentity(size=m)
colcov = CovUnconstrainedCholesky(size=n)
Q = CovUnconstrainedCholesky(size=p)
X = rmn(np.eye(m), np.eye(n))
A = rmn(np.eye(p), np.eye(n))
A_tf = tf.constant(A, "float64")
X_tf = tf.constant(X, "float64")
Q_np = Q._cov
colcov_np = colcov._cov + A.T.dot(Q_np).dot(A)
scipy_answer = np.sum(multivariate_normal.logpdf(
X, np.zeros([n]), colcov_np))
tf_answer = matnorm_logp_marginal_col(X_tf, rowcov, colcov, A_tf, Q)
assert_allclose(scipy_answer, tf_answer, rtol=rtol)
| 1,662 | 23.820896 | 73 | py |