repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
    """Empty namespace object shared across device replicas of one sub-module.

    `execute_replication_callbacks` creates one instance per sub-module of the
    master copy and hands the same instance to every replica's
    `__data_parallel_replicate__` hook, so copies can exchange information.
    """
    pass
def execute_replication_callbacks(modules):
    """Invoke `__data_parallel_replicate__(ctx, copy_id)` on every sub-module.

    One `CallbackContext` is created per sub-module of the master copy and
    shared across all device replicas (the replicas are isomorphic), so copies
    of the same sub-module can communicate through it.  Because `modules[0]`
    is the master copy, its callbacks run before those of any slave copy.
    """
    num_submodules = len(list(modules[0].modules()))
    contexts = [CallbackContext() for _ in range(num_submodules)]
    for copy_id, replica in enumerate(modules):
        for ctx, sub in zip(contexts, replica.modules()):
            callback = getattr(sub, '__data_parallel_replicate__', None)
            if callback is not None:
                callback(ctx, copy_id)
class DataParallelWithCallback(DataParallel):
    """`DataParallel` variant that runs replication callbacks.

    After the standard `replicate` step, every freshly created module copy
    that defines `__data_parallel_replicate__(ctx, copy_id)` has that hook
    invoked (see `execute_replication_callbacks`).

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """
    def replicate(self, module, device_ids):
        # Replicate as usual, then notify every copy that wants to know.
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
def patch_replication_callback(data_parallel):
    """Monkey-patch an existing `DataParallel` so replication callbacks run.

    The object's `replicate` method is wrapped: after the original
    replication, `execute_replication_callbacks` is applied to the fresh
    module copies.  Useful when you already have a customized `DataParallel`
    implementation.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)
    original_replicate = data_parallel.replicate

    @functools.wraps(original_replicate)
    def new_replicate(module, device_ids):
        replicas = original_replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas

    data_parallel.replicate = new_replicate
| 3,226 | 32.968421 | 115 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
    """`unittest.TestCase` extended with a tensor-closeness assertion."""

    def assertTensorClose(self, x, y):
        """Assert that tensors `x` and `y` are element-wise close.

        The failure message reports the maximum absolute difference and a
        relative difference ('NaN' when `y` is identically zero).
        """
        max_abs_diff = float((x - y).abs().max())
        if (y == 0).all():
            rel_diff = 'NaN'
        else:
            rel_diff = float((max_abs_diff / y).abs().max())
        message = (
            'Tensor close check failed\n'
            'adiff={}\n'
            'rdiff={}\n'
        ).format(max_abs_diff, rel_diff)
        self.assertTrue(torch.allclose(x, y, atol=1e-5, rtol=1e-3), message)
| 768 | 24.633333 | 76 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
__all__ = [
'set_sbn_eps_mode',
'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
'patch_sync_batchnorm', 'convert_model'
]
# Module-level selector for how `eps` enters the denominator in
# `_compute_mean_std`: 'clamp' (variance clamped to eps) or 'plus'
# (eps added to variance).  Changed via `set_sbn_eps_mode`.
SBN_EPS_MODE = 'clamp'
def set_sbn_eps_mode(mode):
    """Select how `eps` is applied to the variance: 'clamp' or 'plus'."""
    assert mode in ('clamp', 'plus')
    global SBN_EPS_MODE
    SBN_EPS_MODE = mode
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
# Message a replica sends to the master: per-channel sum, squared-sum, and
# the number of elements contributing to them.
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
# Reply broadcast back to each replica: global mean and inverse std-dev
# (the first field is named 'sum' but carries the mean — see
# `_data_parallel_master`, which broadcasts `mean, inv_std`).
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
    """Base class for cross-GPU synchronized batch normalization.

    In parallel training each replica sends its per-channel sum/squared-sum
    to the master copy (via `SyncMaster`/`SlavePipe`); the master computes
    global statistics and broadcasts them back, so every replica normalizes
    with the same mean and inverse std-dev.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
        assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine,
                                                     track_running_stats=track_running_stats)
        if not self.track_running_stats:
            import warnings
            warnings.warn('track_running_stats=False is not supported by the SynchronizedBatchNorm.')
        # Master-side coordinator; replicas receive a SlavePipe at replication
        # time (see __data_parallel_replicate__).
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None
    def forward(self, input):
        """Normalize `input`, synchronizing statistics across devices only
        when running as a parallel replica in training mode."""
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            return F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)
        # Resize the input to (B, C, -1).
        input_shape = input.size()
        assert input.size(1) == self.num_features, 'Channel size mismatch: got {}, expect {}.'.format(input.size(1), self.num_features)
        input = input.view(input.size(0), self.num_features, -1)
        # Compute the sum and square-sum.
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)
        # Reduce-and-broadcast the statistics.
        # The master replica (id 0) collects everyone's messages and runs the
        # reduction; slaves block until the reply arrives through their pipe.
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Compute the output.
        if self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
        # Reshape it.
        return output.view(input_shape)
    def __data_parallel_replicate__(self, ctx, copy_id):
        """Replication hook (see `execute_replication_callbacks`): wires each
        replica to the shared `SyncMaster` through the per-module context."""
        self._is_parallel = True
        self._parallel_id = copy_id
        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)
    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
        # Interleave each replica's (sum, ssum) pair into one flat argument list.
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]  # flatten
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
        # Broadcast (mean, inv_std) to every participating device, then pair
        # each replica's identifier with its slice of the broadcast result.
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for i, rec in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
        return outputs
    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        # Running stats use the unbiased variance; normalization below uses
        # the biased one, matching torch.nn.BatchNorm semantics.
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size
        if hasattr(torch, 'no_grad'):
            with torch.no_grad():
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        else:
            self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
            self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        # How eps enters the denominator is controlled by set_sbn_eps_mode().
        if SBN_EPS_MODE == 'clamp':
            return mean, bias_var.clamp(self.eps) ** -0.5
        elif SBN_EPS_MODE == 'plus':
            return mean, (bias_var + self.eps) ** -0.5
        else:
            raise ValueError('Unknown EPS mode: {}.'.format(SBN_EPS_MODE))
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Applies Synchronized Batch Normalization over a 2d or 3d input.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in ``torch.nn.BatchNorm1d``, the mean and
    standard-deviation are reduced across *all* devices during training, so
    the statistics cover every training sample regardless of how
    ``nn.DataParallel`` scattered the batch.  With a single GPU or on CPU this
    module behaves exactly like the built-in implementation.

    gamma and beta are learnable parameter vectors of size C (the input
    size).  During training a running estimate of the mean and variance is
    kept (default momentum 0.1) and used for normalization at evaluation
    time.  Statistics are computed over the `(N, L)` slices, hence the common
    name Temporal BatchNorm.

    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm1d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm1d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Accept only (N, C) or (N, C, L) inputs.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Applies Synchronized Batch Normalization over a 4d input (a mini-batch
    of 3d inputs).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in ``torch.nn.BatchNorm2d``, the mean and
    standard-deviation are reduced across *all* devices during training, so
    the statistics cover every training sample regardless of how
    ``nn.DataParallel`` scattered the batch.  With a single GPU or on CPU this
    module behaves exactly like the built-in implementation.

    gamma and beta are learnable parameter vectors of size C (the input
    size).  During training a running estimate of the mean and variance is
    kept (default momentum 0.1) and used for normalization at evaluation
    time.  Statistics are computed over the `(N, H, W)` slices, hence the
    common name Spatial BatchNorm.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm2d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm2d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Accept only (N, C, H, W) inputs.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Applies Synchronized Batch Normalization over a 5d input (a mini-batch
    of 4d inputs).

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in ``torch.nn.BatchNorm3d``, the mean and
    standard-deviation are reduced across *all* devices during training, so
    the statistics cover every training sample regardless of how
    ``nn.DataParallel`` scattered the batch.  With a single GPU or on CPU this
    module behaves exactly like the built-in implementation.

    gamma and beta are learnable parameter vectors of size C (the input
    size).  During training a running estimate of the mean and variance is
    kept (default momentum 0.1) and used for normalization at evaluation
    time.  Statistics are computed over the `(N, D, H, W)` slices, hence the
    common names Volumetric or Spatio-temporal BatchNorm.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x depth x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
        >>> output = m(input)
    """
    def _check_input_dim(self, input):
        # Accept only (N, C, D, H, W) inputs.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
@contextlib.contextmanager
def patch_sync_batchnorm():
    """Temporarily replace `torch.nn.BatchNorm{1,2,3}d` with the synchronized
    implementations.

    Within the `with` block, models constructed via `nn.BatchNorm*d` get the
    synchronized versions.  The originals are restored on exit — including
    when the body raises (the original implementation skipped restoration on
    exception, leaving `torch.nn` permanently patched).
    """
    import torch.nn as nn
    backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d
    nn.BatchNorm1d = SynchronizedBatchNorm1d
    nn.BatchNorm2d = SynchronizedBatchNorm2d
    nn.BatchNorm3d = SynchronizedBatchNorm3d
    try:
        yield
    finally:
        # Always restore, even if the with-body raised.
        nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
def convert_model(module):
    """Traverse the input module and its child recursively
    and replace all instance of torch.nn.modules.batchnorm.BatchNorm*N*d
    to SynchronizedBatchNorm*N*d

    Args:
        module: the input module needs to be convert to SyncBN model

    Returns:
        The converted module (a new BN instance where a BatchNorm was found,
        otherwise the input module with converted children).

    Examples:
        >>> import torch.nn as nn
        >>> import torchvision
        >>> # m is a standard pytorch model
        >>> m = torchvision.models.resnet18(True)
        >>> m = nn.DataParallel(m)
        >>> # after convert, m is using SyncBN
        >>> m = convert_model(m)
    """
    if isinstance(module, torch.nn.DataParallel):
        # Unwrap, convert the inner model, then re-wrap with the
        # callback-aware DataParallel so replicas register with the master.
        mod = module.module
        mod = convert_model(mod)
        mod = DataParallelWithCallback(mod, device_ids=module.device_ids)
        return mod
    mod = module
    # If this module itself is a BatchNorm, build the synchronized
    # counterpart and copy over running stats and affine parameters.
    for pth_module, sync_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
                                        torch.nn.modules.batchnorm.BatchNorm2d,
                                        torch.nn.modules.batchnorm.BatchNorm3d],
                                       [SynchronizedBatchNorm1d,
                                        SynchronizedBatchNorm2d,
                                        SynchronizedBatchNorm3d]):
        if isinstance(module, pth_module):
            mod = sync_module(module.num_features, module.eps, module.momentum, module.affine)
            mod.running_mean = module.running_mean
            mod.running_var = module.running_var
            if module.affine:
                mod.weight.data = module.weight.data.clone().detach()
                mod.bias.data = module.bias.data.clone().detach()
    # Recurse into children regardless, re-attaching converted versions.
    for name, child in module.named_children():
        mod.add_module(name, convert_model(child))
    return mod
| 16,465 | 38.869249 | 135 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
    """
    Reference re-implementation of 2d batch normalization, used for testing
    the numerical stability of the synchronized version.

    Author: acgtyrant
    See also:
        https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        # Moving averages back to their initial state (mean 0, var 1).
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        n, c, h, w = input_.size()
        count = n * h * w
        # Collapse everything except channels: (C, N*H*W).
        flat = input_.permute(1, 0, 2, 3).contiguous().view(c, count)
        channel_sum = flat.sum(1)
        channel_sqsum = flat.pow(2).sum(1)
        mean = channel_sum / count
        sumvar = channel_sqsum - channel_sum * mean
        # Running stats track the unbiased variance.
        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        unbias_var = sumvar / (count - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbias_var.detach()
        )
        # Normalization uses the biased variance, as in torch.nn.BatchNorm2d.
        inv_std = 1 / ((sumvar / count) + self.eps).pow(0.5)
        normalized = (
            (flat - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
            self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
        return normalized.view(c, n, h, w).permute(1, 0, 2, 3).contiguous()
| 2,385 | 30.813333 | 95 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/comm.py | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
    """A thread-safe future implementation. Used only as one-to-one pipe."""
    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)
    def put(self, result):
        """Store `result` and wake the waiting consumer.

        Raises:
            AssertionError: if the previous result was never fetched.
        """
        with self._lock:
            assert self._result is None, 'Previous result has\'t been fetched.'
            self._result = result
            self._cond.notify()
    def get(self):
        """Block until a result is available, then return it and clear the slot."""
        with self._lock:
            # Loop instead of a single `if`: Condition.wait() may return
            # spuriously, so the predicate must be re-checked after every
            # wakeup (per the `threading` documentation).
            while self._result is None:
                self._cond.wait()
            res = self._result
            self._result = None
            return res
# Registry entry the master keeps per slave: the future used for the reply.
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
# (identifier, queue, result) triple backing `SlavePipe`.
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
    """Pipe used by a slave device to exchange messages with the master."""

    def run_slave(self, msg):
        """Send `msg` to the master, block for the reply, then acknowledge."""
        self.queue.put((self.identifier, msg))
        reply = self.result.get()
        # Acknowledge receipt so the master can finish its round.
        self.queue.put(True)
        return reply
class SyncMaster(object):
    """An abstract `SyncMaster` object.

    - During the replication, as the data parallel will trigger an callback of each module, all slave devices should
    call `register(id)` and obtain an `SlavePipe` to communicate with the master.
    - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
    and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine to message passed
    back to each slave devices.
    """
    def __init__(self, master_callback):
        """
        Args:
            master_callback: a callback to be invoked after having collected messages from slave devices.
        """
        self._master_callback = master_callback
        self._queue = queue.Queue()
        self._registry = collections.OrderedDict()
        self._activated = False
    def __getstate__(self):
        # Only the callback is pickled; queue/registry are rebuilt fresh on
        # unpickling via __setstate__.
        return {'master_callback': self._master_callback}
    def __setstate__(self, state):
        self.__init__(state['master_callback'])
    def register_slave(self, identifier):
        """
        Register an slave device.

        Args:
            identifier: an identifier, usually is the device id.

        Returns: a `SlavePipe` object which can be used to communicate with the master device.
        """
        if self._activated:
            # Registration after a completed round means a new replication:
            # reset state for the new set of replicas.
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)
    def run_master(self, master_msg):
        """
        Main entry for the master device in each forward pass.
        The messages were first collected from each devices (including the master device), and then
        an callback will be invoked to compute the message to be sent back to each devices
        (including the master device).

        Args:
            master_msg: the message that the master want to send to itself. This will be placed as the first
            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.

        Returns: the message to be sent back to the master device.
        """
        self._activated = True
        # Master's own message first; then drain one message per slave.
        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())
        results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belongs to the master.'
        # Deliver each slave's reply through its registered future.
        for i, res in results:
            if i == 0:
                continue
            self._registry[i].result.put(res)
        # Wait for every slave to acknowledge receipt (each puts back True).
        for i in range(self.nr_slaves):
            assert self._queue.get() is True
        return results[0][1]
    @property
    def nr_slaves(self):
        # Number of currently registered slave devices.
        return len(self._registry)
| 4,449 | 31.246377 | 117 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/__init__.py | # -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import set_sbn_eps_mode
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .batchnorm import patch_sync_batchnorm, convert_model
from .replicate import DataParallelWithCallback, patch_replication_callback
| 547 | 35.533333 | 96 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/transformers.py | import torch
from torch.nn import Module, ModuleList, Linear, Dropout, LayerNorm, Identity, Parameter, init
import torch.nn.functional as F
from .stochastic_depth import DropPath
class Attention(Module):
    """
    Multi-head self-attention.
    Obtained from timm: github.com:rwightman/pytorch-image-models
    """
    def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1):
        super().__init__()
        self.num_heads = num_heads
        # Per-head scaling factor 1/sqrt(head_dim).
        self.scale = (dim // num_heads) ** -0.5
        self.qkv = Linear(dim, dim * 3, bias=False)
        self.attn_drop = Dropout(attention_dropout)
        self.proj = Linear(dim, dim)
        self.proj_drop = Dropout(projection_dropout)

    def forward(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # Project to q/k/v and split heads: each is (B, heads, tokens, head_dim).
        q, k, v = (
            self.qkv(x)
            .reshape(batch, tokens, 3, self.num_heads, head_dim)
            .permute(2, 0, 3, 1, 4)
            .unbind(0)
        )
        scores = (q @ k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
class MaskedAttention(Module):
    """Multi-head self-attention supporting an optional boolean token mask."""

    def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1):
        super().__init__()
        self.num_heads = num_heads
        # Per-head scaling factor 1/sqrt(head_dim).
        self.scale = (dim // num_heads) ** -0.5
        self.qkv = Linear(dim, dim * 3, bias=False)
        self.attn_drop = Dropout(attention_dropout)
        self.proj = Linear(dim, dim)
        self.proj_drop = Dropout(projection_dropout)

    def forward(self, x, mask=None):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # Project to q/k/v and split heads: each is (B, heads, tokens, head_dim).
        q, k, v = (
            self.qkv(x)
            .reshape(batch, tokens, 3, self.num_heads, head_dim)
            .permute(2, 0, 3, 1, 4)
            .unbind(0)
        )
        scores = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            fill_value = -torch.finfo(scores.dtype).max
            assert mask.shape[-1] == scores.shape[-1], 'mask has incorrect dimensions'
            # Pairwise mask: position (i, j) survives only if both tokens do.
            pair_mask = mask[:, None, :] * mask[:, :, None]
            pair_mask = pair_mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
            scores.masked_fill_(~pair_mask, fill_value)
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
class TransformerEncoderLayer(Module):
    """
    Pre-norm transformer encoder block (self-attention + MLP).
    Inspired by torch.nn.TransformerEncoderLayer and timm.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 attention_dropout=0.1, drop_path_rate=0.1):
        super(TransformerEncoderLayer, self).__init__()
        self.pre_norm = LayerNorm(d_model)
        self.self_attn = Attention(dim=d_model, num_heads=nhead,
                                   attention_dropout=attention_dropout, projection_dropout=dropout)
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout1 = Dropout(dropout)
        self.norm1 = LayerNorm(d_model)
        self.linear2 = Linear(dim_feedforward, d_model)
        self.dropout2 = Dropout(dropout)
        # No-op when stochastic depth is disabled.
        self.drop_path = Identity() if drop_path_rate <= 0 else DropPath(drop_path_rate)
        self.activation = F.gelu

    def forward(self, src: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        # Attention sub-block: pre-norm, residual, stochastic depth.
        src = src + self.drop_path(self.self_attn(self.pre_norm(src)))
        src = self.norm1(src)
        # Feed-forward sub-block.
        hidden = self.dropout1(self.activation(self.linear1(src)))
        return src + self.drop_path(self.dropout2(self.linear2(hidden)))
class MaskedTransformerEncoderLayer(Module):
    """
    Pre-norm transformer encoder block (masked self-attention + MLP).
    Inspired by torch.nn.TransformerEncoderLayer and timm.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 attention_dropout=0.1, drop_path_rate=0.1):
        super(MaskedTransformerEncoderLayer, self).__init__()
        self.pre_norm = LayerNorm(d_model)
        self.self_attn = MaskedAttention(dim=d_model, num_heads=nhead,
                                         attention_dropout=attention_dropout, projection_dropout=dropout)
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout1 = Dropout(dropout)
        self.norm1 = LayerNorm(d_model)
        self.linear2 = Linear(dim_feedforward, d_model)
        self.dropout2 = Dropout(dropout)
        # No-op when stochastic depth is disabled.
        self.drop_path = Identity() if drop_path_rate <= 0 else DropPath(drop_path_rate)
        self.activation = F.gelu

    def forward(self, src: torch.Tensor, mask=None, *args, **kwargs) -> torch.Tensor:
        # Attention sub-block: pre-norm, residual, stochastic depth.
        src = src + self.drop_path(self.self_attn(self.pre_norm(src), mask))
        src = self.norm1(src)
        # Feed-forward sub-block.
        hidden = self.dropout1(self.activation(self.linear1(src)))
        return src + self.drop_path(self.dropout2(self.linear2(hidden)))
class TransformerClassifier(Module):
    """Transformer encoder stack producing per-token features.

    Note: the sequence-pooling / `fc` classification head from the original
    CCT implementation has been removed for this benchmark; `forward` returns
    the LayerNorm-ed token sequence of shape (B, seq, embedding_dim).

    Args:
        seq_pool: when False, a learnable class token is prepended to the
            sequence (the attention-pooling layer is still created for
            checkpoint compatibility but is unused in `forward`).
        embedding_dim: token feature dimension.
        num_layers: number of encoder blocks.
        num_heads: attention heads per block.
        mlp_ratio: feed-forward width as a multiple of `embedding_dim`.
        num_classes: kept for interface compatibility (head removed).
        dropout: dropout on embeddings and inside the encoder blocks.
        attention_dropout: dropout on attention weights.
        stochastic_depth: maximum drop-path rate, linearly scaled over layers.
        positional_embedding: 'learnable', 'sine' or 'none' (anything else
            falls back to 'sine').
        sequence_length: number of input tokens; required unless
            `positional_embedding == 'none'`.
    """
    def __init__(self,
                 seq_pool=True,
                 embedding_dim=768,
                 num_layers=12,
                 num_heads=12,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 dropout=0.1,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 positional_embedding='learnable',
                 sequence_length=None):
        super().__init__()
        positional_embedding = positional_embedding if \
            positional_embedding in ['sine', 'learnable', 'none'] else 'sine'
        dim_feedforward = int(embedding_dim * mlp_ratio)
        self.embedding_dim = embedding_dim
        self.sequence_length = sequence_length
        self.seq_pool = seq_pool
        assert sequence_length is not None or positional_embedding == 'none', \
            f"Positional embedding is set to {positional_embedding} and" \
            f" the sequence length was not specified."
        if not seq_pool:
            # Reserve one extra position for the class token.
            sequence_length += 1
            self.class_emb = Parameter(torch.zeros(1, 1, self.embedding_dim),
                                       requires_grad=True)
        else:
            self.attention_pool = Linear(self.embedding_dim, 1)
        if positional_embedding != 'none':
            if positional_embedding == 'learnable':
                self.positional_emb = Parameter(torch.zeros(1, sequence_length, embedding_dim),
                                                requires_grad=True)
                init.trunc_normal_(self.positional_emb, std=0.2)
            else:
                self.positional_emb = Parameter(self.sinusoidal_embedding(sequence_length, embedding_dim),
                                                requires_grad=False)
        else:
            self.positional_emb = None
        self.dropout = Dropout(p=dropout)
        # Drop-path rate increases linearly from 0 to `stochastic_depth`.
        dpr = [x.item() for x in torch.linspace(0, stochastic_depth, num_layers)]
        self.blocks = ModuleList([
            TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                    dim_feedforward=dim_feedforward, dropout=dropout,
                                    attention_dropout=attention_dropout, drop_path_rate=dpr[i])
            for i in range(num_layers)])
        self.norm = LayerNorm(embedding_dim)
        # self.fc = Linear(embedding_dim, num_classes)  # head removed
        self.apply(self.init_weight)
    def forward(self, x):
        if self.positional_emb is None and x.size(1) < self.sequence_length:
            # Zero-pad the token dimension up to the expected sequence length.
            # BUGFIX: originally referenced `self.n_channels`, an attribute
            # never defined on this class (AttributeError at runtime).
            x = F.pad(x, (0, 0, 0, self.sequence_length - x.size(1)), mode='constant', value=0)
        if not self.seq_pool:
            cls_token = self.class_emb.expand(x.shape[0], -1, -1)
            x = torch.cat((cls_token, x), dim=1)
        if self.positional_emb is not None:
            x += self.positional_emb
        x = self.dropout(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        # Sequence pooling and the classification head were removed for this
        # benchmark: the full (B, seq, dim) token sequence is returned.
        return x
    @staticmethod
    def init_weight(m):
        # Truncated-normal linear weights, zero biases, unit LayerNorm gains.
        if isinstance(m, Linear):
            init.trunc_normal_(m.weight, std=.02)
        if isinstance(m, Linear) and m.bias is not None:
            init.constant_(m.bias, 0)
        elif isinstance(m, LayerNorm):
            init.constant_(m.bias, 0)
            init.constant_(m.weight, 1.0)
    @staticmethod
    def sinusoidal_embedding(n_channels, dim):
        """Fixed sine/cosine positional table of shape (1, n_channels, dim)."""
        pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)]
                                for p in range(n_channels)])
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        return pe.unsqueeze(0)
class MaskedTransformerClassifier(Module):
    """Transformer encoder classifier with optional padding-mask support.

    The token sequence is processed by MaskedTransformerEncoderLayer blocks
    and reduced either by learned sequence pooling (seq_pool=True) or by a
    prepended class token, then projected to num_classes logits by `fc`.
    """
    def __init__(self,
                 seq_pool=True,
                 embedding_dim=768,
                 num_layers=12,
                 num_heads=12,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 dropout=0.1,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 positional_embedding='sine',
                 seq_len=None,
                 *args, **kwargs):
        super().__init__()
        # Unknown positional-embedding modes silently fall back to 'sine'.
        positional_embedding = positional_embedding if \
            positional_embedding in ['sine', 'learnable', 'none'] else 'sine'
        dim_feedforward = int(embedding_dim * mlp_ratio)
        self.embedding_dim = embedding_dim
        self.seq_len = seq_len
        self.seq_pool = seq_pool

        assert seq_len is not None or positional_embedding == 'none', \
            f"Positional embedding is set to {positional_embedding} and" \
            f" the sequence length was not specified."

        if not seq_pool:
            # Class-token pooling: reserve one extra sequence position.
            seq_len += 1
            self.class_emb = Parameter(torch.zeros(1, 1, self.embedding_dim),
                                       requires_grad=True)
        else:
            self.attention_pool = Linear(self.embedding_dim, 1)

        if positional_embedding != 'none':
            if positional_embedding == 'learnable':
                seq_len += 1  # padding idx
                self.positional_emb = Parameter(torch.zeros(1, seq_len, embedding_dim),
                                                requires_grad=True)
                init.trunc_normal_(self.positional_emb, std=0.2)
            else:
                self.positional_emb = Parameter(self.sinusoidal_embedding(seq_len,
                                                                          embedding_dim,
                                                                          padding_idx=True),
                                                requires_grad=False)
        else:
            self.positional_emb = None

        self.dropout = Dropout(p=dropout)
        # Per-layer stochastic-depth (drop-path) rates, linearly increasing.
        dpr = [x.item() for x in torch.linspace(0, stochastic_depth, num_layers)]
        self.blocks = ModuleList([
            MaskedTransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                          dim_feedforward=dim_feedforward, dropout=dropout,
                                          attention_dropout=attention_dropout, drop_path_rate=dpr[i])
            for i in range(num_layers)])
        self.norm = LayerNorm(embedding_dim)

        self.fc = Linear(embedding_dim, num_classes)
        self.apply(self.init_weight)

    def forward(self, x, mask=None):
        """Classify a (batch, seq, embedding_dim) sequence; `mask` marks valid tokens."""
        if self.positional_emb is None and x.size(1) < self.seq_len:
            # Pad the sequence dimension up to seq_len.
            # Fixed: was `self.n_channels - x.size(1)`, but `n_channels` is
            # never defined on this class and raised AttributeError here.
            x = F.pad(x, (0, 0, 0, self.seq_len - x.size(1)), mode='constant', value=0)

        if not self.seq_pool:
            cls_token = self.class_emb.expand(x.shape[0], -1, -1)
            x = torch.cat((cls_token, x), dim=1)
            if mask is not None:
                # The class token must always be attended to.
                mask = torch.cat([torch.ones(size=(mask.shape[0], 1), device=mask.device), mask.float()], dim=1)
                mask = (mask > 0)

        if self.positional_emb is not None:
            x += self.positional_emb

        x = self.dropout(x)

        for blk in self.blocks:
            x = blk(x, mask=mask)
        x = self.norm(x)

        if self.seq_pool:
            # Learned attention pooling over the sequence dimension.
            x = torch.matmul(F.softmax(self.attention_pool(x), dim=1).transpose(-1, -2), x).squeeze(-2)
        else:
            x = x[:, 0]

        x = self.fc(x)
        return x

    @staticmethod
    def init_weight(m):
        """Truncated-normal init for Linear (zero bias); unit affine for LayerNorm."""
        if isinstance(m, Linear):
            init.trunc_normal_(m.weight, std=.02)
            if isinstance(m, Linear) and m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, LayerNorm):
            init.constant_(m.bias, 0)
            init.constant_(m.weight, 1.0)

    @staticmethod
    def sinusoidal_embedding(n_channels, dim, padding_idx=False):
        """Fixed sinusoidal positions, shape (1, n_channels(+1), dim).

        With padding_idx=True an all-zeros row is prepended so index 0 can
        serve as the padding position.
        """
        pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)]
                                for p in range(n_channels)])
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        pe = pe.unsqueeze(0)
        if padding_idx:
            return torch.cat([torch.zeros((1, 1, dim)), pe], dim=1)
        return pe
| 13,211 | 38.088757 | 112 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/embedder.py | import torch.nn as nn
class Embedder(nn.Module):
    """Token-embedding lookup with optional pretrained weights and masking.

    When `pretrained_weight` is given, the table is initialized from it;
    otherwise a fresh (vocab_size, word_embedding_dim) table is created with
    `padding_idx`.  `embed_freeze` controls whether the table receives
    gradients.
    """
    def __init__(self,
                 word_embedding_dim=300,
                 vocab_size=100000,
                 padding_idx=1,
                 pretrained_weight=None,
                 embed_freeze=False,
                 *args, **kwargs):
        super(Embedder, self).__init__()
        if pretrained_weight is not None:
            table = nn.Embedding.from_pretrained(pretrained_weight, freeze=embed_freeze)
        else:
            table = nn.Embedding(vocab_size, word_embedding_dim, padding_idx=padding_idx)
        self.embeddings = table
        self.embeddings.weight.requires_grad = not embed_freeze

    def forward_mask(self, mask):
        """Collapse a (batch, seq) mask to a boolean keep-mask of the same shape."""
        batch, seq = mask.shape
        summed = mask.view(batch, seq, 1).sum(-1)
        return summed > 0

    def forward(self, x, mask=None):
        """Embed token ids `x`; zero positions where `mask` is falsy. Returns (embed, mask)."""
        embedded = self.embeddings(x)
        if mask is not None:
            keep = self.forward_mask(mask).unsqueeze(-1).float()
            embedded = embedded * keep
        return embedded, mask

    @staticmethod
    def init_weight(m):
        """Truncated-normal init for Linear layers; plain normal otherwise."""
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        else:
            nn.init.normal_(m.weight)
| 1,332 | 34.078947 | 96 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/stochastic_depth.py | # Thanks to rwightman's timm package
# github.com:rwightman/pytorch-image-models
import torch
import torch.nn as nn
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """
    Obtained from: github.com:rwightman/pytorch-image-models
    Per-sample stochastic depth: with probability `drop_prob` an entire
    sample's residual branch is zeroed; surviving samples are rescaled by
    1/keep_prob so the expectation is unchanged.  Identity when
    drop_prob == 0 or when not training.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    keep_mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    keep_mask.floor_()  # 1 with prob keep_prob, else 0
    return x.div(keep_prob) * keep_mask
class DropPath(nn.Module):
    """
    Obtained from: github.com:rwightman/pytorch-image-models
    Module wrapper around `drop_path`; active only while in training mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
| 1,586 | 38.675 | 108 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/cct.py | from torch.hub import load_state_dict_from_url
import torch.nn as nn
import torch
import torch.nn.functional as F
from .transformers import TransformerClassifier
from .tokenizer import Tokenizer
from .helpers import pe_check
from timm.models.registry import register_model
# Download locations of the official pre-trained CCT checkpoints; keys match
# the factory-function names below (training dataset/epochs are encoded in
# each file name).
model_urls = {
    'cct_7_3x1_32':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_cifar10_300epochs.pth',
    'cct_7_3x1_32_sine':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_sine_cifar10_5000epochs.pth',
    'cct_7_3x1_32_c100':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_cifar100_300epochs.pth',
    'cct_7_3x1_32_sine_c100':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_sine_cifar100_5000epochs.pth',
    'cct_7_7x2_224_sine':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_7x2_224_flowers102.pth',
    'cct_14_7x2_224':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_14_7x2_224_imagenet.pth',
    'cct_14_7x2_384':
        'https://shi-labs.com/projects/cct/checkpoints/finetuned/cct_14_7x2_384_imagenet.pth',
    'cct_14_7x2_384_fl':
        'https://shi-labs.com/projects/cct/checkpoints/finetuned/cct_14_7x2_384_flowers102.pth',
}
class CCT(nn.Module):
    """Compact Convolutional Transformer.

    A convolutional `Tokenizer` converts the input image into a token
    sequence which a `TransformerClassifier` encodes.  `aggregation`
    selects the reduction of the encoded sequence in `forward`:
      - 'cls':      return the first (class) token,
      - 'seqpool':  learned attention pooling over the sequence,
      - otherwise:  return the full (batch, seq, dim) sequence.
    """
    def __init__(self,
                 img_size=224,
                 embedding_dim=768,
                 n_input_channels=3,
                 n_conv_layers=1,
                 kernel_size=7,
                 stride=2,
                 padding=3,
                 pooling_kernel_size=3,
                 pooling_stride=2,
                 pooling_padding=1,
                 dropout=0.,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 num_layers=14,
                 num_heads=6,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 positional_embedding='learnable',
                 aggregation=None,
                 *args, **kwargs):
        super(CCT, self).__init__()

        self.tokenizer = Tokenizer(n_input_channels=n_input_channels,
                                   n_output_channels=embedding_dim,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   pooling_kernel_size=pooling_kernel_size,
                                   pooling_stride=pooling_stride,
                                   pooling_padding=pooling_padding,
                                   max_pool=True,
                                   activation=nn.ReLU,
                                   n_conv_layers=n_conv_layers,
                                   conv_bias=False)

        # The classifier's sequence length is derived from a dry run of the
        # tokenizer at the configured image size.
        self.classifier = TransformerClassifier(
            sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels,
                                                           height=img_size,
                                                           width=img_size),
            embedding_dim=embedding_dim,
            seq_pool=True,
            dropout=dropout,
            attention_dropout=attention_dropout,
            stochastic_depth=stochastic_depth,
            num_layers=num_layers,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            num_classes=num_classes,
            positional_embedding=positional_embedding
        )
        # Any value outside the two recognized modes disables aggregation.
        if aggregation in ['cls', 'seqpool']:
            self.aggregation = aggregation
        else:
            self.aggregation = None

    def forward(self, x):
        """Image (B, C, H, W) -> class-token / pooled / full token sequence."""
        x = self.tokenizer(x)
        x = self.classifier(x)
        if self.aggregation == 'cls':
            return x[:, 0]
        elif self.aggregation == 'seqpool':
            x = torch.matmul(F.softmax(self.classifier.attention_pool(x), dim=1).transpose(-1, -2), x).squeeze(-2)
            return x
        else:
            # x = x.permute(0, 2, 1)
            return x
def _cct(arch, pretrained, progress,
         num_layers, num_heads, mlp_ratio, embedding_dim,
         kernel_size=3, stride=None, padding=None,
         aggregation=None, *args, **kwargs):
    """Build a CCT variant and optionally load its pre-trained checkpoint.

    `stride`/`padding` default to values derived from `kernel_size`.
    NOTE(review): positional `*args` are forwarded after the keyword
    arguments in the CCT(...) call and would bind to CCT's leading
    positional parameters (img_size, ...) — presumably callers pass only
    keywords; confirm before relying on *args here.
    """
    stride = stride if stride is not None else max(1, (kernel_size // 2) - 1)
    padding = padding if padding is not None else max(1, (kernel_size // 2))
    model = CCT(num_layers=num_layers,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                embedding_dim=embedding_dim,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                aggregation=aggregation,
                *args, **kwargs)

    if pretrained:
        if arch in model_urls:
            state_dict = load_state_dict_from_url(model_urls[arch],
                                                  progress=progress)
            # pe_check resizes the positional embedding when the checkpoint
            # was trained at a different sequence length.
            state_dict = pe_check(model, state_dict)
            model.load_state_dict(state_dict, strict=False)
        else:
            raise RuntimeError(f'Variant {arch} does not yet have pretrained weights.')
    return model
def cct_2(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone: 2 layers, 2 heads, mlp_ratio 1, embedding dim 128."""
    return _cct(arch, pretrained, progress, num_layers=2, num_heads=2, mlp_ratio=1, embedding_dim=128,
                aggregation=aggregation, *args, **kwargs)
def cct_4(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone: 4 layers, 2 heads, mlp_ratio 1, embedding dim 128."""
    return _cct(arch, pretrained, progress, num_layers=4, num_heads=2, mlp_ratio=1, embedding_dim=128,
                aggregation=aggregation, *args, **kwargs)
def cct_6(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone: 6 layers, 4 heads, mlp_ratio 2, embedding dim 256."""
    return _cct(arch, pretrained, progress, num_layers=6, num_heads=4, mlp_ratio=2, embedding_dim=256,
                aggregation=aggregation, *args, **kwargs)
def cct_7(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone: 7 layers, 4 heads, mlp_ratio 2, embedding dim 256."""
    return _cct(arch, pretrained, progress, num_layers=7, num_heads=4, mlp_ratio=2, embedding_dim=256,
                aggregation=aggregation, *args, **kwargs)
def cct_14(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone: 14 layers, 6 heads, mlp_ratio 3, embedding dim 384."""
    return _cct(arch, pretrained, progress, num_layers=14, num_heads=6, mlp_ratio=3, embedding_dim=384,
                aggregation=aggregation, *args, **kwargs)
@register_model
def cct_2_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-2: 3x3 tokenizer, 2 conv layers, 32x32 input, learnable pos. emb."""
    return cct_2('cct_2_3x2_32', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_2_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-2: 3x3 tokenizer, 2 conv layers, 32x32 input, sinusoidal pos. emb."""
    return cct_2('cct_2_3x2_32_sine', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_4_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-4: 3x3 tokenizer, 2 conv layers, 32x32 input, learnable pos. emb."""
    return cct_4('cct_4_3x2_32', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_4_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-4: 3x3 tokenizer, 2 conv layers, 32x32 input, sinusoidal pos. emb."""
    return cct_4('cct_4_3x2_32_sine', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_6_3x1_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 tokenizer, 1 conv layer, 32x32 input, learnable pos. emb."""
    return cct_6('cct_6_3x1_32', pretrained, progress,
                 kernel_size=3, n_conv_layers=1,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_6_3x1_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 tokenizer, 1 conv layer, 32x32 input, sinusoidal pos. emb."""
    return cct_6('cct_6_3x1_32_sine', pretrained, progress,
                 kernel_size=3, n_conv_layers=1,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_6_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 tokenizer, 2 conv layers, 32x32 input, learnable pos. emb."""
    return cct_6('cct_6_3x2_32', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_6_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 tokenizer, 2 conv layers, 32x32 input, sinusoidal pos. emb."""
    return cct_6('cct_6_3x2_32_sine', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_3x1_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 tokenizer, 1 conv layer, 32x32 input, learnable pos. emb."""
    return cct_7('cct_7_3x1_32', pretrained, progress,
                 kernel_size=3, n_conv_layers=1,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_3x1_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 tokenizer, 1 conv layer, 32x32 input, sinusoidal pos. emb."""
    return cct_7('cct_7_3x1_32_sine', pretrained, progress,
                 kernel_size=3, n_conv_layers=1,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_3x1_32_c100(pretrained=False, progress=False,
                      img_size=32, positional_embedding='learnable', num_classes=100,
                      aggregation=None, *args, **kwargs):
    """CCT-7 (CIFAR-100 head): 3x3 tokenizer, 1 conv layer, learnable pos. emb."""
    return cct_7('cct_7_3x1_32_c100', pretrained, progress,
                 kernel_size=3, n_conv_layers=1,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_3x1_32_sine_c100(pretrained=False, progress=False,
                           img_size=32, positional_embedding='sine', num_classes=100,
                           aggregation=None, *args, **kwargs):
    """CCT-7 (CIFAR-100 head): 3x3 tokenizer, 1 conv layer, sinusoidal pos. emb."""
    return cct_7('cct_7_3x1_32_sine_c100', pretrained, progress,
                 kernel_size=3, n_conv_layers=1,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 tokenizer, 2 conv layers, 32x32 input, learnable pos. emb."""
    return cct_7('cct_7_3x2_32', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 tokenizer, 2 conv layers, 32x32 input, sinusoidal pos. emb."""
    return cct_7('cct_7_3x2_32_sine', pretrained, progress,
                 kernel_size=3, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_7x2_224(pretrained=False, progress=False,
                  img_size=224, positional_embedding='learnable', num_classes=102,
                  aggregation=None, *args, **kwargs):
    """CCT-7: 7x7 tokenizer, 2 conv layers, 224x224 input, learnable pos. emb."""
    return cct_7('cct_7_7x2_224', pretrained, progress,
                 kernel_size=7, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_7_7x2_224_sine(pretrained=False, progress=False,
                       img_size=224, positional_embedding='sine', num_classes=102,
                       aggregation=None, *args, **kwargs):
    """CCT-7: 7x7 tokenizer, 2 conv layers, 224x224 input, sinusoidal pos. emb."""
    return cct_7('cct_7_7x2_224_sine', pretrained, progress,
                 kernel_size=7, n_conv_layers=2,
                 img_size=img_size, positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation,
                 *args, **kwargs)
@register_model
def cct_14_7x2_224(pretrained=False, progress=False,
                   img_size=224, positional_embedding='learnable', num_classes=1000,
                   aggregation=None, *args, **kwargs):
    """CCT-14 (ImageNet head): 7x7 tokenizer, 2 conv layers, 224x224 input."""
    return cct_14('cct_14_7x2_224', pretrained, progress,
                  kernel_size=7, n_conv_layers=2,
                  img_size=img_size, positional_embedding=positional_embedding,
                  num_classes=num_classes, aggregation=aggregation,
                  *args, **kwargs)
@register_model
def cct_14_7x2_384(pretrained=False, progress=False,
                   img_size=384, positional_embedding='learnable', num_classes=1000,
                   aggregation=None, *args, **kwargs):
    """CCT-14 (ImageNet head): 7x7 tokenizer, 2 conv layers, 384x384 input."""
    return cct_14('cct_14_7x2_384', pretrained, progress,
                  kernel_size=7, n_conv_layers=2,
                  img_size=img_size, positional_embedding=positional_embedding,
                  num_classes=num_classes, aggregation=aggregation,
                  *args, **kwargs)
@register_model
def cct_14_7x2_384_fl(pretrained=False, progress=False,
                      img_size=384, positional_embedding='learnable', num_classes=102,
                      aggregation=None, *args, **kwargs):
    """CCT-14 (Flowers102 head): 7x7 tokenizer, 2 conv layers, 384x384 input."""
    return cct_14('cct_14_7x2_384_fl', pretrained, progress,
                  kernel_size=7, n_conv_layers=2,
                  img_size=img_size, positional_embedding=positional_embedding,
                  num_classes=num_classes, aggregation=aggregation,
                  *args, **kwargs)
| 15,794 | 42.753463 | 114 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/tokenizer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Tokenizer(nn.Module):
    """Convolutional tokenizer: turns an image into a sequence of embeddings.

    Applies `n_conv_layers` stages of conv (+optional activation, +optional
    max-pool) and flattens the spatial grid, producing a
    (batch, seq_len, n_output_channels) tensor for the transformer.
    """
    def __init__(self,
                 kernel_size, stride, padding,
                 pooling_kernel_size=3, pooling_stride=2, pooling_padding=1,
                 n_conv_layers=1,
                 n_input_channels=3,
                 n_output_channels=64,
                 in_planes=64,
                 activation=None,
                 max_pool=True,
                 conv_bias=False):
        super(Tokenizer, self).__init__()

        # Channel sizes per stage: input -> (in_planes, ...) -> output.
        channels = [n_input_channels] + \
                   [in_planes for _ in range(n_conv_layers - 1)] + \
                   [n_output_channels]

        stages = []
        for idx in range(n_conv_layers):
            stages.append(nn.Sequential(
                nn.Conv2d(channels[idx], channels[idx + 1],
                          kernel_size=(kernel_size, kernel_size),
                          stride=(stride, stride),
                          padding=(padding, padding), bias=conv_bias),
                nn.Identity() if activation is None else activation(),
                nn.MaxPool2d(kernel_size=pooling_kernel_size,
                             stride=pooling_stride,
                             padding=pooling_padding) if max_pool else nn.Identity()
            ))
        self.conv_layers = nn.Sequential(*stages)

        self.flattener = nn.Flatten(2, 3)
        self.apply(self.init_weight)

    def sequence_length(self, n_channels=3, height=224, width=224):
        """Number of output tokens for an input of the given size (dry run)."""
        return self.forward(torch.zeros((1, n_channels, height, width))).shape[1]

    def forward(self, x):
        """(B, C, H, W) -> (B, H'*W', n_output_channels)."""
        return self.flattener(self.conv_layers(x)).transpose(-2, -1)

    @staticmethod
    def init_weight(m):
        """Kaiming-normal init for every Conv2d."""
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)
class TextTokenizer(nn.Module):
    """1-D convolutional tokenizer over a sequence of word embeddings.

    A single Conv2d with kernel (kernel_size, embedding_dim) collapses the
    embedding axis, optionally followed by max-pooling along the sequence;
    `forward_mask` propagates a token-validity mask through the same
    conv/pool geometry.
    """
    def __init__(self,
                 kernel_size, stride, padding,
                 pooling_kernel_size=3, pooling_stride=2, pooling_padding=1,
                 embedding_dim=300,
                 n_output_channels=128,
                 activation=None,
                 max_pool=True,
                 *args, **kwargs):
        super(TextTokenizer, self).__init__()

        self.max_pool = max_pool
        # Layout matters: index 0 = conv, 1 = activation, 2 = pool.
        # forward_mask reads kernel/stride/padding from these indices.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(1, n_output_channels,
                      kernel_size=(kernel_size, embedding_dim),
                      stride=(stride, 1),
                      padding=(padding, 0), bias=False),
            nn.Identity() if activation is None else activation(),
            nn.MaxPool2d(
                kernel_size=(pooling_kernel_size, 1),
                stride=(pooling_stride, 1),
                padding=(pooling_padding, 0)
            ) if max_pool else nn.Identity()
        )

        self.apply(self.init_weight)

    def seq_len(self, seq_len=32, embed_dim=300):
        """Output sequence length for an input of the given size (dry run)."""
        return self.forward(torch.zeros((1, seq_len, embed_dim)))[0].shape[1]

    def forward_mask(self, mask):
        """Downsample a (batch, seq) mask with the same conv/pool strides."""
        new_mask = mask.unsqueeze(1).float()
        # All-ones conv counts how many valid tokens feed each output step.
        cnn_weight = torch.ones(
            (1, 1, self.conv_layers[0].kernel_size[0]),
            device=mask.device,
            dtype=torch.float)
        new_mask = F.conv1d(
            new_mask, cnn_weight, None,
            self.conv_layers[0].stride[0], self.conv_layers[0].padding[0], 1, 1)
        if self.max_pool:
            new_mask = F.max_pool1d(
                new_mask, self.conv_layers[2].kernel_size[0],
                self.conv_layers[2].stride[0], self.conv_layers[2].padding[0], 1, False, False)
        new_mask = new_mask.squeeze(1)
        # Any output step touching at least one valid token stays valid.
        new_mask = (new_mask > 0)
        return new_mask

    def forward(self, x, mask=None):
        """(B, seq, embed_dim) -> ((B, seq', n_output_channels), mask)."""
        x = x.unsqueeze(1)
        x = self.conv_layers(x)
        x = x.transpose(1, 3).squeeze(1)
        x = x if mask is None else x * self.forward_mask(mask).unsqueeze(-1).float()
        return x, mask

    @staticmethod
    def init_weight(m):
        """Kaiming-normal init for every Conv2d."""
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)
| 4,035 | 35.690909 | 95 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/__init__.py | from .cct import cct_14_7x2_384, cct_14_7x2_224 | 47 | 47 | 47 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/helpers.py | import math
import torch
import torch.nn.functional as F
def resize_pos_embed(posemb, posemb_new, num_tokens=1):
# Copied from `timm` by Ross Wightman:
# github.com/rwightman/pytorch-image-models
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
gs_new = int(math.sqrt(ntok_new))
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def pe_check(model, state_dict, pe_key='classifier.positional_emb'):
if pe_key is not None and pe_key in state_dict.keys() and pe_key in model.state_dict().keys():
if model.state_dict()[pe_key].shape != state_dict[pe_key].shape:
state_dict[pe_key] = resize_pos_embed(state_dict[pe_key],
model.state_dict()[pe_key],
num_tokens=model.classifier.num_tokens)
return state_dict
| 1,573 | 46.69697 | 132 | py |
anomaly-seg | anomaly-seg-master/anom_utils.py | import numpy as np
import sklearn.metrics as sk
# Default TPR operating point for the FPR@TPR metric (i.e. FPR95).
recall_level_default = 0.95
def stable_cumsum(arr, rtol=1e-05, atol=1e-08):
    """Cumulative sum in float64, verified against the direct sum.

    Parameters
    ----------
    arr : array-like
        Values to be cumulatively summed as flat.
    rtol : float
        Relative tolerance, see ``np.allclose``.
    atol : float
        Absolute tolerance, see ``np.allclose``.

    Raises
    ------
    RuntimeError
        If the final cumulative value drifts from ``np.sum``.
    """
    cumulative = np.cumsum(arr, dtype=np.float64)
    total = np.sum(arr, dtype=np.float64)
    if not np.allclose(cumulative[-1], total, rtol=rtol, atol=atol):
        raise RuntimeError('cumsum was found to be unstable: '
                           'its last element does not correspond to sum')
    return cumulative
def fpr_and_fdr_at_recall(y_true, y_score, recall_level=recall_level_default, pos_label=None):
    """False-positive rate at the threshold where recall reaches `recall_level`.

    Adapted from sklearn-style binary-curve machinery.

    Args:
        y_true: ground-truth labels; must be binary unless `pos_label` is set.
        y_score: scores where higher ranks as more positive.
        recall_level: target recall (TPR) at which FPR is reported.
        pos_label: label treated as positive; inferred for {0,1}/{-1,1}-style data.

    Returns:
        FPR at the cutoff whose recall is closest to `recall_level`.
    """
    classes = np.unique(y_true)
    if (pos_label is None and
            not (np.array_equal(classes, [0, 1]) or
                 np.array_equal(classes, [-1, 1]) or
                 np.array_equal(classes, [0]) or
                 np.array_equal(classes, [-1]) or
                 np.array_equal(classes, [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector
    y_true = (y_true == pos_label)

    # sort scores and corresponding truth values (stable sort, descending)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    distinct_value_indices = np.where(np.diff(y_score))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = stable_cumsum(y_true)[threshold_idxs]
    fps = 1 + threshold_idxs - tps      # add one because of zero-based indexing

    thresholds = y_score[threshold_idxs]

    recall = tps / tps[-1]

    # Walk the curve backwards from the point where all positives are found.
    last_ind = tps.searchsorted(tps[-1])
    sl = slice(last_ind, None, -1)      # [last_ind::-1]
    recall, fps, tps, thresholds = np.r_[recall[sl], 1], np.r_[fps[sl], 0], np.r_[tps[sl], 0], thresholds[sl]

    cutoff = np.argmin(np.abs(recall - recall_level))

    return fps[cutoff] / (np.sum(np.logical_not(y_true)))   # , fps[cutoff]/(fps[cutoff] + tps[cutoff])
def get_measures(_pos, _neg, recall_level=recall_level_default):
    """Compute (AUROC, AUPR, FPR@recall), treating `_pos` as the positive class."""
    pos = np.array(_pos[:]).reshape((-1, 1))
    neg = np.array(_neg[:]).reshape((-1, 1))
    examples = np.squeeze(np.vstack((pos, neg)))
    # Positives are stacked first, so the first len(pos) labels become 1.
    labels = np.zeros(len(examples), dtype=np.int32)
    labels[:len(pos)] += 1

    auroc = sk.roc_auc_score(labels, examples)
    aupr = sk.average_precision_score(labels, examples)
    fpr = fpr_and_fdr_at_recall(labels, examples, recall_level)

    return auroc, aupr, fpr
def print_measures(auroc, aupr, fpr, method_name='Ours', recall_level=recall_level_default):
    """Pretty-print AUROC/AUPR/FPR as percentages under a method heading."""
    print('\t\t\t\t' + method_name)
    print('FPR{:d}:\t\t\t{:.2f}'.format(int(100 * recall_level), 100 * fpr))
    print('AUROC: \t\t\t{:.2f}'.format(100 * auroc))
    print('AUPR: \t\t\t{:.2f}'.format(100 * aupr))
def print_measures_with_std(aurocs, auprs, fprs, method_name='Ours', recall_level=recall_level_default):
    """Pretty-print mean +/- std of AUROC/AUPR/FPR lists as percentages."""
    print('\t\t\t\t' + method_name)
    print('FPR{:d}:\t\t\t{:.2f}\t+/- {:.2f}'.format(int(100 * recall_level), 100 * np.mean(fprs), 100 * np.std(fprs)))
    print('AUROC: \t\t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(aurocs), 100 * np.std(aurocs)))
    print('AUPR: \t\t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(auprs), 100 * np.std(auprs)))
def get_and_print_results(out_score, in_score, num_to_avg=1):
    """Return mean (AUROC, AUPR, FPR) over a single evaluation run.

    NOTE(review): despite the name, nothing is printed here, and
    `num_to_avg` is unused (only one run is averaged) — confirm intent.
    """
    aurocs, auprs, fprs = [], [], []
    measures = get_measures(out_score, in_score)
    aurocs.append(measures[0]); auprs.append(measures[1]); fprs.append(measures[2])
    auroc = np.mean(aurocs); aupr = np.mean(auprs); fpr = np.mean(fprs)
    return auroc, aupr, fpr
def eval_ood_measure(conf, seg_label, out_label=13):
    """OOD metrics (AUROC, AUPR, FPR) from per-pixel confidence scores.

    Pixels whose label equals `out_label` are the anomalies; scores are
    negated so that low confidence ranks as more anomalous.  Returns None
    when either pixel group is empty.
    NOTE(review): `in_scores` holds scores at anomaly pixels and
    `out_scores` at the remaining pixels — the names look swapped but they
    match the argument order of get_and_print_results; confirm.
    """
    # this code subtracts one
    out_label = out_label
    in_scores = - conf[np.where(seg_label == out_label)]
    out_scores = - conf[np.where(seg_label != out_label)]

    if (len(out_scores) != 0) and (len(in_scores) != 0):
        auroc, aupr, fpr = get_and_print_results(in_scores, out_scores)
        return auroc, aupr, fpr
    else:
        return None
| 4,508 | 37.538462 | 118 | py |
anomaly-seg | anomaly-seg-master/create_bdd_dataset.py | import numpy as np
import scipy
import scipy.io as sio
import scipy.misc
from scipy.misc import imread, imsave
import matplotlib
import matplotlib.pyplot as plt
import json
import os
import os.path
from tqdm import tqdm
# replace the colors with our colors
#a = sio.loadmat("data_ADE/color150.mat")
# print(a)
# Cityscapes-style train ids indexed by class (BDD100K segmentation);
# 255 marks the ignore/"other" label.
colors = np.array(
    [0,  # road
     1,  # sidewalk
     2,  # building
     3,  # wall
     4,  # fence
     5,  # pole
     6,  # traffic light
     7,  # traffic sign
     8,  # vegetation
     9,  # terrain
     10,  # sky
     11,  # person
     12,  # rider
     13,  # car
     14,  # truck
     15,  # bus
     16,  # train
     17,  # motorcycle
     18,  # bicycle
     255, ])  # other
#swap 255 with -1
# add 2 to whole array
#a["colors"] = colors
# print(a)
#sio.savemat("data/color150.mat", a)
#####
#create the train and val obgt
## To view the structure of their obgt file uncomment
## the lines below
# odgt = "data_ADE/train.odgt"
#
# with open(odgt) as fp:
# a = json.loads(fp.read())
# print(a, type(a))
#
# a = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
# print(a, type(a), type(a[0]), len(a), "\n\n", a[0])
def create_odgt(root_dir, file_dir, ann_dir, out_dir, anom_files=None):
    """Build an .odgt sample list, splitting off images with anomaly classes.

    Images whose annotation contains train ids 18/19/20 are appended to
    `anom_files` (written to <root_dir>/anom.odgt); the rest go to `out_dir`.
    Returns the running `anom_files` list so repeated calls accumulate.
    """
    if anom_files is None:
        anom_files = []
    _files = []
    count1 = 0
    count2 = 0
    img_files = sorted(os.listdir(root_dir+file_dir))
    for img in img_files:
        # Annotation file name derived from the image file name.
        ann_file = img[:-4] + "_train_id.png"
        ann_file_path = root_dir+ann_dir+ann_file
        if os.path.exists(ann_file_path):
            dict_entry = {
                "dbName": "BDD100k",
                "width": 1280,
                "height": 720,
                "fpath_img": file_dir+img,
                "fpath_segm": ann_dir+ann_file,
            }
            # NOTE: `img` is re-bound from file name to image array here.
            img = imread(ann_file_path)
            cond1 = np.logical_or((img == 18), (img == 19) )
            if np.any(np.logical_or( cond1, (img == 20) )):
                count2 += 1
                anom_files.append(dict_entry)
            else:
                count1 += 1
                _files.append(dict_entry)
    print("total images in = {} and out = {}".format(count1, count2))
    with open(out_dir, "w") as outfile:
        json.dump(_files, outfile)

    with open(root_dir + "anom.odgt", "w") as outfile:
        json.dump(anom_files, outfile)

    return anom_files
#do train first
# Build the training odgt; images containing anomaly classes are diverted
# into the shared anom_files list instead of the train split.
out_dir = "data/train.odgt"
root_dir = "data/"
train_dir = "seg/images/train/"
ann_dir = "seg/train_labels/train/"
anom_files = create_odgt(root_dir, train_dir, ann_dir, out_dir)

# Build the validation odgt, appending its anomalous images to the same
# running anomaly list (anom.odgt is rewritten with the combined list).
out_dir = "data/validation.odgt"
root_dir = "data/"
train_dir = "seg/images/val/"
ann_dir = "seg/train_labels/val/"
create_odgt(root_dir, train_dir, ann_dir, out_dir, anom_files=anom_files)
# sanity check to make sure it can be loaded back
#a = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
# print(a)
# print(a, type(a), type(a[0]), len(a[0]), "\n\n",)
### convert annotation images to correct labels
def convert_cityscapes_to_uint(root_dir, ann_dir):
    """Shift Cityscapes-style label images in place: 255 -> -1, then +2.

    NOTE(review): if the images load as uint8, `img[loc] = -1` wraps to 255
    and `img += 2` overflows 255 to 1 — presumably the intended remapping,
    but confirm the source dtype before re-enabling this.
    """
    count = 0
    for img_loc in tqdm(os.listdir(root_dir+ann_dir)):
        img = imread(root_dir+ann_dir+img_loc)
        if img.ndim <= 1:
            continue
        # swap 255 with -1, then add 2 to the whole array
        loc = img == 255
        img[loc] = -1
        img += 2
        # scipy.misc.imsave would rescale to 0-255; toimage with fixed
        # cmin/cmax preserves the raw label values.
        scipy.misc.toimage(img, cmin=0, cmax=255).save(root_dir+ann_dir+img_loc)
# One-off label conversion (disabled); paths for the train split...
root_dir = "data/"
ann_dir = "seg/train_labels/train/"
# convert the training images
#convert_cityscapes_to_uint(root_dir, ann_dir)
# ...and for the val split.
root_dir = "data/"
ann_dir = "seg/train_labels/val/"
# convert the anomaly images
#convert_cityscapes_to_uint(root_dir, ann_dir)
#convert the val images
#ann_dir = "annotations/validation/"
#convert_cityscapes_to_uint(root_dir, ann_dir)
| 4,282 | 25.115854 | 90 | py |
anomaly-seg | anomaly-seg-master/dataset.py | import os
import json
import torch
from torchvision import transforms
import numpy as np
from PIL import Image
def imresize(im, size, interp='bilinear'):
    """Resize a PIL image with the named interpolation method.

    Args:
        im: PIL.Image to resize.
        size: (width, height) target size.
        interp: one of 'nearest', 'bilinear', 'bicubic'.

    Returns:
        The resized PIL.Image.

    Raises:
        ValueError: if `interp` is not a recognized method.  (Was a bare
        `Exception`; ValueError is a subclass, so existing handlers that
        caught Exception still work.)
    """
    _RESAMPLE_ATTRS = {
        'nearest': 'NEAREST',
        'bilinear': 'BILINEAR',
        'bicubic': 'BICUBIC',
    }
    try:
        attr = _RESAMPLE_ATTRS[interp]
    except KeyError:
        raise ValueError('resample method undefined!') from None
    return im.resize(size, getattr(Image, attr))
class BaseDataset(torch.utils.data.Dataset):
    """Common dataset plumbing: sample-list parsing and tensor transforms.

    `odgt` may be an already-parsed list of sample records, or a path to a
    file whose first line is a JSON array of records.
    """
    def __init__(self, odgt, opt, **kwargs):
        # parse options
        self.imgSizes = opt.imgSizes
        self.imgMaxSize = opt.imgMaxSize
        # max down sampling rate of network to avoid rounding during conv or pooling
        self.padding_constant = opt.padding_constant

        # parse the input list
        self.parse_input_list(odgt, **kwargs)

        # mean and std (ImageNet statistics)
        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

    def parse_input_list(self, odgt, max_sample=-1, start_idx=-1, end_idx=-1):
        """Load sample records, then apply optional truncation/slicing."""
        if isinstance(odgt, list):
            self.list_sample = odgt
        elif isinstance(odgt, str):
            # The file stores one JSON array on its first line.  Close the
            # handle deterministically (the original leaked an open file).
            with open(odgt, 'r') as f:
                self.list_sample = [json.loads(x.rstrip()) for x in f][0]

        if max_sample > 0:
            self.list_sample = self.list_sample[0:max_sample]
        if start_idx >= 0 and end_idx >= 0:     # divide file list
            self.list_sample = self.list_sample[start_idx:end_idx]

        self.num_sample = len(self.list_sample)
        assert self.num_sample > 0
        print('# samples: {}'.format(self.num_sample))

    def img_transform(self, img):
        """HWC uint8-style image -> normalized float CHW tensor."""
        # 0-255 to 0-1
        img = np.float32(np.array(img)) / 255.
        img = img.transpose((2, 0, 1))
        img = self.normalize(torch.from_numpy(img.copy()))
        return img

    def segm_transform(self, segm):
        """Segmentation labels -> long tensor shifted down by one (-1..149)."""
        # to tensor, -1 to 149
        segm = torch.from_numpy(np.array(segm)).long() - 1
        return segm

    # Round x up to the nearest multiple of p (result >= x)
    def round2nearest_multiple(self, x, p):
        return ((x - 1) // p + 1) * p
class TrainDataset(BaseDataset):
    """Training dataset that assembles same-aspect-ratio sub-batches.

    Records are bucketed into two lists (h > w vs. h <= w) so that the images
    concatenated into one batch share an orientation and waste less padding.
    Batching is done inside __getitem__, so the DataLoader batch size is 1.
    """
    def __init__(self, root_dataset, odgt, opt, batch_per_gpu=1, **kwargs):
        super(TrainDataset, self).__init__(odgt, opt, **kwargs)
        self.root_dataset = root_dataset
        # down sampling rate of segm label
        self.segm_downsampling_rate = opt.segm_downsampling_rate
        self.batch_per_gpu = batch_per_gpu
        # classify images into two classes: 1. h > w and 2. h <= w
        self.batch_record_list = [[], []]
        # override dataset length when training with batch_per_gpu > 1
        self.cur_idx = 0
        self.if_shuffled = False
    def _get_sub_batch(self):
        """Collect batch_per_gpu records that share an orientation bucket."""
        while True:
            # get a sample record
            this_sample = self.list_sample[self.cur_idx]
            if this_sample['height'] > this_sample['width']:
                self.batch_record_list[0].append(this_sample) # h > w, go to 1st class
            else:
                self.batch_record_list[1].append(this_sample) # h <= w, go to 2nd class
            # update current sample pointer; reshuffle once the list wraps around
            self.cur_idx += 1
            if self.cur_idx >= self.num_sample:
                self.cur_idx = 0
                np.random.shuffle(self.list_sample)
            if len(self.batch_record_list[0]) == self.batch_per_gpu:
                batch_records = self.batch_record_list[0]
                self.batch_record_list[0] = []
                break
            elif len(self.batch_record_list[1]) == self.batch_per_gpu:
                batch_records = self.batch_record_list[1]
                self.batch_record_list[1] = []
                break
        return batch_records
    def __getitem__(self, index):
        """Return one zero-padded sub-batch dict: 'img_data' and 'seg_label'."""
        # NOTE: random shuffle for the first time. shuffle in __init__ is useless
        if not self.if_shuffled:
            np.random.seed(index)
            np.random.shuffle(self.list_sample)
            self.if_shuffled = True
        # get sub-batch candidates
        batch_records = self._get_sub_batch()
        # resize all images' short edges to the chosen size
        if isinstance(self.imgSizes, list) or isinstance(self.imgSizes, tuple):
            this_short_size = np.random.choice(self.imgSizes)
        else:
            this_short_size = self.imgSizes
        # calculate the BATCH's height and width
        # since we concat more than one samples, the batch's h and w shall be larger than EACH sample
        batch_widths = np.zeros(self.batch_per_gpu, np.int32)
        batch_heights = np.zeros(self.batch_per_gpu, np.int32)
        for i in range(self.batch_per_gpu):
            img_height, img_width = batch_records[i]['height'], batch_records[i]['width']
            # scale so the short edge hits this_short_size without the long
            # edge exceeding imgMaxSize
            this_scale = min(
                this_short_size / min(img_height, img_width), \
                self.imgMaxSize / max(img_height, img_width))
            batch_widths[i] = img_width * this_scale
            batch_heights[i] = img_height * this_scale
        # Here we must pad both input image and segmentation map to size h' and w' so that p | h' and p | w'
        batch_width = np.max(batch_widths)
        batch_height = np.max(batch_heights)
        batch_width = int(self.round2nearest_multiple(batch_width, self.padding_constant))
        batch_height = int(self.round2nearest_multiple(batch_height, self.padding_constant))
        assert self.padding_constant >= self.segm_downsampling_rate, \
            'padding constant must be equal or large than segm downsamping rate'
        batch_images = torch.zeros(
            self.batch_per_gpu, 3, batch_height, batch_width)
        batch_segms = torch.zeros(
            self.batch_per_gpu,
            batch_height // self.segm_downsampling_rate,
            batch_width // self.segm_downsampling_rate).long()
        for i in range(self.batch_per_gpu):
            this_record = batch_records[i]
            # load image and label
            image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
            segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
            img = Image.open(image_path).convert('RGB')
            segm = Image.open(segm_path)
            assert(segm.mode == "L")
            assert(img.size[0] == segm.size[0])
            assert(img.size[1] == segm.size[1])
            # random_flip (applied identically to image and label)
            if np.random.choice([0, 1]):
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
                segm = segm.transpose(Image.FLIP_LEFT_RIGHT)
            # note that each sample within a mini batch has different scale param
            img = imresize(img, (batch_widths[i], batch_heights[i]), interp='bilinear')
            segm = imresize(segm, (batch_widths[i], batch_heights[i]), interp='nearest')
            # further downsample seg label, need to avoid seg label misalignment
            segm_rounded_width = self.round2nearest_multiple(segm.size[0], self.segm_downsampling_rate)
            segm_rounded_height = self.round2nearest_multiple(segm.size[1], self.segm_downsampling_rate)
            segm_rounded = Image.new('L', (segm_rounded_width, segm_rounded_height), 0)
            segm_rounded.paste(segm, (0, 0))
            segm = imresize(
                segm_rounded,
                (segm_rounded.size[0] // self.segm_downsampling_rate, \
                segm_rounded.size[1] // self.segm_downsampling_rate), \
                interp='nearest')
            # image transform, to torch float tensor 3xHxW
            img = self.img_transform(img)
            # segm transform, to torch long tensor HxW
            segm = self.segm_transform(segm)
            # put into batch arrays (top-left corner; the rest stays zero padding)
            batch_images[i][:, :img.shape[1], :img.shape[2]] = img
            batch_segms[i][:segm.shape[0], :segm.shape[1]] = segm
        output = dict()
        output['img_data'] = batch_images
        output['seg_label'] = batch_segms
        return output
    def __len__(self):
        return int(1e10) # It's a fake length due to the trick that every loader maintains its own list
        #return self.num_sampleclass
class ValDataset(BaseDataset):
    """Validation split: yields one multi-scale sample per record."""

    def __init__(self, root_dataset, odgt, opt, **kwargs):
        super(ValDataset, self).__init__(odgt, opt, **kwargs)
        self.root_dataset = root_dataset

    def __getitem__(self, index):
        record = self.list_sample[index]
        # Load the RGB image and its single-channel label map.
        img = Image.open(os.path.join(self.root_dataset, record['fpath_img'])).convert('RGB')
        segm = Image.open(os.path.join(self.root_dataset, record['fpath_segm']))
        assert segm.mode == "L"
        assert img.size[0] == segm.size[0]
        assert img.size[1] == segm.size[1]
        ori_width, ori_height = img.size
        pyramid = []
        for short_size in self.imgSizes:
            # Scale so the short edge hits `short_size` without the long
            # edge exceeding `imgMaxSize`.
            scale = min(short_size / float(min(ori_height, ori_width)),
                        self.imgMaxSize / float(max(ori_height, ori_width)))
            # Round to a multiple of padding_constant to avoid rounding in
            # the network.
            tgt_w = self.round2nearest_multiple(int(ori_width * scale), self.padding_constant)
            tgt_h = self.round2nearest_multiple(int(ori_height * scale), self.padding_constant)
            resized = imresize(img, (tgt_w, tgt_h), interp='bilinear')
            # To normalized float tensor of shape 1x3xHxW.
            pyramid.append(torch.unsqueeze(self.img_transform(resized), 0))
        # Label map to long tensor of shape 1xHxW.
        label = torch.unsqueeze(self.segm_transform(segm), 0)
        return {
            'img_ori': np.array(img),
            'img_data': [t.contiguous() for t in pyramid],
            'seg_label': label.contiguous(),
            'info': record['fpath_img'],
        }

    def __len__(self):
        return self.num_sample
class TestDataset(BaseDataset):
    """Inference-only dataset: images without ground-truth labels."""

    def __init__(self, odgt, opt, **kwargs):
        super(TestDataset, self).__init__(odgt, opt, **kwargs)

    def __getitem__(self, index):
        record = self.list_sample[index]
        # Only the image is available at test time; fpath_img is used as-is.
        img = Image.open(record['fpath_img']).convert('RGB')
        ori_width, ori_height = img.size
        pyramid = []
        for short_size in self.imgSizes:
            # Short edge to `short_size`, long edge capped at imgMaxSize.
            scale = min(short_size / float(min(ori_height, ori_width)),
                        self.imgMaxSize / float(max(ori_height, ori_width)))
            # Round to a multiple of padding_constant to avoid rounding in
            # the network.
            tgt_w = self.round2nearest_multiple(int(ori_width * scale), self.padding_constant)
            tgt_h = self.round2nearest_multiple(int(ori_height * scale), self.padding_constant)
            resized = imresize(img, (tgt_w, tgt_h), interp='bilinear')
            # To normalized float tensor of shape 1x3xHxW.
            pyramid.append(torch.unsqueeze(self.img_transform(resized), 0))
        return {
            'img_ori': np.array(img),
            'img_data': [t.contiguous() for t in pyramid],
            'info': record['fpath_img'],
        }

    def __len__(self):
        return self.num_sample
| 11,901 | 39.074074 | 108 | py |
anomaly-seg | anomaly-seg-master/defaults.py | from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition (yacs); values here are defaults and can be overridden by
# a YAML file (cfg.merge_from_file) or command-line opts (cfg.merge_from_list)
# -----------------------------------------------------------------------------
_C = CN()
# checkpoint / output directory of the experiment
_C.DIR = "ckpt/ade20k-resnet50dilated-ppm_deepsup"
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.root_dataset = "./data/"
_C.DATASET.list_train = "./data/training.odgt"
_C.DATASET.list_val = "./data/validation.odgt"
# number of segmentation classes
_C.DATASET.num_class = 17
# multiscale train/test, size of short edge (int or tuple)
_C.DATASET.imgSizes = (300, 375, 450, 525, 600)
# maximum input image size of long edge
_C.DATASET.imgMaxSize = 1000
# maximum downsampling rate of the network
_C.DATASET.padding_constant = 8
# downsampling rate of the segmentation label
_C.DATASET.segm_downsampling_rate = 8
# randomly horizontally flip images when train/test
_C.DATASET.random_flip = True
# -----------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# architecture of net_encoder
_C.MODEL.arch_encoder = "resnet50dilated"
# architecture of net_decoder
_C.MODEL.arch_decoder = "ppm_deepsup"
# weights to finetune net_encoder
_C.MODEL.weights_encoder = ""
# weights to finetune net_decoder
_C.MODEL.weights_decoder = ""
# number of feature channels between encoder and decoder
_C.MODEL.fc_dim = 2048
# -----------------------------------------------------------------------------
# Training
# -----------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.batch_size_per_gpu = 2
# epochs to train for
_C.TRAIN.num_epoch = 20
# epoch to start training. useful if continue from a checkpoint
_C.TRAIN.start_epoch = 0
# iterations of each epoch (irrelevant to batch size)
_C.TRAIN.epoch_iters = 5000
_C.TRAIN.optim = "SGD"
_C.TRAIN.lr_encoder = 0.02
_C.TRAIN.lr_decoder = 0.02
# power in poly to drop LR
_C.TRAIN.lr_pow = 0.9
# momentum for sgd, beta1 for adam
_C.TRAIN.beta1 = 0.9
# weights regularizer
_C.TRAIN.weight_decay = 1e-4
# the weighting of deep supervision loss
_C.TRAIN.deep_sup_scale = 0.4
# fix bn params, only under finetuning
_C.TRAIN.fix_bn = False
# number of data loading workers
_C.TRAIN.workers = 16
# frequency to display
_C.TRAIN.disp_iter = 20
# manual seed
_C.TRAIN.seed = 304
# -----------------------------------------------------------------------------
# Validation
# -----------------------------------------------------------------------------
_C.VAL = CN()
# currently only supports 1
_C.VAL.batch_size = 1
# output visualization during validation
_C.VAL.visualize = False
# the checkpoint to evaluate on
_C.VAL.checkpoint = "epoch_20.pth"
# -----------------------------------------------------------------------------
# Testing
# -----------------------------------------------------------------------------
_C.TEST = CN()
# currently only supports 1
_C.TEST.batch_size = 1
# the checkpoint to test on
_C.TEST.checkpoint = "epoch_20.pth"
# folder to output visualization results
_C.TEST.result = "./"
# -----------------------------------------------------------------------------
# OOD
# -----------------------------------------------------------------------------
_C.OOD = CN()
# whether or not to include or exclude background pixels
_C.OOD.exclude_back = False
# which ood method to use
_C.OOD.ood = "msp"
# which pixels are considered OOD
_C.OOD.out_labels = (13,)
| 3,625 | 32.266055 | 79 | py |
anomaly-seg | anomaly-seg-master/create_dataset.py | import numpy as np
import scipy
import scipy.io as sio
import scipy.misc
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import json
import os
import os.path
from tqdm import tqdm
# Replace the colors with our colors
# This is only used for visualization purposes
#color_mat = sio.loadmat("data_ADE/color150.mat")
#StreetHazards colors
#colors = np.array([[ 0, 0, 0],# // unlabeled = 0,
# [ 70, 70, 70],# // building = 1,
# [190, 153, 153],# // fence = 2,
# [250, 170, 160],# // other = 3,
# [220, 20, 60],# // pedestrian = 4,
# [153, 153, 153],# // pole = 5,
# [157, 234, 50],# // road line = 6,
# [128, 64, 128],# // road = 7,
# [244, 35, 232],# // sidewalk = 8,
# [107, 142, 35],# // vegetation = 9,
# [ 0, 0, 142],# // car = 10,
# [102, 102, 156],# // wall = 11,
# [220, 220, 0],# // traffic sign = 12,
# [ 60, 250, 240],# // anomaly = 13,
#
# ])
#color_mat["colors"] = colors
#sio.savemat("data/color150.mat", color_mat)
#####
#create the train and val obgt
def create_odgt(root_dir, file_dir, ann_dir, out_dir, anom_files=None):
    """Index image/annotation pairs into a single-line .odgt JSON file.

    Walks the per-town image folders under ``root_dir + file_dir``, keeps
    every image that has a matching annotation file under ``ann_dir``, and
    dumps the resulting record list to ``out_dir``.

    Returns ``anom_files`` unchanged (used to thread BDD100K anomaly records
    between calls; see the commented-out BDD handling below).
    """
    if anom_files is None:
        anom_files = []
    records = []
    kept = 0
    seen = 0
    for town in sorted(os.listdir(root_dir + file_dir)):
        town_imgs = sorted(os.listdir(os.path.join(root_dir, file_dir, town)))
        seen += len(town_imgs)
        for img_name in town_imgs:
            # Annotation shares the image's filename.
            if not os.path.exists(os.path.join(root_dir, ann_dir, town, img_name)):
                continue
            record = {
                "dbName": "StreetHazards",
                "width": 1280,
                "height": 720,
                "fpath_img": os.path.join(file_dir, town, img_name),
                "fpath_segm": os.path.join(ann_dir, town, img_name),
            }
            # When converting BDD100K, records whose label map contains
            # ids 19/20 were diverted into anom_files instead (see repo
            # history); for StreetHazards every pair is kept.
            kept += 1
            records.append(record)
    print("total images in = {} and out = {}".format(seen, kept))
    with open(out_dir, "w") as outfile:
        json.dump(records, outfile)
    return anom_files
# Build the three .odgt indices (train/val/test) for StreetHazards.
out_dir = "data/training.odgt"
# modify root directory to reflect the location of where the streethazards_train was extracted to.
root_dir = "data/"
train_dir = "train/images/training/"
ann_dir = "train/annotations/training/"
anom_files = create_odgt(root_dir, train_dir, ann_dir, out_dir)
out_dir = "data/validation.odgt"
train_dir = "train/images/validation/"
ann_dir = "train/annotations/validation/"
# anom_files threads the (BDD-only) anomaly records between calls.
create_odgt(root_dir, train_dir, ann_dir, out_dir, anom_files=anom_files)
out_dir = "data/test.odgt"
val_dir = "test/images/test/"
ann_dir = "test/annotations/test/"
create_odgt(root_dir, val_dir, ann_dir, out_dir)
# BDD100K label map
#colors = np.array(
# [0, # road
# 1, #sidewalk
# 2, # building
# 3, # wall
# 4, # fence
# 5, # pole
# 6, # traffic light
# 7, # traffic sign
# 8, # vegetation
# 9, # terrain
# 10, # sky
# 11, # person
# 12, # rider
# 13, # car
# 14, # truck
# 15, # bus
# 16, # train
# 17, # motorcycle
# 18, # bicycle
# 255,]) # other
### convert BDD100K semantic segmentation images to correct labels
def convert_bdd(root_dir, ann_dir):
    """Remap BDD100K semantic-segmentation label maps in place.

    For every label image under ``root_dir + ann_dir``:
      * 255 (ignore) is mapped to -1, which the final +1 shift turns into 0;
      * class ids 16 and 18 are swapped (via temporary value 19);
      * every id is then shifted up by 1, freeing 0 for "unlabeled".

    Each file is overwritten with the remapped image.
    """
    for img_loc in tqdm(os.listdir(root_dir + ann_dir)):
        # Work on a signed integer array: PIL Image objects have no `.ndim`
        # and do not support boolean-mask assignment (the original code
        # crashed here), and we need headroom for the -1 marker.
        img = np.array(Image.open(root_dir + ann_dir + img_loc), dtype=np.int16)
        if img.ndim <= 1:
            continue
        # swap 255 with -1
        img[img == 255] = -1
        # 16 -> 19 (temp), 18 -> 16, 19 -> 18: net effect swaps 16 and 18
        img[img == 16] = 19
        img[img == 18] = 16
        img[img == 19] = 18
        # add 1 to whole array
        img += 1
        # scipy.misc.toimage was removed in SciPy 1.2; with cmin=0/cmax=255 it
        # was an identity save, so writing the uint8 array directly matches.
        Image.fromarray(img.astype(np.uint8)).save(root_dir + ann_dir + img_loc)
#root_dir = "data/"
#ann_dir = "seg/train_labels/train/"
# convert the BDD100K semantic segmentation images.
#convert_bdd(root_dir, ann_dir)
| 4,611 | 27.645963 | 97 | py |
anomaly-seg | anomaly-seg-master/eval_ood.py | # System libs
import os
import time
import argparse
from distutils.version import LooseVersion
# Numerical libs
import numpy as np
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from config import cfg
from dataset import ValDataset
from models import ModelBuilder, SegmentationModule
from utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, setup_logger
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy
from PIL import Image
from tqdm import tqdm
import anom_utils
# RGB palette from color150.mat, used by colorEncode when rendering label maps.
colors = loadmat('data/color150.mat')['colors']
def visualize_result(data, pred, dir_result):
    """Save a side-by-side panel of input image, ground truth and prediction.

    Args:
        data: tuple of (original image array, label map, image path string).
        pred: predicted label map.
        dir_result: directory receiving the PNG panel.
    """
    img, seg, info = data
    # Color-encode ground truth and prediction with the shared palette.
    seg_color = colorEncode(seg, colors)
    pred_color = colorEncode(pred, colors)
    # Stitch the three panels horizontally and save next to the input name.
    panel = np.concatenate((img, seg_color, pred_color), axis=1).astype(np.uint8)
    img_name = info.split('/')[-1]
    out_path = os.path.join(dir_result, img_name.replace('.jpg', '.png'))
    Image.fromarray(panel).save(out_path)
def eval_ood_measure(conf, seg_label, cfg, mask=None):
    """Compute AUROC/AUPR/FPR for OOD detection on one image.

    Args:
        conf: per-pixel confidence map (higher = more in-distribution);
            scores are negated so higher means "more anomalous".
        seg_label: per-pixel ground-truth labels, same shape as ``conf``.
        cfg: config node; ``cfg.OOD.out_labels`` lists the label ids
            considered out-of-distribution.
        mask: optional boolean mask restricting which pixels are scored.

    Returns:
        ``(auroc, aupr, fpr)``, or ``None`` when the image contains no OOD
        pixels or only OOD pixels (metrics undefined).
    """
    out_labels = cfg.OOD.out_labels
    if mask is not None:
        # Fix: the original masked only seg_label, leaving conf full-sized,
        # so the boolean indexing below was shape-mismatched for any mask.
        seg_label = seg_label[mask]
        conf = conf[mask]
    # np.isin marks pixels whose label is any OOD id (replaces the manual
    # logical_or chain, which re-checked out_labels[0] and required a
    # non-empty tuple).
    out_label = np.isin(seg_label, out_labels)
    in_scores = - conf[np.logical_not(out_label)]
    out_scores = - conf[out_label]
    if (len(out_scores) != 0) and (len(in_scores) != 0):
        auroc, aupr, fpr = anom_utils.get_and_print_results(out_scores, in_scores)
        return auroc, aupr, fpr
    else:
        print("This image does not contain any OOD pixels or is only OOD.")
        return None
def evaluate(segmentation_module, loader, cfg, gpu):
    """Run multi-scale validation and per-image OOD scoring.

    For each sample: average class scores over all scales, derive a per-pixel
    confidence map with the method selected by cfg.OOD.ood, score it with
    eval_ood_measure, and accumulate segmentation accuracy/IoU. Prints a
    summary (per-class IoU, mean IoU, accuracy, mean AUROC/AUPR/FPR) at the end.
    """
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    time_meter = AverageMeter()
    segmentation_module.eval()
    aurocs, auprs, fprs = [], [], []
    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data (loader batch size is 1; unwrap the singleton)
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']
        torch.cuda.synchronize()
        tic = time.perf_counter()
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)
            # average predictions over the multi-scale image pyramid
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)
                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)
            tmp_scores = scores
            # optionally drop the background channel before OOD scoring
            if cfg.OOD.exclude_back:
                tmp_scores = tmp_scores[:,1:]
            mask = None
            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
            #for evaluating MSP (maximum softmax probability)
            if cfg.OOD.ood == "msp":
                conf, _ = torch.max(nn.functional.softmax(tmp_scores, dim=1),dim=1)
                conf = as_numpy(conf.squeeze(0).cpu())
            elif cfg.OOD.ood == "maxlogit":
                # confidence = largest unnormalized logit
                conf, _ = torch.max(tmp_scores,dim=1)
                conf = as_numpy(conf.squeeze(0).cpu())
            elif cfg.OOD.ood == "background":
                # confidence = score of the background channel itself
                conf = tmp_scores[:, 0]
                conf = as_numpy(conf.squeeze(0).cpu())
            elif cfg.OOD.ood == "crf":
                import pydensecrf.densecrf as dcrf
                from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral, create_pairwise_gaussian
                ch,h,w = scores.squeeze(0).size()
                d = dcrf.DenseCRF2D(h, w, ch) # width, height, nlabels
                tmp_scores = as_numpy(nn.functional.softmax(tmp_scores, dim=1).squeeze(0))
                tmp_scores = as_numpy(tmp_scores)
                U = unary_from_softmax(tmp_scores)
                d.setUnaryEnergy(U)
                pairwise_energy = create_pairwise_bilateral(sdims=(10,10), schan=13, img=tmp_scores, chdim=0)
                d.addPairwiseEnergy(pairwise_energy, compat=10)
                # Run inference for 100 iterations
                Q_unary = d.inference(100)
                # The Q is now the approximate posterior, we can get a MAP estimate using argmax.
                map_soln_unary = np.argmax(Q_unary, axis=0)
                # Unfortunately, the DenseCRF flattens everything, so get it back into picture form.
                map_soln_unary = map_soln_unary.reshape((h,w))
                conf = np.max(Q_unary, axis=0).reshape((h,w))
            elif cfg.OOD.ood == "crf-gauss":
                # same as "crf" but with a Gaussian pairwise potential only
                import pydensecrf.densecrf as dcrf
                from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral, create_pairwise_gaussian
                ch,h,w = scores.squeeze(0).size()
                d = dcrf.DenseCRF2D(h, w, ch) # width, height, nlabels
                tmp_scores = as_numpy(nn.functional.softmax(tmp_scores, dim=1).squeeze(0))
                tmp_scores = as_numpy(tmp_scores)
                U = unary_from_softmax(tmp_scores)
                d.setUnaryEnergy(U)
                d.addPairwiseGaussian(sxy=3, compat=3) # `compat` is the "strength" of this potential.
                # Run inference for 100 iterations
                Q_unary = d.inference(100)
                # The Q is now the approximate posterior, we can get a MAP estimate using argmax.
                map_soln_unary = np.argmax(Q_unary, axis=0)
                # Unfortunately, the DenseCRF flattens everything, so get it back into picture form.
                map_soln_unary = map_soln_unary.reshape((h,w))
                conf = np.max(Q_unary, axis=0).reshape((h,w))
            # res is None when the image has no OOD pixels (or only OOD)
            res = eval_ood_measure(conf, seg_label, cfg, mask=mask)
            if res is not None:
                auroc, aupr, fpr = res
                aurocs.append(auroc); auprs.append(aupr), fprs.append(fpr)
            else:
                pass
        torch.cuda.synchronize()
        time_meter.update(time.perf_counter() - tic)
        # calculate accuracy
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                pred,
                os.path.join(cfg.DIR, 'result')
            )
        pbar.update(1)
    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))
    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'
          .format(iou.mean(), acc_meter.average()*100, time_meter.average()))
    print("mean auroc = ", np.mean(aurocs), "mean aupr = ", np.mean(auprs), " mean fpr = ", np.mean(fprs))
def main(cfg, gpu):
    """Build the segmentation model, the validation loader, and run evaluate()."""
    torch.cuda.set_device(gpu)

    # Network builders: encoder + softmax decoder, glued by the NLL criterion.
    encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)
    criterion = nn.NLLLoss(ignore_index=-1)
    seg_module = SegmentationModule(encoder, decoder, criterion)

    # Validation dataset and loader (custom collate keeps samples unscattered).
    val_set = ValDataset(
        cfg.DATASET.root_dataset,
        cfg.DATASET.list_val,
        cfg.DATASET)
    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)
    seg_module.cuda()

    # Main loop
    evaluate(seg_module, val_loader, cfg, gpu)
    print('Evaluation Done!')
if __name__ == '__main__':
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Validation"
    )
    parser.add_argument(
        "--cfg",
        default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--gpu",
        default=0,
        help="gpu to use"
    )
    parser.add_argument(
        "--ood",
        help="Choices are [msp, crf-gauss, crf, maxlogit, background]",
        default="msp",
    )
    parser.add_argument(
        "--exclude_back",
        help="Whether to exclude the background class.",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # Fold the OOD flags into the yacs config, then any free-form overrides.
    ood = ["OOD.exclude_back", args.exclude_back, "OOD.ood", args.ood]
    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(ood)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()
    logger = setup_logger(distributed_rank=0)  # TODO
    logger.info("Loaded configuration file {}".format(args.cfg))
    logger.info("Running with config:\n{}".format(cfg))
    # absolute paths of model weights (derived from DIR + checkpoint name)
    cfg.MODEL.weights_encoder = os.path.join(
        cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(
        cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exitst!"
    # ensure the visualization output folder exists
    if not os.path.isdir(os.path.join(cfg.DIR, "result")):
        os.makedirs(os.path.join(cfg.DIR, "result"))
    main(cfg, args.gpu)
| 10,392 | 34.35034 | 116 | py |
fmriprep | fmriprep-master/wrapper/src/fmriprep_docker/__main__.py | #!/usr/bin/env python
"""
The *fMRIPrep* on Docker wrapper
This is a lightweight Python wrapper to run *fMRIPrep*.
Docker must be installed and running. This can be checked
running ::
docker info
Please acknowledge this work using the citation boilerplate that *fMRIPrep* includes
in the visual report generated for every subject processed.
For a more detailed description of the citation boilerplate and its relevance,
please check out the
`NiPreps documentation <https://www.nipreps.org/intro/transparency/#citation-boilerplates>`__.
Please report any feedback to our `GitHub repository <https://github.com/nipreps/fmriprep>`__.
"""
import os
import re
import subprocess
import sys
try:
from ._version import __version__
except ImportError:
__version__ = '0+unknown'
__copyright__ = 'Copyright 2023, Center for Reproducible Neuroscience, Stanford University'
__credits__ = [
    'Craig Moodie',
    'Ross Blair',
    'Oscar Esteban',
    'Chris Gorgolewski',
    'Shoshana Berleant',
    'Christopher J. Markiewicz',
    'Russell A. Poldrack',
]
__bugreports__ = 'https://github.com/nipreps/fmriprep/issues'
# Interactive prompt shown when the requested Docker image is not local.
MISSING = """
Image '{}' is missing
Would you like to download? [Y/n] """
# Site-packages path inside the fmriprep Docker image (used for dev mounts).
PKG_PATH = '/opt/conda/envs/fmriprep/lib/python3.10/site-packages'
# TemplateFlow identifiers accepted as standard output spaces.
TF_TEMPLATES = (
    'MNI152Lin',
    'MNI152NLin2009cAsym',
    'MNI152NLin6Asym',
    'MNI152NLin6Sym',
    'MNIInfant',
    'MNIPediatricAsym',
    'NKI',
    'OASIS30ANTs',
    'PNC',
    'UNCInfant',
    'fsLR',
    'fsaverage',
    'fsaverage5',
    'fsaverage6',
)
# Non-template spaces that need no TemplateFlow lookup.
NONSTANDARD_REFERENCES = ('anat', 'T1w', 'run', 'func', 'sbref', 'fsnative')
# Monkey-patch Py2 subprocess so the rest of this file can rely on
# subprocess.DEVNULL and subprocess.run unconditionally.
if not hasattr(subprocess, 'DEVNULL'):
    subprocess.DEVNULL = -3
if not hasattr(subprocess, 'run'):
    # Reimplement minimal functionality for usage in this file
    def _run(args, stdout=None, stderr=None):
        from collections import namedtuple
        result = namedtuple('CompletedProcess', 'stdout stderr returncode')
        devnull = None
        if subprocess.DEVNULL in (stdout, stderr):
            # Route DEVNULL sentinels to a real os.devnull handle.
            devnull = open(os.devnull, 'r+')
            if stdout == subprocess.DEVNULL:
                stdout = devnull
            if stderr == subprocess.DEVNULL:
                stderr = devnull
        proc = subprocess.Popen(args, stdout=stdout, stderr=stderr)
        stdout, stderr = proc.communicate()
        res = result(stdout, stderr, proc.returncode)
        if devnull is not None:
            devnull.close()
        return res
    subprocess.run = _run
# De-fang Python 2's input - we don't eval user input
try:
    input = raw_input
except NameError:
    pass
def check_docker():
    """Probe the local Docker installation.

    Returns
    -------
    -1 if the docker executable cannot be found,
    0 if docker exists but the user cannot reach the daemon,
    1 if ``docker version`` ran successfully.
    """
    from errno import ENOENT

    try:
        probe = subprocess.run(
            ['docker', 'version'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except OSError as err:
        if err.errno == ENOENT:
            return -1
        raise
    daemon_down = probe.stderr.startswith(b"Cannot connect to the Docker daemon.")
    return 0 if daemon_down else 1
def check_image(image):
    """Return True when *image* is already present in the local Docker cache."""
    # `docker images -q` prints the image id (non-empty) only on a hit.
    listing = subprocess.run(['docker', 'images', '-q', image], stdout=subprocess.PIPE)
    return listing.stdout != b''
def check_memory(image):
    """Report total memory (in MB) visible inside a container of *image*.

    Runs ``free -m`` as the container entrypoint and parses the ``Mem:`` row;
    returns -1 when the container could not be run.
    """
    proc = subprocess.run(
        ['docker', 'run', '--rm', '--entrypoint=free', image, '-m'],
        stdout=subprocess.PIPE,
    )
    if proc.returncode:
        return -1
    # Second column of the "Mem:" row is the total.
    mem_rows = [ln for ln in proc.stdout.splitlines() if ln.startswith(b'Mem:')]
    return int(mem_rows[0].decode().split()[1])
def merge_help(wrapper_help, target_help):
    """Merge the wrapper's argparse help text with fmriprep's own help.

    Produces a combined help string: the wrapper's usage line augmented with
    the target's non-overlapping flags, the target's description/positional
    sections, a merged optional-arguments section, and the remaining groups
    of both. Relies on argparse's exact help layout; the asserts below
    encode those format assumptions.
    """
    def _get_posargs(usage):
        """
        Extract positional arguments from usage string.
        This function can be used by both native fmriprep (`fmriprep -h`)
        and the docker wrapper (`fmriprep-docker -h`).
        """
        posargs = []
        for targ in usage.split('\n')[-3:]:
            line = targ.lstrip()
            if line.startswith('usage'):
                continue
            if line[0].isalnum() or line[0] == "{":
                posargs.append(line)
            elif line[0] == '[' and (line[1].isalnum() or line[1] == "{"):
                posargs.append(line)
        return " ".join(posargs)
    # Matches all flags with up to two nested square brackets
    # I'm sorry.
    opt_re = re.compile(r'(\[--?[\w-]+(?:[^\[\]]+(?:\[(?:[^\[\]]+(?:\[[^\[\]]+\])?)+\])?)?\])')
    # Matches flag name only
    flag_re = re.compile(r'\[--?([\w-]+)[ \]]')
    # Normalize to Unix-style line breaks
    w_help = wrapper_help.rstrip().replace('\r', '')
    t_help = target_help.rstrip().replace('\r', '')
    # Split each help into its usage block and the \n\n-separated groups.
    w_usage, w_details = w_help.split('\n\n', 1)
    w_groups = w_details.split('\n\n')
    t_usage, t_details = t_help.split('\n\n', 1)
    t_groups = t_details.split('\n\n')
    w_posargs = _get_posargs(w_usage)
    t_posargs = _get_posargs(t_usage)
    w_options = opt_re.findall(w_usage)
    w_flags = sum(map(flag_re.findall, w_options), [])
    t_options = opt_re.findall(t_usage)
    t_flags = sum(map(flag_re.findall, t_options), [])
    # The following code makes this assumption
    assert w_flags[:2] == ['h', 'version']
    assert w_posargs.replace(']', '').replace('[', '') == t_posargs
    # Make sure we're not clobbering options we don't mean to
    overlap = set(w_flags).intersection(t_flags)
    expected_overlap = {
        'anat-derivatives',
        'bids-database-dir',
        'bids-filter-file',
        'fs-license-file',
        'fs-subjects-dir',
        'output-spaces',
        'config-file',
        'h',
        'use-plugin',
        'version',
        'w',
    }
    assert overlap == expected_overlap, "Clobbering options: {}".format(
        ', '.join(overlap - expected_overlap)
    )
    sections = []
    # Construct usage: wrapper's -h/--version first, then the target's
    # unique flags, then the remaining wrapper flags, wrapped to 79 cols.
    start = w_usage[: w_usage.index(' [')]
    indent = ' ' * len(start)
    new_options = sum(
        (
            w_options[:2],
            [opt for opt, flag in zip(t_options, t_flags) if flag not in overlap],
            w_options[2:],
        ),
        [],
    )
    opt_line_length = 79 - len(start)
    length = 0
    opt_lines = [start]
    for opt in new_options:
        opt = ' ' + opt
        olen = len(opt)
        if length + olen <= opt_line_length:
            opt_lines[-1] += opt
            length += olen
        else:
            opt_lines.append(indent + opt)
            length = olen
    opt_lines.append(indent + ' ' + t_posargs)
    sections.append('\n'.join(opt_lines))
    # Use target description and positional args
    sections.extend(t_groups[:2])
    # Append the target's non-overlapping optional-argument lines to the
    # wrapper's optional-arguments group.
    for line in t_groups[2].split('\n')[1:]:
        content = line.lstrip().split(',', 1)[0]
        if content[1:] not in overlap:
            w_groups[2] += '\n' + line
    sections.append(w_groups[2])
    # All remaining sections, show target then wrapper (skipping duplicates)
    sections.extend(t_groups[3:] + w_groups[6:])
    return '\n\n'.join(sections)
def is_in_directory(filepath, directory):
    """Return True when *filepath* resolves to a location strictly inside *directory*."""
    real_file = os.path.realpath(filepath)
    real_dir = os.path.realpath(directory)
    # Trailing separator prevents '/a/bc' matching directory '/a/b'.
    return real_file.startswith(real_dir + os.sep)
def get_parser():
    """Define the command line interface of the wrapper.

    Positional arguments follow the BIDS-Apps convention; options are split
    into "Wrapper options" (paths that must be mapped into the container)
    and "Developer options". Any argument not declared here is collected by
    ``parse_known_args`` in ``main`` and passed through verbatim to fmriprep
    inside the container.
    """
    import argparse
    from functools import partial
    class ToDict(argparse.Action):
        """Parse repeated ``PACKAGE=PATH`` tokens into ``{PACKAGE: abspath(PATH)}``."""
        def __call__(self, parser, namespace, values, option_string=None):
            d = {}
            for kv in values:
                # Split on the first '=' only, so paths containing '=' survive
                # (the original `kv.split("=")` raised ValueError on them).
                k, v = kv.split("=", 1)
                d[k] = os.path.abspath(v)
            setattr(namespace, self.dest, d)
    def _is_file(path, parser):
        """Ensure a given path exists and it is a file."""
        path = os.path.abspath(path)
        if not os.path.isfile(path):
            raise parser.error("Path should point to a file (or symlink of file): <%s>." % path)
        return path
    # add_help=False: -h is declared manually below so the wrapper can merge
    # its help with the containerized fmriprep's help (see merge_help()).
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False
    )
    IsFile = partial(_is_file, parser=parser)
    # Standard FMRIPREP arguments
    parser.add_argument('bids_dir', nargs='?', type=os.path.abspath, default='')
    parser.add_argument('output_dir', nargs='?', type=os.path.abspath, default='')
    parser.add_argument(
        'analysis_level', nargs='?', choices=['participant'], default='participant'
    )
    # NOTE: -h and --version must remain the first two options declared;
    # merge_help() asserts w_flags[:2] == ['h', 'version'].
    parser.add_argument(
        '-h', '--help', action='store_true', help="show this help message and exit"
    )
    parser.add_argument(
        '--version', action='store_true', help="show program's version number and exit"
    )
    # Allow alternative images (semi-developer)
    parser.add_argument(
        '-i',
        '--image',
        metavar='IMG',
        type=str,
        default='nipreps/fmriprep:{}'.format(__version__),
        help='image name',
    )
    # Options for mapping files and directories into container
    # Update `expected_overlap` variable in merge_help() when adding to this
    g_wrap = parser.add_argument_group(
        'Wrapper options',
        'Standard options that require mapping files into the container; see fmriprep '
        'usage for complete descriptions',
    )
    g_wrap.add_argument(
        '-w',
        '--work-dir',
        action='store',
        type=os.path.abspath,
    )
    g_wrap.add_argument(
        '--output-spaces',
        nargs="*",
    )
    g_wrap.add_argument(
        '--fs-license-file',
        metavar='PATH',
        type=IsFile,
        default=os.getenv('FS_LICENSE', None),
    )
    g_wrap.add_argument(
        '--fs-subjects-dir',
        metavar='PATH',
        type=os.path.abspath,
    )
    g_wrap.add_argument(
        '--config-file',
        metavar='PATH',
        type=os.path.abspath,
    )
    g_wrap.add_argument(
        '--anat-derivatives',
        metavar='PATH',
        type=os.path.abspath,
    )
    g_wrap.add_argument(
        '--use-plugin',
        metavar='PATH',
        action='store',
        default=None,
        type=os.path.abspath,
    )
    g_wrap.add_argument(
        '--bids-database-dir',
        metavar='PATH',
        type=os.path.abspath,
    )
    g_wrap.add_argument(
        '--bids-filter-file',
        metavar='PATH',
        type=os.path.abspath,
    )
    # Developer patch/shell options
    g_dev = parser.add_argument_group(
        'Developer options', 'Tools for testing and debugging FMRIPREP'
    )
    g_dev.add_argument(
        '--patch',
        nargs="+",
        metavar="PACKAGE=PATH",
        action=ToDict,
        help='Sequence of PACKAGE=PATH specifications to patch a Python package into the '
        'container Python environment.',
    )
    g_dev.add_argument(
        '--shell', action='store_true', help='Open shell in image instead of running FMRIPREP'
    )
    g_dev.add_argument(
        '--config',
        metavar='PATH',
        action='store',
        type=os.path.abspath,
        help='Use custom nipype.cfg file',
    )
    g_dev.add_argument(
        '-e',
        '--env',
        action='append',
        nargs=2,
        metavar=('ENV_VAR', 'value'),
        help='Set custom environment variables within container',
    )
    g_dev.add_argument(
        '-u',
        '--user',
        action='store',
        # Fixed missing space: the implicit concatenation previously rendered
        # "group/gid can beassigned" in --help output.
        help='Run container as a given user/uid. Additionally, group/gid can be '
        'assigned, (i.e., --user <UID>:<GID>)',
    )
    g_dev.add_argument(
        '--network',
        action='store',
        help='Run container with a different network driver '
        '("none" to simulate no internet connection)',
    )
    g_dev.add_argument('--no-tty', action='store_true', help='Run docker without TTY flag -it')
    return parser
def main():
    """Entry point.

    Build and execute a ``docker run`` command line: mount the needed host
    paths, rewrite the corresponding arguments to their in-container
    locations, and forward every unrecognized argument to fmriprep.
    Returns a process-style exit code (0 on success).
    """
    parser = get_parser()
    # Capture additional arguments to pass inside container
    opts, unknown_args = parser.parse_known_args()
    # Set help if no directories set
    if (opts.bids_dir, opts.output_dir, opts.version) == ('', '', False):
        opts.help = True
    # Stop if no docker / docker fails to run
    # check_docker() appears to return -1 when docker is absent and another
    # value < 1 when it cannot be executed (see the two messages below).
    check = check_docker()
    if check < 1:
        if opts.version:
            print('fmriprep wrapper {!s}'.format(__version__))
        if opts.help:
            parser.print_help()
        if check == -1:
            print("fmriprep: Could not find docker command... Is it installed?")
        else:
            print("fmriprep: Make sure you have permission to run 'docker'")
        return 1
    # For --help or --version, ask before downloading an image
    if not check_image(opts.image):
        resp = 'Y'
        if opts.version:
            print('fmriprep wrapper {!s}'.format(__version__))
        if opts.help:
            parser.print_help()
        if opts.version or opts.help:
            try:
                resp = input(MISSING.format(opts.image))
            except KeyboardInterrupt:
                print()
                return 1
        if resp not in ('y', 'Y', ''):
            return 0
        print('Downloading. This may take a while...')
    # Warn on low memory allocation
    # check_memory() reports the container memory limit in MB; -1 means the
    # query itself failed (usually a docker permission problem).
    mem_total = check_memory(opts.image)
    if mem_total == -1:
        print(
            'Could not detect memory capacity of Docker container.\n'
            'Do you have permission to run docker?'
        )
        return 1
    if not (opts.help or opts.version or '--reports-only' in unknown_args) and mem_total < 8000:
        print(
            'Warning: <8GB of RAM is available within your Docker '
            'environment.\nSome parts of fMRIPrep may fail to complete.'
        )
        if '--mem_mb' not in unknown_args:
            resp = 'N'
            try:
                resp = input('Continue anyway? [y/N]')
            except KeyboardInterrupt:
                print()
                return 1
            if resp not in ('y', 'Y', ''):
                return 0
    # Expose the host docker server version to the container via an env var
    ret = subprocess.run(
        ['docker', 'version', '--format', "{{.Server.Version}}"], stdout=subprocess.PIPE
    )
    docker_version = ret.stdout.decode('ascii').strip()
    command = ['docker', 'run', '--rm', '-e', 'DOCKER_VERSION_8395080871=%s' % docker_version]
    if not opts.no_tty:
        if opts.help:
            # TTY can corrupt stdout
            command.append('-i')
        else:
            command.append('-it')
    # Patch working repositories into installed package directories
    if opts.patch:
        for pkg, repo_path in opts.patch.items():
            command.extend(['-v', '{}:{}/{}:ro'.format(repo_path, PKG_PATH, pkg)])
    if opts.env:
        for envvar in opts.env:
            command.extend(['-e', '%s=%s' % tuple(envvar)])
    if opts.user:
        command.extend(['-u', opts.user])
    if opts.fs_license_file:
        command.extend(['-v', '{}:/opt/freesurfer/license.txt:ro'.format(opts.fs_license_file)])
    # main_args collects the positional arguments (bids_dir, output_dir,
    # analysis_level) rewritten to their in-container mount points.
    main_args = []
    if opts.bids_dir:
        command.extend(['-v', ':'.join((opts.bids_dir, '/data', 'ro'))])
        main_args.append('/data')
    if opts.output_dir:
        if not os.path.exists(opts.output_dir):
            # create it before docker does
            os.makedirs(opts.output_dir)
        command.extend(['-v', ':'.join((opts.output_dir, '/out'))])
        main_args.append('/out')
    main_args.append(opts.analysis_level)
    # Each wrapper option below follows the same pattern: mount the host path
    # read-write or read-only, then re-inject the option pointing at the
    # in-container location via unknown_args.
    if opts.fs_subjects_dir:
        command.extend(['-v', '{}:/opt/subjects'.format(opts.fs_subjects_dir)])
        unknown_args.extend(['--fs-subjects-dir', '/opt/subjects'])
    if opts.config_file:
        command.extend(['-v', '{}:/tmp/config.toml'.format(opts.config_file)])
        unknown_args.extend(['--config-file', '/tmp/config.toml'])
    if opts.anat_derivatives:
        command.extend(['-v', '{}:/opt/smriprep/subjects'.format(opts.anat_derivatives)])
        unknown_args.extend(['--anat-derivatives', '/opt/smriprep/subjects'])
    if opts.work_dir:
        command.extend(['-v', ':'.join((opts.work_dir, '/scratch'))])
        unknown_args.extend(['-w', '/scratch'])
    # Check that work_dir is not a child of bids_dir
    if opts.work_dir and opts.bids_dir:
        if is_in_directory(opts.work_dir, opts.bids_dir):
            print(
                'The selected working directory is a subdirectory of the input BIDS folder. '
                'Please modify the output path.'
            )
            return 1
    if opts.config:
        command.extend(['-v', ':'.join((opts.config, '/home/fmriprep/.nipype/nipype.cfg', 'ro'))])
    if opts.use_plugin:
        command.extend(['-v', ':'.join((opts.use_plugin, '/tmp/plugin.yml', 'ro'))])
        unknown_args.extend(['--use-plugin', '/tmp/plugin.yml'])
    if opts.bids_database_dir:
        command.extend(['-v', ':'.join((opts.bids_database_dir, '/tmp/bids_db'))])
        unknown_args.extend(['--bids-database-dir', '/tmp/bids_db'])
    if opts.bids_filter_file:
        command.extend(['-v', ':'.join((opts.bids_filter_file, '/tmp/bids_filter.json'))])
        unknown_args.extend(['--bids-filter-file', '/tmp/bids_filter.json'])
    if opts.output_spaces:
        # Spaces not recognized by TemplateFlow are treated as custom local
        # templates: the tpl-* directory is mounted into the container cache.
        spaces = []
        for space in opts.output_spaces:
            if space.split(':')[0] not in (TF_TEMPLATES + NONSTANDARD_REFERENCES):
                tpl = os.path.basename(space)
                if not tpl.startswith('tpl-'):
                    raise RuntimeError("Custom template %s requires a `tpl-` prefix" % tpl)
                target = '/home/fmriprep/.cache/templateflow/' + tpl
                command.extend(['-v', ':'.join((os.path.abspath(space), target, 'ro'))])
                # Strip the 'tpl-' prefix to obtain the template identifier
                spaces.append(tpl[4:])
            else:
                spaces.append(space)
        unknown_args.extend(['--output-spaces'] + spaces)
    if opts.shell:
        command.append('--entrypoint=bash')
    if opts.network:
        command.append('--network=' + opts.network)
    command.append(opts.image)
    # Override help and version to describe underlying program
    # Respects '-i' flag, so will retrieve information from any image
    if opts.help:
        command.append('-h')
        targethelp = subprocess.check_output(command).decode()
        print(merge_help(parser.format_help(), targethelp))
        return 0
    elif opts.version:
        # Get version to be run and exit
        command.append('--version')
        ret = subprocess.run(command)
        return ret.returncode
    if not opts.shell:
        command.extend(main_args)
        command.extend(unknown_args)
    print("RUNNING: " + ' '.join(command))
    ret = subprocess.run(command)
    if ret.returncode:
        print("fMRIPrep: Please report errors to {}".format(__bugreports__))
    return ret.returncode
if __name__ == '__main__':
    # When invoked as ``python -m fmriprep_docker``, argv[0] is the path to
    # __main__.py; rewrite it so help output shows a runnable command line.
    if '__main__.py' in sys.argv[0]:
        from . import __name__ as module
        sys.argv[0] = '%s -m %s' % (sys.executable, module)
    sys.exit(main())
| 18,982 | 30.170772 | 99 | py |
fmriprep | fmriprep-master/wrapper/src/fmriprep_docker/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/scripts/fetch_templates.py | #!/usr/bin/env python
"""
Standalone script to facilitate caching of required TemplateFlow templates.
To download and view how to use this script, run the following commands inside a terminal:
1. wget https://raw.githubusercontent.com/nipreps/fmriprep/master/scripts/fetch_templates.py
2. python fetch_templates.py -h
"""
import argparse
import os
def fetch_MNI2009():
    """
    Fetch the MNI152NLin2009cAsym template files required by fMRIPrep.

    Expected templates:

    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_T1w.nii.gz
    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-02_T1w.nii.gz
    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_desc-brain_mask.nii.gz
    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-02_desc-brain_mask.nii.gz
    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_desc-carpet_dseg.nii.gz
    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz
    tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz
    """
    queries = (
        dict(resolution=(1, 2), desc=None, suffix="T1w"),
        dict(resolution=(1, 2), desc="brain", suffix="mask"),
        dict(resolution=1, atlas=None, desc="carpet", suffix="dseg"),
        dict(resolution=2, desc="fMRIPrep", suffix="boldref"),
        dict(resolution=1, label="brain", suffix="probseg"),
    )
    for query in queries:
        tf.get("MNI152NLin2009cAsym", **query)
def fetch_MNI6():
    """
    Fetch the MNI152NLin6Asym template files required by fMRIPrep.

    Expected templates:

    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_atlas-HCP_dseg.nii.gz
    """
    queries = (
        dict(resolution=(1, 2), desc=None, suffix="T1w"),
        dict(resolution=(1, 2), desc="brain", suffix="mask"),
        # The HCP atlas parcellation is needed for CIFTI outputs
        dict(resolution=2, atlas="HCP", suffix="dseg"),
    )
    for query in queries:
        tf.get("MNI152NLin6Asym", **query)
def fetch_OASIS():
    """
    Fetch the OASIS30ANTs template files required by fMRIPrep.

    Expected templates:

    tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_T1w.nii.gz
    tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_label-WM_probseg.nii.gz
    tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_label-BS_probseg.nii.gz
    tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_label-brain_probseg.nii.gz
    tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-brain_mask.nii.gz
    tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-BrainCerebellumExtraction_mask.nii.gz
    """
    # NOTE(review): the expected list above mentions ``desc-brain_mask`` while
    # the query below uses ``label="brain"`` -- confirm against the actual
    # TemplateFlow archive contents.
    queries = (
        dict(resolution=1, desc=None, label=None, suffix="T1w"),
        dict(resolution=1, label="WM", suffix="probseg"),
        dict(resolution=1, label="BS", suffix="probseg"),
        dict(resolution=1, label="brain", suffix="probseg"),
        dict(resolution=1, label="brain", suffix="mask"),
        dict(resolution=1, desc="BrainCerebellumExtraction", suffix="mask"),
    )
    for query in queries:
        tf.get("OASIS30ANTs", **query)
def fetch_fsaverage():
    """
    Fetch the fsaverage surface files required by fMRIPrep.

    Expected templates:

    tpl-fsaverage/tpl-fsaverage_hemi-L_den-164k_desc-std_sphere.surf.gii
    tpl-fsaverage/tpl-fsaverage_hemi-R_den-164k_desc-std_sphere.surf.gii
    tpl-fsaverage/tpl-fsaverage_hemi-L_den-164k_desc-vaavg_midthickness.shape.gii
    tpl-fsaverage/tpl-fsaverage_hemi-R_den-164k_desc-vaavg_midthickness.shape.gii
    tpl-fsaverage/tpl-fsaverage_hemi-L_den-164k_midthickness.surf.gii
    tpl-fsaverage/tpl-fsaverage_hemi-R_den-164k_midthickness.surf.gii
    """
    for query in (
        dict(density="164k", desc="std", suffix="sphere"),
        dict(density="164k", suffix="midthickness"),
    ):
        tf.get("fsaverage", **query)
def fetch_fsLR():
    """
    Fetch the fsLR surface files required by fMRIPrep.

    Expected templates:

    tpl-fsLR/tpl-fsLR_hemi-L_den-32k_desc-nomedialwall_dparc.label.gii
    tpl-fsLR/tpl-fsLR_hemi-L_den-32k_desc-vaavg_midthickness.shape.gii
    tpl-fsLR/tpl-fsLR_hemi-L_den-32k_sphere.surf.gii
    tpl-fsLR/tpl-fsLR_hemi-R_den-32k_desc-nomedialwall_dparc.label.gii
    tpl-fsLR/tpl-fsLR_hemi-R_den-32k_desc-vaavg_midthickness.shape.gii
    tpl-fsLR/tpl-fsLR_hemi-R_den-32k_sphere.surf.gii
    tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-L_den-32k_sphere.surf.gii
    tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-R_den-32k_sphere.surf.gii
    """
    # A single density-filtered query covers all required fsLR files
    tf.get("fsLR", density="32k")
def fetch_all():
    """Fetch every template group required by fMRIPrep."""
    fetchers = (fetch_MNI2009, fetch_MNI6, fetch_OASIS, fetch_fsaverage, fetch_fsLR)
    for fetcher in fetchers:
        fetcher()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Helper script for pre-caching required templates to run fMRIPrep",
    )
    parser.add_argument(
        "--tf-dir",
        type=os.path.abspath,
        help="Directory to save templates in. If not provided, templates will be saved to"
        " `${HOME}/.cache/templateflow`.",
    )
    opts = parser.parse_args()
    # set envvar (if necessary) prior to templateflow import
    if opts.tf_dir is not None:
        os.environ["TEMPLATEFLOW_HOME"] = opts.tf_dir
    # Imported late on purpose: templateflow reads TEMPLATEFLOW_HOME at import
    # time, so the variable must already be set.
    import templateflow.api as tf
    fetch_all()
| 4,945 | 35.910448 | 92 | py |
fmriprep | fmriprep-master/scripts/generate_reference_mask.py | #!/usr/bin/env python
import sys
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.func.util import init_bold_reference_wf
def sink_mask_file(in_file, orig_file, out_dir):
    """Copy ``in_file`` into ``out_dir``, named after ``orig_file`` plus a ``_mask`` suffix."""
    import os
    from nipype.utils.filemanip import fname_presuffix, copyfile

    os.makedirs(out_dir, exist_ok=True)
    destination = fname_presuffix(orig_file, suffix='_mask', newpath=out_dir)
    copyfile(in_file, destination, copy=True, use_hardlink=True)
    return destination
def init_main_wf(bold_file, out_dir, base_dir=None, name='main_wf'):
    """Build a BOLD-reference workflow that also sinks the brain mask to ``out_dir``."""
    workflow = init_bold_reference_wf(omp_nthreads=4,
                                      name=name)
    workflow.base_dir = base_dir
    workflow.inputs.inputnode.bold_file = bold_file

    # Function node that copies the computed mask next to the original name
    sink_node = pe.Node(niu.Function(function=sink_mask_file),
                        name='sink')
    sink_node.inputs.out_dir = out_dir
    sink_node.inputs.orig_file = bold_file

    outputnode = workflow.get_node('outputnode')
    workflow.connect([
        (outputnode, sink_node, [('bold_mask', 'in_file')]),
    ])
    return workflow
def main():
    """CLI entry point: ``<script> <bold_file> <out_dir>``."""
    workflow = init_main_wf(sys.argv[1], sys.argv[2])
    workflow.run(plugin='MultiProc')
if __name__ == '__main__':
    # Usage: generate_reference_mask.py <bold_file> <out_dir>
    main()
| 1,153 | 27.85 | 74 | py |
fmriprep | fmriprep-master/docs/conf.py | # fmriprep documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 09:04:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
from sphinx import __version__ as sphinxversion
from packaging import version as pver # Avoid distutils.LooseVersion which is deprecated
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("sphinxext"))
sys.path.insert(0, os.path.abspath("../wrapper"))
from github_link import make_linkcode_resolve # noqa: E402
# -- General configuration ------------------------------------------------

# Minimal Sphinx version required to build these docs.
needs_sphinx = "1.5.3"

# Sphinx extension modules, both builtin ("sphinx.ext.*") and third-party.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.linkcode",
    "sphinx.ext.napoleon",
    "sphinxarg.ext",  # argparse extension
    "nipype.sphinxext.plot_workflow",
]

# Heavy scientific dependencies are mocked so autodoc can import the package
# without them being installed.
autodoc_mock_imports = [
    "numpy",
    "nitime",
    "matplotlib",
]
if pver.parse(sphinxversion) >= pver.parse("1.7.0"):
    autodoc_mock_imports += [
        "pandas",
        "nilearn",
        "seaborn",
    ]

# Paths (relative to this directory) that contain templates.
templates_path = ["_templates"]

# Accept custom section names to be parsed for numpy-style docstrings
# of parameters.
napoleon_use_param = False
napoleon_custom_sections = [
    ("Inputs", "Parameters"),
    ("Outputs", "Parameters"),
]

# Source file suffix and master toctree document.
source_suffix = ".rst"
master_doc = "index"

# General information about the project.
project = "fmriprep"
author = "The fMRIPrep developers"
copyright = f"2016-{datetime.now().year}, {author}"

# Short X.Y version and full release string; placeholders substituted at
# build time.
version = "version"
release = "version"

language = 'en'

# Patterns of files/directories to ignore when looking for sources (also
# affects html_static_path and html_extra_path).
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# Pygments (syntax highlighting) style.
pygments_style = "sphinx"

# `todo` and `todoList` directives produce no output.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

html_theme = "sphinx_rtd_theme"
# Custom static files; copied after the builtin ones, so a file named
# "default.css" would override the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "fmriprepdoc"

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX knobs (papersize, pointsize, preamble, figure_align) are left at
# their defaults.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, "fmriprep.tex", "fMRIprep Documentation",
     author,
     "manual"),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "fmriprep", "fmriprep Documentation",
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    (master_doc, "fmriprep", "fMRIprep Documentation",
     author, "fmriprep", "One line description of project.",
     "Miscellaneous"),
]

# sphinx.ext.linkcode: resolve "[source]" links to the GitHub repository.
linkcode_resolve = make_linkcode_resolve(
    "fmriprep",
    "https://github.com/nipreps/fmriprep/blob/{revision}/{package}/{path}#L{lineno}",
)

# Cross-project references for sphinx.ext.intersphinx.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
    "matplotlib": ("https://matplotlib.org/stable/", None),
    "bids": ("https://bids-standard.github.io/pybids/", None),
    "nibabel": ("https://nipy.org/nibabel/", None),
    "nipype": ("https://nipype.readthedocs.io/en/latest/", None),
    "niworkflows": ("https://www.nipreps.org/niworkflows/", None),
    "sdcflows": ("https://www.nipreps.org/sdcflows/", None),
    "smriprep": ("https://www.nipreps.org/smriprep/", None),
    "templateflow": ("https://www.templateflow.org/python-client", None),
}

suppress_warnings = ["image.nonlocal_uri"]
def setup(app):
app.add_css_file("theme_overrides.css")
# We need this for the boilerplate script
app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js")
| 11,942 | 32.642254 | 89 | py |
fmriprep | fmriprep-master/docs/sphinxext/github_link.py | """
This script comes from scikit-learn:
https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/github_link.py
"""
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
REVISION_CMD = 'git rev-parse --short HEAD'
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print('Failed to execute git to get revision')
return None
return revision.decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ('py', 'pyx'):
return
if not info.get('module') or not info.get('fullname'):
return
class_name = info['fullname'].split('.')[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode('utf-8')
module = __import__(info['module'], fromlist=[class_name])
obj = attrgetter(info['fullname'])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn,
start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ''
return url_fmt.format(revision=revision, package=package,
path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Returns a linkcode_resolve function for the given URL format

    revision is a git commit reference (hash or name)

    package is the name of the root module of the package

    url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
                                   'blob/{revision}/{package}/'
                                   '{path}#L{lineno}')
    """
    # Resolve the revision once, at configuration time, and freeze it into
    # the callable handed to sphinx.ext.linkcode.
    return partial(
        _linkcode_resolve,
        revision=_get_git_revision(),
        package=package,
        url_fmt=url_fmt,
    )
| 2,803 | 30.505618 | 85 | py |
fmriprep | fmriprep-master/.maint/update_authors.py | #!/usr/bin/env python3
"""Update and sort the creators list of the zenodo record."""
import sys
from pathlib import Path
import json
import click
from fuzzywuzzy import fuzz, process
def read_md_table(md_text):
    """
    Extract the first table found in a markdown document as a Python dict.

    Column names come from the header row (lowercased, ``*`` stripped);
    empty cells are omitted from the row dicts. Parsing stops at the first
    non-table line after the table body, so only the first table is read.

    Examples
    --------
    >>> read_md_table('''
    ... # Some text
    ...
    ... More text
    ...
    ... | **Header1** | **Header2** |
    ... | --- | --- |
    ... | val1 | val2 |
    ... |  | val4 |
    ... ''')
    [{'header1': 'val1', 'header2': 'val2'}, {'header2': 'val4'}]
    """
    header_line = None
    keys = None
    rows = []
    for raw in md_text.splitlines():
        stripped = raw.strip()
        if stripped.startswith("| --- |"):
            # Separator row: derive column names from the preceding line
            cells = (c.replace("*", "").strip() for c in header_line.split("|"))
            keys = [c.lower() for c in cells if c]
            continue
        if not keys:
            # Still looking for the table; remember the candidate header
            header_line = raw
            continue
        if not raw or not stripped.startswith("|"):
            # End of the first table
            break
        # Drop the empty fragments outside the outer pipes, blank cells -> None
        cells = [c.strip() or None for c in raw.split("|")][1:-1]
        rows.append({k: v for k, v in zip(keys, cells) if v})
    return rows
def sort_contributors(entries, git_lines, exclude=None, last=None):
    """Return a list of author dictionaries, ordered by contribution.

    ``entries`` are rows parsed from the .maint markdown tables, with names
    as "Last, First"; ``git_lines`` are "First Last" names ordered by git
    contribution. Names in ``exclude`` are dropped silently; other git names
    that cannot be matched are reported back.

    Returns a tuple ``(authors, unmatched)``: matched entries in git order,
    then table entries without commits, finally re-inserted at any explicit
    integer ``position`` their table row requested (negative counts from the
    end).

    NOTE(review): the ``last`` parameter is normalized but never used below
    -- confirm whether it can be removed from callers.
    """
    last = last or []
    sorted_authors = sorted(entries, key=lambda i: i["name"])
    # Convert table "Last, First" names into "First Last" for fuzzy matching
    first_last = [
        " ".join(val["name"].split(",")[::-1]).strip() for val in sorted_authors
    ]
    first_last_excl = [
        " ".join(val["name"].split(",")[::-1]).strip() for val in exclude or []
    ]
    unmatched = []
    author_matches = []
    for ele in git_lines:
        matches = process.extract(
            ele, first_last, scorer=fuzz.token_sort_ratio, limit=2
        )
        # matches is a list [('First match', % Match), ('Second match', % Match)]
        # Only accept a fuzzy match above the 80% similarity threshold
        if matches[0][1] > 80:
            val = sorted_authors[first_last.index(matches[0][0])]
        else:
            # skip unmatched names
            if ele not in first_last_excl:
                unmatched.append(ele)
            continue
        if val not in author_matches:
            author_matches.append(val)
    # Append table entries that never appeared in the git history
    names = {" ".join(val["name"].split(",")[::-1]).strip() for val in author_matches}
    for missing_name in first_last:
        if missing_name not in names:
            missing = sorted_authors[first_last.index(missing_name)]
            author_matches.append(missing)
    # Honor explicit 'position' requests (negative = offset from the end)
    position_matches = []
    for i, item in enumerate(author_matches):
        pos = item.pop("position", None)
        if pos is not None:
            position_matches.append((i, int(pos)))
    for i, pos in position_matches:
        if pos < 0:
            pos += len(author_matches) + 1
        author_matches.insert(pos, author_matches.pop(i))
    return author_matches, unmatched
def get_git_lines(fname="line-contributors.txt"):
    """Run git-line-summary and return the list of contributor names.

    Output is cached in ``fname``; delete that file to force a refresh.

    Parameters
    ----------
    fname : str
        Path of the cache file holding the raw git-line-summary output.

    Returns
    -------
    list of str
        Contributor names ("First Last"), in git-line-summary order.

    Raises
    ------
    RuntimeError
        If no cached data exists and ``git-line-summary`` is unavailable or
        produced no output.
    """
    import shutil
    import subprocess as sp
    contrib_file = Path(fname)
    lines = []
    if contrib_file.exists():
        print("WARNING: Reusing existing line-contributors.txt file.", file=sys.stderr)
        lines = contrib_file.read_text().splitlines()
    git_line_summary_path = shutil.which("git-line-summary")
    if not lines and git_line_summary_path:
        print("Running git-line-summary on repo")
        lines = sp.check_output([git_line_summary_path]).decode().splitlines()
        # Drop uncommitted (working-tree) changes from the attribution
        lines = [line for line in lines if "Not Committed Yet" not in line]
        contrib_file.write_text("\n".join(lines))
    if not lines:
        # BUG FIX: the original expression ``("msg...%s" % "hint") * flag``
        # multiplied the *whole* message by the boolean ('%' and '*' have
        # equal precedence, left-assoc), so the RuntimeError carried an empty
        # message whenever git-line-summary existed but returned nothing.
        # Only the git-extras hint should be conditional.
        msg = "Could not find line-contributors from git repository."
        if git_line_summary_path is None:
            msg += " git-line-summary not found, please install git-extras."
        raise RuntimeError(msg)
    # Data lines look like ``<count> <Author Name> <pct>%``; keep the middle.
    return [" ".join(line.strip().split()[1:-1]) for line in lines if "%" in line]
def _namelast(inlist):
    """Merge each entry's ``name`` and ``lastname`` fields in-place."""
    out = []
    for entry in inlist:
        first = entry.pop("name", "")
        last = entry.pop("lastname", "")
        entry["name"] = f"{first} {last}".strip()
        out.append(entry)
    return out
# Click command group: the ``zenodo`` and ``publication`` subcommands below
# register themselves via ``@cli.command()``.
@click.group()
def cli():
    """Generate authorship boilerplates."""
    pass
@cli.command()
@click.option("-z", "--zenodo-file", type=click.Path(exists=True), default=".zenodo.json")
@click.option("-m", "--maintainers", type=click.Path(exists=True), default=".maint/MAINTAINERS.md")
@click.option("-c", "--contributors", type=click.Path(exists=True),
              default=".maint/CONTRIBUTORS.md")
@click.option("--pi", type=click.Path(exists=True), default=".maint/PIs.md")
@click.option("-f", "--former-file", type=click.Path(exists=True), default=".maint/FORMER.md")
def zenodo(
    zenodo_file,
    maintainers,
    contributors,
    pi,
    former_file,
):
    """Generate a new Zenodo payload file."""
    data = get_git_lines()
    zenodo = json.loads(Path(zenodo_file).read_text())
    former = _namelast(read_md_table(Path(former_file).read_text()))
    # Maintainers become Zenodo "creators"; former members are excluded from
    # matching but do not count as unmatched committers.
    zen_creators, miss_creators = sort_contributors(
        _namelast(read_md_table(Path(maintainers).read_text())),
        data,
        exclude=former,
    )
    zen_contributors, miss_contributors = sort_contributors(
        _namelast(read_md_table(Path(contributors).read_text())),
        data,
        exclude=former
    )
    # PIs are appended after contributors, ordered by their "position" field.
    zen_pi = _namelast(
        sorted(
            read_md_table(Path(pi).read_text()),
            key=lambda v: (int(v.get("position", -1)), v.get("lastname"))
        )
    )
    zenodo["creators"] = zen_creators
    zenodo["contributors"] = zen_contributors + zen_pi
    # Only names missed by BOTH lookups were truly absent from .maint/ files.
    misses = set(miss_creators).intersection(miss_contributors)
    if misses:
        print(
            "Some people made commits, but are missing in .maint/ "
            f"files: {', '.join(misses)}",
            file=sys.stderr,
        )
    # Remove position (internal ordering key) and other non-Zenodo fields
    for creator in zenodo["creators"]:
        creator.pop("position", None)
        creator.pop("handle", None)
        if isinstance(creator["affiliation"], list):
            creator["affiliation"] = creator["affiliation"][0]
    for creator in zenodo["contributors"]:
        creator.pop("handle", None)
        creator["type"] = "Researcher"
        creator.pop("position", None)
        if isinstance(creator["affiliation"], list):
            creator["affiliation"] = creator["affiliation"][0]
    Path(zenodo_file).write_text(
        "%s\n" % json.dumps(zenodo, indent=2)
    )
@cli.command()
@click.option("-m", "--maintainers", type=click.Path(exists=True), default=".maint/MAINTAINERS.md")
@click.option("-c", "--contributors", type=click.Path(exists=True),
              default=".maint/CONTRIBUTORS.md")
@click.option("--pi", type=click.Path(exists=True), default=".maint/PIs.md")
@click.option("-f", "--former-file", type=click.Path(exists=True), default=".maint/FORMER.md")
def publication(
    maintainers,
    contributors,
    pi,
    former_file,
):
    """Generate the list of authors and affiliations for papers."""
    members = (
        _namelast(read_md_table(Path(maintainers).read_text()))
        + _namelast(read_md_table(Path(contributors).read_text()))
    )
    hits, misses = sort_contributors(
        members,
        get_git_lines(),
        exclude=_namelast(read_md_table(Path(former_file).read_text())),
    )
    # PIs are listed last, ordered by their "position" field.
    pi_hits = _namelast(
        sorted(
            read_md_table(Path(pi).read_text()),
            key=lambda v: (int(v.get("position", -1)), v.get("lastname"))
        )
    )
    pi_names = [pi["name"] for pi in pi_hits]
    # Drop PIs already matched as regular authors, then append them at the end.
    hits = [
        hit for hit in hits
        if hit["name"] not in pi_names
    ] + pi_hits
    def _aslist(value):
        # Normalize scalar-or-sequence affiliation values to a list.
        if isinstance(value, (list, tuple)):
            return value
        return [value]
    # Remove position; collect affiliations in first-seen order
    affiliations = []
    for item in hits:
        item.pop("position", None)
        for a in _aslist(item.get("affiliation", "Unaffiliated")):
            if a not in affiliations:
                affiliations.append(a)
    # 1-based affiliation indices, one comma-separated string per author.
    aff_indexes = [
        ", ".join(
            [
                "%d" % (affiliations.index(a) + 1)
                for a in _aslist(author.get("affiliation", "Unaffiliated"))
            ]
        )
        for author in hits
    ]
    if misses:
        print(
            "Some people made commits, but are missing in .maint/ "
            f"files: {', '.join(misses)}",
            file=sys.stderr,
        )
    print("Authors (%d):" % len(hits))
    print(
        "%s."
        % "; ".join(
            [
                "%s \\ :sup:`%s`\\ " % (i["name"], idx)
                for i, idx in zip(hits, aff_indexes)
            ]
        )
    )
    print(
        "\n\nAffiliations:\n%s"
        % "\n".join(
            ["{0: >2}. {1}".format(i + 1, a) for i, a in enumerate(affiliations)]
        )
    )
if __name__ == "__main__":
    # Install entry-point.
    # (The original used a bare triple-quoted string here, which is a no-op
    # expression statement, not a docstring — a docstring is only meaningful
    # as the first statement of a module, class, or function body.)
    cli()
| 9,037 | 28.061093 | 99 | py |
fmriprep | fmriprep-master/.maint/update_zenodo.py | #!/usr/bin/env python3
"""Update and sort the creators list of the zenodo record."""
import sys
from pathlib import Path
import json
from fuzzywuzzy import fuzz, process
# These ORCIDs should go last
CREATORS_LAST = ['Poldrack, Russell A.', 'Gorgolewski, Krzysztof J.']
CONTRIBUTORS_LAST = ['Ghosh, Satrajit S.']
def sort_contributors(entries, git_lines, exclude=None, last=None):
    """Return a list of author dictionaries, ordered by contribution."""
    last = last or []
    sorted_authors = sorted(entries, key=lambda i: i['name'])
    # "Lastname, First" -> "First Lastname", to match git-line-summary output.
    first_last = [' '.join(val['name'].split(',')[::-1]).strip()
                  for val in sorted_authors]
    first_last_excl = [' '.join(val['name'].split(',')[::-1]).strip()
                      for val in exclude or []]
    unmatched = []
    author_matches = []
    position = 1
    for ele in git_lines:
        matches = process.extract(ele, first_last, scorer=fuzz.token_sort_ratio,
                                  limit=2)
        # matches is a list [('First match', % Match), ('Second match', % Match)]
        if matches[0][1] > 80:
            val = sorted_authors[first_last.index(matches[0][0])]
        else:
            # skip unmatched names (excluded/former members are dropped silently)
            if ele not in first_last_excl:
                unmatched.append(ele)
            continue
        if val not in author_matches:
            # First time this author is matched: assign the next rank.
            val['position'] = position
            author_matches.append(val)
            position += 1
    names = {' '.join(val['name'].split(',')[::-1]).strip() for val in author_matches}
    # Authors with no matched commits are appended after all matched ones.
    for missing_name in first_last:
        if missing_name not in names:
            missing = sorted_authors[first_last.index(missing_name)]
            missing['position'] = position
            author_matches.append(missing)
            position += 1
    all_names = [val['name'] for val in author_matches]
    # Force the designated ``last`` authors to the very end, in the given order.
    for last_author in last:
        author_matches[all_names.index(last_author)]['position'] = position
        position += 1
    author_matches = sorted(author_matches, key=lambda k: k['position'])
    return author_matches, unmatched
def get_git_lines(fname='line-contributors.txt'):
    """Run git-line-summary."""
    import shutil
    import subprocess as sp
    contrib_file = Path(fname)
    lines = []
    # Reuse a previously generated contributors file when present.
    if contrib_file.exists():
        print('WARNING: Reusing existing line-contributors.txt file.', file=sys.stderr)
        lines = contrib_file.read_text().splitlines()
    cmd = [shutil.which('git-line-summary')]
    if cmd == [None]:
        # Newer git-extras renamed the tool; fall back to ``git-summary --line``.
        cmd = [shutil.which('git-summary'), "--line"]
    if not lines and cmd[0]:
        print(f"Running {' '.join(cmd)!r} on repo")
        lines = sp.check_output(cmd).decode().splitlines()
        # Uncommitted work carries no author attribution.
        lines = [line for line in lines if "Not Committed Yet" not in line]
        contrib_file.write_text('\n'.join(lines))
    if not lines:
        # NB: ``%`` binds before ``*``, so the full message (base + hint) is
        # multiplied by 0/1 — the text only appears when the tool is missing.
        raise RuntimeError("""\
Could not find line-contributors from git repository.%s""" % """ \
git-(line-)summary not found, please install git-extras. """ * (cmd[0] is None))
    # Keep only percentage rows; drop the count and percent columns.
    return [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line]
def loads_table_from_markdown(s):
    """Read the first table from a Markdown text."""
    rows = []
    columns = None
    for line in s.splitlines():
        if not line.startswith("|"):
            if columns:
                # A table was already read; this line is past its end.
                break
            continue
        cells = line.split("|")[1:-1]
        if not columns:
            # First pipe-row holds the column names; drop bold markers.
            columns = [cell.strip("* ") for cell in cells]
            continue
        stripped = [cell.strip() for cell in cells]
        # Skip the |---|---| separator row (cells made only of dashes).
        if any(any(ch != "-" for ch in cell) for cell in stripped):
            rows.append(dict(zip(columns, stripped)))
    return rows
def loads_contributors(s):
    """Reformat contributors read from the Markdown table."""
    entries = []
    for row in loads_table_from_markdown(s):
        # Re-key to the Zenodo field names, with "Lastname, Name" ordering.
        entries.append(
            {
                "affiliation": row["Affiliation"],
                "name": "{}, {}".format(row["Lastname"], row["Name"]),
                "orcid": row["ORCID"],
            }
        )
    return entries
if __name__ == '__main__':
    data = get_git_lines()
    zenodo_file = Path('.zenodo.json')
    zenodo = json.loads(zenodo_file.read_text())
    # Read the "former members" exclusion list once instead of twice.
    former = json.loads(Path('.maint/former.json').read_text())
    creators = json.loads(Path('.maint/developers.json').read_text())
    zen_creators, miss_creators = sort_contributors(
        creators, data,
        exclude=former,
        last=CREATORS_LAST)
    contributors = loads_contributors(Path('.maint/CONTRIBUTORS.md').read_text())
    zen_contributors, miss_contributors = sort_contributors(
        contributors, data,
        exclude=former,
        last=CONTRIBUTORS_LAST)
    zenodo['creators'] = zen_creators
    zenodo['contributors'] = zen_contributors
    # Only warn when there actually are unmatched committers (the original
    # printed the warning, with an empty list, unconditionally).
    misses = set(miss_creators).intersection(miss_contributors)
    if misses:
        print("Some people made commits, but are missing in .maint/ "
              "files: %s." % ', '.join(misses),
              file=sys.stderr)
    # Remove position (internal ordering key) before serializing the payload;
    # ``pop`` instead of ``del`` tolerates entries that never got a position.
    for creator in zenodo['creators']:
        creator.pop('position', None)
        if isinstance(creator['affiliation'], list):
            creator['affiliation'] = creator['affiliation'][0]
    for creator in zenodo['contributors']:
        creator['type'] = 'Researcher'
        creator.pop('position', None)
        if isinstance(creator['affiliation'], list):
            creator['affiliation'] = creator['affiliation'][0]
    zenodo_file.write_text('%s\n' % json.dumps(zenodo, indent=2, ensure_ascii=False))
| 5,620 | 35.5 | 87 | py |
fmriprep | fmriprep-master/.maint/paper_author_list.py | #!/usr/bin/env python3
"""Generate an author list for a new paper or abstract."""
import sys
from pathlib import Path
import json
from update_zenodo import get_git_lines, sort_contributors
# These authors should go last
AUTHORS_LAST = ['Gorgolewski, Krzysztof J.', 'Poldrack, Russell A.', 'Esteban, Oscar']
def _aslist(inlist):
    """Return *inlist* unchanged when it is a list; otherwise wrap it in one."""
    return inlist if isinstance(inlist, list) else [inlist]
if __name__ == '__main__':
    devs = json.loads(Path('.maint/developers.json').read_text())
    contribs = json.loads(Path('.maint/contributors.json').read_text())
    author_matches, unmatched = sort_contributors(
        devs + contribs, get_git_lines(),
        exclude=json.loads(Path('.maint/former.json').read_text()),
        last=AUTHORS_LAST)
    # Remove the internal ordering key; collect affiliations in first-seen
    # order. ``pop`` tolerates entries that never got a position assigned.
    affiliations = []
    for item in author_matches:
        item.pop('position', None)
        for a in _aslist(item.get('affiliation', 'Unaffiliated')):
            if a not in affiliations:
                affiliations.append(a)
    # 1-based affiliation indices, one comma-separated string per author.
    aff_indexes = [', '.join(['%d' % (affiliations.index(a) + 1)
                              for a in _aslist(author.get('affiliation', 'Unaffiliated'))])
                   for author in author_matches]
    # Only warn when there actually are unmatched committers (the original
    # printed the warning, with an empty list, unconditionally).
    if unmatched:
        print("Some people made commits, but are missing in .maint/ "
              "files: %s." % ', '.join(unmatched), file=sys.stderr)
    print('Authors (%d):' % len(author_matches))
    print("%s." % '; '.join([
        '%s \\ :sup:`%s`\\ ' % (i['name'], idx)
        for i, idx in zip(author_matches, aff_indexes)
    ]))
    print('\n\nAffiliations:\n%s' % '\n'.join(['{0: >2}. {1}'.format(i + 1, a)
                                               for i, a in enumerate(affiliations)]))
| 1,711 | 33.24 | 91 | py |
fmriprep | fmriprep-master/fmriprep/__main__.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
from .cli.run import main
if __name__ == '__main__':
    import sys
    from . import __name__ as module
    # `python -m <module>` typically displays the command as __main__.py;
    # rewrite argv[0] so help/usage text shows the canonical invocation.
    if '__main__.py' in sys.argv[0]:
        sys.argv[0] = f'{sys.executable} -m {module}'
    main()
| 1,162 | 33.205882 | 74 | py |
fmriprep | fmriprep-master/fmriprep/conftest.py | import json
import os
from pathlib import Path
from shutil import copytree
import nibabel as nb
import numpy as np
import pytest
try:
from importlib.resources import files as ir_files
except ImportError: # PY<3.9
from importlib_resources import files as ir_files
os.environ['NO_ET'] = '1'
def copytree_or_skip(source, target):
    """Copy a packaged data directory into *target*, or skip the test.

    Skips (rather than fails) when the package data is not available as real
    files — e.g., when fmriprep is installed as a zipped distribution.
    """
    data_dir = ir_files('fmriprep') / source
    if not data_dir.exists():
        pytest.skip(f"Cannot chdir into {data_dir!r}. Probably in a zipped distribution.")
    try:
        copytree(data_dir, target / data_dir.name)
    except Exception:
        pytest.skip(f"Cannot copy {data_dir!r} into {target / data_dir.name}. Probably in a zip.")
@pytest.fixture(autouse=True)
def populate_namespace(doctest_namespace, tmp_path):
    """Expose ``copytree_or_skip`` and a temp dir to every doctest."""
    doctest_namespace['copytree_or_skip'] = copytree_or_skip
    doctest_namespace['testdir'] = tmp_path
@pytest.fixture
def minimal_bids(tmp_path):
    """Create a minimal, single-subject BIDS dataset and return its root.

    The dataset contains only a ``dataset_description.json`` and one
    zero-valued T1w image for ``sub-01``.
    """
    bids = tmp_path / "bids"
    bids.mkdir()
    # Call write_text on the Path instance; the original invoked the unbound
    # ``Path.write_text(path, text)``, which works but is fragile/unidiomatic.
    (bids / "dataset_description.json").write_text(
        json.dumps({"Name": "Test DS", "BIDSVersion": "1.8.0"})
    )
    T1w = bids / 'sub-01' / 'anat' / 'sub-01_T1w.nii.gz'
    T1w.parent.mkdir(parents=True)
    nb.Nifti1Image(np.zeros((5, 5, 5)), np.eye(4)).to_filename(T1w)
    return bids
| 1,273 | 26.695652 | 98 | py |
fmriprep | fmriprep-master/fmriprep/_warnings.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Manipulate Python warnings."""
import logging
import warnings
_wlog = logging.getLogger("py.warnings")
_wlog.addHandler(logging.NullHandler())
def _warn(message, category=None, stacklevel=1, source=None):
    """Redefine the warning function (drop-in replacement for ``warnings.warn``).

    Routes the message to the ``py.warnings`` logger instead of stderr.
    """
    if category is not None:
        # ``category`` is a warning *class*; report its own name.
        # The original used ``type(category).__name__`` which evaluates to
        # ``"type"`` for any class and was then replaced with "WARNING",
        # collapsing every category to the same generic label.
        category = getattr(category, "__name__", str(category))
    logging.getLogger("py.warnings").warning(f"{category or 'WARNING'}: {message}")
def _showwarning(message, category, filename, lineno, file=None, line=None):
    # Signature mirrors ``warnings.showwarning``; location args are dropped.
    _warn(message, category=category)
# Monkeypatch the warnings module so all warnings issued anywhere in the
# process are routed through the "py.warnings" logger.
warnings.warn = _warn
warnings.showwarning = _showwarning
| 1,520 | 32.065217 | 83 | py |
fmriprep | fmriprep-master/fmriprep/config.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
r"""
A Python module to maintain unique, run-wide *fMRIPrep* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<fmriprep_dir>/sub-<participant_id>/log/<run_unique_id>/fmriprep.toml``.
Settings are stored using :abbr:`ToML (Tom's Markup Language)`.
The module has a :py:func:`~fmriprep.config.to_filename` function to allow writing out
the settings to hard disk in *ToML* format, which looks like:
.. literalinclude:: ../fmriprep/data/tests/config.toml
:language: toml
:name: fmriprep.toml
:caption: **Example file representation of fMRIPrep settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~fmriprep.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from fmriprep import config
config_file = config.execution.work_dir / '.fmriprep.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` is responsible for other conveniency actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Set up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
from multiprocessing import set_start_method
# Disable NiPype etelemetry always
_disable_et = bool(os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
CONFIG_FILENAME = "fmriprep.toml"
try:
    set_start_method("forkserver")
except RuntimeError:
    pass  # context has been already set
finally:
    # Defer all custom import for after initializing the forkserver and
    # ignoring the most annoying warnings
    import random
    import sys
    from pathlib import Path
    from time import strftime
    from uuid import uuid4
    from nipype import __version__ as _nipype_ver
    from templateflow import __version__ as _tf_ver
    from . import __version__
if not hasattr(sys, "_is_pytest_session"):
    sys._is_pytest_session = False  # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
    (
        "+" in __version__,
        __version__.endswith(".dirty"),
        os.getenv("FMRIPREP_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
    )
):
    from ._warnings import logging
    os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("FMRIPREP_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
    # allow disabling warnings on development versions
    # https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765
    from ._warnings import logging
else:
    import logging
logging.addLevelName(25, "IMPORTANT")  # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE")  # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
# Ping NiPype eTelemetry once if env var was not set
# workers on the pool will have the env variable set from the master process
if not _disable_et:
    # Just get so analytics track one hit
    from contextlib import suppress
    from requests import ConnectionError, ReadTimeout
    from requests import get as _get_url
    with suppress((ConnectionError, ReadTimeout)):
        _get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05)
# Execution environment detection (plain OS, Docker, or Singularity)
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv("IS_DOCKER_8395080871"):
    _exec_env = "singularity"
    _cgroup = Path("/proc/1/cgroup")
    if _cgroup.exists() and "docker" in _cgroup.read_text():
        _docker_ver = os.getenv("DOCKER_VERSION_8395080871")
        _exec_env = "fmriprep-docker" if _docker_ver else "docker"
    del _cgroup
# FreeSurfer license: explicit FS_LICENSE wins, else look in FREESURFER_HOME
_fs_license = os.getenv("FS_LICENSE")
if not _fs_license and os.getenv("FREESURFER_HOME"):
    _fs_home = os.getenv("FREESURFER_HOME")
    if _fs_home and (Path(_fs_home) / "license.txt").is_file():
        _fs_license = str(Path(_fs_home) / "license.txt")
    del _fs_home
# NOTE(review): os.getenv("HOME") may be None (e.g., on Windows), which would
# make os.path.join raise — confirm HOME is guaranteed in supported platforms.
_templateflow_home = Path(
    os.getenv("TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow"))
)
try:
    from psutil import virtual_memory
    _free_mem_at_start = round(virtual_memory().available / 1024**3, 1)
except Exception:
    # psutil is optional; fall back to "unknown" free memory.
    _free_mem_at_start = None
_oc_limit = "n/a"
_oc_policy = "n/a"
try:
    # Memory policy may have a large effect on types of errors experienced
    _proc_oc_path = Path("/proc/sys/vm/overcommit_memory")
    if _proc_oc_path.exists():
        _oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get(
            _proc_oc_path.read_text().strip(), "unknown"
        )
    if _oc_policy != "never":
        _proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes")
        if _proc_oc_kbytes.exists():
            _oc_limit = _proc_oc_kbytes.read_text().strip()
        if _oc_limit in ("0", "n/a") and Path("/proc/sys/vm/overcommit_ratio").exists():
            _oc_limit = "{}%".format(Path("/proc/sys/vm/overcommit_ratio").read_text().strip())
except Exception:
    # /proc probing is best-effort; keep the "n/a" defaults on any failure.
    pass
class _Config:
    """An abstract class forbidding instantiation."""
    # Names of settings that must be coerced to absolute ``Path`` on load.
    _paths = tuple()
    def __init__(self):
        """Avert instantiation."""
        raise RuntimeError("Configuration type is not instantiable.")
    @classmethod
    def load(cls, settings, init=True, ignore=None):
        """Store settings from a dictionary."""
        ignore = ignore or {}
        for k, v in settings.items():
            if k in ignore or v is None:
                continue
            if k in cls._paths:
                setattr(cls, k, Path(v).absolute())
            elif hasattr(cls, k):
                # Only known attributes are accepted; unknown keys are dropped.
                setattr(cls, k, v)
        if init:
            try:
                cls.init()
            except AttributeError:
                # Sections without an ``init`` hook are simply not initialized.
                pass
    @classmethod
    def get(cls):
        """Return defined settings."""
        from niworkflows.utils.spaces import Reference, SpatialReferences
        out = {}
        for k, v in cls.__dict__.items():
            # Skip private names, unset values, and methods.
            if k.startswith("_") or v is None:
                continue
            if callable(getattr(cls, k)):
                continue
            if k in cls._paths:
                v = str(v)
            # Serialize spatial-reference objects to plain strings.
            if isinstance(v, SpatialReferences):
                v = " ".join([str(s) for s in v.references]) or None
            if isinstance(v, Reference):
                v = str(v) or None
            out[k] = v
        return out
class environment(_Config):
    """
    Read-only options regarding the platform and environment.
    Crawls runtime descriptive settings (e.g., default FreeSurfer license,
    execution environment, nipype and *fMRIPrep* versions, etc.).
    The ``environment`` section is not loaded in from file,
    only written out when settings are exported.
    This config section is useful when reporting issues,
    and these variables are tracked whenever the user does not
    opt-out using the ``--notrack`` argument.
    """
    # All values below are captured once, at import time of this module
    # (see the module-level probing code above).
    cpu_count = os.cpu_count()
    """Number of available CPUs."""
    exec_docker_version = _docker_ver
    """Version of Docker Engine."""
    exec_env = _exec_env
    """A string representing the execution platform."""
    free_mem = _free_mem_at_start
    """Free memory at start."""
    overcommit_policy = _oc_policy
    """Linux's kernel virtual memory overcommit policy."""
    overcommit_limit = _oc_limit
    """Linux's kernel virtual memory overcommit limits."""
    nipype_version = _nipype_ver
    """Nipype's current version."""
    templateflow_version = _tf_ver
    """The TemplateFlow client version installed."""
    version = __version__
    """*fMRIPrep*'s version."""
class nipype(_Config):
    """Nipype settings."""
    crashfile_format = "txt"
    """The file format for crashfiles, either text or pickle."""
    get_linked_libs = False
    """Run NiPype's tool to enlist linked libraries for every interface."""
    memory_gb = None
    """Estimation in GB of the RAM this workflow can allocate at any given time."""
    nprocs = os.cpu_count()
    """Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
    omp_nthreads = None
    """Number of CPUs a single process can access for multithreaded execution."""
    plugin = "MultiProc"
    """NiPype's execution plugin."""
    plugin_args = {
        "maxtasksperchild": 1,
        "raise_insufficient": False,
    }
    """Settings for NiPype's execution plugin."""
    resource_monitor = False
    """Enable resource monitor."""
    stop_on_first_crash = True
    """Whether the workflow should stop or continue after the first error."""
    @classmethod
    def get_plugin(cls):
        """Format a dictionary for Nipype consumption."""
        out = {
            "plugin": cls.plugin,
            "plugin_args": cls.plugin_args,
        }
        # Process-count/memory limits only apply to multiprocessing plugins.
        if cls.plugin in ("MultiProc", "LegacyMultiProc"):
            out["plugin_args"]["n_procs"] = int(cls.nprocs)
            if cls.memory_gb:
                out["plugin_args"]["memory_gb"] = float(cls.memory_gb)
        return out
    @classmethod
    def init(cls):
        """Set NiPype configurations."""
        from nipype import config as ncfg
        # Configure resource_monitor
        if cls.resource_monitor:
            ncfg.update_config(
                {
                    "monitoring": {
                        "enabled": cls.resource_monitor,
                        "sample_frequency": "0.5",
                        "summary_append": True,
                    }
                }
            )
            ncfg.enable_resource_monitor()
        # Nipype config (logs and execution)
        ncfg.update_config(
            {
                "execution": {
                    "crashdump_dir": str(execution.log_dir),
                    "crashfile_format": cls.crashfile_format,
                    "get_linked_libs": cls.get_linked_libs,
                    "stop_on_first_crash": cls.stop_on_first_crash,
                    "check_version": False,  # disable future telemetry
                }
            }
        )
        # Default OMP threads: leave one CPU for the scheduler, capped at 8.
        if cls.omp_nthreads is None:
            cls.omp_nthreads = min(cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8)
class execution(_Config):
    """Configure run-level settings."""
    anat_derivatives = None
    """A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
    bids_dir = None
    """An existing path to the dataset, which must be BIDS-compliant."""
    bids_database_dir = None
    """Path to the directory containing SQLite database indices for the input BIDS dataset."""
    bids_description_hash = None
    """Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
    bids_filters = None
    """A dictionary of BIDS selection filters."""
    boilerplate_only = False
    """Only generate a boilerplate."""
    sloppy = False
    """Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
    # NOTE(review): mutable class-level default (a shared list); ``load()``
    # replaces it wholesale, but in-place mutation would leak across runs.
    debug = []
    """Debug mode(s)."""
    echo_idx = None
    """Select a particular echo for multi-echo EPI datasets."""
    fmriprep_dir = None
    """Root of fMRIPrep BIDS Derivatives dataset. Depends on output_layout."""
    fs_license_file = _fs_license
    """An existing file containing a FreeSurfer license."""
    fs_subjects_dir = None
    """FreeSurfer's subjects directory."""
    layout = None
    """A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
    log_dir = None
    """The path to a directory that contains execution logs."""
    log_level = 25
    """Output verbosity."""
    low_mem = None
    """Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
    md_only_boilerplate = False
    """Do not convert boilerplate from MarkDown to LaTex and HTML."""
    notrack = False
    """Do not collect telemetry information for *fMRIPrep*."""
    track_carbon = False
    """Tracks power draws using CodeCarbon package."""
    country_code = "CAN"
    """Country ISO code used by carbon trackers."""
    output_dir = None
    """Folder where derivatives will be stored."""
    me_output_echos = False
    """Output individual echo time series with slice, motion and susceptibility correction"""
    output_layout = None
    """Layout of derivatives within output_dir."""
    output_spaces = None
    """List of (non)standard spaces designated (with the ``--output-spaces`` flag of
    the command line) as spatial references for outputs."""
    reports_only = False
    """Only build the reports, based on the reportlets found in a cached working directory."""
    run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
    """Unique identifier of this particular run."""
    participant_label = None
    """List of participant identifiers that are to be preprocessed."""
    task_id = None
    """Select a particular task from all available in the dataset."""
    templateflow_home = _templateflow_home
    """The root folder of the TemplateFlow client."""
    work_dir = Path("work").absolute()
    """Path to a working directory where intermediate results will be available."""
    write_graph = False
    """Write out the computational graph corresponding to the planned preprocessing."""
    _layout = None
    _paths = (
        "anat_derivatives",
        "bids_dir",
        "bids_database_dir",
        "fmriprep_dir",
        "fs_license_file",
        "fs_subjects_dir",
        "layout",
        "log_dir",
        "output_dir",
        "templateflow_home",
        "work_dir",
    )
    @classmethod
    def init(cls):
        """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
        if cls.fs_license_file and Path(cls.fs_license_file).is_file():
            os.environ["FS_LICENSE"] = str(cls.fs_license_file)
        if cls._layout is None:
            import re
            from bids.layout import BIDSLayout
            from bids.layout.index import BIDSLayoutIndexer
            _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / "bids_db")
            _db_path.mkdir(exist_ok=True, parents=True)
            # Recommended after PyBIDS 12.1
            _indexer = BIDSLayoutIndexer(
                validate=False,
                ignore=(
                    "code",
                    "stimuli",
                    "sourcedata",
                    "models",
                    re.compile(r"^\."),
                    re.compile(
                        r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|perf)"
                    ),
                ),
            )
            cls._layout = BIDSLayout(
                str(cls.bids_dir),
                database_path=_db_path,
                # Only (re)index when no pre-built database was supplied.
                reset_database=cls.bids_database_dir is None,
                indexer=_indexer,
            )
            cls.bids_database_dir = _db_path
        cls.layout = cls._layout
        if cls.bids_filters:
            from bids.layout import Query
            # unserialize pybids Query enum values
            for acq, filters in cls.bids_filters.items():
                cls.bids_filters[acq] = {
                    k: getattr(Query, v[7:-4]) if not isinstance(v, Query) and "Query" in v else v
                    for k, v in filters.items()
                }
        # "all" expands to every known debug mode.
        if "all" in cls.debug:
            cls.debug = list(DEBUG_MODES)
# These variables are not necessary anymore
# (their values were captured as class attributes of ``environment`` and
# ``execution`` above).
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
    """Configure the particular execution graph of this workflow."""
    anat_only = False
    """Execute the anatomical preprocessing only."""
    aroma_err_on_warn = None
    """Cast AROMA warnings to errors."""
    aroma_melodic_dim = None
    """Number of ICA components to be estimated by MELODIC
    (positive = exact, negative = maximum)."""
    bold2t1w_dof = None
    """Degrees of freedom of the BOLD-to-T1w registration steps."""
    bold2t1w_init = "register"
    """Whether to use standard coregistration ('register') or to initialize coregistration from the
    BOLD image-header ('header')."""
    cifti_output = None
    """Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``."""
    dummy_scans = None
    """Set a number of initial scans to be considered nonsteady states."""
    fmap_bspline = None
    """Regularize fieldmaps with a field of B-Spline basis."""
    fmap_demean = None
    """Remove the mean from fieldmaps."""
    force_syn = None
    """Run *fieldmap-less* susceptibility-derived distortions estimation."""
    hires = None
    """Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
    ignore = None
    """Ignore particular steps for *fMRIPrep*."""
    longitudinal = False
    """Run FreeSurfer ``recon-all`` with the ``-longitudinal`` flag."""
    medial_surface_nan = None
    """Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling."""
    project_goodvoxels = False
    """Exclude voxels with locally high coefficient of variation from sampling."""
    regressors_all_comps = None
    """Return all CompCor components."""
    regressors_dvars_th = None
    """Threshold for DVARS."""
    regressors_fd_th = None
    """Threshold for :abbr:`FD (frame-wise displacement)`."""
    run_reconall = True
    """Run FreeSurfer's surface reconstruction."""
    skull_strip_fixed_seed = False
    """Fix a seed for skull-stripping."""
    skull_strip_template = "OASIS30ANTs"
    """Change default brain extraction template."""
    skull_strip_t1w = "force"
    """Skip brain extraction of the T1w image (default is ``force``, meaning that
    *fMRIPrep* will run brain extraction of the T1w)."""
    slice_time_ref = 0.5
    """The time of the reference slice to correct BOLD values to, as a fraction
    acquisition time. 0 indicates the start, 0.5 the midpoint, and 1 the end
    of acquisition. The alias `start` corresponds to 0, and `middle` to 0.5.
    The default value is 0.5."""
    spaces = None
    """Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
    instance keeping standard and nonstandard spaces."""
    use_aroma = None
    """Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`."""
    use_bbr = None
    """Run boundary-based registration for BOLD-to-T1w registration."""
    use_syn_sdc = None
    """Run *fieldmap-less* susceptibility-derived distortions estimation
    in the absence of any alternatives."""
    me_t2s_fit_method = "curvefit"
    """The method by which to estimate T2*/S0 for multi-echo data"""
class loggers:
    """Keep loggers easily accessible (see :py:func:`init`)."""

    # %-style log record format shared by the handler attached in ``init``
    _fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
    # Timestamp format for the formatter above
    _datefmt = "%y%m%d-%H:%M:%S"

    default = logging.getLogger()
    """The root logger."""
    cli = logging.getLogger("cli")
    """Command-line interface logging."""
    workflow = logging.getLogger("nipype.workflow")
    """NiPype's workflow logger."""
    interface = logging.getLogger("nipype.interface")
    """NiPype's interface logger."""
    utils = logging.getLogger("nipype.utils")
    """NiPype's utils logger."""

    @classmethod
    def init(cls):
        """
        Set the log level, initialize all loggers into :py:class:`loggers`.

        * Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
        * Add a new sub-logger (``cli``).
        * Logger configuration.
        """
        from nipype import config as ncfg

        # Attach a stdout handler to the CLI logger only once, even if
        # ``init`` is called repeatedly (e.g. after reloading a config file)
        if not cls.cli.hasHandlers():
            _handler = logging.StreamHandler(stream=sys.stdout)
            _handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))
            cls.cli.addHandler(_handler)
        # Propagate the level selected on the command line to all loggers
        cls.default.setLevel(execution.log_level)
        cls.cli.setLevel(execution.log_level)
        cls.interface.setLevel(execution.log_level)
        cls.workflow.setLevel(execution.log_level)
        cls.utils.setLevel(execution.log_level)
        # Also route nipype's own logging to files under the log directory
        ncfg.update_config(
            {"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}}
        )
class seeds(_Config):
    """Initialize the PRNG and track random seed assignments"""

    _random_seed = None
    master = None
    """Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
    ants = None
    """Seed used for antsRegistration, antsAI, antsMotionCorr"""
    numpy = None
    """Seed used by NumPy"""

    @classmethod
    def init(cls):
        """Seed Python's PRNG and derive tool-specific seeds from it."""
        # An explicit --random-seed takes precedence; otherwise keep a
        # previously assigned master seed, or draw a fresh one.
        if cls._random_seed is not None:
            cls.master = cls._random_seed
        elif cls.master is None:
            cls.master = random.randint(1, 65536)
        random.seed(cls.master)  # initialize the PRNG
        # Derive program-specific seeds from the freshly seeded PRNG
        cls.ants = _set_ants_seed()
        cls.numpy = _set_numpy_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ["ANTS_RANDOM_SEED"] = str(val)
return val
def _set_numpy_seed():
"""NumPy's random seed is independent from Python's `random` module"""
import numpy as np
val = random.randint(1, 65536)
np.random.seed(val)
return val
def from_dict(settings, init=True, ignore=None):
    """Read settings from a flat dictionary.

    Arguments
    ---------
    settings : dict
        Settings to apply to any configuration
    init : `bool` or :py:class:`~collections.abc.Container`
        Initialize all, none, or a subset of configurations.
    ignore : :py:class:`~collections.abc.Container`
        Collection of keys in ``settings`` to ignore
    """

    # Accept global True/False or container of configs to initialize
    def initialize(x):
        return init if init in (True, False) else x in init

    nipype.load(settings, init=initialize('nipype'), ignore=ignore)
    execution.load(settings, init=initialize('execution'), ignore=ignore)
    workflow.load(settings, init=initialize('workflow'), ignore=ignore)
    seeds.load(settings, init=initialize('seeds'), ignore=ignore)
    # Loggers are always re-initialized after sections are loaded
    loggers.init()
def load(filename, skip=None, init=True):
    """Load settings from file.

    Arguments
    ---------
    filename : :py:class:`os.PathLike`
        TOML file containing fMRIPrep configuration.
    skip : dict or None
        Sets of values to ignore during load, keyed by section name
    init : `bool` or :py:class:`~collections.abc.Container`
        Initialize all, none, or a subset of configurations.
    """
    from toml import loads

    skip = skip or {}

    # Accept global True/False or container of configs to initialize
    def initialize(x):
        return init if init in (True, False) else x in init

    filename = Path(filename)
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        # The "environment" section is informational/read-only: never reload it
        if sectionname != "environment":
            # Section classes live as module-level attributes of this module
            section = getattr(sys.modules[__name__], sectionname)
            ignore = skip.get(sectionname)
            section.load(configs, ignore=ignore, init=initialize(sectionname))
    init_spaces()
def get(flat=False):
    """Get config as a dict."""
    settings = {
        "environment": environment.get(),
        "execution": execution.get(),
        "workflow": workflow.get(),
        "nipype": nipype.get(),
        "seeds": seeds.get(),
    }
    if flat:
        # Flatten nested sections into "<section>.<key>" entries
        flattened = {}
        for section, configs in settings.items():
            for key, value in configs.items():
                flattened[f"{section}.{key}"] = value
        return flattened
    return settings
def dumps():
    """Format config into toml."""
    from toml import dumps as _toml_dumps

    # Serialize the complete configuration dictionary (see :func:`get`)
    return _toml_dumps(get())
def to_filename(filename):
    """Write settings to file."""
    out_path = Path(filename)
    # ``dumps`` renders the whole configuration as TOML text
    out_path.write_text(dumps())
def init_spaces(checkpoint=True):
    """
    Initialize the :attr:`~workflow.spaces` setting.

    Parses the user-provided ``--output-spaces`` value (string or
    ``SpatialReferences``) and adds internally-required references before
    storing the result in :attr:`workflow.spaces`.
    """
    from niworkflows.utils.spaces import Reference, SpatialReferences

    spaces = execution.output_spaces or SpatialReferences()
    if not isinstance(spaces, SpatialReferences):
        # ``--output-spaces`` may arrive as a space-separated string
        spaces = SpatialReferences(
            [ref for s in spaces.split(" ") for ref in Reference.from_string(s)]
        )

    if checkpoint and not spaces.is_cached():
        spaces.checkpoint()

    # Add the default standard space if not already present (required by several sub-workflows)
    if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
        spaces.add(Reference("MNI152NLin2009cAsym", {}))

    # Ensure user-defined spatial references for outputs are correctly parsed.
    # Certain options require normalization to a space not explicitly defined by users.
    # These spaces will not be included in the final outputs.
    cifti_output = workflow.cifti_output
    if cifti_output:
        # CIFTI grayordinates to corresponding FSL-MNI resolutions.
        vol_res = "2" if cifti_output == "91k" else "1"
        spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res}))

    # Make the SpatialReferences object available
    workflow.spaces = spaces
| 27,180 | 34.072258 | 99 | py |
fmriprep | fmriprep-master/fmriprep/__init__.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-module metadata."""
from .__about__ import __copyright__, __credits__, __packagename__, __version__
__all__ = [
'__copyright__',
'__credits__',
'__packagename__',
'__version__',
]
# Silence PyBIDS warning for extension entity behavior
# Can be removed once minimum PyBIDS dependency hits 0.14
try:
    import bids
    from packaging.version import Version

    if Version(bids.__version__) < Version('0.14'):
        bids.config.set_option('extension_initial_dot', True)
except (ImportError, ValueError):
    # PyBIDS/packaging absent, or an unparsable version string: nothing to do
    pass
else:
    # Avoid leaking helper names into the package namespace
    del Version, bids
| 682 | 25.269231 | 79 | py |
fmriprep | fmriprep-master/fmriprep/__about__.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Base module variables."""
# Version is injected by setuptools-scm at build time; fall back gracefully
# when running from a source tree without build metadata.
try:
    from ._version import __version__
except ImportError:
    __version__ = "0+unknown"

__packagename__ = 'fmriprep'
__copyright__ = 'Copyright 2023, The NiPreps Developers'
# NOTE: the two adjacent literals are concatenated; the trailing space on the
# first line is required (previously rendered "top-level folderof the repository")
__credits__ = (
    'Contributors: please check the ``.zenodo.json`` file at the top-level folder '
    'of the repository'
)
__url__ = 'https://github.com/nipreps/fmriprep'

DOWNLOAD_URL = 'https://github.com/nipreps/{name}/archive/{ver}.tar.gz'.format(
    name=__packagename__, ver=__version__
)
| 1,387 | 32.853659 | 82 | py |
fmriprep | fmriprep-master/fmriprep/cli/parser.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Parser."""
import sys
from .. import config
def _build_parser(**kwargs):
"""Build parser object.
``kwargs`` are passed to ``argparse.ArgumentParser`` (mainly useful for debugging).
"""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from functools import partial
from pathlib import Path
from niworkflows.utils.spaces import OutputReferencesAction, Reference
from packaging.version import Version
from .version import check_latest, is_flagged
def _path_exists(path, parser):
"""Ensure a given path exists."""
if path is None or not Path(path).exists():
raise parser.error(f"Path does not exist: <{path}>.")
return Path(path).absolute()
def _is_file(path, parser):
    """Ensure a given path exists and it is a file."""
    resolved = _path_exists(path, parser)
    if resolved.is_file():
        return resolved
    raise parser.error(f"Path should point to a file (or symlink of file): <{resolved}>.")
def _min_one(value, parser):
"""Ensure an argument is not lower than 1."""
value = int(value)
if value < 1:
raise parser.error("Argument can't be less than one.")
return value
def _to_gb(value):
scale = {"G": 1, "T": 10**3, "M": 1e-3, "K": 1e-6, "B": 1e-9}
digits = "".join([c for c in value if c.isdigit()])
units = value[len(digits) :] or "M"
return int(digits) * scale[units[0]]
def _drop_sub(value):
return value[4:] if value.startswith("sub-") else value
def _filter_pybids_none_any(dct):
    """Map JSON filter values to PyBIDS query sentinels.

    ``None`` becomes ``Query.NONE`` (entity must be absent) and ``"*"``
    becomes ``Query.ANY`` (entity must be present with any value); every
    other value passes through unchanged.
    """
    import bids

    return {
        k: bids.layout.Query.NONE if v is None else (bids.layout.Query.ANY if v == "*" else v)
        for k, v in dct.items()
    }
def _bids_filter(value, parser):
from json import JSONDecodeError, loads
if value:
if Path(value).exists():
try:
return loads(Path(value).read_text(), object_hook=_filter_pybids_none_any)
except JSONDecodeError:
raise parser.error(f"JSON syntax error in: <{value}>.")
else:
raise parser.error(f"Path does not exist: <{value}>.")
def _slice_time_ref(value, parser):
if value == "start":
value = 0
elif value == "middle":
value = 0.5
try:
value = float(value)
except ValueError:
raise parser.error(
"Slice time reference must be number, 'start', or 'middle'. " f"Received {value}."
)
if not 0 <= value <= 1:
raise parser.error(f"Slice time reference must be in range 0-1. Received {value}.")
return value
verstr = f"fMRIPrep v{config.environment.version}"
currentv = Version(config.environment.version)
is_release = not any((currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease))
parser = ArgumentParser(
description="fMRIPrep: fMRI PREProcessing workflows v{}".format(
config.environment.version
),
formatter_class=ArgumentDefaultsHelpFormatter,
**kwargs,
)
PathExists = partial(_path_exists, parser=parser)
IsFile = partial(_is_file, parser=parser)
PositiveInt = partial(_min_one, parser=parser)
BIDSFilter = partial(_bids_filter, parser=parser)
SliceTimeRef = partial(_slice_time_ref, parser=parser)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument(
"bids_dir",
action="store",
type=PathExists,
help="The root folder of a BIDS valid dataset (sub-XXXXX folders should "
"be found at the top level in this folder).",
)
parser.add_argument(
"output_dir",
action="store",
type=Path,
help="The output path for the outcomes of preprocessing and visual reports",
)
parser.add_argument(
"analysis_level",
choices=["participant"],
help='Processing stage to be run, only "participant" in the case of '
"fMRIPrep (see BIDS-Apps specification).",
)
g_bids = parser.add_argument_group("Options for filtering BIDS queries")
g_bids.add_argument(
"--skip_bids_validation",
"--skip-bids-validation",
action="store_true",
default=False,
help="Assume the input dataset is BIDS compliant and skip the validation",
)
g_bids.add_argument(
"--participant-label",
"--participant_label",
action="store",
nargs="+",
type=_drop_sub,
help="A space delimited list of participant identifiers or a single "
"identifier (the sub- prefix can be removed)",
)
# Re-enable when option is actually implemented
# g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
# help='Select a specific session to be processed')
# Re-enable when option is actually implemented
# g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
# help='Select a specific run to be processed')
g_bids.add_argument(
"-t", "--task-id", action="store", help="Select a specific task to be processed"
)
g_bids.add_argument(
"--echo-idx",
action="store",
type=int,
help="Select a specific echo to be processed in a multiecho series",
)
g_bids.add_argument(
"--bids-filter-file",
dest="bids_filters",
action="store",
type=BIDSFilter,
metavar="FILE",
help="A JSON file describing custom BIDS input filters using PyBIDS. "
"For further details, please check out "
"https://fmriprep.readthedocs.io/en/%s/faq.html#"
"how-do-I-select-only-certain-files-to-be-input-to-fMRIPrep"
% (currentv.base_version if is_release else "latest"),
)
g_bids.add_argument(
"--anat-derivatives",
action="store",
metavar="PATH",
type=PathExists,
help="Reuse the anatomical derivatives from another fMRIPrep run or calculated "
"with an alternative processing tool (NOT RECOMMENDED).",
)
g_bids.add_argument(
"--bids-database-dir",
metavar="PATH",
type=Path,
help="Path to a PyBIDS database folder, for faster indexing (especially "
"useful for large datasets). Will be created if not present.",
)
g_perfm = parser.add_argument_group("Options to handle performance")
g_perfm.add_argument(
"--nprocs",
"--nthreads",
"--n_cpus",
"--n-cpus",
dest='nprocs',
action="store",
type=PositiveInt,
help="Maximum number of threads across all processes",
)
g_perfm.add_argument(
"--omp-nthreads",
action="store",
type=PositiveInt,
help="Maximum number of threads per-process",
)
g_perfm.add_argument(
"--mem",
"--mem_mb",
"--mem-mb",
dest="memory_gb",
action="store",
type=_to_gb,
metavar="MEMORY_MB",
help="Upper bound memory limit for fMRIPrep processes",
)
g_perfm.add_argument(
"--low-mem",
action="store_true",
help="Attempt to reduce memory usage (will increase disk usage in working directory)",
)
g_perfm.add_argument(
"--use-plugin",
"--nipype-plugin-file",
action="store",
metavar="FILE",
type=IsFile,
help="Nipype plugin configuration file",
)
g_perfm.add_argument(
"--sloppy",
action="store_true",
default=False,
help="Use low-quality tools for speed - TESTING ONLY",
)
g_subset = parser.add_argument_group("Options for performing only a subset of the workflow")
g_subset.add_argument("--anat-only", action="store_true", help="Run anatomical workflows only")
g_subset.add_argument(
"--boilerplate-only",
"--boilerplate_only",
action="store_true",
default=False,
help="Generate boilerplate only",
)
g_subset.add_argument(
"--reports-only",
action="store_true",
default=False,
help="Only generate reports, don't run workflows. This will only rerun report "
"aggregation, not reportlet generation for specific nodes.",
)
g_conf = parser.add_argument_group("Workflow configuration")
g_conf.add_argument(
"--ignore",
required=False,
action="store",
nargs="+",
default=[],
choices=["fieldmaps", "slicetiming", "sbref", "t2w", "flair"],
help="Ignore selected aspects of the input dataset to disable corresponding "
"parts of the workflow (a space delimited list)",
)
g_conf.add_argument(
"--output-spaces",
nargs="*",
action=OutputReferencesAction,
help="""\
Standard and non-standard spaces to resample anatomical and functional images to. \
Standard spaces may be specified by the form \
``<SPACE>[:cohort-<label>][:res-<resolution>][...]``, where ``<SPACE>`` is \
a keyword designating a spatial reference, and may be followed by optional, \
colon-separated parameters. \
Non-standard spaces imply specific orientations and sampling grids. \
Important to note, the ``res-*`` modifier does not define the resolution used for \
the spatial normalization. To generate no BOLD outputs, use this option without specifying \
any spatial references. For further details, please check out \
https://fmriprep.readthedocs.io/en/%s/spaces.html"""
% (currentv.base_version if is_release else "latest"),
)
g_conf.add_argument(
"--longitudinal",
action="store_true",
help="Treat dataset as longitudinal - may increase runtime",
)
g_conf.add_argument(
"--bold2t1w-init",
action="store",
default="register",
choices=["register", "header"],
help='Either "register" (the default) to initialize volumes at center or "header"'
" to use the header information when coregistering BOLD to T1w images.",
)
g_conf.add_argument(
"--bold2t1w-dof",
action="store",
default=6,
choices=[6, 9, 12],
type=int,
help="Degrees of freedom when registering BOLD to T1w images. "
"6 degrees (rotation and translation) are used by default.",
)
g_conf.add_argument(
"--force-bbr",
action="store_true",
dest="use_bbr",
default=None,
help="Always use boundary-based registration (no goodness-of-fit checks)",
)
g_conf.add_argument(
"--force-no-bbr",
action="store_false",
dest="use_bbr",
default=None,
help="Do not use boundary-based registration (no goodness-of-fit checks)",
)
g_conf.add_argument(
"--slice-time-ref",
required=False,
action="store",
default=None,
type=SliceTimeRef,
help="The time of the reference slice to correct BOLD values to, as a fraction "
"acquisition time. 0 indicates the start, 0.5 the midpoint, and 1 the end "
"of acquisition. The alias `start` corresponds to 0, and `middle` to 0.5. "
"The default value is 0.5.",
)
g_conf.add_argument(
"--dummy-scans",
required=False,
action="store",
default=None,
type=int,
help="Number of nonsteady-state volumes. Overrides automatic detection.",
)
g_conf.add_argument(
"--random-seed",
dest="_random_seed",
action="store",
type=int,
default=None,
help="Initialize the random seed for the workflow",
)
g_conf.add_argument(
"--me-t2s-fit-method",
action="store",
default="curvefit",
choices=["curvefit", "loglin"],
help=(
"The method by which to estimate T2* and S0 for multi-echo data. "
"'curvefit' uses nonlinear regression. "
"It is more memory intensive, but also may be more accurate, than 'loglin'. "
"'loglin' uses log-linear regression. "
"It is faster and less memory intensive, but may be less accurate."
),
)
g_outputs = parser.add_argument_group("Options for modulating outputs")
g_outputs.add_argument(
"--output-layout",
action="store",
default="bids",
choices=("bids", "legacy"),
help="Organization of outputs. \"bids\" (default) places fMRIPrep derivatives "
"directly in the output directory, and defaults to placing FreeSurfer "
"derivatives in <output-dir>/sourcedata/freesurfer. \"legacy\" creates "
"derivative datasets as subdirectories of outputs.",
)
g_outputs.add_argument(
"--me-output-echos",
action="store_true",
default=False,
help="Output individual echo time series with slice, motion and susceptibility "
"correction. Useful for further Tedana processing post-fMRIPrep.",
)
g_outputs.add_argument(
"--medial-surface-nan",
required=False,
action="store_true",
default=False,
help="Replace medial wall values with NaNs on functional GIFTI files. Only "
"performed for GIFTI files mapped to a freesurfer subject (fsaverage or fsnative).",
)
g_conf.add_argument(
"--project-goodvoxels",
required=False,
action="store_true",
default=False,
help="Exclude voxels whose timeseries have locally high coefficient of variation "
"from surface resampling. Only performed for GIFTI files mapped to a freesurfer subject "
"(fsaverage or fsnative).",
)
g_outputs.add_argument(
"--md-only-boilerplate",
action="store_true",
default=False,
help="Skip generation of HTML and LaTeX formatted citation with pandoc",
)
g_outputs.add_argument(
"--cifti-output",
nargs="?",
const="91k",
default=False,
choices=("91k", "170k"),
type=str,
help="Output preprocessed BOLD as a CIFTI dense timeseries. "
"Optionally, the number of grayordinate can be specified "
"(default is 91k, which equates to 2mm resolution)",
)
g_aroma = parser.add_argument_group("[DEPRECATED] Options for running ICA_AROMA")
g_aroma.add_argument(
"--use-aroma",
action="store_true",
default=False,
help="Deprecated. Will raise an error in 24.0.",
)
g_aroma.add_argument(
"--aroma-melodic-dimensionality",
dest="aroma_melodic_dim",
action="store",
default=0,
type=int,
help="Deprecated. Will raise an error in 24.0.",
)
g_aroma.add_argument(
"--error-on-aroma-warnings",
action="store_true",
dest="aroma_err_on_warn",
default=False,
help="Deprecated. Will raise an error in 24.0.",
)
g_confounds = parser.add_argument_group("Options relating to confounds")
g_confounds.add_argument(
"--return-all-components",
dest="regressors_all_comps",
required=False,
action="store_true",
default=False,
help="Include all components estimated in CompCor decomposition in the confounds "
"file instead of only the components sufficient to explain 50 percent of "
"BOLD variance in each CompCor mask",
)
g_confounds.add_argument(
"--fd-spike-threshold",
dest="regressors_fd_th",
required=False,
action="store",
default=0.5,
type=float,
help="Threshold for flagging a frame as an outlier on the basis of framewise "
"displacement",
)
g_confounds.add_argument(
"--dvars-spike-threshold",
dest="regressors_dvars_th",
required=False,
action="store",
default=1.5,
type=float,
help="Threshold for flagging a frame as an outlier on the basis of standardised DVARS",
)
# ANTs options
g_ants = parser.add_argument_group("Specific options for ANTs registrations")
g_ants.add_argument(
"--skull-strip-template",
default="OASIS30ANTs",
type=Reference.from_string,
help="Select a template for skull-stripping with antsBrainExtraction "
"(OASIS30ANTs, by default)",
)
g_ants.add_argument(
"--skull-strip-fixed-seed",
action="store_true",
help="Do not use a random seed for skull-stripping - will ensure "
"run-to-run replicability when used with --omp-nthreads 1 and "
"matching --random-seed <int>",
)
g_ants.add_argument(
"--skull-strip-t1w",
action="store",
choices=("auto", "skip", "force"),
default="force",
help="Perform T1-weighted skull stripping ('force' ensures skull "
"stripping, 'skip' ignores skull stripping, and 'auto' applies brain extraction "
"based on the outcome of a heuristic to check whether the brain is already masked).",
)
# Fieldmap options
g_fmap = parser.add_argument_group("Specific options for handling fieldmaps")
g_fmap.add_argument(
"--fmap-bspline",
action="store_true",
default=False,
help="Fit a B-Spline field using least-squares (experimental)",
)
g_fmap.add_argument(
"--fmap-no-demean",
action="store_false",
default=True,
help="Do not remove median (within mask) from fieldmap",
)
# SyN-unwarp options
g_syn = parser.add_argument_group("Specific options for SyN distortion correction")
g_syn.add_argument(
"--use-syn-sdc",
nargs="?",
choices=["warn", "error"],
action="store",
const="error",
default=False,
help="Use fieldmap-less distortion correction based on anatomical image; "
"if unable, error (default) or warn based on optional argument.",
)
g_syn.add_argument(
"--force-syn",
action="store_true",
default=False,
help="EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to "
"fieldmap correction, if available",
)
# FreeSurfer options
g_fs = parser.add_argument_group("Specific options for FreeSurfer preprocessing")
g_fs.add_argument(
"--fs-license-file",
metavar="FILE",
type=IsFile,
help="Path to FreeSurfer license key file. Get it (for free) by registering"
" at https://surfer.nmr.mgh.harvard.edu/registration.html",
)
g_fs.add_argument(
"--fs-subjects-dir",
metavar="PATH",
type=Path,
help="Path to existing FreeSurfer subjects directory to reuse. "
"(default: OUTPUT_DIR/freesurfer)",
)
g_fs.add_argument(
"--no-submm-recon",
action="store_false",
dest="hires",
help="Disable sub-millimeter (hires) reconstruction",
)
g_fs.add_argument(
"--fs-no-reconall",
action="store_false",
dest="run_reconall",
help="Disable FreeSurfer surface preprocessing.",
)
g_carbon = parser.add_argument_group("Options for carbon usage tracking")
g_carbon.add_argument(
"--track-carbon",
action="store_true",
help="Tracks power draws using CodeCarbon package",
)
g_carbon.add_argument(
"--country-code",
action="store",
default="CAN",
type=str,
help="Country ISO code used by carbon trackers",
)
g_other = parser.add_argument_group("Other options")
g_other.add_argument("--version", action="version", version=verstr)
g_other.add_argument(
"-v",
"--verbose",
dest="verbose_count",
action="count",
default=0,
help="Increases log verbosity for each occurrence, debug level is -vvv",
)
g_other.add_argument(
"-w",
"--work-dir",
action="store",
type=Path,
default=Path("work").absolute(),
help="Path where intermediate results should be stored",
)
g_other.add_argument(
"--clean-workdir",
action="store_true",
default=False,
help="Clears working directory of contents. Use of this flag is not "
"recommended when running concurrent processes of fMRIPrep.",
)
g_other.add_argument(
"--resource-monitor",
action="store_true",
default=False,
help="Enable Nipype's resource monitoring to keep track of memory and CPU usage",
)
g_other.add_argument(
"--config-file",
action="store",
metavar="FILE",
help="Use pre-generated configuration file. Values in file will be overridden "
"by command-line arguments.",
)
g_other.add_argument(
"--write-graph",
action="store_true",
default=False,
help="Write workflow graph.",
)
g_other.add_argument(
"--stop-on-first-crash",
action="store_true",
default=False,
help="Force stopping on first crash, even if a work directory was specified.",
)
g_other.add_argument(
"--notrack",
action="store_true",
default=False,
help="Opt-out of sending tracking information of this run to "
"the FMRIPREP developers. This information helps to "
"improve FMRIPREP and provides an indicator of real "
"world usage crucial for obtaining funding.",
)
g_other.add_argument(
"--debug",
action="store",
nargs="+",
choices=config.DEBUG_MODES + ("all",),
help="Debug mode(s) to enable. 'all' is alias for all available modes.",
)
latest = check_latest()
if latest is not None and currentv < latest:
print(
"""\
You are using fMRIPrep-%s, and a newer version of fMRIPrep is available: %s.
Please check out our documentation about how and when to upgrade:
https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading"""
% (currentv, latest),
file=sys.stderr,
)
_blist = is_flagged()
if _blist[0]:
_reason = _blist[1] or "unknown"
print(
"""\
WARNING: Version %s of fMRIPrep (current) has been FLAGGED
(reason: %s).
That means some severe flaw was found in it and we strongly
discourage its usage."""
% (config.environment.version, _reason),
file=sys.stderr,
)
return parser
def parse_args(args=None, namespace=None):
    """Parse args and run further checks on the command line.

    Parameters
    ----------
    args : list of str, optional
        Command-line arguments; defaults to ``sys.argv[1:]``.
    namespace : :py:class:`argparse.Namespace`, optional
        Pre-populated namespace passed through to ``parser.parse_args``.

    Side effects: populates the :py:mod:`fmriprep.config` sections, creates
    the log/work directories, and (optionally) validates the BIDS input.
    """
    import logging

    from niworkflows.utils.spaces import Reference, SpatialReferences

    parser = _build_parser()
    opts = parser.parse_args(args, namespace)

    if opts.config_file:
        # When only regenerating reports, reuse the stored run_uuid as well
        skip = {} if opts.reports_only else {"execution": ("run_uuid",)}
        config.load(opts.config_file, skip=skip, init=False)
        config.loggers.cli.info(f"Loaded previous configuration file {opts.config_file}")

    # Each -v subtracts 5 from level 25 (IMPORTANT); floor at DEBUG
    config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    config.from_dict(vars(opts), init=['nipype'])

    if not config.execution.notrack:
        # ``importlib.util.find_spec`` replaces the deprecated
        # ``pkgutil.find_loader`` (deprecated since Python 3.12)
        import importlib.util

        if importlib.util.find_spec("sentry_sdk") is None:
            config.execution.notrack = True
            config.loggers.cli.warning("Telemetry disabled because sentry_sdk is not installed.")
        else:
            config.loggers.cli.info(
                "Telemetry system to collect crashes and errors is enabled "
                "- thanks for your feedback!. Use option ``--notrack`` to opt out."
            )

    # Initialize --output-spaces if not defined
    if config.execution.output_spaces is None:
        config.execution.output_spaces = SpatialReferences(
            [Reference("MNI152NLin2009cAsym", {"res": "native"})]
        )

    # Retrieve logging level
    build_log = config.loggers.cli

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        import yaml

        with open(opts.use_plugin) as f:
            plugin_settings = yaml.load(f, Loader=yaml.FullLoader)
        _plugin = plugin_settings.get("plugin")
        if _plugin:
            config.nipype.plugin = _plugin
            config.nipype.plugin_args = plugin_settings.get("plugin_args", {})
            config.nipype.nprocs = opts.nprocs or config.nipype.plugin_args.get(
                "n_procs", config.nipype.nprocs
            )

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    if 1 < config.nipype.nprocs < config.nipype.omp_nthreads:
        build_log.warning(
            f"Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed "
            f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})"
        )

    # Inform the user about the risk of using brain-extracted images
    if config.workflow.skull_strip_t1w == "auto":
        build_log.warning(
            """\
Option ``--skull-strip-t1w`` was set to 'auto'. A heuristic will be \
applied to determine whether the input T1w image(s) have already been skull-stripped.
If that were the case, brain extraction and INU correction will be skipped for those T1w \
inputs. Please, BEWARE OF THE RISKS TO THE CONSISTENCY of results when using varying \
processing workflows across participants. To determine whether a participant has been run \
through the shortcut pipeline (meaning, brain extraction was skipped), please check the \
citation boilerplate. When reporting results with varying pipelines, please make sure you \
mention this particular variant of fMRIPrep listing the participants for which it was \
applied."""
        )

    bids_dir = config.execution.bids_dir
    output_dir = config.execution.output_dir
    work_dir = config.execution.work_dir
    version = config.environment.version
    output_layout = config.execution.output_layout

    # Default derivative locations depend on the selected output layout
    if config.execution.fs_subjects_dir is None:
        if output_layout == "bids":
            config.execution.fs_subjects_dir = output_dir / "sourcedata" / "freesurfer"
        elif output_layout == "legacy":
            config.execution.fs_subjects_dir = output_dir / "freesurfer"
    if config.execution.fmriprep_dir is None:
        if output_layout == "bids":
            config.execution.fmriprep_dir = output_dir
        elif output_layout == "legacy":
            config.execution.fmriprep_dir = output_dir / "fmriprep"

    # Wipe out existing work_dir
    if opts.clean_workdir and work_dir.exists():
        from niworkflows.utils.misc import clean_directory

        build_log.info(f"Clearing previous fMRIPrep working directory: {work_dir}")
        if not clean_directory(work_dir):
            build_log.warning(f"Could not clear all contents of working directory: {work_dir}")

    # Update the config with an empty dict to trigger initialization of all config
    # sections (we used `init=False` above).
    # This must be done after cleaning the work directory, or we could delete an
    # open SQLite database
    config.from_dict({})

    # Ensure input and output folders are not the same
    if output_dir == bids_dir:
        # BUG FIX: build the suggested path *before* interpolating it.
        # ``"...%s." % bids_dir / "derivatives"`` applied ``%`` first (``%``
        # and ``/`` share precedence, left-to-right) and raised
        # ``TypeError: unsupported operand type(s) for /: 'str' and 'str'``.
        suggested = bids_dir / "derivatives" / ("fmriprep-%s" % version.split("+")[0])
        parser.error(
            "The selected output folder is the same as the input BIDS folder. "
            "Please modify the output path (suggestion: %s)." % suggested
        )

    if bids_dir in work_dir.parents:
        parser.error(
            "The selected working directory is a subdirectory of the input BIDS folder. "
            "Please modify the output path."
        )

    # Validate inputs
    if not opts.skip_bids_validation:
        from ..utils.bids import validate_input_dir

        build_log.info(
            "Making sure the input data is BIDS compliant (warnings can be ignored in most "
            "cases)."
        )
        validate_input_dir(config.environment.exec_env, opts.bids_dir, opts.participant_label)

    # Setup directories
    config.execution.log_dir = config.execution.fmriprep_dir / "logs"
    # Check and create output and working directories
    config.execution.log_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Force initialization of the BIDSLayout
    config.execution.init()
    all_subjects = config.execution.layout.get_subjects()
    if config.execution.participant_label is None:
        config.execution.participant_label = all_subjects

    participant_label = set(config.execution.participant_label)
    missing_subjects = participant_label - set(all_subjects)
    if missing_subjects:
        parser.error(
            "One or more participant labels were not found in the BIDS directory: "
            "%s." % ", ".join(missing_subjects)
        )

    config.execution.participant_label = sorted(participant_label)
    # ``Reference.from_string`` (argument type) yields a list; keep the first
    config.workflow.skull_strip_template = config.workflow.skull_strip_template[0]
| 30,254 | 35.190191 | 99 | py |
fmriprep | fmriprep-master/fmriprep/cli/run.py | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""fMRI preprocessing workflow."""
from .. import config
EXITCODE: int = -1
def main():
    """Entry point.

    Orchestrates a full fMRIPrep run: parses arguments, builds the workflow
    in a child process (to cap virtual-memory growth), runs it, and always
    finishes by generating reports and derivative metadata.
    """
    import gc
    import sys
    import warnings  # NOTE(review): appears unused in this function — confirm before removing
    from multiprocessing import Manager, Process
    from os import EX_SOFTWARE
    from pathlib import Path

    from ..utils.bids import write_bidsignore, write_derivative_description
    from .parser import parse_args
    from .workflow import build_workflow

    parse_args()

    # Deprecated flags: warn (do not fail yet) if any AROMA option was given.
    if any(
        (
            config.workflow.use_aroma,
            config.workflow.aroma_err_on_warn,
            config.workflow.aroma_melodic_dim,
        )
    ):
        config.loggers.cli.warning(
            "ICA-AROMA was removed in fMRIPrep 23.1.0. The --use-aroma, --aroma-err-on-warn, "
            "and --aroma-melodic-dim flags will error in fMRIPrep 24.0.0."
        )

    # Code Carbon: optional CO2-emissions tracking for the whole run.
    if config.execution.track_carbon:
        from codecarbon import OfflineEmissionsTracker

        country_iso_code = config.execution.country_code
        config.loggers.workflow.log(25, "CodeCarbon tracker started ...")
        config.loggers.workflow.log(25, f"Using country_iso_code: {country_iso_code}")
        config.loggers.workflow.log(25, f"Saving logs at: {config.execution.log_dir}")
        tracker = OfflineEmissionsTracker(
            output_dir=config.execution.log_dir, country_iso_code=country_iso_code
        )
        tracker.start()

    if "pdb" in config.execution.debug:
        from fmriprep.utils.debug import setup_exceptionhook

        # Drop into the debugger on exceptions; a serial plugin keeps
        # everything in one process so pdb can attach.
        setup_exceptionhook()
        config.nipype.plugin = "Linear"

    sentry_sdk = None
    if not config.execution.notrack and not config.execution.debug:
        import atexit

        import sentry_sdk

        from ..utils.telemetry import sentry_setup, setup_migas

        sentry_setup()
        setup_migas(init_ping=True)
        atexit.register(migas_exit)

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml"
    config_file.parent.mkdir(exist_ok=True, parents=True)
    config.to_filename(config_file)

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    if "pdb" not in config.execution.debug:
        with Manager() as mgr:
            retval = mgr.dict()
            p = Process(target=build_workflow, args=(str(config_file), retval))
            p.start()
            p.join()
            retval = dict(retval.items())  # Convert to base dictionary

            if p.exitcode:
                retval["return_code"] = p.exitcode
    else:
        # With pdb debugging, build in-process so breakpoints work.
        retval = build_workflow(str(config_file), {})

    global EXITCODE
    EXITCODE = retval.get("return_code", 0)
    fmriprep_wf = retval.get("workflow", None)

    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of fMRIPrep).
    config.load(config_file)

    if config.execution.reports_only:
        sys.exit(int(EXITCODE > 0))

    if fmriprep_wf and config.execution.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format="svg", simple_form=True)

    # A missing workflow with a zero exit code still means failure (EX_SOFTWARE).
    EXITCODE = EXITCODE or (fmriprep_wf is None) * EX_SOFTWARE
    if EXITCODE != 0:
        sys.exit(EXITCODE)

    # Generate boilerplate (in a subprocess, like workflow construction).
    with Manager() as mgr:
        from .workflow import build_boilerplate

        p = Process(target=build_boilerplate, args=(str(config_file), fmriprep_wf))
        p.start()
        p.join()

    if config.execution.boilerplate_only:
        sys.exit(int(EXITCODE > 0))

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if sentry_sdk is not None:
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag("run_uuid", config.execution.run_uuid)
            scope.set_tag("npart", len(config.execution.participant_label))
        sentry_sdk.add_breadcrumb(message="fMRIPrep started", level="info")
        sentry_sdk.capture_message("fMRIPrep started", level="info")

    config.loggers.workflow.log(
        15,
        "\n".join(["fMRIPrep config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]),
    )
    config.loggers.workflow.log(25, "fMRIPrep started!")
    errno = 1  # Default is error exit unless otherwise set
    try:
        fmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        if not config.execution.notrack:
            from ..utils.telemetry import process_crashfile

            # Upload any nipype crashfiles produced by the failed run.
            crashfolders = [
                config.execution.fmriprep_dir / f"sub-{s}" / "log" / config.execution.run_uuid
                for s in config.execution.participant_label
            ]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob("crash*.*"):
                    process_crashfile(crashfile)

        if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e):
            sentry_sdk.capture_exception(e)
        config.loggers.workflow.critical("fMRIPrep failed: %s", e)
        raise
    else:
        config.loggers.workflow.log(25, "fMRIPrep finished successfully!")
        if sentry_sdk is not None:
            success_message = "fMRIPrep finished without errors"
            sentry_sdk.add_breadcrumb(message=success_message, level="info")
            sentry_sdk.capture_message(success_message, level="info")

        # Bother users with the boilerplate only iff the workflow went okay.
        boiler_file = config.execution.fmriprep_dir / "logs" / "CITATION.md"
        if boiler_file.exists():
            if config.environment.exec_env in (
                "singularity",
                "docker",
                "fmriprep-docker",
            ):
                # Inside a container, report the path as seen from the host.
                boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
                    config.execution.output_dir
                )
            config.loggers.workflow.log(
                25,
                "Works derived from this fMRIPrep execution should include the "
                f"boilerplate text found in {boiler_file}.",
            )

        if config.workflow.run_reconall:
            from niworkflows.utils.misc import _copy_any
            from templateflow import api

            # Ship the FreeSurfer segmentation label tables with the derivatives.
            dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
            _copy_any(dseg_tsv, str(config.execution.fmriprep_dir / "desc-aseg_dseg.tsv"))
            _copy_any(dseg_tsv, str(config.execution.fmriprep_dir / "desc-aparcaseg_dseg.tsv"))
        errno = 0
    finally:
        # This block always runs: reports and dataset metadata are generated
        # even when the workflow crashed above.
        from pkg_resources import resource_filename as pkgrf

        # Code Carbon
        if config.execution.track_carbon:
            emissions: float = tracker.stop()
            config.loggers.workflow.log(25, "CodeCarbon tracker has stopped.")
            config.loggers.workflow.log(25, f"Saving logs at: {config.execution.log_dir}")
            config.loggers.workflow.log(25, f"Carbon emissions: {emissions} kg")

        from fmriprep.reports.core import generate_reports

        # Generate reports phase
        failed_reports = generate_reports(
            config.execution.participant_label,
            config.execution.fmriprep_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep", "data/reports-spec.yml"),
            packagename="fmriprep",
        )
        write_derivative_description(config.execution.bids_dir, config.execution.fmriprep_dir)
        write_bidsignore(config.execution.fmriprep_dir)

        if sentry_sdk is not None and failed_reports:
            sentry_sdk.capture_message(
                "Report generation failed for %d subjects" % failed_reports,
                level="error",
            )
        sys.exit(int((errno + failed_reports) > 0))
def migas_exit() -> None:
    """
    Send a final crumb to the migas server signaling if the run successfully completed

    This function should be registered with `atexit` to run at termination.
    """
    import sys

    from ..utils.telemetry import send_breadcrumb

    # ``sys.last_type``/``sys.last_value`` only exist after an unhandled
    # exception has been processed, so their presence signals a crash.
    if hasattr(sys, 'last_type'):
        crumb = {
            'status': 'F',
            'status_desc': 'Finished with error(s)',
            'error_type': sys.last_type,
            'error_desc': sys.last_value,
        }
    elif EXITCODE != 0:
        # No traceback, but the run still ended with a nonzero exit code.
        crumb = {
            'status': 'F',
            'status_desc': f'Completed with exitcode {EXITCODE}',
        }
    else:
        crumb = {'status': 'C', 'status_desc': 'Success'}
    send_breadcrumb(**crumb)
if __name__ == "__main__":
    # Guard against direct execution: the relative imports above only resolve
    # when this module is imported from the installed ``fmriprep`` package.
    raise RuntimeError(
        "fmriprep/cli/run.py should not be run directly;\n"
        "Please `pip install` fmriprep and use the `fmriprep` command"
    )
| 10,249 | 35.870504 | 95 | py |
fmriprep | fmriprep-master/fmriprep/cli/version.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Version CLI helpers."""
from datetime import datetime
from pathlib import Path
import requests
from .. import __version__
RELEASE_EXPIRY_DAYS = 14
DATE_FMT = "%Y%m%d"
def check_latest():
    """Determine whether this is the latest version.

    Returns the latest known release as a ``packaging.version.Version``, or
    ``None`` if it could not be determined. Results are cached on disk at
    ``~/.cache/fmriprep/latest`` for ``RELEASE_EXPIRY_DAYS`` days.
    """
    from packaging.version import InvalidVersion, Version

    latest = None
    date = None
    outdated = None
    cachefile = Path.home() / ".cache" / "fmriprep" / "latest"
    try:
        cachefile.parent.mkdir(parents=True, exist_ok=True)
    except OSError:
        # Read-only home: run without the on-disk cache.
        cachefile = None

    if cachefile and cachefile.exists():
        try:
            # Cache format: "<version>|<YYYYMMDD>"
            latest, date = cachefile.read_text().split("|")
        except Exception:
            pass
        else:
            try:
                latest = Version(latest)
                date = datetime.strptime(date, DATE_FMT)
            except (InvalidVersion, ValueError):
                latest = None
            else:
                if abs((datetime.now() - date).days) > RELEASE_EXPIRY_DAYS:
                    outdated = True

    if latest is None or outdated is True:
        # Cache miss/expired: query PyPI (short timeout; failure is non-fatal).
        try:
            response = requests.get(url="https://pypi.org/pypi/fmriprep/json", timeout=1.0)
        except Exception:
            response = None

        if response and response.status_code == 200:
            versions = [Version(rel) for rel in response.json()["releases"].keys()]
            versions = [rel for rel in versions if not rel.is_prerelease]
            if versions:
                latest = sorted(versions)[-1]
        else:
            # Failed request discards any (stale) cached value.
            latest = None

    if cachefile is not None and latest is not None:
        try:
            cachefile.write_text("|".join(("%s" % latest, datetime.now().strftime(DATE_FMT))))
        except Exception:
            pass

    return latest
def is_flagged():
    """Check whether current version is flagged.

    Returns a ``(flagged, reason)`` tuple; ``reason`` is only non-``None``
    when the maintainers recorded one for this release.
    """
    # The list of flagged (known-problematic) releases is maintained at:
    # https://raw.githubusercontent.com/nipreps/fmriprep/master/.versions.json
    response = None
    try:
        response = requests.get(
            url="https://raw.githubusercontent.com/nipreps/fmriprep/master/.versions.json",
            timeout=1.0,
        )
    except Exception:
        pass

    if response and response.status_code == 200:
        flagged = response.json().get("flagged", {}) or {}
        if __version__ in flagged:
            return True, flagged[__version__]

    return False, None
| 3,268 | 29.551402 | 94 | py |
fmriprep | fmriprep-master/fmriprep/cli/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/cli/workflow.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
The workflow builder factory method.
All the checks and the construction of the workflow are done
inside this function that has pickleable inputs and output
dictionary (``retval``) to allow isolation using a
``multiprocessing.Process`` that allows fmriprep to enforce
a hard-limited memory-scope.
"""
def build_workflow(config_file, retval):
    """Create the Nipype Workflow that supports the whole execution graph.

    Parameters
    ----------
    config_file : path-like
        TOML file with the serialized fMRIPrep configuration.
    retval : dict
        Mutable mapping used to communicate results back to the parent
        process; gets ``"workflow"`` and ``"return_code"`` keys.
    """
    from pathlib import Path

    from niworkflows.utils.bids import check_pipeline_version, collect_participants
    from niworkflows.utils.misc import check_valid_fs_license
    from pkg_resources import resource_filename as pkgrf

    from fmriprep.reports.core import generate_reports

    from .. import config
    from ..utils.misc import check_deps
    from ..workflows.base import init_fmriprep_wf

    config.load(config_file)
    build_log = config.loggers.workflow

    fmriprep_dir = config.execution.fmriprep_dir
    version = config.environment.version

    # Pessimistic defaults; overwritten on success below.
    retval["return_code"] = 1
    retval["workflow"] = None

    banner = [f"Running fMRIPrep version {version}"]
    notice_path = Path(pkgrf("fmriprep", "data/NOTICE"))
    if notice_path.exists():
        banner[0] += "\n"
        banner += [f"License NOTICE {'#' * 50}"]
        banner += [f"fMRIPrep {version}"]
        banner += notice_path.read_text().splitlines(keepends=False)[1:]
        banner += ["#" * len(banner[1])]
    build_log.log(25, f"\n{' ' * 9}".join(banner))

    # warn if older results exist: check for dataset_description.json in output folder
    msg = check_pipeline_version(version, fmriprep_dir / "dataset_description.json")
    if msg is not None:
        build_log.warning(msg)

    # Please note this is the input folder's dataset_description.json
    dset_desc_path = config.execution.bids_dir / "dataset_description.json"
    if dset_desc_path.exists():
        from hashlib import sha256

        desc_content = dset_desc_path.read_bytes()
        config.execution.bids_description_hash = sha256(desc_content).hexdigest()

    # First check that bids_dir looks like a BIDS folder
    subject_list = collect_participants(
        config.execution.layout, participant_label=config.execution.participant_label
    )

    # Called with reports only: regenerate the HTML reports and return.
    if config.execution.reports_only:
        build_log.log(25, "Running --reports-only on participants %s", ", ".join(subject_list))
        retval["return_code"] = generate_reports(
            config.execution.participant_label,
            config.execution.fmriprep_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep", "data/reports-spec.yml"),
            packagename="fmriprep",
        )
        return retval

    # Build main workflow
    init_msg = [
        "Building fMRIPrep's workflow:",
        f"BIDS dataset path: {config.execution.bids_dir}.",
        f"Participant list: {subject_list}.",
        f"Run identifier: {config.execution.run_uuid}.",
        f"Output spaces: {config.execution.output_spaces}.",
    ]

    if config.execution.anat_derivatives:
        init_msg += [f"Anatomical derivatives: {config.execution.anat_derivatives}."]

    if config.execution.fs_subjects_dir:
        init_msg += [f"Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."]

    build_log.log(25, f"\n{' ' * 11}* ".join(init_msg))

    retval["workflow"] = init_fmriprep_wf()

    # Check for FS license after building the workflow
    if not check_valid_fs_license():
        from ..utils.misc import fips_enabled

        if fips_enabled():
            build_log.critical(
                """\
ERROR: Federal Information Processing Standard (FIPS) mode is enabled on your system. \
FreeSurfer (and thus fMRIPrep) cannot be used in FIPS mode. \
Contact your system administrator for assistance."""
            )
        else:
            build_log.critical(
                """\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
            )
        retval["return_code"] = 126  # 126 == Command invoked cannot execute.
        return retval

    # Check workflow for missing commands
    missing = check_deps(retval["workflow"])
    if missing:
        build_log.critical(
            "Cannot run fMRIPrep. Missing dependencies:%s",
            "\n\t* ".join([""] + [f"{cmd} (Interface: {iface})" for iface, cmd in missing]),
        )
        retval["return_code"] = 127  # 127 == command not found.
        return retval

    # Persist any config changes made during workflow construction.
    config.to_filename(config_file)
    build_log.info(
        "fMRIPrep workflow graph with %d nodes built successfully.",
        len(retval["workflow"]._get_all_nodes()),
    )
    retval["return_code"] = 0
    return retval
def build_boilerplate(config_file, workflow):
    """Write boilerplate in an isolated process.

    Writes CITATION.md from the workflow's description, then (unless
    ``md_only_boilerplate`` is set) renders HTML and LaTeX versions with
    ``pandoc``; a missing/failing pandoc only triggers a warning.
    """
    from .. import config

    config.load(config_file)
    logs_path = config.execution.fmriprep_dir / "logs"
    boilerplate = workflow.visit_desc()
    citation_files = {
        ext: logs_path / ("CITATION.%s" % ext) for ext in ("bib", "tex", "md", "html")
    }

    if boilerplate:
        # To please git-annex users and also to guarantee consistency
        # among different renderings of the same file, first remove any
        # existing one
        for citation_file in citation_files.values():
            try:
                citation_file.unlink()
            except FileNotFoundError:
                pass

        citation_files["md"].write_text(boilerplate)

    if not config.execution.md_only_boilerplate and citation_files["md"].exists():
        from pathlib import Path
        from subprocess import CalledProcessError, TimeoutExpired, check_call

        from pkg_resources import resource_filename as pkgrf

        # Inject the current version into the shipped BibTeX template.
        bib_text = Path(pkgrf("fmriprep", "data/boilerplate.bib")).read_text()
        citation_files["bib"].write_text(
            bib_text.replace("fMRIPrep <version>", f"fMRIPrep {config.environment.version}")
        )

        # Generate HTML file resolving citations
        cmd = [
            "pandoc",
            "-s",
            "--bibliography",
            str(citation_files["bib"]),
            "--citeproc",
            "--metadata",
            'pagetitle="fMRIPrep citation boilerplate"',
            str(citation_files["md"]),
            "-o",
            str(citation_files["html"]),
        ]

        config.loggers.cli.info("Generating an HTML version of the citation boilerplate...")
        try:
            check_call(cmd, timeout=10)
        except (FileNotFoundError, CalledProcessError, TimeoutExpired):
            config.loggers.cli.warning("Could not generate CITATION.html file:\n%s", " ".join(cmd))

        # Generate LaTex file resolving citations
        cmd = [
            "pandoc",
            "-s",
            "--bibliography",
            str(citation_files["bib"]),
            "--natbib",
            str(citation_files["md"]),
            "-o",
            str(citation_files["tex"]),
        ]
        config.loggers.cli.info("Generating a LaTeX version of the citation boilerplate...")
        try:
            check_call(cmd, timeout=10)
        except (FileNotFoundError, CalledProcessError, TimeoutExpired):
            config.loggers.cli.warning("Could not generate CITATION.tex file:\n%s", " ".join(cmd))
| 8,484 | 36.214912 | 99 | py |
fmriprep | fmriprep-master/fmriprep/cli/tests/test_parser.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Test parser."""
from argparse import ArgumentError
from contextlib import nullcontext
import pytest
from packaging.version import Version
from ... import config
from ...tests.test_config import _reset_config
from .. import version as _version
from ..parser import _build_parser, parse_args
MIN_ARGS = ["data/", "out/", "participant"]
@pytest.mark.parametrize(
    "args,code",
    [
        ([], 2),
        (MIN_ARGS, 2),  # bids_dir does not exist
        (MIN_ARGS + ["--fs-license-file"], 2),
        (MIN_ARGS + ["--fs-license-file", "fslicense.txt"], 2),
    ],
)
def test_parser_errors(args, code):
    """Check behavior of the parser."""
    # argparse exits with code 2 on invalid/missing arguments.
    with pytest.raises(SystemExit) as error:
        _build_parser().parse_args(args)
    assert error.value.code == code
@pytest.mark.parametrize("args", [MIN_ARGS, MIN_ARGS + ["--fs-license-file"]])
def test_parser_valid(tmp_path, args):
    """Check valid arguments."""
    datapath = tmp_path / "data"
    datapath.mkdir(exist_ok=True)
    args[0] = str(datapath)

    if "--fs-license-file" in args:
        # The license path must exist for the parser to accept it.
        _fs_file = tmp_path / "license.txt"
        _fs_file.write_text("")
        args.insert(args.index("--fs-license-file") + 1, str(_fs_file.absolute()))

    opts = _build_parser().parse_args(args)

    assert opts.bids_dir == datapath
@pytest.mark.parametrize(
    "argval,gb",
    [
        ("1G", 1),
        ("1GB", 1),
        ("1000", 1),  # Default units are MB
        ("32000", 32),  # Default units are MB
        ("4000", 4),  # Default units are MB
        ("1000M", 1),
        ("1000MB", 1),
        ("1T", 1000),
        ("1TB", 1000),
        ("%dK" % 1e6, 1),
        ("%dKB" % 1e6, 1),
        ("%dB" % 1e9, 1),
    ],
)
def test_memory_arg(tmp_path, argval, gb):
    """Check the correct parsing of the memory argument."""
    datapath = tmp_path / "data"
    datapath.mkdir(exist_ok=True)
    _fs_file = tmp_path / "license.txt"
    _fs_file.write_text("")
    args = [str(datapath)] + MIN_ARGS[1:] + ["--fs-license-file", str(_fs_file), "--mem", argval]

    # --mem accepts several unit suffixes; normalized value lands in memory_gb.
    opts = _build_parser().parse_args(args)
    assert opts.memory_gb == gb
@pytest.mark.parametrize("current,latest", [("1.0.0", "1.3.2"), ("1.3.2", "1.3.2")])
def test_get_parser_update(monkeypatch, capsys, current, latest):
    """Make sure the out-of-date banner is shown."""
    # Banner should appear only when a strictly newer version exists.
    expectation = Version(current) < Version(latest)

    def _mock_check_latest(*args, **kwargs):
        return Version(latest)

    monkeypatch.setattr(config.environment, "version", current)
    monkeypatch.setattr(_version, "check_latest", _mock_check_latest)

    _build_parser()
    captured = capsys.readouterr().err

    msg = f"""\
You are using fMRIPrep-{current}, and a newer version of fMRIPrep is available: {latest}.
Please check out our documentation about how and when to upgrade:
https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading"""

    assert (msg in captured) is expectation
@pytest.mark.parametrize("flagged", [(True, None), (True, "random reason"), (False, None)])
def test_get_parser_blacklist(monkeypatch, capsys, flagged):
    """Make sure the blacklisting banner is shown."""

    def _mock_is_bl(*args, **kwargs):
        return flagged

    monkeypatch.setattr(_version, "is_flagged", _mock_is_bl)
    _build_parser()
    captured = capsys.readouterr().err

    assert ("FLAGGED" in captured) is flagged[0]
    if flagged[0]:
        # Without a recorded reason, the banner falls back to "reason: unknown".
        assert (flagged[1] or "reason: unknown") in captured
def test_parse_args(tmp_path, minimal_bids):
    """Basic smoke test showing that our parse_args() function
    implements the BIDS App protocol"""
    out_dir = tmp_path / "out"
    work_dir = tmp_path / "work"
    parse_args(
        args=[
            str(minimal_bids),
            str(out_dir),
            "participant",  # BIDS App
            "-w",
            str(work_dir),  # Don't pollute CWD
            "--skip-bids-validation",  # Empty files make BIDS sad
        ]
    )
    assert config.execution.layout.root == str(minimal_bids)
    # parse_args mutates the global config; restore it for other tests.
    _reset_config()
def test_bids_filter_file(tmp_path, capsys):
    """Check that a missing or malformed --bids-filter-file aborts parsing."""
    bids_path = tmp_path / "data"
    out_path = tmp_path / "out"
    bff = tmp_path / "filter.json"
    args = [str(bids_path), str(out_path), "participant", "--bids-filter-file", str(bff)]
    bids_path.mkdir()

    parser = _build_parser()

    # File does not exist yet -> parser error.
    with pytest.raises(SystemExit):
        parser.parse_args(args)

    err = capsys.readouterr().err
    assert "Path does not exist:" in err

    # File exists but is not valid JSON -> parser error.
    bff.write_text('{"invalid json": }')

    with pytest.raises(SystemExit):
        parser.parse_args(args)

    err = capsys.readouterr().err
    assert "JSON syntax error in:" in err
    _reset_config()
@pytest.mark.parametrize("st_ref", (None, "0", "1", "0.5", "start", "middle"))
def test_slice_time_ref(tmp_path, st_ref):
    """Check that all documented --slice-time-ref values parse without error."""
    bids_path = tmp_path / "data"
    out_path = tmp_path / "out"
    args = [str(bids_path), str(out_path), "participant"]
    if st_ref:
        args.extend(["--slice-time-ref", st_ref])
    bids_path.mkdir()

    parser = _build_parser()
    parser.parse_args(args)
    _reset_config()
@pytest.mark.parametrize(
    "args, expectation",
    (
        ([], False),
        (["--use-syn-sdc"], "error"),
        (["--use-syn-sdc", "error"], "error"),
        (["--use-syn-sdc", "warn"], "warn"),
        (["--use-syn-sdc", "other"], (SystemExit, ArgumentError)),
    ),
)
def test_use_syn_sdc(tmp_path, args, expectation):
    """Check --use-syn-sdc values: bare flag defaults to "error"; bad values raise."""
    bids_path = tmp_path / "data"
    out_path = tmp_path / "out"
    args = [str(bids_path), str(out_path), "participant"] + args
    bids_path.mkdir()

    parser = _build_parser()
    # A tuple expectation means "this input must raise one of these".
    cm = nullcontext()
    if isinstance(expectation, tuple):
        cm = pytest.raises(expectation)

    with cm:
        opts = parser.parse_args(args)

    if not isinstance(expectation, tuple):
        assert opts.use_syn_sdc == expectation
    _reset_config()
| 6,755 | 28.373913 | 97 | py |
fmriprep | fmriprep-master/fmriprep/cli/tests/test_version.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Test version checks."""
from datetime import datetime
from os import getenv, geteuid
from pathlib import Path
import pytest
from packaging.version import Version
from .. import version as _version
from ..version import DATE_FMT, check_latest, is_flagged, requests
class MockResponse:
    """Mocks the requests module so that Pypi is not actually queried."""

    # Class-level defaults emulate a successful PyPI ``/json`` response.
    status_code = 200
    _payload = {"releases": {"1.0.0": None, "1.0.1": None, "1.1.0": None, "1.1.1rc1": None}}

    def __init__(self, code=200, json=None):
        """Allow setting a different response code and/or payload."""
        self.status_code = code
        if json is not None:
            self._payload = json

    def json(self):
        """Emulate ``requests.Response.json()`` by returning the canned payload."""
        return self._payload
def test_check_latest1(tmpdir, monkeypatch):
    """Test latest version check."""
    tmpdir.chdir()
    # Redirect $HOME so the cache file lands in the temporary directory.
    monkeypatch.setenv("HOME", str(tmpdir))
    assert str(Path.home()) == str(tmpdir)

    def mock_get(*args, **kwargs):
        return MockResponse()

    monkeypatch.setattr(requests, "get", mock_get)

    # Initially, cache should not exist
    cachefile = Path.home() / ".cache" / "fmriprep" / "latest"
    assert not cachefile.exists()

    # First check actually fetches from pypi
    v = check_latest()
    assert cachefile.exists()
    assert isinstance(v, Version)
    assert v == Version("1.1.0")
    assert cachefile.read_text().split("|") == [
        str(v),
        datetime.now().strftime(DATE_FMT),
    ]

    # Second check - test the cache file is read
    cachefile.write_text("|".join(("1.0.0", cachefile.read_text().split("|")[1])))
    v = check_latest()
    assert isinstance(v, Version)
    assert v == Version("1.0.0")

    # Third check - forced oudating of cache
    cachefile.write_text("2.0.0|20180121")
    v = check_latest()
    assert isinstance(v, Version)
    assert v == Version("1.1.0")

    # Mock timeouts
    def mock_get(*args, **kwargs):
        raise requests.exceptions.Timeout

    monkeypatch.setattr(requests, "get", mock_get)

    # Fresh cache is honored even when PyPI is unreachable.
    cachefile.write_text("|".join(("1.0.0", cachefile.read_text().split("|")[1])))
    v = check_latest()
    assert isinstance(v, Version)
    assert v == Version("1.0.0")

    # Expired cache + unreachable PyPI -> no version can be determined.
    cachefile.write_text("2.0.0|20180121")
    v = check_latest()
    assert v is None

    cachefile.unlink()
    v = check_latest()
    assert v is None
@pytest.mark.parametrize(
    ("result", "code", "json"),
    [
        (None, 404, None),
        (None, 200, {"releases": {"1.0.0rc1": None}}),
        (Version("1.1.0"), 200, None),
        (Version("1.0.0"), 200, {"releases": {"1.0.0": None}}),
    ],
)
def test_check_latest2(tmpdir, monkeypatch, result, code, json):
    """Test latest version check with varying server responses."""
    tmpdir.chdir()
    monkeypatch.setenv("HOME", str(tmpdir))
    assert str(Path.home()) == str(tmpdir)

    def mock_get(*args, **kwargs):
        return MockResponse(code=code, json=json)

    monkeypatch.setattr(requests, "get", mock_get)

    v = check_latest()
    if result is None:
        # Errors and prerelease-only listings yield no latest version.
        assert v is None
    else:
        assert isinstance(v, Version)
        assert v == result
@pytest.mark.parametrize(
    "bad_cache",
    [
        "3laj#r???d|3akajdf#",
        "2.0.0|3akajdf#",
        "|".join(("2.0.0", datetime.now().strftime(DATE_FMT), "")),
        "",
    ],
)
def test_check_latest3(tmpdir, monkeypatch, bad_cache):
    """Test latest version check when the cache file is corrupted."""
    tmpdir.chdir()
    monkeypatch.setenv("HOME", str(tmpdir))
    assert str(Path.home()) == str(tmpdir)

    def mock_get(*args, **kwargs):
        return MockResponse()

    monkeypatch.setattr(requests, "get", mock_get)

    # Initially, cache should not exist
    cachefile = Path.home() / ".cache" / "fmriprep" / "latest"
    cachefile.parent.mkdir(parents=True, exist_ok=True)
    assert not cachefile.exists()

    # A corrupted cache must be ignored and PyPI queried instead.
    cachefile.write_text(bad_cache)
    v = check_latest()
    assert isinstance(v, Version)
    assert v == Version("1.1.0")
@pytest.mark.parametrize(
    ("result", "version", "code", "json"),
    [
        (False, "1.2.1", 200, {"flagged": {"1.0.0": None}}),
        (True, "1.2.1", 200, {"flagged": {"1.2.1": None}}),
        (True, "1.2.1", 200, {"flagged": {"1.2.1": "FATAL Bug!"}}),
        (False, "1.2.1", 404, {"flagged": {"1.0.0": None}}),
        (False, "1.2.1", 200, {"flagged": []}),
        (False, "1.2.1", 200, {}),
    ],
)
def test_is_flagged(monkeypatch, result, version, code, json):
    """Test that the flagged-versions check is correct."""
    monkeypatch.setattr(_version, "__version__", version)

    def mock_get(*args, **kwargs):
        return MockResponse(code=code, json=json)

    monkeypatch.setattr(requests, "get", mock_get)

    val, reason = is_flagged()
    assert val is result

    # The reason must match what the server recorded for this version (if any).
    test_reason = None
    if val:
        test_reason = json.get("flagged", {}).get(version, None)

    if test_reason is not None:
        assert reason == test_reason
    else:
        assert reason is None
def test_readonly(tmp_path, monkeypatch):
    """Test behavior when $HOME/.cache/fmriprep/latest can't be written out."""
    # On a genuinely read-only filesystem (CI flag), point HOME at it;
    # otherwise simulate one by stripping write permission from ~/.cache.
    if getenv("TEST_READONLY_FILESYSTEM"):
        home_path = Path("/home/readonly")
    else:
        home_path = tmp_path
    monkeypatch.setenv("HOME", str(home_path))
    cachedir = home_path / ".cache"

    if getenv("TEST_READONLY_FILESYSTEM") is None:
        if geteuid() == 0:
            pytest.skip("Cannot mock being unable to create directories as root")
        cachedir.mkdir(mode=0o555, exist_ok=True)

    # Make sure creating the folder will raise the exception.
    with pytest.raises(OSError):
        (cachedir / "fmriprep").mkdir(parents=True)

    # Should not raise
    check_latest()
| 6,603 | 29.293578 | 90 | py |
fmriprep | fmriprep-master/fmriprep/cli/tests/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/workflows/base.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
fMRIPrep base processing workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_fmriprep_wf
.. autofunction:: init_single_subject_wf
"""
import os
import sys
import warnings
from copy import deepcopy
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from niworkflows.utils.connections import listify
from packaging.version import Version
from .. import config
from ..interfaces import DerivativesDataSink
from ..interfaces.reports import AboutSummary, SubjectSummary
from .bold.base import get_estimator, init_func_preproc_wf
def init_fmriprep_wf():
    """
    Build *fMRIPrep*'s pipeline.
    This workflow organizes the execution of FMRIPREP, with a sub-workflow for
    each subject.
    If FreeSurfer's ``recon-all`` is to be run, a corresponding folder is created
    and populated with any needed template subjects under the derivatives folder.
    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes
            from fmriprep.workflows.tests import mock_config
            from fmriprep.workflows.base import init_fmriprep_wf
            with mock_config():
                wf = init_fmriprep_wf()
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSFreeSurferDir

    # Name the top-level workflow after the major.minor fMRIPrep version.
    ver = Version(config.environment.version)
    fmriprep_wf = Workflow(name=f'fmriprep_{ver.major}_{ver.minor}_wf')
    fmriprep_wf.base_dir = config.execution.work_dir

    freesurfer = config.workflow.run_reconall
    if freesurfer:
        # Node that prepares $SUBJECTS_DIR (templates, licensing) before any
        # per-subject recon-all runs; named per run UUID to avoid collisions.
        fsdir = pe.Node(
            BIDSFreeSurferDir(
                derivatives=config.execution.output_dir,
                freesurfer_home=os.getenv('FREESURFER_HOME'),
                spaces=config.workflow.spaces.get_fs_spaces(),
                minimum_fs_version="7.0.0",
            ),
            name='fsdir_run_%s' % config.execution.run_uuid.replace('-', '_'),
            run_without_submitting=True,
        )
        if config.execution.fs_subjects_dir is not None:
            fsdir.inputs.subjects_dir = str(config.execution.fs_subjects_dir.absolute())

    # One sub-workflow per participant.
    for subject_id in config.execution.participant_label:
        single_subject_wf = init_single_subject_wf(subject_id)

        # Crash files go into the per-subject log directory for this run.
        single_subject_wf.config['execution']['crashdump_dir'] = str(
            config.execution.fmriprep_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid
        )
        # Propagate the (modified) config to every node; uses a private nipype
        # API (_get_all_nodes), so statement order here matters.
        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)
        if freesurfer:
            # Connecting fsdir also adds single_subject_wf to the graph.
            fmriprep_wf.connect(fsdir, 'subjects_dir', single_subject_wf, 'inputnode.subjects_dir')
        else:
            fmriprep_wf.add_nodes([single_subject_wf])

        # Dump a copy of the config file into the log directory
        log_dir = (
            config.execution.fmriprep_dir / f"sub-{subject_id}" / 'log' / config.execution.run_uuid
        )
        log_dir.mkdir(exist_ok=True, parents=True)
        config.to_filename(log_dir / 'fmriprep.toml')

    return fmriprep_wf
def init_single_subject_wf(subject_id: str):
    """
    Organize the preprocessing pipeline for a single subject.
    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.
    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes
            from fmriprep.workflows.tests import mock_config
            from fmriprep.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')
    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.
    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSDataGrabber, BIDSInfo
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.misc import fix_multi_T1w_source_name
    from niworkflows.utils.spaces import Reference
    from smriprep.workflows.anatomical import init_anat_preproc_wf

    name = "single_subject_%s_wf" % subject_id
    # Query the BIDS layout for this subject's inputs; collect_data returns a
    # tuple and only the data dictionary (first element) is used here.
    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        task=config.execution.task_id,
        echo=config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]
    # Honor --ignore flags by emptying the corresponding modality lists.
    if 'flair' in config.workflow.ignore:
        subject_data['flair'] = []
    if 't2w' in config.workflow.ignore:
        subject_data['t2w'] = []
    anat_only = config.workflow.anat_only
    anat_derivatives = config.execution.anat_derivatives
    spaces = config.workflow.spaces
    # Make sure we always go through these two checks
    if not anat_only and not subject_data['bold']:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else '<all>'
            )
        )
    if anat_derivatives:
        # Pre-computed anatomical derivatives were supplied: verify they meet
        # fMRIPrep's expectations; fall back to running anatomicals otherwise.
        from smriprep.utils.bids import collect_derivatives

        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3,))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(
                f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \
reconall <{config.workflow.run_reconall}>)."""
            )
    if not anat_derivatives and not subject_data['t1w']:
        raise Exception(
            f"No T1w images found for participant {subject_id}. All workflows require T1w images."
        )
    if subject_data['roi']:
        warnings.warn(
            f"Lesion mask {subject_data['roi']} found. "
            "Future versions of fMRIPrep will use alternative conventions. "
            "Please refer to the documentation before upgrading.",
            FutureWarning,
        )
    workflow = Workflow(name=name)
    # Citation boilerplate attached to the workflow for the generated report.
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).
""".format(
        fmriprep_ver=config.environment.version, nipype_ver=config.environment.nipype_version
    )
    workflow.__postdesc__ = """
Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").
### Copyright Waiver
The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.
### References
""".format(
        nilearn_ver=NILEARN_VERSION
    )
    fmriprep_dir = str(config.execution.fmriprep_dir)
    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']), name='inputnode')
    bidssrc = pe.Node(
        BIDSDataGrabber(
            subject_data=subject_data,
            anat_only=anat_only,
            anat_derivatives=anat_derivatives,
            subject_id=subject_id,
        ),
        name='bidssrc',
    )
    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name='bids_info'
    )
    # Reportlet nodes: subject summary and "about" provenance snippet.
    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name='summary',
        run_without_submitting=True,
    )
    about = pe.Node(
        AboutSummary(version=config.environment.version, command=' '.join(sys.argv)),
        name='about',
        run_without_submitting=True,
    )
    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=fmriprep_dir,
            desc='summary',
            datatype="figures",
            dismiss_entities=("echo",),
        ),
        name='ds_report_summary',
        run_without_submitting=True,
    )
    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=fmriprep_dir,
            desc='about',
            datatype="figures",
            dismiss_entities=("echo",),
        ),
        name='ds_report_about',
        run_without_submitting=True,
    )
    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        sloppy=config.execution.sloppy,
        debug=config.execution.debug,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        hires=config.workflow.hires,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=fmriprep_dir,
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t1w=subject_data['t1w'],
        t2w=subject_data['t2w'],
        cifti_output=config.workflow.cifti_output,
    )
    # fmt:off
    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])
    # With precomputed derivatives, raw T1w may be absent; source files for
    # reports then come from the preprocessed T1w instead.
    if not anat_derivatives:
        workflow.connect([
            (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
            (bidssrc, summary, [('t1w', 't1w'),
                                ('t2w', 't2w')]),
            (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
            (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
        ])
    else:
        workflow.connect([
            (bidssrc, bids_info, [(('bold', fix_multi_T1w_source_name), 'in_file')]),
            (anat_preproc_wf, summary, [('outputnode.t1w_preproc', 't1w')]),
            (anat_preproc_wf, ds_report_summary, [('outputnode.t1w_preproc', 'source_file')]),
            (anat_preproc_wf, ds_report_about, [('outputnode.t1w_preproc', 'source_file')]),
        ])
    # fmt:on
    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            workflow.get_node(node).interface.out_path_base = ""
    if anat_only:
        return workflow
    # --- Functional preprocessing below this point ---
    from sdcflows import fieldmaps as fm

    fmap_estimators = None
    # Only search for fieldmap estimators if some form of SDC may be applied.
    if any(
        (
            "fieldmaps" not in config.workflow.ignore,
            config.workflow.use_syn_sdc,
            config.workflow.force_syn,
        )
    ):
        from sdcflows.utils.wrangler import find_estimators

        # SDC Step 1: Run basic heuristics to identify available data for fieldmap estimation
        # For now, no fmapless
        filters = None
        if config.execution.bids_filters is not None:
            filters = config.execution.bids_filters.get("fmap")
        # In the case where fieldmaps are ignored and `--use-syn-sdc` is requested,
        # SDCFlows `find_estimators` still receives a full layout (which includes the fmap modality)
        # and will not calculate fmapless schemes.
        # Similarly, if fieldmaps are ignored and `--force-syn` is requested,
        # `fmapless` should be set to True to ensure BOLD targets are found to be corrected.
        fmapless = bool(config.workflow.use_syn_sdc) or (
            "fieldmaps" in config.workflow.ignore and config.workflow.force_syn
        )
        force_fmapless = config.workflow.force_syn or (
            "fieldmaps" in config.workflow.ignore and config.workflow.use_syn_sdc
        )
        fmap_estimators = find_estimators(
            layout=config.execution.layout,
            subject=subject_id,
            fmapless=fmapless,
            force_fmapless=force_fmapless,
            bids_filters=filters,
        )
        if config.workflow.use_syn_sdc and not fmap_estimators:
            message = (
                "Fieldmap-less (SyN) estimation was requested, but PhaseEncodingDirection "
                "information appears to be absent."
            )
            config.loggers.workflow.error(message)
            # --use-syn-sdc=error escalates the missing-metadata warning.
            if config.workflow.use_syn_sdc == "error":
                raise ValueError(message)
        if "fieldmaps" in config.workflow.ignore and any(
            f.method == fm.EstimatorType.ANAT for f in fmap_estimators
        ):
            config.loggers.workflow.info(
                'Option "--ignore fieldmaps" was set, but either "--use-syn-sdc" '
                'or "--force-syn" were given, so fieldmap-less estimation will be executed.'
            )
            fmap_estimators = [f for f in fmap_estimators if f.method == fm.EstimatorType.ANAT]

        # Do not calculate fieldmaps that we will not use
        if fmap_estimators:
            used_estimators = {
                key
                for bold_file in subject_data['bold']
                for key in get_estimator(config.execution.layout, listify(bold_file)[0])
            }
            fmap_estimators = [fmap for fmap in fmap_estimators if fmap.bids_id in used_estimators]
    if fmap_estimators:
        config.loggers.workflow.info(
            "B0 field inhomogeneity map will be estimated with "
            f" the following {len(fmap_estimators)} estimators: "
            f"{[e.method for e in fmap_estimators]}."
        )
    # Append the functional section to the existing anatomical excerpt
    # That way we do not need to stream down the number of bold datasets
    func_pre_desc = """
Functional data preprocessing
: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(
        num_bold=len(subject_data['bold'])
    )
    func_preproc_wfs = []
    has_fieldmap = bool(fmap_estimators)
    for bold_file in subject_data['bold']:
        # init_func_preproc_wf may return None (e.g. too-short series); skip those.
        func_preproc_wf = init_func_preproc_wf(bold_file, has_fieldmap=has_fieldmap)
        if func_preproc_wf is None:
            continue
        func_preproc_wf.__desc__ = func_pre_desc + (func_preproc_wf.__desc__ or "")
        # fmt:off
        workflow.connect([
            (anat_preproc_wf, func_preproc_wf, [
                ('outputnode.t1w_preproc', 'inputnode.t1w_preproc'),
                ('outputnode.t1w_mask', 'inputnode.t1w_mask'),
                ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
                ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
                ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
                ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                # Undefined if --fs-no-reconall, but this is safe
                ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                ('outputnode.subject_id', 'inputnode.subject_id'),
                ('outputnode.anat_ribbon', 'inputnode.anat_ribbon'),
                ('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'),
                ('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'),
                ('outputnode.surfaces', 'inputnode.surfaces'),
                ('outputnode.morphometrics', 'inputnode.morphometrics'),
                ('outputnode.sphere_reg_fsLR', 'inputnode.sphere_reg_fsLR'),
            ]),
        ])
        # fmt:on
        func_preproc_wfs.append(func_preproc_wf)
    if not has_fieldmap:
        return workflow
    # SDC Step 2: set up the fieldmap estimation workflow and fan its outputs
    # out to every BOLD preprocessing workflow.
    from sdcflows.workflows.base import init_fmap_preproc_wf

    fmap_wf = init_fmap_preproc_wf(
        debug="fieldmaps" in config.execution.debug,
        estimators=fmap_estimators,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=fmriprep_dir,
        subject=subject_id,
    )
    fmap_wf.__desc__ = f"""
Preprocessing of B<sub>0</sub> inhomogeneity mappings
: A total of {len(fmap_estimators)} fieldmaps were found available within the input
BIDS structure for this particular subject.
"""
    for func_preproc_wf in func_preproc_wfs:
        # fmt:off
        workflow.connect([
            (fmap_wf, func_preproc_wf, [
                ("outputnode.fmap", "inputnode.fmap"),
                ("outputnode.fmap_ref", "inputnode.fmap_ref"),
                ("outputnode.fmap_coeff", "inputnode.fmap_coeff"),
                ("outputnode.fmap_mask", "inputnode.fmap_mask"),
                ("outputnode.fmap_id", "inputnode.fmap_id"),
                ("outputnode.method", "inputnode.sdc_method"),
            ]),
        ])
        # fmt:on
    # Overwrite ``out_path_base`` of sdcflows's DataSinks
    for node in fmap_wf.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            fmap_wf.get_node(node).interface.out_path_base = ""
    # Step 3: Manually connect PEPOLAR and ANAT workflows
    # Select "MNI152NLin2009cAsym" from standard references.
    # This node may be used by multiple ANAT estimators, so define outside loop.
    from niworkflows.interfaces.utility import KeySelect

    fmap_select_std = pe.Node(
        KeySelect(fields=["std2anat_xfm"], key="MNI152NLin2009cAsym"),
        name="fmap_select_std",
        run_without_submitting=True,
    )
    if any(estimator.method == fm.EstimatorType.ANAT for estimator in fmap_estimators):
        # fmt:off
        workflow.connect([
            (anat_preproc_wf, fmap_select_std, [
                ("outputnode.std2anat_xfm", "std2anat_xfm"),
                ("outputnode.template", "keys")]),
        ])
        # fmt:on
    for estimator in fmap_estimators:
        config.loggers.workflow.info(
            f"""\
Setting-up fieldmap "{estimator.bids_id}" ({estimator.method}) with \
<{', '.join(s.path.name for s in estimator.sources)}>"""
        )
        # Mapped and phasediff can be connected internally by SDCFlows
        if estimator.method in (fm.EstimatorType.MAPPED, fm.EstimatorType.PHASEDIFF):
            continue
        suffices = [s.suffix for s in estimator.sources]
        if estimator.method == fm.EstimatorType.PEPOLAR:
            # Only the simple two-file EPI/BOLD/SBRef PEPOLAR layout is wired here.
            if len(suffices) == 2 and all(suf in ("epi", "bold", "sbref") for suf in suffices):
                wf_inputs = getattr(fmap_wf.inputs, f"in_{estimator.bids_id}")
                wf_inputs.in_data = [str(s.path) for s in estimator.sources]
                wf_inputs.metadata = [s.metadata for s in estimator.sources]
            else:
                raise NotImplementedError("Sophisticated PEPOLAR schemes are unsupported.")
        elif estimator.method == fm.EstimatorType.ANAT:
            from sdcflows.workflows.fit.syn import init_syn_preprocessing_wf

            sources = [str(s.path) for s in estimator.sources if s.suffix == "bold"]
            source_meta = [s.metadata for s in estimator.sources if s.suffix == "bold"]
            syn_preprocessing_wf = init_syn_preprocessing_wf(
                omp_nthreads=config.nipype.omp_nthreads,
                debug=config.execution.sloppy,
                auto_bold_nss=True,
                t1w_inversion=False,
                name=f"syn_preprocessing_{estimator.bids_id}",
            )
            syn_preprocessing_wf.inputs.inputnode.in_epis = sources
            syn_preprocessing_wf.inputs.inputnode.in_meta = source_meta
            # fmt:off
            workflow.connect([
                (anat_preproc_wf, syn_preprocessing_wf, [
                    ("outputnode.t1w_preproc", "inputnode.in_anat"),
                    ("outputnode.t1w_mask", "inputnode.mask_anat"),
                ]),
                (fmap_select_std, syn_preprocessing_wf, [
                    ("std2anat_xfm", "inputnode.std2anat_xfm"),
                ]),
                (syn_preprocessing_wf, fmap_wf, [
                    ("outputnode.epi_ref", f"in_{estimator.bids_id}.epi_ref"),
                    ("outputnode.epi_mask", f"in_{estimator.bids_id}.epi_mask"),
                    ("outputnode.anat_ref", f"in_{estimator.bids_id}.anat_ref"),
                    ("outputnode.anat_mask", f"in_{estimator.bids_id}.anat_mask"),
                    ("outputnode.sd_prior", f"in_{estimator.bids_id}.sd_prior"),
                ]),
            ])
            # fmt:on
    return workflow
def _prefix(subid):
return subid if subid.startswith('sub-') else f'sub-{subid}'
| 23,060 | 37.435 | 100 | py |
fmriprep | fmriprep-master/fmriprep/workflows/tests.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Utilities and mocks for testing and documentation building."""
import os
import shutil
from contextlib import contextmanager
from pathlib import Path
from tempfile import mkdtemp
from pkg_resources import resource_filename as pkgrf
from toml import loads
@contextmanager
def mock_config():
    """Create a mock config for documentation and testing purposes.

    Loads the packaged test configuration, points the execution paths at
    fresh temporary directories, and yields control to the caller.  The
    temporary directories (and any fake ``$FREESURFER_HOME``) are removed
    on exit — including when the managed block raises — so repeated test
    runs do not leak state or disk space.
    """
    from .. import config

    # Provide a fake FreeSurfer home if none is configured in the environment.
    _old_fs = os.getenv('FREESURFER_HOME')
    if not _old_fs:
        os.environ['FREESURFER_HOME'] = mkdtemp()

    # Load all sections except 'environment' from the packaged test config.
    filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        if sectionname != 'environment':
            section = getattr(config, sectionname)
            section.load(configs, init=False)
    config.nipype.omp_nthreads = 1
    config.nipype.init()
    config.loggers.init()
    config.init_spaces()

    # Redirect work/output directories to disposable temporary folders.
    config.execution.work_dir = Path(mkdtemp())
    config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()
    config.execution.fmriprep_dir = Path(mkdtemp())
    config.execution.init()

    try:
        yield
    finally:
        # Clean up even if the managed block raised; previously an exception
        # would leak both temp dirs and leave the fake FREESURFER_HOME set.
        shutil.rmtree(config.execution.work_dir)
        shutil.rmtree(config.execution.fmriprep_dir)
        if not _old_fs:
            del os.environ['FREESURFER_HOME']
| 2,182 | 32.075758 | 89 | py |
fmriprep | fmriprep-master/fmriprep/workflows/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/workflows/bold/base.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Orchestrating the BOLD-preprocessing workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_func_preproc_wf
.. autofunction:: init_func_derivatives_wf
"""
import os
import nibabel as nb
import numpy as np
from nipype.interfaces import utility as niu
from nipype.interfaces.fsl import Split as FSLSplit
from nipype.pipeline import engine as pe
from niworkflows.utils.connections import listify, pop_file
from ... import config
from ...interfaces import DerivativesDataSink
from ...interfaces.reports import FunctionalSummary
from ...utils.meepi import combine_meepi_source
# BOLD workflows
from .confounds import init_bold_confs_wf, init_carpetplot_wf
from .hmc import init_bold_hmc_wf
from .outputs import init_func_derivatives_wf
from .registration import init_bold_reg_wf, init_bold_t1_trans_wf
from .resampling import (
init_bold_preproc_trans_wf,
init_bold_std_trans_wf,
init_bold_surf_wf,
)
from .stc import init_bold_stc_wf
from .t2s import init_bold_t2s_wf, init_t2s_reporting_wf
def init_func_preproc_wf(bold_file, has_fieldmap=False):
"""
This workflow controls the functional preprocessing stages of *fMRIPrep*.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.tests import mock_config
from fmriprep import config
from fmriprep.workflows.bold.base import init_func_preproc_wf
with mock_config():
bold_file = config.execution.bids_dir / "sub-01" / "func" \
/ "sub-01_task-mixedgamblestask_run-01_bold.nii.gz"
wf = init_func_preproc_wf(str(bold_file))
Parameters
----------
bold_file
Path to NIfTI file (single echo) or list of paths to NIfTI files (multi-echo)
has_fieldmap : :obj:`bool`
Signals the workflow to use inputnode fieldmap files
Inputs
------
bold_file
BOLD series NIfTI file
t1w_preproc
Bias-corrected structural template image
t1w_mask
Mask of the skull-stripped template image
t1w_dseg
Segmentation of preprocessed structural image, including
gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
t1w_aseg
Segmentation of structural image, done with FreeSurfer.
t1w_aparc
Parcellation of structural image, done with FreeSurfer.
t1w_tpms
List of tissue probability maps in T1w space
template
List of templates to target
anat2std_xfm
List of transform files, collated with templates
std2anat_xfm
List of inverse transform files, collated with templates
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
t1w2fsnative_xfm
LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
fsnative2t1w_xfm
LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
Outputs
-------
bold_t1
BOLD series, resampled to T1w space
bold_t1_ref
BOLD reference image, resampled to T1w space
bold2anat_xfm
Affine transform from BOLD reference space to T1w space
anat2bold_xfm
Affine transform from T1w space to BOLD reference space
hmc_xforms
Affine transforms for each BOLD volume to the BOLD reference
bold_mask_t1
BOLD series mask in T1w space
bold_aseg_t1
FreeSurfer ``aseg`` resampled to match ``bold_t1``
bold_aparc_t1
FreeSurfer ``aparc+aseg`` resampled to match ``bold_t1``
bold_std
BOLD series, resampled to template space
bold_std_ref
BOLD reference image, resampled to template space
bold_mask_std
BOLD series mask in template space
bold_aseg_std
FreeSurfer ``aseg`` resampled to match ``bold_std``
bold_aparc_std
FreeSurfer ``aparc+aseg`` resampled to match ``bold_std``
bold_native
BOLD series, with distortion corrections applied (native space)
bold_native_ref
BOLD reference image in native space
bold_mask_native
BOLD series mask in native space
bold_echos_native
Per-echo BOLD series, with distortion corrections applied
bold_cifti
BOLD CIFTI image
cifti_metadata
Path of metadata files corresponding to ``bold_cifti``.
surfaces
BOLD series, resampled to FreeSurfer surfaces
t2star_bold
Estimated T2\\* map in BOLD native space
t2star_t1
Estimated T2\\* map in T1w space
t2star_std
Estimated T2\\* map in template space
confounds
TSV of confounds
confounds_metadata
Confounds metadata dictionary
See Also
--------
* :py:func:`~niworkflows.func.util.init_bold_reference_wf`
* :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
* :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
* :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
* :py:func:`~fmriprep.workflows.bold.t2s.init_t2s_reporting_wf`
* :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
* :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
* :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confs_wf`
* :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
* :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
* :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
* :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
* :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
* :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
* :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
* :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.func.util import init_bold_reference_wf
from niworkflows.interfaces.nibabel import ApplyMask
from niworkflows.interfaces.reportlets.registration import (
SimpleBeforeAfterRPT as SimpleBeforeAfter,
)
from niworkflows.interfaces.utility import KeySelect
img = nb.load(bold_file[0] if isinstance(bold_file, (list, tuple)) else bold_file)
nvols = 1 if img.ndim < 4 else img.shape[3]
if nvols <= 5 - config.execution.sloppy:
config.loggers.workflow.warning(
f"Too short BOLD series (<= 5 timepoints). Skipping processing of <{bold_file}>."
)
return
mem_gb = {"filesize": 1, "resampled": 1, "largemem": 1}
bold_tlen = 10
# Have some options handy
omp_nthreads = config.nipype.omp_nthreads
freesurfer = config.workflow.run_reconall
spaces = config.workflow.spaces
fmriprep_dir = str(config.execution.fmriprep_dir)
freesurfer_spaces = spaces.get_fs_spaces()
project_goodvoxels = config.workflow.project_goodvoxels and config.workflow.cifti_output
# Extract BIDS entities and metadata from BOLD file(s)
entities = extract_entities(bold_file)
layout = config.execution.layout
# Extract metadata
all_metadata = [layout.get_metadata(fname) for fname in listify(bold_file)]
# Take first file as reference
ref_file = pop_file(bold_file)
metadata = all_metadata[0]
# get original image orientation
ref_orientation = get_img_orientation(ref_file)
echo_idxs = listify(entities.get("echo", []))
multiecho = len(echo_idxs) > 2
if len(echo_idxs) == 1:
config.loggers.workflow.warning(
f"Running a single echo <{ref_file}> from a seemingly multi-echo dataset."
)
bold_file = ref_file # Just in case - drop the list
if len(echo_idxs) == 2:
raise RuntimeError(
"Multi-echo processing requires at least three different echos (found two)."
)
if multiecho:
# Drop echo entity for future queries, have a boolean shorthand
entities.pop("echo", None)
# reorder echoes from shortest to largest
tes, bold_file = zip(
*sorted([(layout.get_metadata(bf)["EchoTime"], bf) for bf in bold_file])
)
ref_file = bold_file[0] # Reset reference to be the shortest TE
shapes = [nb.load(echo).shape for echo in bold_file]
if len(set(shapes)) != 1:
diagnostic = "\n".join(
f"{os.path.basename(echo)}: {shape}" for echo, shape in zip(bold_file, shapes)
)
raise RuntimeError(f"Multi-echo images found with mismatching shapes\n{diagnostic}")
if os.path.isfile(ref_file):
bold_tlen, mem_gb = _create_mem_gb(ref_file)
wf_name = _get_wf_name(ref_file)
config.loggers.workflow.debug(
"Creating bold processing workflow for <%s> (%.2f GB / %d TRs). "
"Memory resampled/largemem=%.2f/%.2f GB.",
ref_file,
mem_gb["filesize"],
bold_tlen,
mem_gb["resampled"],
mem_gb["largemem"],
)
# Find associated sbref, if possible
overrides = {
"suffix": "sbref",
"extension": [".nii", ".nii.gz"],
}
if config.execution.bids_filters:
overrides.update(config.execution.bids_filters.get('sbref', {}))
sb_ents = {**entities, **overrides}
sbref_files = layout.get(return_type="file", **sb_ents)
sbref_msg = f"No single-band-reference found for {os.path.basename(ref_file)}."
if sbref_files and "sbref" in config.workflow.ignore:
sbref_msg = "Single-band reference file(s) found and ignored."
sbref_files = []
elif sbref_files:
sbref_msg = "Using single-band reference file(s) {}.".format(
",".join([os.path.basename(sbf) for sbf in sbref_files])
)
config.loggers.workflow.info(sbref_msg)
if has_fieldmap:
estimator_key = get_estimator(layout, bold_file if not multiecho else bold_file[0])
if not estimator_key:
has_fieldmap = False
config.loggers.workflow.critical(
f"None of the available B0 fieldmaps are associated to <{bold_file}>"
)
else:
config.loggers.workflow.info(
f"Found usable B0-map (fieldmap) estimator(s) <{', '.join(estimator_key)}> "
f"to correct <{bold_file}> for susceptibility-derived distortions."
)
# Check whether STC must/can be run
run_stc = bool(metadata.get("SliceTiming")) and "slicetiming" not in config.workflow.ignore
# Build workflow
workflow = Workflow(name=wf_name)
workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"bold_file",
"subjects_dir",
"subject_id",
"t1w_preproc",
"t1w_mask",
"t1w_dseg",
"t1w_tpms",
"t1w_aseg",
"t1w_aparc",
"anat2std_xfm",
"std2anat_xfm",
"template",
"anat_ribbon",
"t1w2fsnative_xfm",
"fsnative2t1w_xfm",
"surfaces",
"morphometrics",
"sphere_reg_fsLR",
"fmap",
"fmap_ref",
"fmap_coeff",
"fmap_mask",
"fmap_id",
"sdc_method",
]
),
name="inputnode",
)
inputnode.inputs.bold_file = bold_file
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
"bold_t1",
"bold_t1_ref",
"bold2anat_xfm",
"anat2bold_xfm",
"hmc_xforms",
"bold_mask_t1",
"bold_aseg_t1",
"bold_aparc_t1",
"bold_std",
"bold_std_ref",
"bold_mask_std",
"bold_aseg_std",
"bold_aparc_std",
"bold_native",
"bold_native_ref",
"bold_mask_native",
"bold_echos_native",
"bold_cifti",
"cifti_metadata",
"surfaces",
"t2star_bold",
"t2star_t1",
"t2star_std",
"confounds",
"confounds_metadata",
]
),
name="outputnode",
)
# Generate a brain-masked conversion of the t1w
t1w_brain = pe.Node(ApplyMask(), name="t1w_brain")
# Track echo index - this allows us to treat multi- and single-echo workflows
# almost identically
echo_index = pe.Node(niu.IdentityInterface(fields=["echoidx"]), name="echo_index")
if multiecho:
echo_index.iterables = [("echoidx", range(len(bold_file)))]
else:
echo_index.inputs.echoidx = 0
# BOLD source: track original BOLD file(s)
bold_source = pe.Node(niu.Select(inlist=bold_file), name="bold_source")
# BOLD buffer: an identity used as a pointer to either the original BOLD
# or the STC'ed one for further use.
boldbuffer = pe.Node(niu.IdentityInterface(fields=["bold_file"]), name="boldbuffer")
summary = pe.Node(
FunctionalSummary(
slice_timing=run_stc,
registration=("FSL", "FreeSurfer")[freesurfer],
registration_dof=config.workflow.bold2t1w_dof,
registration_init=config.workflow.bold2t1w_init,
pe_direction=metadata.get("PhaseEncodingDirection"),
echo_idx=echo_idxs,
tr=metadata["RepetitionTime"],
orientation=ref_orientation,
),
name="summary",
mem_gb=config.DEFAULT_MEMORY_MIN_GB,
run_without_submitting=True,
)
summary.inputs.dummy_scans = config.workflow.dummy_scans
func_derivatives_wf = init_func_derivatives_wf(
bids_root=layout.root,
cifti_output=config.workflow.cifti_output,
freesurfer=freesurfer,
project_goodvoxels=project_goodvoxels,
all_metadata=all_metadata,
multiecho=multiecho,
output_dir=fmriprep_dir,
spaces=spaces,
)
func_derivatives_wf.inputs.inputnode.all_source_files = bold_file
func_derivatives_wf.inputs.inputnode.cifti_density = config.workflow.cifti_output
# fmt:off
workflow.connect([
(outputnode, func_derivatives_wf, [
("bold_t1", "inputnode.bold_t1"),
("bold_t1_ref", "inputnode.bold_t1_ref"),
("bold2anat_xfm", "inputnode.bold2anat_xfm"),
("anat2bold_xfm", "inputnode.anat2bold_xfm"),
("hmc_xforms", "inputnode.hmc_xforms"),
("bold_aseg_t1", "inputnode.bold_aseg_t1"),
("bold_aparc_t1", "inputnode.bold_aparc_t1"),
("bold_mask_t1", "inputnode.bold_mask_t1"),
("bold_native", "inputnode.bold_native"),
("bold_native_ref", "inputnode.bold_native_ref"),
("bold_mask_native", "inputnode.bold_mask_native"),
("bold_echos_native", "inputnode.bold_echos_native"),
("confounds", "inputnode.confounds"),
("surfaces", "inputnode.surf_files"),
("bold_cifti", "inputnode.bold_cifti"),
("cifti_metadata", "inputnode.cifti_metadata"),
("t2star_bold", "inputnode.t2star_bold"),
("t2star_t1", "inputnode.t2star_t1"),
("t2star_std", "inputnode.t2star_std"),
("confounds_metadata", "inputnode.confounds_metadata"),
("acompcor_masks", "inputnode.acompcor_masks"),
("tcompcor_mask", "inputnode.tcompcor_mask"),
]),
])
# fmt:on
# Generate a tentative boldref
initial_boldref_wf = init_bold_reference_wf(
name="initial_boldref_wf",
omp_nthreads=omp_nthreads,
bold_file=bold_file,
sbref_files=sbref_files,
multiecho=multiecho,
)
initial_boldref_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans
# Select validated BOLD files (orientations checked or corrected)
select_bold = pe.Node(niu.Select(), name="select_bold")
# Top-level BOLD splitter
bold_split = pe.Node(FSLSplit(dimension="t"), name="bold_split", mem_gb=mem_gb["filesize"] * 3)
# HMC on the BOLD
bold_hmc_wf = init_bold_hmc_wf(
name="bold_hmc_wf", mem_gb=mem_gb["filesize"], omp_nthreads=omp_nthreads
)
# calculate BOLD registration to T1w
bold_reg_wf = init_bold_reg_wf(
bold2t1w_dof=config.workflow.bold2t1w_dof,
bold2t1w_init=config.workflow.bold2t1w_init,
freesurfer=freesurfer,
mem_gb=mem_gb["resampled"],
name="bold_reg_wf",
omp_nthreads=omp_nthreads,
sloppy=config.execution.sloppy,
use_bbr=config.workflow.use_bbr,
use_compression=False,
)
# apply BOLD registration to T1w
bold_t1_trans_wf = init_bold_t1_trans_wf(
name="bold_t1_trans_wf",
freesurfer=freesurfer,
mem_gb=mem_gb["resampled"],
omp_nthreads=omp_nthreads,
use_compression=False,
)
bold_t1_trans_wf.inputs.inputnode.fieldwarp = "identity"
# get confounds
bold_confounds_wf = init_bold_confs_wf(
mem_gb=mem_gb["largemem"],
metadata=metadata,
freesurfer=freesurfer,
regressors_all_comps=config.workflow.regressors_all_comps,
regressors_fd_th=config.workflow.regressors_fd_th,
regressors_dvars_th=config.workflow.regressors_dvars_th,
name="bold_confounds_wf",
)
bold_confounds_wf.get_node("inputnode").inputs.t1_transform_flags = [False]
# SLICE-TIME CORRECTION (or bypass) #############################################
if run_stc:
bold_stc_wf = init_bold_stc_wf(name="bold_stc_wf", metadata=metadata)
# fmt:off
workflow.connect([
(initial_boldref_wf, bold_stc_wf, [("outputnode.skip_vols", "inputnode.skip_vols")]),
(select_bold, bold_stc_wf, [("out", "inputnode.bold_file")]),
(bold_stc_wf, boldbuffer, [("outputnode.stc_file", "bold_file")]),
])
# fmt:on
# bypass STC from original BOLD in both SE and ME cases
else:
workflow.connect([(select_bold, boldbuffer, [("out", "bold_file")])])
# MULTI-ECHO EPI DATA #############################################
if multiecho: # instantiate relevant interfaces, imports
split_opt_comb = bold_split.clone(name="split_opt_comb")
inputnode.inputs.bold_file = ref_file # Replace reference w first echo
join_echos = pe.JoinNode(
niu.IdentityInterface(fields=["bold_files"]),
joinsource="echo_index",
joinfield=["bold_files"],
name="join_echos",
)
# create optimal combination, adaptive T2* map
bold_t2s_wf = init_bold_t2s_wf(
echo_times=tes,
mem_gb=mem_gb["filesize"],
omp_nthreads=omp_nthreads,
name="bold_t2smap_wf",
)
t2s_reporting_wf = init_t2s_reporting_wf()
ds_report_t2scomp = pe.Node(
DerivativesDataSink(
desc="t2scomp",
datatype="figures",
dismiss_entities=("echo",),
),
name="ds_report_t2scomp",
run_without_submitting=True,
)
ds_report_t2star_hist = pe.Node(
DerivativesDataSink(
desc="t2starhist",
datatype="figures",
dismiss_entities=("echo",),
),
name="ds_report_t2star_hist",
run_without_submitting=True,
)
bold_final = pe.Node(
niu.IdentityInterface(fields=["bold", "boldref", "mask", "bold_echos", "t2star"]),
name="bold_final",
)
# Generate a final BOLD reference
# This BOLD references *does not use* single-band reference images.
final_boldref_wf = init_bold_reference_wf(
name="final_boldref_wf",
omp_nthreads=omp_nthreads,
multiecho=multiecho,
)
final_boldref_wf.__desc__ = None # Unset description to avoid second appearance
# MAIN WORKFLOW STRUCTURE #######################################################
# fmt:off
workflow.connect([
# Prepare masked T1w image
(inputnode, t1w_brain, [("t1w_preproc", "in_file"),
("t1w_mask", "in_mask")]),
# Select validated bold files per-echo
(initial_boldref_wf, select_bold, [("outputnode.all_bold_files", "inlist")]),
# BOLD buffer has slice-time corrected if it was run, original otherwise
(boldbuffer, bold_split, [("bold_file", "in_file")]),
# HMC
(initial_boldref_wf, bold_hmc_wf, [
("outputnode.raw_ref_image", "inputnode.raw_ref_image"),
("outputnode.bold_file", "inputnode.bold_file"),
]),
(bold_hmc_wf, outputnode, [
("outputnode.xforms", "hmc_xforms"),
]),
# EPI-T1w registration workflow
(inputnode, bold_reg_wf, [
("t1w_dseg", "inputnode.t1w_dseg"),
# Undefined if --fs-no-reconall, but this is safe
("subjects_dir", "inputnode.subjects_dir"),
("subject_id", "inputnode.subject_id"),
("fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"),
]),
(bold_final, bold_reg_wf, [
("boldref", "inputnode.ref_bold_brain")]),
(t1w_brain, bold_reg_wf, [("out_file", "inputnode.t1w_brain")]),
(inputnode, bold_t1_trans_wf, [
("bold_file", "inputnode.name_source"),
("t1w_mask", "inputnode.t1w_mask"),
("t1w_aseg", "inputnode.t1w_aseg"),
("t1w_aparc", "inputnode.t1w_aparc"),
]),
(t1w_brain, bold_t1_trans_wf, [("out_file", "inputnode.t1w_brain")]),
(bold_reg_wf, outputnode, [
("outputnode.itk_bold_to_t1", "bold2anat_xfm"),
("outputnode.itk_t1_to_bold", "anat2bold_xfm"),
]),
(bold_reg_wf, bold_t1_trans_wf, [
("outputnode.itk_bold_to_t1", "inputnode.itk_bold_to_t1"),
]),
(bold_final, bold_t1_trans_wf, [
("mask", "inputnode.ref_bold_mask"),
("boldref", "inputnode.ref_bold_brain"),
]),
(bold_t1_trans_wf, outputnode, [
("outputnode.bold_t1", "bold_t1"),
("outputnode.bold_t1_ref", "bold_t1_ref"),
("outputnode.bold_aseg_t1", "bold_aseg_t1"),
("outputnode.bold_aparc_t1", "bold_aparc_t1"),
]),
# Connect bold_confounds_wf
(inputnode, bold_confounds_wf, [
("t1w_tpms", "inputnode.t1w_tpms"),
("t1w_mask", "inputnode.t1w_mask"),
]),
(bold_hmc_wf, bold_confounds_wf, [
("outputnode.movpar_file", "inputnode.movpar_file"),
("outputnode.rmsd_file", "inputnode.rmsd_file"),
]),
(bold_reg_wf, bold_confounds_wf, [
("outputnode.itk_t1_to_bold", "inputnode.t1_bold_xform")
]),
(initial_boldref_wf, bold_confounds_wf, [
("outputnode.skip_vols", "inputnode.skip_vols"),
]),
(initial_boldref_wf, final_boldref_wf, [
("outputnode.skip_vols", "inputnode.dummy_scans"),
]),
(final_boldref_wf, bold_final, [
("outputnode.ref_image", "boldref"),
("outputnode.bold_mask", "mask"),
]),
(bold_final, bold_confounds_wf, [
("bold", "inputnode.bold"),
("mask", "inputnode.bold_mask"),
]),
(bold_confounds_wf, outputnode, [
("outputnode.confounds_file", "confounds"),
("outputnode.confounds_metadata", "confounds_metadata"),
("outputnode.acompcor_masks", "acompcor_masks"),
("outputnode.tcompcor_mask", "tcompcor_mask"),
]),
# Native-space BOLD files (if calculated)
(bold_final, outputnode, [
("bold", "bold_native"),
("boldref", "bold_native_ref"),
("mask", "bold_mask_native"),
("bold_echos", "bold_echos_native"),
("t2star", "t2star_bold"),
]),
# Summary
(initial_boldref_wf, summary, [("outputnode.algo_dummy_scans", "algo_dummy_scans")]),
(bold_reg_wf, summary, [("outputnode.fallback", "fallback")]),
(outputnode, summary, [("confounds", "confounds_file")]),
# Select echo indices for original/validated BOLD files
(echo_index, bold_source, [("echoidx", "index")]),
(echo_index, select_bold, [("echoidx", "index")]),
])
# fmt:on
# for standard EPI data, pass along correct file
if not multiecho:
# fmt:off
workflow.connect([
(inputnode, func_derivatives_wf, [("bold_file", "inputnode.source_file")]),
(bold_split, bold_t1_trans_wf, [("out_files", "inputnode.bold_split")]),
(bold_hmc_wf, bold_t1_trans_wf, [("outputnode.xforms", "inputnode.hmc_xforms")]),
])
# fmt:on
else: # for meepi, use optimal combination
# fmt:off
workflow.connect([
# update name source for optimal combination
(inputnode, func_derivatives_wf, [
(("bold_file", combine_meepi_source), "inputnode.source_file"),
]),
(join_echos, bold_t2s_wf, [("bold_files", "inputnode.bold_file")]),
(join_echos, bold_final, [("bold_files", "bold_echos")]),
(bold_t2s_wf, split_opt_comb, [("outputnode.bold", "in_file")]),
(split_opt_comb, bold_t1_trans_wf, [("out_files", "inputnode.bold_split")]),
(bold_t2s_wf, bold_final, [("outputnode.bold", "bold"),
("outputnode.t2star_map", "t2star")]),
(inputnode, t2s_reporting_wf, [("t1w_dseg", "inputnode.label_file")]),
(bold_reg_wf, t2s_reporting_wf, [
("outputnode.itk_t1_to_bold", "inputnode.label_bold_xform")
]),
(bold_final, t2s_reporting_wf, [("t2star", "inputnode.t2star_file"),
("boldref", "inputnode.boldref")]),
(t2s_reporting_wf, ds_report_t2scomp, [('outputnode.t2s_comp_report', 'in_file')]),
(t2s_reporting_wf, ds_report_t2star_hist, [("outputnode.t2star_hist", "in_file")]),
])
# fmt:on
# Already applied in bold_bold_trans_wf, which inputs to bold_t2s_wf
bold_t1_trans_wf.inputs.inputnode.hmc_xforms = "identity"
# Map final BOLD mask into T1w space (if required)
nonstd_spaces = set(spaces.get_nonstandard())
if nonstd_spaces.intersection(("T1w", "anat")):
from niworkflows.interfaces.fixes import (
FixHeaderApplyTransforms as ApplyTransforms,
)
boldmask_to_t1w = pe.Node(
ApplyTransforms(interpolation="MultiLabel"),
name="boldmask_to_t1w",
mem_gb=0.1,
)
# fmt:off
workflow.connect([
(bold_reg_wf, boldmask_to_t1w, [("outputnode.itk_bold_to_t1", "transforms")]),
(bold_t1_trans_wf, boldmask_to_t1w, [("outputnode.bold_mask_t1", "reference_image")]),
(bold_final, boldmask_to_t1w, [("mask", "input_image")]),
(boldmask_to_t1w, outputnode, [("output_image", "bold_mask_t1")]),
])
# fmt:on
if multiecho:
t2star_to_t1w = pe.Node(
ApplyTransforms(interpolation="LanczosWindowedSinc", float=True),
name="t2star_to_t1w",
mem_gb=0.1,
)
# fmt:off
workflow.connect([
(bold_reg_wf, t2star_to_t1w, [("outputnode.itk_bold_to_t1", "transforms")]),
(bold_t1_trans_wf, t2star_to_t1w, [
("outputnode.bold_mask_t1", "reference_image")
]),
(bold_final, t2star_to_t1w, [("t2star", "input_image")]),
(t2star_to_t1w, outputnode, [("output_image", "t2star_t1")]),
])
# fmt:on
if spaces.get_spaces(nonstandard=False, dim=(3,)):
# Apply transforms in 1 shot
bold_std_trans_wf = init_bold_std_trans_wf(
freesurfer=freesurfer,
mem_gb=mem_gb["resampled"],
omp_nthreads=omp_nthreads,
spaces=spaces,
multiecho=multiecho,
name="bold_std_trans_wf",
use_compression=not config.execution.low_mem,
)
bold_std_trans_wf.inputs.inputnode.fieldwarp = "identity"
# fmt:off
workflow.connect([
(inputnode, bold_std_trans_wf, [
("template", "inputnode.templates"),
("anat2std_xfm", "inputnode.anat2std_xfm"),
("bold_file", "inputnode.name_source"),
("t1w_aseg", "inputnode.bold_aseg"),
("t1w_aparc", "inputnode.bold_aparc"),
]),
(bold_final, bold_std_trans_wf, [
("mask", "inputnode.bold_mask"),
("t2star", "inputnode.t2star"),
]),
(bold_reg_wf, bold_std_trans_wf, [
("outputnode.itk_bold_to_t1", "inputnode.itk_bold_to_t1"),
]),
(bold_std_trans_wf, outputnode, [
("outputnode.bold_std", "bold_std"),
("outputnode.bold_std_ref", "bold_std_ref"),
("outputnode.bold_mask_std", "bold_mask_std"),
]),
])
# fmt:on
if freesurfer:
# fmt:off
workflow.connect([
(bold_std_trans_wf, func_derivatives_wf, [
("outputnode.bold_aseg_std", "inputnode.bold_aseg_std"),
("outputnode.bold_aparc_std", "inputnode.bold_aparc_std"),
]),
(bold_std_trans_wf, outputnode, [
("outputnode.bold_aseg_std", "bold_aseg_std"),
("outputnode.bold_aparc_std", "bold_aparc_std"),
]),
])
# fmt:on
if not multiecho:
# fmt:off
workflow.connect([
(bold_split, bold_std_trans_wf, [("out_files", "inputnode.bold_split")]),
(bold_hmc_wf, bold_std_trans_wf, [
("outputnode.xforms", "inputnode.hmc_xforms"),
]),
])
# fmt:on
else:
# fmt:off
workflow.connect([
(split_opt_comb, bold_std_trans_wf, [("out_files", "inputnode.bold_split")]),
(bold_std_trans_wf, outputnode, [("outputnode.t2star_std", "t2star_std")]),
])
# fmt:on
# Already applied in bold_bold_trans_wf, which inputs to bold_t2s_wf
bold_std_trans_wf.inputs.inputnode.hmc_xforms = "identity"
# fmt:off
# func_derivatives_wf internally parametrizes over snapshotted spaces.
workflow.connect([
(bold_std_trans_wf, func_derivatives_wf, [
("outputnode.template", "inputnode.template"),
("outputnode.spatial_reference", "inputnode.spatial_reference"),
("outputnode.bold_std_ref", "inputnode.bold_std_ref"),
("outputnode.bold_std", "inputnode.bold_std"),
("outputnode.bold_mask_std", "inputnode.bold_mask_std"),
]),
])
# fmt:on
# SURFACES ##################################################################################
# Freesurfer
if freesurfer and freesurfer_spaces:
config.loggers.workflow.debug("Creating BOLD surface-sampling workflow.")
bold_surf_wf = init_bold_surf_wf(
mem_gb=mem_gb["resampled"],
surface_spaces=freesurfer_spaces,
medial_surface_nan=config.workflow.medial_surface_nan,
name="bold_surf_wf",
)
# fmt:off
workflow.connect([
(inputnode, bold_surf_wf, [
("subjects_dir", "inputnode.subjects_dir"),
("subject_id", "inputnode.subject_id"),
("t1w2fsnative_xfm", "inputnode.t1w2fsnative_xfm"),
]),
(bold_t1_trans_wf, bold_surf_wf, [("outputnode.bold_t1", "inputnode.source_file")]),
(bold_surf_wf, outputnode, [("outputnode.surfaces", "surfaces")]),
(bold_surf_wf, func_derivatives_wf, [("outputnode.target", "inputnode.surf_refs")]),
])
# fmt:on
# CIFTI output
if config.workflow.cifti_output:
from .resampling import init_bold_fsLR_resampling_wf, init_bold_grayords_wf
bold_fsLR_resampling_wf = init_bold_fsLR_resampling_wf(
estimate_goodvoxels=project_goodvoxels,
grayord_density=config.workflow.cifti_output,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb["resampled"],
)
bold_grayords_wf = init_bold_grayords_wf(
grayord_density=config.workflow.cifti_output,
mem_gb=mem_gb["resampled"],
repetition_time=metadata["RepetitionTime"],
)
# fmt:off
workflow.connect([
(inputnode, bold_fsLR_resampling_wf, [
("surfaces", "inputnode.surfaces"),
("morphometrics", "inputnode.morphometrics"),
("sphere_reg_fsLR", "inputnode.sphere_reg_fsLR"),
("anat_ribbon", "inputnode.anat_ribbon"),
]),
(bold_t1_trans_wf, bold_fsLR_resampling_wf, [
("outputnode.bold_t1", "inputnode.bold_file"),
]),
(bold_std_trans_wf, bold_grayords_wf, [
("outputnode.bold_std", "inputnode.bold_std"),
("outputnode.spatial_reference", "inputnode.spatial_reference"),
]),
(bold_fsLR_resampling_wf, bold_grayords_wf, [
("outputnode.bold_fsLR", "inputnode.bold_fsLR"),
]),
(bold_fsLR_resampling_wf, func_derivatives_wf, [
("outputnode.goodvoxels_mask", "inputnode.goodvoxels_mask"),
]),
(bold_grayords_wf, outputnode, [
("outputnode.cifti_bold", "bold_cifti"),
("outputnode.cifti_metadata", "cifti_metadata"),
]),
])
# fmt:on
if spaces.get_spaces(nonstandard=False, dim=(3,)):
carpetplot_wf = init_carpetplot_wf(
mem_gb=mem_gb["resampled"],
metadata=metadata,
cifti_output=config.workflow.cifti_output,
name="carpetplot_wf",
)
# Xform to "MNI152NLin2009cAsym" is always computed.
carpetplot_select_std = pe.Node(
KeySelect(fields=["std2anat_xfm"], key="MNI152NLin2009cAsym"),
name="carpetplot_select_std",
run_without_submitting=True,
)
if config.workflow.cifti_output:
# fmt:off
workflow.connect(
bold_grayords_wf, "outputnode.cifti_bold", carpetplot_wf, "inputnode.cifti_bold",
)
# fmt:on
def _last(inlist):
return inlist[-1]
# fmt:off
workflow.connect([
(initial_boldref_wf, carpetplot_wf, [
("outputnode.skip_vols", "inputnode.dummy_scans"),
]),
(inputnode, carpetplot_select_std, [("std2anat_xfm", "std2anat_xfm"),
("template", "keys")]),
(carpetplot_select_std, carpetplot_wf, [
("std2anat_xfm", "inputnode.std2anat_xfm"),
]),
(bold_final, carpetplot_wf, [
("bold", "inputnode.bold"),
("mask", "inputnode.bold_mask"),
]),
(bold_reg_wf, carpetplot_wf, [
("outputnode.itk_t1_to_bold", "inputnode.t1_bold_xform"),
]),
(bold_confounds_wf, carpetplot_wf, [
("outputnode.confounds_file", "inputnode.confounds_file"),
("outputnode.crown_mask", "inputnode.crown_mask"),
(("outputnode.acompcor_masks", _last), "inputnode.acompcor_mask"),
]),
])
# fmt:on
# REPORTING ############################################################
ds_report_summary = pe.Node(
DerivativesDataSink(desc="summary", datatype="figures", dismiss_entities=("echo",)),
name="ds_report_summary",
run_without_submitting=True,
mem_gb=config.DEFAULT_MEMORY_MIN_GB,
)
ds_report_validation = pe.Node(
DerivativesDataSink(desc="validation", datatype="figures", dismiss_entities=("echo",)),
name="ds_report_validation",
run_without_submitting=True,
mem_gb=config.DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(summary, ds_report_summary, [("out_report", "in_file")]),
(initial_boldref_wf, ds_report_validation, [("outputnode.validation_report", "in_file")]),
])
# fmt:on
# Fill-in datasinks of reportlets seen so far
for node in workflow.list_node_names():
if node.split(".")[-1].startswith("ds_report"):
workflow.get_node(node).inputs.base_directory = fmriprep_dir
workflow.get_node(node).inputs.source_file = ref_file
if not has_fieldmap:
# Finalize workflow without SDC connections
summary.inputs.distortion_correction = "None"
# Resample in native space in just one shot
bold_bold_trans_wf = init_bold_preproc_trans_wf(
mem_gb=mem_gb["resampled"],
omp_nthreads=omp_nthreads,
use_compression=not config.execution.low_mem,
use_fieldwarp=False,
name="bold_bold_trans_wf",
)
bold_bold_trans_wf.inputs.inputnode.fieldwarp = "identity"
# fmt:off
workflow.connect([
# Connect bold_bold_trans_wf
(bold_source, bold_bold_trans_wf, [("out", "inputnode.name_source")]),
(bold_split, bold_bold_trans_wf, [("out_files", "inputnode.bold_file")]),
(bold_hmc_wf, bold_bold_trans_wf, [
("outputnode.xforms", "inputnode.hmc_xforms"),
]),
])
workflow.connect([
(bold_bold_trans_wf, bold_final, [("outputnode.bold", "bold")]),
(bold_bold_trans_wf, final_boldref_wf, [
("outputnode.bold", "inputnode.bold_file"),
]),
] if not multiecho else [
(initial_boldref_wf, bold_t2s_wf, [
("outputnode.bold_mask", "inputnode.bold_mask"),
]),
(bold_bold_trans_wf, join_echos, [
("outputnode.bold", "bold_files"),
]),
(join_echos, final_boldref_wf, [
("bold_files", "inputnode.bold_file"),
]),
])
# fmt:on
return workflow
from niworkflows.interfaces.utility import KeySelect
from sdcflows.workflows.apply.correction import init_unwarp_wf
from sdcflows.workflows.apply.registration import init_coeff2epi_wf
coeff2epi_wf = init_coeff2epi_wf(
debug="fieldmaps" in config.execution.debug,
omp_nthreads=config.nipype.omp_nthreads,
sloppy=config.execution.sloppy,
write_coeff=True,
)
unwarp_wf = init_unwarp_wf(
free_mem=config.environment.free_mem,
debug="fieldmaps" in config.execution.debug,
omp_nthreads=config.nipype.omp_nthreads,
)
unwarp_wf.inputs.inputnode.metadata = metadata
output_select = pe.Node(
KeySelect(fields=["fmap", "fmap_ref", "fmap_coeff", "fmap_mask", "sdc_method"]),
name="output_select",
run_without_submitting=True,
)
output_select.inputs.key = estimator_key[0]
if len(estimator_key) > 1:
config.loggers.workflow.warning(
f"Several fieldmaps <{', '.join(estimator_key)}> are "
f"'IntendedFor' <{bold_file}>, using {estimator_key[0]}"
)
sdc_report = pe.Node(
SimpleBeforeAfter(
before_label="Distorted",
after_label="Corrected",
dismiss_affine=True,
),
name="sdc_report",
mem_gb=0.1,
)
ds_report_sdc = pe.Node(
DerivativesDataSink(
base_directory=fmriprep_dir,
desc="sdc",
suffix="bold",
datatype="figures",
dismiss_entities=("echo",),
),
name="ds_report_sdc",
run_without_submitting=True,
)
# fmt:off
workflow.connect([
(inputnode, output_select, [("fmap", "fmap"),
("fmap_ref", "fmap_ref"),
("fmap_coeff", "fmap_coeff"),
("fmap_mask", "fmap_mask"),
("sdc_method", "sdc_method"),
("fmap_id", "keys")]),
(output_select, coeff2epi_wf, [
("fmap_ref", "inputnode.fmap_ref"),
("fmap_coeff", "inputnode.fmap_coeff"),
("fmap_mask", "inputnode.fmap_mask")]),
(output_select, summary, [("sdc_method", "distortion_correction")]),
(initial_boldref_wf, coeff2epi_wf, [
("outputnode.ref_image", "inputnode.target_ref"),
("outputnode.bold_mask", "inputnode.target_mask")]),
(initial_boldref_wf, unwarp_wf, [
("outputnode.ref_image", "inputnode.distorted_ref"),
]),
(coeff2epi_wf, unwarp_wf, [
("outputnode.fmap_coeff", "inputnode.fmap_coeff")]),
(bold_hmc_wf, unwarp_wf, [
("outputnode.xforms", "inputnode.hmc_xforms")]),
(initial_boldref_wf, sdc_report, [
("outputnode.ref_image", "before")]),
(bold_split, unwarp_wf, [
("out_files", "inputnode.distorted")]),
(final_boldref_wf, sdc_report, [
("outputnode.ref_image", "after"),
("outputnode.bold_mask", "wm_seg")]),
(inputnode, ds_report_sdc, [("bold_file", "source_file")]),
(sdc_report, ds_report_sdc, [("out_report", "in_file")]),
])
# fmt:on
if "fieldmaps" in config.execution.debug:
# Generate additional reportlets to assess SDC
from sdcflows.interfaces.reportlets import FieldmapReportlet
# First, one for checking the co-registration between fieldmap and EPI
sdc_coreg_report = pe.Node(
SimpleBeforeAfter(
before_label="Distorted target",
after_label="Fieldmap ref.",
),
name="sdc_coreg_report",
mem_gb=0.1,
)
ds_report_sdc_coreg = pe.Node(
DerivativesDataSink(
base_directory=fmriprep_dir,
datatype="figures",
desc="fmapCoreg",
dismiss_entities=("echo",),
suffix="bold",
),
name="ds_report_sdc_coreg",
run_without_submitting=True,
)
# Second, showing the fieldmap reconstructed from coefficients in the EPI space
fmap_report = pe.Node(FieldmapReportlet(), "fmap_report")
ds_fmap_report = pe.Node(
DerivativesDataSink(
base_directory=fmriprep_dir,
datatype="figures",
desc="fieldmap",
dismiss_entities=("echo",),
suffix="bold",
),
name="ds_fmap_report",
run_without_submitting=True,
)
# fmt:off
workflow.connect([
(initial_boldref_wf, sdc_coreg_report, [
("outputnode.ref_image", "before"),
]),
(coeff2epi_wf, sdc_coreg_report, [
("coregister.inverse_warped_image", "after"),
]),
(final_boldref_wf, sdc_coreg_report, [
("outputnode.bold_mask", "wm_seg"),
]),
(inputnode, ds_report_sdc_coreg, [("bold_file", "source_file")]),
(sdc_coreg_report, ds_report_sdc_coreg, [("out_report", "in_file")]),
(unwarp_wf, fmap_report, [(("outputnode.fieldmap", pop_file), "fieldmap")]),
(coeff2epi_wf, fmap_report, [
("coregister.inverse_warped_image", "reference"),
]),
(final_boldref_wf, fmap_report, [
("outputnode.bold_mask", "mask"),
]),
(fmap_report, ds_fmap_report, [("out_report", "in_file")]),
(inputnode, ds_fmap_report, [("bold_file", "source_file")]),
])
# fmt:on
if not multiecho:
# fmt:off
workflow.connect([
(unwarp_wf, bold_final, [("outputnode.corrected", "bold")]),
# remaining workflow connections
(unwarp_wf, final_boldref_wf, [
("outputnode.corrected", "inputnode.bold_file"),
]),
(unwarp_wf, bold_t1_trans_wf, [
# TEMPORARY: For the moment we can't use frame-wise fieldmaps
(("outputnode.fieldwarp_ref", pop_file), "inputnode.fieldwarp"),
]),
(unwarp_wf, bold_std_trans_wf, [
# TEMPORARY: For the moment we can't use frame-wise fieldmaps
(("outputnode.fieldwarp_ref", pop_file), "inputnode.fieldwarp"),
]),
])
# fmt:on
return workflow
# Finalize connections if ME-EPI
join_sdc_echos = pe.JoinNode(
niu.IdentityInterface(
fields=[
"fieldmap",
"fieldwarp",
"corrected",
"corrected_ref",
"corrected_mask",
]
),
joinsource="echo_index",
joinfield=[
"fieldmap",
"fieldwarp",
"corrected",
"corrected_ref",
"corrected_mask",
],
name="join_sdc_echos",
)
def _dpop(list_of_lists):
return list_of_lists[0][0]
# fmt:off
workflow.connect([
(unwarp_wf, join_echos, [
("outputnode.corrected", "bold_files"),
]),
(unwarp_wf, join_sdc_echos, [
("outputnode.fieldmap", "fieldmap"),
("outputnode.fieldwarp", "fieldwarp"),
("outputnode.corrected", "corrected"),
("outputnode.corrected_ref", "corrected_ref"),
("outputnode.corrected_mask", "corrected_mask"),
]),
# remaining workflow connections
(join_sdc_echos, final_boldref_wf, [
("corrected", "inputnode.bold_file"),
]),
(join_sdc_echos, bold_t2s_wf, [
(("corrected_mask", pop_file), "inputnode.bold_mask"),
]),
])
# fmt:on
return workflow
def _create_mem_gb(bold_fname):
    """Estimate memory requirements (GB) for processing a BOLD series.

    Parameters
    ----------
    bold_fname : str
        Path to the BOLD image to be processed.

    Returns
    -------
    tuple
        ``(n_volumes, estimates)`` where ``n_volumes`` is the size of the
        image's last axis and ``estimates`` is a dict with ``"filesize"``,
        ``"resampled"`` and ``"largemem"`` figures in GB.
    """
    img = nb.load(bold_fname)
    voxel_count = int(np.prod(img.shape, dtype='u8'))
    # Assume tools will coerce to 8-byte floats to be safe
    size_gb = 8 * voxel_count / (1024 ** 3)
    n_volumes = img.shape[-1]
    estimates = {
        "filesize": size_gb,
        "resampled": size_gb * 4,
        "largemem": size_gb * (max(n_volumes / 100, 1.0) + 4),
    }
    return n_volumes, estimates
def _get_wf_name(bold_fname):
    """
    Derive the workflow name for supplied BOLD file.
    >>> _get_wf_name("/completely/made/up/path/sub-01_task-nback_bold.nii.gz")
    'func_preproc_task_nback_wf'
    >>> _get_wf_name("/completely/made/up/path/sub-01_task-nback_run-01_echo-1_bold.nii.gz")
    'func_preproc_task_nback_run_01_echo_1_wf'
    """
    from nipype.utils.filemanip import split_filename

    # Strip directory and extension(s), then drop the leading subject entity
    basename = split_filename(bold_fname)[1]
    sans_subject = "_".join(basename.split("_")[1:])
    # Sanitize remaining separators and swap the "_bold" suffix for "_wf"
    sanitized = sans_subject.replace(".", "_").replace(" ", "").replace("-", "_")
    return "func_preproc_" + sanitized.replace("_bold", "_wf")
def _to_join(in_file, join_file):
    """Concatenate the columns of two TSV files, unless ``join_file`` is ``None``."""
    from niworkflows.interfaces.utility import JoinTSVColumns

    # Nothing to merge - pass the original file through untouched
    if join_file is None:
        return in_file
    result = JoinTSVColumns(in_file=in_file, join_file=join_file).run()
    return result.outputs.out_file
def extract_entities(file_list):
    """
    Return a dictionary of common entities given a list of files.
    Examples
    --------
    >>> extract_entities("sub-01/anat/sub-01_T1w.nii.gz")
    {'subject': '01', 'suffix': 'T1w', 'datatype': 'anat', 'extension': '.nii.gz'}
    >>> extract_entities(["sub-01/anat/sub-01_T1w.nii.gz"] * 2)
    {'subject': '01', 'suffix': 'T1w', 'datatype': 'anat', 'extension': '.nii.gz'}
    >>> extract_entities(["sub-01/anat/sub-01_run-1_T1w.nii.gz",
    ...                   "sub-01/anat/sub-01_run-2_T1w.nii.gz"])
    {'subject': '01', 'run': [1, 2], 'suffix': 'T1w', 'datatype': 'anat', 'extension': '.nii.gz'}
    """
    from collections import defaultdict
    from bids.layout import parse_file_entities

    def _collapse(values):
        # Collapse repeated values to a scalar; keep a sorted list otherwise
        values = sorted(set(values))
        return values[0] if len(values) == 1 else values

    entities = defaultdict(list)
    for fname in listify(file_list):
        for key, value in parse_file_entities(fname).items():
            entities[key].append(value)
    return {key: _collapse(values) for key, values in entities.items()}
def get_img_orientation(imgf):
    """Return the axis-orientation codes of the image as a single string."""
    axcodes = nb.aff2axcodes(nb.load(imgf).affine)
    return "".join(axcodes)
def get_estimator(layout, fname):
    """Return the fieldmap-estimator identifier(s) associated with ``fname``.

    The ``B0FieldSource`` metadata entry takes precedence; when it is absent,
    fall back to matching the file against ``IntendedFor`` declarations.
    """
    b0_source = layout.get_metadata(fname).get("B0FieldSource")
    if isinstance(b0_source, str):
        return (b0_source,)
    if b0_source is not None:
        return b0_source
    # Fallback to IntendedFor
    import re
    from pathlib import Path
    from sdcflows.fieldmaps import get_identifier

    relpath = str(Path(fname).relative_to(layout.root))
    intended_rel = re.sub(r"^sub-[a-zA-Z0-9]*/", "", relpath)
    return get_identifier(intended_rel)
| 51,518 | 36.660088 | 99 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/registration.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Registration workflows
++++++++++++++++++++++
.. autofunction:: init_bold_reg_wf
.. autofunction:: init_bold_t1_trans_wf
.. autofunction:: init_bbreg_wf
.. autofunction:: init_fsl_bbr_wf
"""
import os
import os.path as op
import typing as ty
import pkg_resources as pkgr
from nipype.interfaces import c3, fsl
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from ... import config
from ...interfaces import DerivativesDataSink
DEFAULT_MEMORY_MIN_GB = config.DEFAULT_MEMORY_MIN_GB
LOGGER = config.loggers.workflow
AffineDOF = ty.Literal[6, 9, 12]
RegistrationInit = ty.Literal['register', 'header']
def init_bold_reg_wf(
    freesurfer: bool,
    use_bbr: bool,
    bold2t1w_dof: AffineDOF,
    bold2t1w_init: RegistrationInit,
    mem_gb: float,
    omp_nthreads: int,
    name: str = 'bold_reg_wf',
    sloppy: bool = False,
    use_compression: bool = True,
    write_report: bool = True,
):
    """
    Build a workflow to run same-subject, BOLD-to-T1w image-registration.
    Calculates the registration between a reference BOLD image and T1w-space
    using a boundary-based registration (BBR) cost function.
    If FreeSurfer-based preprocessing is enabled, the ``bbregister`` utility
    is used to align the BOLD images to the reconstructed subject, and the
    resulting transform is adjusted to target the T1 space.
    If FreeSurfer-based preprocessing is disabled, FSL FLIRT is used with the
    BBR cost function to directly target the T1 space.
    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes
            from fmriprep.workflows.bold.registration import init_bold_reg_wf
            wf = init_bold_reg_wf(freesurfer=True,
                                  mem_gb=3,
                                  omp_nthreads=1,
                                  use_bbr=True,
                                  bold2t1w_dof=9,
                                  bold2t1w_init='register')
    Parameters
    ----------
    freesurfer : :obj:`bool`
        Enable FreeSurfer functional registration (bbregister)
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold2t1w_init : str, 'header' or 'register'
        If ``'header'``, use header information for initialization of BOLD and T1 images.
        If ``'register'``, align volumes by their centers.
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_reg_wf``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from BOLD to T1
    write_report : :obj:`bool`
        Whether a reportlet should be stored
    Inputs
    ------
    ref_bold_brain
        Reference image to which BOLD series is aligned
        If ``fieldwarp == True``, ``ref_bold_brain`` should be unwarped
    t1w_brain
        Skull-stripped ``t1w_preproc``
    t1w_dseg
        Segmentation of preprocessed structural image, including
        gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    fsnative2t1w_xfm
        LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
    Outputs
    -------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    itk_t1_to_bold
        Affine transform from T1 space to BOLD space (ITK format)
    fallback
        Boolean indicating whether BBR was rejected (mri_coreg registration returned)
    See Also
    --------
      * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
      * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                'ref_bold_brain',
                't1w_brain',
                't1w_dseg',
                'subjects_dir',
                'subject_id',
                'fsnative2t1w_xfm',
            ]
        ),
        name='inputnode',
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['itk_bold_to_t1', 'itk_t1_to_bold', 'fallback']),
        name='outputnode',
    )
    # Select the registration backend: FreeSurfer's bbregister vs. FSL's FLIRT-BBR.
    # Both sub-workflows expose the same input/output node interface.
    if freesurfer:
        bbr_wf = init_bbreg_wf(
            use_bbr=use_bbr,
            bold2t1w_dof=bold2t1w_dof,
            bold2t1w_init=bold2t1w_init,
            omp_nthreads=omp_nthreads,
        )
    else:
        bbr_wf = init_fsl_bbr_wf(
            use_bbr=use_bbr,
            bold2t1w_dof=bold2t1w_dof,
            bold2t1w_init=bold2t1w_init,
            sloppy=sloppy,
            omp_nthreads=omp_nthreads,
        )
    # fmt:off
    workflow.connect([
        (inputnode, bbr_wf, [
            ('ref_bold_brain', 'inputnode.in_file'),
            ('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'),
            ('subjects_dir', 'inputnode.subjects_dir'),
            ('subject_id', 'inputnode.subject_id'),
            ('t1w_dseg', 'inputnode.t1w_dseg'),
            ('t1w_brain', 'inputnode.t1w_brain')]),
        (bbr_wf, outputnode, [('outputnode.itk_bold_to_t1', 'itk_bold_to_t1'),
                              ('outputnode.itk_t1_to_bold', 'itk_t1_to_bold'),
                              ('outputnode.fallback', 'fallback')]),
    ])
    # fmt:on
    # Optionally store a registration reportlet; the ``desc`` entity encodes
    # which registration actually produced the result (BBR vs. fallback).
    if write_report:
        ds_report_reg = pe.Node(
            DerivativesDataSink(datatype="figures", dismiss_entities=("echo",)),
            name='ds_report_reg',
            run_without_submitting=True,
            mem_gb=DEFAULT_MEMORY_MIN_GB,
        )
        def _bold_reg_suffix(fallback, freesurfer):
            """Map the (fallback, freesurfer) pair onto the reportlet's desc label."""
            if fallback:
                return 'coreg' if freesurfer else 'flirtnobbr'
            return 'bbregister' if freesurfer else 'flirtbbr'
        # fmt:off
        workflow.connect([
            (bbr_wf, ds_report_reg, [
                ('outputnode.out_report', 'in_file'),
                (('outputnode.fallback', _bold_reg_suffix, freesurfer), 'desc')]),
        ])
        # fmt:on
    return workflow
def init_bold_t1_trans_wf(
    freesurfer: bool,
    mem_gb: float,
    omp_nthreads: int,
    use_compression: bool = True,
    name: str = 'bold_t1_trans_wf',
):
    """
    Co-register the reference BOLD image to T1w-space.
    The workflow uses :abbr:`BBR (boundary-based registration)`.
    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes
            from fmriprep.workflows.bold.registration import init_bold_t1_trans_wf
            wf = init_bold_t1_trans_wf(freesurfer=True,
                                       mem_gb=3,
                                       omp_nthreads=1)
    Parameters
    ----------
    freesurfer : :obj:`bool`
        Enable FreeSurfer functional registration (bbregister)
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    name : :obj:`str`
        Name of workflow (default: ``bold_reg_wf``)
    Inputs
    ------
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    ref_bold_brain
        Reference image to which BOLD series is aligned
        If ``fieldwarp == True``, ``ref_bold_brain`` should be unwarped
    ref_bold_mask
        Skull-stripping mask of reference image
    t1w_brain
        Skull-stripped bias-corrected structural template image
    t1w_mask
        Mask of the skull-stripped template image
    t1w_aseg
        FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    t1w_aparc
        FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    bold_split
        Individual 3D BOLD volumes, not motion corrected
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format
    Outputs
    -------
    bold_t1
        Motion-corrected BOLD series in T1 space
    bold_t1_ref
        Reference, contrast-enhanced summary of the motion-corrected BOLD series in T1w space
    bold_mask_t1
        BOLD mask in T1 space
    bold_aseg_t1
        FreeSurfer's ``aseg.mgz`` atlas, in T1w-space at the BOLD resolution
        (only if ``recon-all`` was run).
    bold_aparc_t1
        FreeSurfer's ``aparc+aseg.mgz`` atlas, in T1w-space at the BOLD resolution
        (only if ``recon-all`` was run).
    See also
    --------
      * :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`
      * :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nibabel import GenerateSamplingReference
    from niworkflows.interfaces.nilearn import Merge
    from fmriprep.interfaces.maths import Clip
    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                'name_source',
                'ref_bold_brain',
                'ref_bold_mask',
                't1w_brain',
                't1w_mask',
                't1w_aseg',
                't1w_aparc',
                'bold_split',
                'fieldwarp',
                'hmc_xforms',
                'itk_bold_to_t1',
            ]
        ),
        name='inputnode',
    )
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=['bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1', 'bold_aparc_t1']
        ),
        name='outputnode',
    )
    gen_ref = pe.Node(
        GenerateSamplingReference(), name='gen_ref', mem_gb=0.3
    )  # 256x256x256 * 64 / 8 ~ 150MB
    # Resample the BOLD reference mask into T1w space (MultiLabel keeps it binary)
    mask_t1w_tfm = pe.Node(
        ApplyTransforms(interpolation='MultiLabel'), name='mask_t1w_tfm', mem_gb=0.1
    )
    # fmt:off
    workflow.connect([
        (inputnode, gen_ref, [('ref_bold_brain', 'moving_image'),
                              ('t1w_brain', 'fixed_image'),
                              ('t1w_mask', 'fov_mask')]),
        (inputnode, mask_t1w_tfm, [('ref_bold_mask', 'input_image')]),
        (gen_ref, mask_t1w_tfm, [('out_file', 'reference_image')]),
        (inputnode, mask_t1w_tfm, [('itk_bold_to_t1', 'transforms')]),
        (mask_t1w_tfm, outputnode, [('output_image', 'bold_mask_t1')]),
    ])
    # fmt:on
    if freesurfer:
        # Resample aseg and aparc in T1w space (no transforms needed)
        aseg_t1w_tfm = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', transforms='identity'),
            name='aseg_t1w_tfm',
            mem_gb=0.1,
        )
        aparc_t1w_tfm = pe.Node(
            ApplyTransforms(interpolation='MultiLabel', transforms='identity'),
            name='aparc_t1w_tfm',
            mem_gb=0.1,
        )
        # fmt:off
        workflow.connect([
            (inputnode, aseg_t1w_tfm, [('t1w_aseg', 'input_image')]),
            (inputnode, aparc_t1w_tfm, [('t1w_aparc', 'input_image')]),
            (gen_ref, aseg_t1w_tfm, [('out_file', 'reference_image')]),
            (gen_ref, aparc_t1w_tfm, [('out_file', 'reference_image')]),
            (aseg_t1w_tfm, outputnode, [('output_image', 'bold_aseg_t1')]),
            (aparc_t1w_tfm, outputnode, [('output_image', 'bold_aparc_t1')]),
        ])
        # fmt:on
    # Apply the concatenated transforms to every 3D volume in one resampling step
    bold_to_t1w_transform = pe.Node(
        MultiApplyTransforms(interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
        name='bold_to_t1w_transform',
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )
    # Interpolation can occasionally produce below-zero values as an artifact
    threshold = pe.MapNode(
        Clip(minimum=0), name="threshold", iterfield=['in_file'], mem_gb=DEFAULT_MEMORY_MIN_GB
    )
    # merge 3D volumes into 4D timeseries
    merge = pe.Node(Merge(compress=use_compression), name='merge', mem_gb=mem_gb)
    # Generate a reference on the target T1w space
    gen_final_ref = init_bold_reference_wf(omp_nthreads, pre_mask=True)
    # Merge transforms placing the head motion correction last
    merge_xforms = pe.Node(
        niu.Merge(3),
        name='merge_xforms',
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # fmt:off
    workflow.connect([
        (inputnode, merge, [('name_source', 'header_source')]),
        (inputnode, merge_xforms, [
            ('hmc_xforms', 'in3'),  # May be 'identity' if HMC already applied
            ('fieldwarp', 'in2'),  # May be 'identity' if SDC already applied
            ('itk_bold_to_t1', 'in1')]),
        (inputnode, bold_to_t1w_transform, [('bold_split', 'input_image')]),
        (merge_xforms, bold_to_t1w_transform, [('out', 'transforms')]),
        (gen_ref, bold_to_t1w_transform, [('out_file', 'reference_image')]),
        (bold_to_t1w_transform, threshold, [('out_files', 'in_file')]),
        (threshold, merge, [('out_file', 'in_files')]),
        (merge, gen_final_ref, [('out_file', 'inputnode.bold_file')]),
        (mask_t1w_tfm, gen_final_ref, [('output_image', 'inputnode.bold_mask')]),
        (merge, outputnode, [('out_file', 'bold_t1')]),
        (gen_final_ref, outputnode, [('outputnode.ref_image', 'bold_t1_ref')]),
    ])
    # fmt:on
    return workflow
def init_bbreg_wf(
    use_bbr: bool,
    bold2t1w_dof: AffineDOF,
    bold2t1w_init: RegistrationInit,
    omp_nthreads: int,
    name: str = 'bbreg_wf',
):
    """
    Build a workflow to run FreeSurfer's ``bbregister``.
    This workflow uses FreeSurfer's ``bbregister`` to register a BOLD image to
    a T1-weighted structural image.
    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`,
    which performs the same task using FSL's FLIRT with a BBR cost function.
    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, affine coregistration will be performed using
    FreeSurfer's ``mri_coreg`` tool.
    If ``True``, ``bbregister`` will be seeded with the initial transform found
    by ``mri_coreg`` (equivalent to running ``bbregister --init-coreg``).
    If ``None``, after ``bbregister`` is run, the resulting affine transform
    will be compared to the initial transform found by ``mri_coreg``.
    Excessive deviation will result in rejecting the BBR refinement and
    accepting the original, affine registration.
    Workflow Graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes
            from fmriprep.workflows.bold.registration import init_bbreg_wf
            wf = init_bbreg_wf(use_bbr=True, bold2t1w_dof=9,
                               bold2t1w_init='register', omp_nthreads=1)
    Parameters
    ----------
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold2t1w_init : str, 'header' or 'register'
        If ``'header'``, use header information for initialization of BOLD and T1 images.
        If ``'register'``, align volumes by their centers.
    name : :obj:`str`, optional
        Workflow name (default: bbreg_wf)
    Inputs
    ------
    in_file
        Reference BOLD image to be registered
    fsnative2t1w_xfm
        FSL-style affine matrix translating from FreeSurfer T1.mgz to T1w
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID (must have folder in SUBJECTS_DIR)
    t1w_brain
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`)
    t1w_dseg
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_fsl_bbr_wf`)
    Outputs
    -------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    itk_t1_to_bold
        Affine transform from T1 space to BOLD space (ITK format)
    out_report
        Reportlet for assessing registration quality
    fallback
        Boolean indicating whether BBR was rejected (mri_coreg registration returned)
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    # See https://github.com/nipreps/fmriprep/issues/768
    from niworkflows.interfaces.freesurfer import PatchedBBRegisterRPT as BBRegisterRPT
    from niworkflows.interfaces.freesurfer import PatchedLTAConvert as LTAConvert
    from niworkflows.interfaces.freesurfer import PatchedMRICoregRPT as MRICoregRPT
    from niworkflows.interfaces.nitransforms import ConcatenateXFMs
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD reference was then co-registered to the T1w reference using
`bbregister` (FreeSurfer) which implements boundary-based registration [@bbr].
Co-registration was configured with {dof} degrees of freedom{reason}.
""".format(
        dof={6: 'six', 9: 'nine', 12: 'twelve'}[bold2t1w_dof],
        reason=''
        if bold2t1w_dof == 6
        else 'to account for distortions remaining in the BOLD reference',
    )
    inputnode = pe.Node(
        niu.IdentityInterface(
            [
                'in_file',
                'fsnative2t1w_xfm',
                'subjects_dir',
                'subject_id',  # BBRegister
                't1w_dseg',
                't1w_brain',
            ]
        ),  # FLIRT BBR
        name='inputnode',
    )
    outputnode = pe.Node(
        niu.IdentityInterface(['itk_bold_to_t1', 'itk_t1_to_bold', 'out_report', 'fallback']),
        name='outputnode',
    )
    if bold2t1w_init not in ("register", "header"):
        raise ValueError(f"Unknown BOLD-T1w initialization option: {bold2t1w_init}")
    # For now make BBR unconditional - in the future, we can fall back to identity,
    # but adding the flexibility without testing seems a bit dangerous
    if bold2t1w_init == "header":
        if use_bbr is False:
            raise ValueError("Cannot disable BBR and use header registration")
        if use_bbr is None:
            LOGGER.warning("Initializing BBR with header; affine fallback disabled")
            use_bbr = True
    # Define both nodes, but only connect conditionally
    mri_coreg = pe.Node(
        MRICoregRPT(
            dof=bold2t1w_dof, sep=[4], ftol=0.0001, linmintol=0.01, generate_report=not use_bbr
        ),
        name='mri_coreg',
        n_procs=omp_nthreads,
        mem_gb=5,
    )
    bbregister = pe.Node(
        BBRegisterRPT(
            dof=bold2t1w_dof,
            contrast_type='t2',
            registered_file=True,
            out_lta_file=True,
            generate_report=True,
        ),
        name='bbregister',
        mem_gb=12,
    )
    if bold2t1w_init == "header":
        bbregister.inputs.init = "header"
    # Candidate transforms: bbregister feeds ``in1``, mri_coreg feeds ``in2``
    # (only the enabled registrations are actually connected below)
    transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name='transforms')
    lta_ras2ras = pe.MapNode(
        LTAConvert(out_lta=True), iterfield=['in_lta'], name='lta_ras2ras', mem_gb=2
    )
    # In cases where Merge(2) only has `in1` or `in2` defined
    # output list will just contain a single element
    select_transform = pe.Node(
        niu.Select(index=0), run_without_submitting=True, name='select_transform'
    )
    merge_ltas = pe.Node(niu.Merge(2), name='merge_ltas', run_without_submitting=True)
    concat_xfm = pe.Node(ConcatenateXFMs(inverse=True), name='concat_xfm')
    # fmt:off
    workflow.connect([
        (inputnode, merge_ltas, [('fsnative2t1w_xfm', 'in2')]),
        # Wire up the co-registration alternatives
        (transforms, lta_ras2ras, [('out', 'in_lta')]),
        (lta_ras2ras, select_transform, [('out_lta', 'inlist')]),
        (select_transform, merge_ltas, [('out', 'in1')]),
        (merge_ltas, concat_xfm, [('out', 'in_xfms')]),
        (concat_xfm, outputnode, [('out_xfm', 'itk_bold_to_t1')]),
        (concat_xfm, outputnode, [('out_inv', 'itk_t1_to_bold')]),
    ])
    # fmt:on
    # Do not initialize with header, use mri_coreg
    if bold2t1w_init == "register":
        # fmt:off
        workflow.connect([
            (inputnode, mri_coreg, [('subjects_dir', 'subjects_dir'),
                                    ('subject_id', 'subject_id'),
                                    ('in_file', 'source_file')]),
            (mri_coreg, transforms, [('out_lta_file', 'in2')]),
        ])
        # fmt:on
        # Short-circuit workflow building, use initial registration
        if use_bbr is False:
            workflow.connect(mri_coreg, 'out_report', outputnode, 'out_report')
            outputnode.inputs.fallback = True
            return workflow
        # Otherwise bbregister will also be used
        workflow.connect(mri_coreg, 'out_lta_file', bbregister, 'init_reg_file')
    # Use bbregister
    # fmt:off
    workflow.connect([
        (inputnode, bbregister, [('subjects_dir', 'subjects_dir'),
                                 ('subject_id', 'subject_id'),
                                 ('in_file', 'source_file')]),
        (bbregister, transforms, [('out_lta_file', 'in1')]),
    ])
    # fmt:on
    # Short-circuit workflow building, use boundary-based registration
    if use_bbr is True:
        workflow.connect(bbregister, 'out_report', outputnode, 'out_report')
        outputnode.inputs.fallback = False
        return workflow
    # Only reach this point if bold2t1w_init is "register" and use_bbr is None
    reports = pe.Node(niu.Merge(2), run_without_submitting=True, name='reports')
    compare_transforms = pe.Node(niu.Function(function=compare_xforms), name='compare_transforms')
    select_report = pe.Node(niu.Select(), run_without_submitting=True, name='select_report')
    # fmt:off
    workflow.connect([
        # Normalize LTA transforms to RAS2RAS (inputs are VOX2VOX) and compare
        (lta_ras2ras, compare_transforms, [('out_lta', 'lta_list')]),
        (compare_transforms, outputnode, [('out', 'fallback')]),
        # Select output transform
        (compare_transforms, select_transform, [('out', 'index')]),
        # Select output report
        (bbregister, reports, [('out_report', 'in1')]),
        (mri_coreg, reports, [('out_report', 'in2')]),
        (reports, select_report, [('out', 'inlist')]),
        (compare_transforms, select_report, [('out', 'index')]),
        (select_report, outputnode, [('out', 'out_report')]),
    ])
    # fmt:on
    return workflow
def init_fsl_bbr_wf(
    use_bbr: bool,
    bold2t1w_dof: AffineDOF,
    bold2t1w_init: RegistrationInit,
    omp_nthreads: int,
    sloppy: bool = False,
    name: str = 'fsl_bbr_wf',
):
    """
    Build a workflow to run FSL's ``flirt``.
    This workflow uses FSL FLIRT to register a BOLD image to a T1-weighted
    structural image, using a boundary-based registration (BBR) cost function.
    It is a counterpart to :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`,
    which performs the same task using FreeSurfer's ``bbregister``.
    The ``use_bbr`` option permits a high degree of control over registration.
    If ``False``, standard, rigid coregistration will be performed by FLIRT.
    If ``True``, FLIRT-BBR will be seeded with the initial transform found by
    the rigid coregistration.
    If ``None``, after FLIRT-BBR is run, the resulting affine transform
    will be compared to the initial transform found by FLIRT.
    Excessive deviation will result in rejecting the BBR refinement and
    accepting the original, affine registration.
    Workflow Graph
        .. workflow ::
            :graph2use: orig
            :simple_form: yes
            from fmriprep.workflows.bold.registration import init_fsl_bbr_wf
            wf = init_fsl_bbr_wf(use_bbr=True, bold2t1w_dof=9, bold2t1w_init='register', omp_nthreads=1)
    Parameters
    ----------
    use_bbr : :obj:`bool` or None
        Enable/disable boundary-based registration refinement.
        If ``None``, test BBR result for distortion before accepting.
    bold2t1w_dof : 6, 9 or 12
        Degrees-of-freedom for BOLD-T1w registration
    bold2t1w_init : str, 'header' or 'register'
        If ``'header'``, use header information for initialization of BOLD and T1 images.
        If ``'register'``, align volumes by their centers.
    name : :obj:`str`, optional
        Workflow name (default: fsl_bbr_wf)
    Inputs
    ------
    in_file
        Reference BOLD image to be registered
    t1w_brain
        Skull-stripped T1-weighted structural image
    t1w_dseg
        FAST segmentation of ``t1w_brain``
    fsnative2t1w_xfm
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
    subjects_dir
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
    subject_id
        Unused (see :py:func:`~fmriprep.workflows.bold.registration.init_bbreg_wf`)
    Outputs
    -------
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1w space (ITK format)
    itk_t1_to_bold
        Affine transform from T1 space to BOLD space (ITK format)
    out_report
        Reportlet for assessing registration quality
    fallback
        Boolean indicating whether BBR was rejected (rigid FLIRT registration returned)
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.freesurfer import PatchedLTAConvert as LTAConvert
    from niworkflows.interfaces.freesurfer import PatchedMRICoregRPT as MRICoregRPT
    from niworkflows.interfaces.reportlets.registration import FLIRTRPT
    from niworkflows.utils.images import dseg_label as _dseg_label
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD reference was then co-registered to the T1w reference using
`mri_coreg` (FreeSurfer) followed by `flirt` [FSL {fsl_ver}, @flirt]
with the boundary-based registration [@bbr] cost-function.
Co-registration was configured with {dof} degrees of freedom{reason}.
""".format(
        fsl_ver=FLIRTRPT().version or '<ver>',
        dof={6: 'six', 9: 'nine', 12: 'twelve'}[bold2t1w_dof],
        reason=''
        if bold2t1w_dof == 6
        else 'to account for distortions remaining in the BOLD reference',
    )
    inputnode = pe.Node(
        niu.IdentityInterface(
            [
                'in_file',
                'fsnative2t1w_xfm',
                'subjects_dir',
                'subject_id',  # BBRegister
                't1w_dseg',
                't1w_brain',
            ]
        ),  # FLIRT BBR
        name='inputnode',
    )
    outputnode = pe.Node(
        niu.IdentityInterface(['itk_bold_to_t1', 'itk_t1_to_bold', 'out_report', 'fallback']),
        name='outputnode',
    )
    # Extract the white-matter mask from the discrete segmentation for the BBR cost
    wm_mask = pe.Node(niu.Function(function=_dseg_label), name='wm_mask')
    wm_mask.inputs.label = 2  # BIDS default is WM=2
    if bold2t1w_init not in ("register", "header"):
        raise ValueError(f"Unknown BOLD-T1w initialization option: {bold2t1w_init}")
    if bold2t1w_init == "header":
        raise NotImplementedError("Header-based registration initialization not supported for FSL")
    mri_coreg = pe.Node(
        MRICoregRPT(
            dof=bold2t1w_dof, sep=[4], ftol=0.0001, linmintol=0.01, generate_report=not use_bbr
        ),
        name='mri_coreg',
        n_procs=omp_nthreads,
        mem_gb=5,
    )
    # Convert the mri_coreg LTA result into an FSL-style matrix for FLIRT/ConvertXFM
    lta_to_fsl = pe.Node(LTAConvert(out_fsl=True), name='lta_to_fsl', mem_gb=DEFAULT_MEMORY_MIN_GB)
    invt_bbr = pe.Node(
        fsl.ConvertXFM(invert_xfm=True), name='invt_bbr', mem_gb=DEFAULT_MEMORY_MIN_GB
    )
    # BOLD to T1 transform matrix is from fsl, using c3 tools to convert to
    # something ANTs will like.
    fsl2itk_fwd = pe.Node(
        c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
        name='fsl2itk_fwd',
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    fsl2itk_inv = pe.Node(
        c3.C3dAffineTool(fsl2ras=True, itk_transform=True),
        name='fsl2itk_inv',
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # fmt:off
    workflow.connect([
        (inputnode, mri_coreg, [('in_file', 'source_file'),
                                ('t1w_brain', 'reference_file')]),
        (inputnode, fsl2itk_fwd, [('t1w_brain', 'reference_file'),
                                  ('in_file', 'source_file')]),
        (inputnode, fsl2itk_inv, [('in_file', 'reference_file'),
                                  ('t1w_brain', 'source_file')]),
        (mri_coreg, lta_to_fsl, [('out_lta_file', 'in_lta')]),
        (invt_bbr, fsl2itk_inv, [('out_file', 'transform_file')]),
        (fsl2itk_fwd, outputnode, [('itk_transform', 'itk_bold_to_t1')]),
        (fsl2itk_inv, outputnode, [('itk_transform', 'itk_t1_to_bold')]),
    ])
    # fmt:on
    # Short-circuit workflow building, use rigid registration
    if use_bbr is False:
        # fmt:off
        workflow.connect([
            (lta_to_fsl, invt_bbr, [('out_fsl', 'in_file')]),
            (lta_to_fsl, fsl2itk_fwd, [('out_fsl', 'transform_file')]),
            (mri_coreg, outputnode, [('out_report', 'out_report')]),
        ])
        # fmt:on
        outputnode.inputs.fallback = True
        return workflow
    flt_bbr = pe.Node(
        FLIRTRPT(cost_func='bbr', dof=bold2t1w_dof, args="-basescale 1", generate_report=True),
        name='flt_bbr',
    )
    FSLDIR = os.getenv('FSLDIR')
    if FSLDIR:
        flt_bbr.inputs.schedule = op.join(FSLDIR, 'etc/flirtsch/bbr.sch')
    else:
        # Should mostly be hit while building docs
        LOGGER.warning("FSLDIR unset - using packaged BBR schedule")
        flt_bbr.inputs.schedule = pkgr.resource_filename('fmriprep', 'data/flirtsch/bbr.sch')
    # fmt:off
    workflow.connect([
        (inputnode, wm_mask, [('t1w_dseg', 'in_seg')]),
        (inputnode, flt_bbr, [('in_file', 'in_file')]),
        (lta_to_fsl, flt_bbr, [('out_fsl', 'in_matrix_file')]),
    ])
    # fmt:on
    if sloppy is True:
        downsample = pe.Node(
            niu.Function(
                function=_conditional_downsampling, output_names=["out_file", "out_mask"]
            ),
            name='downsample',
        )
        # fmt:off
        workflow.connect([
            (inputnode, downsample, [("t1w_brain", "in_file")]),
            (wm_mask, downsample, [("out", "in_mask")]),
            (downsample, flt_bbr, [('out_file', 'reference'),
                                   ('out_mask', 'wm_seg')]),
        ])
        # fmt:on
    else:
        # fmt:off
        workflow.connect([
            (inputnode, flt_bbr, [('t1w_brain', 'reference')]),
            (wm_mask, flt_bbr, [('out', 'wm_seg')]),
        ])
        # fmt:on
    # Short-circuit workflow building, use boundary-based registration
    if use_bbr is True:
        # fmt:off
        workflow.connect([
            (flt_bbr, invt_bbr, [('out_matrix_file', 'in_file')]),
            (flt_bbr, fsl2itk_fwd, [('out_matrix_file', 'transform_file')]),
            (flt_bbr, outputnode, [('out_report', 'out_report')]),
        ])
        # fmt:on
        outputnode.inputs.fallback = False
        return workflow
    # use_bbr is None: run both and let compare_xforms decide which result to keep
    transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name='transforms')
    reports = pe.Node(niu.Merge(2), run_without_submitting=True, name='reports')
    compare_transforms = pe.Node(niu.Function(function=compare_xforms), name='compare_transforms')
    select_transform = pe.Node(niu.Select(), run_without_submitting=True, name='select_transform')
    select_report = pe.Node(niu.Select(), run_without_submitting=True, name='select_report')
    fsl_to_lta = pe.MapNode(LTAConvert(out_lta=True), iterfield=['in_fsl'], name='fsl_to_lta')
    # fmt:off
    workflow.connect([
        (flt_bbr, transforms, [('out_matrix_file', 'in1')]),
        (lta_to_fsl, transforms, [('out_fsl', 'in2')]),
        # Convert FSL transforms to LTA (RAS2RAS) transforms and compare
        (inputnode, fsl_to_lta, [('in_file', 'source_file'),
                                 ('t1w_brain', 'target_file')]),
        (transforms, fsl_to_lta, [('out', 'in_fsl')]),
        (fsl_to_lta, compare_transforms, [('out_lta', 'lta_list')]),
        (compare_transforms, outputnode, [('out', 'fallback')]),
        # Select output transform
        (transforms, select_transform, [('out', 'inlist')]),
        (compare_transforms, select_transform, [('out', 'index')]),
        (select_transform, invt_bbr, [('out', 'in_file')]),
        (select_transform, fsl2itk_fwd, [('out', 'transform_file')]),
        (flt_bbr, reports, [('out_report', 'in1')]),
        (mri_coreg, reports, [('out_report', 'in2')]),
        (reports, select_report, [('out', 'inlist')]),
        (compare_transforms, select_report, [('out', 'index')]),
        (select_report, outputnode, [('out', 'out_report')]),
    ])
    # fmt:on
    return workflow
def compare_xforms(lta_list, norm_threshold=15):
    """
    Decide whether a BBR refinement should be rejected in favor of the initial affine.

    A normalized displacement is computed between the two affine transforms as
    the maximum overall displacement of the midpoints of the faces of a cube,
    when each transform is applied to the cube.
    This combines displacement resulting from scaling, translation and rotation.
    Although the norm is in mm, in a scaling context, it is not necessarily
    equivalent to that distance in translation.
    A default threshold of 15mm is used as a rough heuristic: displacements
    above 20mm showed clear signs of distortion, while "good" BBR refinements
    were frequently below 10mm displaced from the rigid transform; 15mm was
    chosen as a compromise within the more ambiguous 10-20mm range.
    This is open to revisiting in either direction.
    See discussion in
    `GitHub issue #681`_ <https://github.com/nipreps/fmriprep/issues/681>`_
    and the `underlying implementation
    <https://github.com/nipy/nipype/blob/56b7c81eedeeae884ba47c80096a5f66bd9f8116/nipype/algorithms/rapidart.py#L108-L159>`_.

    Parameters
    ----------
    lta_list : :obj:`list` or :obj:`tuple` of :obj:`str`
        the two given affines in LTA format (BBR result first, fallback second)
    norm_threshold : :obj:`float`
        the upper bound limit to the normalized displacement caused by the
        second transform relative to the first (default: `15`)
    """
    from nipype.algorithms.rapidart import _calc_norm_affine
    from niworkflows.interfaces.surf import load_transform

    refined, initial = (load_transform(fname) for fname in lta_list)
    displacement, _ = _calc_norm_affine([initial, refined], use_differences=True)
    return displacement[1] > norm_threshold
def _conditional_downsampling(in_file, in_mask, zoom_th=4.0):
    """Downsample the input dataset for sloppy mode.

    If every voxel dimension of ``in_file`` is already at least ``zoom_th``
    mm, both inputs are returned unchanged. Otherwise, the image and its mask
    are resampled onto a new grid whose zooms are ``zoom_th`` mm along every
    axis, preserving the center of the original field of view.

    Parameters
    ----------
    in_file : :obj:`str`
        Path of the image to (potentially) downsample.
    in_mask : :obj:`str`
        Path of the corresponding binary mask.
    zoom_th : :obj:`float`
        Target voxel size in mm; downsampling only happens when at least one
        axis has a zoom below this threshold (default: ``4.0``).

    Returns
    -------
    (out_file, out_mask) : :obj:`tuple` of :obj:`str`
        Paths to the resampled image and mask, written to the current working
        directory, or the original inputs when no resampling was needed.
    """
    from pathlib import Path

    import nibabel as nb
    import nitransforms as nt
    import numpy as np

    # FIX: ``scipy.ndimage.filters`` is a deprecated alias that was removed
    # in SciPy 1.12; ``gaussian_filter`` lives in ``scipy.ndimage`` proper.
    from scipy.ndimage import gaussian_filter

    img = nb.load(in_file)
    zooms = np.array(img.header.get_zooms()[:3])
    if not np.any(zooms < zoom_th):
        # Every axis is already coarse enough: pass the inputs through.
        return in_file, in_mask

    out_file = Path('desc-resampled_input.nii.gz').absolute()
    out_mask = Path('desc-resampled_mask.nii.gz').absolute()

    # Build the target grid: scale the voxel axes so zooms become zoom_th mm,
    # shrink the shape accordingly, and recenter so the field-of-view center
    # of the new grid coincides with the original one.
    shape = np.array(img.shape[:3])
    scaling = zoom_th / zooms
    newrot = np.diag(scaling).dot(img.affine[:3, :3])
    newshape = np.ceil(shape / scaling).astype(int)
    old_center = img.affine.dot(np.hstack((0.5 * (shape - 1), 1.0)))[:3]
    offset = old_center - newrot.dot((newshape - 1) * 0.5)
    newaffine = nb.affines.from_matvec(newrot, offset)
    newref = nb.Nifti1Image(np.zeros(newshape, dtype=np.uint8), newaffine)
    nt.Affine(reference=newref).apply(img).to_filename(out_file)

    # Smooth the mask before resampling, then re-binarize at 0.5 to avoid
    # aliasing artifacts at the lower resolution.
    mask = nb.load(in_mask)
    mask.set_data_dtype(float)
    mdata = gaussian_filter(mask.get_fdata(dtype=float), scaling)
    floatmask = nb.Nifti1Image(mdata, mask.affine, mask.header)
    newmask = nt.Affine(reference=newref).apply(floatmask)
    hdr = newmask.header.copy()
    hdr.set_data_dtype(np.uint8)
    newmaskdata = (newmask.get_fdata(dtype=float) > 0.5).astype(np.uint8)
    nb.Nifti1Image(newmaskdata, newmask.affine, hdr).to_filename(out_mask)
    return str(out_file), str(out_mask)
| 37,874 | 36.462908 | 125 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/resampling.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Resampling workflows
++++++++++++++++++++
.. autofunction:: init_bold_surf_wf
.. autofunction:: init_bold_std_trans_wf
.. autofunction:: init_bold_preproc_trans_wf
.. autofunction:: init_bold_fsLR_resampling_wf
.. autofunction:: init_bold_grayords_wf
.. autofunction:: init_goodvoxels_bold_mask_wf
"""
from __future__ import annotations
import typing as ty
from nipype import Function
from nipype.interfaces import freesurfer as fs
from nipype.interfaces import fsl
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.freesurfer import MedialNaNs
from ...config import DEFAULT_MEMORY_MIN_GB
from ...interfaces.workbench import MetricDilate, MetricMask, MetricResample
if ty.TYPE_CHECKING:
from niworkflows.utils.spaces import SpatialReferences
def init_bold_surf_wf(
    *,
    mem_gb: float,
    surface_spaces: ty.List[str],
    medial_surface_nan: bool,
    name: str = "bold_surf_wf",
):
    """
    Sample functional images to FreeSurfer surfaces.

    For each vertex, the cortical ribbon is sampled at six points (spaced 20% of thickness apart)
    and averaged.
    Outputs are in GIFTI format.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes
            from fmriprep.workflows.bold import init_bold_surf_wf
            wf = init_bold_surf_wf(mem_gb=0.1,
                                   surface_spaces=["fsnative", "fsaverage5"],
                                   medial_surface_nan=False,
                                   )
    Parameters
    ----------
    surface_spaces : :obj:`list`
        List of FreeSurfer surface-spaces (either ``fsaverage{3,4,5,6,}`` or ``fsnative``)
        the functional images are to be resampled to.
        For ``fsnative``, images will be resampled to the individual subject's
        native surface.
    medial_surface_nan : :obj:`bool`
        Replace medial wall values with NaNs on functional GIFTI files
    Inputs
    ------
    source_file
        Motion-corrected BOLD series in T1 space
    subjects_dir
        FreeSurfer SUBJECTS_DIR
    subject_id
        FreeSurfer subject ID
    t1w2fsnative_xfm
        LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
    Outputs
    -------
    surfaces
        BOLD series, resampled to FreeSurfer surfaces
    """
    from nipype.interfaces.io import FreeSurferSource
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.surf import GiftiSetAnatomicalStructure
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series were resampled onto the following surfaces
(FreeSurfer reconstruction nomenclature):
{out_spaces}.
""".format(
        out_spaces=", ".join(["*%s*" % s for s in surface_spaces])
    )
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "source_file",
                "subject_id",
                "subjects_dir",
                "t1w2fsnative_xfm",
            ]
        ),
        name="inputnode",
    )
    # Fan out over the requested surface spaces; everything downstream of
    # this node runs once per ``target``.
    itersource = pe.Node(niu.IdentityInterface(fields=["target"]), name="itersource")
    itersource.iterables = [("target", surface_spaces)]
    get_fsnative = pe.Node(FreeSurferSource(), name="get_fsnative", run_without_submitting=True)
    def select_target(subject_id, space):
        """Get the target subject ID, given a source subject ID and a target space."""
        return subject_id if space == "fsnative" else space
    targets = pe.Node(
        niu.Function(function=select_target),
        name="targets",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # Rename the source file to the output space to simplify naming later
    rename_src = pe.Node(
        niu.Rename(format_string="%(subject)s", keep_ext=True),
        name="rename_src",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # Convert the T1w-to-fsnative transform into a registration file that
    # SampleToSurface accepts as ``reg_file`` (helper ``_itk2lta`` is defined
    # elsewhere in this module; presumably ITK -> LTA — confirm there).
    itk2lta = pe.Node(niu.Function(function=_itk2lta), name="itk2lta", run_without_submitting=True)
    # Sample the ribbon at 6 depths (0 to 1 in steps of 0.2, fractional
    # units) and average; one mapped instance per hemisphere.
    sampler = pe.MapNode(
        fs.SampleToSurface(
            interp_method="trilinear",
            out_type="gii",
            override_reg_subj=True,
            sampling_method="average",
            sampling_range=(0, 1, 0.2),
            sampling_units="frac",
        ),
        iterfield=["hemi"],
        name="sampler",
        mem_gb=mem_gb * 3,
    )
    sampler.inputs.hemi = ["lh", "rh"]
    update_metadata = pe.MapNode(
        GiftiSetAnatomicalStructure(),
        iterfield=["in_file"],
        name="update_metadata",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # Collapse the per-space iterations back into synchronized lists.
    joinnode = pe.JoinNode(
        niu.IdentityInterface(fields=["surfaces", "target"]),
        joinsource="itersource",
        name="joinnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["surfaces", "target"]),
        name="outputnode",
    )
    # fmt: off
    workflow.connect([
        (inputnode, get_fsnative, [
            ("subject_id", "subject_id"),
            ("subjects_dir", "subjects_dir")
        ]),
        (inputnode, targets, [("subject_id", "subject_id")]),
        (inputnode, itk2lta, [
            ("source_file", "src_file"),
            ("t1w2fsnative_xfm", "in_file"),
        ]),
        (get_fsnative, itk2lta, [("T1", "dst_file")]),
        (inputnode, sampler, [
            ("subjects_dir", "subjects_dir"),
            ("subject_id", "subject_id"),
        ]),
        (itersource, targets, [("target", "space")]),
        (inputnode, rename_src, [("source_file", "in_file")]),
        (itersource, rename_src, [("target", "subject")]),
        (rename_src, sampler, [("out_file", "source_file")]),
        (itk2lta, sampler, [("out", "reg_file")]),
        (targets, sampler, [("out", "target_subject")]),
        (update_metadata, joinnode, [("out_file", "surfaces")]),
        (itersource, joinnode, [("target", "target")]),
        (joinnode, outputnode, [
            ("surfaces", "surfaces"),
            ("target", "target"),
        ]),
    ])
    # fmt: on
    # Refine if medial vertices should be NaNs
    medial_nans = pe.MapNode(
        MedialNaNs(), iterfield=["in_file"], name="medial_nans", mem_gb=DEFAULT_MEMORY_MIN_GB
    )
    if medial_surface_nan:
        # Interpose the medial-wall NaN substitution between sampling and
        # metadata update.
        # fmt: off
        workflow.connect([
            (inputnode, medial_nans, [("subjects_dir", "subjects_dir")]),
            (sampler, medial_nans, [("out_file", "in_file")]),
            (medial_nans, update_metadata, [("out_file", "in_file")]),
        ])
        # fmt: on
    else:
        workflow.connect([(sampler, update_metadata, [("out_file", "in_file")])])
    return workflow
def init_goodvoxels_bold_mask_wf(mem_gb: float, name: str = "goodvoxels_bold_mask_wf"):
    """Calculate a mask of a BOLD series excluding high variance voxels.

    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes
            from fmriprep.workflows.bold.resampling import init_goodvoxels_bold_mask_wf
            wf = init_goodvoxels_bold_mask_wf(mem_gb=0.1)
    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    name : :obj:`str`
        Name of workflow (default: ``goodvoxels_bold_mask_wf``)
    Inputs
    ------
    anat_ribbon
        Cortical ribbon in T1w space
    bold_file
        Motion-corrected BOLD series in T1w space
    Outputs
    -------
    goodvoxels_mask
        Mask of voxels in the BOLD grid, excluding those with locally high
        coefficient of variation (COV)
    goodvoxels_ribbon
        Cortical ribbon mask excluding voxels with locally high COV
    """
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "anat_ribbon",
                "bold_file",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "goodvoxels_mask",
                "goodvoxels_ribbon",
            ]
        ),
        name="outputnode",
    )
    # Resample the anatomical ribbon onto the BOLD grid; the identity
    # transform means only the sampling grid changes.
    ribbon_boldsrc_xfm = pe.Node(
        ApplyTransforms(interpolation='MultiLabel', transforms='identity'),
        name="ribbon_boldsrc_xfm",
        mem_gb=mem_gb,
    )
    # Voxelwise temporal standard deviation and mean of the BOLD series.
    stdev_volume = pe.Node(
        fsl.maths.StdImage(dimension='T'),
        name="stdev_volume",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    mean_volume = pe.Node(
        fsl.maths.MeanImage(dimension='T'),
        name="mean_volume",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # Coefficient of variation (COV): temporal stdev divided by temporal mean.
    cov_volume = pe.Node(
        fsl.maths.BinaryMaths(operation='div'),
        name="cov_volume",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    cov_ribbon = pe.Node(
        fsl.ApplyMask(),
        name="cov_ribbon",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    cov_ribbon_mean = pe.Node(
        fsl.ImageStats(op_string='-M'),
        name="cov_ribbon_mean",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    cov_ribbon_std = pe.Node(
        fsl.ImageStats(op_string='-S'),
        name="cov_ribbon_std",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    cov_ribbon_norm = pe.Node(
        fsl.maths.BinaryMaths(operation='div'),
        name="cov_ribbon_norm",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    smooth_norm = pe.Node(
        fsl.maths.MathsCommand(args="-bin -s 5"),
        name="smooth_norm",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    merge_smooth_norm = pe.Node(
        niu.Merge(1),
        name="merge_smooth_norm",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
        run_without_submitting=True,
    )
    # Smooth the normalized ribbon COV, dividing by the smoothed binary mask
    # to correct for edge attenuation, then dilate.
    cov_ribbon_norm_smooth = pe.Node(
        fsl.maths.MultiImageMaths(op_string='-s 5 -div %s -dilD'),
        name="cov_ribbon_norm_smooth",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    cov_norm = pe.Node(
        fsl.maths.BinaryMaths(operation='div'),
        name="cov_norm",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # Modulate the normalized COV by its locally smoothed version, so the
    # threshold adapts to regional COV levels.
    cov_norm_modulate = pe.Node(
        fsl.maths.BinaryMaths(operation='div'),
        name="cov_norm_modulate",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    cov_norm_modulate_ribbon = pe.Node(
        fsl.ApplyMask(),
        name="cov_norm_modulate_ribbon",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    def _calc_upper_thr(in_stats):
        # in_stats = [mean, std] of the modulated ribbon COV: mean + 0.5*std
        return in_stats[0] + (in_stats[1] * 0.5)
    upper_thr_val = pe.Node(
        Function(
            input_names=["in_stats"], output_names=["upper_thresh"], function=_calc_upper_thr
        ),
        name="upper_thr_val",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    def _calc_lower_thr(in_stats):
        # NOTE(review): this computes std - 0.5*mean (indices swapped
        # relative to _calc_upper_thr, which reads [mean, std]), and the
        # node's output is never consumed downstream — confirm intent.
        return in_stats[1] - (in_stats[0] * 0.5)
    lower_thr_val = pe.Node(
        Function(
            input_names=["in_stats"], output_names=["lower_thresh"], function=_calc_lower_thr
        ),
        name="lower_thr_val",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    mod_ribbon_mean = pe.Node(
        fsl.ImageStats(op_string='-M'),
        name="mod_ribbon_mean",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    mod_ribbon_std = pe.Node(
        fsl.ImageStats(op_string='-S'),
        name="mod_ribbon_std",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    merge_mod_ribbon_stats = pe.Node(
        niu.Merge(2),
        name="merge_mod_ribbon_stats",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
        run_without_submitting=True,
    )
    bin_mean_volume = pe.Node(
        fsl.maths.UnaryMaths(operation="bin"),
        name="bin_mean_volume",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    merge_goodvoxels_operands = pe.Node(
        niu.Merge(2),
        name="merge_goodvoxels_operands",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
        run_without_submitting=True,
    )
    goodvoxels_thr = pe.Node(
        fsl.maths.Threshold(),
        name="goodvoxels_thr",
        mem_gb=mem_gb,
    )
    # Binarize the thresholded COV, then invert it within the brain
    # (``-bin -sub brainmask -mul -1``), keeping low-COV brain voxels.
    goodvoxels_mask = pe.Node(
        fsl.maths.MultiImageMaths(op_string='-bin -sub %s -mul -1'),
        name="goodvoxels_mask",
        mem_gb=mem_gb,
    )
    # make HCP-style "goodvoxels" mask in t1w space for filtering outlier voxels
    # in bold timeseries, based on modulated normalized covariance
    workflow.connect(
        [
            (inputnode, ribbon_boldsrc_xfm, [("anat_ribbon", "input_image")]),
            (inputnode, stdev_volume, [("bold_file", "in_file")]),
            (inputnode, mean_volume, [("bold_file", "in_file")]),
            (mean_volume, ribbon_boldsrc_xfm, [("out_file", "reference_image")]),
            (stdev_volume, cov_volume, [("out_file", "in_file")]),
            (mean_volume, cov_volume, [("out_file", "operand_file")]),
            (cov_volume, cov_ribbon, [("out_file", "in_file")]),
            (ribbon_boldsrc_xfm, cov_ribbon, [("output_image", "mask_file")]),
            (cov_ribbon, cov_ribbon_mean, [("out_file", "in_file")]),
            (cov_ribbon, cov_ribbon_std, [("out_file", "in_file")]),
            (cov_ribbon, cov_ribbon_norm, [("out_file", "in_file")]),
            (cov_ribbon_mean, cov_ribbon_norm, [("out_stat", "operand_value")]),
            (cov_ribbon_norm, smooth_norm, [("out_file", "in_file")]),
            (smooth_norm, merge_smooth_norm, [("out_file", "in1")]),
            (cov_ribbon_norm, cov_ribbon_norm_smooth, [("out_file", "in_file")]),
            (merge_smooth_norm, cov_ribbon_norm_smooth, [("out", "operand_files")]),
            (cov_ribbon_mean, cov_norm, [("out_stat", "operand_value")]),
            (cov_volume, cov_norm, [("out_file", "in_file")]),
            (cov_norm, cov_norm_modulate, [("out_file", "in_file")]),
            (cov_ribbon_norm_smooth, cov_norm_modulate, [("out_file", "operand_file")]),
            (cov_norm_modulate, cov_norm_modulate_ribbon, [("out_file", "in_file")]),
            (ribbon_boldsrc_xfm, cov_norm_modulate_ribbon, [("output_image", "mask_file")]),
            (cov_norm_modulate_ribbon, mod_ribbon_mean, [("out_file", "in_file")]),
            (cov_norm_modulate_ribbon, mod_ribbon_std, [("out_file", "in_file")]),
            (mod_ribbon_mean, merge_mod_ribbon_stats, [("out_stat", "in1")]),
            (mod_ribbon_std, merge_mod_ribbon_stats, [("out_stat", "in2")]),
            (merge_mod_ribbon_stats, upper_thr_val, [("out", "in_stats")]),
            (merge_mod_ribbon_stats, lower_thr_val, [("out", "in_stats")]),
            (mean_volume, bin_mean_volume, [("out_file", "in_file")]),
            (upper_thr_val, goodvoxels_thr, [("upper_thresh", "thresh")]),
            (cov_norm_modulate, goodvoxels_thr, [("out_file", "in_file")]),
            (bin_mean_volume, merge_goodvoxels_operands, [("out_file", "in1")]),
            (goodvoxels_thr, goodvoxels_mask, [("out_file", "in_file")]),
            (merge_goodvoxels_operands, goodvoxels_mask, [("out", "operand_files")]),
        ]
    )
    # NOTE(review): ``name_source`` and ``keep_extension`` are not standard
    # ``pe.Node`` constructor arguments (they look like DynamicTraits/output
    # naming options of an interface) — confirm they have any effect here.
    goodvoxels_ribbon_mask = pe.Node(
        fsl.ApplyMask(),
        name_source=['in_file'],
        keep_extension=True,
        name="goodvoxels_ribbon_mask",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # apply goodvoxels ribbon mask to bold
    workflow.connect(
        [
            (goodvoxels_mask, goodvoxels_ribbon_mask, [("out_file", "in_file")]),
            (ribbon_boldsrc_xfm, goodvoxels_ribbon_mask, [("output_image", "mask_file")]),
            (goodvoxels_mask, outputnode, [("out_file", "goodvoxels_mask")]),
            (goodvoxels_ribbon_mask, outputnode, [("out_file", "goodvoxels_ribbon")]),
        ]
    )
    return workflow
def init_bold_fsLR_resampling_wf(
    grayord_density: ty.Literal['91k', '170k'],
    estimate_goodvoxels: bool,
    omp_nthreads: int,
    mem_gb: float,
    name: str = "bold_fsLR_resampling_wf",
):
    """Resample BOLD time series to fsLR surface.

    This workflow is derived heavily from three scripts within the DCAN-HCP pipelines scripts
    Line numbers correspond to the locations of the code in the original scripts, found at:
    https://github.com/DCAN-Labs/DCAN-HCP/tree/9291324/
    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes
            from fmriprep.workflows.bold.resampling import init_bold_fsLR_resampling_wf
            wf = init_bold_fsLR_resampling_wf(
                estimate_goodvoxels=True,
                grayord_density='91k',
                omp_nthreads=1,
                mem_gb=1,
            )
    Parameters
    ----------
    grayord_density : :class:`str`
        Either ``"91k"`` or ``"170k"``, representing the total *grayordinates*.
    estimate_goodvoxels : :class:`bool`
        Calculate mask excluding voxels with a locally high coefficient of variation to
        exclude from surface resampling
    omp_nthreads : :class:`int`
        Maximum number of threads an individual process may use
    mem_gb : :class:`float`
        Size of BOLD file in GB
    name : :class:`str`
        Name of workflow (default: ``bold_fsLR_resampling_wf``)
    Inputs
    ------
    bold_file : :class:`str`
        Path to BOLD file resampled into T1 space
    surfaces : :class:`list` of :class:`str`
        Path to left and right hemisphere white, pial and midthickness GIFTI surfaces
    morphometrics : :class:`list` of :class:`str`
        Path to left and right hemisphere morphometric GIFTI surfaces, which must include thickness
    sphere_reg_fsLR : :class:`list` of :class:`str`
        Path to left and right hemisphere sphere.reg GIFTI surfaces, mapping from subject to fsLR
    anat_ribbon : :class:`str`
        Path to mask of cortical ribbon in T1w space, for calculating goodvoxels
    Outputs
    -------
    bold_fsLR : :class:`list` of :class:`str`
        Path to BOLD series resampled as functional GIFTI files in fsLR space
    goodvoxels_mask : :class:`str`
        Path to mask of voxels, excluding those with locally high coefficients of variation
    """
    import templateflow.api as tf
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from smriprep import data as smriprep_data
    from smriprep.interfaces.workbench import SurfaceResample
    from fmriprep.interfaces.gifti import CreateROI
    from fmriprep.interfaces.workbench import (
        MetricFillHoles,
        MetricRemoveIslands,
        VolumeToSurfaceMapping,
    )
    # 91k grayordinates correspond to 32k vertices per hemisphere; 170k to 59k.
    fslr_density = "32k" if grayord_density == "91k" else "59k"
    workflow = Workflow(name=name)
    workflow.__desc__ = """\
The BOLD time-series were resampled onto the left/right-symmetric template
"fsLR" [@hcppipelines].
"""
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                'bold_file',
                'surfaces',
                'morphometrics',
                'sphere_reg_fsLR',
                'anat_ribbon',
            ]
        ),
        name='inputnode',
    )
    # The whole resampling chain runs once per hemisphere.
    itersource = pe.Node(
        niu.IdentityInterface(fields=['hemi']),
        name='itersource',
        iterables=[('hemi', ['L', 'R'])],
    )
    joinnode = pe.JoinNode(
        niu.IdentityInterface(fields=['bold_fsLR']),
        name='joinnode',
        joinsource='itersource',
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['bold_fsLR', 'goodvoxels_mask']),
        name='outputnode',
    )
    # select white, midthickness and pial surfaces based on hemi
    select_surfaces = pe.Node(
        niu.Function(
            function=_select_surfaces,
            output_names=[
                'white',
                'pial',
                'midthickness',
                'thickness',
                'sphere_reg',
                'template_sphere',
                'template_roi',
            ],
        ),
        name='select_surfaces',
    )
    # Pre-populate the template spheres (from TemplateFlow) and the fsLR
    # atlas ROIs (shipped with sMRIPrep) for both hemispheres.
    select_surfaces.inputs.template_spheres = [
        str(sphere)
        for sphere in tf.get(
            template='fsLR',
            density=fslr_density,
            suffix='sphere',
            space=None,
            extension='.surf.gii',
        )
    ]
    atlases = smriprep_data.load_resource('atlases')
    select_surfaces.inputs.template_rois = [
        str(atlases / 'L.atlasroi.32k_fs_LR.shape.gii'),
        str(atlases / 'R.atlasroi.32k_fs_LR.shape.gii'),
    ]
    # Reimplements lines 282-290 of FreeSurfer2CaretConvertAndRegisterNonlinear.sh
    initial_roi = pe.Node(CreateROI(), name="initial_roi", mem_gb=DEFAULT_MEMORY_MIN_GB)
    # Lines 291-292
    fill_holes = pe.Node(MetricFillHoles(), name="fill_holes", mem_gb=DEFAULT_MEMORY_MIN_GB)
    native_roi = pe.Node(MetricRemoveIslands(), name="native_roi", mem_gb=DEFAULT_MEMORY_MIN_GB)
    # Line 393 of FreeSurfer2CaretConvertAndRegisterNonlinear.sh
    downsampled_midthickness = pe.Node(
        SurfaceResample(method='BARYCENTRIC'),
        name="downsampled_midthickness",
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # RibbonVolumeToSurfaceMapping.sh
    # Line 85 thru ...
    volume_to_surface = pe.Node(
        VolumeToSurfaceMapping(method="ribbon-constrained"),
        name="volume_to_surface",
        mem_gb=mem_gb * 3,
        n_procs=omp_nthreads,
    )
    metric_dilate = pe.Node(
        MetricDilate(distance=10, nearest=True),
        name="metric_dilate",
        n_procs=omp_nthreads,
    )
    mask_native = pe.Node(MetricMask(), name="mask_native")
    resample_to_fsLR = pe.Node(
        MetricResample(method='ADAP_BARY_AREA', area_surfs=True),
        name="resample_to_fsLR",
        n_procs=omp_nthreads,
    )
    # ... line 89
    mask_fsLR = pe.Node(MetricMask(), name="mask_fsLR")
    # fmt: off
    workflow.connect([
        (inputnode, select_surfaces, [
            ('surfaces', 'surfaces'),
            ('morphometrics', 'morphometrics'),
            ('sphere_reg_fsLR', 'spherical_registrations'),
        ]),
        (itersource, select_surfaces, [('hemi', 'hemi')]),
        # Native ROI file from thickness
        (itersource, initial_roi, [('hemi', 'hemisphere')]),
        (select_surfaces, initial_roi, [('thickness', 'thickness_file')]),
        (select_surfaces, fill_holes, [('midthickness', 'surface_file')]),
        (select_surfaces, native_roi, [('midthickness', 'surface_file')]),
        (initial_roi, fill_holes, [('roi_file', 'metric_file')]),
        (fill_holes, native_roi, [('out_file', 'metric_file')]),
        # Downsample midthickness to fsLR density
        (select_surfaces, downsampled_midthickness, [
            ('midthickness', 'surface_in'),
            ('sphere_reg', 'current_sphere'),
            ('template_sphere', 'new_sphere'),
        ]),
        # Resample BOLD to native surface, dilate and mask
        (inputnode, volume_to_surface, [
            ('bold_file', 'volume_file'),
        ]),
        (select_surfaces, volume_to_surface, [
            ('midthickness', 'surface_file'),
            ('white', 'inner_surface'),
            ('pial', 'outer_surface'),
        ]),
        (select_surfaces, metric_dilate, [('midthickness', 'surf_file')]),
        (volume_to_surface, metric_dilate, [('out_file', 'in_file')]),
        (native_roi, mask_native, [('out_file', 'mask')]),
        (metric_dilate, mask_native, [('out_file', 'in_file')]),
        # Resample BOLD to fsLR and mask
        (select_surfaces, resample_to_fsLR, [
            ('sphere_reg', 'current_sphere'),
            ('template_sphere', 'new_sphere'),
            ('midthickness', 'current_area'),
        ]),
        (downsampled_midthickness, resample_to_fsLR, [('surface_out', 'new_area')]),
        (native_roi, resample_to_fsLR, [('out_file', 'roi_metric')]),
        (mask_native, resample_to_fsLR, [('out_file', 'in_file')]),
        (select_surfaces, mask_fsLR, [('template_roi', 'mask')]),
        (resample_to_fsLR, mask_fsLR, [('out_file', 'in_file')]),
        # Output
        (mask_fsLR, joinnode, [('out_file', 'bold_fsLR')]),
        (joinnode, outputnode, [('bold_fsLR', 'bold_fsLR')]),
    ])
    # fmt: on
    if estimate_goodvoxels:
        workflow.__desc__ += """\
A "goodvoxels" mask was applied during volume-to-surface sampling in fsLR space,
excluding voxels whose time-series have a locally high coefficient of variation.
"""
        goodvoxels_bold_mask_wf = init_goodvoxels_bold_mask_wf(mem_gb)
        # fmt: off
        workflow.connect([
            (inputnode, goodvoxels_bold_mask_wf, [
                ("bold_file", "inputnode.bold_file"),
                ("anat_ribbon", "inputnode.anat_ribbon"),
            ]),
            (goodvoxels_bold_mask_wf, volume_to_surface, [
                ("outputnode.goodvoxels_mask", "volume_roi"),
            ]),
            (goodvoxels_bold_mask_wf, outputnode, [
                ("outputnode.goodvoxels_mask", "goodvoxels_mask"),
            ]),
        ])
        # fmt: on
    return workflow
def init_bold_std_trans_wf(
    freesurfer: bool,
    mem_gb: float,
    omp_nthreads: int,
    spaces: SpatialReferences,
    multiecho: bool,
    name: str = "bold_std_trans_wf",
    use_compression: bool = True,
):
    """
    Sample fMRI into standard space with a single-step resampling of the original BOLD series.

    .. important::
        This workflow provides two outputnodes.
        One output node (with name ``poutputnode``) will be parameterized in a Nipype sense
        (see `Nipype iterables
        <https://miykael.github.io/nipype_tutorial/notebooks/basic_iteration.html>`__), and a
        second node (``outputnode``) will collapse the parameterized outputs into synchronous
        lists of the output fields listed below.
    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes
            from niworkflows.utils.spaces import SpatialReferences
            from fmriprep.workflows.bold import init_bold_std_trans_wf
            wf = init_bold_std_trans_wf(
                freesurfer=True,
                mem_gb=3,
                omp_nthreads=1,
                spaces=SpatialReferences(
                    spaces=["MNI152Lin",
                            ("MNIPediatricAsym", {"cohort": "6"})],
                    checkpoint=True),
                multiecho=False,
            )
    Parameters
    ----------
    freesurfer : :obj:`bool`
        Whether to generate FreeSurfer's aseg/aparc segmentations on BOLD space.
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
        A container for storing, organizing, and parsing spatial normalizations. Composed of
        :py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
        Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
        (e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
        (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
        the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
        dictionary with template specifications (e.g., a specification of ``{"resolution": 2}``
        would lead to resampling on a 2mm resolution of the space).
    name : :obj:`str`
        Name of workflow (default: ``bold_std_trans_wf``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    Inputs
    ------
    anat2std_xfm
        List of anatomical-to-standard space transforms generated during
        spatial normalization.
    bold_aparc
        FreeSurfer's ``aparc+aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    bold_aseg
        FreeSurfer's ``aseg.mgz`` atlas projected into the T1w reference
        (only if ``recon-all`` was run).
    bold_mask
        Skull-stripping mask of reference image
    bold_split
        Individual 3D volumes, not motion corrected
    t2star
        Estimated T2\\* map in BOLD native space
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    itk_bold_to_t1
        Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    templates
        List of templates that were applied as targets during
        spatial normalization.
    Outputs
    -------
    bold_std
        BOLD series, resampled to template space
    bold_std_ref
        Reference, contrast-enhanced summary of the BOLD series, resampled to template space
    bold_mask_std
        BOLD series mask in template space
    bold_aseg_std
        FreeSurfer's ``aseg.mgz`` atlas, in template space at the BOLD resolution
        (only if ``recon-all`` was run)
    bold_aparc_std
        FreeSurfer's ``aparc+aseg.mgz`` atlas, in template space at the BOLD resolution
        (only if ``recon-all`` was run)
    t2star_std
        Estimated T2\\* map in template space
    spatial_reference
        Formatted identifier (template plus spec) of the standard space each
        resampled output corresponds to
    template
        Template identifiers synchronized correspondingly to previously
        described outputs.
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.func.util import init_bold_reference_wf
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nibabel import GenerateSamplingReference
    from niworkflows.interfaces.nilearn import Merge
    from niworkflows.interfaces.utility import KeySelect
    from niworkflows.utils.spaces import format_reference
    from fmriprep.interfaces.maths import Clip
    workflow = Workflow(name=name)
    # Standard (3D volumetric) output spaces requested by the user.
    output_references = spaces.cached.get_spaces(nonstandard=False, dim=(3,))
    std_vol_references = [
        (s.fullname, s.spec) for s in spaces.references if s.standard and s.dim == 3
    ]
    if len(output_references) == 1:
        workflow.__desc__ = """\
The BOLD time-series were resampled into standard space,
generating a *preprocessed BOLD run in {tpl} space*.
""".format(
            tpl=output_references[0]
        )
    elif len(output_references) > 1:
        workflow.__desc__ = """\
The BOLD time-series were resampled into several standard spaces,
correspondingly generating the following *spatially-normalized,
preprocessed BOLD runs*: {tpl}.
""".format(
            tpl=", ".join(output_references)
        )
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "anat2std_xfm",
                "bold_aparc",
                "bold_aseg",
                "bold_mask",
                "bold_split",
                "t2star",
                "fieldwarp",
                "hmc_xforms",
                "itk_bold_to_t1",
                "name_source",
                "templates",
            ]
        ),
        name="inputnode",
    )
    iterablesource = pe.Node(niu.IdentityInterface(fields=["std_target"]), name="iterablesource")
    # Generate conversions for every template+spec at the input
    iterablesource.iterables = [("std_target", std_vol_references)]
    split_target = pe.Node(
        niu.Function(
            function=_split_spec,
            input_names=["in_target"],
            output_names=["space", "template", "spec"],
        ),
        run_without_submitting=True,
        name="split_target",
    )
    # Pick the anat->std transform matching the current template.
    select_std = pe.Node(
        KeySelect(fields=["anat2std_xfm"]),
        name="select_std",
        run_without_submitting=True,
    )
    select_tpl = pe.Node(
        niu.Function(function=_select_template),
        name="select_tpl",
        run_without_submitting=True,
    )
    gen_ref = pe.Node(
        GenerateSamplingReference(), name="gen_ref", mem_gb=0.3
    )  # 256x256x256 * 64 / 8 ~ 150MB)
    mask_std_tfm = pe.Node(
        ApplyTransforms(interpolation="MultiLabel"), name="mask_std_tfm", mem_gb=1
    )
    # Write corrected file in the designated output dir
    mask_merge_tfms = pe.Node(
        niu.Merge(2),
        name="mask_merge_tfms",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # Compose the single-shot transform chain:
    # in1=anat2std, in2=bold2t1, in3=fieldwarp, in4=per-volume HMC affines.
    merge_xforms = pe.Node(
        niu.Merge(4),
        name="merge_xforms",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    bold_to_std_transform = pe.Node(
        MultiApplyTransforms(interpolation="LanczosWindowedSinc", float=True, copy_dtype=True),
        name="bold_to_std_transform",
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )
    # Interpolation can occasionally produce below-zero values as an artifact
    threshold = pe.MapNode(
        Clip(minimum=0), name="threshold", iterfield=['in_file'], mem_gb=DEFAULT_MEMORY_MIN_GB
    )
    merge = pe.Node(Merge(compress=use_compression), name="merge", mem_gb=mem_gb * 3)
    # Generate a reference on the target standard space
    gen_final_ref = init_bold_reference_wf(omp_nthreads=omp_nthreads, pre_mask=True)
    # fmt:off
    workflow.connect([
        (iterablesource, split_target, [("std_target", "in_target")]),
        (iterablesource, select_tpl, [("std_target", "template")]),
        (inputnode, select_std, [("anat2std_xfm", "anat2std_xfm"),
                                 ("templates", "keys")]),
        (inputnode, mask_std_tfm, [("bold_mask", "input_image")]),
        (inputnode, gen_ref, [(("bold_split", _first), "moving_image")]),
        (inputnode, merge_xforms, [("hmc_xforms", "in4"),
                                   ("fieldwarp", "in3"),
                                   (("itk_bold_to_t1", _aslist), "in2")]),
        (inputnode, merge, [("name_source", "header_source")]),
        (inputnode, mask_merge_tfms, [(("itk_bold_to_t1", _aslist), "in2")]),
        (inputnode, bold_to_std_transform, [("bold_split", "input_image")]),
        (split_target, select_std, [("space", "key")]),
        (select_std, merge_xforms, [("anat2std_xfm", "in1")]),
        (select_std, mask_merge_tfms, [("anat2std_xfm", "in1")]),
        (split_target, gen_ref, [(("spec", _is_native), "keep_native")]),
        (select_tpl, gen_ref, [("out", "fixed_image")]),
        (merge_xforms, bold_to_std_transform, [("out", "transforms")]),
        (gen_ref, bold_to_std_transform, [("out_file", "reference_image")]),
        (gen_ref, mask_std_tfm, [("out_file", "reference_image")]),
        (mask_merge_tfms, mask_std_tfm, [("out", "transforms")]),
        (mask_std_tfm, gen_final_ref, [("output_image", "inputnode.bold_mask")]),
        (bold_to_std_transform, threshold, [("out_files", "in_file")]),
        (threshold, merge, [("out_file", "in_files")]),
        (merge, gen_final_ref, [("out_file", "inputnode.bold_file")]),
    ])
    # fmt:on
    output_names = [
        "bold_mask_std",
        "bold_std",
        "bold_std_ref",
        "spatial_reference",
        "template",
    ]
    if freesurfer:
        output_names.extend(["bold_aseg_std", "bold_aparc_std"])
    if multiecho:
        output_names.append("t2star_std")
    # Parameterized output node: one instance per iterated standard space.
    poutputnode = pe.Node(niu.IdentityInterface(fields=output_names), name="poutputnode")
    # fmt:off
    workflow.connect([
        # Connecting outputnode
        (iterablesource, poutputnode, [
            (("std_target", format_reference), "spatial_reference")]),
        (merge, poutputnode, [("out_file", "bold_std")]),
        (gen_final_ref, poutputnode, [("outputnode.ref_image", "bold_std_ref")]),
        (mask_std_tfm, poutputnode, [("output_image", "bold_mask_std")]),
        (select_std, poutputnode, [("key", "template")]),
    ])
    # fmt:on
    if freesurfer:
        # Sample the parcellation files to functional space
        aseg_std_tfm = pe.Node(
            ApplyTransforms(interpolation="MultiLabel"), name="aseg_std_tfm", mem_gb=1
        )
        aparc_std_tfm = pe.Node(
            ApplyTransforms(interpolation="MultiLabel"), name="aparc_std_tfm", mem_gb=1
        )
        # fmt:off
        workflow.connect([
            (inputnode, aseg_std_tfm, [("bold_aseg", "input_image")]),
            (inputnode, aparc_std_tfm, [("bold_aparc", "input_image")]),
            (select_std, aseg_std_tfm, [("anat2std_xfm", "transforms")]),
            (select_std, aparc_std_tfm, [("anat2std_xfm", "transforms")]),
            (gen_ref, aseg_std_tfm, [("out_file", "reference_image")]),
            (gen_ref, aparc_std_tfm, [("out_file", "reference_image")]),
            (aseg_std_tfm, poutputnode, [("output_image", "bold_aseg_std")]),
            (aparc_std_tfm, poutputnode, [("output_image", "bold_aparc_std")]),
        ])
        # fmt:on
    if multiecho:
        t2star_std_tfm = pe.Node(
            ApplyTransforms(interpolation="LanczosWindowedSinc", float=True),
            name="t2star_std_tfm",
            mem_gb=1,
        )
        # fmt:off
        workflow.connect([
            (inputnode, t2star_std_tfm, [("t2star", "input_image")]),
            (select_std, t2star_std_tfm, [("anat2std_xfm", "transforms")]),
            (gen_ref, t2star_std_tfm, [("out_file", "reference_image")]),
            (t2star_std_tfm, poutputnode, [("output_image", "t2star_std")]),
        ])
        # fmt:on
    # Connect parametric outputs to a Join outputnode
    outputnode = pe.JoinNode(
        niu.IdentityInterface(fields=output_names),
        name="outputnode",
        joinsource="iterablesource",
    )
    # fmt:off
    workflow.connect([
        (poutputnode, outputnode, [(f, f) for f in output_names]),
    ])
    # fmt:on
    return workflow
def init_bold_preproc_trans_wf(
    mem_gb: float,
    omp_nthreads: int,
    name: str = "bold_preproc_trans_wf",
    use_compression: bool = True,
    use_fieldwarp: bool = False,
    interpolation: str = "LanczosWindowedSinc",
):
    """
    Resample in native (original) space.
    This workflow resamples the input fMRI in its native (original)
    space in a "single shot" from the original BOLD series.
    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes
            from fmriprep.workflows.bold import init_bold_preproc_trans_wf
            wf = init_bold_preproc_trans_wf(mem_gb=3, omp_nthreads=1)
    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_preproc_trans_wf``)
    use_compression : :obj:`bool`
        Save registered BOLD series as ``.nii.gz``
    use_fieldwarp : :obj:`bool`
        Include SDC warp in single-shot transform from BOLD to MNI
    interpolation : :obj:`str`
        Interpolation type to be used by ANTs' ``applyTransforms``
        (default ``"LanczosWindowedSinc"``)
    Inputs
    ------
    bold_file
        Individual 3D volumes, not motion corrected
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing
    hmc_xforms
        List of affine transforms aligning each volume to ``ref_image`` in ITK format
    fieldwarp
        a :abbr:`DFM (displacements field map)` in ITK format
    Outputs
    -------
    bold
        BOLD series, resampled in native space, including all preprocessing
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.itk import MultiApplyTransforms
    from niworkflows.interfaces.nilearn import Merge
    from fmriprep.interfaces.maths import Clip
    workflow = Workflow(name=name)
    # Boilerplate text: mention SDC only when the fieldwarp is composed into
    # the single-shot transform.
    workflow.__desc__ = """\
The BOLD time-series (including slice-timing correction when applied)
were resampled onto their original, native space by applying
{transforms}.
These resampled BOLD time-series will be referred to as *preprocessed
BOLD in original space*, or just *preprocessed BOLD*.
""".format(
    transforms="""\
a single, composite transform to correct for head-motion and
susceptibility distortions"""
    if use_fieldwarp
    else """\
the transforms to correct for head-motion"""
    )
    inputnode = pe.Node(
        niu.IdentityInterface(fields=["name_source", "bold_file", "hmc_xforms", "fieldwarp"]),
        name="inputnode",
    )
    # NOTE(review): only "bold" is fed below; "bold_ref"/"bold_ref_brain" are
    # declared but never connected inside this workflow — presumably filled by
    # the caller. TODO confirm.
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["bold", "bold_ref", "bold_ref_brain"]),
        name="outputnode",
    )
    # Stack the (optional) SDC fieldwarp (in1) ahead of the per-volume HMC
    # affines (in2) into a single transform list for ANTs.
    merge_xforms = pe.Node(
        niu.Merge(2),
        name="merge_xforms",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )
    # One resampling per volume; memory estimate scales with thread count
    # because each worker holds its own volume in memory.
    bold_transform = pe.Node(
        MultiApplyTransforms(interpolation=interpolation, copy_dtype=True),
        name="bold_transform",
        mem_gb=mem_gb * 3 * omp_nthreads,
        n_procs=omp_nthreads,
    )
    # Interpolation can occasionally produce below-zero values as an artifact
    threshold = pe.MapNode(
        Clip(minimum=0), name="threshold", iterfield=['in_file'], mem_gb=DEFAULT_MEMORY_MIN_GB
    )
    # Reassemble the clipped 3D volumes into a 4D series, carrying over the
    # header of the original BOLD file (``header_source``).
    merge = pe.Node(Merge(compress=use_compression), name="merge", mem_gb=mem_gb * 3)
    # fmt:off
    workflow.connect([
        (inputnode, merge_xforms, [("fieldwarp", "in1"),
                                   ("hmc_xforms", "in2")]),
        # the first volume of the input series serves as the reference grid
        (inputnode, bold_transform, [("bold_file", "input_image"),
                                     (("bold_file", _first), "reference_image")]),
        (inputnode, merge, [("name_source", "header_source")]),
        (merge_xforms, bold_transform, [("out", "transforms")]),
        (bold_transform, threshold, [("out_files", "in_file")]),
        (threshold, merge, [("out_file", "in_files")]),
        (merge, outputnode, [("out_file", "bold")]),
    ])
    # fmt:on
    return workflow
def init_bold_grayords_wf(
    grayord_density: ty.Literal['91k', '170k'],
    mem_gb: float,
    repetition_time: float,
    name: str = "bold_grayords_wf",
):
    """
    Sample Grayordinates files onto the fsLR atlas.
    Outputs are in CIFTI2 format.
    Workflow Graph
        .. workflow::
            :graph2use: colored
            :simple_form: yes
            from fmriprep.workflows.bold.resampling import init_bold_grayords_wf
            wf = init_bold_grayords_wf(mem_gb=0.1, grayord_density="91k", repetition_time=2)
    Parameters
    ----------
    grayord_density : :class:`str`
        Either ``"91k"`` or ``"170k"``, representing the total *grayordinates*.
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    repetition_time : :obj:`float`
        Repetition time in seconds
    name : :obj:`str`
        Unique name for the subworkflow (default: ``"bold_grayords_wf"``)
    Inputs
    ------
    bold_fsLR : :obj:`str`
        List of paths to BOLD series resampled as functional GIFTI files in fsLR space
    bold_std : :obj:`str`
        List of BOLD conversions to standard spaces.
    spatial_reference : :obj:`str`
        List of unique identifiers corresponding to the BOLD standard-conversions.
    Outputs
    -------
    cifti_bold : :obj:`str`
        BOLD CIFTI dtseries.
    cifti_metadata : :obj:`str`
        BIDS metadata file corresponding to ``cifti_bold``.
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.cifti import GenerateCifti
    from niworkflows.interfaces.utility import KeySelect
    workflow = Workflow(name=name)
    # 91k grayordinates pair with 2mm subcortical MNI voxels; 170k with 1mm.
    mni_density = "2" if grayord_density == "91k" else "1"
    workflow.__desc__ = """\
*Grayordinates* files [@hcppipelines] containing {density} samples were also
generated with surface data transformed directly to fsLR space and subcortical
data transformed to {mni_density} mm resolution MNI152NLin6Asym space.
""".format(
    density=grayord_density, mni_density=mni_density
    )
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "bold_std",
                "bold_fsLR",
                "spatial_reference",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["cifti_bold", "cifti_metadata"]),
        name="outputnode",
    )
    # extract out to BOLD base
    select_std = pe.Node(
        KeySelect(fields=["bold_std"]),
        name="select_std",
        run_without_submitting=True,
        nohash=True,
    )
    # Pick the MNI152NLin6Asym conversion at the matching resolution out of
    # the list of standard-space resamplings, keyed by ``spatial_reference``.
    select_std.inputs.key = "MNI152NLin6Asym_res-%s" % mni_density
    gen_cifti = pe.Node(
        GenerateCifti(
            TR=repetition_time,
            grayordinates=grayord_density,
        ),
        name="gen_cifti",
    )
    # fmt:off
    workflow.connect([
        (inputnode, select_std, [("bold_std", "bold_std"),
                                 ("spatial_reference", "keys")]),
        (inputnode, gen_cifti, [("bold_fsLR", "surface_bolds")]),
        (select_std, gen_cifti, [("bold_std", "bold_file")]),
        (gen_cifti, outputnode, [("out_file", "cifti_bold"),
                                 ("out_metadata", "cifti_metadata")]),
    ])
    # fmt:on
    return workflow
def _split_spec(in_target):
space, spec = in_target
template = space.split(":")[0]
return space, template, spec
def _select_template(template):
    """Resolve a ``(template, spec)`` pair to a concrete template file path.

    A non-"native" resolution is forwarded as-is; a "native" resolution is
    mapped onto an existing template grid, preferring 2mm and falling back
    to 1mm.
    """
    from niworkflows.utils.misc import get_template_specs

    tpl_id, tpl_spec = template
    tpl_id = tpl_id.split(":")[0]  # Drop any cohort modifier if present
    query = tpl_spec.copy()
    query.setdefault("suffix", "T1w")
    # Normalize resolution: "res" and "resolution" are accepted synonyms
    resolution = query.pop("res", None) or query.pop("resolution", None) or "native"
    if resolution != "native":
        query["resolution"] = resolution
        return get_template_specs(tpl_id, template_spec=query)[0]
    # "native" has no template file — map it onto an existing resolution
    query["resolution"] = 2
    try:
        found = get_template_specs(tpl_id, template_spec=query)
    except RuntimeError:
        query["resolution"] = 1
        found = get_template_specs(tpl_id, template_spec=query)
    return found[0]
def _first(inlist):
return inlist[0]
def _aslist(in_value):
if isinstance(in_value, list):
return in_value
return [in_value]
def _is_native(in_value):
return in_value.get("resolution") == "native" or in_value.get("res") == "native"
def _itk2lta(in_file, src_file, dst_file):
    """Convert an affine transform file to FreeSurfer LTA format.

    ``in_file`` is read as LTA when its name ends in ``.lta``, as ITK
    otherwise. The converted transform is written to ``out.lta`` in the
    current working directory (nipype runs this inside a node directory),
    and its absolute path is returned as a string.
    """
    from pathlib import Path
    import nitransforms as nt

    out_file = Path("out.lta").absolute()
    in_fmt = "fs" if in_file.endswith(".lta") else "itk"
    transform = nt.linear.load(in_file, fmt=in_fmt, reference=src_file)
    transform.to_filename(out_file, moving=dst_file, fmt="fs")
    return str(out_file)
def _select_surfaces(
hemi,
surfaces,
morphometrics,
spherical_registrations,
template_spheres,
template_rois,
):
# This function relies on the basenames of the files to differ by L/R or l/r
# so that the sorting correctly identifies left or right.
import os
import re
idx = 0 if hemi == "L" else 1
container = {
'white': [],
'pial': [],
'midthickness': [],
'thickness': [],
'sphere': sorted(spherical_registrations),
'template_sphere': sorted(template_spheres),
'template_roi': sorted(template_rois),
}
find_name = re.compile(r'(?:^|[^d])(?P<name>white|pial|midthickness|thickness)')
for surface in surfaces + morphometrics:
match = find_name.search(os.path.basename(surface))
if match:
container[match.group('name')].append(surface)
return tuple(sorted(surflist, key=os.path.basename)[idx] for surflist in container.values())
| 48,435 | 33.230389 | 99 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/t2s.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Generate T2* map from multi-echo BOLD images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_t2s_wf
"""
import typing as ty
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from ... import config
from ...interfaces.maths import Clip, Label2Mask
from ...interfaces.multiecho import T2SMap
from ...interfaces.reports import LabeledHistogram
LOGGER = config.loggers.workflow
# pylint: disable=R0914
def init_bold_t2s_wf(
    echo_times: ty.Sequence[float],
    mem_gb: float,
    omp_nthreads: int,
    name: str = 'bold_t2s_wf',
):
    r"""
    Combine multiple echos of :abbr:`ME-EPI (multi-echo echo-planar imaging)`.
    This workflow wraps the `tedana`_ `T2* workflow`_ to optimally
    combine multiple preprocessed echos and derive a T2\ :sup:`★` map.
    The following steps are performed:
    #. Compute the T2\ :sup:`★` map
    #. Create an optimally combined ME-EPI time series
    .. _tedana: https://github.com/me-ica/tedana
    .. _`T2* workflow`: https://tedana.readthedocs.io/en/latest/generated/tedana.workflows.t2smap_workflow.html#tedana.workflows.t2smap_workflow # noqa
    Parameters
    ----------
    echo_times : :obj:`list` or :obj:`tuple`
        list of TEs associated with each echo
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_t2s_wf``)
    Inputs
    ------
    bold_file
        list of individual echo files
    bold_mask
        a binary mask to apply to the BOLD files
    Outputs
    -------
    bold
        the optimally combined time series for all supplied echos
    t2star_map
        the calculated T2\ :sup:`★` map
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.morphology import BinaryDilation
    workflow = Workflow(name=name)
    # Boilerplate: describe the monoexponential decay fit actually configured
    # ("curvefit" = nonlinear fit seeded by log-linear estimates).
    if config.workflow.me_t2s_fit_method == "curvefit":
        fit_str = (
            "nonlinear regression. "
            "The T2<sup>★</sup>/S<sub>0</sub> estimates from a log-linear regression fit "
            "were used for initial values"
        )
    else:
        fit_str = "log-linear regression"
    workflow.__desc__ = f"""\
A T2<sup>★</sup> map was estimated from the preprocessed EPI echoes, by voxel-wise fitting
the maximal number of echoes with reliable signal in that voxel to a monoexponential signal
decay model with {fit_str}.
The calculated T2<sup>★</sup> map was then used to optimally combine preprocessed BOLD across
echoes following the method described in [@posse_t2s].
The optimally combined time series was carried forward as the *preprocessed BOLD*.
"""
    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'bold_mask']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['bold', 't2star_map']), name='outputnode')
    # Level 25 sits between INFO (20) and WARNING (30)
    LOGGER.log(25, 'Generating T2* map and optimally combined ME-EPI time series.')
    # Dilate the brain mask by 2 voxels before fitting — presumably to retain
    # signal at the brain edge; TODO confirm rationale.
    dilate_mask = pe.Node(BinaryDilation(radius=2), name='dilate_mask')
    # Memory estimate scales with the number of echoes held simultaneously
    t2smap_node = pe.Node(
        T2SMap(echo_times=list(echo_times), fittype=config.workflow.me_t2s_fit_method),
        name='t2smap_node',
        mem_gb=2.5 * mem_gb * len(echo_times),
    )
    # fmt:off
    workflow.connect([
        (inputnode, dilate_mask, [('bold_mask', 'in_mask')]),
        (inputnode, t2smap_node, [('bold_file', 'in_files')]),
        (dilate_mask, t2smap_node, [('out_mask', 'mask_file')]),
        (t2smap_node, outputnode, [('optimal_comb', 'bold'),
                                   ('t2star_map', 't2star_map')]),
    ])
    # fmt:on
    return workflow
def init_t2s_reporting_wf(name: str = 't2s_reporting_wf'):
    r"""
    Generate T2\*-map reports.
    This workflow generates a histogram of estimated T2\* values (in seconds) in the
    cortical and subcortical gray matter mask.
    Parameters
    ----------
    name : :obj:`str`
        Name of workflow (default: ``t2s_reporting_wf``)
    Inputs
    ------
    t2star_file
        estimated T2\* map
    boldref
        reference BOLD file
    label_file
        an integer label file identifying gray matter with value ``1``
    label_bold_xform
        Affine matrix that maps the label file into alignment with the native
        BOLD space; can be ``"identity"`` if label file is already aligned
    Outputs
    -------
    t2star_hist
        an SVG histogram showing estimated T2\* values in gray matter
    t2s_comp_report
        a before/after figure comparing the reference BOLD image and T2\* map
    """
    from nipype.pipeline import engine as pe
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
    from niworkflows.interfaces.reportlets.registration import (
        SimpleBeforeAfterRPT as SimpleBeforeAfter,
    )
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['t2star_file', 'boldref', 'label_file', 'label_bold_xform']),
        name='inputnode',
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['t2star_hist', 't2s_comp_report']), name='outputnode'
    )
    # Resample the integer label file onto the T2* grid (nearest-label interp)
    label_tfm = pe.Node(ApplyTransforms(interpolation="MultiLabel"), name="label_tfm")
    # Gray matter carries label value 1 in the incoming label file (see docstring)
    gm_mask = pe.Node(Label2Mask(label_val=1), name="gm_mask")
    # Cap T2* at 0.1 s — presumably to keep outliers from stretching the
    # histogram axis; TODO confirm intended ceiling.
    clip_t2star = pe.Node(Clip(maximum=0.1), name="clip_t2star")
    t2s_hist = pe.Node(
        LabeledHistogram(mapping={1: "Gray matter"}, xlabel='T2* (s)'), name='t2s_hist'
    )
    t2s_comparison = pe.Node(
        SimpleBeforeAfter(
            before_label="BOLD Reference",
            after_label="T2* Map",
            dismiss_affine=True,
        ),
        name="t2s_comparison",
        mem_gb=0.1,
    )
    # fmt:off
    workflow.connect([
        (inputnode, label_tfm, [('label_file', 'input_image'),
                                ('t2star_file', 'reference_image'),
                                ('label_bold_xform', 'transforms')]),
        (inputnode, clip_t2star, [('t2star_file', 'in_file')]),
        (clip_t2star, t2s_hist, [('out_file', 'in_file')]),
        (label_tfm, gm_mask, [('output_image', 'in_file')]),
        (gm_mask, t2s_hist, [('out_file', 'label_file')]),
        (inputnode, t2s_comparison, [('boldref', 'before'),
                                     ('t2star_file', 'after')]),
        # NOTE(review): the GM mask feeds the reportlet's ``wm_seg`` contour
        # input — it draws the GM outline despite the input's name.
        (gm_mask, t2s_comparison, [('out_file', 'wm_seg')]),
        (t2s_hist, outputnode, [('out_report', 't2star_hist')]),
        (t2s_comparison, outputnode, [('out_report', 't2s_comp_report')]),
    ])
    # fmt:on
    return workflow
| 7,684 | 33.308036 | 152 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/confounds.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Calculate BOLD confounds
^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_confs_wf
"""
from nipype.algorithms import confounds as nac
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from templateflow.api import get as get_template
from ...config import DEFAULT_MEMORY_MIN_GB
from ...interfaces import DerivativesDataSink
from ...interfaces.confounds import (
FilterDropped,
FMRISummary,
GatherConfounds,
RenameACompCor,
)
def init_bold_confs_wf(
mem_gb: float,
metadata: dict,
regressors_all_comps: bool,
regressors_dvars_th: float,
regressors_fd_th: float,
freesurfer: bool = False,
name: str = "bold_confs_wf",
):
"""
Build a workflow to generate and write out confounding signals.
This workflow calculates confounds for a BOLD series, and aggregates them
into a :abbr:`TSV (tab-separated value)` file, for use as nuisance
regressors in a :abbr:`GLM (general linear model)`.
The following confounds are calculated, with column headings in parentheses:
#. Region-wise average signal (``csf``, ``white_matter``, ``global_signal``)
#. DVARS - original and standardized variants (``dvars``, ``std_dvars``)
#. Framewise displacement, based on head-motion parameters
(``framewise_displacement``)
#. Temporal CompCor (``t_comp_cor_XX``)
#. Anatomical CompCor (``a_comp_cor_XX``)
#. Cosine basis set for high-pass filtering w/ 0.008 Hz cut-off
(``cosine_XX``)
#. Non-steady-state volumes (``non_steady_state_XX``)
#. Estimated head-motion parameters, in mm and rad
(``trans_x``, ``trans_y``, ``trans_z``, ``rot_x``, ``rot_y``, ``rot_z``)
Prior to estimating aCompCor and tCompCor, non-steady-state volumes are
censored and high-pass filtered using a :abbr:`DCT (discrete cosine
transform)` basis.
The cosine basis, as well as one regressor per censored volume, are included
for convenience.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.confounds import init_bold_confs_wf
wf = init_bold_confs_wf(
mem_gb=1,
metadata={},
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
)
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB - please note that this size
should be calculated after resamplings that may extend
the FoV
metadata : :obj:`dict`
BIDS metadata for BOLD file
name : :obj:`str`
Name of workflow (default: ``bold_confs_wf``)
regressors_all_comps : :obj:`bool`
Indicates whether CompCor decompositions should return all
components instead of the minimal number of components necessary
to explain 50 percent of the variance in the decomposition mask.
regressors_dvars_th : :obj:`float`
Criterion for flagging DVARS outliers
regressors_fd_th : :obj:`float`
Criterion for flagging framewise displacement outliers
Inputs
------
bold
BOLD image, after the prescribed corrections (STC, HMC and SDC)
when available.
bold_mask
BOLD series mask
movpar_file
SPM-formatted motion parameters file
rmsd_file
Framewise displacement as measured by ``fsl_motion_outliers``.
skip_vols
number of non steady state volumes
t1w_mask
Mask of the skull-stripped template image
t1w_tpms
List of tissue probability maps in T1w space
t1_bold_xform
Affine matrix that maps the T1w space into alignment with
the native BOLD space
Outputs
-------
confounds_file
TSV of all aggregated confounds
rois_report
Reportlet visualizing white-matter/CSF mask used for aCompCor,
the ROI for tCompCor and the BOLD brain mask.
confounds_metadata
Confounds metadata dictionary.
crown_mask
Mask of brain edge voxels
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.confounds import ExpandModel, SpikeRegressors
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.images import SignalExtraction
from niworkflows.interfaces.morphology import BinaryDilation, BinarySubtraction
from niworkflows.interfaces.nibabel import ApplyMask, Binarize
from niworkflows.interfaces.patches import RobustACompCor as ACompCor
from niworkflows.interfaces.patches import RobustTCompCor as TCompCor
from niworkflows.interfaces.plotting import (
CompCorVariancePlot,
ConfoundsCorrelationPlot,
)
from niworkflows.interfaces.reportlets.masks import ROIsPlot
from niworkflows.interfaces.utility import TSV2JSON, AddTSVHeader, DictMerge
from ...interfaces.confounds import aCompCorMasks
gm_desc = (
"dilating a GM mask extracted from the FreeSurfer's *aseg* segmentation"
if freesurfer
else "thresholding the corresponding partial volume map at 0.05"
)
workflow = Workflow(name=name)
workflow.__desc__ = f"""\
Several confounding time-series were calculated based on the
*preprocessed BOLD*: framewise displacement (FD), DVARS and
three region-wise global signals.
FD was computed using two formulations following Power (absolute sum of
relative motions, @power_fd_dvars) and Jenkinson (relative root mean square
displacement between affines, @mcflirt).
FD and DVARS are calculated for each functional run, both using their
implementations in *Nipype* [following the definitions by @power_fd_dvars].
The three global signals are extracted within the CSF, the WM, and
the whole-brain masks.
Additionally, a set of physiological regressors were extracted to
allow for component-based noise correction [*CompCor*, @compcor].
Principal components are estimated after high-pass filtering the
*preprocessed BOLD* time-series (using a discrete cosine filter with
128s cut-off) for the two *CompCor* variants: temporal (tCompCor)
and anatomical (aCompCor).
tCompCor components are then calculated from the top 2% variable
voxels within the brain mask.
For aCompCor, three probabilistic masks (CSF, WM and combined CSF+WM)
are generated in anatomical space.
The implementation differs from that of Behzadi et al. in that instead
of eroding the masks by 2 pixels on BOLD space, a mask of pixels that
likely contain a volume fraction of GM is subtracted from the aCompCor masks.
This mask is obtained by {gm_desc}, and it ensures components are not extracted
from voxels containing a minimal fraction of GM.
Finally, these masks are resampled into BOLD space and binarized by
thresholding at 0.99 (as in the original implementation).
Components are also calculated separately within the WM and CSF masks.
For each CompCor decomposition, the *k* components with the largest singular
values are retained, such that the retained components' time series are
sufficient to explain 50 percent of variance across the nuisance mask (CSF,
WM, combined, or temporal). The remaining components are dropped from
consideration.
The head-motion estimates calculated in the correction step were also
placed within the corresponding confounds file.
The confound time series derived from head motion estimates and global
signals were expanded with the inclusion of temporal derivatives and
quadratic terms for each [@confounds_satterthwaite_2013].
Frames that exceeded a threshold of {regressors_fd_th} mm FD or
{regressors_dvars_th} standardized DVARS were annotated as motion outliers.
Additional nuisance timeseries are calculated by means of principal components
analysis of the signal found within a thin band (*crown*) of voxels around
the edge of the brain, as proposed by [@patriat_improved_2017].
"""
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"bold",
"bold_mask",
"movpar_file",
"rmsd_file",
"skip_vols",
"t1w_mask",
"t1w_tpms",
"t1_bold_xform",
]
),
name="inputnode",
)
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
"confounds_file",
"confounds_metadata",
"acompcor_masks",
"tcompcor_mask",
"crown_mask",
]
),
name="outputnode",
)
# Project T1w mask into BOLD space and merge with BOLD brainmask
t1w_mask_tfm = pe.Node(
ApplyTransforms(interpolation="MultiLabel"),
name="t1w_mask_tfm",
)
union_mask = pe.Node(niu.Function(function=_binary_union), name="union_mask")
# Create the crown mask
dilated_mask = pe.Node(BinaryDilation(), name="dilated_mask")
subtract_mask = pe.Node(BinarySubtraction(), name="subtract_mask")
# DVARS
dvars = pe.Node(
nac.ComputeDVARS(save_nstd=True, save_std=True, remove_zerovariance=True),
name="dvars",
mem_gb=mem_gb,
)
# Frame displacement
fdisp = pe.Node(nac.FramewiseDisplacement(parameter_source="SPM"), name="fdisp", mem_gb=mem_gb)
# Generate aCompCor probseg maps
acc_masks = pe.Node(aCompCorMasks(is_aseg=freesurfer), name="acc_masks")
# Resample probseg maps in BOLD space via T1w-to-BOLD transform
acc_msk_tfm = pe.MapNode(
ApplyTransforms(interpolation="Gaussian"),
iterfield=["input_image"],
name="acc_msk_tfm",
mem_gb=0.1,
)
acc_msk_brain = pe.MapNode(ApplyMask(), name="acc_msk_brain", iterfield=["in_file"])
acc_msk_bin = pe.MapNode(Binarize(thresh_low=0.99), name="acc_msk_bin", iterfield=["in_file"])
acompcor = pe.Node(
ACompCor(
components_file="acompcor.tsv",
header_prefix="a_comp_cor_",
pre_filter="cosine",
save_pre_filter=True,
save_metadata=True,
mask_names=["CSF", "WM", "combined"],
merge_method="none",
failure_mode="NaN",
),
name="acompcor",
mem_gb=mem_gb,
)
crowncompcor = pe.Node(
ACompCor(
components_file="crown_compcor.tsv",
header_prefix="edge_comp_",
pre_filter="cosine",
save_pre_filter=True,
save_metadata=True,
mask_names=["Edge"],
merge_method="none",
failure_mode="NaN",
num_components=24,
),
name="crowncompcor",
mem_gb=mem_gb,
)
tcompcor = pe.Node(
TCompCor(
components_file="tcompcor.tsv",
header_prefix="t_comp_cor_",
pre_filter="cosine",
save_pre_filter=True,
save_metadata=True,
percentile_threshold=0.02,
failure_mode="NaN",
),
name="tcompcor",
mem_gb=mem_gb,
)
# Set number of components
if regressors_all_comps:
acompcor.inputs.num_components = "all"
tcompcor.inputs.num_components = "all"
else:
acompcor.inputs.variance_threshold = 0.5
tcompcor.inputs.variance_threshold = 0.5
# Set TR if present
if "RepetitionTime" in metadata:
tcompcor.inputs.repetition_time = metadata["RepetitionTime"]
acompcor.inputs.repetition_time = metadata["RepetitionTime"]
crowncompcor.inputs.repetition_time = metadata["RepetitionTime"]
# Split aCompCor results into a_comp_cor, c_comp_cor, w_comp_cor
rename_acompcor = pe.Node(RenameACompCor(), name="rename_acompcor")
# Global and segment regressors
signals_class_labels = [
"global_signal",
"csf",
"white_matter",
"csf_wm",
"tcompcor",
]
merge_rois = pe.Node(
niu.Merge(3, ravel_inputs=True), name="merge_rois", run_without_submitting=True
)
signals = pe.Node(
SignalExtraction(class_labels=signals_class_labels), name="signals", mem_gb=mem_gb
)
# Arrange confounds
add_dvars_header = pe.Node(
AddTSVHeader(columns=["dvars"]),
name="add_dvars_header",
mem_gb=0.01,
run_without_submitting=True,
)
add_std_dvars_header = pe.Node(
AddTSVHeader(columns=["std_dvars"]),
name="add_std_dvars_header",
mem_gb=0.01,
run_without_submitting=True,
)
add_motion_headers = pe.Node(
AddTSVHeader(columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]),
name="add_motion_headers",
mem_gb=0.01,
run_without_submitting=True,
)
add_rmsd_header = pe.Node(
AddTSVHeader(columns=["rmsd"]),
name="add_rmsd_header",
mem_gb=0.01,
run_without_submitting=True,
)
concat = pe.Node(GatherConfounds(), name="concat", mem_gb=0.01, run_without_submitting=True)
# CompCor metadata
tcc_metadata_filter = pe.Node(FilterDropped(), name="tcc_metadata_filter")
acc_metadata_filter = pe.Node(FilterDropped(), name="acc_metadata_filter")
tcc_metadata_fmt = pe.Node(
TSV2JSON(
index_column="component",
drop_columns=["mask"],
output=None,
additional_metadata={"Method": "tCompCor"},
enforce_case=True,
),
name="tcc_metadata_fmt",
)
acc_metadata_fmt = pe.Node(
TSV2JSON(
index_column="component",
output=None,
additional_metadata={"Method": "aCompCor"},
enforce_case=True,
),
name="acc_metadata_fmt",
)
crowncc_metadata_fmt = pe.Node(
TSV2JSON(
index_column="component",
output=None,
additional_metadata={"Method": "EdgeRegressor"},
enforce_case=True,
),
name="crowncc_metadata_fmt",
)
mrg_conf_metadata = pe.Node(
niu.Merge(3), name="merge_confound_metadata", run_without_submitting=True
)
mrg_conf_metadata.inputs.in3 = {label: {"Method": "Mean"} for label in signals_class_labels}
mrg_conf_metadata2 = pe.Node(
DictMerge(), name="merge_confound_metadata2", run_without_submitting=True
)
# Expand model to include derivatives and quadratics
model_expand = pe.Node(
ExpandModel(model_formula="(dd1(rps + wm + csf + gsr))^^2 + others"),
name="model_expansion",
)
# Add spike regressors
spike_regress = pe.Node(
SpikeRegressors(fd_thresh=regressors_fd_th, dvars_thresh=regressors_dvars_th),
name="spike_regressors",
)
# Generate reportlet (ROIs)
mrg_compcor = pe.Node(
niu.Merge(3, ravel_inputs=True), name="mrg_compcor", run_without_submitting=True
)
rois_plot = pe.Node(
ROIsPlot(colors=["b", "magenta", "g"], generate_report=True),
name="rois_plot",
mem_gb=mem_gb,
)
ds_report_bold_rois = pe.Node(
DerivativesDataSink(desc="rois", datatype="figures", dismiss_entities=("echo",)),
name="ds_report_bold_rois",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# Generate reportlet (CompCor)
mrg_cc_metadata = pe.Node(
niu.Merge(2), name="merge_compcor_metadata", run_without_submitting=True
)
compcor_plot = pe.Node(
CompCorVariancePlot(
variance_thresholds=(0.5, 0.7, 0.9),
metadata_sources=["tCompCor", "aCompCor", "crownCompCor"],
),
name="compcor_plot",
)
ds_report_compcor = pe.Node(
DerivativesDataSink(desc="compcorvar", datatype="figures", dismiss_entities=("echo",)),
name="ds_report_compcor",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# Generate reportlet (Confound correlation)
conf_corr_plot = pe.Node(
ConfoundsCorrelationPlot(reference_column="global_signal", max_dim=20),
name="conf_corr_plot",
)
ds_report_conf_corr = pe.Node(
DerivativesDataSink(desc="confoundcorr", datatype="figures", dismiss_entities=("echo",)),
name="ds_report_conf_corr",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
def _last(inlist):
return inlist[-1]
def _select_cols(table):
import pandas as pd
return [
col
for col in pd.read_table(table, nrows=2).columns
if not col.startswith(("a_comp_cor_", "t_comp_cor_", "std_dvars"))
]
# fmt:off
workflow.connect([
# connect inputnode to each non-anatomical confound node
(inputnode, dvars, [("bold", "in_file"),
("bold_mask", "in_mask")]),
(inputnode, fdisp, [("movpar_file", "in_file")]),
# Brain mask
(inputnode, t1w_mask_tfm, [("t1w_mask", "input_image"),
("bold_mask", "reference_image"),
("t1_bold_xform", "transforms")]),
(inputnode, union_mask, [("bold_mask", "mask1")]),
(t1w_mask_tfm, union_mask, [("output_image", "mask2")]),
(union_mask, dilated_mask, [("out", "in_mask")]),
(union_mask, subtract_mask, [("out", "in_subtract")]),
(dilated_mask, subtract_mask, [("out_mask", "in_base")]),
(subtract_mask, outputnode, [("out_mask", "crown_mask")]),
# aCompCor
(inputnode, acompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes")]),
(inputnode, acc_masks, [("t1w_tpms", "in_vfs"),
(("bold", _get_zooms), "bold_zooms")]),
(inputnode, acc_msk_tfm, [("t1_bold_xform", "transforms"),
("bold_mask", "reference_image")]),
(inputnode, acc_msk_brain, [("bold_mask", "in_mask")]),
(acc_masks, acc_msk_tfm, [("out_masks", "input_image")]),
(acc_msk_tfm, acc_msk_brain, [("output_image", "in_file")]),
(acc_msk_brain, acc_msk_bin, [("out_file", "in_file")]),
(acc_msk_bin, acompcor, [("out_file", "mask_files")]),
(acompcor, rename_acompcor, [("components_file", "components_file"),
("metadata_file", "metadata_file")]),
# crownCompCor
(inputnode, crowncompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes")]),
(subtract_mask, crowncompcor, [("out_mask", "mask_files")]),
# tCompCor
(inputnode, tcompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes"),
("bold_mask", "mask_files")]),
# Global signals extraction (constrained by anatomy)
(inputnode, signals, [("bold", "in_file")]),
(inputnode, merge_rois, [("bold_mask", "in1")]),
(acc_msk_bin, merge_rois, [("out_file", "in2")]),
(tcompcor, merge_rois, [("high_variance_masks", "in3")]),
(merge_rois, signals, [("out", "label_files")]),
# Collate computed confounds together
(inputnode, add_motion_headers, [("movpar_file", "in_file")]),
(inputnode, add_rmsd_header, [("rmsd_file", "in_file")]),
(dvars, add_dvars_header, [("out_nstd", "in_file")]),
(dvars, add_std_dvars_header, [("out_std", "in_file")]),
(signals, concat, [("out_file", "signals")]),
(fdisp, concat, [("out_file", "fd")]),
(tcompcor, concat, [("components_file", "tcompcor"),
("pre_filter_file", "cos_basis")]),
(rename_acompcor, concat, [("components_file", "acompcor")]),
(crowncompcor, concat, [("components_file", "crowncompcor")]),
(add_motion_headers, concat, [("out_file", "motion")]),
(add_rmsd_header, concat, [("out_file", "rmsd")]),
(add_dvars_header, concat, [("out_file", "dvars")]),
(add_std_dvars_header, concat, [("out_file", "std_dvars")]),
# Confounds metadata
(tcompcor, tcc_metadata_filter, [("metadata_file", "in_file")]),
(tcc_metadata_filter, tcc_metadata_fmt, [("out_file", "in_file")]),
(rename_acompcor, acc_metadata_filter, [("metadata_file", "in_file")]),
(acc_metadata_filter, acc_metadata_fmt, [("out_file", "in_file")]),
(crowncompcor, crowncc_metadata_fmt, [("metadata_file", "in_file")]),
(tcc_metadata_fmt, mrg_conf_metadata, [("output", "in1")]),
(acc_metadata_fmt, mrg_conf_metadata, [("output", "in2")]),
(crowncc_metadata_fmt, mrg_conf_metadata, [("output", "in3")]),
(mrg_conf_metadata, mrg_conf_metadata2, [("out", "in_dicts")]),
# Expand the model with derivatives, quadratics, and spikes
(concat, model_expand, [("confounds_file", "confounds_file")]),
(model_expand, spike_regress, [("confounds_file", "confounds_file")]),
# Set outputs
(spike_regress, outputnode, [("confounds_file", "confounds_file")]),
(mrg_conf_metadata2, outputnode, [("out_dict", "confounds_metadata")]),
(tcompcor, outputnode, [("high_variance_masks", "tcompcor_mask")]),
(acc_msk_bin, outputnode, [("out_file", "acompcor_masks")]),
(inputnode, rois_plot, [("bold", "in_file"),
("bold_mask", "in_mask")]),
(tcompcor, mrg_compcor, [("high_variance_masks", "in1")]),
(acc_msk_bin, mrg_compcor, [(("out_file", _last), "in2")]),
(subtract_mask, mrg_compcor, [("out_mask", "in3")]),
(mrg_compcor, rois_plot, [("out", "in_rois")]),
(rois_plot, ds_report_bold_rois, [("out_report", "in_file")]),
(tcompcor, mrg_cc_metadata, [("metadata_file", "in1")]),
(acompcor, mrg_cc_metadata, [("metadata_file", "in2")]),
(crowncompcor, mrg_cc_metadata, [("metadata_file", "in3")]),
(mrg_cc_metadata, compcor_plot, [("out", "metadata_files")]),
(compcor_plot, ds_report_compcor, [("out_file", "in_file")]),
(concat, conf_corr_plot, [("confounds_file", "confounds_file"),
(("confounds_file", _select_cols), "columns")]),
(conf_corr_plot, ds_report_conf_corr, [("out_file", "in_file")]),
])
# fmt: on
return workflow
def init_carpetplot_wf(
    mem_gb: float, metadata: dict, cifti_output: bool, name: str = "bold_carpet_wf"
):
    """
    Build a workflow to generate *carpet* plots.

    Resamples the MNI parcellation (ad-hoc parcellation derived from the
    Harvard-Oxford template and others).

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB - please note that this size
        should be calculated after resamplings that may extend
        the FoV
    metadata : :obj:`dict`
        BIDS metadata for BOLD file
    cifti_output : :obj:`bool`
        Whether a BOLD series in CIFTI format is available; if so, it is fed
        to the plotter and the parcellation is switched accordingly
    name : :obj:`str`
        Name of workflow (default: ``bold_carpet_wf``)

    Inputs
    ------
    bold
        BOLD image, after the prescribed corrections (STC, HMC and SDC)
        when available.
    bold_mask
        BOLD series mask
    confounds_file
        TSV of all aggregated confounds
    t1_bold_xform
        Affine matrix that maps the T1w space into alignment with
        the native BOLD space
    std2anat_xfm
        ANTs-compatible affine-and-warp transform file
    cifti_bold
        BOLD image in CIFTI format, to be used in place of volumetric BOLD
    crown_mask
        Mask of brain edge voxels
    acompcor_mask
        Mask of deep WM+CSF
    dummy_scans
        Number of nonsteady states to be dropped at the beginning of the timeseries.

    Outputs
    -------
    out_carpetplot
        Path of the generated SVG file
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "bold",
                "bold_mask",
                "confounds_file",
                "t1_bold_xform",
                "std2anat_xfm",
                "cifti_bold",
                "crown_mask",
                "acompcor_mask",
                "dummy_scans",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["out_carpetplot"]), name="outputnode")

    # Carpetplot and confounds plot
    conf_plot = pe.Node(
        FMRISummary(
            tr=metadata["RepetitionTime"],
            confounds_list=[
                ("global_signal", None, "GS"),
                ("csf", None, "GSCSF"),
                ("white_matter", None, "GSWM"),
                ("std_dvars", None, "DVARS"),
                ("framewise_displacement", "mm", "FD"),
            ],
        ),
        name="conf_plot",
        mem_gb=mem_gb,
    )
    ds_report_bold_conf = pe.Node(
        DerivativesDataSink(
            desc="carpetplot", datatype="figures", extension="svg", dismiss_entities=("echo",)
        ),
        name="ds_report_bold_conf",
        run_without_submitting=True,
        mem_gb=DEFAULT_MEMORY_MIN_GB,
    )

    # Collapse the resampled segmentation into carpet-plot row groups;
    # the volumetric (NIfTI) label scheme is used only when no CIFTI BOLD is plotted
    parcels = pe.Node(niu.Function(function=_carpet_parcellation), name="parcels")
    parcels.inputs.nifti = not cifti_output
    # List transforms
    mrg_xfms = pe.Node(niu.Merge(2), name="mrg_xfms")
    # Warp segmentation into EPI space
    resample_parc = pe.Node(
        ApplyTransforms(
            dimension=3,
            input_image=str(
                get_template(
                    "MNI152NLin2009cAsym",
                    resolution=1,
                    desc="carpet",
                    suffix="dseg",
                    extension=[".nii", ".nii.gz"],
                )
            ),
            interpolation="MultiLabel",
            args="-u int",
        ),
        name="resample_parc",
    )

    workflow = Workflow(name=name)
    if cifti_output:
        workflow.connect(inputnode, "cifti_bold", conf_plot, "in_cifti")

    # fmt:off
    workflow.connect([
        (inputnode, mrg_xfms, [("t1_bold_xform", "in1"),
                               ("std2anat_xfm", "in2")]),
        (inputnode, resample_parc, [("bold_mask", "reference_image")]),
        (inputnode, parcels, [("crown_mask", "crown_mask")]),
        (inputnode, parcels, [("acompcor_mask", "acompcor_mask")]),
        (inputnode, conf_plot, [("bold", "in_nifti"),
                                ("confounds_file", "confounds_file"),
                                ("dummy_scans", "drop_trs")]),
        (mrg_xfms, resample_parc, [("out", "transforms")]),
        (resample_parc, parcels, [("output_image", "segmentation")]),
        (parcels, conf_plot, [("out", "in_segm")]),
        (conf_plot, ds_report_bold_conf, [("out_file", "in_file")]),
        (conf_plot, outputnode, [("out_file", "out_carpetplot")]),
    ])
    # fmt:on
    return workflow
def _binary_union(mask1, mask2):
    """Generate the voxelwise union of two binary masks, saved as ``mask_union.nii.gz``."""
    from pathlib import Path

    import nibabel as nb
    import numpy as np

    ref = nb.load(mask1)
    union = (np.asanyarray(ref.dataobj, dtype=int) > 0) | (
        np.asanyarray(nb.load(mask2).dataobj, dtype=int) > 0
    )
    # Reuse the first mask's image class, affine, and header for the output
    out_img = ref.__class__(union, ref.affine, ref.header)
    out_img.set_data_dtype("uint8")
    out_path = Path("mask_union.nii.gz").absolute()
    out_img.to_filename(out_path)
    return str(out_path)
def _carpet_parcellation(segmentation, crown_mask, acompcor_mask, nifti=False):
    """
    Collapse an anatomical segmentation into the row groups of the carpet plot.

    The input *segmentation* labels are remapped through a lookup table, then the
    brain-edge ("crown") mask and the aCompCor (deep WM+CSF) mask are painted on
    top.  The ``nifti`` flag selects between two integer label schemes
    (presumably volumetric vs. CIFTI plotting -- the meaning of each output label
    is defined by the consumer of this file; confirm against the plotter).

    Returns the absolute path of the generated ``segments.nii.gz``.
    """
    from pathlib import Path

    import nibabel as nb
    import numpy as np

    img = nb.load(segmentation)

    # 256-entry lookup table mapping input label -> output class
    lut = np.zeros((256,), dtype="uint8")
    lut[100:201] = 1 if nifti else 0  # Ctx GM
    lut[30:99] = 2 if nifti else 0  # dGM
    lut[1:11] = 3 if nifti else 1  # WM+CSF
    lut[255] = 5 if nifti else 0  # Cerebellum
    # Apply lookup table
    seg = lut[np.uint16(img.dataobj)]
    # Overlay the brain-edge (crown) voxels
    seg[np.bool_(nb.load(crown_mask).dataobj)] = 6 if nifti else 2
    # Separate deep from shallow WM+CSF
    seg[np.bool_(nb.load(acompcor_mask).dataobj)] = 4 if nifti else 1

    outimg = img.__class__(seg.astype("uint8"), img.affine, img.header)
    outimg.set_data_dtype("uint8")
    out_file = Path("segments.nii.gz").absolute()
    outimg.to_filename(out_file)
    return str(out_file)
def _get_zooms(in_file):
    """Return the spatial (x, y, z) voxel sizes of *in_file* as a tuple."""
    import nibabel as nb

    header = nb.load(in_file).header
    xyz = header.get_zooms()[:3]
    return tuple(xyz)
| 29,485 | 36.371356 | 99 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/hmc.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Head-Motion Estimation and Correction (HMC) of BOLD images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_hmc_wf
"""
from nipype.interfaces import fsl
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from ...config import DEFAULT_MEMORY_MIN_GB
def init_bold_hmc_wf(mem_gb: float, omp_nthreads: int, name: str = 'bold_hmc_wf'):
    """
    Build a workflow to estimate head-motion parameters.

    This workflow estimates the motion parameters to perform
    :abbr:`HMC (head motion correction)` over the input
    :abbr:`BOLD (blood-oxygen-level dependent)` image.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold import init_bold_hmc_wf
            wf = init_bold_hmc_wf(
                mem_gb=3,
                omp_nthreads=1)

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``bold_hmc_wf``)

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    raw_ref_image
        Reference image to which BOLD series is motion corrected

    Outputs
    -------
    xforms
        ITKTransform file aligning each volume to ``ref_image``
    movpar_file
        MCFLIRT motion parameters, normalized to SPM format (X, Y, Z, Rx, Ry, Rz)
    rmsd_file
        Framewise displacement as measured by ``fsl_motion_outliers`` [Jenkinson2002]_.
        (Named ``rmsd_file`` to match the output field below.)
    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.confounds import NormalizeMotionParams
    from niworkflows.interfaces.itk import MCFLIRT2ITK

    workflow = Workflow(name=name)
    workflow.__desc__ = """\
Head-motion parameters with respect to the BOLD reference
(transformation matrices, and six corresponding rotation and translation
parameters) are estimated before any spatiotemporal filtering using
`mcflirt` [FSL {fsl_ver}, @mcflirt].
""".format(
        fsl_ver=fsl.Info().version() or '<ver>'
    )

    inputnode = pe.Node(
        niu.IdentityInterface(fields=['bold_file', 'raw_ref_image']), name='inputnode'
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['xforms', 'movpar_file', 'rmsd_file']), name='outputnode'
    )

    # Head motion correction (hmc)
    mcflirt = pe.Node(
        fsl.MCFLIRT(save_mats=True, save_plots=True, save_rms=True),
        name='mcflirt',
        mem_gb=mem_gb * 3,
    )

    # Convert MCFLIRT's affine matrices into a single ITK transform file
    fsl2itk = pe.Node(MCFLIRT2ITK(), name='fsl2itk', mem_gb=0.05, n_procs=omp_nthreads)

    normalize_motion = pe.Node(
        NormalizeMotionParams(format='FSL'), name="normalize_motion", mem_gb=DEFAULT_MEMORY_MIN_GB
    )

    def _pick_rel(rms_files):
        # MCFLIRT's save_rms produces absolute and relative RMS displacement files;
        # keep the last one (presumably the relative RMS -- per FSL's output ordering)
        return rms_files[-1]

    # fmt:off
    workflow.connect([
        (inputnode, mcflirt, [('raw_ref_image', 'ref_file'),
                              ('bold_file', 'in_file')]),
        (inputnode, fsl2itk, [('raw_ref_image', 'in_source'),
                              ('raw_ref_image', 'in_reference')]),
        (mcflirt, fsl2itk, [('mat_file', 'in_files')]),
        (mcflirt, normalize_motion, [('par_file', 'in_file')]),
        (mcflirt, outputnode, [(('rms_files', _pick_rel), 'rmsd_file')]),
        (fsl2itk, outputnode, [('out_file', 'xforms')]),
        (normalize_motion, outputnode, [('out_file', 'movpar_file')]),
    ])
    # fmt:on
    return workflow
| 4,442 | 32.156716 | 98 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/__init__.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Pre-processing fMRI - BOLD signal workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: fmriprep.workflows.bold.base
.. automodule:: fmriprep.workflows.bold.hmc
.. automodule:: fmriprep.workflows.bold.stc
.. automodule:: fmriprep.workflows.bold.t2s
.. automodule:: fmriprep.workflows.bold.registration
.. automodule:: fmriprep.workflows.bold.resampling
.. automodule:: fmriprep.workflows.bold.confounds
"""
from .base import init_func_preproc_wf
from .confounds import init_bold_confs_wf
from .hmc import init_bold_hmc_wf
from .registration import init_bold_reg_wf, init_bold_t1_trans_wf
from .resampling import (
init_bold_preproc_trans_wf,
init_bold_std_trans_wf,
init_bold_surf_wf,
)
from .stc import init_bold_stc_wf
from .t2s import init_bold_t2s_wf
__all__ = [
'init_bold_confs_wf',
'init_bold_hmc_wf',
'init_bold_std_trans_wf',
'init_bold_preproc_trans_wf',
'init_bold_reg_wf',
'init_bold_stc_wf',
'init_bold_surf_wf',
'init_bold_t1_trans_wf',
'init_bold_t2s_wf',
'init_func_preproc_wf',
]
| 1,189 | 26.674419 | 73 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/outputs.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Writing out derivative files."""
from __future__ import annotations
import typing as ty
import numpy as np
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from fmriprep import config
from fmriprep.config import DEFAULT_MEMORY_MIN_GB
from fmriprep.interfaces import DerivativesDataSink
if ty.TYPE_CHECKING:
from niworkflows.utils.spaces import SpatialReferences
def prepare_timing_parameters(metadata: dict):
    """Convert initial timing metadata to post-realignment timing metadata

    In particular, SliceTiming metadata is invalid once STC or any realignment is applied,
    as a matrix of voxels no longer corresponds to an acquisition slice.
    Therefore, if SliceTiming is present in the metadata dictionary, and a sparse
    acquisition paradigm is detected, DelayTime or AcquisitionDuration must be derived to
    preserve the timing interpretation.

    Examples
    --------

    .. testsetup::

        >>> from unittest import mock

    If SliceTiming metadata is absent, then the only change is to note that
    STC has not been applied:

    >>> prepare_timing_parameters(dict(RepetitionTime=2))
    {'RepetitionTime': 2, 'SliceTimingCorrected': False}
    >>> prepare_timing_parameters(dict(RepetitionTime=2, DelayTime=0.5))
    {'RepetitionTime': 2, 'DelayTime': 0.5, 'SliceTimingCorrected': False}
    >>> prepare_timing_parameters(dict(VolumeTiming=[0.0, 1.0, 2.0, 5.0, 6.0, 7.0],
    ...                                AcquisitionDuration=1.0))  #doctest: +NORMALIZE_WHITESPACE
    {'VolumeTiming': [0.0, 1.0, 2.0, 5.0, 6.0, 7.0], 'AcquisitionDuration': 1.0,
     'SliceTimingCorrected': False}

    When SliceTiming is available and used, then ``SliceTimingCorrected`` is ``True``
    and the ``StartTime`` indicates a series offset.

    >>> with mock.patch("fmriprep.config.workflow.ignore", []):
    ...     prepare_timing_parameters(dict(RepetitionTime=2, SliceTiming=[0.0, 0.2, 0.4, 0.6]))
    {'RepetitionTime': 2, 'SliceTimingCorrected': True, 'DelayTime': 1.2, 'StartTime': 0.3}
    >>> with mock.patch("fmriprep.config.workflow.ignore", []):
    ...     prepare_timing_parameters(
    ...         dict(VolumeTiming=[0.0, 1.0, 2.0, 5.0, 6.0, 7.0],
    ...              SliceTiming=[0.0, 0.2, 0.4, 0.6, 0.8]))  #doctest: +NORMALIZE_WHITESPACE
    {'VolumeTiming': [0.0, 1.0, 2.0, 5.0, 6.0, 7.0], 'SliceTimingCorrected': True,
     'AcquisitionDuration': 1.0, 'StartTime': 0.4}

    When SliceTiming is available and not used, then ``SliceTimingCorrected`` is ``False``
    and TA is indicated with ``DelayTime`` or ``AcquisitionDuration``.

    >>> with mock.patch("fmriprep.config.workflow.ignore", ["slicetiming"]):
    ...     prepare_timing_parameters(dict(RepetitionTime=2, SliceTiming=[0.0, 0.2, 0.4, 0.6]))
    {'RepetitionTime': 2, 'SliceTimingCorrected': False, 'DelayTime': 1.2}
    >>> with mock.patch("fmriprep.config.workflow.ignore", ["slicetiming"]):
    ...     prepare_timing_parameters(
    ...         dict(VolumeTiming=[0.0, 1.0, 2.0, 5.0, 6.0, 7.0],
    ...              SliceTiming=[0.0, 0.2, 0.4, 0.6, 0.8]))  #doctest: +NORMALIZE_WHITESPACE
    {'VolumeTiming': [0.0, 1.0, 2.0, 5.0, 6.0, 7.0], 'SliceTimingCorrected': False,
     'AcquisitionDuration': 1.0}

    If SliceTiming metadata is present but empty, then treat it as missing:

    >>> with mock.patch("fmriprep.config.workflow.ignore", []):
    ...     prepare_timing_parameters(dict(RepetitionTime=2, SliceTiming=[]))
    {'RepetitionTime': 2, 'SliceTimingCorrected': False}
    >>> with mock.patch("fmriprep.config.workflow.ignore", []):
    ...     prepare_timing_parameters(dict(RepetitionTime=2, SliceTiming=[0.0]))
    {'RepetitionTime': 2, 'SliceTimingCorrected': False}
    """
    _TIMING_KEYS = (
        "RepetitionTime",
        "VolumeTiming",
        "DelayTime",
        "AcquisitionDuration",
        "SliceTiming",
    )
    params = {key: metadata[key] for key in _TIMING_KEYS if key in metadata}

    # Treat SliceTiming of [] or length 1 as equivalent to missing and remove it in any case.
    # NOTE: the ``len(...) > 1`` test must stay first -- it short-circuits so that the
    # workflow configuration is only consulted when slice timings are actually present.
    slice_times = params.pop("SliceTiming", [])
    stc_applied = len(slice_times) > 1 and 'slicetiming' not in config.workflow.ignore
    params["SliceTimingCorrected"] = stc_applied

    if len(slice_times) > 1:
        onsets = sorted(slice_times)
        # TA = final slice onset + slice duration
        acq_duration = onsets[-1] + (onsets[1] - onsets[0])
        if "RepetitionTime" in params:
            # Constant TR paradigm: report the dead time at the end of each TR
            tr = params["RepetitionTime"]
            if acq_duration < tr and not np.isclose(tr, acq_duration):
                params["DelayTime"] = tr - acq_duration
        elif "VolumeTiming" in params:
            # Variable TR paradigm: report the acquisition duration itself
            params["AcquisitionDuration"] = acq_duration

        if stc_applied:
            # Reference the series to the configured fractional position within the TA
            frac = config.workflow.slice_time_ref
            params["StartTime"] = np.round(onsets[0] + frac * (onsets[-1] - onsets[0]), 3)

    return params
def init_func_derivatives_wf(
bids_root: str,
cifti_output: bool,
freesurfer: bool,
project_goodvoxels: bool,
all_metadata: ty.List[dict],
multiecho: bool,
output_dir: str,
spaces: SpatialReferences,
name='func_derivatives_wf',
):
"""
Set up a battery of datasinks to store derivatives in the right location.
Parameters
----------
bids_root : :obj:`str`
Original BIDS dataset path.
cifti_output : :obj:`bool`
Whether the ``--cifti-output`` flag was set.
freesurfer : :obj:`bool`
Whether FreeSurfer anatomical processing was run.
project_goodvoxels : :obj:`bool`
Whether the option was used to exclude voxels with
locally high coefficient of variation, or that lie outside the
cortical surfaces, from the surface projection.
metadata : :obj:`dict`
Metadata dictionary associated to the BOLD run.
multiecho : :obj:`bool`
Derivatives were generated from multi-echo time series.
output_dir : :obj:`str`
Where derivatives should be written out to.
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
name : :obj:`str`
This workflow's identifier (default: ``func_derivatives_wf``).
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.utility import KeySelect
from smriprep.workflows.outputs import _bids_relative
metadata = all_metadata[0]
timing_parameters = prepare_timing_parameters(metadata)
nonstd_spaces = set(spaces.get_nonstandard())
workflow = Workflow(name=name)
# BOLD series will generally be unmasked unless multiecho,
# as the optimal combination is undefined outside a bounded mask
masked = multiecho
t2star_meta = {
'Units': 's',
'EstimationReference': 'doi:10.1002/mrm.20900',
'EstimationAlgorithm': 'monoexponential decay model',
}
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
'bold_aparc_std',
'bold_aparc_t1',
'bold_aseg_std',
'bold_aseg_t1',
'bold_cifti',
'bold_mask_std',
'bold_mask_t1',
'bold_std',
'bold_std_ref',
'bold_t1',
'bold_t1_ref',
'bold_native',
'bold_native_ref',
'bold_mask_native',
'bold_echos_native',
'cifti_metadata',
'cifti_density',
'confounds',
'confounds_metadata',
'goodvoxels_mask',
'source_file',
'all_source_files',
'surf_files',
'surf_refs',
'template',
'spatial_reference',
't2star_bold',
't2star_t1',
't2star_std',
'bold2anat_xfm',
'anat2bold_xfm',
'hmc_xforms',
'acompcor_masks',
'tcompcor_mask',
]
),
name='inputnode',
)
raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources')
raw_sources.inputs.bids_root = bids_root
ds_confounds = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='confounds',
suffix='timeseries',
dismiss_entities=("echo",),
),
name="ds_confounds",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_ref_t1w_xfm = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
to='T1w',
mode='image',
suffix='xfm',
extension='.txt',
dismiss_entities=('echo',),
**{'from': 'scanner'},
),
name='ds_ref_t1w_xfm',
run_without_submitting=True,
)
ds_ref_t1w_inv_xfm = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
to='scanner',
mode='image',
suffix='xfm',
extension='.txt',
dismiss_entities=('echo',),
**{'from': 'T1w'},
),
name='ds_t1w_tpl_inv_xfm',
run_without_submitting=True,
)
# fmt:off
workflow.connect([
(inputnode, raw_sources, [('all_source_files', 'in_files')]),
(inputnode, ds_confounds, [('source_file', 'source_file'),
('confounds', 'in_file'),
('confounds_metadata', 'meta_dict')]),
(inputnode, ds_ref_t1w_xfm, [('source_file', 'source_file'),
('bold2anat_xfm', 'in_file')]),
(inputnode, ds_ref_t1w_inv_xfm, [('source_file', 'source_file'),
('anat2bold_xfm', 'in_file')]),
])
# fmt:on
# Output HMC and reference volume
ds_bold_hmc_xfm = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
to='boldref',
mode='image',
suffix='xfm',
extension='.txt',
dismiss_entities=('echo',),
**{'from': 'scanner'},
),
name='ds_bold_hmc_xfm',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_native_ref = pe.Node(
DerivativesDataSink(
base_directory=output_dir, suffix='boldref', compress=True, dismiss_entities=("echo",)
),
name='ds_bold_native_ref',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_bold_hmc_xfm, [('source_file', 'source_file'),
('hmc_xforms', 'in_file')]),
(inputnode, ds_bold_native_ref, [('source_file', 'source_file'),
('bold_native_ref', 'in_file')])
])
# fmt:on
bold_output = nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref'))
if bold_output:
ds_bold_native = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='preproc',
compress=True,
SkullStripped=masked,
TaskName=metadata.get('TaskName'),
**timing_parameters,
),
name='ds_bold_native',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt: off
workflow.connect([
(inputnode, ds_bold_native, [('source_file', 'source_file'),
('bold_native', 'in_file')]),
])
# fmt:on
# Save masks and boldref if we're going to save either orig BOLD series or echos
if bold_output or multiecho and config.execution.me_output_echos:
ds_bold_mask_native = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='brain',
suffix='mask',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_mask_native',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_bold_mask_native, [('source_file', 'source_file'),
('bold_mask_native', 'in_file')]),
(raw_sources, ds_bold_mask_native, [('out', 'RawSources')]),
])
# fmt:on
if multiecho:
ds_t2star_bold = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='boldref',
suffix='T2starmap',
compress=True,
dismiss_entities=("echo",),
**t2star_meta,
),
name='ds_t2star_bold',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_t2star_bold, [('source_file', 'source_file'),
('t2star_bold', 'in_file')]),
(raw_sources, ds_t2star_bold, [('out', 'RawSources')]),
])
# fmt:on
if multiecho and config.execution.me_output_echos:
ds_bold_echos_native = pe.MapNode(
DerivativesDataSink(
base_directory=output_dir,
desc='preproc',
compress=True,
SkullStripped=False,
TaskName=metadata.get('TaskName'),
**timing_parameters,
),
iterfield=['source_file', 'in_file', 'meta_dict'],
name='ds_bold_echos_native',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_echos_native.inputs.meta_dict = [
{"EchoTime": md["EchoTime"]} for md in all_metadata
]
# fmt:off
workflow.connect([
(inputnode, ds_bold_echos_native, [
('all_source_files', 'source_file'),
('bold_echos_native', 'in_file')]),
])
# fmt:on
# Resample to T1w space
if nonstd_spaces.intersection(('T1w', 'anat')):
ds_bold_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
desc='preproc',
compress=True,
SkullStripped=masked,
TaskName=metadata.get('TaskName'),
**timing_parameters,
),
name='ds_bold_t1',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_t1_ref = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
suffix='boldref',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_t1_ref',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_mask_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
desc='brain',
suffix='mask',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_mask_t1',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_bold_t1, [('source_file', 'source_file'),
('bold_t1', 'in_file')]),
(inputnode, ds_bold_t1_ref, [('source_file', 'source_file'),
('bold_t1_ref', 'in_file')]),
(inputnode, ds_bold_mask_t1, [('source_file', 'source_file'),
('bold_mask_t1', 'in_file')]),
(raw_sources, ds_bold_mask_t1, [('out', 'RawSources')]),
])
# fmt:on
if freesurfer:
ds_bold_aseg_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
desc='aseg',
suffix='dseg',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_aseg_t1',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_aparc_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
desc='aparcaseg',
suffix='dseg',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_aparc_t1',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_bold_aseg_t1, [('source_file', 'source_file'),
('bold_aseg_t1', 'in_file')]),
(inputnode, ds_bold_aparc_t1, [('source_file', 'source_file'),
('bold_aparc_t1', 'in_file')]),
])
# fmt:on
if multiecho:
ds_t2star_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
suffix='T2starmap',
compress=True,
dismiss_entities=("echo",),
**t2star_meta,
),
name='ds_t2star_t1',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_t2star_t1, [('source_file', 'source_file'),
('t2star_t1', 'in_file')]),
(raw_sources, ds_t2star_t1, [('out', 'RawSources')]),
])
# fmt:on
if getattr(spaces, '_cached') is None:
return workflow
# Store resamplings in standard spaces when listed in --output-spaces
if spaces.cached.references:
from niworkflows.interfaces.space import SpaceDataSource
spacesource = pe.Node(SpaceDataSource(), name='spacesource', run_without_submitting=True)
spacesource.iterables = (
'in_tuple',
[(s.fullname, s.spec) for s in spaces.cached.get_standard(dim=(3,))],
)
fields = ['template', 'bold_std', 'bold_std_ref', 'bold_mask_std']
if multiecho:
fields.append('t2star_std')
select_std = pe.Node(
KeySelect(fields=fields),
name='select_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='preproc',
compress=True,
SkullStripped=masked,
TaskName=metadata.get('TaskName'),
**timing_parameters,
),
name='ds_bold_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_std_ref = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
suffix='boldref',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_std_ref',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_mask_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='brain',
suffix='mask',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_mask_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_bold_std, [('source_file', 'source_file')]),
(inputnode, ds_bold_std_ref, [('source_file', 'source_file')]),
(inputnode, ds_bold_mask_std, [('source_file', 'source_file')]),
(inputnode, select_std, [('bold_std', 'bold_std'),
('bold_std_ref', 'bold_std_ref'),
('bold_mask_std', 'bold_mask_std'),
('t2star_std', 't2star_std'),
('template', 'template'),
('spatial_reference', 'keys')]),
(spacesource, select_std, [('uid', 'key')]),
(select_std, ds_bold_std, [('bold_std', 'in_file')]),
(spacesource, ds_bold_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, ds_bold_std_ref, [('bold_std_ref', 'in_file')]),
(spacesource, ds_bold_std_ref, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, ds_bold_mask_std, [('bold_mask_std', 'in_file')]),
(spacesource, ds_bold_mask_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(raw_sources, ds_bold_mask_std, [('out', 'RawSources')]),
])
# fmt:on
if freesurfer:
select_fs_std = pe.Node(
KeySelect(fields=['bold_aseg_std', 'bold_aparc_std', 'template']),
name='select_fs_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_aseg_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='aseg',
suffix='dseg',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_aseg_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
ds_bold_aparc_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc='aparcaseg',
suffix='dseg',
compress=True,
dismiss_entities=("echo",),
),
name='ds_bold_aparc_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(spacesource, select_fs_std, [('uid', 'key')]),
(inputnode, select_fs_std, [('bold_aseg_std', 'bold_aseg_std'),
('bold_aparc_std', 'bold_aparc_std'),
('template', 'template'),
('spatial_reference', 'keys')]),
(select_fs_std, ds_bold_aseg_std, [('bold_aseg_std', 'in_file')]),
(spacesource, ds_bold_aseg_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_fs_std, ds_bold_aparc_std, [('bold_aparc_std', 'in_file')]),
(spacesource, ds_bold_aparc_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(inputnode, ds_bold_aseg_std, [('source_file', 'source_file')]),
(inputnode, ds_bold_aparc_std, [('source_file', 'source_file')])
])
# fmt:on
if multiecho:
ds_t2star_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
suffix='T2starmap',
compress=True,
dismiss_entities=("echo",),
**t2star_meta,
),
name='ds_t2star_std',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_t2star_std, [('source_file', 'source_file')]),
(select_std, ds_t2star_std, [('t2star_std', 'in_file')]),
(spacesource, ds_t2star_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(raw_sources, ds_t2star_std, [('out', 'RawSources')]),
])
# fmt:on
fs_outputs = spaces.cached.get_fs_spaces()
if freesurfer and fs_outputs:
from niworkflows.interfaces.surf import Path2BIDS
select_fs_surf = pe.Node(
KeySelect(fields=['surfaces', 'surf_kwargs']),
name='select_fs_surf',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
select_fs_surf.iterables = [('key', fs_outputs)]
select_fs_surf.inputs.surf_kwargs = [{'space': s} for s in fs_outputs]
name_surfs = pe.MapNode(
Path2BIDS(pattern=r'(?P<hemi>[lr])h.\w+'),
iterfield='in_file',
name='name_surfs',
run_without_submitting=True,
)
ds_bold_surfs = pe.MapNode(
DerivativesDataSink(
base_directory=output_dir,
extension=".func.gii",
TaskName=metadata.get('TaskName'),
**timing_parameters,
),
iterfield=['in_file', 'hemi'],
name='ds_bold_surfs',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, select_fs_surf, [
('surf_files', 'surfaces'),
('surf_refs', 'keys')]),
(select_fs_surf, name_surfs, [('surfaces', 'in_file')]),
(inputnode, ds_bold_surfs, [('source_file', 'source_file')]),
(select_fs_surf, ds_bold_surfs, [('surfaces', 'in_file'),
('key', 'space')]),
(name_surfs, ds_bold_surfs, [('hemi', 'hemi')]),
])
# fmt:on
if freesurfer and project_goodvoxels:
ds_goodvoxels_mask = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
space='T1w',
desc='goodvoxels',
suffix='mask',
Type='ROI', # Metadata
compress=True,
dismiss_entities=("echo",),
),
name='ds_goodvoxels_mask',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_goodvoxels_mask, [
('source_file', 'source_file'),
('goodvoxels_mask', 'in_file'),
]),
])
# fmt:on
# CIFTI output
if cifti_output:
ds_bold_cifti = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
suffix='bold',
compress=False,
TaskName=metadata.get('TaskName'),
space='fsLR',
**timing_parameters,
),
name='ds_bold_cifti',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
# fmt:off
workflow.connect([
(inputnode, ds_bold_cifti, [(('bold_cifti', _unlist), 'in_file'),
('source_file', 'source_file'),
('cifti_density', 'density'),
(('cifti_metadata', _read_json), 'meta_dict')])
])
# fmt:on
if "compcor" in config.execution.debug:
ds_acompcor_masks = pe.Node(
DerivativesDataSink(
base_directory=output_dir,
desc=[f"CompCor{_}" for _ in "CWA"],
suffix="mask",
compress=True,
),
name="ds_acompcor_masks",
run_without_submitting=True,
)
ds_tcompcor_mask = pe.Node(
DerivativesDataSink(
base_directory=output_dir, desc="CompCorT", suffix="mask", compress=True
),
name="ds_tcompcor_mask",
run_without_submitting=True,
)
# fmt:off
workflow.connect([
(inputnode, ds_acompcor_masks, [("acompcor_masks", "in_file"),
("source_file", "source_file")]),
(inputnode, ds_tcompcor_mask, [("tcompcor_mask", "in_file"),
("source_file", "source_file")]),
])
# fmt:on
return workflow
def init_bold_preproc_report_wf(
    mem_gb: float,
    reportlets_dir: str,
    name: str = 'bold_preproc_report_wf',
):
    """
    Generate a visual report.

    This workflow generates and saves a reportlet showing the effect of resampling
    the BOLD signal using the standard deviation maps.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold.resampling import init_bold_preproc_report_wf
            wf = init_bold_preproc_report_wf(mem_gb=1, reportlets_dir='.')

    Parameters
    ----------
    mem_gb : :obj:`float`
        Size of BOLD file in GB
    reportlets_dir : :obj:`str`
        Directory in which to save reportlets
    name : :obj:`str`, optional
        Workflow name (default: bold_preproc_report_wf)

    Inputs
    ------
    in_pre
        BOLD time-series, before resampling
    in_post
        BOLD time-series, after resampling
    name_source
        BOLD series NIfTI file
        Used to recover original information lost during processing

    """
    from nipype.algorithms.confounds import TSNR
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.reportlets.registration import SimpleBeforeAfterRPT

    from ...interfaces import DerivativesDataSink

    workflow = Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['in_pre', 'in_post', 'name_source']), name='inputnode'
    )

    # tSNR is computed on the series before and after resampling; the
    # standard-deviation maps feed the before/after comparison reportlet.
    # mem_gb * 4.5 is an empirical multiplier for TSNR's memory footprint.
    pre_tsnr = pe.Node(TSNR(), name='pre_tsnr', mem_gb=mem_gb * 4.5)
    pos_tsnr = pe.Node(TSNR(), name='pos_tsnr', mem_gb=mem_gb * 4.5)
    bold_rpt = pe.Node(SimpleBeforeAfterRPT(), name='bold_rpt', mem_gb=0.1)
    ds_report_bold = pe.Node(
        DerivativesDataSink(
            base_directory=reportlets_dir,
            desc='preproc',
            datatype="figures",
            dismiss_entities=("echo",),
        ),
        name='ds_report_bold',
        mem_gb=DEFAULT_MEMORY_MIN_GB,
        run_without_submitting=True,
    )
    # fmt:off
    workflow.connect([
        (inputnode, ds_report_bold, [('name_source', 'source_file')]),
        (inputnode, pre_tsnr, [('in_pre', 'in_file')]),
        (inputnode, pos_tsnr, [('in_post', 'in_file')]),
        (pre_tsnr, bold_rpt, [('stddev_file', 'before')]),
        (pos_tsnr, bold_rpt, [('stddev_file', 'after')]),
        (bold_rpt, ds_report_bold, [('out_report', 'in_file')]),
    ])
    # fmt:on
    return workflow
def _unlist(in_file):
while isinstance(in_file, (list, tuple)) and len(in_file) == 1:
in_file = in_file[0]
return in_file
def _read_json(in_file):
from json import loads
from pathlib import Path
return loads(Path(in_file).read_text())
| 34,824 | 36.977099 | 98 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/stc.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Slice-Timing Correction (STC) of BOLD images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_stc_wf
"""
import nibabel as nb
import numpy as np
from nipype.interfaces import afni
from nipype.interfaces import utility as niu
from nipype.interfaces.base import isdefined
from nipype.pipeline import engine as pe
from ... import config
LOGGER = config.loggers.workflow
class TShift(afni.TShift):
    """Patched version of TShift implementing the "TooShort" behavior."""

    def _pre_run_hook(self, runtime):
        """Abort before running 3dTshift when too few usable volumes remain."""
        # Number of initial nonsteady-state ("dummy") volumes to be discarded
        n_dummy = 0
        if isdefined(self.inputs.ignore):
            n_dummy = self.inputs.ignore

        # AFNI's 3dTshift needs a minimum number of time points to interpolate
        n_vols = nb.load(self.inputs.in_file).shape[3]
        if n_vols - n_dummy < 5:
            raise RuntimeError(
                f"Insufficient length of BOLD data ({n_vols} time points) after "
                f"discarding {n_dummy} nonsteady-state (or 'dummy') time points."
            )
        return runtime
def init_bold_stc_wf(metadata: dict, name='bold_stc_wf'):
    """
    Create a workflow for :abbr:`STC (slice-timing correction)`.

    This workflow performs :abbr:`STC (slice-timing correction)` over the input
    :abbr:`BOLD (blood-oxygen-level dependent)` image.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.bold import init_bold_stc_wf
            wf = init_bold_stc_wf(
                metadata={"RepetitionTime": 2.0,
                          "SliceTiming": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]},
                )

    Parameters
    ----------
    metadata : :obj:`dict`
        BIDS metadata for BOLD file
    name : :obj:`str`
        Name of workflow (default: ``bold_stc_wf``)

    Inputs
    ------
    bold_file
        BOLD series NIfTI file
    skip_vols
        Number of non-steady-state volumes detected at beginning of ``bold_file``

    Outputs
    -------
    stc_file
        Slice-timing corrected BOLD series NIfTI file

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.header import CopyXForm

    slice_times = metadata["SliceTiming"]
    first, last = min(slice_times), max(slice_times)
    # The reference time point is a configurable fraction of the slice
    # acquisition range, rounded to millisecond precision.
    frac = config.workflow.slice_time_ref
    tzero = np.round(first + frac * (last - first), 3)

    afni_ver = ''.join('%02d' % v for v in afni.Info().version() or [])
    workflow = Workflow(name=name)
    # Citation boilerplate that ends up in the generated report (must start
    # at column 0 so the Markdown rendering is not indented).
    workflow.__desc__ = f"""\
BOLD runs were slice-time corrected to {tzero:0.3g}s ({frac:g} of slice acquisition range
{first:.3g}s-{last:.3g}s) using `3dTshift` from AFNI {afni_ver} [@afni, RRID:SCR_005927].
"""
    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file', 'skip_vols']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['stc_file']), name='outputnode')

    LOGGER.log(25, f'BOLD series will be slice-timing corrected to an offset of {tzero:.3g}s.')

    # It would be good to fingerprint memory use of afni.TShift
    slice_timing_correction = pe.Node(
        TShift(
            outputtype='NIFTI_GZ',
            tr=f"{metadata['RepetitionTime']}s",
            slice_timing=metadata['SliceTiming'],
            slice_encoding_direction=metadata.get('SliceEncodingDirection', 'k'),
            tzero=tzero,
        ),
        name='slice_timing_correction',
    )

    # Copy the header transforms of the original BOLD (``hdr_file``) onto the
    # slice-time corrected output.
    copy_xform = pe.Node(CopyXForm(), name='copy_xform', mem_gb=0.1)

    # fmt:off
    workflow.connect([
        (inputnode, slice_timing_correction, [('bold_file', 'in_file'),
                                              ('skip_vols', 'ignore')]),
        (slice_timing_correction, copy_xform, [('out_file', 'in_file')]),
        (inputnode, copy_xform, [('bold_file', 'hdr_file')]),
        (copy_xform, outputnode, [('out_file', 'stc_file')]),
    ])
    # fmt:on
    return workflow
| 4,722 | 33.474453 | 99 | py |
fmriprep | fmriprep-master/fmriprep/workflows/bold/tests/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/workflows/bold/tests/test_base.py | from copy import deepcopy
import bids
from niworkflows.utils.testing import generate_bids_skeleton
from sdcflows.fieldmaps import clear_registry
from sdcflows.utils.wrangler import find_estimators
from ..base import get_estimator
# Minimal BIDS skeleton shared by every test in this module: one subject with a
# T1w anatomical, two rest BOLD runs, and four fieldmap candidates (a
# phasediff/magnitude1 pair plus PA/AP "pepolar" EPI acquisitions).
# Tests deep-copy this dict and add IntendedFor/B0Field* metadata as needed.
BASE_LAYOUT = {
    "01": {
        "anat": [{"suffix": "T1w"}],
        "func": [
            {
                "task": "rest",
                "run": i,
                "suffix": "bold",
                "metadata": {"PhaseEncodingDirection": "j", "TotalReadoutTime": 0.6},
            }
            for i in range(1, 3)  # runs 1 and 2
        ],
        "fmap": [
            {"suffix": "phasediff", "metadata": {"EchoTime1": 0.005, "EchoTime2": 0.007}},
            {"suffix": "magnitude1", "metadata": {"EchoTime": 0.005}},
            {
                "suffix": "epi",
                "direction": "PA",
                "metadata": {"PhaseEncodingDirection": "j", "TotalReadoutTime": 0.6},
            },
            {
                "suffix": "epi",
                "direction": "AP",
                "metadata": {"PhaseEncodingDirection": "j-", "TotalReadoutTime": 0.6},
            },
        ],
    },
}
def test_get_estimator_none(tmp_path):
    """Without IntendedFor/B0Field metadata, no estimator is associated."""
    bids_dir = tmp_path / "bids"
    # No IntendedFors/B0Fields
    generate_bids_skeleton(bids_dir, BASE_LAYOUT)

    layout = bids.BIDSLayout(bids_dir)
    bold_files = sorted(layout.get(suffix='bold', extension='.nii.gz', return_type='file'))

    for bold in bold_files[:2]:
        assert get_estimator(layout, bold) == ()
def test_get_estimator_b0field_and_intendedfor(tmp_path):
    """Run 1 is linked via B0FieldSource; run 2 via IntendedFor."""
    bids_dir = tmp_path / "bids"
    skeleton = deepcopy(BASE_LAYOUT)

    # Link run 1 through the B0Field mechanism (both pepolar EPIs)
    skeleton['01']['func'][0]['metadata']['B0FieldSource'] = 'epi'
    skeleton['01']['fmap'][2]['metadata']['B0FieldIdentifier'] = 'epi'
    skeleton['01']['fmap'][3]['metadata']['B0FieldIdentifier'] = 'epi'

    # Link run 2 through the phasediff fieldmap's IntendedFor
    skeleton['01']['fmap'][0]['metadata']['IntendedFor'] = 'func/sub-01_task-rest_run-2_bold.nii.gz'
    generate_bids_skeleton(bids_dir, skeleton)

    layout = bids.BIDSLayout(bids_dir)
    # Populate the sdcflows estimator registry, as fMRIPrep would
    find_estimators(layout=layout, subject='01')

    bold_files = sorted(layout.get(suffix='bold', extension='.nii.gz', return_type='file'))
    assert get_estimator(layout, bold_files[0]) == ('epi',)
    assert get_estimator(layout, bold_files[1]) == ('auto_00000',)

    clear_registry()
def test_get_estimator_overlapping_specs(tmp_path):
    """B0Field links win over IntendedFor when both cover the same runs."""
    bids_dir = tmp_path / "bids"
    skeleton = deepcopy(BASE_LAYOUT)

    # Link both runs through the B0Field mechanism...
    skeleton['01']['func'][0]['metadata']['B0FieldSource'] = 'epi'
    skeleton['01']['func'][1]['metadata']['B0FieldSource'] = 'epi'
    skeleton['01']['fmap'][2]['metadata']['B0FieldIdentifier'] = 'epi'
    skeleton['01']['fmap'][3]['metadata']['B0FieldIdentifier'] = 'epi'

    # ...and also through IntendedFor
    skeleton['01']['fmap'][0]['metadata']['IntendedFor'] = [
        'func/sub-01_task-rest_run-1_bold.nii.gz',
        'func/sub-01_task-rest_run-2_bold.nii.gz',
    ]
    generate_bids_skeleton(bids_dir, skeleton)

    layout = bids.BIDSLayout(bids_dir)
    find_estimators(layout=layout, subject='01')

    bold_files = sorted(layout.get(suffix='bold', extension='.nii.gz', return_type='file'))
    # B0Fields take precedence
    for bold in bold_files[:2]:
        assert get_estimator(layout, bold) == ('epi',)

    clear_registry()
def test_get_estimator_multiple_b0fields(tmp_path):
    """A run may be linked to several estimators via B0FieldSource."""
    bids_dir = tmp_path / "bids"
    skeleton = deepcopy(BASE_LAYOUT)

    # Run 1 references two estimators; run 2 references only one
    skeleton['01']['func'][0]['metadata']['B0FieldSource'] = ('epi', 'phasediff')
    skeleton['01']['func'][1]['metadata']['B0FieldSource'] = 'epi'
    skeleton['01']['fmap'][0]['metadata']['B0FieldIdentifier'] = 'phasediff'
    skeleton['01']['fmap'][1]['metadata']['B0FieldIdentifier'] = 'phasediff'
    skeleton['01']['fmap'][2]['metadata']['B0FieldIdentifier'] = 'epi'
    skeleton['01']['fmap'][3]['metadata']['B0FieldIdentifier'] = 'epi'
    generate_bids_skeleton(bids_dir, skeleton)

    layout = bids.BIDSLayout(bids_dir)
    find_estimators(layout=layout, subject='01')

    bold_files = sorted(layout.get(suffix='bold', extension='.nii.gz', return_type='file'))
    # Always get an iterable; don't care if it's a list or tuple
    assert get_estimator(layout, bold_files[0]) == ['epi', 'phasediff']
    assert get_estimator(layout, bold_files[1]) == ('epi',)

    clear_registry()
| 4,446 | 34.576 | 96 | py |
fmriprep | fmriprep-master/fmriprep/tests/test_fsl6.py | import shutil
from pathlib import Path
import pytest
import templateflow.api as tf
from looseversion import LooseVersion
from nipype.interfaces import fsl
fslversion = fsl.Info.version()
TEMPLATE = tf.get("MNI152NLin2009cAsym", resolution=2, desc=None, suffix="T1w")
@pytest.mark.skipif(fslversion is None, reason="fsl required")
@pytest.mark.skipif(
fslversion and LooseVersion(fslversion) < LooseVersion("6.0.0"), reason="FSL6 test"
)
@pytest.mark.parametrize(
"path_parent,filename",
[
(".", "brain.nii.gz"),
(
"pneumonoultramicroscopicsilicovolcanoconiosis/floccinaucinihilipilification",
"supercalifragilisticexpialidocious.nii.gz",
),
(
"pneumonoultramicroscopicsilicovolcanoconiosis/floccinaucinihilipilification/"
"antidisestablishmentarianism/pseudopseudohypoparathyroidism/sesquipedalian",
"brain.nii.gz",
),
],
)
def test_fsl6_long_filenames(tmp_path, path_parent, filename):
    """FSL 6 should cope with deeply nested directories and long filenames."""
    workdir = tmp_path / path_parent
    workdir.mkdir(parents=True, exist_ok=True)

    # Stage a copy of the template under the (possibly very long) path
    src = workdir / filename
    shutil.copy(TEMPLATE, src)

    result = fsl.BET(in_file=src, out_file=workdir / "output.nii.gz").run()
    assert Path(result.outputs.out_file).exists()
| 1,312 | 31.02439 | 90 | py |
fmriprep | fmriprep-master/fmriprep/tests/test_config.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Check the configuration module and file."""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
from niworkflows.utils.spaces import format_reference
from pkg_resources import resource_filename as pkgrf
from toml import loads
from .. import config
def _reset_config():
    """
    Forcibly reload the configuration module to restore defaults.

    .. caution::
        `importlib.reload` creates new sets of objects, but will not remove
        previous references to those objects."""
    from importlib import reload

    reload(config)
def test_reset_config():
    """Resetting the config restores defaults, but stale references persist."""
    execution = config.execution
    execution.bids_dir = 'TESTING'
    assert config.execution.bids_dir == 'TESTING'

    _reset_config()
    assert config.execution.bids_dir is None

    # Even though the config module was reset, previous references to config
    # classes have not been touched: ``importlib.reload`` builds new objects
    # without mutating the old ones.
    assert execution.bids_dir == 'TESTING'
def test_config_spaces():
    """Check that all necessary spaces are recorded in the config."""
    filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))
    settings = loads(filename.read_text())
    # Load every section except 'environment' into the live config module
    for sectionname, configs in settings.items():
        if sectionname != 'environment':
            section = getattr(config, sectionname)
            section.load(configs, init=False)
    config.nipype.init()
    config.loggers.init()

    # Without CIFTI outputs, the high-res MNI152NLin6Asym reference must NOT
    # be part of the initialized spaces
    config.init_spaces()
    spaces = config.workflow.spaces
    assert "MNI152NLin6Asym:res-1" not in [str(s) for s in spaces.get_standard(full_spec=True)]
    assert "MNI152NLin6Asym_res-1" not in [
        format_reference((s.fullname, s.spec))
        for s in spaces.references
        if s.standard and s.dim == 3
    ]

    # Enabling CIFTI outputs must add MNI152NLin6Asym:res-1
    config.workflow.cifti_output = True
    config.init_spaces()
    spaces = config.workflow.spaces
    assert "MNI152NLin6Asym:res-1" in [str(s) for s in spaces.get_standard(full_spec=True)]
    assert "MNI152NLin6Asym_res-1" in [
        format_reference((s.fullname, s.spec))
        for s in spaces.references
        if s.standard and s.dim == 3
    ]

    # With no output spaces requested and CIFTI disabled, only the internal
    # MNI152NLin2009cAsym reference remains
    config.execution.output_spaces = None
    config.workflow.cifti_output = False
    config.init_spaces()
    spaces = config.workflow.spaces
    assert [str(s) for s in spaces.get_standard(full_spec=True)] == []
    assert [
        format_reference((s.fullname, s.spec))
        for s in spaces.references
        if s.standard and s.dim == 3
    ] == ['MNI152NLin2009cAsym']
    _reset_config()
@pytest.mark.parametrize(
"master_seed,ants_seed,numpy_seed", [(1, 17612, 8272), (100, 19094, 60232)]
)
def test_prng_seed(master_seed, ants_seed, numpy_seed):
    """Ensure seeds are properly tracked"""
    seeds = config.seeds
    with patch.dict(os.environ, {}):
        seeds.load({'_random_seed': master_seed}, init=True)
        # The master seed deterministically derives the tool-specific seeds
        assert seeds.master == master_seed
        assert seeds.ants == ants_seed
        assert seeds.numpy == numpy_seed
        assert os.getenv("ANTS_RANDOM_SEED") == str(ants_seed)

    _reset_config()
    # After a reset, every seed-related attribute reverts to None
    for attr in ('_random_seed', 'master', 'ants', 'numpy'):
        assert getattr(config.seeds, attr) is None
| 4,033 | 31.532258 | 95 | py |
fmriprep | fmriprep-master/fmriprep/tests/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/reports/core.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
from pathlib import Path
from niworkflows.reports.core import Report as _Report
# This patch is intended to permit fMRIPrep 20.2.0 LTS to use the YODA-style
# derivatives directory. Ideally, we will remove this in 20.3.x and use an
# updated niworkflows.
class Report(_Report):
    """Patched :class:`niworkflows.reports.core.Report` for YODA-style outputs."""

    def _load_config(self, config):
        """Load report settings without appending the package name to paths."""
        from yaml import safe_load as load

        settings = load(config.read_text())
        self.packagename = self.packagename or settings.get("package", None)

        # Removed from here: Appending self.packagename to self.root and self.out_dir
        # In this version, pass reportlets_dir and out_dir with fmriprep in the path.
        if self.subject_id is not None:
            self.root = self.root / f"sub-{self.subject_id}"

        if "template_path" in settings:
            self.template_path = config.parent / settings["template_path"]
        self.index(settings["sections"])
#
# The following are the interface used directly by fMRIPrep
#
def run_reports(
    out_dir,
    subject_label,
    run_uuid,
    config=None,
    reportlets_dir=None,
    packagename=None,
):
    """
    Run the reports.

    .. testsetup::

    >>> copytree_or_skip("data/tests/work", testdir)
    >>> (testdir / 'fmriprep').mkdir(parents=True, exist_ok=True)

    .. doctest::

    >>> run_reports(testdir / 'out', '01', 'madeoutuuid', packagename='fmriprep',
    ...             reportlets_dir=testdir / 'work' / 'reportlets' / 'fmriprep')
    0
    """
    # Build the (patched) Report and return its exit status
    report = Report(
        out_dir,
        run_uuid,
        config=config,
        subject_id=subject_label,
        packagename=packagename,
        reportlets_dir=reportlets_dir,
    )
    return report.generate_report()
def generate_reports(
    subject_list, output_dir, run_uuid, config=None, work_dir=None, packagename=None
):
    """Execute run_reports on a list of subjects."""
    reportlets_dir = Path(work_dir) / "reportlets" if work_dir is not None else None

    # Collect one per-subject error count, in subject order
    report_errors = []
    for subject_label in subject_list:
        report_errors.append(
            run_reports(
                output_dir,
                subject_label,
                run_uuid,
                config=config,
                packagename=packagename,
                reportlets_dir=reportlets_dir,
            )
        )

    errno = sum(report_errors)
    if errno:
        import logging

        logger = logging.getLogger("cli")
        error_list = ", ".join(
            f"{subid} ({err})" for subid, err in zip(subject_list, report_errors) if err
        )
        logger.error(
            "Preprocessing did not finish successfully. Errors occurred while processing "
            "data from participants: %s. Check the HTML reports for details.",
            error_list,
        )
    return errno
| 3,608 | 28.581967 | 90 | py |
fmriprep | fmriprep-master/fmriprep/reports/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/utils/meepi.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Multi-echo EPI utilities."""
def combine_meepi_source(in_files):
    """
    Create a new source name when optimally
    combining multiple multi-echo EPIs

    >>> combine_meepi_source([
    ...     'sub-01_run-01_echo-1_bold.nii.gz',
    ...     'sub-01_run-01_echo-2_bold.nii.gz',
    ...     'sub-01_run-01_echo-3_bold.nii.gz',])
    'sub-01_run-01_bold.nii.gz'

    """
    import os

    from nipype.utils.filemanip import filename_to_list

    # Use the first echo as the template and drop its ``echo-`` entity
    dirname, fname = os.path.split(filename_to_list(in_files)[0])
    combined = '_'.join(ent for ent in fname.split('_') if not ent.startswith('echo-'))
    return os.path.join(dirname, combined)
| 1,552 | 32.76087 | 81 | py |
fmriprep | fmriprep-master/fmriprep/utils/bids.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Utilities to handle BIDS inputs."""
import json
import os
import sys
from pathlib import Path
def write_bidsignore(deriv_dir):
    """Write a ``.bidsignore`` file into *deriv_dir* excluding nonstandard outputs."""
    patterns = [
        "*.html",
        "logs/",
        "figures/",  # Reports
        "*_xfm.*",  # Unspecified transform files
        "*.surf.gii",  # Unspecified structural outputs
        # Unspecified functional outputs
        "*_boldref.nii.gz",
        "*_bold.func.gii",
        "*_mixing.tsv",
        "*_timeseries.tsv",
    ]
    target = Path(deriv_dir) / ".bidsignore"
    # One pattern per line, with a trailing newline
    target.write_text("".join(f"{pattern}\n" for pattern in patterns))
def write_derivative_description(bids_dir, deriv_dir):
    """
    Write a BIDS-Derivatives ``dataset_description.json`` into *deriv_dir*.

    Collects fMRIPrep's version and download URL, container information from
    the ``FMRIPREP_DOCKER_TAG``/``FMRIPREP_SINGULARITY_URL`` environment
    variables (set by the wrapper scripts), and the ``DatasetDOI``/``License``
    fields of the source dataset's own ``dataset_description.json``.
    """
    from ..__about__ import DOWNLOAD_URL, __version__

    bids_dir = Path(bids_dir)
    deriv_dir = Path(deriv_dir)
    desc = {
        'Name': 'fMRIPrep - fMRI PREProcessing workflow',
        'BIDSVersion': '1.4.0',
        'DatasetType': 'derivative',
        'GeneratedBy': [
            {
                'Name': 'fMRIPrep',
                'Version': __version__,
                'CodeURL': DOWNLOAD_URL,
            }
        ],
        'HowToAcknowledge': 'Please cite our paper (https://doi.org/10.1038/s41592-018-0235-4), '
        'and include the generated citation boilerplate within the Methods '
        'section of the text.',
    }

    # Keys that can only be set by environment
    if 'FMRIPREP_DOCKER_TAG' in os.environ:
        desc['GeneratedBy'][0]['Container'] = {
            "Type": "docker",
            "Tag": f"nipreps/fmriprep:{os.environ['FMRIPREP_DOCKER_TAG']}",
        }
    if 'FMRIPREP_SINGULARITY_URL' in os.environ:
        desc['GeneratedBy'][0]['Container'] = {
            "Type": "singularity",
            "URI": os.getenv('FMRIPREP_SINGULARITY_URL'),
        }

    # Keys deriving from source dataset
    orig_desc = {}
    fname = bids_dir / 'dataset_description.json'
    if fname.exists():
        orig_desc = json.loads(fname.read_text())
    if 'DatasetDOI' in orig_desc:
        desc['SourceDatasets'] = [
            {'URL': f'https://doi.org/{orig_desc["DatasetDOI"]}', 'DOI': orig_desc['DatasetDOI']}
        ]
    if 'License' in orig_desc:
        desc['License'] = orig_desc['License']

    # Fixed: call write_text on the Path instance instead of passing the path
    # as the (unbound) method's first argument, which only worked by accident.
    (deriv_dir / 'dataset_description.json').write_text(json.dumps(desc, indent=4))
def validate_input_dir(exec_env, bids_dir, participant_label):
    """Run the ``bids-validator`` CLI on *bids_dir*, ignoring irrelevant issues.

    Parameters
    ----------
    exec_env : str
        Execution environment name (e.g., ``'docker'``, ``'singularity'``);
        only used to tailor the error message about missing participants.
    bids_dir : pathlib.Path
        Root of the BIDS dataset to validate.
    participant_label : list of str or None
        Participant labels (with or without the ``sub-`` prefix) to restrict
        validation to; data from all other subjects is added to the validator's
        ignore list.

    Raises
    ------
    RuntimeError
        If any requested participant label cannot be found under *bids_dir*.
    """
    # Ignore issues and warnings that should not influence FMRIPREP
    import subprocess
    import tempfile
    # Validator codes irrelevant to fMRIPrep's operation.
    # (NOTE: "TSV_IMPROPER_NA" and "ACQTIME_FMT" are listed twice -- harmless.)
    validator_config_dict = {
        "ignore": [
            "EVENTS_COLUMN_ONSET",
            "EVENTS_COLUMN_DURATION",
            "TSV_EQUAL_ROWS",
            "TSV_EMPTY_CELL",
            "TSV_IMPROPER_NA",
            "VOLUME_COUNT_MISMATCH",
            "BVAL_MULTIPLE_ROWS",
            "BVEC_NUMBER_ROWS",
            "DWI_MISSING_BVAL",
            "INCONSISTENT_SUBJECTS",
            "INCONSISTENT_PARAMETERS",
            "BVEC_ROW_LENGTH",
            "B_FILE",
            "PARTICIPANT_ID_COLUMN",
            "PARTICIPANT_ID_MISMATCH",
            "TASK_NAME_MUST_DEFINE",
            "PHENOTYPE_SUBJECTS_MISSING",
            "STIMULUS_FILE_MISSING",
            "DWI_MISSING_BVEC",
            "EVENTS_TSV_MISSING",
            "TSV_IMPROPER_NA",
            "ACQTIME_FMT",
            "Participants age 89 or higher",
            "DATASET_DESCRIPTION_JSON_MISSING",
            "FILENAME_COLUMN",
            "WRONG_NEW_LINE",
            "MISSING_TSV_COLUMN_CHANNELS",
            "MISSING_TSV_COLUMN_IEEG_CHANNELS",
            "MISSING_TSV_COLUMN_IEEG_ELECTRODES",
            "UNUSED_STIMULUS",
            "CHANNELS_COLUMN_SFREQ",
            "CHANNELS_COLUMN_LOWCUT",
            "CHANNELS_COLUMN_HIGHCUT",
            "CHANNELS_COLUMN_NOTCH",
            "CUSTOM_COLUMN_WITHOUT_DESCRIPTION",
            "ACQTIME_FMT",
            "SUSPICIOUSLY_LONG_EVENT_DESIGN",
            "SUSPICIOUSLY_SHORT_EVENT_DESIGN",
            "MALFORMED_BVEC",
            "MALFORMED_BVAL",
            "MISSING_TSV_COLUMN_EEG_ELECTRODES",
            "MISSING_SESSION",
        ],
        "error": ["NO_T1W"],
        "ignoredFiles": ['/dataset_description.json', '/participants.tsv'],
    }
    # Limit validation only to data from requested participants
    if participant_label:
        all_subs = {s.name[4:] for s in bids_dir.glob('sub-*')}
        # Normalize requested labels by stripping any 'sub-' prefix
        selected_subs = {s[4:] if s.startswith('sub-') else s for s in participant_label}
        bad_labels = selected_subs.difference(all_subs)
        if bad_labels:
            error_msg = (
                'Data for requested participant(s) label(s) not found. Could '
                'not find data for participant(s): %s. Please verify the requested '
                'participant labels.'
            )
            if exec_env == 'docker':
                error_msg += (
                    ' This error can be caused by the input data not being '
                    'accessible inside the docker container. Please make sure all '
                    'volumes are mounted properly (see https://docs.docker.com/'
                    'engine/reference/commandline/run/#mount-volume--v---read-only)'
                )
            if exec_env == 'singularity':
                error_msg += (
                    ' This error can be caused by the input data not being '
                    'accessible inside the singularity container. Please make sure '
                    'all paths are mapped properly (see https://www.sylabs.io/'
                    'guides/3.0/user-guide/bind_paths_and_mounts.html)'
                )
            raise RuntimeError(error_msg % ','.join(bad_labels))
        # Tell the validator to skip all subjects that were not requested
        ignored_subs = all_subs.difference(selected_subs)
        if ignored_subs:
            for sub in ignored_subs:
                validator_config_dict["ignoredFiles"].append("/sub-%s/**" % sub)
    # Write the configuration to a temporary file and invoke the validator;
    # a missing validator executable only produces a warning, not a failure.
    with tempfile.NamedTemporaryFile(mode='w+', suffix='.json') as temp:
        temp.write(json.dumps(validator_config_dict))
        temp.flush()
        try:
            subprocess.check_call(['bids-validator', str(bids_dir), '-c', temp.name])
        except FileNotFoundError:
            print("bids-validator does not appear to be installed", file=sys.stderr)
| 7,052 | 36.515957 | 97 | py |
fmriprep | fmriprep-master/fmriprep/utils/telemetry.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Stripped out routines for telemetry"""
import os
import re
from nibabel.optpkg import optional_package
from niworkflows.utils.misc import read_crashfile
from .. import __version__, config
sentry_sdk = optional_package("sentry_sdk")[0]
migas = optional_package("migas")[0]
CHUNK_SIZE = 16384
# Group common events with pre specified fingerprints
KNOWN_ERRORS = {
'permission-denied': ["PermissionError: [Errno 13] Permission denied"],
'memory-error': [
"MemoryError",
"Cannot allocate memory",
"Return code: 134",
],
'reconall-already-running': ["ERROR: it appears that recon-all is already running"],
'no-disk-space': ["[Errno 28] No space left on device", "[Errno 122] Disk quota exceeded"],
'segfault': [
"Segmentation Fault",
"Segfault",
"Return code: 139",
],
'potential-race-condition': [
"[Errno 39] Directory not empty",
"_unfinished.json",
],
'keyboard-interrupt': [
"KeyboardInterrupt",
],
}
def sentry_setup():
    """Initialize the Sentry SDK for crash/error reporting.

    The release is taken from the package version (falling back to ``"dev"``),
    and the environment is flagged as ``"dev"`` when the ``FMRIPREP_DEV``
    environment variable is set to a truthy value or when running a
    non-release (``+``-tagged) version. Every configuration setting is
    attached as a tag to outgoing events.
    """
    release = config.environment.version or "dev"
    # BUG FIX: the original code read ``os.getenv(...).lower in (...)`` --
    # referencing the ``str.lower`` method without calling it -- so the
    # membership test was always False and FMRIPREP_DEV was silently ignored.
    dev_flagged = os.getenv('FMRIPREP_DEV', '').lower() in ('1', 'on', 'yes', 'y', 'true')
    environment = "dev" if (dev_flagged or '+' in release) else "prod"
    sentry_sdk.init(
        "https://d5a16b0c38d84d1584dfc93b9fb1ade6@sentry.io/1137693",
        release=release,
        environment=environment,
        before_send=before_send,
    )
    with sentry_sdk.configure_scope() as scope:
        # Tag every event with the full (flattened) fMRIPrep configuration
        for k, v in config.get(flat=True).items():
            scope.set_tag(k, v)
def process_crashfile(crashfile):
    """Parse the contents of a crashfile and submit sentry messages.

    The nipype crashfile is mined for the failing node's name, the traceback,
    and the node inputs; known failure modes (see ``KNOWN_ERRORS``) are
    grouped under a common fingerprint so Sentry aggregates them together.
    """
    crash_info = read_crashfile(str(crashfile))
    with sentry_sdk.push_scope() as scope:
        scope.level = 'fatal'
        # Extract node name
        node_name = crash_info.pop('node').split('.')[-1]
        scope.set_tag("node_name", node_name)
        # Massage the traceback, extract the gist
        traceback = crash_info.pop('traceback')
        # last line is probably most informative summary
        gist = traceback.splitlines()[-1]
        # Skip indented (stack frame) lines: the exception text proper starts
        # at the first non-indented line after the traceback header.
        exception_text_start = 1
        for line in traceback.splitlines()[1:]:
            if not line[0].isspace():
                break
            exception_text_start += 1
        exception_text = '\n'.join(traceback.splitlines()[exception_text_start:])
        # Extract inputs, if present
        inputs = crash_info.pop('inputs', None)
        if inputs:
            scope.set_extra('inputs', dict(inputs))
        # Extract any other possible metadata in the crash file,
        # chunking values too large for a single "extra" field
        for k, v in crash_info.items():
            strv = list(_chunks(str(v)))
            if len(strv) == 1:
                scope.set_extra(k, strv[0])
            else:
                for i, chunk in enumerate(strv):
                    scope.set_extra('%s_%02d' % (k, i), chunk)
        fingerprint = ''
        issue_title = f'{node_name}: {gist}'
        # Group known failure modes under a pre-specified fingerprint/title
        for new_fingerprint, error_snippets in KNOWN_ERRORS.items():
            for error_snippet in error_snippets:
                if error_snippet in traceback:
                    fingerprint = new_fingerprint
                    issue_title = new_fingerprint
                    break
            if fingerprint:
                break
        message = issue_title + '\n\n'
        # Keep only the tail of the exception so the message stays within 8192 chars
        message += exception_text[-(8192 - len(message)) :]
        if fingerprint:
            sentry_sdk.add_breadcrumb(message=fingerprint, level='fatal')
        else:
            # No known error matched: derive a fingerprint from the message itself
            # remove file paths
            fingerprint = re.sub(r"(/[^/ ]*)+/?", '', message)
            # remove words containing numbers
            fingerprint = re.sub(r"([a-zA-Z]*[0-9]+[a-zA-Z]*)+", '', fingerprint)
            # adding the return code if it exists
            for line in message.splitlines():
                if line.startswith("Return code"):
                    fingerprint += line
                    break
        scope.fingerprint = [fingerprint]
        sentry_sdk.capture_message(message, 'fatal')
def before_send(event, hints):
    """Filter Sentry events before submission.

    Drops log messages about individual crashed nodes (those are reported
    separately via crashfiles), and promotes selected breadcrumb messages
    to the event fingerprint so related failures aggregate together.
    Returns ``None`` to suppress the event, or the (possibly modified) event.
    """
    logentry = event.get('logentry', {})
    if 'message' in logentry:
        msg = logentry['message']
        suppressed = (
            msg.startswith("could not run node:")
            or msg.startswith("Saving crash info to ")
            or re.match("Node .+ failed to run on host .+", msg) is not None
        )
        if suppressed:
            return None
    breadcrumbs = event.get('breadcrumbs')
    if isinstance(breadcrumbs, list):
        # Fingerprints worth propagating from breadcrumbs to the event itself
        propagate = (
            'no-disk-space',
            'memory-error',
            'permission-denied',
            'keyboard-interrupt',
        )
        for crumb in breadcrumbs:
            crumb_msg = crumb.get('message', 'empty-msg')
            if crumb_msg in propagate:
                event['fingerprint'] = [crumb_msg]
                break
    return event
def _chunks(string, length=CHUNK_SIZE):
    """
    Split a string into smaller chunks.

    >>> list(_chunks('some longer string.', length=3))
    ['som', 'e l', 'ong', 'er ', 'str', 'ing', '.']

    """
    # Yield consecutive slices of at most ``length`` characters.
    for start in range(0, len(string), length):
        yield string[start : start + length]
def setup_migas(init_ping: bool = True) -> None:
    """
    Prepare the migas python client to communicate with a migas server.

    If ``init_ping`` is ``True``, an initial breadcrumb ("workflow start")
    is sent right after setup.
    """
    # Derive the session UUID from the generated run UUID by stripping
    # everything up to (and including) the first underscore.
    run_uuid = config.execution.run_uuid
    session_id = run_uuid.split('_', 1)[-1] if run_uuid else None
    migas.setup(session_id=session_id)
    if init_ping:
        send_breadcrumb(status='R', status_desc='workflow start')
def send_breadcrumb(**kwargs) -> dict:
    """
    Communicate with the migas telemetry server.

    This requires `migas.setup()` to be called first; any keyword arguments
    are forwarded to :func:`migas.add_project`.
    """
    return migas.add_project("nipreps/fmriprep", __version__, **kwargs)
| 6,916 | 32.095694 | 95 | py |
fmriprep | fmriprep-master/fmriprep/utils/misc.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Miscellaneous utilities."""
def check_deps(workflow):
    """Make sure dependencies are present in this system.

    Returns a sorted list of ``(interface_class_name, command)`` tuples for
    every command-line node of *workflow* whose executable cannot be found.
    """
    from nipype.utils.filemanip import which

    missing = []
    for node in workflow._get_all_nodes():
        interface = node.interface
        if not hasattr(interface, '_cmd'):
            continue  # not a command-line interface
        cmd = interface._cmd
        if which(cmd.split()[0]) is None:
            missing.append((interface.__class__.__name__, cmd))
    return sorted(missing)
def fips_enabled():
    """
    Check whether FIPS mode is enabled on this (Linux) system.

    For more information, see:
    https://github.com/nipreps/fmriprep/issues/2480#issuecomment-891199276
    """
    from pathlib import Path

    flag_file = Path("/proc/sys/crypto/fips_enabled")
    if not flag_file.exists():
        return False
    # The kernel reports "0" when FIPS is disabled; anything else means enabled.
    return flag_file.read_text()[0] != "0"
| 1,600 | 32.354167 | 94 | py |
fmriprep | fmriprep-master/fmriprep/utils/testing.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Class and utilities for testing the workflows module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import logging
import unittest
from networkx.exception import NetworkXUnfeasible
from nipype.interfaces import utility as niu
from nipype.interfaces.base import isdefined
from nipype.pipeline import engine as pe
logging.disable(logging.INFO) # <- do we really want to do this?
class TestWorkflow(unittest.TestCase):
    '''Subclass for test within the workflow module.
    invoke tests with ``python -m unittest discover test'''
    def assertIsAlmostExpectedWorkflow(
        self, expected_name, expected_interfaces, expected_inputs, expected_outputs, actual
    ):
        '''somewhat hacky way to confirm workflows are as expected, but with low confidence'''
        self.assertIsInstance(actual, pe.Workflow)
        self.assertEqual(expected_name, actual.name)
        # assert it has the same nodes
        actual_nodes = [actual.get_node(name) for name in actual.list_node_names()]
        actual_interfaces = [node.interface.__class__.__name__ for node in actual_nodes]
        # assert lists equal (mutual subset check; ignores multiplicity/order)
        self.assertIsSubsetOfList(expected_interfaces, actual_interfaces)
        self.assertIsSubsetOfList(actual_interfaces, expected_interfaces)
        # assert expected inputs, outputs exist
        actual_inputs, actual_outputs = self.get_inputs_outputs(actual_nodes)
        self.assertIsSubsetOfList(expected_outputs, actual_outputs)
        self.assertIsSubsetOfList(expected_inputs, actual_inputs)
    def assertIsSubsetOfList(self, expecteds, actuals):
        # Assert that every element of ``expecteds`` is present in ``actuals``.
        for expected in expecteds:
            self.assertIn(expected, actuals)
    def get_inputs_outputs(self, nodes):
        # Flatten the input/output fields of ``nodes`` into "<node>.<field>" names.
        def get_io_names(pre, ios):
            return [pre + str(io[0]) for io in ios]
        actual_inputs = []
        actual_outputs = []
        node_tuples = [(node.name, node.inputs.items(), node.outputs.items()) for node in nodes]
        for name, inputs, outputs in node_tuples:
            pre = str(name) + "."
            actual_inputs += get_io_names(pre, inputs)
            # outputs of the 'inputnode' are exposed without the node prefix
            pre = pre if pre[0:-1] != 'inputnode' else ""
            actual_outputs += get_io_names(pre, outputs)
        return actual_inputs, actual_outputs
    def assert_circular(self, workflow, circular_connections):
        '''check key paths in workflow by specifying some connections that should induce
        circular paths, which trips a NetworkX error.
        circular_connections is a list of tuples:
        [('from_node_name', 'to_node_name', ('from_node.output_field','to_node.input_field'))]
        '''
        for from_node, to_node, fields in circular_connections:
            from_node = workflow.get_node(from_node)
            to_node = workflow.get_node(to_node)
            workflow.connect([(from_node, to_node, fields)])
            # The added edge must make the graph cyclic, so writing it out
            # (which requires a topological sort) must fail.
            self.assertRaises(NetworkXUnfeasible, workflow.write_graph)
            workflow.disconnect([(from_node, to_node, fields)])
    def assert_inputs_set(self, workflow, additional_inputs=None):
        """Check that all mandatory inputs of nodes in the workflow (at the first level) are
        already set. Additionally, check that inputs in additional_inputs are set. An input is
        "set" if it is
        a) defined explicitly (e.g., in the Interface declaration)
        OR
        b) connected to another node's output (e.g., using the workflow.connect method)
        additional_inputs is a dict:
        {'node_name': ['mandatory', 'input', 'fields']}
        """
        additional_inputs = additional_inputs or {}
        dummy_node = pe.Node(niu.IdentityInterface(fields=['dummy']), name='DummyNode')
        # Only first-level nodes (no '.' in the fully-qualified name)
        node_names = [name for name in workflow.list_node_names() if name.count('.') == 0]
        for node_name in set(node_names + list(additional_inputs.keys())):
            node = workflow.get_node(node_name)
            mandatory_inputs = list(node.inputs.traits(mandatory=True).keys())
            other_inputs = additional_inputs[node_name] if node_name in additional_inputs else []
            for field in set(mandatory_inputs + other_inputs):
                if isdefined(getattr(node.inputs, field)):
                    pass
                else:  # not explicitly defined
                    # maybe it is connected to an output
                    with self.assertRaises(Exception):
                        # throws an error if the input is already connected
                        workflow.connect([(dummy_node, node, [('dummy', field)])])
| 5,440 | 41.84252 | 98 | py |
fmriprep | fmriprep-master/fmriprep/utils/confounds.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Utilities for confounds manipulation."""
def mask2vf(in_file, zooms=None, out_file=None):
    """
    Convert a binary mask into a volume-fraction map.

    The algorithm simply applies a Gaussian filter, with the kernel size
    scaled by the ratio of the requested ``zooms`` to the image's own
    voxel sizes. Returns the array when ``out_file`` is ``None``,
    otherwise writes the map to ``out_file`` and returns that path.
    """
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import gaussian_filter

    mask_img = nb.load(in_file)
    native_zooms = np.array(mask_img.header.get_zooms()[:3], dtype=float)
    target_zooms = native_zooms if zooms is None else np.array(zooms, dtype=float)
    # Smooth with a kernel proportional to the target/native zoom ratio
    smoothed = gaussian_filter(
        mask_img.get_fdata(dtype=np.float32),
        sigma=0.5 * (target_zooms / native_zooms),
    )
    # Normalize into [0, 1] using the 99th percentile of the nonzero voxels
    ceiling = np.percentile(smoothed[smoothed > 0], 99)
    vol_frac = np.clip(smoothed / ceiling, a_min=0, a_max=1)
    if out_file is None:
        return vol_frac
    header = mask_img.header.copy()
    header.set_data_dtype(np.float32)
    nb.Nifti1Image(vol_frac.astype(np.float32), mask_img.affine, header).to_filename(out_file)
    return out_file
def acompcor_masks(in_files, is_aseg=False, zooms=None):
    """
    Generate aCompCor masks.
    This function selects the CSF partial volume map from the input,
    and generates the WM and combined CSF+WM masks for aCompCor.
    The implementation deviates from Behzadi et al.
    Their original implementation thresholded the CSF and the WM partial-volume
    masks at 0.99 (i.e., 99% of the voxel volume is filled with a particular tissue),
    and then binary eroded that 2 voxels:
    > Anatomical data were segmented into gray matter, white matter,
    > and CSF partial volume maps using the FAST algorithm available
    > in the FSL software package (Smith et al., 2004). Tissue partial
    > volume maps were linearly interpolated to the resolution of the
    > functional data series using AFNI (Cox, 1996). In order to form
    > white matter ROIs, the white matter partial volume maps were
    > thresholded at a partial volume fraction of 0.99 and then eroded by
    > two voxels in each direction to further minimize partial voluming
    > with gray matter. CSF voxels were determined by first thresholding
    > the CSF partial volume maps at 0.99 and then applying a threedimensional
    > nearest neighbor criteria to minimize multiple tissue
    > partial voluming. Since CSF regions are typically small compared
    > to white matter regions mask, erosion was not applied.
    This particular procedure is not generalizable to BOLD data with different voxel zooms
    as the mathematical morphology operations will be scaled by those.
    Also, from reading the excerpt above and the tCompCor description, I (@oesteban)
    believe that they always operated slice-wise given the large slice-thickness of
    their functional data.
    Instead, *fMRIPrep*'s implementation deviates from Behzadi's implementation on two
    aspects:
    * the masks are prepared in high-resolution, anatomical space and then
      projected into BOLD space; and,
    * instead of using binary erosion, a dilated GM map is generated -- thresholding
      the corresponding PV map at 0.05 (i.e., pixels containing at least 5% of GM tissue)
      and then subtracting that map from the CSF, WM and CSF+WM (combined) masks.
      This should be equivalent to eroding the masks, except that the erosion
      only happens at direct interfaces with GM.
    When the probseg maps come from FreeSurfer's ``recon-all`` (i.e., they are
    discrete), binary maps are *transformed* into some sort of partial volume maps
    by means of a Gaussian smoothing filter with sigma adjusted by the size of the
    BOLD data.
    """
    from pathlib import Path
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import binary_dilation
    from skimage.morphology import ball
    csf_file = in_files[2]  # BIDS labeling (CSF=2; last of list)
    # Load PV maps (fast) or segments (recon-all)
    gm_vf = nb.load(in_files[0])
    wm_vf = nb.load(in_files[1])
    csf_vf = nb.load(csf_file)
    # Prepare target zooms
    imgzooms = np.array(gm_vf.header.get_zooms()[:3], dtype=float)
    if zooms is None:
        zooms = imgzooms
    zooms = np.array(zooms, dtype=float)
    if not is_aseg:
        # FAST route: inputs are continuous partial-volume maps;
        # GM mask keeps voxels with at least 5% gray matter
        gm_data = gm_vf.get_fdata() > 0.05
        wm_data = wm_vf.get_fdata()
        csf_data = csf_vf.get_fdata()
    else:
        # recon-all route: inputs are discrete; synthesize PV maps by smoothing
        csf_file = mask2vf(
            csf_file,
            zooms=zooms,
            out_file=str(Path("acompcor_csf.nii.gz").absolute()),
        )
        csf_data = nb.load(csf_file).get_fdata()
        wm_data = mask2vf(in_files[1], zooms=zooms)
        # We do not have partial volume maps (recon-all route)
        gm_data = np.asanyarray(gm_vf.dataobj, np.uint8) > 0
    # Dilate the GM mask
    gm_data = binary_dilation(gm_data, structure=ball(3))
    # Output filenames
    wm_file = str(Path("acompcor_wm.nii.gz").absolute())
    combined_file = str(Path("acompcor_wmcsf.nii.gz").absolute())
    # Prepare WM mask
    wm_data[gm_data] = 0  # Make sure voxel does not contain GM
    nb.Nifti1Image(wm_data, gm_vf.affine, gm_vf.header).to_filename(wm_file)
    # Prepare combined CSF+WM mask
    comb_data = csf_data + wm_data
    comb_data[gm_data] = 0  # Make sure voxel does not contain GM
    nb.Nifti1Image(comb_data, gm_vf.affine, gm_vf.header).to_filename(combined_file)
    return [csf_file, wm_file, combined_file]
| 6,256 | 38.10625 | 91 | py |
fmriprep | fmriprep-master/fmriprep/utils/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/utils/debug.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
# STATEMENT OF CHANGES: This file is derived from sources licensed under the Apache-2.0 terms,
# and uses the following portion of the original code:
# https://github.com/dandi/dandi-cli/blob/da3b7a726c4a352dfb53a0c6bee59e660de827e6/dandi/utils.py#L49-L82
#
#
# ORIGINAL WORK'S ATTRIBUTION NOTICE:
#
# Copyright 2020 DANDI Client Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
def is_interactive():
    """Return True if all in/outs are tty"""
    # TODO: check on windows if hasattr check would work correctly and add value:
    #
    streams = (sys.stdin, sys.stdout, sys.stderr)
    return all(stream.isatty() for stream in streams)
def setup_exceptionhook(ipython=False):
    """Install a post-mortem handler as ``sys.excepthook``.

    If the session is interactive, the handler drops into ``pdb.post_mortem``
    after printing the traceback; otherwise it only prints the traceback.
    With ``ipython=True``, IPython's verbose traceback formatter is used
    instead.
    """
    if ipython:
        from IPython.core import ultratb

        sys.excepthook = ultratb.FormattedTB(
            mode="Verbose",
            # color_scheme='Linux',
            call_pdb=is_interactive(),
        )
        return

    def _post_mortem_hook(type, value, tb):
        import traceback

        traceback.print_exception(type, value, tb)
        print()
        if is_interactive():
            import pdb

            pdb.post_mortem(tb)

    sys.excepthook = _post_mortem_hook
| 2,784 | 33.8125 | 105 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/patches.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Temporary patches
-----------------
"""
from random import randint
from time import sleep
from nipype.algorithms import confounds as nac
from numpy.linalg.linalg import LinAlgError
class RobustACompCor(nac.ACompCor):
    """
    Runs aCompCor several times if it suddenly fails with
    https://github.com/nipreps/fmriprep/issues/776
    """

    def _run_interface(self, runtime):
        # Retry on LinAlgError with a randomized, growing back-off;
        # give up and re-raise after the 11th consecutive failure.
        failures = 0
        while True:
            try:
                return super()._run_interface(runtime)
            except LinAlgError:
                failures += 1
                if failures > 10:
                    raise
                base = (failures - 1) * 10
                sleep(randint(base + 4, base + 10))
class RobustTCompCor(nac.TCompCor):
    """
    Runs tCompCor several times if it suddenly fails with
    https://github.com/nipreps/fmriprep/issues/940
    """

    def _run_interface(self, runtime):
        # Retry on LinAlgError with a randomized, growing back-off;
        # give up and re-raise after the 11th consecutive failure.
        failures = 0
        while True:
            try:
                return super()._run_interface(runtime)
            except LinAlgError:
                failures += 1
                if failures > 10:
                    raise
                base = (failures - 1) * 10
                sleep(randint(base + 4, base + 10))
| 2,239 | 27 | 74 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/multiecho.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Multi-echo EPI
~~~~~~~~~~~~~~
For using multi-echo EPI data.
"""
import os
from nipype import logging
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
)
LOGGER = logging.getLogger('nipype.interface')
class T2SMapInputSpec(CommandLineInputSpec):
    """Input specification for the ``t2smap`` command-line tool."""

    # At least three echoes are required for the monoexponential fit
    in_files = traits.List(
        File(exists=True),
        argstr='-d %s',
        position=1,
        mandatory=True,
        minlen=3,
        desc='multi-echo BOLD EPIs',
    )
    # Echo times are given in seconds; T2SMap._format_arg converts them to ms
    echo_times = traits.List(
        traits.Float, argstr='-e %s', position=2, mandatory=True, minlen=3, desc='echo times'
    )
    mask_file = File(argstr='--mask %s', position=3, desc='mask file', exists=True)
    fittype = traits.Enum(
        'curvefit',
        'loglin',
        argstr='--fittype %s',
        position=4,
        usedefault=True,
        desc=(
            'Desired fitting method: '
            '"loglin" means that a linear model is fit to the log of the data. '
            '"curvefit" means that a more computationally demanding '
            'monoexponential model is fit to the raw data.'
        ),
    )
class T2SMapOutputSpec(TraitedSpec):
    """Outputs of the ``t2smap`` workflow (see ``T2SMap._list_outputs``)."""

    t2star_map = File(exists=True, desc='limited T2* map')
    s0_map = File(exists=True, desc='limited S0 map')
    optimal_comb = File(exists=True, desc='optimally combined ME-EPI time series')
class T2SMap(CommandLine):
    """
    Runs the tedana T2* workflow to generate an adaptive T2* map and create
    an optimally combined ME-EPI time series.
    Example
    =======
    >>> from fmriprep.interfaces import multiecho
    >>> t2smap = multiecho.T2SMap()
    >>> t2smap.inputs.in_files = ['sub-01_run-01_echo-1_bold.nii.gz',
    ...                           'sub-01_run-01_echo-2_bold.nii.gz',
    ...                           'sub-01_run-01_echo-3_bold.nii.gz']
    >>> t2smap.inputs.echo_times = [0.013, 0.027, 0.043]
    >>> t2smap.cmdline  # doctest: +ELLIPSIS
    't2smap -d sub-01_run-01_echo-1_bold.nii.gz sub-01_run-01_echo-2_bold.nii.gz \
sub-01_run-01_echo-3_bold.nii.gz -e 13.0 27.0 43.0 --fittype curvefit'
    """

    _cmd = 't2smap'
    input_spec = T2SMapInputSpec
    output_spec = T2SMapOutputSpec
    def _format_arg(self, name, trait_spec, value):
        # ``t2smap`` expects echo times in milliseconds, while the inputs are
        # specified in seconds (see the doctest above: 0.013 -> 13.0)
        if name == 'echo_times':
            value = [te * 1000 for te in value]
        return super()._format_arg(name, trait_spec, value)
    def _list_outputs(self):
        # ``t2smap`` writes fixed filenames into the current working directory
        outputs = self._outputs().get()
        out_dir = os.getcwd()
        outputs['t2star_map'] = os.path.join(out_dir, 'T2starmap.nii.gz')
        outputs['s0_map'] = os.path.join(out_dir, 'S0map.nii.gz')
        outputs['optimal_comb'] = os.path.join(out_dir, 'desc-optcom_bold.nii.gz')
        return outputs
| 3,635 | 30.894737 | 93 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/conftest.py | from pathlib import Path
from shutil import copytree
import pytest
# ``contextlib.chdir`` was added in Python 3.11; provide an equivalent
# fallback context manager on older interpreters.
try:
    from contextlib import chdir as _chdir
except ImportError:  # PY310
    import os
    from contextlib import contextmanager

    @contextmanager  # type: ignore
    def _chdir(path):
        # Temporarily change the working directory, restoring it on exit.
        cwd = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(cwd)
@pytest.fixture(scope="module")
def data_dir():
    # Location of the package's test data, shared by all tests in a module.
    return Path(__file__).parent / "tests" / "data"
@pytest.fixture(autouse=True)
def _docdir(request, tmp_path):
    """Run doctests from a temporary directory pre-populated with test data."""
    # Trigger ONLY for the doctests.
    doctest_plugin = request.config.pluginmanager.getplugin("doctest")
    if isinstance(request.node, doctest_plugin.DoctestItem):
        copytree(Path(__file__).parent / "tests" / "data", tmp_path, dirs_exist_ok=True)
        # Chdir only for the duration of the test.
        with _chdir(tmp_path):
            yield
    else:
        # For normal tests, we have to yield, since this is a yield-fixture.
        yield
| 1,011 | 23.682927 | 88 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/gifti.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces for manipulating GIFTI files."""
import os
import nibabel as nb
import numpy as np
from nipype.interfaces.base import File, SimpleInterface, TraitedSpec, isdefined, traits
class CreateROIInputSpec(TraitedSpec):
    """Inputs for :class:`CreateROI`."""

    # Optional; a placeholder ('sub-XYZ') is used when undefined
    subject_id = traits.Str(desc='subject ID')
    hemisphere = traits.Enum(
        "L",
        "R",
        mandatory=True,
        desc='hemisphere',
    )
    thickness_file = File(exists=True, mandatory=True, desc='input GIFTI file')
class CreateROIOutputSpec(TraitedSpec):
    """Outputs for :class:`CreateROI`."""

    roi_file = File(desc='output GIFTI file')
class CreateROI(SimpleInterface):
    """Prepare a GIFTI thickness (shape) file for use as a cortical ROI.

    Binarizes the thickness map (nonzero thickness => in-ROI) and sets the
    structure and map-name metadata, replicating a short sequence of
    ``wb_command`` calls (see inline comments).
    """

    input_spec = CreateROIInputSpec
    output_spec = CreateROIOutputSpec

    def _run_interface(self, runtime):
        subject, hemi = self.inputs.subject_id, self.inputs.hemisphere
        if not isdefined(subject):
            # Fall back to a placeholder subject identifier
            subject = 'sub-XYZ'
        img = nb.GiftiImage.from_filename(self.inputs.thickness_file)
        # wb_command -set-structure
        img.meta["AnatomicalStructurePrimary"] = {'L': 'CortexLeft', 'R': 'CortexRight'}[hemi]
        darray = img.darrays[0]
        # wb_command -set-map-names
        meta = darray.meta
        meta['Name'] = f"{subject}_{hemi}_ROI"
        # wb_command -metric-palette calls have no effect on ROI files
        # Compiling an odd sequence of math operations that works out to:
        # wb_command -metric-math "abs(var * -1) > 0"
        roi = np.abs(darray.data) > 0
        darray = nb.gifti.GiftiDataArray(
            roi,
            intent=darray.intent,
            datatype=darray.datatype,
            encoding=darray.encoding,
            endian=darray.endian,
            coordsys=darray.coordsys,
            ordering=darray.ind_ord,
            meta=meta,
        )
        # BUG FIX: the rebuilt (binarized) data array was previously discarded,
        # so the saved file kept the original thickness values. Store it back
        # into the image before writing.
        img.darrays[0] = darray
        out_filename = os.path.join(runtime.cwd, f"{subject}.{hemi}.roi.native.shape.gii")
        img.to_filename(out_filename)
        self._results["roi_file"] = out_filename
        return runtime
| 2,100 | 31.828125 | 94 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/workbench.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""This module provides interfaces for workbench surface commands."""
import os
from nipype import logging
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
isdefined,
traits,
)
from nipype.interfaces.workbench.base import WBCommand
iflogger = logging.getLogger("nipype.interface")
class OpenMPTraitedSpec(CommandLineInputSpec):
    """Base input spec adding an OpenMP thread-count input."""

    num_threads = traits.Int(desc="allows for specifying more threads")
class OpenMPCommandMixin(CommandLine):
    """Mixin that keeps ``OMP_NUM_THREADS`` in sync with the ``num_threads`` input.

    If ``num_threads`` is left undefined, it is seeded from the
    ``OMP_NUM_THREADS`` environment variable (when set).
    """

    input_spec = OpenMPTraitedSpec

    # Cached fallback value, read from the environment at construction time
    _num_threads = None

    def __init__(self, **inputs):
        super().__init__(**inputs)
        # Register the sync callback first, so assignments below trigger it
        self.inputs.on_trait_change(self._num_threads_update, "num_threads")
        if not self._num_threads:
            self._num_threads = os.environ.get("OMP_NUM_THREADS", None)
        if not isdefined(self.inputs.num_threads) and self._num_threads:
            self.inputs.num_threads = int(self._num_threads)
        self._num_threads_update()

    def _num_threads_update(self):
        # Propagate the current setting into the spawned command's environment
        if self.inputs.num_threads:
            self.inputs.environ.update({"OMP_NUM_THREADS": str(self.inputs.num_threads)})

    def run(self, **inputs):
        # Allow a run-time override: ``node.run(num_threads=N)``
        if "num_threads" in inputs:
            self.inputs.num_threads = inputs["num_threads"]
            self._num_threads_update()
        return super().run(**inputs)
class MetricDilateInputSpec(OpenMPTraitedSpec):
    """Inputs for :class:`MetricDilate` (``wb_command -metric-dilate``)."""

    in_file = File(
        exists=True,
        mandatory=True,
        argstr="%s ",
        position=0,
        desc="the metric to dilate",
    )
    surf_file = File(
        exists=True,
        mandatory=True,
        argstr="%s ",
        position=1,
        desc="the surface to compute on",
    )
    distance = traits.Float(
        mandatory=True,
        argstr="%f ",
        position=2,
        desc="distance in mm to dilate",
    )
    out_file = File(
        name_source=["in_file"],
        name_template="%s.func.gii",
        keep_extension=False,
        argstr="%s ",
        position=3,
        desc="output - the output metric",
    )
    bad_vertex_roi_file = File(
        argstr="-bad-vertex-roi %s ",
        position=4,
        desc="metric file, positive values denote vertices to have their values replaced",
    )
    data_roi_file = File(
        argstr="-data-roi %s ",
        position=5,
        desc="metric file, positive values denote vertices that have data",
    )
    column = traits.Int(
        position=6,
        argstr="-column %d ",
        desc="the column number",
    )
    nearest = traits.Bool(
        position=7,
        argstr="-nearest ",
        desc="use the nearest good value instead of a weighted average",
    )
    linear = traits.Bool(
        position=8,
        argstr="-linear ",
        desc="fill in values with linear interpolation along strongest gradient",
    )
    exponent = traits.Float(
        argstr="-exponent %f ",
        position=9,
        default=6.0,
        desc="exponent n to use in (area / (distance ^ n)) as the "
        "weighting function (default 6)",
    )
    corrected_areas = File(
        argstr="-corrected-areas %s ",
        position=10,
        desc="vertex areas to use instead of computing them from the surface",
    )
    legacy_cutoff = traits.Bool(
        position=11,
        argstr="-legacy-cutoff ",
        desc="use the v1.3.2 method of choosing how many vertices to "
        "use when calculating the dilated value with weighted method",
    )
class MetricDilateOutputSpec(TraitedSpec):
    """Outputs for :class:`MetricDilate`."""

    out_file = File(exists=True, desc="output file")
class MetricDilate(WBCommand, OpenMPCommandMixin):
    """Dilate a metric file on a surface.

    Wraps ``wb_command -metric-dilate``; inherits OpenMP threading control
    from :class:`OpenMPCommandMixin`.

    For all data values designated as bad, if they neighbor a good value or
    are within the specified distance of a good value in the same kind of
    model, replace the value with a distance weighted average of nearby good
    values, otherwise set the value to zero. If -nearest is specified, it
    will use the value from the closest good value within range instead of a
    weighted average. When the input file contains label data, nearest
    dilation is used on the surface, and weighted popularity is used in the
    volume.

    The -corrected-areas options are intended for dilating on group average
    surfaces, but it is only an approximate correction for the reduction of
    structure in a group average surface.

    If -bad-vertex-roi is specified, all values, including those with
    value zero, are good, except for locations with a positive value in the
    ROI. If it is not specified, only values equal to zero are bad.
    """

    input_spec = MetricDilateInputSpec
    output_spec = MetricDilateOutputSpec
    _cmd = "wb_command -metric-dilate "
class MetricResampleInputSpec(OpenMPTraitedSpec):
    """Inputs for :class:`MetricResample` (``wb_command -metric-resample``)."""

    in_file = File(
        exists=True,
        mandatory=True,
        argstr="%s",
        position=0,
        desc="The metric file to resample",
    )
    current_sphere = File(
        exists=True,
        mandatory=True,
        argstr="%s",
        position=1,
        desc="A sphere surface with the mesh that the metric is currently on",
    )
    new_sphere = File(
        exists=True,
        mandatory=True,
        argstr="%s",
        position=2,
        desc="A sphere surface that is in register with <current-sphere> and"
        " has the desired output mesh",
    )
    method = traits.Enum(
        "ADAP_BARY_AREA",
        "BARYCENTRIC",
        argstr="%s",
        mandatory=True,
        position=3,
        desc="The method name - ADAP_BARY_AREA method is recommended for"
        " ordinary metric data, because it should use all data while"
        " downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used,"
        " exactly one of area_surfs or area_metrics must be specified",
    )
    out_file = File(
        name_source=["new_sphere"],
        name_template="%s.out",
        keep_extension=True,
        argstr="%s",
        position=4,
        desc="The output metric",
    )
    # area_surfs and area_metrics are mutually exclusive (same position, xor)
    area_surfs = traits.Bool(
        position=5,
        argstr="-area-surfs",
        xor=["area_metrics"],
        desc="Specify surfaces to do vertex area correction based on",
    )
    area_metrics = traits.Bool(
        position=5,
        argstr="-area-metrics",
        xor=["area_surfs"],
        desc="Specify vertex area metrics to do area correction based on",
    )
    current_area = File(
        exists=True,
        position=6,
        argstr="%s",
        desc="A relevant anatomical surface with <current-sphere> mesh OR"
        " a metric file with vertex areas for <current-sphere> mesh",
    )
    new_area = File(
        exists=True,
        position=7,
        argstr="%s",
        desc="A relevant anatomical surface with <current-sphere> mesh OR"
        " a metric file with vertex areas for <current-sphere> mesh",
    )
    roi_metric = File(
        exists=True,
        position=8,
        argstr="-current-roi %s",
        desc="Input roi on the current mesh used to exclude non-data vertices",
    )
    valid_roi_out = traits.Bool(
        position=9,
        argstr="-valid-roi-out",
        desc="Output the ROI of vertices that got data from valid source vertices",
    )
    largest = traits.Bool(
        position=10,
        argstr="-largest",
        desc="Use only the value of the vertex with the largest weight",
    )
class MetricResampleOutputSpec(TraitedSpec):
    """Outputs for :class:`MetricResample`."""

    out_file = File(exists=True, desc="the output metric")
    roi_file = File(desc="ROI of vertices that got data from valid source vertices")
class MetricResample(WBCommand, OpenMPCommandMixin):
    """Resample a metric file to a different mesh.

    Resamples a metric file, given two spherical surfaces that are in
    register. If ``ADAP_BARY_AREA`` is used, exactly one of -area-surfs or
    ``-area-metrics`` must be specified.

    The ``ADAP_BARY_AREA`` method is recommended for ordinary metric data,
    because it should use all data while downsampling, unlike ``BARYCENTRIC``.
    The recommended areas option for most data is individual midthicknesses
    for individual data, and averaged vertex area metrics from individual
    midthicknesses for group average data.

    The ``-current-roi`` option only masks the input, the output may be slightly
    dilated in comparison, consider using ``-metric-mask`` on the output when
    using ``-current-roi``.

    The ``-largest option`` results in nearest vertex behavior when used with
    ``BARYCENTRIC``. When resampling a binary metric, consider thresholding at
    0.5 after resampling rather than using ``-largest``.
    """

    input_spec = MetricResampleInputSpec
    output_spec = MetricResampleOutputSpec
    _cmd = "wb_command -metric-resample"

    def _format_arg(self, opt, spec, val):
        # Area arguments are only meaningful when one of the area modes is set
        if opt in ["current_area", "new_area"]:
            if not self.inputs.area_surfs and not self.inputs.area_metrics:
                raise ValueError(
                    "{} was set but neither area_surfs or" " area_metrics were set".format(opt)
                )
        if opt == "method":
            if (
                val == "ADAP_BARY_AREA"
                and not self.inputs.area_surfs
                and not self.inputs.area_metrics
            ):
                raise ValueError("Exactly one of area_surfs or area_metrics" " must be specified")
        if opt == "valid_roi_out" and val:
            # generate a filename and add it to argstr
            roi_out = self._gen_filename(self.inputs.in_file, suffix="_roi")
            # BUGFIX: logging lazy-format args require a matching placeholder;
            # without "%s" the extra argument triggered a formatting error
            # when the log record was emitted.
            iflogger.info("Setting roi output file as %s", roi_out)
            spec.argstr += " " + roi_out
        return super()._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = super()._list_outputs()
        if self.inputs.valid_roi_out:
            # Reproduce the filename generated in _format_arg
            roi_file = self._gen_filename(self.inputs.in_file, suffix="_roi")
            outputs["roi_file"] = os.path.abspath(roi_file)
        return outputs
class VolumeToSurfaceMappingInputSpec(OpenMPTraitedSpec):
    """Inputs for :class:`VolumeToSurfaceMapping`.

    The ``-ribbon-constrained`` and ``-myelin-style`` suboptions are mutually
    exclusive; the two ``xor`` lists below enforce that at the trait level.
    """

    volume_file = File(
        exists=True,
        argstr="%s",
        mandatory=True,
        position=1,
        desc="the volume to map data from",
    )
    surface_file = File(
        exists=True,
        argstr="%s",
        mandatory=True,
        position=2,
        desc="the surface to map the data onto",
    )
    out_file = File(
        name_source=["surface_file"],
        name_template="%s_mapped.func.gii",
        keep_extension=False,
        argstr="%s",
        position=3,
        desc="the output metric file",
    )
    method = traits.Enum(
        "trilinear",
        "enclosing",
        "cubic",
        "ribbon-constrained",
        "myelin-style",
        argstr="-%s",
        position=4,
        desc="the interpolation method to use",
    )
    # Traits valid only with -ribbon-constrained
    _ribbon_constrained = [
        "inner_surface",
        "outer_surface",
        "volume_roi",
        "weighted",
        "voxel_subdiv",
        "gaussian",
        "interpolate",
        "bad_vertices_out",
        "output_weights",
        "output_weights_text",
    ]
    # Traits valid only with -myelin-style
    _myelin_style = [
        "ribbon_roi",
        "thickness",
        "sigma",
        "legacy_bug",
    ]
    inner_surface = File(
        exists=True,
        argstr="%s",
        position=5,
        desc="the inner surface of the ribbon [-ribbon-constrained]",
        xor=_myelin_style,
    )
    outer_surface = File(
        exists=True,
        argstr="%s",
        position=6,
        desc="the outer surface of the ribbon [-ribbon-constrained]",
        xor=_myelin_style,
    )
    volume_roi = File(
        exists=True,
        argstr="-volume-roi %s",
        position=7,
        desc="use a volume roi [-ribbon-constrained]",
        xor=_myelin_style,
    )
    weighted = traits.Bool(
        argstr="-weighted",
        position=8,
        desc="treat the roi values as weightings rather than binary [-ribbon-constrained]",
        requires=["volume_roi"],
        xor=_myelin_style,
    )
    voxel_subdiv = traits.Int(
        default_value=3,
        argstr="-voxel-subdiv %d",
        desc="voxel divisions while estimating voxel weights [-ribbon-constrained]",
        xor=_myelin_style,
    )
    thin_columns = traits.Bool(
        argstr="-thin-columns",
        desc="use non-overlapping polyhedra [-ribbon-constrained]",
        xor=_myelin_style,
    )
    gaussian = traits.Float(
        argstr="-gaussian %g",
        desc="reduce weight to voxels that aren't near <surface> [-ribbon-constrained]",
        xor=_myelin_style,
    )
    interpolate = traits.Enum(
        "CUBIC",
        "TRILINEAR",
        "ENCLOSING_VOXEL",
        argstr="-interpolate %s",
        desc="instead of a weighted average of voxels, "
        "interpolate at subpoints inside the ribbon [-ribbon-constrained]",
        xor=_myelin_style,
    )
    bad_vertices_out = File(
        argstr="-bad-vertices-out %s",
        desc="output an ROI of which vertices didn't intersect any valid voxels",
        xor=_myelin_style,
    )
    # NOTE(review): '%(0)d' is %-mapping-key syntax; formatting it with a
    # plain int value looks likely to fail -- verify against nipype's
    # argstr handling before relying on this option.
    output_weights = traits.Int(
        argstr="-output-weights %(0)d output_weights.nii.gz",
        desc="write the voxel weights for a vertex to a volume file",
        xor=_myelin_style,
    )
    output_weights_text = traits.File(
        argstr="-output-weights-text %s",
        desc="write the voxel weights for all vertices to a text file",
        xor=_myelin_style,
    )
    ribbon_roi = File(
        exists=True,
        argstr="%s",
        position=5,
        desc="an roi volume of the cortical ribbon for this hemisphere [-myelin-style]",
        xor=_ribbon_constrained,
    )
    thickness = File(
        exists=True,
        argstr="%s",
        position=6,
        desc="the thickness metric file for this hemisphere [-myelin-style]",
        xor=_ribbon_constrained,
    )
    sigma = traits.Float(
        argstr="%g",
        position=7,
        desc="gaussian kernel in mm for weighting voxels within range [-myelin-style]",
        xor=_ribbon_constrained,
    )
    legacy_bug = traits.Bool(
        argstr="-legacy-bug",
        position=8,
        desc="use the old bug in the myelin-style algorithm [-myelin-style]",
        xor=_ribbon_constrained,
    )
    subvol_select = traits.Int(
        argstr="-subvol-select %d",
        desc="select a single subvolume to map",
    )
    # NOTE(review): the bare string below is NOT a docstring (it is not the
    # first statement in the class body) and is discarded at runtime; it is
    # preserved here as reference help text copied from wb_command.
    """\
    MAP VOLUME TO SURFACE
    wb_command -volume-to-surface-mapping
       <volume> - the volume to map data from
       <surface> - the surface to map the data onto
       <metric-out> - output - the output metric file
       [-trilinear] - use trilinear volume interpolation
       [-enclosing] - use value of the enclosing voxel
       [-cubic] - use cubic splines
       [-ribbon-constrained] - use ribbon constrained mapping algorithm
          <inner-surf> - the inner surface of the ribbon
          <outer-surf> - the outer surface of the ribbon
          [-volume-roi] - use a volume roi
             <roi-volume> - the roi volume file
             [-weighted] - treat the roi values as weightings rather than binary
          [-voxel-subdiv] - voxel divisions while estimating voxel weights
             <subdiv-num> - number of subdivisions, default 3
          [-thin-columns] - use non-overlapping polyhedra
          [-gaussian] - reduce weight to voxels that aren't near <surface>
             <scale> - value to multiply the local thickness by, to get the
                gaussian sigma
          [-interpolate] - instead of a weighted average of voxels, interpolate
             at subpoints inside the ribbon
             <method> - interpolation method, must be CUBIC, ENCLOSING_VOXEL, or
                TRILINEAR
          [-bad-vertices-out] - output an ROI of which vertices didn't intersect
             any valid voxels
             <roi-out> - output - the output metric file of vertices that have
                no data
          [-output-weights] - write the voxel weights for a vertex to a volume
             file
             <vertex> - the vertex number to get the voxel weights for, 0-based
             <weights-out> - output - volume to write the weights to
          [-output-weights-text] - write the voxel weights for all vertices to a
             text file
             <text-out> - output - the output text filename
       [-myelin-style] - use the method from myelin mapping
          <ribbon-roi> - an roi volume of the cortical ribbon for this
             hemisphere
          <thickness> - a metric file of cortical thickness
          <sigma> - gaussian kernel in mm for weighting voxels within range
          [-legacy-bug] - emulate old v1.2.3 and earlier code that didn't follow
             a cylinder cutoff
       [-subvol-select] - select a single subvolume to map
          <subvol> - the subvolume number or name
    """
class VolumeToSurfaceMappingOutputSpec(TraitedSpec):
    """Outputs for :class:`VolumeToSurfaceMapping`."""

    out_file = File(desc="the output metric file")
    bad_vertices_file = File(desc="the output metric file of vertices that have no data")
    weights_file = File(desc="volume to write the weights to")
    weights_text_file = File(desc="the output text filename")
class VolumeToSurfaceMapping(WBCommand, OpenMPCommandMixin):
    """Map a volume to a surface using one of several methods.

    From https://humanconnectome.org/software/workbench-command/-volume-to-surface-mapping::

        You must specify exactly one mapping method. Enclosing voxel uses the
        value from the voxel the vertex lies inside, while trilinear does a 3D
        linear interpolation based on the voxels immediately on each side of the
        vertex's position.

        The ribbon mapping method constructs a polyhedron from the vertex's
        neighbors on each surface, and estimates the amount of this polyhedron's
        volume that falls inside any nearby voxels, to use as the weights for
        sampling. If -thin-columns is specified, the polyhedron uses the edge
        midpoints and triangle centroids, so that neighboring vertices do not
        have overlapping polyhedra. This may require increasing -voxel-subdiv to
        get enough samples in each voxel to reliably land inside these smaller
        polyhedra. The volume ROI is useful to exclude partial volume effects of
        voxels the surfaces pass through, and will cause the mapping to ignore
        voxels that don't have a positive value in the mask. The subdivision
        number specifies how it approximates the amount of the volume the
        polyhedron intersects, by splitting each voxel into NxNxN pieces, and
        checking whether the center of each piece is inside the polyhedron. If
        you have very large voxels, consider increasing this if you get zeros in
        your output. The -gaussian option makes it act more like the myelin
        method, where the distance of a voxel from <surface> is used to
        downweight the voxel. The -interpolate suboption, instead of doing a
        weighted average of voxels, interpolates from the volume at the
        subdivided points inside the ribbon. If using both -interpolate and the
        -weighted suboption to -volume-roi, the roi volume weights are linearly
        interpolated, unless the -interpolate method is ENCLOSING_VOXEL, in which
        case ENCLOSING_VOXEL is also used for sampling the roi volume weights.

        The myelin style method uses part of the caret5 myelin mapping command to
        do the mapping: for each surface vertex, take all voxels that are in a
        cylinder with radius and height equal to cortical thickness, centered on
        the vertex and aligned with the surface normal, and that are also within
        the ribbon ROI, and apply a gaussian kernel with the specified sigma to
        them to get the weights to use. The -legacy-bug flag reverts to the
        unintended behavior present from the initial implementation up to and
        including v1.2.3, which had only the tangential cutoff and a bounding box
        intended to be larger than where the cylinder cutoff should have been.

    Examples:

    >>> from fmriprep.interfaces.workbench import VolumeToSurfaceMapping
    >>> vol2surf = VolumeToSurfaceMapping()
    >>> vol2surf.inputs.volume_file = 'bold.nii.gz'
    >>> vol2surf.inputs.surface_file = 'lh.midthickness.surf.gii'
    >>> vol2surf.inputs.method = 'ribbon-constrained'
    >>> vol2surf.inputs.inner_surface = 'lh.white.surf.gii'
    >>> vol2surf.inputs.outer_surface = 'lh.pial.surf.gii'
    >>> vol2surf.cmdline  # doctest: +NORMALIZE_WHITESPACE
    'wb_command -volume-to-surface-mapping bold.nii.gz lh.midthickness.surf.gii \
lh.midthickness.surf_mapped.func.gii -ribbon-constrained lh.white.surf.gii lh.pial.surf.gii'
    """

    input_spec = VolumeToSurfaceMappingInputSpec
    output_spec = VolumeToSurfaceMappingOutputSpec
    _cmd = "wb_command -volume-to-surface-mapping"

    def _format_arg(self, opt, spec, val):
        # Drop suboptions that do not match the selected mapping method
        if opt in self.input_spec._ribbon_constrained:
            if self.inputs.method != "ribbon-constrained":
                return ""
        elif opt in self.input_spec._myelin_style:
            if self.inputs.method != "myelin-style":
                return ""
        return super()._format_arg(opt, spec, val)

    def _list_outputs(self):
        outputs = super()._list_outputs()
        if isdefined(self.inputs.bad_vertices_out):
            outputs["bad_vertices_file"] = os.path.abspath(self.inputs.bad_vertices_out)
        if isdefined(self.inputs.output_weights):
            # BUGFIX: ``output_weights`` is the vertex *number* (an Int trait);
            # passing it to os.path.abspath raised TypeError. The weights
            # volume filename is fixed by the trait's argstr.
            outputs["weights_file"] = os.path.abspath("output_weights.nii.gz")
        if isdefined(self.inputs.output_weights_text):
            outputs["weights_text_file"] = os.path.abspath(self.inputs.output_weights_text)
        return outputs
class MetricMaskInputSpec(CommandLineInputSpec):
    """MASK A METRIC FILE

    wb_command -metric-mask
       <metric> - the input metric
       <mask> - the mask metric
       <metric-out> - output - the output metric
       [-column] - select a single column
          <column> - the column number or name

    By default, the output metric is a copy of the input metric, but with
    zeros wherever the mask metric is zero or negative. if -column is
    specified, the output contains only one column, the masked version of the
    specified input column."""

    in_file = File(
        exists=True,
        argstr="%s",
        position=1,
        mandatory=True,
        desc="input metric file",
    )
    mask = File(
        exists=True,
        argstr="%s",
        position=2,
        mandatory=True,
        desc="mask metric file",
    )
    out_file = File(
        name_template="%s_masked.func.gii",
        name_source=["in_file"],
        keep_extension=False,
        argstr="%s",
        position=3,
        desc="output metric file",
    )
    # Column may be given by index (int) or by name (str)
    column = traits.Either(
        traits.Int,
        traits.String,
        argstr="-column %s",
        desc="select a single column by number or name",
    )
class MetricMaskOutputSpec(TraitedSpec):
    """Outputs for :class:`MetricMask`."""

    out_file = File(desc="output metric file")
class MetricMask(WBCommand):
    """Mask a metric file.

    Wraps ``wb_command -metric-mask``.

    Examples
    --------
    >>> from fmriprep.interfaces.workbench import MetricMask
    >>> metric_mask = MetricMask()
    >>> metric_mask.inputs.in_file = 'lh.bold.func.gii'
    >>> metric_mask.inputs.mask = 'lh.roi.shape.gii'
    >>> metric_mask.cmdline
    'wb_command -metric-mask lh.bold.func.gii lh.roi.shape.gii lh.bold.func_masked.func.gii'
    """

    input_spec = MetricMaskInputSpec
    output_spec = MetricMaskOutputSpec
    _cmd = "wb_command -metric-mask"
class MetricFillHolesInputSpec(TraitedSpec):
    """FILL HOLES IN AN ROI METRIC

    wb_command -metric-fill-holes
       <surface> - the surface to use for neighbor information
       <metric-in> - the input ROI metric
       <metric-out> - output - the output ROI metric

       [-corrected-areas] - vertex areas to use instead of computing them from
          the surface
          <area-metric> - the corrected vertex areas, as a metric

    Finds all connected areas that are not included in the ROI, and writes
    ones into all but the largest one, in terms of surface area."""

    surface_file = File(
        mandatory=True,
        exists=True,
        argstr="%s",
        position=1,
        desc="surface to use for neighbor information",
    )
    metric_file = File(
        mandatory=True,
        exists=True,
        argstr="%s",
        position=2,
        desc="input ROI metric",
    )
    out_file = File(
        name_template="%s_filled.shape.gii",
        name_source="metric_file",
        keep_extension=False,
        argstr="%s",
        position=3,
        desc="output ROI metric",
    )
    corrected_areas = File(
        exists=True,
        argstr="-corrected-areas %s",
        desc="vertex areas to use instead of computing them from the surface",
    )
class MetricFillHolesOutputSpec(TraitedSpec):
    """Outputs for :class:`MetricFillHoles`."""

    out_file = File(desc="output ROI metric")
class MetricFillHoles(WBCommand):
    """Fill holes in an ROI metric.

    Wraps ``wb_command -metric-fill-holes``.

    Examples
    --------
    >>> from fmriprep.interfaces.workbench import MetricFillHoles
    >>> fill_holes = MetricFillHoles()
    >>> fill_holes.inputs.surface_file = 'lh.midthickness.surf.gii'
    >>> fill_holes.inputs.metric_file = 'lh.roi.shape.gii'
    >>> fill_holes.cmdline  # doctest: +NORMALIZE_WHITESPACE
    'wb_command -metric-fill-holes lh.midthickness.surf.gii lh.roi.shape.gii \
lh.roi.shape_filled.shape.gii'
    """

    input_spec = MetricFillHolesInputSpec
    output_spec = MetricFillHolesOutputSpec
    _cmd = "wb_command -metric-fill-holes"
class MetricRemoveIslandsInputSpec(TraitedSpec):
    """REMOVE ISLANDS IN AN ROI METRIC

    wb_command -metric-remove-islands
       <surface> - the surface to use for neighbor information
       <metric-in> - the input ROI metric
       <metric-out> - output - the output ROI metric

       [-corrected-areas] - vertex areas to use instead of computing them from
          the surface
          <area-metric> - the corrected vertex areas, as a metric

    Finds all connected areas in the ROI, and zeros out all but the largest
    one, in terms of surface area."""

    surface_file = File(
        mandatory=True,
        exists=True,
        argstr="%s",
        position=1,
        desc="surface to use for neighbor information",
    )
    metric_file = File(
        mandatory=True,
        exists=True,
        argstr="%s",
        position=2,
        desc="input ROI metric",
    )
    out_file = File(
        name_template="%s_noislands.shape.gii",
        name_source="metric_file",
        keep_extension=False,
        argstr="%s",
        position=3,
        desc="output ROI metric",
    )
    corrected_areas = File(
        exists=True,
        argstr="-corrected-areas %s",
        desc="vertex areas to use instead of computing them from the surface",
    )
class MetricRemoveIslandsOutputSpec(TraitedSpec):
    """Outputs for :class:`MetricRemoveIslands`."""

    out_file = File(desc="output ROI metric")
class MetricRemoveIslands(WBCommand):
    """Remove islands in an ROI metric.

    Wraps ``wb_command -metric-remove-islands``.

    Examples
    --------
    >>> from fmriprep.interfaces.workbench import MetricRemoveIslands
    >>> remove_islands = MetricRemoveIslands()
    >>> remove_islands.inputs.surface_file = 'lh.midthickness.surf.gii'
    >>> remove_islands.inputs.metric_file = 'lh.roi.shape.gii'
    >>> remove_islands.cmdline  # doctest: +NORMALIZE_WHITESPACE
    'wb_command -metric-remove-islands lh.midthickness.surf.gii \
lh.roi.shape.gii lh.roi.shape_noislands.shape.gii'
    """

    input_spec = MetricRemoveIslandsInputSpec
    output_spec = MetricRemoveIslandsOutputSpec
    _cmd = "wb_command -metric-remove-islands"
| 27,812 | 33.379481 | 98 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/maths.py | import os
import numpy as np
from nipype.interfaces.base import File, SimpleInterface, TraitedSpec, traits
from nipype.utils.filemanip import fname_presuffix
class ClipInputSpec(TraitedSpec):
    """Inputs for :class:`Clip`."""

    in_file = File(exists=True, mandatory=True, desc="Input imaging file")
    out_file = File(desc="Output file name")
    # Defaults of -inf/+inf make each bound a no-op unless explicitly set
    minimum = traits.Float(
        -np.inf, usedefault=True, desc="Values under minimum are set to minimum"
    )
    maximum = traits.Float(np.inf, usedefault=True, desc="Values over maximum are set to maximum")
class ClipOutputSpec(TraitedSpec):
    """Outputs for :class:`Clip`."""

    out_file = File(desc="Output file name")
class Clip(SimpleInterface):
    """Clip voxel values of an image to a [minimum, maximum] range.

    When every value already lies within the bounds, no new file is written
    and ``in_file`` itself is reported as ``out_file`` (no copy is made).
    """

    input_spec = ClipInputSpec
    output_spec = ClipOutputSpec

    def _run_interface(self, runtime):
        import nibabel as nb

        image = nb.load(self.inputs.in_file)
        values = image.get_fdata()
        lo, hi = self.inputs.minimum, self.inputs.maximum

        dest = self.inputs.out_file
        if dest:
            dest = os.path.join(runtime.cwd, dest)

        out_of_range = np.any((values < lo) | (values > hi))
        if out_of_range:
            if not dest:
                dest = fname_presuffix(
                    self.inputs.in_file, suffix="_clipped", newpath=runtime.cwd
                )
            # Clip in place and write a new image with the original header
            np.clip(values, lo, hi, out=values)
            image.__class__(values, image.affine, image.header).to_filename(dest)
        elif not dest:
            # Nothing to change: pass the input through untouched
            dest = self.inputs.in_file

        self._results["out_file"] = dest
        return runtime
class Label2MaskInputSpec(TraitedSpec):
    """Inputs for :class:`Label2Mask`."""

    in_file = File(exists=True, mandatory=True, desc="Input label file")
    # BUGFIX: the keyword was misspelled ``dec``; traits silently accepted it
    # as unknown metadata, so the trait carried no description.
    label_val = traits.Int(mandatory=True, desc="Label value to create mask from")
class Label2MaskOutputSpec(TraitedSpec):
    """Outputs for :class:`Label2Mask`."""

    out_file = File(desc="Output file name")
class Label2Mask(SimpleInterface):
    """Extract a binary mask for one label of a multi-label segmentation."""

    input_spec = Label2MaskInputSpec
    output_spec = Label2MaskOutputSpec

    def _run_interface(self, runtime):
        import nibabel as nb

        label_img = nb.load(self.inputs.in_file)
        # Binary mask of voxels matching the requested label value
        mask_data = np.asarray(label_img.dataobj).astype(np.uint16) == self.inputs.label_val

        mask_img = label_img.__class__(mask_data, label_img.affine, label_img.header)
        mask_img.set_data_dtype(np.uint8)

        out_name = fname_presuffix(self.inputs.in_file, suffix="_mask", newpath=runtime.cwd)
        mask_img.to_filename(out_name)

        self._results["out_file"] = out_name
        return runtime
| 2,680 | 30.541176 | 98 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/reports.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Interfaces to generate reportlets."""
import logging
import os
import re
import time
from collections import Counter
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
Directory,
File,
InputMultiObject,
SimpleInterface,
Str,
TraitedSpec,
isdefined,
traits,
)
from niworkflows.interfaces.reportlets import base as nrb
from smriprep.interfaces.freesurfer import ReconAll
LOGGER = logging.getLogger('nipype.interface')

# HTML fragments rendered by the Summary interfaces below; placeholders are
# filled via ``str.format``. The template strings must be kept byte-exact,
# as they are embedded verbatim in the generated reports.
SUBJECT_TEMPLATE = """\
\t<ul class="elem-desc">
\t\t<li>Subject ID: {subject_id}</li>
\t\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>
\t\t<li>Functional series: {n_bold:d}</li>
{tasks}
\t\t<li>Standard output spaces: {std_spaces}</li>
\t\t<li>Non-standard output spaces: {nstd_spaces}</li>
\t\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>
\t</ul>
"""

FUNCTIONAL_TEMPLATE = """\
\t\t<details open>
\t\t<summary>Summary</summary>
\t\t<ul class="elem-desc">
\t\t\t<li>Original orientation: {ornt}</li>
\t\t\t<li>Repetition time (TR): {tr:.03g}s</li>
\t\t\t<li>Phase-encoding (PE) direction: {pedir}</li>
\t\t\t<li>{multiecho}</li>
\t\t\t<li>Slice timing correction: {stc}</li>
\t\t\t<li>Susceptibility distortion correction: {sdc}</li>
\t\t\t<li>Registration: {registration}</li>
\t\t\t<li>Non-steady-state volumes: {dummy_scan_desc}</li>
\t\t</ul>
\t\t</details>
\t\t<details>
\t\t\t<summary>Confounds collected</summary><br />
\t\t\t<p>{confounds}.</p>
\t\t</details>
"""

ABOUT_TEMPLATE = """\t<ul>
\t\t<li>fMRIPrep version: {version}</li>
\t\t<li>fMRIPrep command: <code>{command}</code></li>
\t\t<li>Date preprocessed: {date}</li>
\t</ul>
</div>
"""
class SummaryOutputSpec(TraitedSpec):
    """Outputs shared by all Summary interfaces."""

    out_report = File(exists=True, desc='HTML segment containing summary')
class SummaryInterface(SimpleInterface):
    """Abstract base that renders an HTML reportlet into ``report.html``.

    Subclasses implement :meth:`_generate_segment` to produce the HTML body.
    """

    output_spec = SummaryOutputSpec

    def _run_interface(self, runtime):
        html = self._generate_segment()
        out_path = os.path.join(runtime.cwd, 'report.html')
        with open(out_path, 'w') as handle:
            handle.write(html)
        self._results['out_report'] = out_path
        return runtime

    def _generate_segment(self):
        # Subclass responsibility
        raise NotImplementedError
class SubjectSummaryInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`SubjectSummary`."""

    t1w = InputMultiObject(File(exists=True), desc='T1w structural images')
    t2w = InputMultiObject(File(exists=True), desc='T2w structural images')
    subjects_dir = Directory(desc='FreeSurfer subjects directory')
    subject_id = Str(desc='Subject ID')
    # Each entry may be a single file or a list of files (multi-echo runs)
    bold = InputMultiObject(
        traits.Either(File(exists=True), traits.List(File(exists=True))),
        desc='BOLD functional series',
    )
    std_spaces = traits.List(Str, desc='list of standard spaces')
    nstd_spaces = traits.List(Str, desc='list of non-standard spaces')
class SubjectSummaryOutputSpec(SummaryOutputSpec):
    """Outputs for :class:`SubjectSummary`."""

    # This exists to ensure that the summary is run prior to the first ReconAll
    # call, allowing a determination whether there is a pre-existing directory
    subject_id = Str(desc='FreeSurfer subject ID')
class SubjectSummary(SummaryInterface):
    """Render the subject-level summary reportlet.

    Reports structural/functional image counts, output spaces, per-task run
    counts, and whether FreeSurfer reconstruction is pre-existing or will be
    run by fMRIPrep.
    """

    input_spec = SubjectSummaryInputSpec
    output_spec = SubjectSummaryOutputSpec

    def _run_interface(self, runtime):
        # Forward the subject ID so downstream nodes can depend on this one
        if isdefined(self.inputs.subject_id):
            self._results['subject_id'] = self.inputs.subject_id
        return super()._run_interface(runtime)

    def _generate_segment(self):
        # BIDS entity parser for extracting the task label from filenames
        BIDS_NAME = re.compile(
            r'^(.*\/)?'
            '(?P<subject_id>sub-[a-zA-Z0-9]+)'
            '(_(?P<session_id>ses-[a-zA-Z0-9]+))?'
            '(_(?P<task_id>task-[a-zA-Z0-9]+))?'
            '(_(?P<acq_id>acq-[a-zA-Z0-9]+))?'
            '(_(?P<rec_id>rec-[a-zA-Z0-9]+))?'
            '(_(?P<run_id>run-[a-zA-Z0-9]+))?'
        )

        if not isdefined(self.inputs.subjects_dir):
            freesurfer_status = 'Not run'
        else:
            # Build (but do not run) a ReconAll command: if nipype would skip
            # it (cmdline starts with 'echo'), a prior recon already exists.
            recon = ReconAll(
                subjects_dir=self.inputs.subjects_dir,
                subject_id='sub-' + self.inputs.subject_id,
                T1_files=self.inputs.t1w,
                flags='-noskullstrip',
            )
            if recon.cmdline.startswith('echo'):
                freesurfer_status = 'Pre-existing directory'
            else:
                freesurfer_status = 'Run by fMRIPrep'

        t2w_seg = ''
        if self.inputs.t2w:
            t2w_seg = f'(+ {len(self.inputs.t2w):d} T2-weighted)'

        # Add list of tasks with number of runs
        bold_series = self.inputs.bold if isdefined(self.inputs.bold) else []
        # Multi-echo entries are lists; count each run once via its first echo
        bold_series = [s[0] if isinstance(s, list) else s for s in bold_series]

        counts = Counter(
            BIDS_NAME.search(series).groupdict()['task_id'][5:] for series in bold_series
        )

        tasks = ''
        if counts:
            header = '\t\t<ul class="elem-desc">'
            footer = '\t\t</ul>'
            lines = [
                '\t\t\t<li>Task: {task_id} ({n_runs:d} run{s})</li>'.format(
                    task_id=task_id, n_runs=n_runs, s='' if n_runs == 1 else 's'
                )
                for task_id, n_runs in sorted(counts.items())
            ]
            tasks = '\n'.join([header] + lines + [footer])

        return SUBJECT_TEMPLATE.format(
            subject_id=self.inputs.subject_id,
            n_t1s=len(self.inputs.t1w),
            t2w=t2w_seg,
            n_bold=len(bold_series),
            tasks=tasks,
            std_spaces=', '.join(self.inputs.std_spaces),
            nstd_spaces=', '.join(self.inputs.nstd_spaces),
            freesurfer_status=freesurfer_status,
        )
class FunctionalSummaryInputSpec(BaseInterfaceInputSpec):
    """Inputs for the functional (per-run) summary reportlet."""

    slice_timing = traits.Enum(
        False, True, 'TooShort', usedefault=True, desc='Slice timing correction used'
    )
    distortion_correction = traits.Str(
        desc='Susceptibility distortion correction method', mandatory=True
    )
    pe_direction = traits.Enum(
        None,
        'i',
        'i-',
        'j',
        'j-',
        'k',
        'k-',
        mandatory=True,
        desc='Phase-encoding direction detected',
    )
    registration = traits.Enum(
        'FSL', 'FreeSurfer', mandatory=True, desc='Functional/anatomical registration method'
    )
    fallback = traits.Bool(desc='Boundary-based registration rejected')
    registration_dof = traits.Enum(
        6, 9, 12, desc='Registration degrees of freedom', mandatory=True
    )
    registration_init = traits.Enum(
        'register',
        'header',
        mandatory=True,
        desc='Whether to initialize registration with the "header"'
        ' or by centering the volumes ("register")',
    )
    confounds_file = File(exists=True, desc='Confounds file')
    tr = traits.Float(desc='Repetition time', mandatory=True)
    # None means the user did not specify a value (auto-detected count is used)
    dummy_scans = traits.Either(traits.Int(), None, desc='number of dummy scans specified by user')
    algo_dummy_scans = traits.Int(desc='number of dummy scans determined by algorithm')
    echo_idx = traits.List([], usedefault=True, desc="BIDS echo identifiers")
    orientation = traits.Str(mandatory=True, desc='Orientation of the voxel axes')
class FunctionalSummary(SummaryInterface):
    """Summary reportlet describing how one BOLD run was preprocessed."""

    input_spec = FunctionalSummaryInputSpec

    def _generate_segment(self):
        """Render the functional-summary HTML fragment from the input traits."""
        dof = self.inputs.registration_dof
        stc = {
            True: 'Applied',
            False: 'Not applied',
            'TooShort': 'Skipped (too few volumes)',
        }[self.inputs.slice_timing]
        # #TODO: Add a note about registration_init below?
        # Pick the registration description first by tool, then by whether the
        # BBR solution was rejected (fallback=True selects the rigid fallback).
        reg = {
            'FSL': [
                'FSL <code>flirt</code> with boundary-based registration'
                ' (BBR) metric - %d dof' % dof,
                'FSL <code>flirt</code> rigid registration - 6 dof',
            ],
            'FreeSurfer': [
                'FreeSurfer <code>bbregister</code> '
                '(boundary-based registration, BBR) - %d dof' % dof,
                'FreeSurfer <code>mri_coreg</code> - %d dof' % dof,
            ],
        }[self.inputs.registration][self.inputs.fallback]
        pedir = get_world_pedir(self.inputs.orientation, self.inputs.pe_direction)
        # BUGFIX: ``conflist`` was previously only bound inside the branch
        # below, raising NameError at the format() call when no confounds
        # file was provided. Default to an empty string.
        conflist = ''
        if isdefined(self.inputs.confounds_file):
            with open(self.inputs.confounds_file) as cfh:
                # First line of the TSV holds the confound column names
                conflist = cfh.readline().strip('\n').strip()
        dummy_scan_tmp = "{n_dum}"
        if self.inputs.dummy_scans == self.inputs.algo_dummy_scans:
            dummy_scan_msg = ' '.join(
                [dummy_scan_tmp, "(Confirmed: {n_alg} automatically detected)"]
            ).format(n_dum=self.inputs.dummy_scans, n_alg=self.inputs.algo_dummy_scans)
        # the number of dummy scans was specified by the user and
        # it is not equal to the number detected by the algorithm
        elif self.inputs.dummy_scans is not None:
            dummy_scan_msg = ' '.join(
                [dummy_scan_tmp, "(Warning: {n_alg} automatically detected)"]
            ).format(n_dum=self.inputs.dummy_scans, n_alg=self.inputs.algo_dummy_scans)
        # the number of dummy scans was not specified by the user
        else:
            dummy_scan_msg = dummy_scan_tmp.format(n_dum=self.inputs.algo_dummy_scans)
        multiecho = "Single-echo EPI sequence."
        n_echos = len(self.inputs.echo_idx)
        if n_echos == 1:
            multiecho = (
                f"Multi-echo EPI sequence: only echo {self.inputs.echo_idx[0]} processed "
                "in single-echo mode."
            )
        if n_echos > 2:
            multiecho = f"Multi-echo EPI sequence: {n_echos} echoes."
        return FUNCTIONAL_TEMPLATE.format(
            pedir=pedir,
            stc=stc,
            sdc=self.inputs.distortion_correction,
            registration=reg,
            confounds=re.sub(r'[\t ]+', ', ', conflist),
            tr=self.inputs.tr,
            dummy_scan_desc=dummy_scan_msg,
            multiecho=multiecho,
            ornt=self.inputs.orientation,
        )
class AboutSummaryInputSpec(BaseInterfaceInputSpec):
    """Inputs for the "About" reportlet."""
    version = Str(desc='FMRIPREP version')
    command = Str(desc='FMRIPREP command')
    # Date not included - update timestamp only if version or command changes
class AboutSummary(SummaryInterface):
    """Reportlet stating the tool version, the exact command line, and a timestamp."""
    input_spec = AboutSummaryInputSpec
    def _generate_segment(self):
        # Timestamp is generated at report-build time, not at run time.
        return ABOUT_TEMPLATE.format(
            version=self.inputs.version,
            command=self.inputs.command,
            date=time.strftime("%Y-%m-%d %H:%M:%S %z"),
        )
class LabeledHistogramInputSpec(nrb._SVGReportCapableInputSpec):
    """Inputs for :class:`LabeledHistogram`."""
    in_file = traits.File(exists=True, mandatory=True, desc="Image containing values to plot")
    label_file = traits.File(
        exists=True,
        desc="Mask or label image where non-zero values will be used to extract data from in_file",
    )
    # e.g. {1: "GM", 2: "WM"}; unmapped labels fall back to their integer value
    mapping = traits.Dict(desc="Map integer label values onto names of voxels")
    xlabel = traits.Str("voxels", usedefault=True, desc="Description of values plotted")
class LabeledHistogram(nrb.ReportingInterface):
    """Plot a histogram of image intensities, split by region labels, into an SVG report."""
    input_spec = LabeledHistogramInputSpec
    def _generate_report(self):
        # Imports deferred so the heavy plotting stack is only loaded when reporting
        import nibabel as nb
        import numpy as np
        import seaborn as sns
        from matplotlib import pyplot as plt
        from nilearn.image import resample_to_img
        report_file = self._out_report
        img = nb.load(self.inputs.in_file)
        data = img.get_fdata(dtype=np.float32)
        # NOTE(review): truthiness test on a trait; the usual nipype idiom is
        # isdefined(self.inputs.label_file) — confirm Undefined is falsy here.
        if self.inputs.label_file:
            label_img = nb.load(self.inputs.label_file)
            # Resample labels onto the data grid if shapes/affines disagree
            if label_img.shape != img.shape[:3] or not np.allclose(label_img.affine, img.affine):
                label_img = resample_to_img(label_img, img, interpolation="nearest")
            labels = np.uint16(label_img.dataobj)
        else:
            # Without labels, treat every positive voxel as a single region
            labels = np.uint8(data > 0)
        uniq_labels = np.unique(labels[labels > 0])
        # If no mapping is given, each label maps to itself
        label_map = self.inputs.mapping or {label: label for label in uniq_labels}
        rois = {label_map.get(label, label): data[labels == label] for label in label_map}
        with sns.axes_style('whitegrid'):
            fig = sns.histplot(rois, bins=50)
            fig.set_xlabel(self.inputs.xlabel)
        plt.savefig(report_file)
        plt.close()
def get_world_pedir(ornt, pe_direction):
    """Return world direction of phase encoding"""
    # Anatomical label pairs for the x, y and z world axes.
    world_pairs = (
        ("Right", "Left"),
        ("Anterior", "Posterior"),
        ("Superior", "Inferior"),
    )
    axis_index = {"i": 0, "j": 1, "k": 2}
    if pe_direction is not None:
        # Orientation letter of the voxel axis the PE direction refers to.
        axis_code = ornt[axis_index[pe_direction[0]]]
        is_reversed = pe_direction[1:] == "-"
        for pair in world_pairs:
            # Try both orderings of each pair; the label that must match the
            # orientation letter is the second element unless the PE
            # direction carries a trailing '-'.
            for ordered in (pair, pair[::-1]):
                anchor = ordered[0] if is_reversed else ordered[1]
                if anchor.startswith(axis_code):
                    return "-".join(ordered)
    LOGGER.warning(
        "Cannot determine world direction of phase encoding. "
        f"Orientation: {ornt}; PE dir: {pe_direction}"
    )
    return "Could not be determined - assuming Anterior-Posterior"
| 13,763 | 35.031414 | 99 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/confounds.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""
Handling confounds.
.. testsetup::
>>> import os
>>> import pandas as pd
"""
import os
import re
import nibabel as nb
import numpy as np
import pandas as pd
from nipype import logging
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
File,
InputMultiObject,
OutputMultiObject,
SimpleInterface,
TraitedSpec,
isdefined,
traits,
)
from nipype.utils.filemanip import fname_presuffix
from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries
from niworkflows.viz.plots import fMRIPlot
LOGGER = logging.getLogger('nipype.interface')
class _aCompCorMasksInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`aCompCorMasks`."""
    in_vfs = InputMultiObject(File(exists=True), desc="Input volume fractions.")
    is_aseg = traits.Bool(
        False, usedefault=True, desc="Whether the input volume fractions come from FS' aseg."
    )
    bold_zooms = traits.Tuple(
        traits.Float, traits.Float, traits.Float, mandatory=True, desc="BOLD series zooms"
    )
class _aCompCorMasksOutputSpec(TraitedSpec):
    """Outputs for :class:`aCompCorMasks`."""
    out_masks = OutputMultiObject(
        File(exists=True), desc="CSF, WM and combined masks, respectively"
    )
class aCompCorMasks(SimpleInterface):
    """Generate masks in T1w space for aCompCor."""
    input_spec = _aCompCorMasksInputSpec
    output_spec = _aCompCorMasksOutputSpec
    def _run_interface(self, runtime):
        # Deferred import: avoids a circular import at module load time
        from ..utils.confounds import acompcor_masks
        # Delegate the actual mask construction to the utility function
        self._results["out_masks"] = acompcor_masks(
            self.inputs.in_vfs,
            self.inputs.is_aseg,
            self.inputs.bold_zooms,
        )
        return runtime
class _FilterDroppedInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`FilterDropped`."""
    in_file = File(exists=True, desc='input CompCor metadata')
class _FilterDroppedOutputSpec(TraitedSpec):
    """Outputs for :class:`FilterDropped`."""
    out_file = File(desc='filtered CompCor metadata')
class FilterDropped(SimpleInterface):
    """Drop non-retained components from a CompCor metadata table.

    Rows are kept when their boolean ``retained`` column is True; the
    filtered TSV is written into the working directory with a ``_filtered``
    suffix appended to the input filename.
    """
    input_spec = _FilterDroppedInputSpec
    output_spec = _FilterDroppedOutputSpec

    def _run_interface(self, runtime):
        out_file = fname_presuffix(
            self.inputs.in_file, suffix='_filtered', use_ext=True, newpath=runtime.cwd
        )
        self._results["out_file"] = out_file
        table = pd.read_csv(self.inputs.in_file, sep='\t')
        kept_rows = table[table.retained]
        kept_rows.to_csv(out_file, sep='\t', index=False)
        return runtime
class _RenameACompCorInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`RenameACompCor`."""
    components_file = File(exists=True, desc='input aCompCor components')
    metadata_file = File(exists=True, desc='input aCompCor metadata')
class _RenameACompCorOutputSpec(TraitedSpec):
    """Outputs for :class:`RenameACompCor`."""
    components_file = File(desc='output aCompCor components')
    metadata_file = File(desc='output aCompCor metadata')
class RenameACompCor(SimpleInterface):
    """Rename ACompCor components based on their masks
    Components from the "CSF" mask are ``c_comp_cor_*``.
    Components from the "WM" mask are ``w_comp_cor_*``.
    Components from the "combined" mask are ``a_comp_cor_*``.
    Each set of components is renumbered to start at ``?_comp_cor_00``.
    """
    input_spec = _RenameACompCorInputSpec
    output_spec = _RenameACompCorOutputSpec
    def _run_interface(self, runtime):
        try:
            components = pd.read_csv(self.inputs.components_file, sep='\t')
            metadata = pd.read_csv(self.inputs.metadata_file, sep='\t')
        except pd.errors.EmptyDataError:
            # Can occur when testing on short datasets; otherwise rare
            # Pass the (empty) inputs through unchanged.
            self._results["components_file"] = self.inputs.components_file
            self._results["metadata_file"] = self.inputs.metadata_file
            return runtime
        self._results["components_file"] = fname_presuffix(
            self.inputs.components_file, suffix='_renamed', use_ext=True, newpath=runtime.cwd
        )
        self._results["metadata_file"] = fname_presuffix(
            self.inputs.metadata_file, suffix='_renamed', use_ext=True, newpath=runtime.cwd
        )
        # Partition retained components by the mask they were derived from
        all_comp_cor = metadata[metadata["retained"]]
        c_comp_cor = all_comp_cor[all_comp_cor["mask"] == "CSF"]
        w_comp_cor = all_comp_cor[all_comp_cor["mask"] == "WM"]
        a_comp_cor = all_comp_cor[all_comp_cor["mask"] == "combined"]
        # Build old-name -> new-name mappings, renumbering each group from 00
        c_orig = c_comp_cor["component"]
        c_new = [f"c_comp_cor_{i:02d}" for i in range(len(c_orig))]
        w_orig = w_comp_cor["component"]
        w_new = [f"w_comp_cor_{i:02d}" for i in range(len(w_orig))]
        a_orig = a_comp_cor["component"]
        a_new = [f"a_comp_cor_{i:02d}" for i in range(len(a_orig))]
        # Apply the renames to the components table columns...
        final_components = components.rename(columns=dict(zip(c_orig, c_new)))
        final_components.rename(columns=dict(zip(w_orig, w_new)), inplace=True)
        final_components.rename(columns=dict(zip(a_orig, a_new)), inplace=True)
        final_components.to_csv(self._results["components_file"], sep='\t', index=False)
        # ...and to the corresponding rows of the metadata table
        metadata.loc[c_comp_cor.index, "component"] = c_new
        metadata.loc[w_comp_cor.index, "component"] = w_new
        metadata.loc[a_comp_cor.index, "component"] = a_new
        metadata.to_csv(self._results["metadata_file"], sep='\t', index=False)
        return runtime
class GatherConfoundsInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`GatherConfounds` — one optional TSV per confound source."""
    signals = File(exists=True, desc='input signals')
    dvars = File(exists=True, desc='file containing DVARS')
    std_dvars = File(exists=True, desc='file containing standardized DVARS')
    fd = File(exists=True, desc='input framewise displacement')
    rmsd = File(exists=True, desc='input RMS framewise displacement')
    tcompcor = File(exists=True, desc='input tCompCorr')
    acompcor = File(exists=True, desc='input aCompCorr')
    crowncompcor = File(exists=True, desc='input crown-based regressors')
    cos_basis = File(exists=True, desc='input cosine basis')
    motion = File(exists=True, desc='input motion parameters')
class GatherConfoundsOutputSpec(TraitedSpec):
    """Outputs for :class:`GatherConfounds`."""
    confounds_file = File(exists=True, desc='output confounds file')
    confounds_list = traits.List(traits.Str, desc='list of headers')
class GatherConfounds(SimpleInterface):
    r"""
    Combine various sources of confounds in one TSV file
    >>> pd.DataFrame({'a': [0.1]}).to_csv('signals.tsv', index=False, na_rep='n/a')
    >>> pd.DataFrame({'b': [0.2]}).to_csv('dvars.tsv', index=False, na_rep='n/a')
    >>> gather = GatherConfounds()
    >>> gather.inputs.signals = 'signals.tsv'
    >>> gather.inputs.dvars = 'dvars.tsv'
    >>> res = gather.run()
    >>> res.outputs.confounds_list
    ['Global signals', 'DVARS']
    >>> pd.read_csv(res.outputs.confounds_file, sep='\s+', index_col=None,
    ...             engine='python')  # doctest: +NORMALIZE_WHITESPACE
         a    b
    0  0.1  0.2
    """
    input_spec = GatherConfoundsInputSpec
    output_spec = GatherConfoundsOutputSpec
    def _run_interface(self, runtime):
        # All heavy lifting happens in the module-level helper; this interface
        # only maps traits onto its keyword arguments.
        combined_out, confounds_list = _gather_confounds(
            signals=self.inputs.signals,
            dvars=self.inputs.dvars,
            std_dvars=self.inputs.std_dvars,
            fdisp=self.inputs.fd,
            rmsd=self.inputs.rmsd,
            tcompcor=self.inputs.tcompcor,
            acompcor=self.inputs.acompcor,
            crowncompcor=self.inputs.crowncompcor,
            cos_basis=self.inputs.cos_basis,
            motion=self.inputs.motion,
            newpath=runtime.cwd,
        )
        self._results['confounds_file'] = combined_out
        self._results['confounds_list'] = confounds_list
        return runtime
def _gather_confounds(
    signals=None,
    dvars=None,
    std_dvars=None,
    fdisp=None,
    rmsd=None,
    tcompcor=None,
    acompcor=None,
    crowncompcor=None,
    cos_basis=None,
    motion=None,
    newpath=None,
):
    r"""
    Load confounds from the filenames, concatenate together horizontally
    and save new file.
    Column headers are normalized to snake_case; missing/empty inputs are
    listed in the returned ``confounds_list`` but contribute no columns.
    >>> pd.DataFrame({'Global Signal': [0.1]}).to_csv('signals.tsv', index=False, na_rep='n/a')
    >>> pd.DataFrame({'stdDVARS': [0.2]}).to_csv('dvars.tsv', index=False, na_rep='n/a')
    >>> out_file, confound_list = _gather_confounds('signals.tsv', 'dvars.tsv')
    >>> confound_list
    ['Global signals', 'DVARS']
    >>> pd.read_csv(out_file, sep='\s+', index_col=None,
    ...             engine='python')  # doctest: +NORMALIZE_WHITESPACE
       global_signal  std_dvars
    0            0.1        0.2
    """
    def less_breakable(a_string):
        '''Collapse all whitespace and strip '#' so headers are robust across envs.'''
        return ''.join(a_string.split()).strip('#')
    # Taken from https://stackoverflow.com/questions/1175208/
    # If we end up using it more than just here, probably worth pulling in a well-tested package
    def camel_to_snake(name):
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
    def _adjust_indices(left_df, right_df):
        # This forces missing values to appear at the beginning of the DataFrame
        # instead of the end
        index_diff = len(left_df.index) - len(right_df.index)
        if index_diff > 0:
            right_df.index = range(index_diff, len(right_df.index) + index_diff)
        elif index_diff < 0:
            left_df.index = range(-index_diff, len(left_df.index) - index_diff)
    all_files = []
    confounds_list = []
    # Collect provided inputs (ordered), keeping only non-empty existing files
    for confound, name in (
        (signals, 'Global signals'),
        (std_dvars, 'Standardized DVARS'),
        (dvars, 'DVARS'),
        (fdisp, 'Framewise displacement'),
        (rmsd, 'Framewise displacement (RMS)'),
        (tcompcor, 'tCompCor'),
        (acompcor, 'aCompCor'),
        (crowncompcor, 'crownCompCor'),
        (cos_basis, 'Cosine basis'),
        (motion, 'Motion parameters'),
    ):
        if confound is not None and isdefined(confound):
            confounds_list.append(name)
            if os.path.exists(confound) and os.stat(confound).st_size > 0:
                all_files.append(confound)
    confounds_data = pd.DataFrame()
    for file_name in all_files:  # assumes they all have headings already
        try:
            new = pd.read_csv(file_name, sep="\t")
        except pd.errors.EmptyDataError:
            # No data, nothing to concat
            continue
        # Normalize each header to snake_case before merging
        for column_name in new.columns:
            new.rename(
                columns={column_name: camel_to_snake(less_breakable(column_name))}, inplace=True
            )
        # Align row indices so shorter tables pad at the top, then join columns
        _adjust_indices(confounds_data, new)
        confounds_data = pd.concat((confounds_data, new), axis=1)
    if newpath is None:
        newpath = os.getcwd()
    combined_out = os.path.join(newpath, 'confounds.tsv')
    confounds_data.to_csv(combined_out, sep='\t', index=False, na_rep='n/a')
    return combined_out, confounds_list
class _FMRISummaryInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`FMRISummary`."""
    in_nifti = File(exists=True, mandatory=True, desc="input BOLD (4D NIfTI file)")
    in_cifti = File(exists=True, desc="input BOLD (CIFTI dense timeseries)")
    in_segm = File(exists=True, desc="volumetric segmentation corresponding to in_nifti")
    confounds_file = File(exists=True, desc="BIDS' _confounds.tsv file")
    # Each confound entry is a bare column name, or (name, units),
    # or (name, units, display name); None disables the optional parts.
    str_or_tuple = traits.Either(
        traits.Str,
        traits.Tuple(traits.Str, traits.Either(None, traits.Str)),
        traits.Tuple(traits.Str, traits.Either(None, traits.Str), traits.Either(None, traits.Str)),
    )
    confounds_list = traits.List(
        str_or_tuple, minlen=1, desc='list of headers to extract from the confounds_file'
    )
    tr = traits.Either(None, traits.Float, usedefault=True, desc='the repetition time')
    drop_trs = traits.Int(0, usedefault=True, desc="dummy scans")
class _FMRISummaryOutputSpec(TraitedSpec):
    """Outputs for :class:`FMRISummary`."""
    out_file = File(exists=True, desc='written file path')
class FMRISummary(SimpleInterface):
    """
    Build the BOLD "carpet"-plot reportlet (SVG) with confound traces.

    Extracts time series (NIfTI, optionally CIFTI), selects the requested
    confound columns, and renders everything with niworkflows' ``fMRIPlot``.
    """
    input_spec = _FMRISummaryInputSpec
    output_spec = _FMRISummaryOutputSpec
    def _run_interface(self, runtime):
        self._results['out_file'] = fname_presuffix(
            self.inputs.in_nifti, suffix='_fmriplot.svg', use_ext=False, newpath=runtime.cwd
        )
        has_cifti = isdefined(self.inputs.in_cifti)
        # Read input object and create timeseries + segments object
        seg_file = self.inputs.in_segm if isdefined(self.inputs.in_segm) else None
        # NOTE(review): ``nb.load(seg_file)`` would fail if seg_file is None —
        # confirm that in_segm is always defined when this interface is run.
        dataset, segments = _nifti_timeseries(
            nb.load(self.inputs.in_nifti),
            nb.load(seg_file),
            remap_rois=False,
            labels=(
                ("WM+CSF", "Edge")
                if has_cifti
                else ("Ctx GM", "dGM", "sWM+sCSF", "dWM+dCSF", "Cb", "Edge")
            ),
        )
        # Process CIFTI
        if has_cifti:
            cifti_data, cifti_segments = _cifti_timeseries(nb.load(self.inputs.in_cifti))
            if seg_file is not None:
                # Append WM+CSF and Edge masks
                cifti_length = cifti_data.shape[0]
                dataset = np.vstack((cifti_data, dataset))
                # Shift NIfTI segment rows past the CIFTI rows now stacked on top
                segments = {k: np.array(v) + cifti_length for k, v in segments.items()}
                cifti_segments.update(segments)
                segments = cifti_segments
            else:
                dataset, segments = cifti_data, cifti_segments
        dataframe = pd.read_csv(
            self.inputs.confounds_file,
            sep="\t",
            index_col=None,
            dtype='float32',
            na_filter=True,
            na_values='n/a',
        )
        # Unpack confounds_list entries: name, optional units, optional display name
        headers = []
        units = {}
        names = {}
        for conf_el in self.inputs.confounds_list:
            if isinstance(conf_el, (list, tuple)):
                headers.append(conf_el[0])
                if conf_el[1] is not None:
                    units[conf_el[0]] = conf_el[1]
                if len(conf_el) > 2 and conf_el[2] is not None:
                    names[conf_el[0]] = conf_el[2]
            else:
                headers.append(conf_el)
        if not headers:
            data = None
            units = None
        else:
            data = dataframe[headers]
            data = data.rename(columns=names)
        fig = fMRIPlot(
            dataset,
            segments=segments,
            tr=self.inputs.tr,
            confounds=data,
            units=units,
            nskip=self.inputs.drop_trs,
            paired_carpet=has_cifti,
        ).plot()
        fig.savefig(self._results["out_file"], bbox_inches="tight")
        return runtime
| 15,506 | 33.769058 | 99 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/__init__.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from niworkflows.interfaces.bids import DerivativesDataSink as _DDSink
class DerivativesDataSink(_DDSink):
    """DataSink writing derivatives directly under the output root.

    Blanks niworkflows' ``out_path_base`` so no extra tool-named
    subdirectory is inserted into output paths.
    """
    out_path_base = ""
__all__ = ("DerivativesDataSink",)
| 283 | 24.818182 | 73 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/tests/test_confounds.py | from pathlib import Path
from nipype.pipeline import engine as pe
from fmriprep.interfaces import confounds
def test_RenameACompCor(tmp_path, data_dir):
    """RenameACompCor must reproduce the pre-computed renamed fixtures."""
    renamer = pe.Node(confounds.RenameACompCor(), name="renamer", base_dir=str(tmp_path))
    renamer.inputs.components_file = data_dir / "acompcor_truncated.tsv"
    renamer.inputs.metadata_file = data_dir / "component_metadata_truncated.tsv"
    outputs = renamer.run().outputs
    expected_components = (data_dir / "acompcor_renamed.tsv").read_text()
    expected_metadata = (data_dir / "component_metadata_renamed.tsv").read_text()
    assert Path(outputs.components_file).read_text() == expected_components
    assert Path(outputs.metadata_file).read_text() == expected_metadata
def test_FilterDropped(tmp_path, data_dir):
    """FilterDropped output must match the pre-computed filtered metadata fixture."""
    node = pe.Node(confounds.FilterDropped(), name="filt", base_dir=str(tmp_path))
    node.inputs.in_file = data_dir / "component_metadata_truncated.tsv"
    result = node.run()
    produced = Path(result.outputs.out_file).read_text()
    expected = (data_dir / "component_metadata_filtered.tsv").read_text()
    assert produced == expected
| 1,205 | 36.6875 | 89 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/tests/test_reports.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2023 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
import pytest
from ..reports import get_world_pedir
# Spot-check get_world_pedir across several voxel orientations and BIDS
# phase-encoding codes (axis i/j/k, optional trailing '-' for reversal).
@pytest.mark.parametrize(
    "orientation,pe_dir,expected",
    [
        ('RAS', 'j', 'Posterior-Anterior'),
        ('RAS', 'j-', 'Anterior-Posterior'),
        ('RAS', 'i', 'Left-Right'),
        ('RAS', 'i-', 'Right-Left'),
        ('RAS', 'k', 'Inferior-Superior'),
        ('RAS', 'k-', 'Superior-Inferior'),
        ('LAS', 'j', 'Posterior-Anterior'),
        ('LAS', 'i-', 'Left-Right'),
        ('LAS', 'k-', 'Superior-Inferior'),
        ('LPI', 'j', 'Anterior-Posterior'),
        ('LPI', 'i-', 'Left-Right'),
        ('LPI', 'k-', 'Inferior-Superior'),
        ('SLP', 'k-', 'Posterior-Anterior'),
        ('SLP', 'k', 'Anterior-Posterior'),
        ('SLP', 'j-', 'Left-Right'),
        ('SLP', 'j', 'Right-Left'),
        ('SLP', 'i', 'Inferior-Superior'),
        ('SLP', 'i-', 'Superior-Inferior'),
    ],
)
def test_get_world_pedir(tmpdir, orientation, pe_dir, expected):
    assert get_world_pedir(orientation, pe_dir) == expected
| 1,883 | 34.54717 | 74 | py |
fmriprep | fmriprep-master/fmriprep/interfaces/tests/__init__.py | 0 | 0 | 0 | py | |
fmriprep | fmriprep-master/fmriprep/interfaces/tests/test_maths.py | import nibabel as nb
import numpy as np
from nipype.pipeline import engine as pe
from fmriprep.interfaces.maths import Clip
def test_Clip(tmp_path):
    """Exercise Clip with lower, upper, and two-sided bounds."""
    in_file = str(tmp_path / "input.nii")
    nb.Nifti1Image(np.array([[[-1.0, 1.0], [-2.0, 2.0]]]), np.eye(4)).to_filename(in_file)

    def run_clip(name, **bounds):
        # Build and run one Clip node; return its output path.
        node = pe.Node(Clip(in_file=in_file, **bounds), name=name, base_dir=tmp_path)
        return node.run().outputs.out_file

    out_file = run_clip("threshold", minimum=0)
    assert out_file == str(tmp_path / "threshold/input_clipped.nii")
    assert np.allclose(nb.load(out_file).get_fdata(), [[[0.0, 1.0], [0.0, 2.0]]])

    # Bounds that clip nothing must return the input file unchanged
    out_file = run_clip("threshold2", minimum=-3)
    assert out_file == in_file
    assert np.allclose(nb.load(out_file).get_fdata(), [[[-1.0, 1.0], [-2.0, 2.0]]])

    out_file = run_clip("clip", minimum=-1, maximum=1)
    assert out_file == str(tmp_path / "clip/input_clipped.nii")
    assert np.allclose(nb.load(out_file).get_fdata(), [[[-1.0, 1.0], [-1.0, 1.0]]])

    out_file = run_clip("nonpositive", maximum=0)
    assert out_file == str(tmp_path / "nonpositive/input_clipped.nii")
    assert np.allclose(nb.load(out_file).get_fdata(), [[[-1.0, 0.0], [-2.0, 0.0]]])
fmriprep | fmriprep-master/fmriprep/interfaces/tests/data/__init__.py | 0 | 0 | 0 | py | |
vldbsubmission | vldbsubmission-master/python_src/SirApplication3data.py | from hgDecompose.utils import get_hg, writeHypergraphHg, writeHypergraph, get_random_hg,save_dictascsv
from hgDecompose.Hypergraph import Hypergraph
import random,os
import time
import argparse,pickle,copy
from hgDecompose.optimizedhgDecompose import HGDecompose
from matplotlib import pyplot as plt
import networkx as nx
# random.seed(10)
# Command-line interface: dataset/algorithm selection, how many innermost-core
# vertices to delete per level, and how many peeling levels to perform.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", type=str, default="default")
parser.add_argument("-a", "--algo", type=str, default="naive_nbr")
parser.add_argument("-n", "--num_delete", type=int, default=20, help="how many vertices are deleted")
parser.add_argument("-l", "--level", help="how many times innermost core is deleted", default=1, type=int)
args = parser.parse_args()
def del_innercore(H, diction):
    """Remove (at most) ``args.num_delete`` innermost-core vertices.

    Vertices at the maximum core value of ``diction`` are deletion
    candidates; the ones not sampled for deletion are restored, together
    with all lower-core vertices, into the returned vertex->core dict.
    ``H`` is accepted for interface compatibility but not used here.
    """
    max_core = max(diction.values())
    remainder = {v: c for v, c in diction.items() if c < max_core}
    innermost = {v: c for v, c in diction.items() if c >= max_core}
    innermost_vertices = list(innermost.keys())
    print(len(innermost_vertices))
    # Cap the number of deletions at the size of the innermost core.
    num_to_delete = args.num_delete
    if num_to_delete > len(innermost_vertices):
        num_to_delete = len(innermost_vertices)
    # Everything not chosen for deletion survives into the remainder.
    survivors = random.sample(innermost_vertices, len(innermost_vertices) - num_to_delete)
    for v in survivors:
        remainder[v] = innermost[v]
    return remainder
def _compute_core(H):
    """Run the core decomposition selected by ``args.algo`` on ``H``.

    Returns the vertex -> core-number dictionary produced by the algorithm.
    """
    hgDecompose = HGDecompose()
    if args.algo == "naive_nbr":
        hgDecompose.naiveNBR(H, verbose=False)
    if args.algo == "naive_degree":
        hgDecompose.naiveDeg(H, verbose=False)
    if args.algo == "graph_core":
        # Classical graph core decomposition on the clique expansion.
        G = H.get_clique_graph()
        nx_G = nx.Graph()
        for e in G.edge_iterator():
            nx_G.add_edge(e[0], e[1])
        hgDecompose.core = nx.core_number(nx_G)
    return hgDecompose.core


def gen_nested_hypergraph():
    """Iteratively peel the innermost core ``args.level`` times.

    At each level, up to ``args.num_delete`` innermost-core vertices of the
    previous level's hypergraph are removed; the surviving sub-hypergraph
    and its core decomposition are recorded.  The level -> {'H', 'core'}
    mapping is pickled into ``sirdata/``.
    """
    pathstring = "sirdata/"
    output = {}
    os.system('mkdir -p ' + pathstring)
    os.system("mkdir -p tests/tmp")
    name = args.dataset
    algoname = args.algo
    level = int(args.level)
    input_H = get_hg(name)
    # BUGFIX: removed a leftover debug ``print(...)``/``quit()`` pair here
    # that terminated the process and made the rest of this function
    # (including the final pickle dump) unreachable.
    fname = "tests/tmp/" + name + "_" + algoname + ".pkl"
    if not os.path.isfile(fname):
        core_base = _compute_core(input_H)
    else:
        # Reuse a previously pickled decomposition when available.
        with open(fname, 'rb') as handle:
            hgDecompose = pickle.load(handle)
        core_base = hgDecompose.core
    output[0] = {}
    output[0]['H'] = input_H
    output[0]['core'] = core_base
    remainder_vertices = del_innercore(input_H, core_base)
    for i in range(1, level):
        print('i: ', i)
        output[i] = {}
        # Restrict to surviving vertices and rebuild a clean Hypergraph.
        input_H = input_H.strong_subgraph(remainder_vertices)
        _edgedict = {}
        for eid, e in input_H.edge_eid_iterator():
            _edgedict[eid] = e
        output[i]['H'] = Hypergraph(_edgedict)
        # Deep-copy so the decomposition cannot mutate the stored hypergraph.
        core_base = _compute_core(copy.deepcopy(output[i]['H']))
        output[i]['core'] = core_base
        remainder_vertices = del_innercore(output[i]['H'], core_base)
    with open(os.path.join(pathstring, name + '_' + algoname + "_" + str(args.num_delete) + '.pkl'), 'wb') as handle:
        pickle.dump(output, handle, protocol=4)


gen_nested_hypergraph()
# pathstring = "data/datasets/sirdata/"
# args = parser.parse_args()
# name = args.dataset
# algoname = args.algo
# level = int(args.level)
# with open(os.path.join(pathstring,name+'_'+algoname+'.pkl'), 'rb') as handle:
# # with open(os.path.join(pathstring,name+'_'+algoname+'.pkl'), 'rb') as handle:
# output = pickle.load(handle)
# print(output.keys())
# print('H0: ',output[0])
# print('H1: ',output[1])
| 5,173 | 36.492754 | 111 | py |
vldbsubmission | vldbsubmission-master/python_src/run.py | # import sys
# sys.path.append("HyperNetX")
# import matplotlib.pyplot as plt
import networkx as nx
# import hypernetx as hnx
# from hgDecompose.hgDecompose import HGDecompose
# from hgDecompose.utils import get_hg_hnx
# from hgDecompose.newhgDecompose import HGDecompose
from hgDecompose.optimizedhgDecompose import HGDecompose
from hgDecompose.utils import get_hg,check_connectivity,writeHypergraphHg
from hgDecompose.influence_propagation import propagate_for_all_vertices, propagate_for_random_seeds, run_intervention_exp2,run_intervention_exp2_explain,run_intervention_exp2_explain_splen, propagate_for_all_vertices_for_kd
import argparse
import pandas as pd
import pickle
import os
from copy import deepcopy
# arguments
# CLI: dataset/algorithm selection plus integer flags choosing which SIR
# propagation experiment to run, and the SIR infection (p) / recovery
# (gamma) parameters.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", type=str, default="default")
parser.add_argument("-a", "--algo", type=str, default="naive_nbr")
parser.add_argument("-v", "--verbose", action='store_true')
parser.add_argument("--max_propagation_time", help="number of iterations in sir", default=100, type=int)
parser.add_argument("--iterations", help="number of iterations", default=1, type=int)
parser.add_argument("-sir", "--sir", default=0, type=int)
parser.add_argument("-sir_kd", "--sir_kd", default=0, type=int)
parser.add_argument("-sir_exp2", "--sir_exp2", default=0, type=int)
parser.add_argument("-sir_exp3", "--sir_exp3", default=0, type=int) # intervention
parser.add_argument("-n", "--num_delete", type=int, default=20, help="how many vertices are deleted")
parser.add_argument("-sir_exp3_explanation", "--sir_exp3_explanation", default=0, type=int)
parser.add_argument("-sir_exp3_explanation_splen", "--sir_exp3_explanation_splen", default=0, type=int)
parser.add_argument("-p", "--prob", help="parameter for Probability", default= 0.3, type=float)
parser.add_argument("-g", "--gamma", help="parameter for Probability", default= 0.01, type=float)
args = parser.parse_args()
# Pandemic propagation
if(args.sir or args.sir_kd or args.sir_exp2 or args.sir_exp3 or args.sir_exp3_explanation or args.sir_exp3_explanation_splen):
input_H = get_hg(args.dataset)
H = deepcopy(input_H)
assert H is not None
# Loading/saving to file
os.system("mkdir -p tests/tmp")
os.system("mkdir -p ../output/")
fname = "tests/tmp/" + args.dataset + "_" + args.algo + ".pkl"
if(not os.path.isfile(fname)):
hgDecompose = HGDecompose()
if(args.algo == "naive_nbr"):
hgDecompose.naiveNBR(input_H, verbose=args.verbose)
elif(args.algo == "naive_degree"):
hgDecompose.naiveDeg(input_H, verbose=args.verbose)
elif(args.algo == "graph_core"):
G = H.get_clique_graph()
nx_G = nx.Graph()
# print("N: ",G.get_N())
# print("M: ",G.get_M())
# hgDecompose.naiveDeg(G, verbose=args.verbose)
for e in G.edge_iterator():
nx_G.add_edge(e[0],e[1])
hgDecompose.core = nx.core_number(nx_G)
else:
raise RuntimeError(args.algo + " is not defined or implemented yet")
core_base = hgDecompose.core
# print(core_base)
# dump file
with open(fname, 'wb') as handle:
pickle.dump(hgDecompose, handle, protocol= 4)
else:
# print("Retrieving saved file")
with open(fname, 'rb') as handle:
hgDecompose = pickle.load(handle)
core_base = hgDecompose.core
# quit()
# print(core_base)
entry = {}
entry['dataset'] = args.dataset
entry['p'] = float(args.prob)
entry['algo'] = args.algo
assert not (args.sir and args.sir_exp2)
if(args.sir):
entry['exp'] = "sir"
elif(args.sir_kd):
entry['exp'] = "sir_kd"
elif(args.sir_exp2):
entry['exp'] = "sir_exp2"
elif(args.sir_exp3):
entry['exp'] = "sir_exp3"
elif(args.sir_exp3_explanation):
entry['exp'] = 'sir_exp3_explanation'
elif(args.sir_exp3_explanation_splen):
entry['exp'] = 'sir_exp3_explanation_splen'
else:
raise NotImplementedError()
entry['result'] = None
entry['timestep_results'] = None
entry['intervention_results'] = None
entry['max propagation time'] = None
if(args.sir):
entry['result'] = propagate_for_all_vertices(H, core_base, num_iterations=args.max_propagation_time, p = float(args.prob), verbose=args.verbose)
entry['max propagation time'] = args.max_propagation_time
# import pprint
# pprint.pprint(entry['result'])
result = pd.DataFrame()
result = result.append(entry, ignore_index=True)
result.to_csv('../output/propagation_result.csv', header=False,
index=False, mode='a')
elif(args.sir_exp2):
entry['timestep_results'] = propagate_for_random_seeds(H, core_base, p = float(args.prob), verbose=args.verbose)
result = pd.DataFrame()
result = result.append(entry, ignore_index=True)
result.to_csv('../output/propagation_result.csv', header=False,
index=False, mode='a')
elif(args.sir_exp3):
# entry['result'], entry['timestep_results'] = propagate_for_random_seeds(H, core_base, p = float(args.prob), verbose=args.verbose)
# entry['intervention_results'] = run_intervention_exp(H, core_base, p = float(args.prob),verbose = args.verbose)
# entry['intervention_results'] = run_intervention_exp2(args.dataset+"_"+args.algo, original_n = len(H.nodes()), p = float(args.prob),verbose = args.verbose)
entry['intervention_results'] = run_intervention_exp2(args.dataset+"_"+args.algo + "_" + str(args.num_delete), original_n = None, p = float(args.prob),verbose = args.verbose)
import pprint
pprint.pprint(entry['intervention_results'])
entry['num delete'] = args.num_delete
result = pd.DataFrame()
result = result.append(entry, ignore_index=True)
result.to_csv('../output/propagation_result_recursive_delinner_'+args.dataset+"_"+args.algo+'3.csv', header=False,
index=False, mode='a')
elif(args.sir_exp3_explanation):
run_intervention_exp2_explain(args.dataset+"_"+args.algo,verbose = args.verbose)
quit()
elif(args.sir_exp3_explanation_splen):
run_intervention_exp2_explain_splen(args.dataset+"_"+args.algo,verbose = args.verbose)
quit()
elif(args.sir_kd):
import pandas as pd
kd_result = pd.read_csv("../data/kdcore_" + args.dataset + ".csv", header=None)
kd_result.columns = ['vertex', 'k', 'd']
# kd_result.sort_values(by=['k', 'd'], ascending=False, inplace=True)
kd = {} # a dictionary, where key = (k, d) and value is a list of vertex belonging to that (k, d) core
for key, item in kd_result.groupby(['k', 'd'], as_index=False):
# print(item[''])
assert len(item['vertex'].unique()) == item.shape[0]
kd[key] = list(item['vertex'].unique())
entry['result'] = propagate_for_all_vertices_for_kd(H, kd, num_iterations=args.max_propagation_time, p = float(args.prob), verbose=args.verbose)
entry['max propagation time'] = args.max_propagation_time
result = pd.DataFrame()
result = result.append(entry, ignore_index=True)
result.to_csv('../output/propagation_result.csv', header=False,
index=False, mode='a')
result = pd.DataFrame()
result = result.append(entry, ignore_index=True)
if(args.verbose):
print(entry)
print("\n")
print(", ".join(["\'" + column + "\'" for column in result.columns.tolist()]))
print(result)
# result.to_csv('data/output/propagation_result_exp3.csv', header=False,
# index=False, mode='a')
# result.to_csv('data/output/propagation_result_topk_exp3.csv', header=False,
# index=False, mode='a')
print(", ".join(["\'" + column + "\'" for column in result.columns.tolist()]))
# result.to_csv('data/output/propagation_result_topkpercent_exp3.csv', header=False,
# index=False, mode='a')
# result.to_csv('data/output/propagation_result_recursive_delinner_'+args.dataset+"_"+args.algo+'3.csv', header=False,
# index=False, mode='a')
quit()
| 8,416 | 45.761111 | 224 | py |
vldbsubmission | vldbsubmission-master/python_src/hgDecompose/BasicHypergraph.py | import math
import itertools
import random
class Hypergraph:
    """
    Hypergraph stored in a compressed edge-list format.

    Hyperedges are flattened into ``e_nodes``; ``e_indices`` maps an edge id
    to its (start, end) slice in that list, so edge-centric queries (e.g.
    edge enumeration) are cheap.  Node-centric queries are served by the
    incidence dictionary ``inc_dict`` (node id -> set of incident edge ids).
    Degrees and initial neighbourhoods are precomputed during construction.
    """

    def __init__(self, _edgedict=None):
        """Build the hypergraph from ``{edge_id: iterable_of_node_ids}``.

        ``None`` or an empty dict produces an empty hypergraph; in that case
        the running offset ``self.i`` is never created (so ``add_edge`` on a
        truly empty instance would fail -- kept as-is for compatibility).
        """
        self.e_indices = {}     # edge id -> (start, start + size) slice into e_nodes
        self.e_nodes = []       # flattened edge list
        self.inc_dict = {}      # node id -> set of incident edge ids
        self.degree_dict = {}   # node id -> number of incident edges
        self.init_nbrsize = {}  # node id -> initial neighbourhood size
        self.init_nbr = {}      # node id -> initial neighbour set
        self.init_eids = {}     # immutable copy of the initial edge index
        self.init_nodes = []    # initial vertex set (sorted at the end)
        if _edgedict is None or len(_edgedict) == 0:  # empty hypergraph
            return
        self.i = 0  # next free position in e_nodes
        j = 0
        for e_id, e in _edgedict.items():
            j += 1
            if j % 50000 == 0:  # progress indicator for very large inputs
                print(j)
            _len = len(e)
            self.e_indices[e_id] = (self.i, self.i + _len)
            self.init_eids[e_id] = (self.i, self.i + _len)
            for v in e:
                self.e_nodes.append(v)
                if v not in self.inc_dict:
                    self.inc_dict[v] = set()  # first sighting of v
                    self.init_nodes.append(v)
                self.inc_dict[v].add(e_id)                            # incidence update
                self.degree_dict[v] = self.degree_dict.get(v, 0) + 1  # degree update
                nbr_v = self.init_nbr.get(v, set()).union(e)
                nbr_v.remove(v)  # v is not its own neighbour
                self.init_nbrsize[v] = len(nbr_v)  # neighbourhood size update
                self.init_nbr[v] = nbr_v           # neighbourhood set update
            self.i += _len
        self.init_nodes = sorted(self.init_nodes)
        print('done')

    def get_init_nbr(self, v):
        """Return the initial (construction-time) neighbour set of v."""
        return self.init_nbr[v]

    def get_init_nbrlen(self, v):
        """Return the initial neighbourhood size of v."""
        return self.init_nbrsize[v]

    def add_edge(self, e_id, e_nodes):
        """Append edge ``e_id`` with members ``e_nodes``.

        Repeated edges are not checked; ``init_nbr``/``init_nbrsize`` are NOT
        updated (they describe the construction-time hypergraph only).
        """
        _len = len(e_nodes)
        self.e_indices[e_id] = (self.i, self.i + _len)
        for v in e_nodes:
            self.e_nodes.append(v)
            if v not in self.inc_dict:
                self.inc_dict[v] = set()  # create incident edge entry for v
                self.init_nodes.append(v)
            self.inc_dict[v].add(e_id)                            # incident edge update
            self.degree_dict[v] = self.degree_dict.get(v, 0) + 1  # degree update
        self.i += _len

    def hasEdge(self, e_nodes):
        """Return True iff an edge equal to ``e_nodes`` exists.

        NOTE: comparison is ``tuple(edge) == e_nodes`` -- order-sensitive and
        expects ``e_nodes`` to be a tuple in insertion order.
        """
        Exists = False
        for v in e_nodes:
            if v in self.inc_dict:
                for e_id in self.inc_dict[v]:
                    if tuple(self.get_edge_byindex(e_id)) == e_nodes:
                        Exists = True
                        break
            if Exists:
                break
        return Exists

    def sample_v_preferential_attachment(self, num_sample):
        """Sample ``num_sample`` vertices following the degree distribution.

        Algorithm: weighted reservoir sampling (WeightedReservoir-Chao,
        https://en.wikipedia.org/wiki/Reservoir_sampling).
        """
        WSum = 0
        R = []
        # fill the reservoir array with the first num_sample nodes
        k = 0
        n = len(self.init_nodes)
        for i in range(num_sample):
            R.append(self.init_nodes[i])
            WSum = WSum + self.degree(self.init_nodes[i])
            k += 1
        for i in range(k, n):
            WSum = WSum + self.degree(self.init_nodes[i])
            p = self.degree(self.init_nodes[i]) * 1.0 / WSum  # probability for this item
            j = random.random()  # uniformly random between 0 and 1
            if j <= p:  # select item according to probability
                # uniform selection in reservoir for replacement
                R[random.randint(0, k - 1)] = self.init_nodes[i]
        return R

    def get_edge_byindex(self, e_id):
        """Return the member list of edge ``e_id`` (a slice of e_nodes)."""
        e_start, e_end = self.e_indices[e_id]
        return self.e_nodes[e_start:e_end]

    def edge_iterator(self):
        """Yield each edge as a list of node ids."""
        for e_id in self.e_indices.keys():
            yield self.get_edge_byindex(e_id)

    def edge_eid_iterator(self):
        """Yield (edge_id, member_list) pairs."""
        for e_id in self.e_indices.keys():
            yield (e_id, self.get_edge_byindex(e_id))

    def init_node_iterator(self):
        """Yield the initial nodes in sorted order.

        Faster than node_iterator() and deterministic.
        """
        for v in self.init_nodes:
            yield v

    def node_iterator(self):
        """Yield the current nodes (those present in inc_dict)."""
        for v_id in self.inc_dict.keys():
            yield v_id

    def nodes(self):
        """Return the list of current vertices."""
        return [v for v in self.node_iterator()]

    def edges(self):
        """Return the list of edges (each edge is a list of vertex ids)."""
        return [e for e in self.edge_iterator()]

    def degree(self, u):
        """Return the degree of u (0 for an unknown vertex)."""
        return self.degree_dict.get(u, 0)

    def dim(self, e):
        """Return the dimension of edge e: |e| - 1."""
        return len(e) - 1

    def neighbors(self, v):
        """Return the list of current neighbours of v."""
        return [u for u in self.neighbors_iterator(v)]

    def get_number_of_nbrs(self, u):
        """Return the current number of neighbours of u."""
        return len(self.neighbors(u))

    def neighbors_iterator(self, v):
        """Yield each neighbour of v exactly once.

        Traverses from v to each neighbour instead of materialising a set, so
        the neighbourhood list is never built explicitly.
        Overall complexity: O(d(v) * |e_max|), where e_max = largest hyperedge.
        """
        incident_edges = self.inc_dict.get(v, None)  # {O(1)}
        if incident_edges:
            visited_dict = {}
            for e_id in incident_edges:                # { O(d(v)) }
                for u in self.get_edge_byindex(e_id):  # { O(|e|) }
                    if u != v:
                        if not visited_dict.get(u, False):
                            visited_dict[u] = True
                            yield u
        else:
            return

    def removeV_transform(self, v, verbose=False):
        """Remove vertex v, transforming this hypergraph towards the
        sub-hypergraph strongly induced by V - {v}.

        Here we do not maintain nbr and len_nbr dictionaries.
        NOTE(review): v's incident edges are stripped from neighbours'
        incidence sets but remain in ``e_indices`` -- confirm callers rely on
        inc_dict/degree_dict only after this operation.
        """
        incident_eids = set()  # set of edge_ids incident on v
        for e_id in self.inc_dict.get(v, []):
            incident_eids.add(e_id)
        if verbose:
            print("incident edges on ", v, " : ", incident_eids)
        # Update incident edges and degree of every nbr of v
        for u in self.neighbors_iterator(v):  # traverse over neighbours of v
            if verbose:
                print('traversing nbr: ', u)
            self.inc_dict[u] -= incident_eids  # remove v's incident edges from u's
            self.degree_dict[u] = len(self.inc_dict.get(u, []))
        if v in self.inc_dict:
            del self.inc_dict[v]
        if v in self.degree_dict:
            del self.degree_dict[v]

    # TODO: addV_transform(self, S) -- re-add vertices (not implemented).

    def adV_transform(self, S):
        """Re-activate every initial edge fully contained in the enlarged
        vertex set.

        S: a set of vertices.  Assumes the current hypergraph is already a
        strong subgraph, i.e. inc_dict and e_indices are maintained.
        NOTE(review): relies on ``self.prev_V``, which is never initialised
        anywhere in this class -- confirm callers set it before invoking.
        """
        self.prev_V = self.prev_V.union(S)
        for e in self.init_eids:  # iterate over ALL initial edge ids
            start_e, end_e = self.init_eids[e]
            edge = self.e_nodes[start_e:end_e]
            if set(edge).issubset(self.prev_V):
                self.e_indices[e] = self.init_eids[e]
                for u in edge:
                    self.inc_dict[u].add(e)

    def strong_subgraph(self, vertex_list):
        """Return the sub-hypergraph strongly induced by ``vertex_list``:
        exactly those edges all of whose members lie in vertex_list.
        """
        H = Hypergraph()
        e_indices = {}  # (position, edge_size) of edge e in e_nodes list
        e_nodes = []    # flattened edge list
        inc_dict = {}
        H.i = 0
        for e_id in self.e_indices:
            e = self.get_edge_byindex(e_id)
            flag = True
            for u in e:
                if u not in vertex_list:
                    flag = False
                    break
            if flag:  # every member of e survives
                e_nodes += e
                _lene = len(e)
                e_indices[e_id] = (H.i, H.i + _lene)
                for v in e:
                    if v not in inc_dict:
                        inc_dict[v] = set()
                    if e_id not in inc_dict[v]:
                        inc_dict[v].add(e_id)
                        H.degree_dict[v] = H.degree_dict.get(v, 0) + 1
                H.i += _lene
        H.e_nodes = e_nodes
        H.inc_dict = inc_dict
        H.e_indices = e_indices
        return H

    def get_hnx_format(self):
        """Convert to a HyperNetX hypergraph (requires the HyperNetX package
        to be importable from the "HyperNetX" directory)."""
        import sys
        sys.path.append("HyperNetX")
        import hypernetx as hnx
        _tempH = {}
        for e_id in self.e_indices.keys():
            _tempH[e_id] = self.get_edge_byindex(e_id)
        return hnx.Hypergraph(_tempH)

    def get_clique_graph(self):
        """Return the clique expansion of this hypergraph as a new Hypergraph.

        Every hyperedge with >= 2 members contributes all its vertex pairs as
        (deduplicated, sorted) binary edges; singleton edges are dropped.
        Bug fix: removed a leftover debug ``print(scenes)`` that flooded
        stdout on large graphs (the sibling implementation of this class has
        it commented out).
        """
        binary_edges = set()
        for e_id in self.e_indices.keys():
            e = self.get_edge_byindex(e_id)
            if len(e) < 2:
                continue  # singleton edges contribute nothing
            elif len(e) == 2:
                y = tuple(sorted(e))
                if y not in binary_edges:
                    binary_edges.add(y)
            else:
                for x in itertools.combinations(e, 2):
                    y = tuple(sorted(x))
                    if y not in binary_edges:
                        binary_edges.add(y)
        scenes = {}
        for i, edge in enumerate(binary_edges):
            scenes[i] = list(edge)
        return Hypergraph(scenes)

    def get_N(self):
        """Return the number of current vertices."""
        return len(self.inc_dict)

    def get_M(self):
        """Return the number of current edges."""
        return len(self.e_indices)

    def get_degree_sequence(self):
        """Return the degree sequence in descending order."""
        degs = []
        for v in self.degree_dict:
            degs.append(self.degree_dict[v])
        return sorted(degs, reverse=True)

    def get_degree_distr(self):
        """Return the degree distribution as (degree, fraction) pairs,
        sorted by degree in descending order."""
        degs = {}
        N = self.get_N()
        for d in self.degree_dict.values():
            degs[d] = degs.get(d, 0) + (1.0 / N)
        return sorted(degs.items(), reverse=True)

    def get_dim_sequence(self):
        """Return the edge-size sequence in descending order.

        NOTE(review): this returns |e| (slice length), not dim(e) = |e| - 1
        as ``dim()`` defines -- kept as-is for backward compatibility.
        """
        dims = []
        for e_start, e_end in self.e_indices.values():
            dims.append(e_end - e_start)
        return sorted(dims, reverse=True)

    def get_dim_distr(self):
        """Return the edge-size distribution as (size, fraction) pairs,
        sorted descending.

        Bug fix: removed ``assert isinstance(H, hnx.Hypergraph)`` which
        referenced the undefined names ``H`` and ``hnx`` and made every call
        raise NameError.
        """
        dims = {}
        M = self.get_M()
        for _dim in self.get_dim_sequence():
            dims[_dim] = dims.get(_dim, 0) + (1.0 / M)
        return sorted(dims.items(), reverse=True)

    def get_nbr_sequence(self):
        """Return the initial neighbourhood-size sequence, descending."""
        return sorted(self.init_nbrsize.values(), reverse=True)

    def get_nbr_distr(self):
        """Return the distribution of neighbourhood sizes.

        NOTE(review): unlike get_degree_distr()/get_dim_distr() this returns
        only the sorted size keys, not (size, fraction) items -- kept as-is
        for backward compatibility; confirm what callers expect.
        """
        nbrs = {}
        N = self.get_N()
        for nbr in self.init_nbrsize.values():
            nbrs[nbr] = nbrs.get(nbr, 0) + (1.0 / N)
        return sorted(nbrs, reverse=True)

    def get_degree_stats(self):
        """Return summary stats (mean/std/min/max/quartiles) of degrees."""
        import pandas as pd
        deg_seq = self.get_degree_sequence()
        stat = {'mean': None, 'max': None, 'min': None, '25%': None, '50%': None, '75%': None, 'std': None}
        _temp = pd.Series(deg_seq).describe()
        stat['mean'] = _temp['mean']
        stat['std'] = _temp['std']
        stat['min'] = _temp['min']
        stat['max'] = _temp['max']
        stat['25%'] = _temp['25%']
        stat['50%'] = _temp['50%']
        stat['75%'] = _temp['75%']
        return stat

    def get_dim_stats(self):
        """Return summary stats of edge sizes (see get_dim_sequence)."""
        import pandas as pd
        dim_seq = self.get_dim_sequence()
        stat = {'mean': None, 'max': None, 'min': None, '25%': None, '50%': None, '75%': None, 'std': None}
        _temp = pd.Series(dim_seq).describe()
        stat['mean'] = _temp['mean']
        stat['std'] = _temp['std']
        stat['min'] = _temp['min']
        stat['max'] = _temp['max']
        stat['25%'] = _temp['25%']
        stat['50%'] = _temp['50%']
        stat['75%'] = _temp['75%']
        return stat

    def get_nbr_stats(self):
        """Return summary stats of initial neighbourhood sizes."""
        import pandas as pd
        nbr_seq = self.get_nbr_sequence()
        stat = {'mean': None, 'max': None, 'min': None, '25%': None, '50%': None, '75%': None, 'std': None}
        _temp = pd.Series(nbr_seq).describe()
        stat['mean'] = _temp['mean']
        stat['std'] = _temp['std']
        stat['min'] = _temp['min']
        stat['max'] = _temp['max']
        stat['25%'] = _temp['25%']
        stat['50%'] = _temp['50%']
        stat['75%'] = _temp['75%']
        return stat

    def WriteDegreeDist(self, filename="deg_dist"):
        """Pickle the degree distribution to ``<filename>.pickle``."""
        import pickle
        with open(filename + '.pickle', 'wb') as wf:
            dict_tosave = self.get_degree_distr()
            pickle.dump(dict_tosave, wf, protocol=pickle.HIGHEST_PROTOCOL)

    def __str__(self):
        return ",".join([str(i) for i in self.edge_iterator()])
vldbsubmission | vldbsubmission-master/python_src/hgDecompose/Hypergraph.py | import math
import itertools
import random
# from typing import final
class Hypergraph:
"""
Our own hypergraph representation class.
We store hyperedge list in compressed format using two things- 1) e_indices (a dict) 2) e_nodes (a list)
Although edge-centric queries (e.g. edge enumeration) are facilitated in this way, node-centric queries are not convenient.
To support node-centric queries, we also maintain incidence dictionary inc_dict (key = v_ids, values = incident edge ids)
"""
    def __init__(self, _edgedict=None):
        """Build the hypergraph from ``{edge_id: iterable_of_node_ids}`` and
        precompute the core-number bounds via compute_Bounds2().

        ``None`` or an empty dict produces an empty hypergraph; in that case
        no bounds are computed and the offset ``self.i`` is never created.
        """
        self.e_indices = {}     # (position, position+edge_size) of edge e in e_nodes list
        self.e_nodes = []       # flattened edge list
        self.inc_dict = {}      # key: nodeid, value = ids of incident edges (set)
        # degree pre-compute => degree_dict or len_incedge = {}
        self.degree_dict = {}
        self.init_nbrsize = {}  # initial nbrhood sizes. can be precomputed.
        self.init_nbr = {}      # node id -> initial neighbour set
        self.init_eids = {}     # immutable copy of the initial edge index
        self.init_nodes = []    # initial vertex set (sorted at the end)
        if _edgedict is None or len(_edgedict) == 0:  # Returns an empty Hypergraph
            return
        self.i = 0  # next free position in e_nodes
        for e_id, e in _edgedict.items():
            _len = len(e)
            self.e_indices[e_id] = (self.i, self.i + _len)
            self.init_eids[e_id] = (self.i, self.i + _len)
            for v in e:
                self.e_nodes.append(v)
                if v not in self.inc_dict:
                    self.inc_dict[v] = set()  # create incident edge entry for v
                    self.init_nodes.append(v)
                self.inc_dict[v].add(e_id)  # incident edge update
                self.degree_dict[v] = self.degree_dict.get(v, 0) + 1  # degree update
                nbr_v = self.init_nbr.get(v, set()).union(e)
                nbr_v.remove(v)  # v is not its own neighbour
                self.init_nbrsize[v] = len(nbr_v)  # neighbourhood length update
                self.init_nbr[v] = nbr_v  # neighbourbood set update
            self.i += _len
        self.init_nodes = sorted(self.init_nodes)
        # self.compute_Bounds()
        self.compute_Bounds2()
    def compute_Bounds2(self):
        """Computes Local Upper Bound differently from compute_Bounds().

        Sets: ``glb``/``gub`` (global min/max initial neighbourhood size),
        ``lub`` (local upper bound, taken directly as |N(v)| here),
        ``llb`` (local lower bound: max over v's incident edges of |e| - 1,
        floored at glb) and ``sorted_ub_set`` (descending candidate values;
        per the original comment, upper bounds for param_s).
        """
        # print('Global-local ub')
        # Computing global upper and lower bounds
        self.glb = math.inf
        self.gub = -math.inf
        # Computing local lower bounds
        # self.precomputedlb2 = {}
        # # Computing local upper bounds
        self.lub = {}
        self.sorted_ub_set = set()  # || => upper bound for param_s
        for v in self.node_iterator():
            len_neighbors_v = self.init_nbrsize[v]
            self.glb = min(self.glb, len_neighbors_v)
            self.gub = max(self.gub, len_neighbors_v)
            self.lub[v] = len_neighbors_v  # local ub = |N(v)|, no refinement pass
            self.sorted_ub_set.add(len_neighbors_v)
        # Bucket-refinement variant kept for reference (see compute_Bounds()):
        # ## print('---- ',self.glb, self.gub)
        # for k in range(self.glb, self.gub+1):
        #     while len(_bucket.get(k, [])) != 0:
        #         v = _bucket[k].pop()
        #         self.lub[v] = k
        #         self.sorted_ub_set.add(k)  # add computed local upper bound to ub_set
        #         for u in self.init_nbr[v]:
        #             if u not in self.lub:
        #                 max_value = max(_inv_bucket[u] - 1, k)
        #                 _bucket[_inv_bucket[u]].remove(u)
        #                 if(max_value not in _bucket):
        #                     _bucket[max_value] = set()
        #                 _bucket[max_value].add(u)
        #                 _inv_bucket[u] = max_value
        self.llb = {}
        _min_llb = math.inf
        # Local lower bound computation
        for v in self.node_iterator():
            _max = -math.inf
            for e_id in self.inc_dict[v]:
                _max = max(_max, len(self.get_edge_byindex(e_id)) - 1)
            self.llb[v] = max(_max, self.glb)
            _min_llb = min(_min_llb, self.llb[v])
        self.sorted_ub_set.add(_min_llb - 1)  # one value below the smallest llb
        self.sorted_ub_set = sorted(list(self.sorted_ub_set), reverse=True)
    def compute_Bounds(self):
        """Compute global and local core-number bounds via bucket refinement.

        Like compute_Bounds2() but ``lub`` is refined: vertices are peeled
        from the smallest bucket upward, and each unfinalised neighbour's
        bucket key is lowered to max(|N(u)| - 1, k).
        (__init__ currently calls compute_Bounds2() instead of this.)
        """
        # print('Global-local ub')
        # Computing global upper and lower bounds
        self.glb = math.inf
        self.gub = -math.inf
        # Computing local lower bounds
        # self.precomputedlb2 = {}
        # # Computing local upper bounds
        self.lub = {}
        # Auxiliary variables to assist computation of ub2
        _inv_bucket = {}  # vertex -> its current bucket key
        _bucket = {}      # bucket key -> set of vertices
        self.sorted_ub_set = set()  # || => upper bound for param_s
        for v in self.node_iterator():
            len_neighbors_v = self.init_nbrsize[v]
            self.glb = min(self.glb, len_neighbors_v)
            self.gub = max(self.gub, len_neighbors_v)
            _inv_bucket[v] = len_neighbors_v
            ## print(node, neighbors)
            if len_neighbors_v not in _bucket:
                _bucket[len_neighbors_v] = set()
            _bucket[len_neighbors_v].add(v)
        ## print('---- ',self.glb, self.gub)
        for k in range(self.glb, self.gub + 1):
            while len(_bucket.get(k, [])) != 0:
                v = _bucket[k].pop()
                self.lub[v] = k  # v finalised at bucket level k
                self.sorted_ub_set.add(k)  # add computed local upper bound to ub_set
                for u in self.init_nbr[v]:
                    if u not in self.lub:  # u not yet finalised: cap its bucket
                        max_value = max(_inv_bucket[u] - 1, k)
                        _bucket[_inv_bucket[u]].remove(u)
                        if(max_value not in _bucket):
                            _bucket[max_value] = set()
                        _bucket[max_value].add(u)
                        _inv_bucket[u] = max_value
        self.llb = {}
        _min_llb = math.inf
        # Local lower bound computation
        for v in self.node_iterator():
            _max = -math.inf
            for e_id in self.inc_dict[v]:
                _max = max(_max, len(self.get_edge_byindex(e_id)) - 1)
            self.llb[v] = max(_max, self.glb)
            _min_llb = min(_min_llb, self.llb[v])
        self.sorted_ub_set.add(_min_llb - 1)  # one value below the smallest llb
        self.sorted_ub_set = sorted(list(self.sorted_ub_set), reverse=True)
        del _bucket      # free the auxiliary structures
        del _inv_bucket
def get_init_nbr(self, v):
return self.init_nbr[v]
def get_init_nbrlen(self, v):
return self.init_nbrsize[v]
def add_edge(self, e_id, e_nodes):
""" Add an edge to the hypergraph. It does not check repeated edge."""
_len = len(e_nodes)
self.e_indices[e_id] = (self.i, self.i + _len)
for v in e_nodes:
self.e_nodes.append(v)
if v not in self.inc_dict:
self.inc_dict[v] = set() # create incident edge entry for v
self.init_nodes.append(v)
self.inc_dict[v].add(e_id) # incident edge update
self.degree_dict[v] = self.degree_dict.get(v, 0) + 1 # degree update
self.i += _len
def get_stronglyinduced_edgeIds(self, vertex_list):
""" Returns the list of edge_ids strongly induced by vertex_list"""
temp_H = self.strong_subgraph(vertex_list)
assert isinstance(temp_H, Hypergraph)
return [e_id for e_id in temp_H.e_indices.keys()]
def del_edge(self, e_id):
""" Delete an edge given a set of nodes e."""
if e_id not in self.e_indices: # If edge e_id does not exist in the hypergraph, do nothing.
return
for v in self.get_edge_byindex(e_id):
self.inc_dict[v].remove(e_id) # Remove e_id from v's incident edge list.
if len(self.inc_dict[v])==0: # After deleting e_id there is no edge incident on v anymore. Then del v
del self.inc_dict[v]
self.degree_dict[v] -= 1 # decrease degree of v by 1
del self.e_indices[e_id]
def hasEdge(self, e_nodes):
Exists = False
for v in e_nodes:
if v in self.inc_dict:
for e_id in self.inc_dict[v]:
if tuple(self.get_edge_byindex(e_id)) == e_nodes:
Exists = True
break
if Exists:
break
return Exists
def sample_v_preferential_attachment(self, num_sample):
"""
Sample num_sample vertices following degree distribution.
Algorithm: Reservoir sampling (WeightedReservoir-Chao wikipedia: https://en.wikipedia.org/wiki/Reservoir_sampling)
"""
# WeightedReservoir-Chao (S[1..n], R[1..k])
WSum = 0
R = []
#fill the reservoir array
k = 0
n = len(self.init_nodes)
# print(self.init_nodes,' num_sample: ',num_sample)
for i in range(num_sample):
R.append(self.init_nodes[i])
WSum = WSum + self.degree(self.init_nodes[i])
k+=1
# print(R)
for i in range(k, n):
# print(i,'-', R)
WSum = WSum + self.degree(self.init_nodes[i])
p = self.degree(self.init_nodes[i])*1.0 / WSum # probability for this item
j = random.random(); # uniformly random between 0 and 1
if j <= p: # select item according to probability
R[random.randint(0,k-1)] = self.init_nodes[i] #uniform selection in reservoir for replacement
return R
def get_edge_byindex(self, e_id):
""" Return edge by edge_id """
e_start, e_end = self.e_indices[e_id]
return self.e_nodes[e_start:e_end]
def edge_iterator(self):
""" returns: iterator """
for e_id in self.e_indices.keys():
yield self.get_edge_byindex(e_id)
def edge_eid_iterator(self):
""" returns: iterator """
for e_id in self.e_indices.keys():
yield (e_id, self.get_edge_byindex(e_id))
def init_node_iterator(self):
"""
Returns: iterator of initial nodes.
Faster than node_iterator() and deterministic.
"""
for v in self.init_nodes:
yield v
def node_iterator(self):
""" returns: iterator """
for v_id in self.inc_dict.keys():
yield v_id
def nodes(self):
""" returns: list of vertices """
return [v for v in self.node_iterator()]
def edges(self):
""" returns: list of edges (each edge is a list of vertex ids) """
return [e for e in self.edge_iterator()]
def degree(self, u):
""" returns: integer """
# assert (len(self.inc_dict.get(u,[])) == self.degree_dict[u])
return self.degree_dict.get(u, 0)
def dim(self, e):
""" returns: integer """
return len(e) - 1
def neighbors(self, v):
return [u for u in self.neighbors_iterator(v)]
def get_number_of_nbrs(self, u):
return len(self.neighbors(u))
def compute_degree(self, u):
return len(self.inc_dict[u])
def degree_density(self):
# sum_deg = 0
# for u in self.inc_dict:
# sum_deg += self.compute_degree(u)
# return sum_deg/self.get_N()
return self.get_M()*1.0 / self.get_N()
def neighbors_iterator(self, v):
""" Returns the set of neighbours of v.
implements a traversal from vertex v to each of its neighbours in contrast to set in neighbors().
It also returns an iterator. So it avoids creating the neighborhood list explicitely.
Overall complexity: O(d(v) * |e_max|), where e_max = largest hyperedge
"""
incident_edges = self.inc_dict.get(v, None) # {O(1)}
if incident_edges:
visited_dict = {}
for e_id in incident_edges: # { O(d(v)) }
for u in self.get_edge_byindex(e_id): # { O(|e|)}
if u != v:
if not visited_dict.get(u, False):
visited_dict[u] = True
yield u
else:
return
def removeV_transform(self, v, verbose=False):
""" removes input vertex v and transforms this hypergraph into a sub-hypergraph strongly induced by V\{v}
Here we do not maintain nbr and len_nbr dictionaries.
"""
incident_eids = set() # set of edge_ids incident on v
for e_id in self.inc_dict.get(v, []):
incident_eids.add(e_id)
if verbose:
print("incident edges on ",v," : ", incident_eids)
# Update incident edges and degree of every nbr of v
for u in self.neighbors_iterator(v): # traverse over neighbours of v
if verbose:
print('traversing nbr: ',u)
self.inc_dict[u] -= incident_eids # remove v's incident edges from u's incident edges.
self.degree_dict[u] = len(self.inc_dict.get(u, []))
if v in self.inc_dict:
del self.inc_dict[v]
if v in self.degree_dict:
del self.degree_dict[v]
# TODO
# def addV_transform(self, S):
# pass
# prev_v = prev_v.union(S)
# for every edge e:
# if e \subset prev_v:
# if e.id not already exist:
# add (e)
def adV_transform(self, S):
"""
S: is a set of vertices
We assume the current hypergraph is already a strong subgraph. meaning the inc_dict and e_indices are maintained.
"""
self.prev_V = self.prev_V.union(S)
for e in self.init_eids: # But this will not give me all the edge_id's
start_e,end_e = self.init_eids[e]
edge = self.e_nodes[start_e:end_e]
if set(edge).issubset(self.prev_V):
self.e_indices[e] = self.init_eids[e]
for u in edge:
self.inc_dict[u].add(e)
# def removeV_transform(self, v, verbose=False):
# """ removes input vertex v and transforms this hypergraph into a sub-hypergraph strongly induced by V\{v} """
#
# # inc_on_v = self.inc_dict.get(v,[]) # List of incident edges(v) { o(1) }
# # if len(inc_on_v): # If 0 incident edges, the hypergraph won't need any update.
# # for e_id in inc_on_v: # For every edges incident on v { O(d(v)) }
# # spos_e, epos_e = self.e_indices[e_id] # { o(1) }
# # del self.e_indices[e_id] # Delete that edge (to be precise, we remove its index only) { o(1) }
# # # Check if the neighbours of v in e needs to be removed as well.
# # for nbrv_in_e in self.e_nodes[spos_e:epos_e]: # For every u in e { O(|e_max|) < O(N) }
# # if nbrv_in_e in self.inc_dict:
# # if len(self.inc_dict[nbrv_in_e]) <= 1: # If u has no edge except e incident on it
# # del self.inc_dict[nbrv_in_e] # Remove u
# # else:
# # self.inc_dict[nbrv_in_e].remove(e_id) # Otherwise, update the incidence entry of u
#
# incident_edges = self.inc_dict.get(v, None) # {O(1)}
# if (verbose):
# print(incident_edges)
# if incident_edges:
# visited_dict = {}
# # isolated_vertices = set([v]) # the set of nbrs of v who became isolated after removal of v. They can not be in any edge.
# for e_id in incident_edges: # For every incident edge e { O(d(v)) }
# if (verbose):
# print('incident edge: ', e_id)
# if e_id in self.e_indices:
# e = self.get_edge_byindex(e_id)
# for u in e: # For every node u \in e { O(|e|)}
# if u != v: # If u != v
# if (verbose):
# print('visiting ', u)
# if not visited_dict.get(u, False): # { O(1) }
# visited_dict[u] = True # { O(1) }
# # self.nbr[u].remove(v) # v is no more a neighbour of u, because u will be deleted
# # if len(self.inc_dict[u]) <= 1: # If u has no edge except e incident on it {O (d(nbr(v)))}
# # So a neighbour of the vertex we want to delete has degree = 1
# if self.degree_dict[u] <= 1:
# if (verbose):
# print('removing incident edge entry of ', u)
# del self.inc_dict[u] # Remove u, because its deg became 0. { O(1) }
#
# else:
# if (verbose):
# print('removing ', e_id, ' from ', u, ' \'s incident edges')
# self.inc_dict[u].remove(
# e_id) # Otherwise, update the incidence entry of u { O (1) }
#
# self.degree_dict[u] -= 1 # degree of u decreases by 1
# if self.degree_dict[u] == 0:
# if (verbose):
# print(u, ' : a 0-degree vertex')
# # isolated_vertices.add(u) # keep track of isolated vertices.
# # self.nbr[v].remove(u) # keep track of non-isolated vertices.
# del self.nbr[u]
# del self.len_nbr_dict[u]
# del self.degree_dict[u]
# set_e = set(e)
# for u in self.nbr[v]:
# if u in self.nbr:
# self.nbr[u] -= set_e # Remove e from nbrs of v.
# if u in self.len_nbr_dict:
# self.len_nbr_dict[u] = len(self.nbr.get(u, set()))
#
# # for u in isolated_vertices:
# # del self.degree_dict[u]
#
# if (verbose):
# print('deleting ', e_id)
# del self.e_indices[e_id]
#
# del self.inc_dict[v]
# del self.nbr[v]
# del self.len_nbr_dict[v]
# del self.degree_dict[v]
# def strong_subgraph(self, vertex_list):
# """ returns: Hypergraph object. """
# H = Hypergraph()
# e_indices = {} # (position, edge_size) of edge e in e_nodes list
# e_nodes = [] # flattened edge list
# inc_dict = {}
# H.i = 0
#
# # print('inc_dict: ',self.inc_dict.items())
# # print('e_indices: ',self.e_indices.items())
# # print('e_nodes: ',self.e_nodes)
#
# for v in vertex_list:
# for e_id in self.inc_dict.get(v, []):
# if e_id not in e_indices:
# e_start, e_end = self.e_indices[e_id]
# flag = True
# e = self.e_nodes[e_start:e_end]
# for u in e:
# if u not in vertex_list:
# flag = False
# break
# if flag:
# e_nodes += e
# _lene = (e_end - e_start)
# e_indices[e_id] = (H.i, H.i + _lene)
# inc_dict[v] = inc_dict.get(v, []) + [e_id]
# H.i += _lene
# else:
# inc_dict[v] = inc_dict.get(v, []) + [e_id]
#
# # print('After: ','inc_dict = ',inc_dict.items(),'\n','e_indices = ',e_indices,'\n',' e_nodes = ',e_nodes)
# H.e_nodes = e_nodes
# H.inc_dict = inc_dict
# H.e_indices = e_indices
# return H
#
# def strong_subgraph2(self, vertex_list):
# """
# Another implementation of strong_subgraph
# returns: Hypergraph object.
# """
# H = Hypergraph()
# e_indices = {} # (position, edge_size) of edge e in e_nodes list
# e_nodes = [] # flattened edge list
# inc_dict = {}
# H.i = 0
#
# # print('inc_dict: ',self.inc_dict.items())
# # print('e_indices: ',self.e_indices.items())
# # print('e_nodes: ',self.e_nodes)
#
# # for v in vertex_list:
# # for e_id in self.inc_dict.get(v,[]):
# # if e_id not in e_indices:
# # e_start, e_end = self.e_indices[e_id]
# # flag = True
# # e = self.e_nodes[e_start:e_end]
# # for u in e:
# # if u not in vertex_list:
# # flag = False
# # break
# # if flag:
# # e_nodes += e
# # _lene = (e_end - e_start)
# # e_indices[e_id] = (H.i, H.i+ _lene)
# # inc_dict[v] = inc_dict.get(v,[])+[e_id]
# # H.i+= _lene
# # else:
# # inc_dict[v] = inc_dict.get(v,[])+[e_id]
#
# for e_id in self.e_indices:
# flag = True
# e = self.get_edge_byindex(e_id)
# for u in e:
# if u not in vertex_list:
# flag = False
# break
# if flag:
# e_start, e_end = self.e_indices[e_id]
# e_nodes += e
# _lene = (e_end - e_start)
# e_indices[e_id] = (H.i, H.i + _lene)
# H.i += _lene
#
# for e_id in e_indices:
# e = self.get_edge_byindex(e_id)
# for u in e:
# inc_dict[u] = inc_dict.get(u, []) + [e_id]
#
# # print('After: ','inc_dict = ',inc_dict.items(),'\n','e_indices = ',e_indices,'\n',' e_nodes = ',e_nodes)
# H.e_nodes = e_nodes
# H.inc_dict = inc_dict
# H.e_indices = e_indices
# return H
def strong_subgraph(self, vertex_list):
""" returns: Hypergraph object. """
H = Hypergraph()
e_indices = {} # (position, edge_size) of edge e in e_nodes list
e_nodes = [] # flattened edge list
inc_dict = {}
H.i = 0
# print('inc_dict: ',self.inc_dict.items())
# print('e_indices: ',self.e_indices.items())
# print('e_nodes: ',self.e_nodes)
for e_id in self.e_indices:
e = self.get_edge_byindex(e_id)
flag = True
for u in e:
if u not in vertex_list:
flag = False
break
if flag:
e_nodes += e
_lene = len(e)
e_indices[e_id] = (H.i, H.i + _lene)
for v in e:
if v not in inc_dict:
inc_dict[v] = set()
if e_id not in inc_dict[v]:
inc_dict[v].add(e_id)
H.degree_dict[v] = H.degree_dict.get(v,0) + 1
H.i += _lene
# else:
# inc_dict[v] = inc_dict.get(v, []) + [e_id]
# print('After: ','inc_dict = ',inc_dict.items(),'\n','e_indices = ',e_indices,'\n',' e_nodes = ',e_nodes)
H.e_nodes = e_nodes
H.inc_dict = inc_dict
H.e_indices = e_indices
return H
def get_hnx_format(self):
import sys
sys.path.append("HyperNetX")
import hypernetx as hnx
_tempH = {}
for e_id in self.e_indices.keys():
_tempH[e_id] = self.get_edge_byindex(e_id)
return hnx.Hypergraph(_tempH)
def get_clique_graph(self):
binary_edges = set()
for e_id in self.e_indices.keys():
e = self.get_edge_byindex(e_id)
# print(e)
if len(e) < 2:
continue
elif len(e) == 2:
y = tuple(sorted(e))
if y not in binary_edges:
binary_edges.add(y)
# else:
# print('not adding ', y)
else:
for x in itertools.combinations(e,2):
y = tuple(sorted(x))
if y not in binary_edges:
binary_edges.add(y)
# else:
# print('(comb) not adding ', y)
scenes = {}
for i, edge in enumerate(binary_edges):
scenes[i] = list(edge)
# print(scenes)
return Hypergraph(scenes)
# def weak_subgraph(self, vertex_list):
# """ returns: Hypergraph object. """
# pass
def get_N(self):
""" Return num of vertices """
return len(self.inc_dict)
def get_M(self):
""" Return num of edges """
return len(self.e_indices)
def get_degree_sequence(self):
""" Return the degree sequence in descending order """
degs = []
for v in self.degree_dict:
degs.append(self.degree_dict[v])
return sorted(degs,reverse = True)
def get_degree_distr(self):
""" Return the degree distribution """
degs = {}
N = self.get_N()
for d in self.degree_dict.values():
degs[d] = degs.get(d,0)+ (1.0/N)
return sorted(degs.items(),reverse = True)
# return sorted(degs.items(), reverse = True, key = lambda x: x[1])
def get_dim_sequence(self):
""" Return the dimension sequence in descending order """
dims = []
for e_start,e_end in self.e_indices.values():
dims.append(e_end - e_start)
return sorted(dims,reverse = True)
def get_dim_distr(self):
""" Return the dimension distribution """
assert isinstance(H, hnx.Hypergraph)
dims = {}
M = self.get_M()
for _dim in self.get_dim_sequence():
dims[_dim] = dims.get(_dim,0)+ (1.0/M)
return sorted(dims.items(),reverse = True)
def get_nbr_sequence(self):
""" Return the sequence nbrhood sizes in descending order """
return sorted(self.init_nbrsize.values(),reverse = True)
def get_nbr_distr(self):
""" Return the distribution of nbr sizes """
nbrs = {}
N = self.get_N()
for nbr in self.init_nbrsize.values():
nbrs[nbr] = nbrs.get(nbr,0) + (1.0/N)
return sorted(nbrs,reverse = True)
def get_degree_stats(self):
""" Return the stats of degrees. """
import pandas as pd
deg_seq = self.get_degree_sequence()
stat = {'mean': None, 'max': None, 'min': None, '25%': None, '50%': None, '75%': None, 'std': None}
_temp = pd.Series(deg_seq).describe()
stat['mean'] = _temp['mean']
stat['std'] = _temp['std']
stat['min'] = _temp['min']
stat['max'] = _temp['max']
stat['25%'] = _temp['25%']
stat['50%'] = _temp['50%']
stat['75%'] = _temp['75%']
return stat
def get_dim_stats(self):
""" Return the stats of dimensions. """
import pandas as pd
dim_seq = self.get_dim_sequence()
stat = {'mean': None, 'max': None, 'min': None, '25%': None, '50%': None, '75%': None, 'std': None}
_temp = pd.Series(dim_seq).describe()
stat['mean'] = _temp['mean']
stat['std'] = _temp['std']
stat['min'] = _temp['min']
stat['max'] = _temp['max']
stat['25%'] = _temp['25%']
stat['50%'] = _temp['50%']
stat['75%'] = _temp['75%']
return stat
def get_nbr_stats(self):
""" Return the stats of neighbourhoods. """
import pandas as pd
nbr_seq = self.get_nbr_sequence()
stat = {'mean': None, 'max': None, 'min': None, '25%': None, '50%': None, '75%': None, 'std': None}
_temp = pd.Series(nbr_seq).describe()
stat['mean'] = _temp['mean']
stat['std'] = _temp['std']
stat['min'] = _temp['min']
stat['max'] = _temp['max']
stat['25%'] = _temp['25%']
stat['50%'] = _temp['50%']
stat['75%'] = _temp['75%']
return stat
def WriteDegreeDist(self, filename = "deg_dist"):
import pickle
with open(filename+'.pickle','wb') as wf:
dict_tosave = self.get_degree_distr()
pickle.dump(dict_tosave, wf, protocol=pickle.HIGHEST_PROTOCOL)
def __str__(self):
return ",".join([str(i) for i in self.edge_iterator()])
def initBiparite(self):
self.vtoEdgeId = {} # v1 => [e1,e2,...]
self.eidtov = {} # e1 => [v1,v2,..]
for eid, edge in self.edge_eid_iterator():
self.eidtov[eid] = list(edge)
for v in edge:
if v not in self.vtoEdgeId:
self.vtoEdgeId[v] = []
self.vtoEdgeId[v].append(eid)
def bipartiteDeleteu(self, u):
for eid in self.vtoEdgeId[u]:
if eid in self.eidtov:
if u in self.eidtov[eid]:
self.eidtov[eid].remove(u)
del self.vtoEdgeId[u]
    def getDist2nbr(self, u):
        # Collect the distance-1 neighbourhood of u via the bipartite maps:
        # every vertex that shares at least one edge with u (u itself excluded),
        # returned sorted. Missing u / missing edgo ids fall back to [] via .get().
        s = set()
        for eid in self.vtoEdgeId.get(u,[]):
            for v in self.eidtov.get(eid,[]):
                if v!=u:
                    s.add(v)
        # NOTE(review): the trailing "| 29,577 | ... | py |" text below is
        # dataset-export residue fused onto this line, not Python code.
        return sorted(list(s)) | 29,577 | 39.078591 | 136 | py |
vldbsubmission | vldbsubmission-master/python_src/hgDecompose/utils.py | # from hgDecompose.optimizedhgDecompose import HGDecompose
from numpy.core.fromnumeric import mean
import pandas as pd
from hgDecompose.Hypergraph import Hypergraph
import random
import heapq
from hgDecompose.heapdict import heapdict
from disjoint_set import DisjointSet
import pickle
import math
def strong_subgraph(H, vertex_set):
    """Return the strong sub-hypergraph of ``H`` induced by ``vertex_set``.

    A hyperedge is kept only when *all* of its vertices lie in
    ``vertex_set``.

    Parameters
    ----------
    H: Hypernetx Hypergraph
    vertex_set: List/set of vertex label

    Returns
    -------
    Hypernetx Hypergraph object
        The strong sub-hypergraph induced by vertex_set
    """
    import sys
    sys.path.append("HyperNetX")
    import hypernetx as hnx
    assert isinstance(H, hnx.Hypergraph)
    members = vertex_set if isinstance(vertex_set, set) else set(vertex_set)
    kept = {}
    for e_id, e_i in H.incidence_dict.items():
        # Keep an edge only if it is entirely contained in the vertex set.
        if set(e_i).issubset(members):
            kept[e_id] = e_i
    return hnx.Hypergraph(kept)
def get_number_of_nbrs(H, u):
    """Return how many neighbours ``u`` has in hypergraph ``H``.

    Returns 0 when ``H.neighbors(u)`` reports ``None`` (u not in H).
    """
    nbrs = H.neighbors(u)
    return 0 if nbrs is None else len(nbrs)
def get_degree(H, u):
    """Return the degree of ``u`` in ``H``; 0 when the lookup raises.

    The broad except mirrors the original best-effort behaviour: any failure
    from ``H.degree`` (e.g. missing vertex) is treated as degree 0.
    """
    try:
        return H.degree(u)
    except Exception:
        return 0
def get_nbrs(H, u):
    """Return the neighbours of ``u`` in hypergraph ``H``.

    Parameters
    ----------
    H: hypergraph object exposing ``neighbors(u)``
    u: a vertex label

    Returns
    -------
    List
        The neighbours of ``u`` in ``H``. ``[]`` if ``u`` is not in ``H``.
    """
    # FIX: dropped the function-local `import hypernetx` (and the sys.path
    # tweak) — this function never used hnx, yet the import made it fail
    # outright whenever HyperNetX was not installed.
    nbrs = H.neighbors(u)
    if nbrs is None:  # u is not in H
        return []
    return nbrs
def get_hg(dataset):
    """Load a named dataset from disk and return it as a Hypergraph.

    "default" builds a small hard-coded example; the other names are read
    from a comma-separated edge-per-line file.

    NOTE(review): "syn" is accepted by the elif but has no entry in
    dataset_to_filename — selecting it raises KeyError; confirm intended
    path before use.
    """
    H = None
    if(dataset == "default"):
        # Hard-coded toy hypergraph (edge id -> vertex tuple).
        dic = {
            0: ('FN', 'TH'),
            1: ('TH', 'JV'),
            2: ('BM', 'FN', 'JA'),
            3: ('JV', 'JU', 'CH', 'BM'),
            4: ('JU', 'CH', 'BR', 'CN', 'CC', 'JV', 'BM'),
            5: ('TH', 'GP'),
            6: ('GP', 'MP'),
            7: ('MA', 'GP')
        }
        H = Hypergraph(dic)
    elif(dataset in ['enron', "syn", "klay"]):
        # file location
        dataset_to_filename = {
            # real
            "enron" : "../data/datasets/real/Enron.hyp",
            "klay" : "../data/klay_s.hyp"
        }
        # # split by
        # dataset_to_split = {
        #     "enron" : ",",
        #     "congress" : ",",
        #     "contact" : ",",
        #     "dblp": ",",
        #     "amazon": ",",
        #     "syn" : ",",
        #     "bin_1" : ",",
        #     "bin_2" : ",",
        #     "bin_4" : ",",
        #     "bin_5" : ",",
        # }
        dic = {}
        # read from file: one comma-separated hyperedge per line
        # (line[:-1] strips the trailing newline).
        with open(dataset_to_filename[dataset]) as f:
            idx = 0
            for line in f:
                edge = tuple(line[:-1].split(','))
                dic[idx] = edge
                idx+=1
                # if idx%10000 == 0:
                #     print(idx)
        H = Hypergraph(dic)
    else:
        raise RuntimeError(dataset + " is not defined or implemented yet")
    return H
def get_random_hg(n = 10, m = 5, edge_size_ub = None, seed = 1):
    """
    Returns a random hypergraph with n vertices and m edges.
    Generate V = {1,2,..,n}
    Generate E = Take m randomly chosen subsets of V.
    If edge_size_ub is not None, assume every hyperedge can have at most edge_size_ub vertices in it.
    """
    random.seed(seed)
    V = set(range(1, n+1))
    # FIX: random.sample() no longer accepts sets (removed in Python 3.11);
    # sample from a deterministic ordering instead.
    population = sorted(V)
    Edict = {}
    if m == 1:
        # FIX: guard against edge_size_ub=None here — the original compared
        # `n <= edge_size_ub` unconditionally and raised TypeError for the
        # default arguments with m == 1.
        if edge_size_ub is None or n <= edge_size_ub:
            e = tuple([str(i) for i in range(1, n+1)])
            Edict[m] = e
            return Hypergraph(Edict)
    while m:
        if edge_size_ub is None:
            edge_sz = random.randint(2, n)  # 2 because we do not want singletons
        else:
            edge_sz = random.randint(2, edge_size_ub)  # 2 because we do not want singletons
        e = random.sample(population, edge_sz)
        Edict[m] = tuple([str(v) for v in sorted(list(e))])
        m = m - 1
    return Hypergraph(Edict)
def get_random_graph(n = 10, m = 5, seed = 1):
    """
    Returns a random simple graph (a 2-uniform hypergraph) with n vertices
    and m distinct edges, generated with the given seed.
    """
    random.seed(seed)
    # FIX: random.sample() no longer accepts sets (removed in Python 3.11).
    population = list(range(1, n + 1))
    Edict = {}
    exists = {}
    while m:
        e = random.sample(population, 2)
        # Normalise the pair to (small, large) so duplicates are detected.
        if e[1] < e[0]:
            e[0], e[1] = e[1], e[0]
        if not exists.get((e[0], e[1]), False):
            exists[(e[0], e[1])] = True
            Edict[m] = (e[0], e[1])
            m = m - 1
    return Hypergraph(Edict)
def get_basic_hg(dataset):
    """Load a named dataset as a BasicHypergraph.

    Same shape as get_hg() but returns the lighter-weight
    hgDecompose.BasicHypergraph implementation; file paths here are
    relative to the project root (no leading "../").
    """
    from hgDecompose.BasicHypergraph import Hypergraph as HypergraphBasic
    H = None
    if(dataset == "default"):
        # Hard-coded toy hypergraph (edge id -> vertex tuple).
        dic = {
            0: ('FN', 'TH'),
            1: ('TH', 'JV'),
            2: ('BM', 'FN', 'JA'),
            3: ('JV', 'JU', 'CH', 'BM'),
            4: ('JU', 'CH', 'BR', 'CN', 'CC', 'JV', 'BM'),
            5: ('TH', 'GP'),
            6: ('GP', 'MP'),
            7: ('MA', 'GP')
        }
        H = HypergraphBasic(dic)
    elif(dataset in ['enron', "syn", "bin_1", "bin_2", "bin_4", "bin_5", "4_sim", "5_sim", "pref", "pref_20000","pref_40000","pref_60000","pref_80000","pref_100000", "congress", "contact","dblp", "amazon"]):
        # file location
        dataset_to_filename = {
            # real
            "enron" : "data/datasets/real/Enron.hyp",
            "congress" : "data/datasets/real/congress-bills.hyp",
            "contact" : "data/datasets/real/contact-primary-school.hyp",
            "dblp": "data/datasets/real/DBLP.hyp",
            "amazon": "data/datasets/real/amazon-reviews.hyp",
            # synthetic
            "syn" : "data/datasets/synthetic/syn.hyp",
            "bin_1" : "data/datasets/synthetic/binomial_5_100_4_0.200000_sample_1_iter_1.txt",
            "bin_2" : "data/datasets/synthetic/binomial_5_500_4_0.200000_sample_2_iter_1.txt",
            "bin_4" : "data/datasets/synthetic/binomial_5_100_3_0.200000_sample_4_iter_1.txt",
            "bin_5" : "data/datasets/synthetic/binomial_5_500_3_0.200000_sample_5_iter_1.txt",
            "4_sim": "data/datasets/synthetic/4simplex.hyp",
            "5_sim": "data/datasets/synthetic/5simplex.hyp",
            "pref": "data/datasets/synthetic/pref_1000000_3_1.hyp",
            "pref_20000": "data/datasets/synthetic/pref_20000_3_1_simple.hyp",
            "pref_40000": "data/datasets/synthetic/pref_40000_3_1_simple.hyp",
            "pref_60000": "data/datasets/synthetic/pref_60000_3_1_simple.hyp",
            "pref_80000": "data/datasets/synthetic/pref_80000_3_1_simple.hyp",
            "pref_100000": "data/datasets/synthetic/pref_100000_3_1_simple.hyp"
        }
        # # split by
        # dataset_to_split = {
        #     "enron" : ",",
        #     "congress" : ",",
        #     "contact" : ",",
        #     "dblp": ",",
        #     "amazon": ",",
        #     "syn" : ",",
        #     "bin_1" : ",",
        #     "bin_2" : ",",
        #     "bin_4" : ",",
        #     "bin_5" : ",",
        # }
        dic = {}
        # read from file: one comma-separated hyperedge per line.
        with open(dataset_to_filename[dataset]) as f:
            idx = 0
            for line in f:
                edge = tuple(line[:-1].split(','))
                dic[idx] = edge
                idx+=1
                # if idx%10000 == 0:
                #     print(idx)
        H = HypergraphBasic(dic)
    else:
        raise RuntimeError(dataset + " is not defined or implemented yet")
    return H
def writeHypergraph(edge_dict, out_file):
    """Write one comma-separated hyperedge per line to ``out_file``.

    Edges are emitted in ``edge_dict`` insertion order.
    """
    with open(out_file, 'w') as wf:
        for edge in edge_dict.values():
            wf.write(",".join(map(str, edge)) + "\n")
def writeHypergraphHg(hg, out_file):
    """Write ``hg``'s hyperedges to ``out_file``, one comma-separated edge
    per line, in ascending edge-id order."""
    assert isinstance(hg, Hypergraph)
    with open(out_file, 'w') as wf:
        for eid in sorted(hg.e_indices.keys()):
            members = hg.get_edge_byindex(eid)
            wf.write(",".join(str(node) for node in members) + "\n")
def get_N(H):
    """Return the number of vertices of ``H``."""
    node_view = H.nodes
    return len(node_view)
def get_M(H):
    """Return the number of hyperedges of ``H``."""
    edge_view = H.edges
    return len(edge_view)
def get_degree_sequence(H):
    """Return the vertex degree sequence of ``H`` in descending order.

    BUG FIX: removed `assert isinstance(H, hnx.Hypergraph)` — `hnx` is never
    imported at module scope (only inside other helpers), so the assert
    raised NameError on every call.
    """
    return sorted((H.degree(v) for v in H.nodes), reverse=True)
def get_degree_distr(H):
    """Return the degree distribution of ``H`` as (degree, fraction) pairs,
    sorted by degree descending.

    BUG FIX: removed the `hnx` isinstance assert, which raised NameError
    because `hnx` is not imported at module scope.
    """
    degs = {}
    N = get_N(H)
    for v in H.nodes:
        d = H.degree(v)
        degs[d] = degs.get(d, 0) + (1.0 / N)
    return sorted(degs.items(), reverse=True)
def get_dim_sequence(H):
    """Return the edge-size (dimension) sequence of ``H``, descending.

    Sizes are computed as ``H.dim(e) + 1`` following HyperNetX's convention
    that dim is one less than the edge's cardinality.
    BUG FIX: removed the NameError-raising `hnx` isinstance assert.
    """
    return sorted((H.dim(e) + 1 for e in H.edges), reverse=True)
def get_dim_distr(H):
    """Return the dimension distribution of ``H`` as (size, fraction) pairs,
    sorted by size descending.

    BUG FIX: removed the NameError-raising `hnx` isinstance assert
    (`hnx` is not imported at module scope).
    """
    dims = {}
    M = get_M(H)
    for _dim in get_dim_sequence(H):
        dims[_dim] = dims.get(_dim, 0) + (1.0 / M)
    return sorted(dims.items(), reverse=True)
def get_nbr_sequence(H):
    """Return the neighbourhood-size sequence of ``H``, descending.

    BUG FIX: removed the NameError-raising `hnx` isinstance assert
    (`hnx` is not imported at module scope).
    """
    return sorted((get_number_of_nbrs(H, v) for v in H.nodes), reverse=True)
def get_nbr_distr(H):
    """Return the distinct neighbourhood sizes of ``H``, sorted descending.

    BUG FIX: removed the NameError-raising `hnx` isinstance assert.
    NOTE(review): like its class-method twin, this returns only the dict
    *keys* — the computed fractions are discarded, unlike get_degree_distr.
    Confirm against callers before changing the return shape.
    """
    nbrs = {}
    N = get_N(H)
    for nbr in get_nbr_sequence(H):
        nbrs[nbr] = nbrs.get(nbr, 0) + (1.0 / N)
    return sorted(nbrs, reverse=True)
def get_degree_stats(H):
    """Return summary statistics (mean/max/min/quartiles/std) of ``H``'s
    degree sequence as a plain dict.

    BUG FIX: removed the NameError-raising `hnx` isinstance assert
    (`hnx` is not imported at module scope).
    """
    desc = pd.Series(get_degree_sequence(H)).describe()
    return {key: desc[key]
            for key in ('mean', 'max', 'min', '25%', '50%', '75%', 'std')}
def get_dim_stats(H):
    """Return summary statistics (mean/max/min/quartiles/std) of ``H``'s
    edge-size (dimension) sequence as a plain dict.

    BUG FIX: removed the NameError-raising `hnx` isinstance assert
    (`hnx` is not imported at module scope).
    """
    desc = pd.Series(get_dim_sequence(H)).describe()
    return {key: desc[key]
            for key in ('mean', 'max', 'min', '25%', '50%', '75%', 'std')}
def get_nbr_stats(H):
    """Return summary statistics (mean/max/min/quartiles/std) of ``H``'s
    neighbourhood-size sequence as a plain dict.

    BUG FIX: removed the NameError-raising `hnx` isinstance assert
    (`hnx` is not imported at module scope).
    """
    desc = pd.Series(get_nbr_sequence(H)).describe()
    return {key: desc[key]
            for key in ('mean', 'max', 'min', '25%', '50%', '75%', 'std')}
import numpy as np
def quickselect_median(l, pivot_fn=random.choice):
    """Return the median of ``l`` via quickselect (O(n) average).

    Odd-length input returns the middle element; even-length input returns
    the truncated-int average of the two middle elements (preserving the
    original's ``int(0.5 * (...))`` behaviour).

    BUG FIX: the even branch used true division (`len(l) / 2`), producing
    *float* selection indices under Python 3; switched to ``//``.
    """
    n = len(l)
    if n % 2 == 1:
        return quickselect(l, n // 2, pivot_fn)
    lo = quickselect(l, n // 2 - 1, pivot_fn)
    hi = quickselect(l, n // 2, pivot_fn)
    return int(0.5 * (lo + hi))
def quickselect(l, k, pivot_fn):
    """
    Select the kth element in l (0 based)
    :param l: List of numerics
    :param k: Index
    :param pivot_fn: Function to choose a pivot, defaults to random.choice
    :return: The kth element of l
    """
    if len(l) == 1:
        assert k == 0
        return l[0]
    pivot = pivot_fn(l)
    # Three-way partition around the pivot in a single pass.
    below, equal, above = [], [], []
    for el in l:
        if el < pivot:
            below.append(el)
        elif el > pivot:
            above.append(el)
        else:
            equal.append(el)
    if k < len(below):
        return quickselect(below, k, pivot_fn)
    if k < len(below) + len(equal):
        # Landed inside the run of pivot-equal values.
        return pivot
    return quickselect(above, k - len(below) - len(equal), pivot_fn)
# https://towardsdatascience.com/fastest-way-to-calculate-h-index-of-publications-6fd52e381fee
# Expert algorithm derived from wiki https://en.wikipedia.org/wiki/H-index
def operator_H(citations):
    """H-operator (classic h-index): the largest h such that at least h
    entries of ``citations`` are >= h."""
    ranks = np.arange(1, len(citations) + 1)
    # Sort descending, then intersect counts with their 1-based rank.
    desc = -np.sort(-np.asarray(citations))
    return np.max(np.minimum(desc, ranks))  # element-wise minimum
def par_operator_H(args):
    """Pool-friendly wrapper around the H-operator.

    ``args`` is a ``(node, citations)`` pair; returns ``(node, h_index)``.
    """
    node, citations = args
    ranks = np.arange(1, len(citations) + 1)
    desc = -np.sort(-np.asarray(citations))  # descending order
    return (node, np.max(np.minimum(desc, ranks)))
def operator_H_new(citations):
    """Linear-time h-index via counting (no sort).

    ``s[i]`` counts entries equal to min(i, n); scanning candidate h values
    from high to low, the answer is the largest i whose running count of
    papers with >= i citations reaches i.

    BUG FIX: the scan previously started at ``n - 1`` and therefore never
    added the ``s[n]`` bucket, undercounting heavily-cited inputs — e.g. it
    returned 0 for [1] and 1 for [3, 0, 6, 1, 5] (correct answers 1 and 3).
    """
    n = len(citations)
    s = [0] * (n + 1)
    for c in citations:
        s[min(n, c)] += 1
    total = 0
    for i in range(n, -1, -1):  # start at n so s[n] is included
        total += s[i]
        if total >= i:
            return i
    return 0
def operator_H_quicksort(citations):
    # NOTE(review): experimental divide-and-conquer h-index. Known issues:
    # - leftover debug `print` below;
    # - the median *value* is used as a slice index (`citations[:median]`),
    #   which is only meaningful for permutation-like inputs — TODO confirm;
    # - recursion delegates to operator_H_new on a slice, which does not in
    #   general reconstruct the h-index of the full list.
    # Kept byte-identical pending a decision on intent.
    len_citations = len(citations)
    median = quickselect_median(citations)
    print(citations, median)
    if(len_citations % 2 == 1):
        if(median == (len_citations - 1) / 2):
            return median
        elif(median > (len_citations - 1) / 2):
            return operator_H_new(citations[:median])
        elif(median < (len_citations - 1) / 2):
            return operator_H_new(citations[median:])
    else:
        # When len is even, we may not identify median correctly. median([1, 2]) = 1 but median([1,1,2,2]) = 1 raises error
        if(median == len_citations / 2):
            return median
        elif(median > len_citations / 2):
            return operator_H_new(citations[:median])
        elif(median < len_citations / 2):
            return operator_H_new(citations[median:])
def operator_H_priority_queue(citations):
    """H-index via a max-heap: pop the largest counts and extend h while the
    i-th largest count is still >= i."""
    negated = [-c for c in citations]  # heapq is a min-heap, so negate
    heapq.heapify(negated)  # O(n)
    h = 0
    rank = 1
    while negated:
        if -heapq.heappop(negated) >= rank:
            h = rank
            rank += 1
        else:
            break
    return h
# memory profile
def memory_usage_psutil():
    """Return the resident-set memory of the current process, in MB."""
    import psutil, os
    proc = psutil.Process(os.getpid())
    return proc.memory_info().rss / float(2 ** 20)
def check_connectivity(hg):
    """Print whether ``hg`` is connected.

    Unions every vertex with every co-member of its incident edges via a
    DisjointSet; the hypergraph is connected iff one set remains.
    Vertices already absorbed into a set (`v in ds`) are skipped as an
    optimisation.
    """
    ds = DisjointSet()
    for v in hg.nodes():
        if v in ds:
            continue
        else:
            for e in hg.inc_dict[v]:
                # print(hg.get_edge_byindex(e))
                for u in hg.get_edge_byindex(e):
                    ds.union(v,u)
    # print('-----')
    # print(list(ds.itersets()),'\n------')
    print('Is connected: ', len(list(ds.itersets()))==1)
    # print(list(ds.itersets()))
def component_sz(v, hg):
    """Return the size of the connected component containing ``v`` in ``hg``.

    Plain BFS over the incidence structure (vertex -> incident edge ids ->
    edge members). Fixes vs. the original:
    * removed the DisjointSet whose unions were built but never read back;
    * removed the duplicated re-enqueue branch — once a vertex is marked
      traversed the second `if not traversed[u]` could never fire.
    """
    queue = [v]
    traversed = {v: True}
    while len(queue):
        cur = queue.pop(0)
        for e in hg.inc_dict[cur]:
            for u in hg.get_edge_byindex(e):
                if u not in traversed:
                    traversed[u] = True
                    queue.append(u)
    return len(traversed)
def avg_shortest_pathlen(source, hg, number_of_targets, V, constant_M, verbose = True):
    """ Dijkstra's algorithm on hypergraph """
    # Single-source shortest paths where every hyperedge hop costs 1, then
    # the mean distance to `number_of_targets` vertices sampled (with
    # replacement) from V; unreachable targets contribute `constant_M`.
    # V = list(hg.inc_dict.keys())
    dist = {}
    traversed = {}
    for u in V:
        dist[u] = math.inf
        traversed[u] = False
    # for u in random.choices(V,k = number_of_targets):
    #     # Compute number of edges in between u~v
    #     dist[u] = math.inf
    dist[source] = 0
    heap = heapdict()
    heap[source] = 0
    traversed[source] = True
    while len(heap):
        v, dist_v = heap.popitem()
        for e in hg.inc_dict[v]:
            edge = hg.get_edge_byindex(e)
            for u in edge:
                # NOTE(review): only `source` is ever marked traversed, so
                # this guard effectively just skips the source — confirm
                # whether neighbours were meant to be marked too.
                if not traversed[u]:
                    dist_root = dist_v + 1
                    if dist_root < dist[u]:
                        dist[u] = dist_root
                        heap[u] = dist_root
    # if (verbose):
    #     print('shortest paths: ',source,' = ')
    #     print(dist)
    # dists = [ dist[u] for u in random.choices(V,k = number_of_targets) ]
    dists = [0]*number_of_targets
    i = 0
    while i < number_of_targets:
        u = random.choice(V)
        # `is not math.inf` relies on math.inf being a singleton object.
        if dist[u] is not math.inf:
            dists[i] = dist[u]
        else:
            dists[i] = constant_M
        i+=1
    return np.mean(dists)
# while i<number_of_targets:
# u = random.choice(V)
# if dist[u] is not math.inf:
# dists[i] = dist[u]
# i+=1
# return np.mean(dists)
def save_dict(dict, fname='tests/tmp/temp.pkl'):
    """Pickle ``dict`` to ``fname`` using protocol 4."""
    print('Saving dictionary to: ', fname)
    out = open(fname, 'wb')
    try:
        pickle.dump(dict, out, protocol=4)
    finally:
        out.close()
def load_dict(fname='tests/tmp/temp.pkl'):
    """Unpickle and return the dictionary stored at ``fname``."""
    print('Loading dictionary from: ', fname)
    with open(fname, 'rb') as handle:
        return pickle.load(handle)
def save_dictascsv(dict,fname = 'tests/tmp/temp.csv'):
    """ Save a dictionary """
    # Writes one "key,value" row per entry, in ascending key order.
    print('Saving dictionary to: ',fname)
    with open(fname, 'w') as handle:
        for k,val in sorted(list(dict.items())):
            # NOTE(review): the trailing "| 19,330 | ... | py |" text below is
            # dataset-export residue fused onto this line, not Python code.
            handle.write(str(k)+','+str(val)+'\n') | 19,330 | 29.394654 | 207 | py |
vldbsubmission | vldbsubmission-master/python_src/hgDecompose/optimizedhgDecompose.py | from json.encoder import INFINITY
from time import time
import math
from hgDecompose.Hypergraph import Hypergraph
from copy import deepcopy
from multiprocessing import Pool
from hgDecompose.utils import operator_H, par_operator_H
from hgDecompose.heapdict import heapdict
import pandas as pd
# from tests.verify_kcore import *
class HGDecompose():
    def __init__(self):
        """Initialise the decomposition result and all profiling counters."""
        # self.bucket = {}
        self.core = {}  # final core number per vertex
        # self._node_to_num_neighbors = {} # inverse bucket
        self._node_to_degree = {}
        # --- timing / counting instrumentation, filled in by the algorithms ---
        self.execution_time = 0
        self.bucket_update_time = 0
        self.neighborhood_call_time = 0
        self.degree_call_time = 0
        self.num_neighborhood_computation = 0
        self.num_degree_computation = 0
        self.num_bucket_update = 0
        self.subgraph_time = 0
        self.num_subgraph_call = 0
        self.init_time = 0
        self.loop_time = 0
        self.inner_iteration = 0
        self.total_iteration = 0
        self.core_correct_time = 0
        self.h_index_time = 0
        self.max_n = 0 # For #iterations vs dataset barplot
        self.core_correctionvol_n = [] # core_corrections volume per iteration => Ammount of core_correction done. => Relation with runtime
        self.core_correction_volume = 0 # For core_correction volume vs dataset plot
        self.reduction_hhat_n = [] # [ hhat^{n-1} - hhat^{n}, for n \in [1, tau] ] => Convergence plot.
    def preprocess(self):
        """Hook for pre-decomposition preparation; intentionally a no-op."""
        pass
    def naiveNBR(self, H, verbose = True):
        """Peeling-based neighbourhood-core decomposition of ``H``.

        Buckets vertices by neighbourhood size, then repeatedly removes a
        vertex from the lowest non-empty bucket k, assigns it core number k,
        deletes it from ``H`` (removeV_transform), and re-buckets its
        neighbours. Results land in ``self.core``; timing counters set up in
        ``__init__`` are updated throughout. Mutates ``H`` in place.
        """
        start_execution_time = time()
        num_nodes = 0
        _node_to_num_neighbors = {}
        bucket = {}
        # Initial bucket fill-up
        start_init_time = time()
        for node in H.init_node_iterator():
            len_neighbors = H.get_init_nbrlen(node)
            _node_to_num_neighbors[node] = len_neighbors
            if len_neighbors not in bucket:
                bucket[len_neighbors] = set()
            bucket[len_neighbors].add(node)
            num_nodes += 1
        self.init_time = time() - start_init_time
        if(verbose):
            # print("\n---------- Initial neighbors -------")
            # for node in H.nodes():
            #     print(node, H.neighbors(node))
            # print()
            print("\n---------- Initial bucket -------")
            print(bucket)
            print()
        start_loop_time = time()
        # Peel buckets in increasing k; a bucket may refill while peeling.
        for k in range(1, num_nodes + 1):
            while len(bucket.get(k, [])) != 0:
                # print(k)
                v = bucket[k].pop() # get first element in the
                if(verbose):
                    print("k:", k, "node:", v)
                self.core[v] = k
                start_neighborhood_call = time()
                nbr_v = H.neighbors(v)
                self.neighborhood_call_time += time() - start_neighborhood_call
                self.num_neighborhood_computation += 1
                start_subgraph_time = time()
                H.removeV_transform(v, False)
                # H.removeV_transform2(v,verbose)
                self.subgraph_time += time() - start_subgraph_time
                self.num_subgraph_call += 1
                # enumerating over all neighbors of v
                for u in nbr_v:
                    self.inner_iteration += 1
                    self.total_iteration +=1
                    if (verbose):
                        print("node_to_num_neighbours: ",_node_to_num_neighbors)
                        print("Considering neighbor", u)
                    start_neighborhood_call = time()
                    len_neighbors_u = H.get_number_of_nbrs(u)
                    self.neighborhood_call_time += time() - start_neighborhood_call
                    self.num_neighborhood_computation += 1
                    # u can never drop below the current peel level k.
                    max_value = max(len_neighbors_u, k)
                    if(verbose):
                        print("max core between", k, 'and', len_neighbors_u, "is ", max_value)
                        print("The location of", u, "is updated from", _node_to_num_neighbors[u], "to", max_value)
                    # Move u to new location in bucket
                    start_bucket_update = time()
                    prev_idx = _node_to_num_neighbors[u]
                    bucket[prev_idx].remove(u)
                    if max_value not in bucket:
                        bucket[max_value] = set()
                    bucket[max_value].add(u)
                    self.num_bucket_update += 1
                    self.bucket_update_time += time() - start_bucket_update
                    _node_to_num_neighbors[u] = max_value
                    if(verbose):
                        print("-------- Updated bucket ---------")
                        print(bucket)
                        print()
        # print(self.core)
        self.loop_time = time() - start_loop_time
        self.execution_time = time() - start_execution_time
        if (verbose):
            print("\n\nOutput")
            print(self.core)
    def naiveDeg(self, H, verbose = True):
        """Peeling-based degree-core (k-core) decomposition of ``H``.

        Same peeling scheme as naiveNBR but buckets vertices by degree
        instead of neighbourhood size. Results land in ``self.core``;
        mutates ``H`` in place via removeV_transform.
        """
        assert isinstance(H,Hypergraph)
        start_execution_time = time()
        bucket = {}
        nodes = H.init_nodes
        num_nodes = len(nodes)
        # Initial bucket fill-up
        max_degree = -1
        for node in nodes:
            degree = H.degree(node)
            if(degree > max_degree):
                max_degree = degree
            self._node_to_degree[node] = degree
            # print(node, neighbors)
            if degree not in bucket:
                bucket[degree] = [node]
            else:
                bucket[degree].append(node)
        if(verbose):
            print("\n---------- Initial neighbors -------")
            for node in H.nodes():
                print(node, H.neighbors(node))
            print()
            print("\n---------- Initial bucket -------")
            print(bucket)
            print()
        # Peel buckets in increasing k; a bucket may refill while peeling.
        for k in range(1, max_degree + 1):
            while len(bucket.get(k, [])) != 0:
                v = bucket[k].pop(0) # get first element in the
                if(verbose):
                    print("k:", k, "node:", v)
                self.core[v] = k
                # temp_nodes = nodes[:]
                # temp_nodes.remove(v)
                nbr_v = H.neighbors(v)
                start_subgraph_time = time()
                # H_temp = H.strong_subgraph(temp_nodes) # Store.... + executation time..
                H.removeV_transform(v, False)
                self.subgraph_time += time() - start_subgraph_time
                self.num_subgraph_call += 1
                # enumerating over all neighbors of v
                for u in nbr_v:
                    if(verbose):
                        print(self._node_to_degree)
                        print("Considering neighbor", u)
                    start_degree_call = time()
                    degree_u = H.degree(u)
                    self.degree_call_time += time() - start_degree_call
                    self.num_degree_computation += 1
                    # How many times is neighborhood computation done? and executation time...
                    # u can never drop below the current peel level k.
                    max_value = max(degree_u, k)
                    if(verbose):
                        print("max core between", k, 'and', degree_u, "is ", max_value)
                        print("The location of", u, "is updated from", self._node_to_degree[u], "to", max_value)
                    # Move u to new location in bucket
                    start_bucket_update = time()
                    bucket[self._node_to_degree[u]].remove(u)
                    if max_value not in bucket:
                        # TODO does code reach here?
                        bucket[max_value] = [u]
                    else:
                        bucket[max_value].append(u)
                    self.num_bucket_update += 1
                    self.bucket_update_time += time() - start_bucket_update
                    # How many times is bucket updated + executation time??? Store...
                    self._node_to_degree[u] = max_value
                    if(verbose):
                        print("-------- Updated bucket ---------")
                        print(bucket)
                        print()
                # nodes = temp_nodes
                # H = H_temp
        # NOTE(review): the trailing "| 8,466 | ... | py |" text below is
        # dataset-export residue fused onto this line, not Python code.
        self.execution_time = time() - start_execution_time | 8,466 | 35.65368 | 140 | py |
vldbsubmission | vldbsubmission-master/python_src/hgDecompose/heapdict.py | try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
def doc(s):
    """Decorator factory: copy the docstring of ``s`` onto the decorated
    function (``s`` may also be a plain string used verbatim)."""
    if hasattr(s, '__call__'):
        s = s.__doc__
    def decorate(g):
        g.__doc__ = s
        return g
    return decorate
class heapdict(MutableMapping):
    """A dict that is also a min-priority queue on its values.

    Each entry is stored as a mutable ``wrapper = [value, key, heap_index]``
    shared between ``self.d`` (key -> wrapper) and ``self.heap`` (array-based
    binary min-heap ordered by value); wrapper[2] is kept in sync with the
    wrapper's position in the heap by _swap.
    """
    __marker = object()
    def __init__(self, *args, **kw):
        self.heap = []
        self.d = {}
        self.update(*args, **kw)
    @doc(dict.clear)
    def clear(self):
        del self.heap[:]
        self.d.clear()
    @doc(dict.__setitem__)
    def __setitem__(self, key, value):
        if key in self.d:
            self.pop(key)
        wrapper = [value, key, len(self)]
        self.d[key] = wrapper
        self.heap.append(wrapper)
        # Restore the heap invariant by sifting the new entry up.
        self._decrease_key(len(self.heap)-1)
    def _min_heapify(self, i):
        # Sift the entry at index i down until both children are larger.
        n = len(self.heap)
        h = self.heap
        while True:
            # calculate the offset of the left child
            l = (i << 1) + 1
            # calculate the offset of the right child
            r = (i + 1) << 1
            if l < n and h[l][0] < h[i][0]:
                low = l
            else:
                low = i
            if r < n and h[r][0] < h[low][0]:
                low = r
            if low == i:
                break
            self._swap(i, low)
            i = low
    def _decrease_key(self, i):
        # Sift the entry at index i up while it is smaller than its parent.
        while i:
            # calculate the offset of the parent
            parent = (i - 1) >> 1
            if self.heap[parent][0] < self.heap[i][0]:
                break
            self._swap(i, parent)
            i = parent
    def _swap(self, i, j):
        # Exchange two heap slots and fix the back-pointers (wrapper[2]).
        h = self.heap
        h[i], h[j] = h[j], h[i]
        h[i][2] = i
        h[j][2] = j
    @doc(dict.__delitem__)
    def __delitem__(self, key):
        # Bubble the wrapper up to the heap root, then pop it off.
        wrapper = self.d[key]
        while wrapper[2]:
            # calculate the offset of the parent
            parentpos = (wrapper[2] - 1) >> 1
            parent = self.heap[parentpos]
            self._swap(wrapper[2], parent[2])
        self.popitem()
    @doc(dict.__getitem__)
    def __getitem__(self, key):
        return self.d[key][0]
    @doc(dict.__iter__)
    def __iter__(self):
        return iter(self.d)
    def popitem(self):
        """D.popitem() -> (k, v), remove and return the (key, value) pair with lowest\nvalue; but raise KeyError if D is empty."""
        wrapper = self.heap[0]
        if len(self.heap) == 1:
            self.heap.pop()
        else:
            # Move the last leaf to the root and sift it down.
            self.heap[0] = self.heap.pop()
            self.heap[0][2] = 0
            self._min_heapify(0)
        del self.d[wrapper[1]]
        return wrapper[1], wrapper[0]
    @doc(dict.__len__)
    def __len__(self):
        return len(self.d)
    def peekitem(self):
        """D.peekitem() -> (k, v), return the (key, value) pair with lowest value;\n but raise KeyError if D is empty."""
        return (self.heap[0][1], self.heap[0][0])
del doc
__all__ = ['heapdict']
| 2,947 | 24.413793 | 130 | py |
vldbsubmission | vldbsubmission-master/python_src/hgDecompose/influence_propagation.py | import random
import numpy as np
from copy import deepcopy
from multiprocessing import Pool
import os
import pickle
from hgDecompose.utils import check_connectivity, component_sz, save_dict, avg_shortest_pathlen
pkl_path = 'sirdata/'
def propagate_for_all_vertices(H, core, num_vertex_per_core=100, top_k=100, p=0.5, num_iterations=100, original_n=None, verbose=True):
    """Run the propagation experiment seeded from vertices of each core.

    Groups vertices by their core number, takes the ``top_k`` largest core
    numbers, samples up to ``num_vertex_per_core`` seeds per core (with
    replacement when the core is large), and records the output of
    ``propagate2`` for each seed.

    NOTE(review): ``propagate2`` is defined elsewhere in this module (not in
    this view) — presumably a SIR-style spread simulation; confirm its
    return shape before consuming ``result``.
    """
    result = {}  # Entry is a core number. value is a list of percentages of the infected population for all vertices with the same core number
    core_to_vertex_map = {}
    distinct_core_numbers = []
    for v in core:
        if(core[v] not in core_to_vertex_map):
            core_to_vertex_map[core[v]] = [v]
            distinct_core_numbers.append(core[v])
        else:
            core_to_vertex_map[core[v]].append(v)
    distinct_core_numbers.sort(reverse=True)
    for core_number in distinct_core_numbers[:top_k]:
        # check size
        v_sampled = None
        if(len(core_to_vertex_map[core_number]) < num_vertex_per_core):
            v_sampled = core_to_vertex_map[core_number]
        else:
            v_sampled = random.choices(core_to_vertex_map[core_number], k=num_vertex_per_core)
        for v in v_sampled:
            if(core_number not in result):
                result[core_number] = [propagate2(
                    H, starting_vertex=v, p=p, num_iterations=num_iterations, original_n=original_n, verbose=verbose)]
            else:
                result[core_number].append(propagate2(
                    H, starting_vertex=v, p=p, num_iterations=num_iterations, original_n=original_n, verbose=verbose))
            # print(component_sz(v))
    # TODO: Parallelize this loop
    # core_v_list = []
    # core_numbers = []
    # for core_number in distinct_core_numbers[:top_k]:
    #     for v in random.choices(core_to_vertex_map[core_number], k=num_vertex_per_core):
    #         core_v_list.append((H,v,p,num_iterations,verbose))
    #         core_numbers.append(core_number)
    # with Pool(processes=8) as pool:
    #     pool_results = pool.map(propagate, core_v_list)
    #     # for x in pool.map(propagate, core_v_list):
    #     #     pool_results.append(x[0])
    # pool.join()
    # for i, core_number in enumerate(core_numbers):
    #     if(core_number not in result):
    #         result[core_number] = pool_results[i][0]
    #     else:
    #         result[core_number].append(pool_results[i][0])
    #     # if(core_number not in result):
    #     #     result[core_number] = [propagate(H, starting_vertex=v, p = p, num_iterations = num_iterations, verbose = verbose)[0]]
    #     # else:
    #     #     result[core_number].append(propagate(H, starting_vertex=v, p = p, num_iterations = num_iterations, verbose = verbose)[0])
    return result
def propagate_for_all_vertices_for_kd(H, kd_core, num_vertex_per_core=100, top_k=150, p=0.5, num_iterations=10, original_n=None, verbose=True):
    """(k,d)-core variant of propagate_for_all_vertices.

    ``kd_core`` maps a (k, d) core-number tuple to its vertex list; tuples
    are ranked lexicographically and the ``top_k`` largest are sampled.
    Note seeds are passed to ``propagate2`` as ``str(v)`` — presumably the
    hypergraph uses string vertex labels; confirm against callers.
    """
    result = {}  # Entry is a core number. value is a list of percentages of the infected population for all vertices with the same core number
    distinct_core_numbers = sorted(kd_core.keys(), key=lambda tup: (tup[0], tup[1]), reverse=True)
    for core_number in distinct_core_numbers[:top_k]:
        # check size
        v_sampled = None
        if(len(kd_core[core_number]) < num_vertex_per_core):
            v_sampled = kd_core[core_number]
        else:
            v_sampled = random.choices(kd_core[core_number], k=num_vertex_per_core)
        for v in v_sampled:
            if(core_number not in result):
                result[core_number] = [propagate2(
                    H, starting_vertex=str(v), p=p, num_iterations=num_iterations, original_n=original_n, verbose=verbose)]
            else:
                result[core_number].append(propagate2(
                    H, starting_vertex=str(v), p=p, num_iterations=num_iterations, original_n=original_n, verbose=verbose))
            # print(component_sz(v))
    return result
def run_intervention_exp2(name, original_n, p=0.5, verbose=False):
    """Intervention experiment: load pickled (H, core) snapshots and measure
    propagation after successive core deletions.

    The pickle at ``sirdata/<name>.pkl`` maps a deletion step k to a dict
    with keys 'core' and 'H'. For each step, seeds are sampled from the 5
    largest cores (<=100 each) and ``propagate2`` is averaged over 5 runs of
    10 iterations; returns {step: {core_number: per-seed means}}.
    NOTE(review): ``propagate2`` is defined elsewhere in this module — this
    consumes only element [0] of its return value.
    """
    path = pkl_path+name+'.pkl'
    with open(os.path.join(path), 'rb') as handle:
        data = pickle.load(handle)
        print("loaded ", path)
    result = {}
    for k in data:
        print('Core deletion#: ', k)
        result[k] = {}
        temp_core = data[k]['core']
        H = data[k]['H']
        # check_connectivity(H)
        # result[k] = propagate_for_all_vertices(H, temp_core, p = p, original_n = original_n, verbose=verbose)
        core_to_vertex_map = {}
        distinct_core_numbers = []
        for v in temp_core:
            if(temp_core[v] not in core_to_vertex_map):
                core_to_vertex_map[temp_core[v]] = [v]
                distinct_core_numbers.append(temp_core[v])
            else:
                core_to_vertex_map[temp_core[v]].append(v)
        distinct_core_numbers.sort(reverse=True)
        for core_number in distinct_core_numbers[:5]:
            print('core: ', core_number)
            # result[k][core_number] = {}
            if(len(core_to_vertex_map[core_number]) < 100):
                v_sampled = core_to_vertex_map[core_number]
            else:
                v_sampled = random.choices(core_to_vertex_map[core_number], k=100)
            num_iterations = 10
            result_all_run = []
            for _ in range(5):
                result_single_run = []
                for v in v_sampled:
                    result_single_run.append(propagate2(
                        H, starting_vertex=v, p=p, num_iterations=num_iterations, original_n=original_n, verbose=verbose)[0])
                result_all_run.append(result_single_run)
            # Average the 5 repetitions seed-wise.
            result[k][core_number] = list(
                np.array(result_all_run).mean(axis=0))
    return result
def run_intervention_exp2_explain(name, verbose=False):
    """Connected components / reachable nodes per core after core deletions.

    Loads ``<pkl_path><name>.pkl`` and, for each deletion step ``k`` and each
    of the 100 largest core numbers, samples 100 vertices (with replacement)
    and records the mean connected-component size (``component_sz``) of the
    sample.  Results are written to ``../output/<name>_comp3.pkl`` via
    ``save_dict``; nothing is returned.
    """
    # 'temp.pkl' =>
    path = pkl_path+name+'.pkl'
    with open(os.path.join(path), 'rb') as handle:
        data = pickle.load(handle)
    print("loaded ", path)
    result = {}
    for k in data:
        # if (k!=2):
        #     continue
        print('Core deletion#: ', k)
        result[k] = {}
        temp_core = data[k]['core']
        H = data[k]['H']
        print('N: ', len(H.inc_dict))
        # check_connectivity(H)
        # continue
        # Bucket vertices by core number and collect the distinct core values.
        core_to_vertex_map = {}
        distinct_core_numbers = []
        for v in temp_core:
            if(temp_core[v] not in core_to_vertex_map):
                core_to_vertex_map[temp_core[v]] = [v]
                distinct_core_numbers.append(temp_core[v])
            else:
                core_to_vertex_map[temp_core[v]].append(v)
        distinct_core_numbers.sort(reverse=True)
        for core_number in distinct_core_numbers[:100]:
            print('core: ', core_number)
            # result[k][core_number] = {}
            tmp = {}
            # Sample 100 vertices with replacement from this core's bucket;
            # duplicate draws simply overwrite the same tmp entry.
            for v in random.choices(core_to_vertex_map[core_number], k=100):
                # if(core_number not in result):
                #     result[core_number] = [propagate(H, starting_vertex=v, p = p, num_iterations = 100, original_n = original_n, verbose = verbose)[0]]
                # else:
                #     result[core_number].append(propagate(H, starting_vertex=v, p = p, num_iterations = 100, original_n = original_n, verbose = verbose)[0])
                tmp[v] = component_sz(v, H)
            result[k][core_number] = np.mean(list(tmp.values()))
    save_dict(result, '../output/'+name+'_comp3.pkl')
def run_intervention_exp2_explain_splen(name, verbose=False):
    """Average shortest-path length per core number after core deletions.

    Loads ``<pkl_path><name>.pkl`` — a dict mapping deletion step ``k`` to
    ``{'core': {vertex: core_number}, 'H': hypergraph}``.  For each deletion
    step and each of the 200 largest core numbers, samples 100 vertices (with
    replacement) and averages ``avg_shortest_pathlen`` over the sample.  The
    per-(k, core) means are written to ``../output/<name>_sp4.pkl`` via
    ``save_dict``; nothing is returned.

    Args:
        name: base name of the input pickle (without extension).
        verbose: if True, print the avg SP length of every sampled vertex.
    """
    path = pkl_path+name+'.pkl'
    with open(os.path.join(path), 'rb') as handle:
        data = pickle.load(handle)
    print("loaded ", path)
    result = {}
    # The undeleted hypergraph H0 (key 0) supplies the vertex universe and the
    # normalisation constant M used by avg_shortest_pathlen.
    assert 0 in data  # to check if H0 is in data
    H0_core = data[0]['core']  # retained for conditional sampling by H0 core number
    constant_M = data[0]['H'].get_M()
    H0_V = list(data[0]['H'].inc_dict.keys())
    print("Max sp:", constant_M)
    for k in data:
        print("\n\n\n", k)
        result[k] = {}
        temp_core = data[k]['core']
        H = data[k]['H']
        print('N: ', len(H.inc_dict))
        # Bucket vertices by core number and collect the distinct core values.
        core_to_vertex_map = {}
        distinct_core_numbers = []
        for v in temp_core:
            if(temp_core[v] not in core_to_vertex_map):
                core_to_vertex_map[temp_core[v]] = [v]
                distinct_core_numbers.append(temp_core[v])
            else:
                core_to_vertex_map[temp_core[v]].append(v)
        distinct_core_numbers.sort(reverse=True)
        for core_number in distinct_core_numbers[:200]:
            tmp = {}
            # Sample 100 vertices with replacement from this core's bucket.
            for v in random.choices(core_to_vertex_map[core_number], k=100):
                tmp[v] = avg_shortest_pathlen(v, H, 100, H0_V, constant_M)
                if (verbose):
                    # BUG FIX: previously printed result[k][core_number][v],
                    # which is never populated and raised a KeyError whenever
                    # verbose=True; print the freshly computed value instead.
                    print('v ', v, ' avg SP length: ', tmp[v])
            result[k][core_number] = np.mean(list(tmp.values()))
    save_dict(result, '../output/'+name+'_sp4.pkl')
def run_intervention_exp(H, core, p=0.5, verbose=False):
    """Edge-deletion intervention on the max-core subhypergraph.

    Runs ``propagate_for_all_vertices`` on the untouched hypergraph (key
    'nill'), then for each percentage in [5, 10, 15] deletes a random sample
    of strongly-induced edges of the maximum core and re-runs propagation.

    NOTE(review): the result key is 'top<eperc>%' but the strategy actually
    implemented below is RANDOM deletion (the "Top-k" comment is stale).
    NOTE(review): ``len_dele`` is computed from ``len(temp_core)`` — the NODE
    count — not from the number of candidate edges; confirm this is intended.
    NOTE(review): ``random.choices`` samples WITH replacement, so ``todelete``
    may contain duplicate edge ids and ``del_edge`` may be called twice on the
    same id — verify ``del_edge`` tolerates that.
    """
    # print(core)
    # deleted_ids = [2693,2804,3865,1547,2102,2960,2537, 3446, 2120, 2673]
    # Find the maximum core number over all vertices.
    max_core_number = -1
    for v in core:
        if(max_core_number < core[v]):
            max_core_number = core[v]
    # print(max_core_number)
    # Vertices belonging to the innermost core.
    nodes_with_max_core = []
    for v in core:
        if(core[v] == max_core_number):
            nodes_with_max_core.append(v)
    all_nodes = H.nodes()
    result = {}
    # print(all_nodes)
    # Candidate edges: those fully contained in the max-core vertex set.
    strongly_induced_eids = H.get_stronglyinduced_edgeIds(nodes_with_max_core)
    if verbose:
        print('# potential edges to delete: ', len(strongly_induced_eids))
    # Top-k edge-deletion intervention strategy.
    temp_core = {}
    for node in H.nodes():
        temp_core[node] = core[node]
    # print(eid, H.get_edge_byindex(eid), temp_H.nodes(), len(temp_H.nodes()))
    # Baseline: propagation on the unmodified hypergraph.
    result['nill'] = propagate_for_all_vertices(
        H, temp_core, p=p, original_n=len(all_nodes), verbose=verbose)
    # Random k% edge-deletion intervention strategy TODO
    for eperc in [5, 10, 15]:
        len_dele = int(len(temp_core) * eperc/100.0)
        print('will delete ', eperc, '% = ', len_dele, ' edges')
        todelete = []
        for eid in random.choices(strongly_induced_eids, k=len_dele):
            todelete.append(eid)
        print('List : ', todelete)
        # Work on a deep copy so each percentage starts from the original H.
        temp_H = deepcopy(H)
        for eid in todelete:
            # print('deleting ',eid)
            temp_H.del_edge(eid)
        temp_core = {}
        for node in temp_H.nodes():
            temp_core[node] = core[node]
        result['top'+str(eperc)+'%'] = propagate_for_all_vertices(temp_H,
            temp_core, p=p, original_n=len(all_nodes), verbose=verbose)
    return result
def propagate_for_random_seeds(H, core, seed_size=1000, p=0.5, num_iterations=100, verbose=False):
    """Group infection times by core number over randomly seeded cascades.

    Runs ``propagate`` from ``seed_size`` seeds drawn (with replacement) from
    ``H.nodes()`` and collects, for every vertex, its infection timestep into
    a bucket keyed by the vertex's core number.

    Returns:
        dict: core_number -> list of infection timesteps (one per vertex per
        cascade; never-infected vertices contribute the num_iterations + 1
        sentinel that ``propagate`` assigns).
    """
    # print(core)
    result = {}
    #
    for v in random.choices(H.nodes(), k=seed_size):
        # print(v)
        # NOTE: verbose is intentionally not forwarded here to keep the
        # per-cascade output quiet.
        _, timestep_of_infection = propagate(
            H, starting_vertex=v, p=p, num_iterations=num_iterations, verbose=False)
        # print(timestep_of_infection)
        # print()
        for u in timestep_of_infection:
            if(core[u] not in result):
                result[core[u]] = [timestep_of_infection[u]]
            else:
                result[core[u]].append(timestep_of_infection[u])
    # print(result)
    return result
def propagate(H, starting_vertex, p=0.5, num_iterations=10, original_n=None, verbose=True):
    """
    SIR-style cascade on hypergraph H from a single seed vertex.

    Each iteration, every infected vertex tries to infect each susceptible
    neighbor independently with probability p, then recovers.  Recovered
    vertices never re-enter the cascade.

    Returns:
        tuple: (fraction of infected-or-recovered vertices relative to
        ``original_n`` (or ``H.get_N()`` when None), dict mapping each vertex
        to its infection timestep; never-infected vertices keep the sentinel
        value ``num_iterations + 1``).
    """
    # print('original_n: ',original_n)
    timestep_of_infection = {}
    if original_n is None:
        len_nodes = H.get_N()
    else:
        len_nodes = original_n
    # Sentinel: "never infected" is encoded as num_iterations + 1.
    for v in H.nodes():
        timestep_of_infection[v] = num_iterations + 1
    # NOTE(review): this mutates the list returned by H.nodes(); assumes
    # H.nodes() returns a fresh list each call — confirm against H's API.
    suscepted = H.nodes()
    suscepted.remove(starting_vertex)
    infected = [starting_vertex]
    timestep_of_infection[starting_vertex] = 0
    recovered = []
    for i in range(num_iterations):
        if(verbose):
            print('\n\n\nIteration:', i)
            # print("infected:", infected)
            # print("recovered:", recovered)
            # print("suscepted:", suscepted)
            print()
        if(len(infected) == 0):
            # if(verbose):
            #     print("No more propagation is possible")
            break
        new_infected = []
        new_recovered = []
        for v in infected:
            # if(verbose):
            #     print("\nPorpagating for", v)
            # Each susceptible neighbor is infected independently with prob p.
            for u in H.neighbors(v):
                if(u in suscepted):
                    if(random.random() <= p):
                        # if(verbose):
                        #     print(v, "->", u)
                        new_infected.append(u)
                        timestep_of_infection[u] = i + 1
                        suscepted.remove(u)
                    else:
                        # if(verbose):
                        #     print(v, "->", u, "not propagated")
                        pass
                # else:
                #     if(verbose):
                #         print(u, "is already either infected or recovered")
            # Every vertex recovers after one round of spreading attempts.
            new_recovered.append(v)
        infected += new_infected
        recovered += new_recovered
        for v in new_recovered:
            infected.remove(v)
    return 1 - float(len(suscepted) / len_nodes), timestep_of_infection
def propagate2(H, starting_vertex, p=0.5, num_iterations=10, original_n=None, verbose=True):
    """
    SIR-style cascade on hypergraph H; near-duplicate of ``propagate`` that
    returns an absolute infected COUNT instead of a fraction.

    Returns:
        tuple: (number of ever-infected vertices, i.e. len_nodes minus the
        remaining susceptible set, number of neighbors of the seed vertex).
        NOTE(review): unlike ``propagate``, the second element is NOT the
        timestep dict — see the commented-out return below.
    """
    # print('original_n: ',original_n)
    timestep_of_infection = {}
    if original_n is None:
        len_nodes = H.get_N()
    else:
        len_nodes = original_n
    # Sentinel: "never infected" is encoded as num_iterations + 1.
    for v in H.nodes():
        timestep_of_infection[v] = num_iterations + 1
    # NOTE(review): mutates the list returned by H.nodes(); assumes a fresh
    # list each call — confirm against H's API.
    suscepted = H.nodes()
    suscepted.remove(starting_vertex)
    infected = [starting_vertex]
    timestep_of_infection[starting_vertex] = 0
    recovered = []
    # print(starting_vertex, len(H.neighbors(starting_vertex)))
    # quit()
    for i in range(num_iterations):
        if(verbose):
            print('\n\n\nIteration:', i)
            # print("infected:", infected)
            # print("recovered:", recovered)
            # print("suscepted:", suscepted)
            print()
        if(len(infected) == 0):
            # if(verbose):
            #     print("No more propagation is possible")
            break
        new_infected = []
        new_recovered = []
        for v in infected:
            # if(verbose):
            #     print("\nPorpagating for", v)
            # Each susceptible neighbor is infected independently with prob p.
            for u in H.neighbors(v):
                if(u in suscepted):
                    if(random.random() <= p):
                        # if(verbose):
                        #     print(v, "->", u)
                        new_infected.append(u)
                        timestep_of_infection[u] = i + 1
                        suscepted.remove(u)
                    else:
                        # if(verbose):
                        #     print(v, "->", u, "not propagated")
                        pass
                # else:
                #     if(verbose):
                #         print(u, "is already either infected or recovered")
            # Every vertex recovers after one round of spreading attempts.
            new_recovered.append(v)
        infected += new_infected
        recovered += new_recovered
        for v in new_recovered:
            infected.remove(v)
    # print(len_nodes, suscepted, len(suscepted))
    # print(len_nodes - len(suscepted), timestep_of_infection)
    # return len_nodes - len(suscepted), timestep_of_infection
    return len_nodes - len(suscepted), len(H.neighbors(starting_vertex))
| 16,654 | 36.093541 | 157 | py |
PIBConv | PIBConv-main/cnn/extract_cifar10.py | # Python 3 program to visualize 4th image
import matplotlib.pyplot as plt
import numpy as np
import cv2
def unpickle(file):
    """Load and return one pickled object from ``file``.

    Uses ``encoding='latin1'`` so Python-2-era pickles (the CIFAR-10 batch
    files) deserialise correctly under Python 3.

    Args:
        file: path to the pickle file.

    Returns:
        The unpickled object (a dict for CIFAR-10 batch files).
    """
    import pickle
    with open(file, 'rb') as fo:
        # Renamed from `dict`, which shadowed the builtin of the same name.
        batch = pickle.load(fo, encoding='latin1')
    return batch
def upSampleCIFAR10():
    """Load one CIFAR-10 image, save a labelled preview, and upsample it.

    Reads the first image of data_batch_2 from a hard-coded cluster path,
    writes a matplotlib preview to 'cifar10.png', then bicubically resizes
    the 32x32 image to 224x224 with OpenCV.

    Returns:
        tuple: (original HWC uint8-range array of shape (32, 32, 3),
        upsampled array of shape (224, 224, 3)).
    """
    # NOTE(review): paths are hard-coded to a specific cluster filesystem.
    file = r'/fs2/comm/kpgrp/mhosseini/github/ConvSearch/cnn/data/cifar-10-batches-py/data_batch_2'
    data_batch_1 = unpickle(file)
    X_train = data_batch_1['data']  # NOTE(review): unused below
    meta_file = r'/fs2/comm/kpgrp/mhosseini/github/ConvSearch/cnn/data/cifar-10-batches-py/batches.meta'
    meta_data = unpickle(meta_file)
    label_name = meta_data['label_names']
    # CIFAR batches store flat 3072-byte rows; reshape to CHW then move to HWC.
    image = data_batch_1['data'][0]
    image = image.reshape(3,32,32)
    image = image.transpose(1,2,0)
    im = np.uint8(image)
    label = data_batch_1['labels'][0]
    plt.title(label_name[label])
    plt.imshow(im)
    plt.savefig('cifar10.png')
    print(image.shape)
    imgOriginal = image
    imgUpsampled = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC)
    print(imgUpsampled.shape)
    return imgOriginal, imgUpsampled
PIBConv | PIBConv-main/cnn/complexity.py | import torch
from model import *
from genotypes import *
from ptflops import get_model_complexity_info
def print_complexity(network):
    """Print the MAC count and parameter count of ``network``.

    Runs ptflops' ``get_model_complexity_info`` with a CIFAR-sized
    (3, 32, 32) input, including the per-layer breakdown that the helper
    itself prints, then echoes the two string totals.
    """
    input_shape = (3, 32, 32)
    flops_str, params_str = get_model_complexity_info(
        network,
        input_shape,
        as_strings=True,
        print_per_layer_stat=True,
        verbose=True,
    )
    for label, value in (('Computational complexity: ', flops_str),
                         ('Number of parameters: ', params_str)):
        print('{:<30} {:<8}'.format(label, value))
if __name__ == "__main__":
network = NetworkCIFAR(
C=36,
num_classes=10,
layers=20,
auxiliary=False,
genotype=DARTS_newconv_epoch50)
network.drop_path_prob = 0.2 # Placeholder - value is only for functionality and should not change complexity at all
print_complexity(network)
| 788 | 31.875 | 127 | py |
PIBConv | PIBConv-main/cnn/train_cpath.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import genotypes
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkADP as Network
# for ADP dataset only
from ADP_utils.classesADP import classesADP
parser = argparse.ArgumentParser("cpath")
####################
# Model details
parser.add_argument('--arch', type=str, default='DARTS_ADP_N4', help='choose network architecture')
parser.add_argument('--layers', type=int, default=4, help='total number of layers')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
####################
# Training details
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
####################
# Datasets
parser.add_argument('--data', type=str, default='./data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP', help='choose dataset: ADP, BCSS, BACH, OS')
parser.add_argument('--image_size', type=int, default=272, help='CPATH image size')
# color augmentation
parser.add_argument('--color_aug', action='store_true', default=False, help='use color augmentation')
parser.add_argument('--color_distortion', type=float, default=0.3, help='color distortion param')
# ADP only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP label level')
####################
# Others
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
args.save = 'Eval-{}-data-{}-arch-{}-{}'.format(args.save, args.dataset, args.arch, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
if args.dataset == 'ADP':
n_classes = classesADP[args.adp_level]['numClasses']
elif args.dataset == 'BCSS':
n_classes = 10
elif args.dataset == 'BACH' or args.dataset == 'OS':
n_classes = 4
else:
logging.info('Unknown dataset!')
sys.exit(1)
def main():
    """Train the selected architecture on a CPATH dataset, then test.

    Builds train/valid/test loaders for the chosen dataset, trains with SGD +
    cosine annealing for ``args.epochs`` epochs (checkpointing the best
    validation-top1 weights), and finally reports test accuracy for both the
    last and the best checkpoints.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility / device setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Resolve the architecture genotype by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)

    # dataset
    if args.dataset == 'ADP':
        train_transform, valid_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train')
        valid_data = utils.ADP_dataset(level=args.adp_level, transform=valid_transform, root=args.data, split='valid')
        test_data = utils.ADP_dataset(level=args.adp_level, transform=valid_transform, root=args.data, split='test')
    elif args.dataset == 'BCSS':
        train_transform, valid_transform = utils._data_transforms_bcss(args)
        train_data = utils.BCSSDataset(root=args.data, split='train', transform=train_transform)
        valid_data = utils.BCSSDataset(root=args.data, split='valid', transform=valid_transform)
        test_data = utils.BCSSDataset(root=args.data, split='test', transform=valid_transform)
    elif args.dataset == 'BACH':
        train_transform, valid_transform = utils._data_transforms_bach(args)
        train_data = utils.BACH_transformed(root=args.data, split='train', transform=train_transform)
        valid_data = utils.BACH_transformed(root=args.data, split='valid', transform=valid_transform)
        test_data = utils.BACH_transformed(root=args.data, split='test', transform=valid_transform)
    elif args.dataset == 'OS':
        train_transform, valid_transform = utils._data_transforms_os(args)
        train_data = utils.OS_transformed(root=args.data, split='train', transform=train_transform)
        valid_data = utils.OS_transformed(root=args.data, split='valid', transform=valid_transform)
        test_data = utils.OS_transformed(root=args.data, split='test', transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    dataset_size = len(train_queue.dataset)
    print('train dataset size:', len(train_queue.dataset))
    print('valid dataset size:', len(valid_queue.dataset))
    print('test dataset size:', len(test_queue.dataset))

    # criterion
    # ADP and BCSS are multi-label datasets
    # Use MultiLabelSoftMarginLoss
    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        # Per-class weights = inverse label frequency, to counter imbalance.
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    # BACH and OS are single-label datasets
    # Use CrossEntropyLoss
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()

    # model
    model = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fM", utils.count_parameters_in_MB(model))

    # optimizer and scheduler
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

    # train
    best_acc = 0
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() before the epoch's optimizer steps is
        # the pre-1.1 PyTorch ordering; newer PyTorch warns about it.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc_1, train_acc_5, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc_1 %f, train_acc_5 %f', train_acc_1, train_acc_5)

        valid_acc_1, valid_acc_5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc_1 %f, valid_acc_5 %f', valid_acc_1, valid_acc_5)

        # Checkpoint: best-on-valid and always-latest weights.
        if valid_acc_1 > best_acc:
            best_acc = valid_acc_1
            utils.save(model, os.path.join(args.save, 'best_weights.pt'))
        utils.save(model, os.path.join(args.save, 'last_weights.pt'))

    # test
    # use last weights
    logging.info("Test using last weights ...")
    model_test = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model_test = model_test.cuda()
    utils.load(model_test, os.path.join(args.save, 'last_weights.pt'))
    model_test.drop_path_prob = args.drop_path_prob
    test_acc1, test_acc5, test_obj = infer(test_queue, model_test, criterion)
    logging.info('test_acc_1 %f, test_acc_5 %f', test_acc1, test_acc5)

    # use best weights on valid set
    logging.info("Test using best weights ...")
    model_test = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model_test = model_test.cuda()
    utils.load(model_test, os.path.join(args.save, 'best_weights.pt'))
    model_test.drop_path_prob = args.drop_path_prob
    test_acc1, test_acc5, test_obj = infer(test_queue, model_test, criterion)
    logging.info('test_acc_1 %f, test_acc_5 %f', test_acc1, test_acc5)
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch and return (top1, top5, avg_loss).

    Multi-label datasets (ADP, BCSS) threshold sigmoid outputs at 0.5 and
    accumulate raw correct-label counts, normalised by dataset size at the
    end; single-label datasets (BACH, OS) use the AverageMeter running means.
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.train()
    trained_data_size = 0
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Deep-supervision head: add a down-weighted auxiliary loss.
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        loss.backward()
        # BUG FIX: nn.utils.clip_grad_norm is the long-deprecated alias and is
        # removed in recent PyTorch releases; use the in-place variant.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        n = input.size(0)
        trained_data_size += n
        if args.dataset == 'ADP' or args.dataset == 'BCSS':
            # Multi-label: threshold sigmoid outputs and count exact matches.
            m = nn.Sigmoid()
            preds = (m(logits) > 0.5).int()
            prec1, prec5 = utils.accuracyADP(preds, target)
            objs.update(loss.item(), n)
            top1.update(prec1.double(), n)
            top5.update(prec5.double(), n)
        elif args.dataset == 'BACH' or args.dataset == 'OS':
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, min(5, n_classes)))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

        # report training loss
        if step % args.report_freq == 0:
            if args.dataset == 'ADP' or args.dataset == 'BCSS':
                # Normalise accumulated counts by examples seen so far.
                top1_avg = (top1.sum_accuracy.cpu().item() / (trained_data_size * n_classes))
                top5_avg = (top5.sum_accuracy.cpu().item() / trained_data_size)
            elif args.dataset == 'BACH' or args.dataset == 'OS':
                top1_avg = top1.avg
                top5_avg = top5.avg
            logging.info('train %03d %e %f %f', step, objs.avg, top1_avg, top5_avg)

    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(train_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(train_queue.dataset))
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        top1_avg = top1.avg
        top5_avg = top5.avg
    return top1_avg, top5_avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate ``model`` on ``valid_queue`` and return (top1, top5, avg_loss).

    Mirrors ``train``'s accuracy accounting: multi-label datasets (ADP, BCSS)
    accumulate raw correct-label counts normalised by dataset size; single-
    label datasets (BACH, OS) use the AverageMeter running means.  Runs under
    ``torch.no_grad()`` with the model in eval mode.
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()
    infered_data_size = 0
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            n = input.size(0)
            infered_data_size += n
            if args.dataset == 'ADP' or args.dataset == 'BCSS':
                # Multi-label: threshold sigmoid outputs at 0.5.
                m = nn.Sigmoid()
                preds = (m(logits) > 0.5).int()
                prec1, prec5 = utils.accuracyADP(preds, target)
                objs.update(loss.item(), n)
                top1.update(prec1.double(), n)
                top5.update(prec5.double(), n)
            elif args.dataset == 'BACH' or args.dataset == 'OS':
                prec1, prec5 = utils.accuracy(logits, target, topk=(1, min(5, n_classes)))
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)

            # report validation loss
            if step % args.report_freq == 0:
                if args.dataset == 'ADP' or args.dataset == 'BCSS':
                    # Normalise accumulated counts by examples seen so far.
                    top1_avg = (top1.sum_accuracy.cpu().item() / (infered_data_size * n_classes))
                    top5_avg = (top5.sum_accuracy.cpu().item() / infered_data_size)
                elif args.dataset == 'BACH' or args.dataset == 'OS':
                    top1_avg = top1.avg
                    top5_avg = top5.avg
                logging.info('valid %03d %e %f %f', step, objs.avg, top1_avg, top5_avg)

    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(valid_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(valid_queue.dataset))
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        top1_avg = top1.avg
        top5_avg = top5.avg
    return top1_avg, top5_avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 13,664 | 43.366883 | 118 | py |
PIBConv | PIBConv-main/cnn/apply_gradcam.py | import argparse
import cv2
import numpy as np
import torch
from torchvision import models
from pytorch_grad_cam import GradCAM, \
HiResCAM, \
ScoreCAM, \
GradCAMPlusPlus, \
AblationCAM, \
XGradCAM, \
EigenCAM, \
EigenGradCAM, \
LayerCAM, \
FullGrad, \
GradCAMElementWise
from model import *
from genotypes import *
import extract_cifar10
import matplotlib.pyplot as plt
from pytorch_grad_cam import GuidedBackpropReLUModel
from pytorch_grad_cam.utils.image import show_cam_on_image, \
deprocess_image, \
preprocess_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
def get_args():
    """Parse CLI options for CAM visualisation and resolve the compute device.

    Options: --use-cuda (only honoured if CUDA is actually available),
    --image-path, --aug_smooth, --eigen_smooth, and --method (one of the
    supported CAM variants).  Prints which device will be used.

    Returns:
        argparse.Namespace: the parsed arguments, with ``use_cuda`` already
        gated on ``torch.cuda.is_available()``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--use-cuda', action='store_true', default=False,
                        help='Use NVIDIA GPU acceleration')
    parser.add_argument(
        '--image-path',
        type=str,
        default='./examples/both.png',
        help='Input image path')
    parser.add_argument('--aug_smooth', action='store_true',
                        help='Apply test time augmentation to smooth the CAM')
    parser.add_argument(
        '--eigen_smooth',
        action='store_true',
        help='Reduce noise by taking the first principle componenet'
        'of cam_weights*activations')
    parser.add_argument('--method', type=str, default='gradcam',
                        choices=['gradcam', 'hirescam', 'gradcam++',
                                 'scorecam', 'xgradcam',
                                 'ablationcam', 'eigencam',
                                 'eigengradcam', 'layercam', 'fullgrad'],
                        help='Can be gradcam/gradcam++/scorecam/xgradcam'
                        '/ablationcam/eigencam/eigengradcam/layercam')

    args = parser.parse_args()
    # Fall back to CPU silently when CUDA was requested but is unavailable.
    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    if args.use_cuda:
        print('Using GPU for acceleration')
    else:
        print('Using CPU for computation')

    return args
if __name__ == '__main__':
    """ python cam.py -image-path <path_to_image>
    Example usage of loading an image, and computing:
        1. CAM
        2. Guided Back Propagation
        3. Combining both
    """

    args = get_args()
    # Name -> CAM class lookup; args.method selects which one runs.
    methods = \
        {"gradcam": GradCAM,
         "hirescam": HiResCAM,
         "scorecam": ScoreCAM,
         "gradcam++": GradCAMPlusPlus,
         "ablationcam": AblationCAM,
         "xgradcam": XGradCAM,
         "eigencam": EigenCAM,
         "eigengradcam": EigenGradCAM,
         "layercam": LayerCAM,
         "fullgrad": FullGrad,
         "gradcamelementwise": GradCAMElementWise}

    # Rebuild the evaluated DARTS network with a fixed genotype.
    model = NetworkCIFAR(
        C=36,
        num_classes=10,
        layers=20,
        auxiliary=True,
        genotype=DARTS_newconv_epoch50)
    model.drop_path_prob = 0.2

    # Choose the target layer you want to compute the visualization for.
    # Usually this will be the last convolutional layer in the model.
    # Some common choices can be:
    # Resnet18 and 50: model._layers[-1]
    # VGG, densenet161: model.features[-1]
    # mnasnet1_0: model.layers[-1]
    # You can print the model to help chose the layer
    # You can pass a list with several target layers,
    # in that case the CAMs will be computed per layer and then aggregated.
    # You can also try selecting all layers of a certain type, with e.g:
    # from pytorch_grad_cam.utils.find_layers import find_layer_types_recursive
    # find_layer_types_recursive(model, [torch.nn.ReLU])

    # Last op of the last cell of the DARTS network.
    target_layers = [model.cells[-1]._ops[-1]]

    NUM_CLASSES = 10
    # Onehot encode labels
    # imgUpsampled = extract_cifar10.upSampleCIFAR10()
    # image = imgUpsampled.transpose(1,2,0)
    # fig = plt.figure()
    # plt.plot(image)
    # fig.savefig('temp.png', dpi=fig.dpi)

    # BGR -> RGB via the ::-1 channel flip, then scale to [0, 1].
    rgb_img = cv2.imread(args.image_path, 1)[:, :, ::-1]
    rgb_img = np.float32(rgb_img) / 255
    input_tensor = preprocess_image(rgb_img,
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])

    # We have to specify the target we want to generate
    # the Class Activation Maps for.
    # If targets is None, the highest scoring category (for every member in the batch) will be used.
    # You can target specific categories by
    # targets = [e.g ClassifierOutputTarget(281)]
    targets = None

    # Using the with statement ensures the context is freed, and you can
    # recreate different CAM objects in a loop.
    cam_algorithm = methods[args.method]
    with cam_algorithm(model=model,
                       target_layers=target_layers,
                       use_cuda=args.use_cuda) as cam:

        # AblationCAM and ScoreCAM have batched implementations.
        # You can override the internal batch size for faster computation.
        cam.batch_size = 32
        grayscale_cam = cam(input_tensor=input_tensor,
                            targets=targets,
                            aug_smooth=args.aug_smooth,
                            eigen_smooth=args.eigen_smooth)

        # Here grayscale_cam has only one image in the batch
        grayscale_cam = grayscale_cam[0, :]

        cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)

        # cam_image is RGB encoded whereas "cv2.imwrite" requires BGR encoding.
        cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)

    # NOTE(review): target_category is the older pytorch-grad-cam keyword;
    # newer releases use targets= — confirm the pinned library version.
    gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
    gb = gb_model(input_tensor, target_category=None)

    # Mask guided-backprop output with the (3-channel) CAM heatmap.
    cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
    cam_gb = deprocess_image(cam_mask * gb)
    gb = deprocess_image(gb)

    cv2.imwrite(f'{args.method}_cam.jpg', cam_image)
    cv2.imwrite(f'{args.method}_gb.jpg', gb)
    cv2.imwrite(f'{args.method}_cam_gb.jpg', cam_gb)
| 5,853 | 34.26506 | 100 | py |
PIBConv | PIBConv-main/cnn/test_cpath.py | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkADP as Network
# for ADP dataset
from ADP_utils.classesADP import classesADP
parser = argparse.ArgumentParser("cpath")
parser.add_argument('--data', type=str, default='./data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP', help='choose dataset: ADP, BCSS, BACH, OS')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--model_path', type=str, default='EXP/weights.pt', help='path of trained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS_ADP_N4', help='choose network architecture: DARTS_ADP_N2, DARTS_ADP_N3, DARTS_ADP_N4')
parser.add_argument('--image_size', type=int, default=272, help='ADP image size')
# ADP only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP level')
args = parser.parse_args()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
if args.dataset == 'ADP':
n_classes = classesADP[args.adp_level]['numClasses']
elif args.dataset == 'BCSS':
n_classes = 10
elif args.dataset == 'BACH' or args.dataset == 'OS':
n_classes = 4
else:
logging.info('Unknown dataset!')
sys.exit(1)
def main():
    """Evaluate a trained model on the chosen CPATH test set.

    Rebuilds the network described by --arch, loads weights from --model_path
    and reports top-1 / top-5 test accuracy via ``infer``.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility / device setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # BUG FIX: `genotypes` is referenced via eval() below but this module
    # never imported it, which raised a NameError at runtime.
    import genotypes
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)

    # dataset
    if args.dataset == 'ADP':
        train_transform, test_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train')
        test_data = utils.ADP_dataset(level=args.adp_level, transform=test_transform, root=args.data, split='test')
    elif args.dataset == 'BCSS':
        train_transform, test_transform = utils._data_transforms_bcss(args)
        train_data = utils.BCSSDataset(root=args.data, split='train', transform=train_transform)
        test_data = utils.BCSSDataset(root=args.data, split='test', transform=test_transform)
    elif args.dataset == 'BACH':
        train_transform, test_transform = utils._data_transforms_bach(args)
        train_data = utils.BACH_transformed(root=args.data, split='train', transform=train_transform)
        test_data = utils.BACH_transformed(root=args.data, split='test', transform=test_transform)
    elif args.dataset == 'OS':
        train_transform, test_transform = utils._data_transforms_os(args)
        train_data = utils.OS_transformed(root=args.data, split='train', transform=train_transform)
        test_data = utils.OS_transformed(root=args.data, split='test', transform=test_transform)

    # The train split is only needed to derive the class-balancing weights.
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    dataset_size = len(train_queue.dataset)

    # criterion
    # ADP and BCSS are multi-label datasets
    # Use MultiLabelSoftMarginLoss
    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        # Per-class weights = inverse label frequency, to counter imbalance.
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    # BACH and OS are single-label datasets
    # Use CrossEntropyLoss
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()

    # model
    # BUG FIX: this script's argument parser historically did not define
    # --init_channels / --layers, so args.init_channels raised AttributeError.
    # Fall back to the training defaults when the options are absent.
    init_channels = getattr(args, 'init_channels', 36)
    layers = getattr(args, 'layers', 4)
    model = Network(init_channels, n_classes, layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    model.drop_path_prob = args.drop_path_prob
    logging.info("param size = %fM", utils.count_parameters_in_MB(model))

    test_acc1, test_acc5, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc_1 %f, test_acc_5 %f', test_acc1, test_acc5)
def infer(valid_queue, model, criterion):
    """Evaluate `model` over `valid_queue` and report accuracy/loss.

    For the multi-label datasets (ADP, BCSS) the per-batch accuracies are
    accumulated and normalized by sample count (and class count for top-1);
    for the single-label datasets (BACH, OS) the meters' running averages
    are used directly.

    Returns:
        (top1_avg, top5_avg, mean_loss)
    """
    loss_meter = utils.AverageMeter()
    acc1_meter = utils.AverageMeter()
    acc5_meter = utils.AverageMeter()
    model.eval()
    seen = 0  # number of samples evaluated so far
    with torch.no_grad():
        for step, (images, labels) in enumerate(valid_queue):
            images = images.cuda()
            labels = labels.cuda(non_blocking=True)
            logits, _ = model(images)
            batch_loss = criterion(logits, labels)
            batch_size = images.size(0)
            seen += batch_size
            if args.dataset in ('ADP', 'BCSS'):
                # Multi-label: threshold sigmoid outputs at 0.5.
                sigmoid = nn.Sigmoid()
                binarized = (sigmoid(logits) > 0.5).int()
                acc1, acc5 = utils.accuracyADP(binarized, labels)
                loss_meter.update(batch_loss.item(), batch_size)
                acc1_meter.update(acc1.double(), batch_size)
                acc5_meter.update(acc5.double(), batch_size)
            elif args.dataset in ('BACH', 'OS'):
                acc1, acc5 = utils.accuracy(logits, labels, topk=(1, min(5, n_classes)))
                loss_meter.update(batch_loss.item(), batch_size)
                acc1_meter.update(acc1.item(), batch_size)
                acc5_meter.update(acc5.item(), batch_size)
            if step % args.report_freq == 0:
                if args.dataset in ('ADP', 'BCSS'):
                    top1_avg = acc1_meter.sum_accuracy.cpu().item() / (seen * n_classes)
                    top5_avg = acc5_meter.sum_accuracy.cpu().item() / seen
                elif args.dataset in ('BACH', 'OS'):
                    top1_avg = acc1_meter.avg
                    top5_avg = acc5_meter.avg
                logging.info('valid %03d %e %f %f', step, loss_meter.avg, top1_avg, top5_avg)
    print('infered_data_size:', seen)
    print('valid_data_size:', len(valid_queue.dataset))
    total = len(valid_queue.dataset)
    if args.dataset in ('ADP', 'BCSS'):
        top1_avg = acc1_meter.sum_accuracy.cpu().item() / (total * n_classes)
        top5_avg = acc5_meter.sum_accuracy.cpu().item() / total
    elif args.dataset in ('BACH', 'OS'):
        top1_avg = acc1_meter.avg
        top5_avg = acc5_meter.avg
    return top1_avg, top5_avg, loss_meter.avg
# Entry point: run evaluation only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 7,551 | 42.154286 | 141 | py |
PIBConv | PIBConv-main/cnn/train_search_rmsgd.py | from operator import index
import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"
print("bruv")
import sys
import time
import glob
import utils
import logging
import argparse
import numpy as np
import pandas as pd
import pickle
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from copy import deepcopy
from numpy import linalg as LA
from torch.autograd import Variable
from model_search import Network
from architect import Architect
from adas import Adas
from adas.metrics import Metrics
from rmsgd import RMSGD
# for ADP dataset
from ADP_utils.classesADP import classesADP
# Command-line interface for the architecture search run.
parser = argparse.ArgumentParser("adaptive_darts")
####################
# Dataset
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP-Release1', help='valid datasets: cifar10, cifar100, ADP-Release1')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--image_size', type=int, default=64, help='CPATH image size')
# color augmentation
parser.add_argument('--color_aug', action='store_true', default=False, help='use color augmentation')
parser.add_argument('--color_distortion', type=float, default=0.3, help='color distortion param')
# For ADP dataset only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP level')
####################
# Training details
parser.add_argument('--gpu', type=str, default='0', help='gpu device id')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--learnable_bn', action='store_true', default=False, help='learnable parameters in batch normalization')
# Gumbel-softmax
parser.add_argument('--gumbel', action='store_true', default=False, help='use or not Gumbel-softmax trick')
parser.add_argument('--tau_max', type=float, default=10.0, help='initial tau')
parser.add_argument('--tau_min', type=float, default=1.0, help='minimum tau')
# Adas optimizer
parser.add_argument('--adas', action='store_true', default=False, help='whether or not to use adas optimizer')
parser.add_argument('--scheduler_beta', type=float, default=0.98, help='beta for lr scheduler')
parser.add_argument('--scheduler_p', type=int, default=1, help='p for lr scheduler')
parser.add_argument('--step_size', type=int, default=50, help='step_size for dropping lr')
parser.add_argument('--gamma', type=float, default=1.0, help='gamma for dropping lr')
# RM-SGD optimizer
parser.add_argument('--rmsgd', action='store_true', default=False, help='whether or not to use RM-SGD optimizer')
####################
# Model details
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=4, help='total number of layers')
parser.add_argument('--node', type=int, default=4, help='number of nodes in a cell')
####################
# Others
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--file_name', type=str, default='_', help='metrics and weights data file name')
args = parser.parse_args()

# Experiment directory named by experiment, dataset and timestamp; a copy of
# every *.py file is saved alongside for reproducibility.
args.save = 'Search-{}-data-{}-{}'.format(args.save, args.dataset, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log both to stdout and to <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

# Resolve the number of output classes for the chosen dataset; abort early on
# unsupported dataset names.
if args.dataset == 'cifar100':
    n_classes = 100
    data_folder = 'cifar-100-python'
elif args.dataset == 'cifar10':
    n_classes = 10
    data_folder = 'cifar-10-batches-py'
elif args.dataset == 'ADP-Release1':
    n_classes = classesADP[args.adp_level]['numClasses']
else:
    logging.info('dataset not supported')
    sys.exit(1)

# Set to True in main() when more than one GPU id is supplied via --gpu.
is_multi_gpu = False
def main():
    """Run the full DARTS-style architecture search.

    Builds the dataset loaders, supernet, weight optimizer (SGD / Adas /
    RM-SGD) and architecture optimizer (Architect), then alternates weight
    and architecture updates for args.epochs epochs, logging genotypes and
    writing per-epoch statistics to Excel/pickle files.
    """
    global is_multi_gpu
    gpus = [int(i) for i in args.gpu.split(',')]
    logging.info('gpus = %s' % gpus)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))
        #with torch.cuda.device('cuda:0'):
        #torch.cuda.empty_cache()
    else:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        is_multi_gpu = True
    if args.layers <= 2:
        logging.info('Minimmum number of layers is 2')
        sys.exit(1)
    # Seed every RNG source for reproducibility.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    # load dataset
    if args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'ADP-Release1':
        train_transform, valid_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train_search', portion=args.train_portion)
        valid_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='valid_search', portion=args.train_portion)
    # CIFAR: split one training set into search-train / search-valid halves.
    if args.dataset in ['cifar100', 'cifar10']:
        num_train = len(train_data)
        indices = list(range(num_train))
        split = int(np.floor(args.train_portion * num_train))
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
            pin_memory=True, num_workers=0)
    elif args.dataset == 'ADP-Release1':
        # ADP ships pre-split search-train / search-valid subsets.
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(train_data),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            valid_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(valid_data),
            pin_memory=True, num_workers=0)
    # build network
    if args.dataset in ['cifar100', 'cifar10']:
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()
    elif args.dataset == 'ADP-Release1':
        # Multi-label loss with per-class weights inversely proportional to
        # class frequency in the training split.
        dataset_size = len(train_queue.dataset)
        print('train dataset size:', len(train_queue.dataset))
        print('valid dataset size:', len(valid_queue.dataset))
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    model = Network(args.init_channels, n_classes, args.layers, criterion, learnable_bn=args.learnable_bn, steps=args.node, multiplier=args.node)
    if is_multi_gpu:
        model = nn.DataParallel(model)
    model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Weight optimizer must exclude architecture (alpha) parameters, which
    # are updated separately by the Architect's Adam optimizer.
    arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
    arch_params = list(map(id, arch_parameters))
    model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
    model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
    # Optimizer for model weights update
    # Use Adas: optimizer and scheduler
    if args.adas:
        optimizer = Adas(params=list(model_params),
                         lr=args.learning_rate,
                         beta=args.scheduler_beta,
                         step_size=args.step_size,
                         gamma=args.gamma,
                         momentum=args.momentum,
                         weight_decay=args.weight_decay)
    # Use SGD: default in DARTS paper
    elif args.rmsgd:
        optimizer = RMSGD(
            params=list(model_params),
            lr=args.learning_rate,
            beta=args.scheduler_beta,
            step_size=args.step_size,
            linear=False,
            gamma=args.gamma,
            momentum=args.momentum,
            dampening=0,
            weight_decay=args.weight_decay,
            nesterov=False
        )
    else:
        optimizer = torch.optim.SGD(
            model_params,
            args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
    # NOTE: the cosine scheduler is only stepped in the plain-SGD branch below.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    architect = Architect(model, criterion, args)
    if not args.adas and not args.rmsgd:
        # record probing metrics
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        metrics = Metrics(params=list(model_params))
    # files to record searching results
    performance_statistics = {}
    arch_statistics = {}
    genotype_statistics = {}
    # Find the first unused ../save_data<index> directory so previous runs
    # are never overwritten.
    index = 0
    dir_path=r'../save_data'
    while(os.path.isdir(dir_path+str(index))):
        index += 1
    print(dir_path+str(index))
    dir_path=dir_path+str(index)
    #make the new dir without overwriting previous data
    os.mkdir(dir_path)
    metrics_path = dir_path+ '/metrics_stat_{}.xlsx'.format(args.file_name)
    weights_path = dir_path+'/weights_stat_{}.xlsx'.format(args.file_name)
    genotypes_path = dir_path+'/genotypes_stat_{}.xlsx'.format(args.file_name)
    print(genotypes_path)
    errors_dict = {'train_acc_1': [], 'train_loss': [], 'valid_acc_1': [], 'valid_loss': [],
                   'train_acc_5': [], 'valid_acc_5':[]}
    for epoch in range(args.epochs):
        # Adas / RM-SGD maintain per-layer learning rates; plain SGD uses the
        # cosine schedule's scalar lr.
        if args.adas:
            lr = optimizer.lr_vector
        elif args.rmsgd:
            lr = optimizer.lr_vector
            #logging.info('epoch %d lr %e', epoch, lr)
        else:
            scheduler.step()
            # NOTE(review): scheduler.get_lr() is deprecated in recent PyTorch
            # in favor of get_last_lr() — TODO confirm the targeted version.
            lr = scheduler.get_lr()[0]
            logging.info('epoch %d lr %e', epoch, lr)
        genotype = model.module.genotype() if is_multi_gpu else model.genotype()
        logging.info('epoch: %d', epoch)
        logging.info('genotype = %s', genotype)
        # training
        train_acc_1, train_acc_5, train_obj = train(epoch, train_queue, valid_queue,
                                                    model, architect, criterion,
                                                    optimizer, lr)
        print('\n')
        logging.info('train_acc_1 %f, train_acc_5 %f', train_acc_1, train_acc_5)
        # validation
        valid_acc_1, valid_acc_5, valid_obj = infer(valid_queue, model, criterion)
        print('\n')
        logging.info('valid_acc_1 %f, valid_acc_5 %f', valid_acc_1, valid_acc_5)
        # update the errors dictionary
        errors_dict['train_acc_1'].append(train_acc_1)
        errors_dict['train_loss'].append(train_obj)
        errors_dict['valid_acc_1'].append(valid_acc_1)
        errors_dict['valid_loss'].append(valid_obj)
        errors_dict['valid_acc_5'].append(valid_acc_5)
        errors_dict['train_acc_5'].append(train_acc_5)
        # update network metrics (knowledge gain, condition mapping, etc)
        if args.adas:
            # AdaS: update learning rates
            optimizer.epoch_step(epoch)
            io_metrics = optimizer.KG
            lr_metrics = optimizer.velocity
        # added RM-SGD optimizer
        elif args.rmsgd:
            optimizer.epoch_step()
            io_metrics = optimizer.KG
            lr_metrics = optimizer.velocity
        else:
            metrics()
            io_metrics = metrics.KG(epoch)
            lr_metrics = None
        # Softmaxed alpha weights per cell type, for logging only.
        weights_normal = F.softmax(model.module.alphas_normal if is_multi_gpu else model.alphas_normal, dim=-1).detach().cpu().numpy()
        weights_reduce = F.softmax(model.module.alphas_reduce if is_multi_gpu else model.alphas_reduce, dim=-1).detach().cpu().numpy()
        # write data to excel files
        write_data(epoch, io_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
                   performance_statistics, arch_statistics, genotype_statistics,
                   metrics_path, weights_path, genotypes_path)
        # save model parameters
        save_model = model.module if is_multi_gpu else model
        utils.save(save_model, os.path.join(args.save, 'weights.pt'))
        """
        ADDED BY LOUIS:
        """
        # save errors_dict to pickle file
        with open(dir_path + '/errors_dict.pkl', 'wb') as f:
            pickle.dump(errors_dict, f)
def train(epoch, train_queue, valid_queue, model, architect, criterion, optimizer, lr):
    """One search epoch: alternate architecture and weight updates.

    For every training mini-batch the architect first updates the alpha
    parameters on a validation mini-batch, then the supernet weights are
    updated on the training mini-batch (alpha params are excluded from the
    weight gradient clipping/step).

    Returns:
        (top1_avg, top5_avg, mean_loss) over the epoch.
    """
    global is_multi_gpu
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    trained_data_size = 0
    for step, (input, target) in enumerate(train_queue):
        # one mini-batch
        print('\rtrain mini batch {:03d}'.format(step), end=' ')
        model.train()
        n = input.size(0)
        trained_data_size += n
        if args.gumbel:
            # Anneal the Gumbel-softmax temperature linearly from tau_max to
            # tau_min over the course of training.
            model.module.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min)) if is_multi_gpu \
                else model.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min))
        input = input.cuda()
        target = target.cuda()
        # get a random minibatch from the search queue with replacement
        # NOTE(review): next(iter(...)) rebuilds the loader iterator every
        # step, so only the first batch order of a fresh iterator is used —
        # confirm this is intended.
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda()
        # Architecture (alpha) update on validation data.
        architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
        # Weight update on training data.
        optimizer.zero_grad()
        logits = model(input, gumbel=args.gumbel)
        loss = criterion(logits, target)
        loss.backward()
        # Clip gradients of the weight parameters only (exclude alphas).
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        nn.utils.clip_grad_norm_(model_params, args.grad_clip)
        optimizer.step()
        if args.dataset in ['cifar100', 'cifar10']:
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
        elif args.dataset == 'ADP-Release1':
            # Multi-label: threshold sigmoid outputs at 0.5.
            m = nn.Sigmoid()
            preds = (m(logits) > 0.5).int()
            prec1, prec5 = utils.accuracyADP(preds, target)
            objs.update(loss.item(), n)
            top1.update(prec1.double(), n)
            top5.update(prec5.double(), n)
        if step % args.report_freq == 0:
            print('\n')
            if args.dataset in ['cifar100', 'cifar10']:
                objs_avg = objs.avg
                top1_avg = top1.avg
                top5_avg = top5.avg
            elif args.dataset == 'ADP-Release1':
                objs_avg = objs.avg
                top1_avg = (top1.sum_accuracy.cpu().item() / (trained_data_size * n_classes))
                top5_avg = (top5.sum_accuracy.cpu().item() / trained_data_size)
            logging.info('train %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = objs.avg
        top1_avg = top1.avg
        top5_avg = top5.avg
    elif args.dataset == 'ADP-Release1':
        objs_avg = objs.avg
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(train_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(train_queue.dataset))
    return top1_avg, top5_avg, objs_avg
def infer(valid_queue, model, criterion):
    """Evaluate the supernet on the search-validation queue.

    CIFAR datasets use the meters' running top-1/top-5 averages; the
    multi-label ADP dataset accumulates thresholded-sigmoid accuracies and
    normalizes them by the number of evaluated samples (times n_classes for
    top-1).

    Returns:
        (top1_avg, top5_avg, mean_loss)
    """
    global is_multi_gpu
    loss_meter = utils.AverageMeter()
    acc1_meter = utils.AverageMeter()
    acc5_meter = utils.AverageMeter()
    model.eval()
    # for ADP dataset
    preds = 0
    seen = 0
    with torch.no_grad():
        for step, (images, labels) in enumerate(valid_queue):
            print('\rinfer mini batch {:03d}'.format(step), end=' ')
            images = images.cuda()
            labels = labels.cuda()
            logits = model(images)
            batch_loss = criterion(logits, labels)
            batch_size = images.size(0)
            seen += batch_size
            if args.dataset in ['cifar100', 'cifar10']:
                acc1, acc5 = utils.accuracy(logits, labels, topk=(1, 5))
                loss_meter.update(batch_loss.item(), batch_size)
                acc1_meter.update(acc1.item(), batch_size)
                acc5_meter.update(acc5.item(), batch_size)
            elif args.dataset == 'ADP-Release1':
                sigmoid = nn.Sigmoid()
                preds = (sigmoid(logits) > 0.5).int()
                acc1, acc5 = utils.accuracyADP(preds, labels)
                loss_meter.update(batch_loss.item(), batch_size)
                acc1_meter.update(acc1.double(), batch_size)
                acc5_meter.update(acc5.double(), batch_size)
            if step % args.report_freq == 0:
                print('\n')
                if args.dataset in ['cifar100', 'cifar10']:
                    objs_avg = loss_meter.avg
                    top1_avg = acc1_meter.avg
                    top5_avg = acc5_meter.avg
                elif args.dataset == 'ADP-Release1':
                    objs_avg = loss_meter.avg
                    top1_avg = acc1_meter.sum_accuracy.cpu().item() / (seen * n_classes)
                    top5_avg = acc5_meter.sum_accuracy.cpu().item() / seen
                logging.info('valid %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = loss_meter.avg
        top1_avg = acc1_meter.avg
        top5_avg = acc5_meter.avg
    elif args.dataset == 'ADP-Release1':
        total = len(valid_queue.dataset)
        objs_avg = loss_meter.avg
        top1_avg = acc1_meter.sum_accuracy.cpu().item() / (total * n_classes)
        top5_avg = acc5_meter.sum_accuracy.cpu().item() / total
    return top1_avg, top5_avg, objs_avg
def write_data(epoch, net_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
               perform_stat, arch_stat, genotype_stat, metrics_path, weights_path, genotypes_path):
    """Record one epoch's search statistics and persist them to Excel files.

    The three accumulator dicts (perform_stat, arch_stat, genotype_stat) are
    mutated in place — one new set of keys per epoch — and each is re-written
    in full to its Excel file so the files always reflect every epoch so far.

    Args:
        epoch: current epoch index (used in every column key).
        net_metrics: probing metric vector (e.g. knowledge gain) for this epoch.
        lr_metrics: per-layer learning rates (only recorded when args.adas).
        weights_normal / weights_reduce: softmaxed alpha matrices; one column
            per candidate operation, in the search-space op order below.
        genotype: derived genotype for this epoch.
        perform_stat / arch_stat / genotype_stat: accumulator dicts.
        metrics_path / weights_path / genotypes_path: output .xlsx paths.
    """
    # genotype
    genotype_stat['epoch_{}'.format(epoch)] = [genotype]
    genotypes_df = pd.DataFrame(data=genotype_stat)
    genotypes_df.to_excel(genotypes_path)
    # io metrics
    perform_stat['S_epoch_{}'.format(epoch)] = net_metrics
    if args.adas:
        # Adas also exposes a per-layer learning-rate vector.
        perform_stat['learning_rate_epoch_{}'.format(epoch)] = lr_metrics
    # write metrics data to xls file
    metrics_df = pd.DataFrame(data=perform_stat)
    metrics_df.to_excel(metrics_path)
    # alpha weights: one column per candidate operation, per cell type.
    # The tuple below fixes the column order and MUST match the op order of
    # the search space (none, skip, then sep_conv{1..4} x {3x3,5x5,7x7}).
    op_names = ('none', 'skip_connect',
                'sep_conv1_3x3', 'sep_conv1_5x5', 'sep_conv1_7x7',
                'sep_conv2_3x3', 'sep_conv2_5x5', 'sep_conv2_7x7',
                'sep_conv3_3x3', 'sep_conv3_5x5', 'sep_conv3_7x7',
                'sep_conv4_3x3', 'sep_conv4_5x5', 'sep_conv4_7x7')
    for cell_type, weights in (('normal', weights_normal), ('reduce', weights_reduce)):
        for col, op in enumerate(op_names):
            arch_stat['{}_{}_epoch{}'.format(cell_type, op, epoch)] = weights[:, col]
    # write weights data to xls file
    weights_df = pd.DataFrame(data=arch_stat)
    weights_df.to_excel(weights_path)
# Entry point: run the architecture search only when executed as a script.
if __name__ == '__main__':
    main()
| 24,566 | 42.713523 | 153 | py |
PIBConv | PIBConv-main/cnn/architect.py | import torch
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from collections import OrderedDict
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
class Architect(object):
    """Bilevel architecture optimizer for DARTS-style search.

    Owns an Adam optimizer over the model's architecture (alpha) parameters
    and implements both the first-order update and the one-step unrolled
    (second-order) update, including the finite-difference Hessian-vector
    product approximation. Weight parameters are always separated from
    alpha parameters by filtering on parameter object ids.
    """

    def __init__(self, model, criterion, args):
        gpus = [int(i) for i in args.gpu.split(',')]
        self.is_multi_gpu = True if len(gpus) > 1 else False
        self.network_momentum = args.momentum
        self.network_weight_decay = args.weight_decay
        self.model = model
        self.criterion = criterion
        self.adas = args.adas
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        # Adam over the alphas only; weights are handled by the outer optimizer.
        self.optimizer = torch.optim.Adam(arch_parameters,
                                          lr=args.arch_learning_rate, betas=(0.5, 0.999),
                                          weight_decay=args.arch_weight_decay)
        self.gumbel = args.gumbel
        self.grad_clip = args.grad_clip

    def _compute_unrolled_model(self, input, target, lr_vector, network_optimizer):
        """Build the virtual model w' = w - lr * dwLtrain(w, alpha).

        Flattens the current weights, applies one simulated SGD step (with
        momentum and weight decay) and reconstructs a model from the result.
        """
        logits = self.model(input, self.gumbel)
        loss = self.criterion(logits, target)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_theta = _concat(arch_parameters).data
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        model_theta = _concat(model_params).data
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        try:
            moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in model_params).mul_(
                self.network_momentum)
        # NOTE(review): bare except — silently falls back to zero momentum for
        # ANY failure (e.g. no momentum buffers yet on the first step); a
        # narrower `except KeyError` would be safer.
        except:
            moment = torch.zeros_like(model_theta)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = list(filter(lambda p: id(p) not in arch_params, model_parameters))
        # using gumbel-softmax:
        # for unused ops there will be no gradient and this needs to be handled
        if self.gumbel:
            dtheta = _concat([grad_i + self.network_weight_decay * theta_i if grad_i is not None
                              else self.network_weight_decay * theta_i
                              for grad_i, theta_i in
                              zip(torch.autograd.grad(loss, model_params, allow_unused=True), model_params)])
        # not using gumbel-softmax
        else:
            dtheta = _concat([grad_i + self.network_weight_decay * theta_i
                              for grad_i, theta_i in
                              zip(torch.autograd.grad(loss, model_params), model_params)])
        # Adas: apply a different learning rate per parameter tensor.
        if self.adas:
            iteration_p = 0
            offset_p = 0
            offset_dp = 0
            arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
            arch_params = list(map(id, arch_parameters))
            model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
            model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
            for p in model_params:
                p_length = np.prod(p.size())
                lr = lr_vector[iteration_p]
                d_p = moment[offset_p: offset_p + p_length] + \
                    dtheta[offset_dp: offset_dp + p_length]
                model_theta[offset_p: offset_p + p_length].sub_(d_p, alpha=lr)
                offset_p += p_length
                offset_dp += p_length
                iteration_p += 1
        # original DARTS
        else:
            # NOTE(review): legacy two-positional-argument overload, i.e.
            # theta -= lr_vector * (moment + dtheta); this form is removed in
            # recent PyTorch (use .sub_(moment + dtheta, alpha=lr_vector)) —
            # TODO confirm the torch version this targets.
            model_theta.sub_(lr_vector, moment + dtheta)
        theta = torch.cat([arch_theta, model_theta])
        unrolled_model = self._construct_model_from_theta(theta)
        return unrolled_model

    def step(self, input_train, target_train, input_valid, target_valid, lr, network_optimizer, unrolled):
        """Perform one alpha update (first-order or one-step unrolled)."""
        self.optimizer.zero_grad()
        if unrolled:
            self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, lr, network_optimizer)
        else:
            self._backward_step(input_valid, target_valid)
        # Add gradient clipping for gumbel-softmax because it leads to gradients with high magnitude
        if self.gumbel:
            arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
            torch.nn.utils.clip_grad_norm_(arch_parameters, self.grad_clip)
        self.optimizer.step()

    def _backward_step(self, input_valid, target_valid):
        """First-order approximation: backprop validation loss through alphas."""
        logits = self.model(input_valid, self.gumbel)
        loss = self.criterion(logits, target_valid)
        loss.backward()

    def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, lr, network_optimizer):
        """Second-order update: differentiate Lval(w', alpha) w.r.t. alpha."""
        # eqn(6):dαLval(w',α) ,where w' = w − ξ*dwLtrain(w, α)
        # compute w'
        unrolled_model = self._compute_unrolled_model(input_train, target_train, lr,
                                                      network_optimizer)  # unrolled_model: w -> w'
        # compute Lval: validation loss
        logits = unrolled_model(input_valid, self.gumbel)
        unrolled_loss = self.criterion(logits, target_valid)
        unrolled_loss.backward()
        # compute dαLval(w',α)
        unrolled_arch_parameters = unrolled_model.module.arch_parameters() if self.is_multi_gpu else unrolled_model.arch_parameters()
        dalpha = [v.grad for v in unrolled_arch_parameters]  # grad wrt alpha
        # compute dw'Lval(w',α)
        # gumbel-softmax
        unrolled_arch_parameters = unrolled_model.module.arch_parameters() if self.is_multi_gpu else unrolled_model.arch_parameters()
        unrolled_arch_params = list(map(id, unrolled_arch_parameters))
        unrolled_model_parameters = unrolled_model.module.parameters() if self.is_multi_gpu else unrolled_model.parameters()
        unrolled_model_params = filter(lambda p: id(p) not in unrolled_arch_params, unrolled_model_parameters)
        if self.gumbel:
            vector = []
            for v in unrolled_model_params:
                if v.grad is not None:
                    # used operation by Gumbel-softmax
                    vector.append(v.grad.data)
                else:
                    # unused operation by Gumbel-softmax
                    vector.append(torch.zeros_like(v))
        else:
            vector = [v.grad.data for v in unrolled_model_params]
        # Adas: use different etas for different w's
        if self.adas:
            for i, p in enumerate(vector):
                p.mul_(lr[i])
        # eqn(8): (dαLtrain(w+,α)-dαLtrain(w-,α))/(2*epsilon)
        # where w+=w+dw'Lval(w',α)*epsilon  w- = w-dw'Lval(w',α)*epsilon
        implicit_grads = self._hessian_vector_product(vector, input_train, target_train)
        # eqn(6)-eqn(8): dαLval(w',α)-(dαLtrain(w+,α)-dαLtrain(w-,α))/(2*epsilon)
        for g, ig in zip(dalpha, implicit_grads):
            # g.data.sub_(ig.data, alpha=eta)
            g.data.sub_(ig.data)
        # update α: copy the assembled gradient into the live model's alphas.
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        for v, g in zip(arch_parameters, dalpha):
            if v.grad is None:
                v.grad = Variable(g.data)
            else:
                v.grad.data.copy_(g.data)

    def _construct_model_from_theta(self, theta):
        """Instantiate a fresh model whose weights come from flat vector `theta`."""
        model_new = self.model.module.new() if self.is_multi_gpu else self.model.new()
        model_dict = self.model.module.state_dict() if self.is_multi_gpu else self.model.state_dict()
        params, offset = {}, 0
        named_parameters = self.model.module.named_parameters() if self.is_multi_gpu else self.model.named_parameters()
        # Slice theta back into tensors following the named-parameter order.
        for k, v in named_parameters:
            v_length = np.prod(v.size())
            params[k] = theta[offset: offset + v_length].view(v.size())
            offset += v_length
        assert offset == len(theta)
        model_dict.update(params)
        if self.is_multi_gpu:
            # Re-prefix keys with 'module.' so they match a DataParallel wrapper.
            new_state_dict = OrderedDict()
            for k, v in model_dict.items():
                if 'module' not in k:
                    k = 'module.' + k
                else:
                    k = k.replace('features.module.', 'module.features.')
                new_state_dict[k] = v
        else:
            new_state_dict = model_dict
        if self.is_multi_gpu:
            model_new = nn.DataParallel(model_new)
            cudnn.benchmark = True
        model_new.load_state_dict(new_state_dict)
        return model_new.cuda()

    def _hessian_vector_product(self, vector, input, target, r=1e-2):
        """Finite-difference approximation of the Hessian-vector product.

        Perturbs the weights by +/- R*vector in place, evaluates the alpha
        gradients at both points, and restores the weights before returning
        (grads_p - grads_n) / (2R).
        """
        R = r / _concat(vector).norm()
        # eqn(8): dαLtrain(w+,α)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        # compute w+ in eqn(8): w+ = w + dw'Lval(w',α) * epsilon
        for p, v in zip(model_params, vector):
            p.data.add_(v, alpha=R)
        logits = self.model(input, self.gumbel)
        loss = self.criterion(logits, target)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        grads_p = torch.autograd.grad(loss, arch_parameters)
        # eqn(8): dαLtrain(w-,α)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        # compute w- in eqn(8): w- = w - dw'Lval(w',α) * epsilon
        for p, v in zip(model_params, vector):
            p.data.sub_(v, alpha=2 * R)
        logits = self.model(input, self.gumbel)
        loss = self.criterion(logits, target)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        grads_n = torch.autograd.grad(loss, arch_parameters)
        # recover w back
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        for p, v in zip(model_params, vector):
            p.data.add_(v, alpha=R)
        return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]
| 11,819 | 48.456067 | 133 | py |
PIBConv | PIBConv-main/cnn/train_imagenet.py | import os
import sys
import numpy as np
import time
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# ---- Command-line interface for ImageNet training of a searched genotype ----
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
parser.add_argument('--decay_period', type=int, default=1, help='epochs between two learning rate decays')
parser.add_argument('--parallel', action='store_true',default=False, help='data parallelism')
args = parser.parse_args()
# Per-run experiment directory stamped with launch time; all *.py scripts are
# copied there for reproducibility.
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log both to stdout and to <save>/log.txt using the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# ImageNet-1k class count.
CLASSES = 1000
class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy loss with label smoothing.

    The hard one-hot target is softened before computing the loss: the true
    class receives probability (1 - epsilon) + epsilon / num_classes and every
    other class receives epsilon / num_classes.
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        # inputs: (batch, num_classes) raw logits; targets: (batch,) class ids.
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs)
        one_hot.scatter_(1, targets.unsqueeze(1), 1)
        # Blend the one-hot distribution with the uniform distribution.
        smooth = one_hot * (1 - self.epsilon) + self.epsilon / self.num_classes
        # Mean over the batch, summed over classes.
        return (-smooth * log_probs).mean(0).sum()
def main():
    """Train a fixed (searched) DARTS genotype on ImageNet and checkpoint the best model.

    Reads configuration from the module-level `args`; exits if CUDA is unavailable.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed NumPy/CPU/CUDA RNGs; cudnn.benchmark picks the fastest conv
    # algorithms for the fixed 224x224 input size.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Look up the architecture by name in genotypes.py.
    # NOTE(review): eval() on a CLI argument; getattr(genotypes, args.arch)
    # would be the safer equivalent.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES,
                    args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Plain cross-entropy for validation; label-smoothed loss for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
    # Multiply lr by gamma every decay_period epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, args.decay_period, gamma=args.gamma)
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() before training follows the pre-1.1
        # PyTorch ordering this script was written against.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability is annealed linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(
            train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        # Checkpoint every epoch; flag the best top-1 model so far.
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch over `train_queue`.

    Args:
        train_queue: DataLoader yielding (input, target) batches.
        model: network whose forward returns (logits, logits_aux).
        criterion: loss applied to logits (label-smoothed CE in main()).
        optimizer: optimizer for the model weights.

    Returns:
        (average top-1 accuracy, average loss) over the epoch.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        # Fix: `async` became a reserved keyword in Python 3.7, so the old
        # `.cuda(async=True)` is a SyntaxError; `non_blocking` is the modern
        # spelling. Variable() wrappers are no-ops since PyTorch 0.4 and
        # were removed.
        target = target.cuda(non_blocking=True)
        input = input.cuda()
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Auxiliary-tower regularization (NASNet/DARTS style).
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight*loss_aux
        loss.backward()
        # Fix: clip_grad_norm was renamed to the in-place clip_grad_norm_
        # in PyTorch 0.4.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        # Fix: .item() replaces the removed 0-dim indexing `tensor.data[0]`.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step,
                         objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate the model on `valid_queue`.

    Returns:
        (average top-1 accuracy, average top-5 accuracy, average loss).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    # Fix: Variable(..., volatile=True) and `.cuda(async=True)` were removed
    # from PyTorch / became a Python 3.7 SyntaxError; torch.no_grad() plus
    # `non_blocking=True` is the modern equivalent.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # Fix: .item() replaces the removed `tensor.data[0]` indexing.
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step,
                             objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 8,636 | 34.9875 | 106 | py |
PIBConv | PIBConv-main/cnn/train_search_adas.py | import os
import sys
import time
import glob
import utils
import logging
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import pickle
import gc
from copy import deepcopy
from numpy import linalg as LA
from torch.autograd import Variable
from model_search import Network
from architect import Architect
from adas import Adas
from adas.metrics import Metrics
# for ADP dataset
from ADP_utils.classesADP import classesADP
# ---- Command-line interface for DARTS architecture search (Adas variant) ----
parser = argparse.ArgumentParser("adaptive_darts")
####################
# Dataset
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP-Release1', help='valid datasets: cifar10, cifar100, ADP-Release1')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--image_size', type=int, default=64, help='CPATH image size')
# color augmentation
parser.add_argument('--color_aug', action='store_true', default=False, help='use color augmentation')
parser.add_argument('--color_distortion', type=float, default=0.3, help='color distortion param')
# For ADP dataset only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP level')
####################
# Training details
parser.add_argument('--gpu', type=str, default='0', help='gpu device id')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.175, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--learnable_bn', action='store_true', default=False, help='learnable parameters in batch normalization')
# Gumbel-softmax
parser.add_argument('--gumbel', action='store_true', default=False, help='use or not Gumbel-softmax trick')
parser.add_argument('--tau_max', type=float, default=10.0, help='initial tau')
parser.add_argument('--tau_min', type=float, default=1.0, help='minimum tau')
# Adas optimizer
parser.add_argument('--adas', action='store_true', default=False, help='whether or not to use adas optimizer')
parser.add_argument('--scheduler_beta', type=float, default=0.98, help='beta for lr scheduler')
parser.add_argument('--scheduler_p', type=int, default=1, help='p for lr scheduler')
parser.add_argument('--step_size', type=int, default=50, help='step_size for dropping lr')
parser.add_argument('--gamma', type=float, default=1.0, help='gamma for dropping lr')
####################
# Model details
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=4, help='total number of layers')
parser.add_argument('--node', type=int, default=4, help='number of nodes in a cell')
####################
# Others
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--file_name', type=str, default='_', help='metrics and weights data file name')
args = parser.parse_args()
# Per-run experiment directory stamped with dataset and launch time; all *.py
# scripts are archived there for reproducibility.
args.save = 'Search-{}-data-{}-{}'.format(args.save, args.dataset, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log both to stdout and to <save>/log.txt using the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Resolve the number of output classes from the chosen dataset.
if args.dataset == 'cifar100':
    n_classes = 100
    data_folder = 'cifar-100-python'
elif args.dataset == 'cifar10':
    n_classes = 10
    data_folder = 'cifar-10-batches-py'
elif args.dataset == 'ADP-Release1':
    n_classes = classesADP[args.adp_level]['numClasses']
else:
    logging.info('dataset not supported')
    sys.exit(1)
# Set True in main() when more than one GPU id is given.
is_multi_gpu = False
def main():
    """Architecture-search entry point.

    Builds the search supernetwork, alternates weight/alpha updates for
    args.epochs epochs, and dumps per-epoch metrics, architecture weights,
    genotypes (Excel) and accuracy curves (pickle) under ../save_data_<file_name>.
    """
    gc.collect()
    torch.cuda.empty_cache()
    global is_multi_gpu
    gpus = [int(i) for i in args.gpu.split(',')]
    logging.info('gpus = %s' % gpus)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))
    else:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        is_multi_gpu = True
    # Seed NumPy/CPU/CUDA RNGs for reproducibility.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    # load dataset
    if args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'ADP-Release1':
        train_transform, valid_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train_search', portion=args.train_portion)
        valid_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='valid_search', portion=args.train_portion)
    # CIFAR: split one training set into weight/alpha halves by index;
    # ADP: dedicated train_search / valid_search splits.
    if args.dataset in ['cifar100', 'cifar10']:
        num_train = len(train_data)
        indices = list(range(num_train))
        split = int(np.floor(args.train_portion * num_train))
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
            pin_memory=True, num_workers=0)
    elif args.dataset == 'ADP-Release1':
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(train_data),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            valid_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(valid_data),
            pin_memory=True, num_workers=0)
    # build network
    if args.dataset in ['cifar100', 'cifar10']:
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()
    elif args.dataset == 'ADP-Release1':
        # Multi-label loss with per-class weights inversely proportional to
        # class frequency in the training split.
        dataset_size = len(train_queue.dataset)
        print('train dataset size:', len(train_queue.dataset))
        print('valid dataset size:', len(valid_queue.dataset))
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    model = Network(args.init_channels, n_classes, args.layers, criterion, learnable_bn=args.learnable_bn, steps=args.node, multiplier=args.node)
    if is_multi_gpu:
        model = nn.DataParallel(model)
    model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Separate the architecture parameters (alphas) from the model weights by
    # object identity so the weight optimizer never touches the alphas.
    arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
    arch_params = list(map(id, arch_parameters))
    model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
    model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
    # Optimizer for model weights update
    # Use Adas: optimizer and scheduler
    if args.adas:
        optimizer = Adas(params=list(model_params),
                         lr=args.learning_rate,
                         beta=args.scheduler_beta,
                         step_size=args.step_size,
                         gamma=args.gamma,
                         momentum=args.momentum,
                         weight_decay=args.weight_decay)
    # Use SGD: default in DARTS paper
    else:
        optimizer = torch.optim.SGD(
            model_params,
            args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    architect = Architect(model, criterion, args)
    if not args.adas:
        # record probing metrics
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        metrics = Metrics(params=list(model_params))
    # files to record searching results
    performance_statistics = {}
    arch_statistics = {}
    genotype_statistics = {}
    # make new save dir (will raise error if already exists - so make sure to specify args.file_name)
    dir_path = f'../save_data_{args.file_name}'
    os.mkdir(dir_path)
    metrics_path = dir_path + '/metrics_stat_{}.xlsx'.format(args.file_name)
    weights_path = dir_path +'/weights_stat_{}.xlsx'.format(args.file_name)
    genotypes_path = dir_path +'/genotypes_stat_{}.xlsx'.format(args.file_name)
    errors_dict = {'train_acc_1': [], 'train_loss': [], 'valid_acc_1': [], 'valid_loss': [], 'train_acc_5': [], 'valid_acc_5':[]}
    # training
    for epoch in range(args.epochs):
        # NOTE(review): with Adas, lr is `optimizer.lr_vector` (not a scalar);
        # the '%e' log format below may not render it — confirm against Adas.
        if args.adas:
            lr = optimizer.lr_vector
        else:
            # Pre-1.1 PyTorch ordering: step the scheduler before training.
            scheduler.step()
            lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)
        genotype = model.module.genotype() if is_multi_gpu else model.genotype()
        logging.info('epoch: %d', epoch)
        logging.info('genotype = %s', genotype)
        # training
        train_acc_1, train_acc_5, train_obj = train(epoch, train_queue, valid_queue,
                                                    model, architect, criterion,
                                                    optimizer, lr)
        print('\n')
        logging.info('train_acc_1 %f, train_acc_5 %f', train_acc_1, train_acc_5)
        # validation
        valid_acc_1, valid_acc_5, valid_obj = infer(valid_queue, model, criterion)
        print('\n')
        logging.info('valid_acc_1 %f, valid_acc_5 %f', valid_acc_1, valid_acc_5)
        # update the errors dictionary
        errors_dict['train_acc_1'].append(train_acc_1)
        errors_dict['train_loss'].append(train_obj)
        errors_dict['valid_acc_1'].append(valid_acc_1)
        errors_dict['valid_loss'].append(valid_obj)
        errors_dict['valid_acc_5'].append(valid_acc_5)
        errors_dict['train_acc_5'].append(train_acc_5)
        # update network metrics (knowledge gain, condition mapping, etc)
        if args.adas:
            # AdaS: update learning rates
            optimizer.epoch_step(epoch)
            io_metrics = optimizer.KG
            lr_metrics = optimizer.velocity
        else:
            metrics()
            io_metrics = metrics.KG(epoch)
            lr_metrics = None
        # weights
        weights_normal = F.softmax(model.module.alphas_normal if is_multi_gpu else model.alphas_normal, dim=-1).detach().cpu().numpy()
        weights_reduce = F.softmax(model.module.alphas_reduce if is_multi_gpu else model.alphas_reduce, dim=-1).detach().cpu().numpy()
        # write data to excel files
        write_data(epoch, io_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
                   performance_statistics, arch_statistics, genotype_statistics,
                   metrics_path, weights_path, genotypes_path)
        # save model parameters
        save_model = model.module if is_multi_gpu else model
        utils.save(save_model, os.path.join(args.save, 'weights.pt'))
    # save errors_dict to pickle file
    with open(dir_path + '/errors_dict.pkl', 'wb') as f:
        pickle.dump(errors_dict, f)
def train(epoch, train_queue, valid_queue, model, architect, criterion, optimizer, lr):
    """One search epoch: per batch, update alphas on a validation batch, then
    update model weights on the training batch.

    Returns (top1_avg, top5_avg, objs_avg) over the whole epoch.
    """
    global is_multi_gpu
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    trained_data_size = 0
    for step, (input, target) in enumerate(train_queue):
        # one mini-batch
        print('\rtrain mini batch {:03d}'.format(step), end=' ')
        model.train()
        n = input.size(0)
        trained_data_size += n
        # Anneal the Gumbel-softmax temperature linearly from tau_max to tau_min.
        if args.gumbel:
            model.module.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min)) if is_multi_gpu \
                else model.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min))
        input = input.cuda()
        target = target.cuda()
        # get a random minibatch from the search queue with replacement
        # NOTE(review): next(iter(valid_queue)) builds a fresh iterator every
        # step; a persistent iterator would avoid the repeated setup cost.
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda()
        # logging.info('update arch...')
        architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
        # logging.info('update weights...')
        optimizer.zero_grad()
        logits = model(input, gumbel=args.gumbel)
        loss = criterion(logits, target)
        loss.backward()
        # Clip gradients of the model weights only (alphas excluded by identity).
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        nn.utils.clip_grad_norm_(model_params, args.grad_clip)
        optimizer.step()
        # CIFAR: top-1/top-5 accuracy; ADP: multi-label accuracy on
        # sigmoid-thresholded predictions.
        if args.dataset in ['cifar100', 'cifar10']:
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
        elif args.dataset == 'ADP-Release1':
            m = nn.Sigmoid()
            preds = (m(logits) > 0.5).int()
            prec1, prec5 = utils.accuracyADP(preds, target)
            objs.update(loss.item(), n)
            top1.update(prec1.double(), n)
            top5.update(prec5.double(), n)
        if step % args.report_freq == 0:
            print('\n')
            if args.dataset in ['cifar100', 'cifar10']:
                objs_avg = objs.avg
                top1_avg = top1.avg
                top5_avg = top5.avg
            elif args.dataset == 'ADP-Release1':
                objs_avg = objs.avg
                top1_avg = (top1.sum_accuracy.cpu().item() / (trained_data_size * n_classes))
                top5_avg = (top5.sum_accuracy.cpu().item() / trained_data_size)
            logging.info('train %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = objs.avg
        top1_avg = top1.avg
        top5_avg = top5.avg
    elif args.dataset == 'ADP-Release1':
        objs_avg = objs.avg
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(train_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(train_queue.dataset))
    return top1_avg, top5_avg, objs_avg
def infer(valid_queue, model, criterion):
    """Evaluate the supernetwork on `valid_queue` without gradient tracking.

    Returns (top1_avg, top5_avg, objs_avg).
    """
    global is_multi_gpu
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()
    # for ADP dataset
    preds = 0
    valided_data_size = 0
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            print('\rinfer mini batch {:03d}'.format(step), end=' ')
            input = input.cuda()
            target = target.cuda()
            logits = model(input)
            loss = criterion(logits, target)
            n = input.size(0)
            valided_data_size += n
            # CIFAR: top-1/top-5 accuracy; ADP: multi-label accuracy on
            # sigmoid-thresholded predictions.
            if args.dataset in ['cifar100', 'cifar10']:
                prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)
            elif args.dataset == 'ADP-Release1':
                m = nn.Sigmoid()
                preds = (m(logits) > 0.5).int()
                prec1, prec5 = utils.accuracyADP(preds, target)
                objs.update(loss.item(), n)
                top1.update(prec1.double(), n)
                top5.update(prec5.double(), n)
            if step % args.report_freq == 0:
                print('\n')
                if args.dataset in ['cifar100', 'cifar10']:
                    objs_avg = objs.avg
                    top1_avg = top1.avg
                    top5_avg = top5.avg
                elif args.dataset == 'ADP-Release1':
                    objs_avg = objs.avg
                    top1_avg = (top1.sum_accuracy.cpu().item() / (valided_data_size * n_classes))
                    top5_avg = (top5.sum_accuracy.cpu().item() / valided_data_size)
                logging.info('valid %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = objs.avg
        top1_avg = top1.avg
        top5_avg = top5.avg
    elif args.dataset == 'ADP-Release1':
        objs_avg = objs.avg
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(valid_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(valid_queue.dataset))
    return top1_avg, top5_avg, objs_avg
def write_data(epoch, net_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
               perform_stat, arch_stat, genotype_stat, metrics_path, weights_path, genotypes_path):
    """Record this epoch's search statistics and dump them to Excel files.

    The three accumulator dicts (perform_stat, arch_stat, genotype_stat) are
    mutated in place and fully re-written to their respective .xlsx paths on
    every call, so the files always reflect the complete history so far.
    """
    # Genotype history (one column per epoch).
    genotype_stat['epoch_{}'.format(epoch)] = [genotype]
    pd.DataFrame(data=genotype_stat).to_excel(genotypes_path)

    # Probing metrics; learning-rate velocities are only available with Adas.
    perform_stat['S_epoch_{}'.format(epoch)] = net_metrics
    if args.adas:
        perform_stat['learning_rate_epoch_{}'.format(epoch)] = lr_metrics
    pd.DataFrame(data=perform_stat).to_excel(metrics_path)

    # Architecture weights: one column per candidate operation, normal-cell
    # columns first, then reduce-cell columns (column index == op index).
    op_names = ('none', 'max', 'avg', 'skip', 'sep_3', 'sep_5', 'dil_3', 'dil_5')
    for col, op in enumerate(op_names):
        arch_stat['normal_{}_epoch{}'.format(op, epoch)] = weights_normal[:, col]
    for col, op in enumerate(op_names):
        arch_stat['reduce_{}_epoch{}'.format(op, epoch)] = weights_reduce[:, col]
    pd.DataFrame(data=arch_stat).to_excel(weights_path)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 22,044 | 43.445565 | 153 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.