| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
pycbc | pycbc-master/pycbc/libutils.py |
# Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides a simple interface for loading a shared library via ctypes,
allowing it to be specified in an OS-independent way and searched for preferentially
according to the paths that pkg-config specifies.
"""
import importlib, inspect
import os, fnmatch, ctypes, sys, subprocess
from ctypes.util import find_library
from collections import deque
from subprocess import getoutput
# Be careful setting the mode for opening libraries! Some libraries (e.g.
# libgomp) seem to require that DEFAULT_MODE is used. Others (e.g. FFTW when
# MKL is also present) require that os.RTLD_DEEPBIND is used. If you see
# segfaults around this code, experiment with this setting!
DEFAULT_RTLD_MODE = ctypes.DEFAULT_MODE
def pkg_config(pkg_libraries):
"""Use pkg-config to query for the location of libraries, library directories,
and header directories
Arguments:
        pkg_libraries(list): A list of packages as strings
Returns:
libraries(list), library_dirs(list), include_dirs(list)
"""
libraries=[]
library_dirs=[]
include_dirs=[]
# Check that we have the packages
for pkg in pkg_libraries:
if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0:
pass
else:
print("Could not find library {0}".format(pkg))
sys.exit(1)
    # Get the pkg-config flags
if len(pkg_libraries)>0 :
# PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths.
# On system-wide LAL installs, this is needed for swig to find lalswig.i
for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s" % ' '.join(pkg_libraries)).split():
if token.startswith("-l"):
libraries.append(token[2:])
elif token.startswith("-L"):
library_dirs.append(token[2:])
elif token.startswith("-I"):
include_dirs.append(token[2:])
return libraries, library_dirs, include_dirs
def pkg_config_header_strings(pkg_libraries):
""" Returns a list of header strings that could be passed to a compiler
"""
_, _, header_dirs = pkg_config(pkg_libraries)
header_strings = []
for header_dir in header_dirs:
header_strings.append("-I" + header_dir)
return header_strings
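# Usage sketch added for illustration (not part of the original module): it
# assumes a package such as 'gsl' is visible to pkg-config; if it is not,
# pkg_config() above will print an error and exit.
def _example_pkg_config_usage():
    libs, libdirs, incdirs = pkg_config(['gsl'])
    # libs/libdirs/incdirs hold the -l / -L / -I tokens reported by pkg-config
    cflags = pkg_config_header_strings(['gsl'])
    # cflags is a list of '-I<dir>' strings suitable for a compiler command line
    return libs, libdirs, incdirs, cflags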
def pkg_config_check_exists(package):
return (os.system('pkg-config --exists {0} 2>/dev/null'.format(package)) == 0)
def pkg_config_libdirs(packages):
"""
Returns a list of all library paths that pkg-config says should be included when
linking against the list of packages given as 'packages'. An empty return list means
that the package may be found in the standard system locations, irrespective of
pkg-config.
"""
# don't try calling pkg-config if NO_PKGCONFIG is set in environment
if os.environ.get("NO_PKGCONFIG", None):
return []
    # if calling pkg-config fails, don't continue and don't try again.
with open(os.devnull, "w") as FNULL:
try:
subprocess.check_call(["pkg-config", "--version"], stdout=FNULL)
except:
print(
"PyCBC.libutils: pkg-config call failed, "
"setting NO_PKGCONFIG=1",
file=sys.stderr,
)
os.environ['NO_PKGCONFIG'] = "1"
return []
# First, check that we can call pkg-config on each package in the list
for pkg in packages:
if not pkg_config_check_exists(pkg):
raise ValueError("Package {0} cannot be found on the pkg-config search path".format(pkg))
libdirs = []
for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs-only-L {0}".format(' '.join(packages))).split():
if token.startswith("-L"):
libdirs.append(token[2:])
return libdirs
def get_libpath_from_dirlist(libname, dirs):
"""
This function tries to find the architecture-independent library given by libname in the first
available directory in the list dirs. 'Architecture-independent' means omitting any prefix such
as 'lib' or suffix such as 'so' or 'dylib' or version number. Within the first directory in which
a matching pattern can be found, the lexicographically first such file is returned, as a string
giving the full path name. The only supported OSes at the moment are posix and mac, and this
function does not attempt to determine which is being run. So if for some reason your directory
has both '.so' and '.dylib' libraries, who knows what will happen. If the library cannot be found,
None is returned.
"""
dirqueue = deque(dirs)
while (len(dirqueue) > 0):
nextdir = dirqueue.popleft()
possible = []
# Our directory might be no good, so try/except
try:
for libfile in os.listdir(nextdir):
if fnmatch.fnmatch(libfile,'lib'+libname+'.so*') or \
fnmatch.fnmatch(libfile,'lib'+libname+'.dylib*') or \
fnmatch.fnmatch(libfile,'lib'+libname+'.*.dylib*') or \
fnmatch.fnmatch(libfile,libname+'.dll') or \
fnmatch.fnmatch(libfile,'cyg'+libname+'-*.dll'):
possible.append(libfile)
except OSError:
pass
# There might be more than one library found, we want the highest-numbered
if (len(possible) > 0):
possible.sort()
return os.path.join(nextdir,possible[-1])
# If we get here, we didn't find it...
return None
def get_ctypes_library(libname, packages, mode=DEFAULT_RTLD_MODE):
"""
This function takes a library name, specified in architecture-independent fashion (i.e.
omitting any prefix such as 'lib' or suffix such as 'so' or 'dylib' or version number) and
a list of packages that may provide that library, and according first to LD_LIBRARY_PATH,
then the results of pkg-config, and falling back to the system search path, will try to
return a CDLL ctypes object. If 'mode' is given it will be used when loading the library.
"""
libdirs = []
# First try to get from LD_LIBRARY_PATH
if "LD_LIBRARY_PATH" in os.environ:
libdirs += os.environ["LD_LIBRARY_PATH"].split(":")
# Next try to append via pkg_config
try:
libdirs += pkg_config_libdirs(packages)
except ValueError:
pass
# We might be using conda/pip/virtualenv or some combination. This can
# leave lib files in a directory that LD_LIBRARY_PATH or pkg_config
# can miss.
libdirs.append(os.path.join(sys.prefix, "lib"))
# Note that the function below can accept an empty list for libdirs, in
# which case it will return None
fullpath = get_libpath_from_dirlist(libname, libdirs)
if fullpath is None:
# This won't actually return a full-path, but it should be something
# that can be found by CDLL
fullpath = find_library(libname)
if fullpath is None:
# We got nothin'
return None
else:
if mode is None:
return ctypes.CDLL(fullpath)
else:
return ctypes.CDLL(fullpath, mode=mode)
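# Illustrative sketch added here (not in the original file): loading the FFTW
# double-precision library through the helper above. The library name 'fftw3'
# and its pkg-config package are assumptions; on a system without FFTW this
# simply returns None.
def _example_get_ctypes_library():
    libfftw3 = get_ctypes_library('fftw3', ['fftw3'], mode=ctypes.RTLD_GLOBAL)
    if libfftw3 is None:
        print("fftw3 could not be located", file=sys.stderr)
    return libfftw3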
def import_optional(library_name):
""" Try to import library but and return stub if not found
Parameters
----------
library_name: str
The name of the python library to import
Returns
-------
library: library or stub
        Either returns the library if importing is successful or it returns
a stub which raises an import error and message when accessed.
"""
try:
return importlib.import_module(library_name)
except ImportError:
# module wasn't found so let's return a stub instead to inform
# the user what has happened when they try to use related functions
class no_module(object):
def __init__(self, library):
self.library = library
def __getattribute__(self, attr):
if attr == 'library':
return super().__getattribute__(attr)
lib = self.library
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
fun = calframe[1][3]
msg =""" The function {} tried to access
'{}' of library '{}', however,
'{}' is not currently installed. To enable this
functionality install '{}' (e.g. through pip
/ conda / system packages / source).
""".format(fun, attr, lib, lib, lib)
raise ImportError(inspect.cleandoc(msg))
return no_module(library_name)
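# Brief usage sketch added for illustration ('bogus_package' is a made-up
# name): import_optional returns the real module when available, otherwise a
# stub whose attribute access raises a descriptive ImportError.
def _example_import_optional():
    maybe_mod = import_optional('bogus_package')
    try:
        maybe_mod.some_function()
    except ImportError as exc:
        print(exc, file=sys.stderr)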
| 9,494 | 39.063291 | 131 | py |
pycbc | pycbc-master/pycbc/sensitivity.py |
""" This module contains utilities for calculating search sensitivity
"""
import numpy
from pycbc.conversions import chirp_distance
from . import bin_utils
def compute_search_efficiency_in_bins(
found, total, ndbins,
sim_to_bins_function=lambda sim: (sim.distance,)):
"""
Calculate search efficiency in the given ndbins.
The first dimension of ndbins must be bins over injected distance.
sim_to_bins_function must map an object to a tuple indexing the ndbins.
"""
bins = bin_utils.BinnedRatios(ndbins)
# increment the numerator and denominator with found / found+missed injs
[bins.incnumerator(sim_to_bins_function(sim)) for sim in found]
[bins.incdenominator(sim_to_bins_function(sim)) for sim in total]
# regularize by setting denoms to 1 to avoid nans
bins.regularize()
# efficiency array is the ratio
eff = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=bins.ratio())
# compute binomial uncertainties in each bin
err_arr = numpy.sqrt(eff.array * (1-eff.array)/bins.denominator.array)
err = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=err_arr)
return eff, err
def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function):
"""
Calculate search sensitive volume by integrating efficiency in distance bins
No cosmological corrections are applied: flat space is assumed.
The first dimension of ndbins must be bins over injected distance.
    sim_to_bins_function must map an object to a tuple indexing the ndbins.
"""
eff, err = compute_search_efficiency_in_bins(
found, total, ndbins, sim_to_bins_function)
dx = ndbins[0].upper() - ndbins[0].lower()
r = ndbins[0].centres()
# volume and errors have one fewer dimension than the input NDBins
vol = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
errors = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
# integrate efficiency to obtain volume
vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)
# propagate errors in eff to errors in V
errors.array = numpy.sqrt(
((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(axis=-1)
)
return vol, errors
def volume_to_distance_with_errors(vol, vol_err):
""" Return the distance and standard deviation upper and lower bounds
Parameters
----------
vol: float
vol_err: float
Returns
-------
dist: float
ehigh: float
elow: float
"""
dist = (vol * 3.0/4.0/numpy.pi) ** (1.0/3.0)
ehigh = ((vol + vol_err) * 3.0/4.0/numpy.pi) ** (1.0/3.0) - dist
delta = numpy.where(vol >= vol_err, vol - vol_err, 0)
elow = dist - (delta * 3.0/4.0/numpy.pi) ** (1.0/3.0)
return dist, ehigh, elow
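# Worked example added for illustration (values approximate): a sphere of
# radius 100 has V = 4/3 * pi * 100**3 ~ 4.18879e6, and a 10% volume error
# maps to roughly +3.2 / -3.5 in distance.
def _example_volume_to_distance():
    vol = 4. / 3. * numpy.pi * 100. ** 3
    dist, ehigh, elow = volume_to_distance_with_errors(vol, 0.1 * vol)
    # dist == 100.0, ehigh ~ 3.23, elow ~ 3.45
    return dist, ehigh, elow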
def volume_montecarlo(found_d, missed_d, found_mchirp, missed_mchirp,
distribution_param, distribution, limits_param,
min_param=None, max_param=None):
"""
Compute sensitive volume and standard error via direct Monte Carlo integral
Injections should be made over a range of distances such that sensitive
volume due to signals closer than D_min is negligible, and efficiency at
distances above D_max is negligible
TODO : Replace this function by Collin's formula given in Usman et al. ?
OR get that coded as a new function?
Parameters
-----------
found_d: numpy.ndarray
The distances of found injections
missed_d: numpy.ndarray
The distances of missed injections
found_mchirp: numpy.ndarray
Chirp mass of found injections
missed_mchirp: numpy.ndarray
Chirp mass of missed injections
distribution_param: string
Parameter D of the injections used to generate a distribution over
distance, may be 'distance', 'chirp_distance'.
distribution: string
form of the distribution over the parameter, may be
'log' (uniform in log D)
'uniform' (uniform in D)
'distancesquared' (uniform in D**2)
'volume' (uniform in D**3)
limits_param: string
Parameter Dlim specifying limits inside which injections were made
        may be 'distance', 'chirp_distance'
min_param: float
        minimum value of Dlim at which injections were made; only used for
        the log distribution. If None, the minimum actually injected value
        will be used
max_param: float
maximum value of Dlim out to which injections were made; if None
the maximum actually injected value will be used
Returns
--------
volume: float
Volume estimate
volume_error: float
The standard error in the volume
"""
d_power = {
'log' : 3.,
'uniform' : 2.,
'distancesquared' : 1.,
'volume' : 0.
}[distribution]
mchirp_power = {
'log' : 0.,
'uniform' : 5. / 6.,
'distancesquared' : 5. / 3.,
'volume' : 15. / 6.
}[distribution]
# establish maximum physical distance: first for chirp distance distribution
if limits_param == 'chirp_distance':
mchirp_standard_bns = 1.4 * 2.**(-1. / 5.)
all_mchirp = numpy.concatenate((found_mchirp, missed_mchirp))
max_mchirp = all_mchirp.max()
if max_param is not None:
# use largest injected mchirp to convert back to distance
max_distance = max_param * \
(max_mchirp / mchirp_standard_bns)**(5. / 6.)
else:
max_distance = max(found_d.max(), missed_d.max())
elif limits_param == 'distance':
if max_param is not None:
max_distance = max_param
else:
# if no max distance given, use max distance actually injected
max_distance = max(found_d.max(), missed_d.max())
else:
raise NotImplementedError("%s is not a recognized parameter"
% limits_param)
# volume of sphere
montecarlo_vtot = (4. / 3.) * numpy.pi * max_distance**3.
# arrays of weights for the MC integral
if distribution_param == 'distance':
found_weights = found_d ** d_power
missed_weights = missed_d ** d_power
elif distribution_param == 'chirp_distance':
# weight by a power of mchirp to rescale injection density to the
# target mass distribution
found_weights = found_d ** d_power * \
found_mchirp ** mchirp_power
missed_weights = missed_d ** d_power * \
missed_mchirp ** mchirp_power
else:
raise NotImplementedError("%s is not a recognized distance parameter"
% distribution_param)
all_weights = numpy.concatenate((found_weights, missed_weights))
# measured weighted efficiency is w_i for a found inj and 0 for missed
# MC integral is volume of sphere * (sum of found weights)/(sum of all weights)
# over injections covering the sphere
mc_weight_samples = numpy.concatenate((found_weights, 0 * missed_weights))
mc_sum = sum(mc_weight_samples)
if limits_param == 'distance':
mc_norm = sum(all_weights)
elif limits_param == 'chirp_distance':
# if injections are made up to a maximum chirp distance, account for
# extra missed injections that would occur when injecting up to
# maximum physical distance : this works out to a 'chirp volume' factor
mc_norm = sum(all_weights * (max_mchirp / all_mchirp) ** (5. / 2.))
# take out a constant factor
mc_prefactor = montecarlo_vtot / mc_norm
# count the samples
if limits_param == 'distance':
Ninj = len(mc_weight_samples)
elif limits_param == 'chirp_distance':
# find the total expected number after extending from maximum chirp
# dist up to maximum physical distance
if distribution == 'log':
# only need minimum distance in this one case
if min_param is not None:
min_distance = min_param * \
(numpy.min(all_mchirp) / mchirp_standard_bns) ** (5. / 6.)
else:
min_distance = min(numpy.min(found_d), numpy.min(missed_d))
logrange = numpy.log(max_distance / min_distance)
Ninj = len(mc_weight_samples) + (5. / 6.) * \
sum(numpy.log(max_mchirp / all_mchirp) / logrange)
else:
Ninj = sum((max_mchirp / all_mchirp) ** mchirp_power)
# sample variance of efficiency: mean of the square - square of the mean
mc_sample_variance = sum(mc_weight_samples ** 2.) / Ninj - \
(mc_sum / Ninj) ** 2.
# return MC integral and its standard deviation; variance of mc_sum scales
# relative to sample variance by Ninj (Bienayme' rule)
vol = mc_prefactor * mc_sum
vol_err = mc_prefactor * (Ninj * mc_sample_variance) ** 0.5
return vol, vol_err
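# Minimal synthetic usage sketch added for illustration (the numbers are
# arbitrary): injections distributed uniformly in distance out to 400, with
# everything nearer than 200 treated as found.
def _example_volume_montecarlo():
    rng = numpy.random.RandomState(0)
    dist = rng.uniform(0., 400., size=1000)
    mchirp = numpy.ones_like(dist) * 1.2
    found = dist < 200.
    vol, vol_err = volume_montecarlo(dist[found], dist[~found],
                                     mchirp[found], mchirp[~found],
                                     'distance', 'uniform', 'distance',
                                     min_param=None, max_param=400.)
    return vol, vol_err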
def chirp_volume_montecarlo(
found_d, missed_d, found_mchirp, missed_mchirp,
distribution_param, distribution, limits_param, min_param, max_param):
assert distribution_param == 'chirp_distance'
assert limits_param == 'chirp_distance'
found_dchirp = chirp_distance(found_d, found_mchirp)
missed_dchirp = chirp_distance(missed_d, missed_mchirp)
# treat chirp distances in MC volume estimate as physical distances
return volume_montecarlo(found_dchirp, missed_dchirp, found_mchirp,
missed_mchirp, 'distance', distribution,
'distance', min_param, max_param)
def volume_binned_pylal(f_dist, m_dist, bins=15):
""" Compute the sensitive volume using a distance binned efficiency estimate
Parameters
-----------
f_dist: numpy.ndarray
The distances of found injections
m_dist: numpy.ndarray
The distances of missed injections
Returns
--------
volume: float
Volume estimate
volume_error: float
The standard error in the volume
"""
def sims_to_bin(sim):
return (sim, 0)
total = numpy.concatenate([f_dist, m_dist])
ndbins = bin_utils.NDBins([bin_utils.LinearBins(min(total), max(total), bins),
bin_utils.LinearBins(0., 1, 1)])
vol, verr = compute_search_volume_in_bins(f_dist, total, ndbins, sims_to_bin)
return vol.array[0], verr.array[0]
def volume_shell(f_dist, m_dist):
""" Compute the sensitive volume using sum over spherical shells.
Parameters
-----------
f_dist: numpy.ndarray
The distances of found injections
m_dist: numpy.ndarray
The distances of missed injections
Returns
--------
volume: float
Volume estimate
volume_error: float
The standard error in the volume
"""
f_dist.sort()
m_dist.sort()
distances = numpy.concatenate([f_dist, m_dist])
dist_sorting = distances.argsort()
distances = distances[dist_sorting]
low = 0
vol = 0
vol_err = 0
for i in range(len(distances)):
if i == len(distances) - 1:
break
high = (distances[i+1] + distances[i]) / 2
bin_width = high - low
if dist_sorting[i] < len(f_dist):
vol += 4 * numpy.pi * distances[i]**2.0 * bin_width
vol_err += (4 * numpy.pi * distances[i]**2.0 * bin_width)**2.0
low = high
vol_err = vol_err ** 0.5
return vol, vol_err
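# Illustrative sketch added here (synthetic data): the shell and binned
# estimators both take the found and missed distance arrays directly.
def _example_shell_and_binned():
    rng = numpy.random.RandomState(1)
    dist = rng.uniform(1., 400., size=500)
    found = dist < 200.
    v_shell, e_shell = volume_shell(dist[found], dist[~found])
    v_binned, e_binned = volume_binned_pylal(dist[found], dist[~found], bins=10)
    return (v_shell, e_shell), (v_binned, e_binned)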
| 11,609 | 35.624606 | 83 | py |
pycbc | pycbc-master/pycbc/bin_utils.py |
from bisect import bisect_right
try:
from fpconst import PosInf, NegInf
except ImportError:
# fpconst is not part of the standard library and might not be available
PosInf = float("+inf")
NegInf = float("-inf")
import numpy
import math
class Bins(object):
"""
Parent class for 1-dimensional binnings.
Not intended to be used directly, but to be subclassed for use in real
bins classes.
"""
def __init__(self, minv, maxv, n):
"""
Initialize a Bins instance. The three arguments are the
minimum and maximum of the values spanned by the bins, and
the number of bins to place between them. Subclasses may
require additional arguments, or different arguments
altogether.
"""
# convenience code to do some common initialization and
# input checking
if not isinstance(n, int):
raise TypeError(n)
if n < 1:
raise ValueError(n)
if maxv <= minv:
raise ValueError((minv, maxv))
self.minv = minv
self.maxv = maxv
self.n = n
def __len__(self):
return self.n
def __getitem__(self, x):
"""
Convert a co-ordinate to a bin index. The co-ordinate can
be a single value, or a Python slice instance describing a
range of values. If a single value is given, it is mapped
to the bin index corresponding to that value. If a slice
is given, it is converted to a slice whose lower bound is
the index of the bin in which the slice's lower bound
falls, and whose upper bound is 1 greater than the index of
the bin in which the slice's upper bound falls. Steps are
not supported in slices.
"""
if isinstance(x, slice):
if x.step is not None:
raise NotImplementedError("step not supported: %s" % repr(x))
return slice(self[x.start] if x.start is not None
else 0, self[x.stop] + 1 if x.stop is not None
else len(self))
raise NotImplementedError
def __iter__(self):
"""
If __iter__ does not exist, Python uses __getitem__ with
range(0) as input to define iteration. This is nonsensical
for bin objects, so explicitly unsupport iteration.
"""
raise NotImplementedError
def lower(self):
"""
Return an array containing the locations of the lower
boundaries of the bins.
"""
raise NotImplementedError
def centres(self):
"""
Return an array containing the locations of the bin
centres.
"""
raise NotImplementedError
def upper(self):
"""
Return an array containing the locations of the upper
boundaries of the bins.
"""
raise NotImplementedError
class IrregularBins(Bins):
"""
Bins with arbitrary, irregular spacing. We only require strict
monotonicity of the bin boundaries. N boundaries define N-1 bins.
Example:
>>> x = IrregularBins([0.0, 11.0, 15.0, numpy.inf])
>>> len(x)
3
>>> x[1]
0
>>> x[1.5]
0
>>> x[13]
1
>>> x[25]
2
>>> x[4:17]
slice(0, 3, None)
>>> IrregularBins([0.0, 15.0, 11.0])
Traceback (most recent call last):
...
ValueError: non-monotonic boundaries provided
>>> y = IrregularBins([0.0, 11.0, 15.0, numpy.inf])
>>> x == y
True
"""
def __init__(self, boundaries):
"""
Initialize a set of custom bins with the bin boundaries.
This includes all left edges plus the right edge. The
boundaries must be monotonic and there must be at least two
elements.
"""
# check pre-conditions
if len(boundaries) < 2:
raise ValueError("less than two boundaries provided")
boundaries = tuple(boundaries)
if any(a > b for a, b in zip(boundaries[:-1], boundaries[1:])):
raise ValueError("non-monotonic boundaries provided")
self.boundaries = boundaries
self.n = len(boundaries) - 1
self.minv = boundaries[0]
self.maxv = boundaries[-1]
def __getitem__(self, x):
if isinstance(x, slice):
return super(IrregularBins, self).__getitem__(x)
if self.minv <= x < self.maxv:
return bisect_right(self.boundaries, x) - 1
# special measure-zero edge case
if x == self.maxv:
return len(self.boundaries) - 2
raise IndexError(x)
def lower(self):
return numpy.array(self.boundaries[:-1])
def upper(self):
return numpy.array(self.boundaries[1:])
def centres(self):
return (self.lower() + self.upper()) / 2.0
class LinearBins(Bins):
"""
Linearly-spaced bins. There are n bins of equal size, the first
bin starts on the lower bound and the last bin ends on the upper
bound inclusively.
Example:
>>> x = LinearBins(1.0, 25.0, 3)
>>> x.lower()
array([ 1., 9., 17.])
>>> x.upper()
array([ 9., 17., 25.])
>>> x.centres()
array([ 5., 13., 21.])
>>> x[1]
0
>>> x[1.5]
0
>>> x[10]
1
>>> x[25]
2
>>> x[0:27]
Traceback (most recent call last):
...
IndexError: 0
>>> x[1:25]
slice(0, 3, None)
>>> x[:25]
slice(0, 3, None)
>>> x[10:16.9]
slice(1, 2, None)
>>> x[10:17]
slice(1, 3, None)
>>> x[10:]
slice(1, 3, None)
"""
def __init__(self, minv, maxv, n):
super(LinearBins, self).__init__(minv, maxv, n)
self.delta = float(maxv - minv) / n
def __getitem__(self, x):
if isinstance(x, slice):
return super(LinearBins, self).__getitem__(x)
if self.minv <= x < self.maxv:
return int(math.floor((x - self.minv) / self.delta))
if x == self.maxv:
# special "measure zero" corner case
return len(self) - 1
raise IndexError(x)
def lower(self):
return numpy.linspace(self.minv, self.maxv - self.delta, len(self))
def centres(self):
return numpy.linspace(self.minv + self.delta / 2.,
self.maxv - self.delta / 2., len(self))
def upper(self):
return numpy.linspace(self.minv + self.delta, self.maxv, len(self))
class LinearPlusOverflowBins(Bins):
"""
Linearly-spaced bins with overflow at the edges.
There are n-2 bins of equal size. The bin 1 starts on the lower bound and
bin n-2 ends on the upper bound. Bins 0 and n-1 are overflow going from
-infinity to the lower bound and from the upper bound to +infinity
respectively. Must have n >= 3.
Example:
>>> x = LinearPlusOverflowBins(1.0, 25.0, 5)
>>> x.centres()
array([-inf, 5., 13., 21., inf])
>>> x.lower()
array([-inf, 1., 9., 17., 25.])
>>> x.upper()
array([ 1., 9., 17., 25., inf])
>>> x[float("-inf")]
0
>>> x[0]
0
>>> x[1]
1
>>> x[10]
2
>>> x[24.99999999]
3
>>> x[25]
4
>>> x[100]
4
>>> x[float("+inf")]
4
>>> x[float("-inf"):9]
slice(0, 3, None)
>>> x[9:float("+inf")]
slice(2, 5, None)
"""
def __init__(self, minv, maxv, n):
if n < 3:
raise ValueError("n must be >= 3")
super(LinearPlusOverflowBins, self).__init__(minv, maxv, n)
self.delta = float(maxv - minv) / (n - 2)
def __getitem__(self, x):
if isinstance(x, slice):
return super(LinearPlusOverflowBins, self).__getitem__(x)
if self.minv <= x < self.maxv:
return int(math.floor((x - self.minv) / self.delta)) + 1
if x >= self.maxv:
# +infinity overflow bin
return len(self) - 1
if x < self.minv:
# -infinity overflow bin
return 0
raise IndexError(x)
def lower(self):
return numpy.concatenate(
(numpy.array([NegInf]),
self.minv + self.delta * numpy.arange(len(self) - 2),
numpy.array([self.maxv]))
)
def centres(self):
return numpy.concatenate(
(numpy.array([NegInf]),
self.minv + self.delta * (numpy.arange(len(self) - 2) + 0.5),
numpy.array([PosInf]))
)
def upper(self):
return numpy.concatenate(
(numpy.array([self.minv]),
self.minv + self.delta * (numpy.arange(len(self) - 2) + 1),
numpy.array([PosInf]))
)
class LogarithmicBins(Bins):
"""
Logarithmically-spaced bins.
There are n bins, each of whose upper and lower bounds differ by the same
factor. The first bin starts on the lower bound, and the last bin ends on
the upper bound inclusively.
Example:
>>> x = LogarithmicBins(1.0, 25.0, 3)
>>> x[1]
0
>>> x[5]
1
>>> x[25]
2
"""
def __init__(self, minv, maxv, n):
super(LogarithmicBins, self).__init__(minv, maxv, n)
self.delta = (math.log(maxv) - math.log(minv)) / n
def __getitem__(self, x):
if isinstance(x, slice):
return super(LogarithmicBins, self).__getitem__(x)
if self.minv <= x < self.maxv:
return int(math.floor((math.log(x) - math.log(self.minv)) /
self.delta))
if x == self.maxv:
# special "measure zero" corner case
return len(self) - 1
raise IndexError(x)
def lower(self):
return numpy.exp(
numpy.linspace(math.log(self.minv), math.log(self.maxv) -
self.delta, len(self))
)
def centres(self):
return numpy.exp(
numpy.linspace(math.log(self.minv), math.log(self.maxv) -
self.delta, len(self)) + self.delta / 2.
)
def upper(self):
return numpy.exp(
numpy.linspace(math.log(self.minv) + self.delta,
math.log(self.maxv), len(self))
)
class LogarithmicPlusOverflowBins(Bins):
"""
Logarithmically-spaced bins plus one bin at each end that goes to
zero and positive infinity respectively. There are n-2 bins each
of whose upper and lower bounds differ by the same factor. Bin 1
starts on the lower bound, and bin n-2 ends on the upper bound
inclusively. Bins 0 and n-1 are overflow bins extending from 0 to
the lower bound and from the upper bound to +infinity respectively.
Must have n >= 3.
Example:
>>> x = LogarithmicPlusOverflowBins(1.0, 25.0, 5)
>>> x[0]
0
>>> x[1]
1
>>> x[5]
2
>>> x[24.999]
3
>>> x[25]
4
>>> x[100]
4
>>> x.lower()
array([ 0. , 1. , 2.92401774, 8.54987973, 25. ])
>>> x.upper()
array([ 1. , 2.92401774, 8.54987973, 25. , inf])
>>> x.centres()
array([ 0. , 1.70997595, 5. , 14.62008869, inf])
"""
def __init__(self, minv, maxv, n):
if n < 3:
raise ValueError("n must be >= 3")
super(LogarithmicPlusOverflowBins, self).__init__(minv, maxv, n)
self.delta = (math.log(maxv) - math.log(minv)) / (n - 2)
def __getitem__(self, x):
if isinstance(x, slice):
return super(LogarithmicPlusOverflowBins, self).__getitem__(x)
if self.minv <= x < self.maxv:
return 1 + int(math.floor((math.log(x) - math.log(self.minv)) /
self.delta))
if x >= self.maxv:
# infinity overflow bin
return len(self) - 1
if x < self.minv:
# zero overflow bin
return 0
raise IndexError(x)
def lower(self):
return numpy.concatenate(
(numpy.array([0.]),
numpy.exp(numpy.linspace(math.log(self.minv), math.log(self.maxv),
len(self) - 1))
)
)
def centres(self):
return numpy.concatenate(
(numpy.array([0.]),
numpy.exp(numpy.linspace(math.log(self.minv), math.log(self.maxv) -
self.delta, len(self) - 2) + self.delta / 2.),
numpy.array([PosInf])
)
)
def upper(self):
return numpy.concatenate(
(numpy.exp(numpy.linspace(math.log(self.minv), math.log(self.maxv),
len(self) - 1)),
numpy.array([PosInf])
)
)
class NDBins(tuple):
"""
Multi-dimensional co-ordinate binning. An instance of this object
is used to convert a tuple of co-ordinates into a tuple of bin
indices. This can be used to allow the contents of an array object
to be accessed with real-valued coordinates.
NDBins is a subclass of the tuple builtin, and is initialized with
an iterable of instances of subclasses of Bins. Each Bins subclass
instance describes the binning to apply in the corresponding
co-ordinate direction, and the number of them sets the dimensions
of the binning.
Example:
>>> x = NDBins((LinearBins(1, 25, 3), LogarithmicBins(1, 25, 3)))
>>> x[1, 1]
(0, 0)
>>> x[1.5, 1]
(0, 0)
>>> x[10, 1]
(1, 0)
>>> x[1, 5]
(0, 1)
>>> x[1, 1:5]
(0, slice(0, 2, None))
>>> x.centres()
(array([ 5., 13., 21.]), array([ 1.70997595, 5. , 14.62008869]))
Note that the co-ordinates to be converted must be a tuple, even if
it is only a 1-dimensional co-ordinate.
"""
def __new__(cls, *args):
new = tuple.__new__(cls, *args)
new.minv = tuple(b.minv for b in new)
new.maxv = tuple(b.maxv for b in new)
new.shape = tuple(len(b) for b in new)
return new
def __getitem__(self, coords):
"""
When coords is a tuple, it is interpreted as an
N-dimensional co-ordinate which is converted to an N-tuple
of bin indices by the Bins instances in this object.
Otherwise coords is interpeted as an index into the tuple,
and the corresponding Bins instance is returned.
Example:
>>> x = NDBins((LinearBins(1, 25, 3), LogarithmicBins(1, 25, 3)))
>>> x[1, 1]
(0, 0)
>>> type(x[1])
        <class 'pycbc.bin_utils.LogarithmicBins'>
When used to convert co-ordinates to bin indices, each
co-ordinate can be anything the corresponding Bins instance
will accept. Note that the co-ordinates to be converted
must be a tuple, even if it is only a 1-dimensional
co-ordinate.
"""
if isinstance(coords, tuple):
if len(coords) != len(self):
raise ValueError("dimension mismatch")
return tuple(map(lambda b, c: b[c], self, coords))
else:
return tuple.__getitem__(self, coords)
def lower(self):
"""
Return a tuple of arrays, where each array contains the
locations of the lower boundaries of the bins in the
corresponding dimension.
"""
return tuple(b.lower() for b in self)
def centres(self):
"""
Return a tuple of arrays, where each array contains the
locations of the bin centres for the corresponding
dimension.
"""
return tuple(b.centres() for b in self)
def upper(self):
"""
Return a tuple of arrays, where each array contains the
locations of the upper boundaries of the bins in the
corresponding dimension.
"""
return tuple(b.upper() for b in self)
class BinnedArray(object):
"""
A convenience wrapper, using the NDBins class to provide access to
the elements of an array object. Technical reasons preclude
providing a subclass of the array object, so the array data is made
available as the "array" attribute of this class.
Examples:
Note that even for 1 dimensional arrays the index must be a tuple.
>>> x = BinnedArray(NDBins((LinearBins(0, 10, 5),)))
>>> x.array
array([ 0., 0., 0., 0., 0.])
>>> x[0,] += 1
>>> x[0.5,] += 1
>>> x.array
array([ 2., 0., 0., 0., 0.])
>>> x.argmax()
(1.0,)
Note the relationship between the binning limits, the bin centres,
and the co-ordinates of the BinnedArray
>>> x = BinnedArray(NDBins((LinearBins(-0.5, 1.5, 2), \
LinearBins(-0.5, 1.5, 2))))
>>> x.bins.centres()
(array([ 0., 1.]), array([ 0., 1.]))
>>> x[0, 0] = 0
>>> x[0, 1] = 1
>>> x[1, 0] = 2
>>> x[1, 1] = 4
>>> x.array
array([[ 0., 1.],
[ 2., 4.]])
>>> x[0, 0]
0.0
>>> x[0, 1]
1.0
>>> x[1, 0]
2.0
>>> x[1, 1]
4.0
>>> x.argmin()
(0.0, 0.0)
>>> x.argmax()
(1.0, 1.0)
"""
def __init__(self, bins, array=None, dtype="double"):
self.bins = bins
if array is None:
self.array = numpy.zeros(bins.shape, dtype=dtype)
else:
if array.shape != bins.shape:
raise ValueError("input array and input bins must have the "
"same shape")
self.array = array
def __getitem__(self, coords):
return self.array[self.bins[coords]]
def __setitem__(self, coords, val):
self.array[self.bins[coords]] = val
def __len__(self):
return len(self.array)
def copy(self):
"""
Return a copy of the BinnedArray. The .bins attribute is
shared with the original.
"""
return type(self)(self.bins, self.array.copy())
def centres(self):
"""
Return a tuple of arrays containing the bin centres for
each dimension.
"""
return self.bins.centres()
def argmin(self):
"""
Return the co-ordinates of the bin centre containing the
minimum value. Same as numpy.argmin(), converting the
indexes to bin co-ordinates.
"""
return tuple(centres[index] for centres, index in
zip(self.centres(), numpy.unravel_index(self.array.argmin(),
self.array.shape)))
def argmax(self):
"""
Return the co-ordinates of the bin centre containing the
maximum value. Same as numpy.argmax(), converting the
indexes to bin co-ordinates.
"""
return tuple(centres[index] for centres, index in
zip(self.centres(), numpy.unravel_index(self.array.argmax(),
self.array.shape)))
def logregularize(self, epsilon=2**-1074):
"""
        Find bins <= 0 and set them to epsilon. This has the
effect of allowing the logarithm of the array to be
evaluated without error.
"""
self.array[self.array <= 0] = epsilon
return self
class BinnedRatios(object):
"""
Like BinnedArray, but provides a numerator array and a denominator
array. The incnumerator() method increments a bin in the numerator
by the given weight, and the incdenominator() method increments a
bin in the denominator by the given weight. There are no methods
    provided for setting or decrementing either, but they are
accessible as the numerator and denominator attributes, which are
both BinnedArray objects.
"""
def __init__(self, bins, dtype="double"):
self.numerator = BinnedArray(bins, dtype=dtype)
self.denominator = BinnedArray(bins, dtype=dtype)
def __getitem__(self, coords):
return self.numerator[coords] / self.denominator[coords]
def bins(self):
return self.numerator.bins
def incnumerator(self, coords, weight=1):
"""
Add weight to the numerator bin at coords.
"""
self.numerator[coords] += weight
def incdenominator(self, coords, weight=1):
"""
Add weight to the denominator bin at coords.
"""
self.denominator[coords] += weight
def ratio(self):
"""
Compute and return the array of ratios.
"""
return self.numerator.array / self.denominator.array
def regularize(self):
"""
Find bins in the denominator that are 0, and set them to 1.
Presumably the corresponding bin in the numerator is also
0, so this has the effect of allowing the ratio array to be
evaluated without error, returning zeros in those bins that
have had no weight added to them.
"""
self.denominator.array[self.denominator.array == 0] = 1
return self
def logregularize(self, epsilon=2**-1074):
"""
Find bins in the denominator that are 0, and set them to 1,
while setting the corresponding bin in the numerator to
float epsilon. This has the effect of allowing the
logarithm of the ratio array to be evaluated without error.
"""
self.numerator.array[self.denominator.array == 0] = epsilon
self.denominator.array[self.denominator.array == 0] = 1
return self
def centres(self):
"""
Return a tuple of arrays containing the bin centres for
each dimension.
"""
return self.numerator.bins.centres()
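# Small usage sketch added for illustration: BinnedRatios used as a binned
# efficiency, mirroring how pycbc.sensitivity builds its efficiency estimate.
def _example_binned_ratios():
    bins = NDBins((LinearBins(0., 10., 5),))
    ratios = BinnedRatios(bins)
    for x in (1., 2., 3.):      # "found": increment numerator and denominator
        ratios.incnumerator((x,))
        ratios.incdenominator((x,))
    for x in (7., 8.):          # "missed": increment denominator only
        ratios.incdenominator((x,))
    ratios.regularize()         # avoid 0/0 in bins with no entries
    return ratios.ratio()       # per-bin efficiency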
| 21,555 | 29.317862 | 81 | py |
pycbc | pycbc-master/pycbc/opt.py |
# Copyright (C) 2015 Joshua Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module defines optimization flags and determines hardware features that some
other modules and packages may use in addition to some optimized utilities.
"""
import os, sys
import logging
from collections import OrderedDict
# Work around different Python versions to get runtime
# info on hardware cache sizes
_USE_SUBPROCESS = False
HAVE_GETCONF = False
if os.environ.get("LEVEL2_CACHE_SIZE", None) or os.environ.get("NO_GETCONF", None):
HAVE_GETCONF = False
elif sys.platform == 'darwin':
# Mac has getconf, but we can do nothing useful with it
HAVE_GETCONF = False
else:
import subprocess
_USE_SUBPROCESS = True
HAVE_GETCONF = True
if os.environ.get("LEVEL2_CACHE_SIZE", None):
LEVEL2_CACHE_SIZE = int(os.environ["LEVEL2_CACHE_SIZE"])
logging.info("opt: using LEVEL2_CACHE_SIZE %d from environment" % LEVEL2_CACHE_SIZE)
elif HAVE_GETCONF:
if _USE_SUBPROCESS:
def getconf(confvar):
return int(subprocess.check_output(['getconf', confvar]))
else:
def getconf(confvar):
            # the Python 2 'commands' module is gone; use subprocess instead
            retlist = subprocess.getstatusoutput('getconf ' + confvar)
return int(retlist[1])
LEVEL1_DCACHE_SIZE = getconf('LEVEL1_DCACHE_SIZE')
LEVEL1_DCACHE_ASSOC = getconf('LEVEL1_DCACHE_ASSOC')
LEVEL1_DCACHE_LINESIZE = getconf('LEVEL1_DCACHE_LINESIZE')
LEVEL2_CACHE_SIZE = getconf('LEVEL2_CACHE_SIZE')
LEVEL2_CACHE_ASSOC = getconf('LEVEL2_CACHE_ASSOC')
LEVEL2_CACHE_LINESIZE = getconf('LEVEL2_CACHE_LINESIZE')
LEVEL3_CACHE_SIZE = getconf('LEVEL3_CACHE_SIZE')
LEVEL3_CACHE_ASSOC = getconf('LEVEL3_CACHE_ASSOC')
LEVEL3_CACHE_LINESIZE = getconf('LEVEL3_CACHE_LINESIZE')
def insert_optimization_option_group(parser):
"""
Adds the options used to specify optimization-specific options.
Parameters
----------
parser : object
OptionParser instance
"""
optimization_group = parser.add_argument_group("Options for selecting "
"optimization-specific settings")
optimization_group.add_argument("--cpu-affinity", help="""
A set of CPUs on which to run, specified in a format suitable
to pass to taskset.""")
optimization_group.add_argument("--cpu-affinity-from-env", help="""
        The name of an environment variable containing a set
of CPUs on which to run, specified in a format suitable
to pass to taskset.""")
def verify_optimization_options(opt, parser):
"""Parses the CLI options, verifies that they are consistent and
reasonable, and acts on them if they are
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes
parser : object
OptionParser instance.
"""
# Pin to specified CPUs if requested
requested_cpus = None
if opt.cpu_affinity_from_env is not None:
if opt.cpu_affinity is not None:
logging.error("Both --cpu_affinity_from_env and --cpu_affinity specified")
sys.exit(1)
requested_cpus = os.environ.get(opt.cpu_affinity_from_env)
if requested_cpus is None:
logging.error("CPU affinity requested from environment variable %s "
"but this variable is not defined" % opt.cpu_affinity_from_env)
sys.exit(1)
if requested_cpus == '':
logging.error("CPU affinity requested from environment variable %s "
"but this variable is empty" % opt.cpu_affinity_from_env)
sys.exit(1)
if requested_cpus is None:
requested_cpus = opt.cpu_affinity
if requested_cpus is not None:
command = 'taskset -pc %s %d' % (requested_cpus, os.getpid())
retcode = os.system(command)
if retcode != 0:
logging.error('taskset command <%s> failed with return code %d' % \
(command, retcode))
sys.exit(1)
logging.info("Pinned to CPUs %s " % requested_cpus)
class LimitedSizeDict(OrderedDict):
""" Fixed sized dict for FIFO caching"""
def __init__(self, *args, **kwds):
self.size_limit = kwds.pop("size_limit", None)
OrderedDict.__init__(self, *args, **kwds)
self._check_size_limit()
def __setitem__(self, key, value):
OrderedDict.__setitem__(self, key, value)
self._check_size_limit()
def _check_size_limit(self):
if self.size_limit is not None:
while len(self) > self.size_limit:
self.popitem(last=False)
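# Brief usage sketch added for illustration: the FIFO behaviour of
# LimitedSizeDict; once the size limit is reached, the oldest entry is evicted.
def _example_limited_size_dict():
    cache = LimitedSizeDict(size_limit=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3        # evicts 'a', the oldest entry
    return list(cache)    # ['b', 'c']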
| 5,385 | 35.890411 | 89 | py |
pycbc | pycbc-master/pycbc/_version.py |
# Copyright (C) 2017 Duncan Brown
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains a function to provide an argparse action that reports
extremely verbose version information for PyCBC, lal, and lalsimulation.
"""
import os, sys
import argparse
import inspect
import subprocess
def print_link(library):
err_msg = "Could not execute runtime linker to determine\n" + \
"shared library paths for library:\n " + library + "\n"
FNULL = open(os.devnull, 'w')
try:
link = subprocess.check_output(['ldd', library],
stderr=FNULL)
except OSError:
try:
link = subprocess.check_output(['otool', '-L', library],
stderr=FNULL)
except:
link = err_msg
except:
link = err_msg
return link
class Version(argparse.Action):
""" print the pycbc, lal and lalsimulation versions """
def __init__(self, nargs=0, **kw):
super(Version, self).__init__(nargs=nargs, **kw)
def __call__(self, parser, namespace, values, option_string=None):
import pycbc
version_str="--- PyCBC Version --------------------------\n" + \
pycbc.version.git_verbose_msg + \
"\n\nImported from: " + inspect.getfile(pycbc)
version_str += "\n\n--- LAL Version ----------------------------\n"
try:
import lal.git_version
lal_module = inspect.getfile(lal)
lal_library = os.path.join( os.path.dirname(lal_module),
'_lal.so')
version_str += lal.git_version.verbose_msg + \
"\n\nImported from: " + lal_module + \
"\n\nRuntime libraries:\n" + print_link(lal_library)
except ImportError:
version_str += "\nLAL not installed in environment\n"
version_str += "\n\n--- LALSimulation Version-------------------\n"
try:
import lalsimulation.git_version
lalsimulation_module = inspect.getfile(lalsimulation)
lalsimulation_library = os.path.join( os.path.dirname(lalsimulation_module),
'_lalsimulation.so')
version_str += lalsimulation.git_version.verbose_msg + \
"\n\nImported from: " + lalsimulation_module + \
"\n\nRuntime libraries:\n" + print_link(lalsimulation_library)
except ImportError:
version_str += "\nLALSimulation not installed in environment\n"
print(version_str)
sys.exit(0)
| 3,219 | 37.795181 | 88 | py |
pycbc | pycbc-master/pycbc/detector.py |
# -*- coding: UTF-8 -*-
# Copyright (C) 2012 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module provides utilities for calculating detector responses and timing
between observatories.
"""
import os
import numpy as np
import lal
import pycbc.libutils
from pycbc.types import TimeSeries
from pycbc.types.config import InterpolatingConfigParser
from astropy.time import Time
from astropy import constants, coordinates, units
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.units.si import sday, meter
from numpy import cos, sin, pi
# Response functions are modelled after those in lalsuite and as also
# presented in https://arxiv.org/pdf/gr-qc/0008066.pdf
def gmst_accurate(gps_time):
gmst = Time(gps_time, format='gps', scale='utc',
location=(0, 0)).sidereal_time('mean').rad
return gmst
def get_available_detectors():
""" List the available detectors """
dets = list(_ground_detectors.keys())
for pfx, name in get_available_lal_detectors():
dets += [pfx]
return dets
def get_available_lal_detectors():
"""Return list of detectors known in the currently sourced lalsuite.
This function will query lalsuite about which detectors are known to
lalsuite. Detectors are identified by a two character string e.g. 'K1',
but also by a longer, and clearer name, e.g. KAGRA. This function returns
both. As LAL doesn't really expose this functionality we have to make some
assumptions about how this information is stored in LAL. Therefore while
    we hope this function will work correctly, it's possible it will need
    updating in the future. It would be better if lal exposed this information
    properly.
"""
ld = lal.__dict__
known_lal_names = [j for j in ld.keys() if "DETECTOR_PREFIX" in j]
known_prefixes = [ld[k] for k in known_lal_names]
known_names = [ld[k.replace('PREFIX', 'NAME')] for k in known_lal_names]
return list(zip(known_prefixes, known_names))
_ground_detectors = {}
def add_detector_on_earth(name, longitude, latitude,
yangle=0, xangle=None, height=0,
xlength=4000, ylength=4000):
""" Add a new detector on the earth
Parameters
----------
name: str
two-letter name to identify the detector
longitude: float
Longitude in radians using geodetic coordinates of the detector
latitude: float
Latitude in radians using geodetic coordinates of the detector
yangle: float
Azimuthal angle of the y-arm (angle drawn from pointing north)
xangle: float
        Azimuthal angle of the x-arm (angle drawn from pointing north). If not set
we assume a right angle detector following the right-hand rule.
height: float
The height in meters of the detector above the standard
reference ellipsoidal earth
"""
if xangle is None:
# assume right angle detector if no separate xarm direction given
xangle = yangle + np.pi / 2.0
# Rotation matrix to move detector to correct orientation
rm1 = rotation_matrix(longitude * units.rad, 'z')
rm2 = rotation_matrix((np.pi / 2.0 - latitude) * units.rad, 'y')
rm = np.matmul(rm2, rm1)
# Calculate response in earth centered coordinates
# by rotation of response in coordinates aligned
# with the detector arms
resps = []
vecs = []
for angle in [yangle, xangle]:
a, b = cos(2 * angle), sin(2 * angle)
resp = np.array([[-a, b, 0], [b, a, 0], [0, 0, 0]])
# apply rotation
resp = np.matmul(resp, rm)
resp = np.matmul(rm.T, resp) / 4.0
resps.append(resp)
vec = np.matmul(rm.T, np.array([-np.cos(angle), np.sin(angle), 0]))
vecs.append(vec)
full_resp = (resps[0] - resps[1])
loc = coordinates.EarthLocation.from_geodetic(longitude * units.rad,
latitude * units.rad,
height=height*units.meter)
loc = np.array([loc.x.value, loc.y.value, loc.z.value])
_ground_detectors[name] = {'location': loc,
'response': full_resp,
'xresp': resps[1],
'yresp': resps[0],
'xvec': vecs[1],
'yvec': vecs[0],
'yangle': yangle,
'xangle': xangle,
'height': height,
'xaltitude': 0.0,
'yaltitude': 0.0,
'ylength': ylength,
'xlength': xlength,
}
# Notation matches
# Eq 4 of https://link.aps.org/accepted/10.1103/PhysRevD.96.084004
def single_arm_frequency_response(f, n, arm_length):
""" The relative amplitude factor of the arm response due to
signal delay. This is relevant where the long-wavelength
    approximation no longer applies.
"""
n = np.clip(n, -0.999, 0.999)
phase = arm_length / constants.c.value * 2.0j * np.pi * f
a = 1.0 / 4.0 / phase
b = (1 - np.exp(-phase * (1 - n))) / (1 - n)
c = np.exp(-2.0 * phase) * (1 - np.exp(phase * (1 + n))) / (1 + n)
return a * (b - c) * 2.0 # We'll make this relative to the static resp
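# Small numerical check added for illustration: for f << c / arm_length the
# relative single-arm response tends to 1, recovering the long-wavelength
# (static) limit.
def _example_arm_response_low_frequency():
    resp = single_arm_frequency_response(np.array([1e-3]), 0.1, 4000.0)
    return abs(resp[0])   # ~ 1.0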
def load_detector_config(config_files):
""" Add custom detectors from a configuration file
Parameters
----------
config_files: str or list of strs
The config file(s) which specify new detectors
"""
methods = {'earth_normal': (add_detector_on_earth,
['longitude', 'latitude'])}
conf = InterpolatingConfigParser(config_files)
dets = conf.get_subsections('detector')
for det in dets:
kwds = dict(conf.items('detector-{}'.format(det)))
try:
method, arg_names = methods[kwds.pop('method')]
except KeyError:
raise ValueError("Missing or unkown method, "
"options are {}".format(methods.keys()))
for k in kwds:
kwds[k] = float(kwds[k])
try:
args = [kwds.pop(arg) for arg in arg_names]
except KeyError as e:
raise ValueError("missing required detector argument"
" {} are required".format(arg_names))
method(det.upper(), *args, **kwds)
# autoload detector config files
if 'PYCBC_DETECTOR_CONFIG' in os.environ:
load_detector_config(os.environ['PYCBC_DETECTOR_CONFIG'].split(':'))
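# Illustrative sketch added here (the prefix 'X1' and its coordinates are
# made up): registering a custom right-angle detector so that a Detector
# instance, defined below, can be built from it.
def _example_add_custom_detector():
    add_detector_on_earth('X1', longitude=0.5, latitude=0.8,
                          yangle=0.3, height=0.0)
    return Detector('X1')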
class Detector(object):
"""A gravitational wave detector
"""
def __init__(self, detector_name, reference_time=1126259462.0):
""" Create class representing a gravitational-wave detector
Parameters
----------
detector_name: str
The two-character detector string, i.e. H1, L1, V1, K1, I1
reference_time: float
Default is time of GW150914. In this case, the earth's rotation
will be estimated from a reference time. If 'None', we will
calculate the time for each gps time requested explicitly
using a slower but higher precision method.
"""
self.name = str(detector_name)
lal_detectors = [pfx for pfx, name in get_available_lal_detectors()]
if detector_name in _ground_detectors:
self.info = _ground_detectors[detector_name]
self.response = self.info['response']
self.location = self.info['location']
elif detector_name in lal_detectors:
lalsim = pycbc.libutils.import_optional('lalsimulation')
self._lal = lalsim.DetectorPrefixToLALDetector(self.name)
self.response = self._lal.response
self.location = self._lal.location
else:
raise ValueError("Unkown detector {}".format(detector_name))
loc = coordinates.EarthLocation(self.location[0],
self.location[1],
self.location[2],
unit=meter)
self.latitude = loc.lat.rad
self.longitude = loc.lon.rad
self.reference_time = reference_time
self.sday = None
self.gmst_reference = None
def set_gmst_reference(self):
if self.reference_time is not None:
self.sday = float(sday.si.scale)
self.gmst_reference = gmst_accurate(self.reference_time)
else:
raise RuntimeError("Can't get accurate sidereal time without GPS "
"reference time!")
def lal(self):
""" Return lal data type detector instance """
if hasattr(self, '_lal'):
return self._lal
else:
import lal
d = lal.FrDetector()
d.vertexLongitudeRadians = self.longitude
d.vertexLatitudeRadians = self.latitude
d.vertexElevation = self.info['height']
d.xArmAzimuthRadians = self.info['xangle']
d.yArmAzimuthRadians = self.info['yangle']
d.xArmAltitudeRadians = self.info['yaltitude']
d.xArmAltitudeRadians = self.info['xaltitude']
# This is somewhat abused by lalsimulation at the moment
# to determine a filter kernel size. We set this only so that
# value gets a similar number of samples as other detectors
# it is used for nothing else
d.yArmMidpoint = 4000.0
x = lal.Detector()
r = lal.CreateDetector(x, d, lal.LALDETECTORTYPE_IFODIFF)
self._lal = r
return r
def gmst_estimate(self, gps_time):
if self.reference_time is None:
return gmst_accurate(gps_time)
if self.gmst_reference is None:
self.set_gmst_reference()
dphase = (gps_time - self.reference_time) / self.sday * (2.0 * np.pi)
gmst = (self.gmst_reference + dphase) % (2.0 * np.pi)
return gmst
def light_travel_time_to_detector(self, det):
""" Return the light travel time from this detector
Parameters
----------
det: Detector
The other detector to determine the light travel time to.
Returns
-------
time: float
The light travel time in seconds
"""
d = self.location - det.location
return float(d.dot(d)**0.5 / constants.c.value)
def antenna_pattern(self, right_ascension, declination, polarization, t_gps,
frequency=0,
polarization_type='tensor'):
"""Return the detector response.
Parameters
----------
right_ascension: float or numpy.ndarray
The right ascension of the source
declination: float or numpy.ndarray
The declination of the source
polarization: float or numpy.ndarray
The polarization angle of the source
        polarization_type: string flag: 'tensor', 'vector' or 'scalar'
            The gravitational wave polarization type. Default: 'tensor'
Returns
-------
fplus(default) or fx or fb : float or numpy.ndarray
The plus or vector-x or breathing polarization factor for this sky location / orientation
fcross(default) or fy or fl : float or numpy.ndarray
            The cross or vector-y or longitudinal polarization factor for this sky location / orientation
"""
if isinstance(t_gps, lal.LIGOTimeGPS):
t_gps = float(t_gps)
gha = self.gmst_estimate(t_gps) - right_ascension
cosgha = cos(gha)
singha = sin(gha)
cosdec = cos(declination)
sindec = sin(declination)
cospsi = cos(polarization)
sinpsi = sin(polarization)
if frequency:
e0 = cosdec * cosgha
e1 = cosdec * -singha
e2 = sin(declination)
nhat = np.array([e0, e1, e2], dtype=object)
nx = nhat.dot(self.info['xvec'])
ny = nhat.dot(self.info['yvec'])
rx = single_arm_frequency_response(frequency, nx,
self.info['xlength'])
ry = single_arm_frequency_response(frequency, ny,
self.info['ylength'])
resp = ry * self.info['yresp'] - rx * self.info['xresp']
ttype = np.complex128
else:
resp = self.response
ttype = np.float64
x0 = -cospsi * singha - sinpsi * cosgha * sindec
x1 = -cospsi * cosgha + sinpsi * singha * sindec
x2 = sinpsi * cosdec
x = np.array([x0, x1, x2], dtype=object)
dx = resp.dot(x)
y0 = sinpsi * singha - cospsi * cosgha * sindec
y1 = sinpsi * cosgha + cospsi * singha * sindec
y2 = cospsi * cosdec
y = np.array([y0, y1, y2], dtype=object)
dy = resp.dot(y)
if polarization_type != 'tensor':
z0 = -cosdec * cosgha
z1 = cosdec * singha
z2 = -sindec
z = np.array([z0, z1, z2], dtype=object)
dz = resp.dot(z)
if polarization_type == 'tensor':
if hasattr(dx, 'shape'):
fplus = (x * dx - y * dy).sum(axis=0).astype(ttype)
fcross = (x * dy + y * dx).sum(axis=0).astype(ttype)
else:
fplus = (x * dx - y * dy).sum()
fcross = (x * dy + y * dx).sum()
return fplus, fcross
elif polarization_type == 'vector':
if hasattr(dx, 'shape'):
fx = (z * dx + x * dz).sum(axis=0).astype(ttype)
fy = (z * dy + y * dz).sum(axis=0).astype(ttype)
else:
fx = (z * dx + x * dz).sum()
fy = (z * dy + y * dz).sum()
return fx, fy
elif polarization_type == 'scalar':
if hasattr(dx, 'shape'):
fb = (x * dx + y * dy).sum(axis=0).astype(ttype)
fl = (z * dz).sum(axis=0)
else:
fb = (x * dx + y * dy).sum()
fl = (z * dz).sum()
return fb, fl
def time_delay_from_earth_center(self, right_ascension, declination, t_gps):
"""Return the time delay from the earth center
"""
return self.time_delay_from_location(np.array([0, 0, 0]),
right_ascension,
declination,
t_gps)
def time_delay_from_location(self, other_location, right_ascension,
declination, t_gps):
"""Return the time delay from the given location to detector for
a signal with the given sky location
In other words return `t1 - t2` where `t1` is the
arrival time in this detector and `t2` is the arrival time in the
other location.
Parameters
----------
other_location : numpy.ndarray of coordinates
A detector instance.
right_ascension : float
The right ascension (in rad) of the signal.
declination : float
The declination (in rad) of the signal.
t_gps : float
The GPS time (in s) of the signal.
Returns
-------
float
The arrival time difference between the detectors.
"""
ra_angle = self.gmst_estimate(t_gps) - right_ascension
cosd = cos(declination)
e0 = cosd * cos(ra_angle)
e1 = cosd * -sin(ra_angle)
e2 = sin(declination)
ehat = np.array([e0, e1, e2], dtype=object)
dx = other_location - self.location
return dx.dot(ehat).astype(np.float64) / constants.c.value
def time_delay_from_detector(self, other_detector, right_ascension,
declination, t_gps):
"""Return the time delay from the given to detector for a signal with
the given sky location; i.e. return `t1 - t2` where `t1` is the
arrival time in this detector and `t2` is the arrival time in the
other detector. Note that this would return the same value as
`time_delay_from_earth_center` if `other_detector` was geocentric.
Parameters
----------
other_detector : detector.Detector
A detector instance.
right_ascension : float
The right ascension (in rad) of the signal.
declination : float
The declination (in rad) of the signal.
t_gps : float
The GPS time (in s) of the signal.
Returns
-------
float
The arrival time difference between the detectors.
"""
return self.time_delay_from_location(other_detector.location,
right_ascension,
declination,
t_gps)
def project_wave(self, hp, hc, ra, dec, polarization,
method='lal',
reference_time=None):
"""Return the strain of a waveform as measured by the detector.
Apply the time shift for the given detector relative to the assumed
geocentric frame and apply the antenna patterns to the plus and cross
polarizations.
Parameters
----------
hp: pycbc.types.TimeSeries
Plus polarization of the GW
hc: pycbc.types.TimeSeries
Cross polarization of the GW
ra: float
Right ascension of source location
dec: float
Declination of source location
polarization: float
Polarization angle of the source
method: {'lal', 'constant', 'vary_polarization'}
The method to use for projecting the polarizations into the
detector frame. Default is 'lal'.
reference_time: float, Optional
The time to use as a reference for some methods of projection.
Used by the 'constant' and 'vary_polarization' methods. Defaults to
the midpoint of the waveform's time span if not provided.
"""
# The most robust and feature-rich method; it includes time-varying
# antenna patterns and doppler shifts due to the earth's rotation
# and orbit
if method == 'lal':
import lalsimulation
h_lal = lalsimulation.SimDetectorStrainREAL8TimeSeries(
hp.astype(np.float64).lal(), hc.astype(np.float64).lal(),
ra, dec, polarization, self.lal())
ts = TimeSeries(
h_lal.data.data, delta_t=h_lal.deltaT, epoch=h_lal.epoch,
dtype=np.float64, copy=False)
# 'constant' assumes a fixed orientation relative to the source over
# the duration of the signal, accurate for short-duration signals.
# 'vary_polarization' applies only the time-changing orientation,
# with no doppler corrections.
elif method in ['constant', 'vary_polarization']:
if reference_time is not None:
rtime = reference_time
else:
# In many cases, one should set the reference time if using
# this method as we don't know where the signal is within
# the given time series. If not provided, we'll choose
# the midpoint time.
rtime = (float(hp.end_time) + float(hp.start_time)) / 2.0
if method == 'constant':
time = rtime
elif method == 'vary_polarization':
if (not isinstance(hp, TimeSeries) or
not isinstance(hc, TimeSeries)):
raise TypeError('Waveform polarizations must be given'
' as time series for this method')
# this is more granular than needed, may be optimized later;
# assume the earth's rotation during the ~30 ms light-travel time
# from the earth center to the detector is completely negligible.
time = hp.sample_times.numpy()
fp, fc = self.antenna_pattern(ra, dec, polarization, time)
dt = self.time_delay_from_earth_center(ra, dec, rtime)
ts = fp * hp + fc * hc
ts.start_time = float(ts.start_time) + dt
# add in only the correction for the time variance in the polarization
# due to the earth's rotation, no doppler correction applied
else:
raise ValueError("Unkown projection method {}".format(method))
return ts
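# Usage sketch (illustrative only): project plus/cross polarizations into
# the detector frame. `hp` and `hc` are assumed to come from a waveform
# generator such as pycbc.waveform.get_td_waveform (not part of this
# module); all source parameters are arbitrary examples.
#
#     from pycbc.waveform import get_td_waveform
#     from pycbc.detector import Detector
#     hp, hc = get_td_waveform(approximant='IMRPhenomD', mass1=30.0,
#                              mass2=30.0, delta_t=1.0/2048, f_lower=20.0)
#     hp.start_time += 1187008882
#     hc.start_time += 1187008882
#     strain = Detector('H1').project_wave(hp, hc, ra=1.7, dec=-0.3,
#                                          polarization=0.2,
#                                          method='constant')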
def optimal_orientation(self, t_gps):
"""Return the optimal orientation in right ascension and declination
for a given GPS time.
Parameters
----------
t_gps: float
Time in gps seconds
Returns
-------
ra: float
Right ascension that is optimally oriented for the detector
dec: float
Declination that is optimally oriented for the detector
"""
ra = self.longitude + (self.gmst_estimate(t_gps) % (2.0*np.pi))
dec = self.latitude
return ra, dec
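# Usage sketch (illustrative only): the sky position that is directly
# overhead the detector at a given GPS time.
#
#     from pycbc.detector import Detector
#     ra, dec = Detector('V1').optimal_orientation(1187008882)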
def get_icrs_pos(self):
""" Transforms GCRS frame to ICRS frame
Returns
-------
loc: numpy.ndarray of shape (3,), units: AU
ICRS coordinates in cartesian system
"""
loc = self.location
loc = coordinates.SkyCoord(x=loc[0], y=loc[1], z=loc[2], unit=units.m,
frame='gcrs', representation_type='cartesian').transform_to('icrs')
loc.representation_type = 'cartesian'
conv = np.float32(((loc.x.unit/units.AU).decompose()).to_string())
loc = np.array([np.float32(loc.x), np.float32(loc.y),
np.float32(loc.z)])*conv
return loc
def effective_distance(self, distance, ra, dec, pol, time, inclination):
""" Distance scaled to account for amplitude factors
The effective distance of the source. This scales the distance so that
the amplitude is equal to a source which is optimally oriented with
respect to the detector. For fixed detector-frame intrinsic parameters
this is a measure of the expected signal strength.
Parameters
----------
distance: float
Source luminosity distance in megaparsecs
ra: float
The right ascension in radians
dec: float
The declination in radians
pol: float
Polarization angle of the gravitational wave in radians
time: float
GPS time in seconds
inclination:
The inclination of the binary's orbital plane
Returns
-------
eff_dist: float
The effective distance of the source
"""
fp, fc = self.antenna_pattern(ra, dec, pol, time)
ic = np.cos(inclination)
ip = 0.5 * (1. + ic * ic)
scale = ((fp * ip) ** 2.0 + (fc * ic) ** 2.0) ** 0.5
return distance / scale
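# Usage sketch (illustrative only): effective distance of a face-on
# source at 100 Mpc as seen by L1; the angles are arbitrary examples.
#
#     from pycbc.detector import Detector
#     d_eff = Detector('L1').effective_distance(100.0, 1.7, -0.3, 0.2,
#                                               1187008882, 0.0)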
def overhead_antenna_pattern(right_ascension, declination, polarization):
"""Return the antenna pattern factors F+ and Fx as a function of sky
location and polarization angle for a hypothetical interferometer located
at the north pole. Angles are in radians. Declinations of ±π/2 correspond
to the normal to the detector plane (i.e. overhead and underneath) while
the point with zero right ascension and declination is the direction
of one of the interferometer arms.
Parameters
----------
right_ascension: float
declination: float
polarization: float
Returns
-------
f_plus: float
f_cross: float
"""
# convert from declination to the polar angle (measured from the north axis)
theta = np.pi / 2.0 - declination
f_plus = - (1.0/2.0) * (1.0 + cos(theta)*cos(theta)) * \
cos (2.0 * right_ascension) * cos (2.0 * polarization) - \
cos(theta) * sin(2.0*right_ascension) * sin (2.0 * polarization)
f_cross = (1.0/2.0) * (1.0 + cos(theta)*cos(theta)) * \
cos (2.0 * right_ascension) * sin (2.0* polarization) - \
cos(theta) * sin(2.0*right_ascension) * cos (2.0 * polarization)
return f_plus, f_cross
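# Usage sketch (illustrative only): antenna factors of the hypothetical
# north-pole interferometer for a source 30 degrees from zenith.
#
#     import numpy as np
#     fp, fc = overhead_antenna_pattern(0.5, np.pi / 2 - np.pi / 6, 0.0)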
""" LISA class """
class LISA(object):
"""For LISA detector
"""
def __init__(self):
    pass
def get_pos(self, ref_time):
"""Return the position of LISA detector for a given reference time
Parameters
----------
ref_time : numpy.ScalarType
Returns
-------
location : numpy.ndarray of shape (3,3)
The position of all 3 satellites, with each row
corresponding to a single axis.
"""
ref_time = Time(val=ref_time, format='gps', scale='utc').jyear
n = np.array(range(1, 4))
kappa, _lambda_ = 0, 0
alpha = 2. * np.pi * ref_time/1 + kappa
beta_n = (n - 1) * 2.0 * pi / 3.0 + _lambda_
a, L = 1., 0.03342293561
e = L/(2. * a * np.sqrt(3))
x = a*cos(alpha) + a*e*(sin(alpha)*cos(alpha)*sin(beta_n) - (1 + sin(alpha)**2)*cos(beta_n))
y = a*sin(alpha) + a*e*(sin(alpha)*cos(alpha)*cos(beta_n) - (1 + cos(alpha)**2)*sin(beta_n))
z = -np.sqrt(3)*a*e*cos(alpha - beta_n)
self.location = np.array([x, y, z])
return self.location
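# Usage sketch (illustrative only): positions (in AU) of the three LISA
# spacecraft at a given GPS time.
#
#     from pycbc.detector import LISA
#     xyz = LISA().get_pos(1187008882)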
def get_gcrs_pos(self, location):
""" Transforms ICRS frame to GCRS frame
Parameters
----------
location : numpy.ndarray of shape (3,), units: AU
    Cartesian coordinates of the location
    in the ICRS frame
Returns
-------
loc : numpy.ndarray of shape (3,), units: meters
GCRS coordinates in cartesian system
"""
loc = location
loc = coordinates.SkyCoord(x=loc[0], y=loc[1], z=loc[2], unit=units.AU,
frame='icrs', representation_type='cartesian').transform_to('gcrs')
loc.representation_type = 'cartesian'
conv = np.float32(((loc.x.unit/units.m).decompose()).to_string())
loc = np.array([np.float32(loc.x), np.float32(loc.y),
np.float32(loc.z)])*conv
return loc
def time_delay_from_location(self, other_location, right_ascension,
declination, t_gps):
"""Return the time delay from the LISA detector to detector for
a signal with the given sky location. In other words return
`t1 - t2` where `t1` is the arrival time in this detector and
`t2` is the arrival time in the other location. Units(AU)
Parameters
----------
other_location : numpy.ndarray
    Cartesian coordinates (in AU) of the other location, given in
    the ICRS frame.
right_ascension : float
The right ascension (in rad) of the signal.
declination : float
The declination (in rad) of the signal.
t_gps : float
The GPS time (in s) of the signal.
Returns
-------
numpy.ndarray
The arrival time difference between the detectors.
"""
dx = self.location - other_location
cosd = cos(declination)
e0 = cosd * cos(right_ascension)
e1 = cosd * -sin(right_ascension)
e2 = sin(declination)
ehat = np.array([e0, e1, e2])
return dx.dot(ehat) / constants.c.value
def time_delay_from_detector(self, det, right_ascension,
declination, t_gps):
"""Return the time delay from the LISA detector for a signal with
the given sky location in ICRS frame; i.e. return `t1 - t2` where
`t1` is the arrival time in this detector and `t2` is the arrival
time in the other detector.
Parameters
----------
det : str
    The name/prefix of the other detector (e.g. 'H1'), used to
    construct a `Detector` instance.
right_ascension : float
The right ascension (in rad) of the signal.
declination : float
The declination (in rad) of the signal.
t_gps : float
The GPS time (in s) of the signal.
Returns
-------
numpy.ndarray
The arrival time difference between the detectors.
"""
loc = Detector(det, t_gps).get_icrs_pos()
return self.time_delay_from_location(loc, right_ascension,
declination, t_gps)
def time_delay_from_earth_center(self, right_ascension, declination, t_gps):
"""Return the time delay from the earth center in ICRS frame
"""
t_gps = Time(val=t_gps, format='gps', scale='utc')
earth = coordinates.get_body('earth', t_gps,
location=None).transform_to('icrs')
earth.representation_type = 'cartesian'
return self.time_delay_from_location(
np.array([np.float32(earth.x), np.float32(earth.y),
np.float32(earth.z)]), right_ascension,
declination, t_gps)
def ppdets(ifos, separator=', '):
"""Pretty-print a list (or set) of detectors: return a string listing
the given detectors alphabetically and separated by the given string
(comma by default).
"""
if ifos:
return separator.join(sorted(ifos))
return 'no detectors'
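# Usage sketch (illustrative only):
#
#     >>> ppdets(['L1', 'H1', 'V1'])
#     'H1, L1, V1'
#     >>> ppdets([], separator=' and ')
#     'no detectors'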
pycbc-master/pycbc/coordinates.py
# Copyright (C) 2016 Christopher M. Biwer
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Coordinate transformations.
"""
import numpy
def cartesian_to_spherical_rho(x, y, z):
""" Calculates the magnitude in spherical coordinates from Cartesian
coordinates.
Parameters
----------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
z : {numpy.array, float}
Z-coordinate.
Returns
-------
rho : {numpy.array, float}
The radial amplitude.
"""
return numpy.sqrt(x**2 + y**2 + z**2)
def cartesian_to_spherical_azimuthal(x, y):
""" Calculates the azimuthal angle in spherical coordinates from Cartesian
coordinates. The azimuthal angle is in [0,2*pi].
Parameters
----------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
Returns
-------
phi : {numpy.array, float}
The azimuthal angle.
"""
y = float(y) if isinstance(y, int) else y
phi = numpy.arctan2(y, x)
return phi % (2 * numpy.pi)
def cartesian_to_spherical_polar(x, y, z):
""" Calculates the polar angle in spherical coordinates from Cartesian
coordinates. The polar angle is in [0,pi].
Parameters
----------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
z : {numpy.array, float}
Z-coordinate.
Returns
-------
theta : {numpy.array, float}
The polar angle.
"""
rho = cartesian_to_spherical_rho(x, y, z)
if numpy.isscalar(rho):
return numpy.arccos(z / rho) if rho else 0.0
else:
return numpy.arccos(numpy.divide(z, rho, out=numpy.ones_like(z),
where=rho != 0))
def cartesian_to_spherical(x, y, z):
""" Maps cartesian coordinates (x,y,z) to spherical coordinates
(rho,phi,theta) where phi is in [0,2*pi] and theta is in [0,pi].
Parameters
----------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
z : {numpy.array, float}
Z-coordinate.
Returns
-------
rho : {numpy.array, float}
The radial amplitude.
phi : {numpy.array, float}
The azimuthal angle.
theta : {numpy.array, float}
The polar angle.
"""
rho = cartesian_to_spherical_rho(x, y, z)
phi = cartesian_to_spherical_azimuthal(x, y)
theta = cartesian_to_spherical_polar(x, y, z)
return rho, phi, theta
def spherical_to_cartesian(rho, phi, theta):
""" Maps spherical coordinates (rho,phi,theta) to cartesian coordinates
(x,y,z) where phi is in [0,2*pi] and theta is in [0,pi].
Parameters
----------
rho : {numpy.array, float}
The radial amplitude.
phi : {numpy.array, float}
The azimuthal angle.
theta : {numpy.array, float}
The polar angle.
Returns
-------
x : {numpy.array, float}
X-coordinate.
y : {numpy.array, float}
Y-coordinate.
z : {numpy.array, float}
Z-coordinate.
"""
x = rho * numpy.cos(phi) * numpy.sin(theta)
y = rho * numpy.sin(phi) * numpy.sin(theta)
z = rho * numpy.cos(theta)
return x, y, z
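# Usage sketch (illustrative only): round-trip a point through the two
# coordinate systems defined above.
#
#     rho, phi, theta = cartesian_to_spherical(1.0, 1.0, 1.0)
#     x, y, z = spherical_to_cartesian(rho, phi, theta)
#     # x, y, z recover (1.0, 1.0, 1.0) up to floating-point error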
__all__ = ['cartesian_to_spherical_rho', 'cartesian_to_spherical_azimuthal',
'cartesian_to_spherical_polar', 'cartesian_to_spherical',
'spherical_to_cartesian',
]
pycbc-master/pycbc/__init__.py
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""PyCBC contains a toolkit for CBC gravitational wave analysis
"""
import subprocess, os, sys, signal, warnings
# Filter annoying Cython warnings that serve no good purpose.
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import logging
import random
import string
from datetime import datetime as dt
try:
# This will fail when pycbc is imported during the build process,
# before version.py has been generated.
from .version import git_hash
from .version import version as pycbc_version
except:
git_hash = 'none'
pycbc_version = 'none'
__version__ = pycbc_version
class LogFormatter(logging.Formatter):
"""
Format the logging appropriately
This will return the log time in the ISO 8601 standard,
but with millisecond precision
https://en.wikipedia.org/wiki/ISO_8601
e.g. 2022-11-18T09:53:01.554+00:00
"""
converter = dt.fromtimestamp
def formatTime(self, record, datefmt=None):
ct = self.converter(record.created).astimezone()
t = ct.strftime("%Y-%m-%dT%H:%M:%S")
s = f"{t}.{int(record.msecs):03d}"
timezone = ct.strftime('%z')
timezone_colon = f"{timezone[:-2]}:{timezone[-2:]}"
s += timezone_colon
return s
def init_logging(verbose=False, format='%(asctime)s %(message)s'):
"""Common utility for setting up logging in PyCBC.
Installs a signal handler such that verbosity can be activated at
run-time by sending a SIGUSR1 to the process.
Parameters
----------
verbose : bool or int, optional
What level to set the verbosity level to. Accepts either a boolean
or an integer representing the level to set. If True/False will set to
``logging.INFO``/``logging.WARN``. For higher logging levels, pass
an integer representing the level to set (see the ``logging`` module
for details). Default is ``False`` (``logging.WARN``).
format : str, optional
The format to use for logging messages.
"""
def sig_handler(signum, frame):
logger = logging.getLogger()
log_level = logger.level
if log_level == logging.DEBUG:
log_level = logging.WARN
else:
log_level = logging.DEBUG
logging.warn('Got signal %d, setting log level to %d',
signum, log_level)
logger.setLevel(log_level)
signal.signal(signal.SIGUSR1, sig_handler)
if not verbose:
initial_level = logging.WARN
elif int(verbose) == 1:
initial_level = logging.INFO
else:
initial_level = int(verbose)
logger = logging.getLogger()
logger.setLevel(initial_level)
sh = logging.StreamHandler()
logger.addHandler(sh)
sh.setFormatter(LogFormatter(fmt=format))
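# Usage sketch (illustrative only): enable INFO-level logging at the top
# of a script; pass an integer level such as logging.DEBUG for more.
#
#     import pycbc
#     pycbc.init_logging(True)   # INFO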
def makedir(path):
"""
Make the analysis directory path and any parent directories that don't
already exist. Will do nothing if path already exists.
"""
if path is not None and not os.path.exists(path):
os.makedirs(path)
# PyCBC-Specific Constants
# Set the value we want any aligned memory calls to use
# N.B.: *Not* all pycbc memory will be aligned to multiples
# of this value
PYCBC_ALIGNMENT = 32
# Dynamic range factor: a large constant for rescaling
# GW strains. This is 2**69 rounded to 17 sig.fig.
DYN_RANGE_FAC = 5.9029581035870565e+20
# String used to separate parameters in configuration file section headers.
# This is used by the distributions and transforms modules
VARARGS_DELIM = '+'
# Check for optional components of the PyCBC Package
try:
# This is a crude check to make sure that the driver is installed
try:
loaded_modules = subprocess.Popen(['lsmod'], stdout=subprocess.PIPE).communicate()[0]
loaded_modules = loaded_modules.decode()
if 'nvidia' not in loaded_modules:
raise ImportError("nvidia driver may not be installed correctly")
except OSError:
pass
# Check that pycuda is installed and can talk to the driver
import pycuda.driver as _pycudadrv
HAVE_CUDA=True
except ImportError:
HAVE_CUDA=False
# Check for MKL capability
try:
import pycbc.fft.mkl
HAVE_MKL=True
except (ImportError, OSError):
HAVE_MKL=False
# Check for OpenMP support; currently we presume it exists, unless on
# platforms (mac) that are silly and don't use the standard gcc.
if sys.platform == 'darwin':
HAVE_OMP = False
# MacosX after python3.7 switched to 'spawn', however, this does not
# preserve common state information which we have relied on when using
# multiprocessing based pools.
import multiprocessing
if hasattr(multiprocessing, 'set_start_method'):
multiprocessing.set_start_method('fork')
else:
HAVE_OMP = True
# https://pynative.com/python-generate-random-string/
def random_string(stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def gps_now():
"""Return the current GPS time as a float using Astropy.
"""
from astropy.time import Time
return float(Time.now().gps)
pycbc-master/pycbc/scheme.py
# Copyright (C) 2014 Alex Nitz, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides python contexts that set the default behavior for PyCBC
objects.
"""
import os
import ctypes
import pycbc
from functools import wraps
import logging
from .libutils import get_ctypes_library
class _SchemeManager(object):
_single = None
def __init__(self):
if _SchemeManager._single is not None:
raise RuntimeError("SchemeManager is a private class")
_SchemeManager._single= self
self.state= None
self._lock= False
def lock(self):
self._lock= True
def unlock(self):
self._lock= False
def shift_to(self, state):
if self._lock is False:
self.state = state
else:
raise RuntimeError("The state is locked, cannot shift schemes")
# Create the global processing scheme manager
mgr = _SchemeManager()
DefaultScheme = None
default_context = None
class Scheme(object):
"""Context that sets PyCBC objects to use CPU processing. """
_single = None
def __init__(self):
if DefaultScheme is type(self):
return
if Scheme._single is not None:
raise RuntimeError("Only one processing scheme can be used")
Scheme._single = True
def __enter__(self):
mgr.shift_to(self)
mgr.lock()
def __exit__(self, type, value, traceback):
mgr.unlock()
mgr.shift_to(default_context)
def __del__(self):
if Scheme is not None:
Scheme._single = None
_cuda_cleanup_list=[]
def register_clean_cuda(function):
_cuda_cleanup_list.append(function)
def clean_cuda(context):
#Before cuda context is destroyed, all item destructions dependent on cuda
# must take place. This calls all functions that have been registered
# with _register_clean_cuda() in reverse order
#So the last one registered, is the first one cleaned
_cuda_cleanup_list.reverse()
for func in _cuda_cleanup_list:
func()
context.pop()
from pycuda.tools import clear_context_caches
clear_context_caches()
class CUDAScheme(Scheme):
"""Context that sets PyCBC objects to use a CUDA processing scheme. """
def __init__(self, device_num=0):
Scheme.__init__(self)
if not pycbc.HAVE_CUDA:
raise RuntimeError("Install PyCUDA to use CUDA processing")
import pycuda.driver
pycuda.driver.init()
self.device = pycuda.driver.Device(device_num)
self.context = self.device.make_context(flags=pycuda.driver.ctx_flags.SCHED_BLOCKING_SYNC)
import atexit
atexit.register(clean_cuda,self.context)
class CPUScheme(Scheme):
def __init__(self, num_threads=1):
if isinstance(num_threads, int):
self.num_threads=num_threads
elif num_threads == 'env' and "PYCBC_NUM_THREADS" in os.environ:
self.num_threads = int(os.environ["PYCBC_NUM_THREADS"])
else:
import multiprocessing
self.num_threads = multiprocessing.cpu_count()
self._libgomp = None
def __enter__(self):
Scheme.__enter__(self)
try:
self._libgomp = get_ctypes_library("gomp", ['gomp'],
mode=ctypes.RTLD_GLOBAL)
except:
# Should we fail or give a warning if we cannot import
# libgomp? Seems to work even for MKL scheme, but
# not entirely sure why...
pass
os.environ["OMP_NUM_THREADS"] = str(self.num_threads)
if self._libgomp is not None:
self._libgomp.omp_set_num_threads( int(self.num_threads) )
def __exit__(self, type, value, traceback):
os.environ["OMP_NUM_THREADS"] = "1"
if self._libgomp is not None:
self._libgomp.omp_set_num_threads(1)
Scheme.__exit__(self, type, value, traceback)
class MKLScheme(CPUScheme):
def __init__(self, num_threads=1):
CPUScheme.__init__(self, num_threads)
if not pycbc.HAVE_MKL:
raise RuntimeError("Can't find MKL libraries")
class NumpyScheme(CPUScheme):
pass
scheme_prefix = {
CUDAScheme: "cuda",
CPUScheme: "cpu",
MKLScheme: "mkl",
NumpyScheme: "numpy",
}
_scheme_map = {v: k for (k, v) in scheme_prefix.items()}
_default_scheme_prefix = os.getenv("PYCBC_SCHEME", "cpu")
try:
_default_scheme_class = _scheme_map[_default_scheme_prefix]
except KeyError as exc:
raise RuntimeError(
"PYCBC_SCHEME={!r} not recognised, please select one of: {}".format(
_default_scheme_prefix,
", ".join(map(repr, _scheme_map)),
),
)
class DefaultScheme(_default_scheme_class):
pass
default_context = DefaultScheme()
mgr.state = default_context
scheme_prefix[DefaultScheme] = _default_scheme_prefix
def current_prefix():
return scheme_prefix[type(mgr.state)]
_import_cache = {}
def schemed(prefix):
def scheming_function(func):
@wraps(func)
def _scheming_function(*args, **kwds):
try:
return _import_cache[mgr.state][func](*args, **kwds)
except KeyError:
exc_errors = []
for sch in mgr.state.__class__.__mro__[0:-2]:
try:
backend = __import__(prefix + scheme_prefix[sch],
fromlist=[func.__name__])
schemed_fn = getattr(backend, func.__name__)
except (ImportError, AttributeError) as e:
exc_errors += [e]
continue
if mgr.state not in _import_cache:
_import_cache[mgr.state] = {}
_import_cache[mgr.state][func] = schemed_fn
return schemed_fn(*args, **kwds)
err = """Failed to find implementation of (%s)
for %s scheme." % (str(fn), current_prefix())"""
for emsg in exc_errors:
err += print(emsg)
raise RuntimeError(err)
return _scheming_function
return scheming_function
def cpuonly(func):
@wraps(func)
def _cpuonly(*args, **kwds):
if not issubclass(type(mgr.state), CPUScheme):
raise TypeError(func.__name__ +
" can only be called from a CPU processing scheme.")
else:
return func(*args, **kwds)
return _cpuonly
def insert_processing_option_group(parser):
"""
Adds the options used to choose a processing scheme. This should be used
if your program supports the ability to select the processing scheme.
Parameters
----------
parser : object
OptionParser instance
"""
processing_group = parser.add_argument_group("Options for selecting the"
" processing scheme in this program.")
processing_group.add_argument("--processing-scheme",
help="The choice of processing scheme. "
"Choices are " + str(list(set(scheme_prefix.values()))) +
". (optional for CPU scheme) The number of "
"execution threads "
"can be indicated by cpu:NUM_THREADS, "
"where NUM_THREADS "
"is an integer. The default is a single thread. "
"If the scheme is provided as cpu:env, the number "
"of threads can be provided by the PYCBC_NUM_THREADS "
"environment variable. If the environment variable "
"is not set, the number of threads matches the number "
"of logical cores. ",
default="cpu")
processing_group.add_argument("--processing-device-id",
help="(optional) ID of GPU to use for accelerated "
"processing",
default=0, type=int)
def from_cli(opt):
"""Parses the command line options and returns a precessing scheme.
Parameters
----------
opt: object
Result of parsing the CLI with OptionParser, or any object with
the required attributes.
Returns
-------
ctx: Scheme
Returns the requested processing scheme.
"""
scheme_str = opt.processing_scheme.split(':')
name = scheme_str[0]
if name == "cuda":
logging.info("Running with CUDA support")
ctx = CUDAScheme(opt.processing_device_id)
elif name == "mkl":
if len(scheme_str) > 1:
numt = scheme_str[1]
if numt.isdigit():
numt = int(numt)
ctx = MKLScheme(num_threads=numt)
else:
ctx = MKLScheme()
logging.info("Running with MKL support: %s threads" % ctx.num_threads)
else:
if len(scheme_str) > 1:
numt = scheme_str[1]
if numt.isdigit():
numt = int(numt)
ctx = CPUScheme(num_threads=numt)
else:
ctx = CPUScheme()
logging.info("Running with CPU support: %s threads" % ctx.num_threads)
return ctx
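# Usage sketch (illustrative only): wire the scheme options into an
# argparse parser and enter the chosen scheme as a context manager.
#
#     import argparse
#     from pycbc import scheme
#     parser = argparse.ArgumentParser()
#     scheme.insert_processing_option_group(parser)
#     opt = parser.parse_args(['--processing-scheme', 'cpu:4'])
#     with scheme.from_cli(opt):
#         pass  # PyCBC operations here run with 4 CPU threads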
def verify_processing_options(opt, parser):
"""Parses the processing scheme options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance.
"""
scheme_types = scheme_prefix.values()
if opt.processing_scheme.split(':')[0] not in scheme_types:
parser.error("(%s) is not a valid scheme type.")
class ChooseBySchemeDict(dict):
""" This class represents a dictionary whose purpose is to chose objects
based on their processing scheme. The keys are intended to be processing
schemes.
"""
def __getitem__(self, scheme):
for base in scheme.__mro__[0:-1]:
try:
return dict.__getitem__(self, base)
except KeyError:
pass
pycbc-master/pycbc/dq.py
# Copyright (C) 2018 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" Utilities to query archival instrument status information of
gravitational-wave detectors from public sources and/or dqsegdb.
"""
import logging
import json
import numpy
from ligo.segments import segmentlist, segment
from pycbc.frame.gwosc import get_run
from pycbc.io import get_file
def parse_veto_definer(veto_def_filename, ifos):
""" Parse a veto definer file from the filename and return a dictionary
indexed by ifo and veto definer category level.
Parameters
----------
veto_def_filename: str
The path to the veto definer file
ifos: str
The list of ifos for which we require information from the veto
definer file
Returns
--------
parsed_definition: dict
Returns a dictionary first indexed by ifo, then category level, and
finally a list of veto definitions.
"""
from ligo.lw import table, utils as ligolw_utils
from pycbc.io.ligolw import LIGOLWContentHandler as h
data = {}
for ifo_name in ifos:
data[ifo_name] = {}
data[ifo_name]['CAT_H'] = []
for cat_num in range(1, 5):
data[ifo_name]['CAT_{}'.format(cat_num)] = []
indoc = ligolw_utils.load_filename(veto_def_filename, False,
contenthandler=h)
veto_table = table.Table.get_table(indoc, 'veto_definer')
ifo = veto_table.getColumnByName('ifo')
name = veto_table.getColumnByName('name')
version = numpy.array(veto_table.getColumnByName('version'))
category = numpy.array(veto_table.getColumnByName('category'))
start = numpy.array(veto_table.getColumnByName('start_time'))
end = numpy.array(veto_table.getColumnByName('end_time'))
start_pad = numpy.array(veto_table.getColumnByName('start_pad'))
end_pad = numpy.array(veto_table.getColumnByName('end_pad'))
for i in range(len(veto_table)):
if ifo[i] not in data:
continue
# The veto-definer categories are weird! Hardware injections are stored
# in "3" and numbers above that are bumped up by one (although not
# often used any more). So we remap 3 to H and anything above 3 to
# N-1. 2 and 1 correspond to 2 and 1 (YAY!)
if category[i] > 3:
curr_cat = "CAT_{}".format(category[i]-1)
elif category[i] == 3:
curr_cat = "CAT_H"
else:
curr_cat = "CAT_{}".format(category[i])
veto_info = {'name': name[i],
'version': version[i],
'full_name': name[i]+':'+str(version[i]),
'start': start[i],
'end': end[i],
'start_pad': start_pad[i],
'end_pad': end_pad[i],
}
data[ifo[i]][curr_cat].append(veto_info)
return data
GWOSC_URL = 'https://www.gwosc.org/timeline/segments/json/{}/{}_{}/{}/{}/'
def query_dqsegdb2(detector, flag_name, start_time, end_time, server):
"""Utility function for better error reporting when calling dqsegdb2.
"""
from dqsegdb2.query import query_segments
complete_flag = detector + ':' + flag_name
try:
query_res = query_segments(complete_flag,
int(start_time),
int(end_time),
host=server)
return query_res['active']
except Exception as e:
logging.error('Could not query segment database, check name '
'(%s), times (%d-%d) and server (%s)',
complete_flag, int(start_time), int(end_time),
server)
raise e
def query_flag(ifo, segment_name, start_time, end_time,
source='any', server="https://segments.ligo.org",
veto_definer=None, cache=False):
"""Return the times where the flag is active
Parameters
----------
ifo: string
The interferometer to query (H1, L1).
segment_name: string
The status flag to query from GWOSC.
start_time: int
The starting gps time to begin querying from GWOSC
end_time: int
The end gps time of the query
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
cache: bool
If true cache the query. Default is not to cache
Returns
---------
segments: ligo.segments.segmentlist
List of segments
"""
flag_segments = segmentlist([])
if source in ['GWOSC', 'any']:
# Special cases as the GWOSC convention is backwards from normal
# LIGO / Virgo operation!!!!
if (('_HW_INJ' in segment_name and 'NO' not in segment_name) or
'VETO' in segment_name):
data = query_flag(ifo, 'DATA', start_time, end_time)
if '_HW_INJ' in segment_name:
name = 'NO_' + segment_name
else:
name = segment_name.replace('_VETO', '')
negate = query_flag(ifo, name, start_time, end_time, cache=cache)
return (data - negate).coalesce()
duration = end_time - start_time
try:
url = GWOSC_URL.format(get_run(start_time + duration/2, ifo),
ifo, segment_name,
int(start_time), int(duration))
fname = get_file(url, cache=cache, timeout=10)
data = json.load(open(fname, 'r'))
if 'segments' in data:
flag_segments = data['segments']
except Exception as e:
if source != 'any':
raise ValueError("Unable to find {} segments in GWOSC, check "
"flag name or times".format(segment_name))
return query_flag(ifo, segment_name, start_time, end_time,
source='dqsegdb', server=server,
veto_definer=veto_definer)
elif source == 'dqsegdb':
# The veto definer will allow the use of MACRO names
# These directly correspond to the name in the veto definer file
if veto_definer is not None:
veto_def = parse_veto_definer(veto_definer, [ifo])
# We treat the veto definer name as if it were its own flag and
# process the flags in the veto definer
if veto_definer is not None and segment_name in veto_def[ifo]:
for flag in veto_def[ifo][segment_name]:
partial = segmentlist([])
segs = query_dqsegdb2(ifo, flag['full_name'],
start_time, end_time, server)
# Apply padding to each segment
for rseg in segs:
seg_start = rseg[0] + flag['start_pad']
seg_end = rseg[1] + flag['end_pad']
partial.append(segment(seg_start, seg_end))
# Limit to the veto definer stated valid region of this flag
flag_start = flag['start']
flag_end = flag['end']
# Corner case: if the flag end time is 0 it means 'no limit'
# so use the query end time
if flag_end == 0:
flag_end = int(end_time)
send = segmentlist([segment(flag_start, flag_end)])
flag_segments += (partial.coalesce() & send)
else: # Standard case just query directly
segs = query_dqsegdb2(ifo, segment_name, start_time, end_time,
server)
for rseg in segs:
flag_segments.append(segment(rseg[0], rseg[1]))
# dqsegdb output is not guaranteed to lie entirely within start
# and end times, hence restrict to this range
flag_segments = flag_segments.coalesce() & \
segmentlist([segment(int(start_time), int(end_time))])
else:
raise ValueError("Source must be `dqsegdb`, `GWOSC` or `any`."
" Got {}".format(source))
return segmentlist(flag_segments).coalesce()
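# Usage sketch (illustrative only): available-data segments for H1 over
# one day; the GPS times are arbitrary example values and the query
# requires network access to GWOSC or a segment database.
#
#     from pycbc.dq import query_flag
#     segs = query_flag('H1', 'DATA', 1187000000, 1187086400)
#     livetime = abs(segs)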
def query_cumulative_flags(ifo, segment_names, start_time, end_time,
source='any', server="https://segments.ligo.org",
veto_definer=None,
bounds=None,
padding=None,
override_ifos=None,
cache=False):
"""Return the times where any flag is active
Parameters
----------
ifo: string or dict
The interferometer to query (H1, L1). If a dict, an element for each
flag name must be provided.
segment_names: list of strings
The status flag to query from GWOSC.
start_time: int
The starting gps time to begin querying from GWOSC
end_time: int
The end gps time of the query
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
bounds: dict, Optional
Dict of (start, end) tuples keyed by flag name, restricting the
times over which that flag's segments are considered active.
padding: dict, Optional
Dict keyed by the flag name. Each element is a tuple
(start_pad, end_pad) which indicates how to change the segment boundaries.
override_ifos: dict, Optional
A dict keyed by flag_name to override the ifo option on a per flag
basis.
Returns
---------
segments: ligo.segments.segmentlist
List of segments
"""
total_segs = segmentlist([])
for flag_name in segment_names:
ifo_name = ifo
if override_ifos is not None and flag_name in override_ifos:
ifo_name = override_ifos[flag_name]
segs = query_flag(ifo_name, flag_name, start_time, end_time,
source=source, server=server,
veto_definer=veto_definer,
cache=cache)
if padding and flag_name in padding:
s, e = padding[flag_name]
segs2 = segmentlist([])
for seg in segs:
segs2.append(segment(seg[0] + s, seg[1] + e))
segs = segs2
if bounds is not None and flag_name in bounds:
s, e = bounds[flag_name]
valid = segmentlist([segment([s, e])])
segs = (segs & valid).coalesce()
total_segs = (total_segs + segs).coalesce()
return total_segs
def parse_flag_str(flag_str):
""" Parse a dq flag query string
Parameters
----------
flag_str: str
String to be parsed
Returns
-------
flags: list of strings
List of reduced name strings which can be passed to lower level
query commands
signs: dict
Dict of bools indicating if the flag should add positively to the
segmentlist
ifos: dict
Ifo specified for the given flag
bounds: dict
The boundary of a given flag
padding: dict
Any padding that should be applied to the segments for a given flag
"""
flags = flag_str.replace(' ', '').strip().split(',')
signs = {}
ifos = {}
bounds = {}
padding = {}
bflags = []
for flag in flags:
# Check if the flag should add or subtract time
if not (flag[0] == '+' or flag[0] == '-'):
err_msg = "DQ flags must begin with a '+' or a '-' character. "
err_msg += "You provided {}. ".format(flag)
err_msg += "See http://pycbc.org/pycbc/latest/html/workflow/segments.html"
err_msg += " for more information."
raise ValueError(err_msg)
sign = flag[0] == '+'
flag = flag[1:]
ifo = pad = bound = None
# Check for non-default IFO
if len(flag.split(':')[0]) == 2:
ifo = flag.split(':')[0]
flag = flag[3:]
# Check for padding options
if '<' in flag:
popt = flag.split('<')[1].split('>')[0]
spad, epad = popt.split(':')
pad = (float(spad), float(epad))
flag = flag.replace(popt, '').replace('<>', '')
# Check if there are bounds on the flag
if '[' in flag:
bopt = flag.split('[')[1].split(']')[0]
start, end = bopt.split(':')
bound = (int(start), int(end))
flag = flag.replace(bopt, '').replace('[]', '')
if ifo:
ifos[flag] = ifo
if pad:
padding[flag] = pad
if bound:
bounds[flag] = bound
bflags.append(flag)
signs[flag] = sign
return bflags, signs, ifos, bounds, padding
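# Usage sketch (illustrative only): parse a composite flag string; the
# flag names and times are arbitrary examples.
#
#     flags, signs, ifos, bounds, padding = parse_flag_str(
#         '+H1:DATA<-8:8>[1187000000:1187086400],-L1:BURST_CAT2')
#     # flags == ['DATA', 'BURST_CAT2'], signs['DATA'] is True,
#     # ifos == {'DATA': 'H1', 'BURST_CAT2': 'L1'},
#     # padding == {'DATA': (-8.0, 8.0)}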
def query_str(ifo, flag_str, start_time, end_time, source='any',
server="https://segments.ligo.org", veto_definer=None):
""" Query for flags based on a special str syntax
Parameters
----------
ifo: str
The ifo to query for (may be overridden in syntax)
flag_str: str
Specification of how to do the query. Ex. +H1:DATA:1<-8:8>[0:100000000]
would return H1 time for the DATA available flag with version 1. It
would then apply an 8 second padding and only return times within
the chosen range 0-100000000.
start_time: int
The start gps time. May be overridden for individual flags with the
flag str bounds syntax
end_time: int
The end gps time. May be overridden for individual flags with the
flag str bounds syntax
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
Returns
-------
segs: segmentlist
A list of segments corresponding to the flag query string
"""
flags, sign, ifos, bounds, padding = parse_flag_str(flag_str)
up = [f for f in flags if sign[f]]
down = [f for f in flags if not sign[f]]
if len(up) + len(down) != len(flags):
raise ValueError('Not all flags could be parsed, check +/- prefix')
segs = query_cumulative_flags(ifo, up, start_time, end_time,
source=source,
server=server,
veto_definer=veto_definer,
bounds=bounds,
padding=padding,
override_ifos=ifos)
mseg = query_cumulative_flags(ifo, down, start_time, end_time,
source=source,
server=server,
veto_definer=veto_definer,
bounds=bounds,
padding=padding,
override_ifos=ifos)
segs = (segs - mseg).coalesce()
return segs
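# Usage sketch (illustrative only): combine flags using the string syntax
# parsed above; the flag names and times are arbitrary examples and the
# query requires network access.
#
#     from pycbc.dq import query_str
#     segs = query_str('H1', '+DATA,-CBC_CAT2_VETO', 1187000000, 1187086400)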
pycbc-master/pycbc/pool.py
""" Tools for creating pools of worker processes
"""
import multiprocessing.pool
import functools
from multiprocessing import TimeoutError, cpu_count
import types
import signal
import atexit
import logging
def is_main_process():
""" Check if this is the main control process and may handle one time tasks
"""
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
return rank == 0
except (ImportError, ValueError, RuntimeError):
return True
# Allow the pool to be interrupted, need to disable the children processes
# from intercepting the keyboard interrupt
def _noint(init, *args):
signal.signal(signal.SIGINT, signal.SIG_IGN)
if init is not None:
return init(*args)
_process_lock = None
_numdone = None
def _lockstep_fcn(values):
""" Wrapper to ensure that all processes execute together """
numrequired, fcn, args = values
with _process_lock:
_numdone.value += 1
# yep this is an ugly busy loop, do something better please
# when we care about the performance of this call and not just the
# guarantee it provides (ok... maybe never)
while 1:
if _numdone.value == numrequired:
return fcn(args)
def _shutdown_pool(p):
p.terminate()
p.join()
class BroadcastPool(multiprocessing.pool.Pool):
""" Multiprocessing pool with a broadcast method
"""
def __init__(self, processes=None, initializer=None, initargs=(), **kwds):
global _process_lock
global _numdone
_process_lock = multiprocessing.Lock()
_numdone = multiprocessing.Value('i', 0)
noint = functools.partial(_noint, initializer)
super(BroadcastPool, self).__init__(processes, noint, initargs, **kwds)
atexit.register(_shutdown_pool, self)
def __len__(self):
return len(self._pool)
def broadcast(self, fcn, args):
""" Do a function call on every worker.
Parameters
----------
fcn: function
Function to call.
args: tuple
The arguments for Pool.map
"""
results = self.map(_lockstep_fcn, [(len(self), fcn, args)] * len(self))
_numdone.value = 0
return results
def allmap(self, fcn, args):
""" Do a function call on every worker with different arguments
Parameters
----------
fcn: function
Function to call.
args: tuple
The arguments for Pool.map
"""
results = self.map(_lockstep_fcn,
[(len(self), fcn, arg) for arg in args])
_numdone.value = 0
return results
def map(self, func, items, chunksize=None):
""" Catch keyboard interuppts to allow the pool to exit cleanly.
Parameters
----------
func: function
Function to call
items: list of tuples
Arguments to pass
chunksize: int, Optional
Number of calls for each process to handle at once
"""
results = self.map_async(func, items, chunksize)
while True:
try:
return results.get(1800)
except TimeoutError:
pass
except KeyboardInterrupt:
self.terminate()
self.join()
raise KeyboardInterrupt
def _dummy_broadcast(self, f, args):
self.map(f, [args] * self.size)
class SinglePool(object):
def broadcast(self, fcn, args):
return self.map(fcn, [args])
def map(self, f, items):
return [f(a) for a in items]
def use_mpi(require_mpi=False, log=True):
""" Get whether MPI is enabled and if so the current size and rank
"""
use_mpi = False
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if size > 1:
use_mpi = True
if log:
logging.info('Running under mpi with size: %s, rank: %s',
size, rank)
except ImportError as e:
if require_mpi:
print(e)
raise ValueError("Failed to load mpi, ensure mpi4py is installed")
if not use_mpi:
size = rank = 0
return use_mpi, size, rank
def choose_pool(processes, mpi=False):
""" Get processing pool
"""
do_mpi, size, rank = use_mpi(require_mpi=mpi)
if do_mpi:
try:
import schwimmbad
pool = schwimmbad.choose_pool(mpi=do_mpi,
processes=(size - 1))
pool.broadcast = types.MethodType(_dummy_broadcast, pool)
atexit.register(pool.close)
if processes:
logging.info('NOTE: for MPI, the pool size is determined by the '
             'MPI launch size, not the processes argument')
if do_mpi and not mpi:
logging.info('NOTE: using MPI as this process was launched '
'under MPI')
except ImportError:
raise ValueError("Failed to start up an MPI pool, "
"install mpi4py / schwimmbad")
elif processes == 1:
pool = SinglePool()
else:
if processes == -1:
processes = cpu_count()
pool = BroadcastPool(processes)
pool.size = processes
if size:
pool.size = size
return pool
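# Usage sketch (illustrative only): a four-process pool with the same
# map/broadcast interface whether or not MPI is in use.
#
#     import math
#     from pycbc.pool import choose_pool
#     pool = choose_pool(4)
#     roots = pool.map(math.sqrt, range(8))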
pycbc-master/pycbc/transforms.py
# Copyright (C) 2017 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides classes and functions for transforming parameters.
"""
import os
import logging
import numpy
from pycbc import conversions
from pycbc import coordinates
from pycbc import cosmology
from pycbc.io import record
from pycbc.waveform import parameters
from pycbc.boundaries import Bounds
from pycbc import VARARGS_DELIM
from pycbc.pnutils import jframe_to_l0frame
class BaseTransform(object):
"""A base class for transforming between two sets of parameters."""
name = None
inverse = None
_inputs = []
_outputs = []
def __init__(self):
self.inputs = set(self._inputs)
self.outputs = set(self._outputs)
def __call__(self, maps):
return self.transform(maps)
def transform(self, maps):
"""This function transforms from inputs to outputs."""
raise NotImplementedError("Not added.")
def inverse_transform(self, maps):
"""The inverse conversions of transform. This function transforms from
outputs to inputs.
"""
raise NotImplementedError("Not added.")
def jacobian(self, maps):
"""The Jacobian for the inputs to outputs transformation."""
raise NotImplementedError("Jacobian transform not implemented.")
def inverse_jacobian(self, maps):
"""The Jacobian for the outputs to inputs transformation."""
raise NotImplementedError("Jacobian transform not implemented.")
@staticmethod
def format_output(old_maps, new_maps):
"""This function takes the returned dict from `transform` and converts
it to the same datatype as the input.
Parameters
----------
old_maps : {FieldArray, dict}
The mapping object to add new maps to.
new_maps : dict
A dict with key as parameter name and value is numpy.array.
Returns
-------
{FieldArray, dict}
The old_maps object with new keys from new_maps.
"""
# if input is FieldArray then return FieldArray
if isinstance(old_maps, record.FieldArray):
keys = new_maps.keys()
values = [new_maps[key] for key in keys]
for key, vals in zip(keys, values):
try:
old_maps = old_maps.add_fields([vals], [key])
except ValueError:
old_maps[key] = vals
return old_maps
# if input is dict then return dict
elif isinstance(old_maps, dict):
out = old_maps.copy()
out.update(new_maps)
return out
# else error
else:
raise TypeError("Input type must be FieldArray or dict.")
@classmethod
def from_config(cls, cp, section, outputs,
skip_opts=None, additional_opts=None):
"""Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
"""
tag = outputs
if skip_opts is None:
skip_opts = []
if additional_opts is None:
additional_opts = {}
else:
additional_opts = additional_opts.copy()
outputs = set(outputs.split(VARARGS_DELIM))
special_args = ["name"] + skip_opts + list(additional_opts.keys())
# get any extra arguments to pass to init
extra_args = {}
for opt in cp.options("-".join([section, tag])):
if opt in special_args:
continue
# check if option can be cast as a float
val = cp.get_opt_tag(section, opt, tag)
try:
val = float(val)
except ValueError:
pass
# add option
extra_args.update({opt: val})
extra_args.update(additional_opts)
out = cls(**extra_args)
# check that the outputs matches
if outputs - out.outputs != set() or out.outputs - outputs != set():
raise ValueError(
"outputs of class do not match outputs specified " "in section"
)
return out
class CustomTransform(BaseTransform):
"""Allows for any transform to be defined.
Parameters
----------
input_args : (list of) str
The names of the input parameters.
output_args : (list of) str
The names of the output parameters.
transform_functions : dict
Dictionary mapping input args to a string giving a function call;
e.g., ``{'q': 'q_from_mass1_mass2(mass1, mass2)'}``.
jacobian : str, optional
String giving a jacobian function. The function must be in terms of
the input arguments.
Examples
--------
Create a custom transform that converts mass1, mass2 to mtotal, q:
>>> t = transforms.CustomTransform(['mass1', 'mass2'], ['mtotal', 'q'], {'mtotal': 'mass1+mass2', 'q': 'mass1/mass2'}, '(mass1 + mass2) / mass2**2')
Evaluate a pair of masses:
>>> t.transform({'mass1': 10., 'mass2': 5.})
{'mass1': 10.0, 'mass2': 5.0, 'mtotal': 15.0, 'q': 2.0}
The Jacobian for the same pair of masses:
>>> t.jacobian({'mass1': 10., 'mass2': 5.})
0.59999999999999998
"""
name = "custom"
def __init__(self, input_args, output_args, transform_functions,
jacobian=None):
if isinstance(input_args, str):
input_args = [input_args]
if isinstance(output_args, str):
output_args = [output_args]
self.inputs = set(input_args)
self.outputs = set(output_args)
self.transform_functions = transform_functions
self._jacobian = jacobian
# we'll create a scratch FieldArray space to do transforms on
# we'll default to length 1; this will be changed if a map is passed
# with more than one value in it
self._createscratch()
def _createscratch(self, shape=1):
"""Creates a scratch FieldArray to use for transforms."""
self._scratch = record.FieldArray(
shape, dtype=[(p, float) for p in self.inputs]
)
def _copytoscratch(self, maps):
"""Copies the data in maps to the scratch space.
If the maps contain arrays that are not the same shape as the scratch
space, a new scratch space will be created.
"""
try:
for p in self.inputs:
self._scratch[p][:] = maps[p]
except ValueError:
# we'll get a ValueError if the scratch space isn't the same size
# as the maps; in that case, re-create the scratch space with the
# appropriate size and try again
invals = maps[list(self.inputs)[0]]
if isinstance(invals, numpy.ndarray):
shape = invals.shape
else:
shape = len(invals)
self._createscratch(shape)
for p in self.inputs:
self._scratch[p][:] = maps[p]
def _getslice(self, maps):
"""Determines how to slice the scratch for returning values."""
invals = maps[list(self.inputs)[0]]
if not isinstance(invals, (numpy.ndarray, list)):
getslice = 0
else:
getslice = slice(None, None)
return getslice
def transform(self, maps):
"""Applies the transform functions to the given maps object.
Parameters
----------
maps : dict, or FieldArray
Returns
-------
dict or FieldArray
A map object containing the transformed variables, along with the
original variables. The type of the output will be the same as the
input.
"""
if self.transform_functions is None:
raise NotImplementedError("no transform function(s) provided")
# copy values to scratch
self._copytoscratch(maps)
# ensure that we return the same data type in each dict
getslice = self._getslice(maps)
# evaluate the functions
out = {
p: self._scratch[func][getslice]
for p, func in self.transform_functions.items()
}
return self.format_output(maps, out)
def jacobian(self, maps):
if self._jacobian is None:
raise NotImplementedError("no jacobian provided")
# copy values to scratch
self._copytoscratch(maps)
out = self._scratch[self._jacobian]
if isinstance(out, numpy.ndarray):
out = out[self._getslice(maps)]
return out
@classmethod
def from_config(cls, cp, section, outputs):
"""Loads a CustomTransform from the given config file.
Example section:
.. code-block:: ini
[{section}-outvar1+outvar2]
name = custom
inputs = inputvar1, inputvar2
outvar1 = func1(inputs)
outvar2 = func2(inputs)
jacobian = func(inputs)
"""
tag = outputs
outputs = set(outputs.split(VARARGS_DELIM))
inputs = map(str.strip,
cp.get_opt_tag(section, "inputs", tag).split(","))
# get the functions for each output
transform_functions = {}
for var in outputs:
# check if option can be cast as a float
func = cp.get_opt_tag(section, var, tag)
transform_functions[var] = func
s = "-".join([section, tag])
if cp.has_option(s, "jacobian"):
jacobian = cp.get_opt_tag(section, "jacobian", tag)
else:
jacobian = None
return cls(inputs, outputs, transform_functions, jacobian=jacobian)
#
# =============================================================================
#
# Forward Transforms
#
# =============================================================================
#
class MchirpQToMass1Mass2(BaseTransform):
"""Converts chirp mass and mass ratio to component masses."""
name = "mchirp_q_to_mass1_mass2"
def __init__(
self, mass1_param=None, mass2_param=None, mchirp_param=None, q_param=None
):
if mass1_param is None:
mass1_param = parameters.mass1
if mass2_param is None:
mass2_param = parameters.mass2
if mchirp_param is None:
mchirp_param = parameters.mchirp
if q_param is None:
q_param = parameters.q
self.mass1_param = mass1_param
self.mass2_param = mass2_param
self.mchirp_param = mchirp_param
self.q_param = q_param
self._inputs = [self.mchirp_param, self.q_param]
self._outputs = [self.mass1_param, self.mass2_param]
super(MchirpQToMass1Mass2, self).__init__()
def transform(self, maps):
"""This function transforms from chirp mass and mass ratio to component
masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpQToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'q': numpy.array([2.])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'q': array([ 2.])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[self.mass1_param] = conversions.mass1_from_mchirp_q(
maps[self.mchirp_param], maps[self.q_param]
)
out[self.mass2_param] = conversions.mass2_from_mchirp_q(
maps[self.mchirp_param], maps[self.q_param]
)
return self.format_output(maps, out)
def inverse_transform(self, maps):
"""This function transforms from component masses to chirp mass and
mass ratio.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpQToMass1Mass2()
>>> t.inverse_transform({'mass1': numpy.array([16.4]), 'mass2': numpy.array([8.2])})
{'mass1': array([ 16.4]), 'mass2': array([ 8.2]),
'mchirp': array([ 9.97717521]), 'q': 2.0}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
m1 = maps[self.mass1_param]
m2 = maps[self.mass2_param]
out[self.mchirp_param] = conversions.mchirp_from_mass1_mass2(m1, m2)
out[self.q_param] = m1 / m2
return self.format_output(maps, out)
def jacobian(self, maps):
"""Returns the Jacobian for transforming mchirp and q to mass1 and
mass2.
"""
mchirp = maps[self.mchirp_param]
q = maps[self.q_param]
return mchirp * ((1.0 + q) / q ** 3.0) ** (2.0 / 5)
def inverse_jacobian(self, maps):
"""Returns the Jacobian for transforming mass1 and mass2 to
mchirp and q.
"""
m1 = maps[self.mass1_param]
m2 = maps[self.mass2_param]
return conversions.mchirp_from_mass1_mass2(m1, m2) / m2 ** 2.0
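# A minimal round-trip sketch for MchirpQToMass1Mass2, reusing the values from
# the docstring example above (exact floating-point digits depend on the
# conversions module):
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> t = transforms.MchirpQToMass1Mass2()
#   >>> out = t.transform({'mchirp': numpy.array([10.]), 'q': numpy.array([2.])})
#   >>> out['mass1'] / out['mass2']   # the mass ratio q is recovered
#   array([ 2.])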
class MchirpEtaToMass1Mass2(BaseTransform):
"""Converts chirp mass and symmetric mass ratio to component masses."""
name = "mchirp_eta_to_mass1_mass2"
_inputs = [parameters.mchirp, parameters.eta]
_outputs = [parameters.mass1, parameters.mass2]
def transform(self, maps):
"""This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 11.48698355]), 'mass2': array([ 11.48698355]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.mass1] = conversions.mass1_from_mchirp_eta(
maps[parameters.mchirp], maps[parameters.eta]
)
out[parameters.mass2] = conversions.mass2_from_mchirp_eta(
maps[parameters.mchirp], maps[parameters.eta]
)
return self.format_output(maps, out)
def inverse_transform(self, maps):
"""This function transforms from component masses to chirp mass and
symmetric mass ratio.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.inverse_transform({'mass1': numpy.array([16.4]), 'mass2': numpy.array([8.2])})
{'mass1': array([ 16.4]), 'mass2': array([ 8.2]),
'mchirp': array([ 9.97717521]), 'eta': array([ 0.22222222])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
m1 = maps[parameters.mass1]
m2 = maps[parameters.mass2]
out[parameters.mchirp] = conversions.mchirp_from_mass1_mass2(m1, m2)
out[parameters.eta] = conversions.eta_from_mass1_mass2(m1, m2)
return self.format_output(maps, out)
def jacobian(self, maps):
"""Returns the Jacobian for transforming mchirp and eta to mass1 and
mass2.
"""
mchirp = maps[parameters.mchirp]
eta = maps[parameters.eta]
m1 = conversions.mass1_from_mchirp_eta(mchirp, eta)
m2 = conversions.mass2_from_mchirp_eta(mchirp, eta)
return mchirp * (m1 - m2) / (m1 + m2) ** 3
def inverse_jacobian(self, maps):
"""Returns the Jacobian for transforming mass1 and mass2 to
mchirp and eta.
"""
m1 = maps[parameters.mass1]
m2 = maps[parameters.mass2]
mchirp = conversions.mchirp_from_mass1_mass2(m1, m2)
eta = conversions.eta_from_mass1_mass2(m1, m2)
return -1.0 * mchirp / eta ** (6.0 / 5)
class ChirpDistanceToDistance(BaseTransform):
"""Converts chirp distance to luminosity distance, given the chirp mass."""
name = "chirp_distance_to_distance"
_inputs = [parameters.chirp_distance, parameters.mchirp]
_outputs = [parameters.distance]
def __init__(self, ref_mass=1.4):
self.inputs = set(self._inputs)
self.outputs = set(self._outputs)
self.ref_mass = ref_mass
def transform(self, maps):
"""This function transforms from chirp distance to luminosity distance,
given the chirp mass.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy as np
>>> from pycbc import transforms
>>> t = transforms.ChirpDistanceToDistance()
>>> t.transform({'chirp_distance': np.array([40.]), 'mchirp': np.array([1.2])})
{'mchirp': array([ 1.2]), 'chirp_distance': array([ 40.]), 'distance': array([ 39.48595679])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.distance] = conversions.distance_from_chirp_distance_mchirp(
maps[parameters.chirp_distance],
maps[parameters.mchirp],
ref_mass=self.ref_mass,
)
return self.format_output(maps, out)
def inverse_transform(self, maps):
"""This function transforms from luminosity distance to chirp distance,
given the chirp mass.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy as np
>>> from pycbc import transforms
>>> t = transforms.ChirpDistanceToDistance()
>>> t.inverse_transform({'distance': np.array([40.]), 'mchirp': np.array([1.2])})
{'distance': array([ 40.]), 'chirp_distance': array([ 40.52073522]), 'mchirp': array([ 1.2])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.chirp_distance] = conversions.chirp_distance(
maps[parameters.distance], maps[parameters.mchirp], ref_mass=self.ref_mass
)
return self.format_output(maps, out)
def jacobian(self, maps):
"""Returns the Jacobian for transforming chirp distance to
luminosity distance, given the chirp mass.
"""
mchirp = maps["mchirp"]
return (2.0 ** (-1.0 / 5) * self.ref_mass / mchirp) ** (-5.0 / 6)
def inverse_jacobian(self, maps):
"""Returns the Jacobian for transforming luminosity distance to
chirp distance, given the chirp mass.
"""
mchirp = maps["mchirp"]
return (2.0 ** (-1.0 / 5) * self.ref_mass / mchirp) ** (5.0 / 6)
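# The scaling applied above, read off from transform() and the Jacobians, is
#   distance = chirp_distance * (2.**(-1./5) * ref_mass / mchirp)**(-5./6)
# so the two distances coincide when mchirp equals 2**(-1./5) * ref_mass.
# A quick sketch of that special case:
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> t = transforms.ChirpDistanceToDistance(ref_mass=1.4)
#   >>> mc = numpy.array([2.**(-1./5) * 1.4])
#   >>> t.transform({'chirp_distance': numpy.array([40.]), 'mchirp': mc})['distance']
#   array([ 40.])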
class AlignTotalSpin(BaseTransform):
"""Converts angles from total angular momentum J frame to orbital angular
momentum L (waveform) frame"""
name = "align_total_spin"
_inputs = [parameters.thetajn, parameters.spin1x, parameters.spin1y,
parameters.spin1z, parameters.spin2x, parameters.spin2y,
parameters.spin2z, parameters.mass1, parameters.mass2,
parameters.f_ref, "phi_ref"]
_outputs = [parameters.inclination, parameters.spin1x, parameters.spin1y,
parameters.spin1z, parameters.spin2x, parameters.spin2y,
parameters.spin2z]
def __init__(self):
self.inputs = set(self._inputs)
self.outputs = set(self._outputs)
super(AlignTotalSpin, self).__init__()
def transform(self, maps):
"""
Rigidly rotate binary so that the total angular momentum has the given
inclination (iota) instead of the orbital angular momentum. Return
the new inclination, s1, and s2. s1 and s2 are dimensionless spin.
Note: the spins are assumed to be given in the frame defined by the
orbital angular momentum.
"""
if isinstance(maps, dict):
maps = record.FieldArray.from_kwargs(**maps)
newfields = [n for n in self._outputs if n not in maps.fieldnames]
newmaps = maps.add_fields([numpy.zeros(len(maps))]*len(newfields),
names=newfields)
for item in newmaps:
if not all(s == 0.0 for s in
[item[parameters.spin1x], item[parameters.spin1y],
item[parameters.spin2x], item[parameters.spin2y]]):
# Calculate the quantities required by jframe_to_l0frame
s1_a, s1_az, s1_pol = coordinates.cartesian_to_spherical(
item[parameters.spin1x], item[parameters.spin1y],
item[parameters.spin1z])
s2_a, s2_az, s2_pol = coordinates.cartesian_to_spherical(
item[parameters.spin2x], item[parameters.spin2y],
item[parameters.spin2z])
out = jframe_to_l0frame(
item[parameters.mass1],
item[parameters.mass2],
item[parameters.f_ref],
phiref=item["phi_ref"],
thetajn=item[parameters.thetajn],
phijl=numpy.pi,
spin1_a=s1_a,
spin2_a=s2_a,
spin1_polar=s1_pol,
spin2_polar=s2_pol,
spin12_deltaphi=s1_az-s2_az
)
for key in out:
item[key] = out[key]
else:
item[parameters.inclination] = item[parameters.thetajn]
return newmaps
class SphericalToCartesian(BaseTransform):
"""Converts spherical coordinates to cartesian.
Parameters
----------
x : str
The name of the x parameter.
y : str
The name of the y parameter.
z : str
The name of the z parameter.
radial : str
The name of the radial parameter.
azimuthal : str
The name of the azimuthal angle parameter.
polar : str
The name of the polar angle parameter.
"""
name = "spherical_to_cartesian"
def __init__(self, x, y, z, radial, azimuthal, polar):
self.x = x
self.y = y
self.z = z
self.radial = radial
self.polar = polar
self.azimuthal = azimuthal
self._inputs = [self.radial, self.azimuthal, self.polar]
self._outputs = [self.x, self.y, self.z]
super(SphericalToCartesian, self).__init__()
def transform(self, maps):
"""This function transforms from spherical to cartesian spins.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.SphericalToCartesian('x', 'y', 'z',
'a', 'phi', 'theta')
>>> t.transform({'a': numpy.array([0.1]), 'phi': numpy.array([0.1]),
'theta': numpy.array([0.1])})
{'a': array([ 0.1]), 'phi': array([ 0.1]), 'theta': array([ 0.1]),
'x': array([ 0.00993347]), 'y': array([ 0.00099667]),
'z': array([ 0.09950042])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
a = self.radial
az = self.azimuthal
po = self.polar
x, y, z = coordinates.spherical_to_cartesian(maps[a], maps[az], maps[po])
out = {self.x: x, self.y: y, self.z: z}
return self.format_output(maps, out)
def inverse_transform(self, maps):
"""This function transforms from cartesian to spherical spins.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
x = self.x
y = self.y
z = self.z
a, az, po = coordinates.cartesian_to_spherical(maps[x], maps[y], maps[z])
out = {self.radial: a, self.azimuthal: az, self.polar: po}
return self.format_output(maps, out)
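# A quick consistency sketch for SphericalToCartesian, reusing the docstring
# values above (the six parameter names are arbitrary strings chosen here):
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> t = transforms.SphericalToCartesian('x', 'y', 'z', 'a', 'phi', 'theta')
#   >>> out = t.transform({'a': numpy.array([0.1]), 'phi': numpy.array([0.1]),
#   ...                    'theta': numpy.array([0.1])})
#   >>> numpy.sqrt(out['x']**2 + out['y']**2 + out['z']**2)   # radius recovered
#   array([ 0.1])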
class SphericalSpin1ToCartesianSpin1(SphericalToCartesian):
"""Converts spherical spin parameters (radial and two angles) to
cartesian spin parameters. This class only transforms spins for the first
component mass.
**Deprecation Warning:** This will be removed in a future update. Use
:py:class:`SphericalToCartesian` with spin-parameter names passed in
instead.
"""
name = "spherical_spin_1_to_cartesian_spin_1"
def __init__(self):
logging.warning(
"Deprecation warning: the {} transform will be "
"removed in a future update. Please use {} instead, "
"passing spin1x, spin1y, spin1z, spin1_a, "
"spin1_azimuthal, spin1_polar as arguments.".format(
self.name, SphericalToCartesian.name
)
)
super(SphericalSpin1ToCartesianSpin1, self).__init__(
"spin1x", "spin1y", "spin1z", "spin1_a",
"spin1_azimuthal", "spin1_polar"
)
class SphericalSpin2ToCartesianSpin2(SphericalToCartesian):
"""Converts spherical spin parameters (radial and two angles) to
cartesian spin parameters. This class only transforms spins for the second
component mass.
**Deprecation Warning:** This will be removed in a future update. Use
:py:class:`SphericalToCartesian` with spin-parameter names passed in
instead.
"""
name = "spherical_spin_2_to_cartesian_spin_2"
def __init__(self):
logging.warning(
"Deprecation warning: the {} transform will be "
"removed in a future update. Please use {} instead, "
"passing spin2x, spin2y, spin2z, spin2_a, "
"spin2_azimuthal, spin2_polar as arguments.".format(
self.name, SphericalToCartesian.name
)
)
super(SphericalSpin2ToCartesianSpin2, self).__init__(
"spin2x", "spin2y", "spin2z",
"spin2_a", "spin2_azimuthal", "spin2_polar"
)
class DistanceToRedshift(BaseTransform):
"""Converts distance to redshift."""
name = "distance_to_redshift"
inverse = None
_inputs = [parameters.distance]
_outputs = [parameters.redshift]
def transform(self, maps):
"""This function transforms from distance to redshift.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.DistanceToRedshift()
>>> t.transform({'distance': numpy.array([1000])})
{'distance': array([1000]), 'redshift': 0.19650987609144363}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {parameters.redshift: cosmology.redshift(maps[parameters.distance])}
return self.format_output(maps, out)
class AlignedMassSpinToCartesianSpin(BaseTransform):
"""Converts mass-weighted spins to cartesian z-axis spins."""
name = "aligned_mass_spin_to_cartesian_spin"
_inputs = [parameters.mass1, parameters.mass2, parameters.chi_eff, "chi_a"]
_outputs = [
parameters.mass1,
parameters.mass2,
parameters.spin1z,
parameters.spin2z,
]
def transform(self, maps):
"""This function transforms from aligned mass-weighted spins to
cartesian spins aligned along the z-axis.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
mass1 = maps[parameters.mass1]
mass2 = maps[parameters.mass2]
out = {}
out[parameters.spin1z] = conversions.spin1z_from_mass1_mass2_chi_eff_chi_a(
mass1, mass2, maps[parameters.chi_eff], maps["chi_a"]
)
out[parameters.spin2z] = conversions.spin2z_from_mass1_mass2_chi_eff_chi_a(
mass1, mass2, maps[parameters.chi_eff], maps["chi_a"]
)
return self.format_output(maps, out)
def inverse_transform(self, maps):
"""This function transforms from component masses and cartesian spins
to mass-weighted spin parameters aligned with the angular momentum.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
mass1 = maps[parameters.mass1]
spin1z = maps[parameters.spin1z]
mass2 = maps[parameters.mass2]
spin2z = maps[parameters.spin2z]
out = {
parameters.chi_eff:
conversions.chi_eff(mass1, mass2, spin1z, spin2z),
"chi_a": conversions.chi_a(mass1, mass2, spin1z, spin2z),
}
return self.format_output(maps, out)
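# A sanity-check sketch for the mass-weighted aligned spins (assuming the
# usual chi_eff/chi_a definitions in pycbc.conversions): for equal masses and
# equal aligned spins, chi_eff equals the common spin and chi_a vanishes.
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> t = transforms.AlignedMassSpinToCartesianSpin()
#   >>> inv = t.inverse_transform({'mass1': numpy.array([10.]),
#   ...                            'mass2': numpy.array([10.]),
#   ...                            'spin1z': numpy.array([0.5]),
#   ...                            'spin2z': numpy.array([0.5])})
#   >>> inv['chi_eff'], inv['chi_a']
#   (array([ 0.5]), array([ 0.]))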
class PrecessionMassSpinToCartesianSpin(BaseTransform):
"""Converts mass-weighted spins to cartesian x-y plane spins."""
name = "precession_mass_spin_to_cartesian_spin"
_inputs = [parameters.mass1, parameters.mass2,
"xi1", "xi2", "phi_a", "phi_s"]
_outputs = [
parameters.mass1,
parameters.mass2,
parameters.spin1x,
parameters.spin1y,
parameters.spin2x,
parameters.spin2y,
]
def transform(self, maps):
"""This function transforms from mass-weighted spins to caretsian spins
in the x-y plane.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
# find primary and secondary masses
# since functions in conversions.py map to primary/secondary masses
m_p = conversions.primary_mass(maps["mass1"], maps["mass2"])
m_s = conversions.secondary_mass(maps["mass1"], maps["mass2"])
# find primary and secondary xi
# can re-purpose spin functions for just a generic variable
xi_p = conversions.primary_spin(
maps["mass1"], maps["mass2"], maps["xi1"], maps["xi2"]
)
xi_s = conversions.secondary_spin(
maps["mass1"], maps["mass2"], maps["xi1"], maps["xi2"]
)
# convert using convention of conversions.py that is mass1 > mass2
spinx_p = conversions.spin1x_from_xi1_phi_a_phi_s(
xi_p, maps["phi_a"], maps["phi_s"]
)
spiny_p = conversions.spin1y_from_xi1_phi_a_phi_s(
xi_p, maps["phi_a"], maps["phi_s"]
)
spinx_s = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s(
m_p, m_s, xi_s, maps["phi_a"], maps["phi_s"]
)
spiny_s = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s(
m_p, m_s, xi_s, maps["phi_a"], maps["phi_s"]
)
# map parameters from primary/secondary to indices
out = {}
if isinstance(m_p, numpy.ndarray):
mass1, mass2 = map(numpy.array, [maps["mass1"], maps["mass2"]])
mask_mass1_gte_mass2 = mass1 >= mass2
mask_mass1_lt_mass2 = mass1 < mass2
out[parameters.spin1x] = numpy.concatenate(
(spinx_p[mask_mass1_gte_mass2], spinx_s[mask_mass1_lt_mass2])
)
out[parameters.spin1y] = numpy.concatenate(
(spiny_p[mask_mass1_gte_mass2], spiny_s[mask_mass1_lt_mass2])
)
out[parameters.spin2x] = numpy.concatenate(
(spinx_p[mask_mass1_lt_mass2], spinx_s[mask_mass1_gte_mass2])
)
out[parameters.spin2y] = numpy.concatenate(
(spiny_p[mask_mass1_lt_mass2], spiny_s[mask_mass1_gte_mass2])
)
elif maps["mass1"] > maps["mass2"]:
out[parameters.spin1x] = spinx_p
out[parameters.spin1y] = spiny_p
out[parameters.spin2x] = spinx_s
out[parameters.spin2y] = spiny_s
else:
out[parameters.spin1x] = spinx_s
out[parameters.spin1y] = spiny_s
out[parameters.spin2x] = spinx_p
out[parameters.spin2y] = spiny_p
return self.format_output(maps, out)
def inverse_transform(self, maps):
"""This function transforms from component masses and cartesian spins to
mass-weighted spin parameters perpendicular with the angular momentum.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
# convert
out = {}
xi1 = conversions.primary_xi(
maps[parameters.mass1],
maps[parameters.mass2],
maps[parameters.spin1x],
maps[parameters.spin1y],
maps[parameters.spin2x],
maps[parameters.spin2y],
)
xi2 = conversions.secondary_xi(
maps[parameters.mass1],
maps[parameters.mass2],
maps[parameters.spin1x],
maps[parameters.spin1y],
maps[parameters.spin2x],
maps[parameters.spin2y],
)
out["phi_a"] = conversions.phi_a(
maps[parameters.mass1],
maps[parameters.mass2],
maps[parameters.spin1x],
maps[parameters.spin1y],
maps[parameters.spin2x],
maps[parameters.spin2y],
)
out["phi_s"] = conversions.phi_s(
maps[parameters.spin1x],
maps[parameters.spin1y],
maps[parameters.spin2x],
maps[parameters.spin2y],
)
# map parameters from primary/secondary to indices
if isinstance(xi1, numpy.ndarray):
mass1, mass2 = map(
numpy.array, [maps[parameters.mass1], maps[parameters.mass2]]
)
mask_mass1_gte_mass2 = mass1 >= mass2
mask_mass1_lt_mass2 = mass1 < mass2
out["xi1"] = numpy.concatenate(
(xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])
)
out["xi2"] = numpy.concatenate(
(xi1[mask_mass1_lt_mass2], xi2[mask_mass1_gte_mass2])
)
elif maps["mass1"] > maps["mass2"]:
out["xi1"] = xi1
out["xi2"] = xi2
else:
out["xi1"] = xi2
out["xi2"] = xi1
return self.format_output(maps, out)
class CartesianSpinToChiP(BaseTransform):
"""Converts cartesian spins to `chi_p`."""
name = "cartesian_spin_to_chi_p"
_inputs = [
parameters.mass1,
parameters.mass2,
parameters.spin1x,
parameters.spin1y,
parameters.spin2x,
parameters.spin2y,
]
_outputs = ["chi_p"]
def transform(self, maps):
"""This function transforms from component masses and caretsian spins
to chi_p.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array (the secondary's in-plane spin is zero here,
so chi_p reduces to the in-plane spin magnitude of the primary):
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.CartesianSpinToChiP()
>>> t.transform({'mass1': numpy.array([30.]), 'mass2': numpy.array([10.]),
...              'spin1x': numpy.array([0.3]), 'spin1y': numpy.array([0.4]),
...              'spin2x': numpy.array([0.]), 'spin2y': numpy.array([0.])})['chi_p']
array([ 0.5])
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out["chi_p"] = conversions.chi_p(
maps[parameters.mass1],
maps[parameters.mass2],
maps[parameters.spin1x],
maps[parameters.spin1y],
maps[parameters.spin2x],
maps[parameters.spin2y],
)
return self.format_output(maps, out)
class LambdaFromTOVFile(BaseTransform):
"""Transforms mass values corresponding to Lambda values for a given EOS
interpolating from the mass-Lambda data for that EOS read in from an
external ASCII file.
The interpolation of the mass-Lambda data is a one-dimensional piecewise
linear interpolation. If the ``redshift_mass`` keyword argument is ``True``
(the default), the mass values to be transformed are assumed to be detector
frame masses. In that case, a distance should be provided along with the
mass for transformation to the source frame mass before the Lambda values
are extracted from the interpolation. If the transform is read in from a
config file, an example code block would be:
.. code-block:: ini
[{section}-lambda1]
name = lambda_from_tov_file
mass_param = mass1
lambda_param = lambda1
distance = 40
mass_lambda_file = {filepath}
If this transform is used in a parameter estimation analysis where
distance is a variable parameter, the distance to be used will vary
with each draw. In that case, the example code block will be:
.. code-block:: ini
[{section}-lambda1]
name = lambda_from_tov_file
mass_param = mass1
lambda_param = lambda1
mass_lambda_file = filepath
If your prior is in terms of the source-frame masses (``srcmass``), then
you can shut off the redshifting by adding ``do-not-redshift-mass`` to the
config file. In this case you do not need to worry about a distance.
Example:
.. code-block:: ini
[{section}-lambda1]
name = lambda_from_tov_file
mass_param = srcmass1
lambda_param = lambda1
mass_lambda_file = filepath
do-not-redshift-mass =
Parameters
----------
mass_param : str
The name of the mass parameter to transform.
lambda_param : str
The name of the tidal deformability parameter that mass_param is to
be converted to interpolating from the data in the mass-Lambda file.
mass_lambda_file : str
Path of the mass-Lambda data file. The first column in the data file
should contain mass values, and the second column Lambda values.
distance : float, optional
The distance (in Mpc) of the source. Used to redshift the mass. Needed
if ``redshift_mass`` is True and no distance parameter exists in the
samples. If None, then a distance must be provided to the transform.
redshift_mass : bool, optional
Redshift the mass parameters when computing the lambdas. Default is
True.
file_columns : list of str, optional
The names and order of columns in the ``mass_lambda_file``. Must
contain at least 'mass' and 'lambda'. If not provided, will assume the
order is ('mass', 'lambda').
"""
name = "lambda_from_tov_file"
def __init__(
self,
mass_param,
lambda_param,
mass_lambda_file,
distance=None,
redshift_mass=True,
file_columns=None,
):
self._mass_lambda_file = mass_lambda_file
self._mass_param = mass_param
self._lambda_param = lambda_param
self.redshift_mass = redshift_mass
self._distance = distance
self._inputs = [mass_param, "distance"]
self._outputs = [lambda_param]
if file_columns is None:
file_columns = ["mass", "lambda"]
dtype = [(fname, float) for fname in file_columns]
data = numpy.loadtxt(self._mass_lambda_file, dtype=dtype)
self._data = data
super(LambdaFromTOVFile, self).__init__()
@property
def mass_param(self):
"""Returns the input mass parameter."""
return self._mass_param
@property
def lambda_param(self):
"""Returns the output lambda parameter."""
return self._lambda_param
@property
def data(self):
return self._data
@property
def mass_data(self):
"""Returns the mass data read from the mass-Lambda data file for
an EOS.
"""
return self._data["mass"]
@property
def lambda_data(self):
"""Returns the Lambda data read from the mass-Lambda data file for
an EOS.
"""
return self._data["lambda"]
@property
def distance(self):
"""Returns the fixed distance to transform mass samples from detector
to source frame if one is specified.
"""
return self._distance
@staticmethod
def lambda_from_tov_data(m_src, mass_data, lambda_data):
"""Returns Lambda corresponding to a given mass interpolating from the
TOV data.
Parameters
----------
m_src : float
Value of the source-frame mass.
mass_data : array
Mass array from the Lambda-M curve of an EOS.
lambda_data : array
Lambda array from the Lambda-M curve of an EOS.
Returns
-------
lambdav : float
The Lambda corresponding to the mass `m` for the EOS considered.
"""
if m_src > mass_data.max():
# assume black hole
lambdav = 0.0
else:
lambdav = numpy.interp(m_src, mass_data, lambda_data)
return lambdav
def transform(self, maps):
"""Computes the transformation of mass to Lambda.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
out : dict or FieldArray
A map between the transformed variable name and value(s), along
with the original variable name and value(s).
"""
m = maps[self._mass_param]
if self.redshift_mass:
if self._distance is not None:
d = self._distance
else:
try:
d = maps["distance"]
except KeyError as e:
logging.warning(
"Either provide distance samples in the "
"list of samples to be transformed, or "
"provide a fixed distance value as input "
"when initializing LambdaFromTOVFile."
)
raise e
shift = 1.0 / (1.0 + cosmology.redshift(abs(d)))
else:
shift = 1.0
out = {
self._lambda_param: self.lambda_from_tov_data(
m * shift, self._data["mass"], self._data["lambda"]
)
}
return self.format_output(maps, out)
@classmethod
def from_config(cls, cp, section, outputs):
# see if we're redshifting masses
if cp.has_option("-".join([section, outputs]), "do-not-redshift-mass"):
additional_opts = {"redshift_mass": False}
skip_opts = ["do-not-redshift-mass"]
else:
additional_opts = None
skip_opts = None
return super(LambdaFromTOVFile, cls).from_config(
cp, section, outputs, skip_opts=skip_opts, additional_opts=additional_opts
)
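# The interpolation used by LambdaFromTOVFile is a plain one-dimensional
# numpy.interp over the tabulated mass-Lambda curve, with masses above the
# maximum of the table mapped to Lambda = 0 (treated as a black hole). A toy
# sketch with a hypothetical two-point table:
#   >>> import numpy
#   >>> from pycbc.transforms import LambdaFromTOVFile
#   >>> mass_data = numpy.array([1.0, 2.0])
#   >>> lambda_data = numpy.array([1000.0, 100.0])
#   >>> LambdaFromTOVFile.lambda_from_tov_data(1.5, mass_data, lambda_data)
#   550.0
#   >>> LambdaFromTOVFile.lambda_from_tov_data(2.5, mass_data, lambda_data)
#   0.0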
class LambdaFromMultipleTOVFiles(BaseTransform):
"""Uses multiple equation of states.
Parameters
----------
mass_param : str
The name of the mass parameter to transform.
lambda_param : str
The name of the tidal deformability parameter that mass_param is to
be converted to interpolating from the data in the mass-Lambda file.
map_file : str
Path of a file listing one mass-Lambda data file per line. The integer
prefix of each listed file's basename is used as its EOS index.
distance : float, optional
The distance (in Mpc) of the source. Used to redshift the mass. If
None, then a distance must be provided to the transform.
file_columns : list of str, optional
The names and order of columns in the ``mass_lambda_file``. Must
contain at least 'mass' and 'lambda'. If not provided, will assume the
order is ('radius', 'mass', 'lambda').
"""
name = "lambda_from_multiple_tov_files"
def __init__(
self,
mass_param,
lambda_param,
map_file,
distance=None,
redshift_mass=True,
file_columns=None,
):
self._map_file = map_file
self._mass_param = mass_param
self._lambda_param = lambda_param
self._distance = distance
self.redshift_mass = redshift_mass
self._inputs = [mass_param, "eos", "distance"]
self._outputs = [lambda_param]
# create a dictionary of the EOS files from the map_file
self._eos_files = {}
with open(self._map_file, "r") as fp:
for line in fp:
fname = line.rstrip("\n")
eosidx = int(os.path.basename(fname).split(".")[0])
self._eos_files[eosidx] = os.path.abspath(fname)
# create an eos cache for fast load later
self._eos_cache = {}
if file_columns is None:
file_columns = ("radius", "mass", "lambda")
self._file_columns = file_columns
super(LambdaFromMultipleTOVFiles, self).__init__()
@property
def mass_param(self):
"""Returns the input mass parameter."""
return self._mass_param
@property
def lambda_param(self):
"""Returns the output lambda parameter."""
return self._lambda_param
@property
def map_file(self):
"""Returns the mass data read from the mass-Lambda data file for
an EOS.
"""
return self._map_file
@property
def distance(self):
"""Returns the fixed distance to transform mass samples from detector
to source frame if one is specified.
"""
return self._distance
def get_eos(self, eos_index):
"""Gets the EOS for the given index.
If the index is not in range returns None.
"""
try:
eos = self._eos_cache[eos_index]
except KeyError:
try:
fname = self._eos_files[eos_index]
eos = LambdaFromTOVFile(
mass_param=self._mass_param,
lambda_param=self._lambda_param,
mass_lambda_file=fname,
distance=self._distance,
redshift_mass=self.redshift_mass,
file_columns=self._file_columns,
)
self._eos_cache[eos_index] = eos
except KeyError:
eos = None
return eos
def transform(self, maps):
"""Transforms mass value and eos index into a lambda value"""
m = maps[self._mass_param]
# truncate the eos parameter to an integer index
eos_index = int(maps["eos"])
eos = self.get_eos(eos_index)
if eos is not None:
return eos.transform(maps)
else:
# no eos, just return nan
out = {self._lambda_param: numpy.nan}
return self.format_output(maps, out)
@classmethod
def from_config(cls, cp, section, outputs):
# see if we're redshifting masses
if cp.has_option("-".join([section, outputs]), "do-not-redshift-mass"):
additional_opts = {"redshift_mass": False}
skip_opts = ["do-not-redshift-mass"]
else:
additional_opts = None
skip_opts = None
return super(LambdaFromMultipleTOVFiles, cls).from_config(
cp, section, outputs, skip_opts=skip_opts, additional_opts=additional_opts
)
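# Sketch of the map-file layout expected by LambdaFromMultipleTOVFiles: one
# mass-Lambda data file per line, with the integer prefix of each basename
# used as the EOS index (see the parsing of self._map_file above). A
# hypothetical map file could contain
#
#   /path/to/eos_tables/1.dat
#   /path/to/eos_tables/2.dat
#
# so a sample with eos = 2.7 is truncated to index 2 and its Lambda is
# interpolated from 2.dat.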
class Log(BaseTransform):
"""Applies a log transform from an `inputvar` parameter to an `outputvar`
parameter. This is the inverse of the exponent transform.
Parameters
----------
inputvar : str
The name of the parameter to transform.
outputvar : str
The name of the transformed parameter.
"""
name = "log"
def __init__(self, inputvar, outputvar):
self._inputvar = inputvar
self._outputvar = outputvar
self._inputs = [inputvar]
self._outputs = [outputvar]
super(Log, self).__init__()
@property
def inputvar(self):
"""Returns the input parameter."""
return self._inputvar
@property
def outputvar(self):
"""Returns the output parameter."""
return self._outputvar
def transform(self, maps):
r"""Computes :math:`\log(x)`.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
out : dict or FieldArray
A map between the transformed variable name and value(s), along
with the original variable name and value(s).
"""
x = maps[self._inputvar]
out = {self._outputvar: numpy.log(x)}
return self.format_output(maps, out)
def inverse_transform(self, maps):
r"""Computes :math:`y = e^{x}`.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
out : dict or FieldArray
A map between the transformed variable name and value(s), along
with the original variable name and value(s).
"""
y = maps[self._outputvar]
out = {self._inputvar: numpy.exp(y)}
return self.format_output(maps, out)
def jacobian(self, maps):
r"""Computes the Jacobian of :math:`y = \log(x)`.
This is:
.. math::
\frac{\mathrm{d}y}{\mathrm{d}x} = \frac{1}{x}.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
float
The value of the jacobian at the given point(s).
"""
x = maps[self._inputvar]
return 1.0 / x
def inverse_jacobian(self, maps):
r"""Computes the Jacobian of :math:`y = e^{x}`.
This is:
.. math::
\frac{\mathrm{d}y}{\mathrm{d}x} = e^{x}.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
float
The value of the jacobian at the given point(s).
"""
x = maps[self._outputvar]
return numpy.exp(x)
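# A quick numerical sketch of the Log transform and its Jacobian (1/x, as
# implemented above); the parameter names are arbitrary:
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> t = transforms.Log('q', 'logq')
#   >>> t.transform({'q': numpy.array([numpy.e])})['logq']
#   array([ 1.])
#   >>> t.jacobian({'q': numpy.array([2.])})
#   array([ 0.5])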
class Logit(BaseTransform):
"""Applies a logit transform from an `inputvar` parameter to an `outputvar`
parameter. This is the inverse of the logistic transform.
Typically, the input of the logit function is assumed to have domain
:math:`\in (0, 1)`. However, the `domain` argument can be used to expand
this to any finite real interval.
Parameters
----------
inputvar : str
The name of the parameter to transform.
outputvar : str
The name of the transformed parameter.
domain : tuple or distributions.bounds.Bounds, optional
The domain of the input parameter. Can be any finite
interval. Default is (0., 1.).
"""
name = "logit"
def __init__(self, inputvar, outputvar, domain=(0.0, 1.0)):
self._inputvar = inputvar
self._outputvar = outputvar
self._inputs = [inputvar]
self._outputs = [outputvar]
self._bounds = Bounds(domain[0], domain[1],
btype_min="open", btype_max="open")
# shortcuts for quick access later
self._a = domain[0]
self._b = domain[1]
super(Logit, self).__init__()
@property
def inputvar(self):
"""Returns the input parameter."""
return self._inputvar
@property
def outputvar(self):
"""Returns the output parameter."""
return self._outputvar
@property
def bounds(self):
"""Returns the domain of the input parameter."""
return self._bounds
@staticmethod
def logit(x, a=0.0, b=1.0):
r"""Computes the logit function with domain :math:`x \in (a, b)`.
This is given by:
.. math::
\mathrm{logit}(x; a, b) = \log\left(\frac{x-a}{b-x}\right).
Note that this is also the inverse of the logistic function with range
:math:`(a, b)`.
Parameters
----------
x : float
The value to evaluate.
a : float, optional
The minimum bound of the domain of x. Default is 0.
b : float, optional
The maximum bound of the domain of x. Default is 1.
Returns
-------
float
The logit of x.
"""
return numpy.log(x - a) - numpy.log(b - x)
@staticmethod
def logistic(x, a=0.0, b=1.0):
r"""Computes the logistic function with range :math:`\in (a, b)`.
This is given by:
.. math::
\mathrm{logistic}(x; a, b) = \frac{a + b e^x}{1 + e^x}.
Note that this is also the inverse of the logit function with domain
:math:`(a, b)`.
Parameters
----------
x : float
The value to evaluate.
a : float, optional
The minimum bound of the range of the logistic function. Default
is 0.
b : float, optional
The maximum bound of the range of the logistic function. Default
is 1.
Returns
-------
float
The logistic of x.
"""
expx = numpy.exp(x)
return (a + b * expx) / (1.0 + expx)
def transform(self, maps):
r"""Computes :math:`\mathrm{logit}(x; a, b)`.
The domain :math:`a, b` of :math:`x` are given by the class's bounds.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
out : dict or FieldArray
A map between the transformed variable name and value(s), along
with the original variable name and value(s).
"""
x = maps[self._inputvar]
# check that x is in bounds
isin = self._bounds.__contains__(x)
if isinstance(isin, numpy.ndarray):
isin = isin.all()
if not isin:
raise ValueError("one or more values are not in bounds")
out = {self._outputvar: self.logit(x, self._a, self._b)}
return self.format_output(maps, out)
def inverse_transform(self, maps):
r"""Computes :math:`y = \mathrm{logistic}(x; a,b)`.
The codomain :math:`a, b` of :math:`y` are given by the class's bounds.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
out : dict or FieldArray
A map between the transformed variable name and value(s), along
with the original variable name and value(s).
"""
y = maps[self._outputvar]
out = {self._inputvar: self.logistic(y, self._a, self._b)}
return self.format_output(maps, out)
def jacobian(self, maps):
r"""Computes the Jacobian of :math:`y = \mathrm{logit}(x; a,b)`.
This is:
.. math::
\frac{\mathrm{d}y}{\mathrm{d}x} = \frac{b -a}{(x-a)(b-x)},
where :math:`x \in (a, b)`.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
float
The value of the jacobian at the given point(s).
"""
x = maps[self._inputvar]
# check that x is in bounds
isin = self._bounds.__contains__(x)
if isinstance(isin, numpy.ndarray) and not isin.all():
raise ValueError("one or more values are not in bounds")
elif not isin:
raise ValueError("{} is not in bounds".format(x))
return (self._b - self._a) / ((x - self._a) * (self._b - x))
def inverse_jacobian(self, maps):
r"""Computes the Jacobian of :math:`y = \mathrm{logistic}(x; a,b)`.
This is:
.. math::
\frac{\mathrm{d}y}{\mathrm{d}x} = \frac{e^x (b-a)}{(1+e^x)^2},
where :math:`y \in (a, b)`.
Parameters
----------
maps : dict or FieldArray
A dictionary or FieldArray which provides a map between the
parameter name of the variable to transform and its value(s).
Returns
-------
float
The value of the jacobian at the given point(s).
"""
x = maps[self._outputvar]
expx = numpy.exp(x)
return expx * (self._b - self._a) / (1.0 + expx) ** 2.0
@classmethod
def from_config(cls, cp, section, outputs,
skip_opts=None, additional_opts=None):
"""Initializes a Logit transform from the given section.
The section must specify an input and output variable name. The domain
of the input may be specified using `min-{input}`, `max-{input}`.
Example:
.. code-block:: ini
[{section}-logitq]
name = logit
inputvar = q
outputvar = logitq
min-q = 1
max-q = 8
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
"""
# pull out the minimum, maximum values of the input variable
inputvar = cp.get_opt_tag(section, "inputvar", outputs)
s = "-".join([section, outputs])
opt = "min-{}".format(inputvar)
if skip_opts is None:
skip_opts = []
if additional_opts is None:
additional_opts = {}
else:
additional_opts = additional_opts.copy()
if cp.has_option(s, opt):
a = cp.get_opt_tag(section, opt, outputs)
skip_opts.append(opt)
else:
a = None
opt = "max-{}".format(inputvar)
if cp.has_option(s, opt):
b = cp.get_opt_tag(section, opt, outputs)
skip_opts.append(opt)
else:
b = None
if a is None and b is not None or b is None and a is not None:
raise ValueError(
"if providing a min(max)-{}, must also provide "
"a max(min)-{}".format(inputvar, inputvar)
)
elif a is not None:
additional_opts.update({"domain": (float(a), float(b))})
return super(Logit, cls).from_config(
cp, section, outputs, skip_opts, additional_opts
)
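# A numerical sketch of Logit on a non-default domain, matching the config
# example in from_config above (q restricted to (1, 8)):
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> t = transforms.Logit('q', 'logitq', domain=(1., 8.))
#   >>> t.transform({'q': numpy.array([4.5])})['logitq']   # midpoint maps to 0
#   array([ 0.])
#   >>> t.jacobian({'q': numpy.array([4.5])})   # (b-a)/((x-a)*(b-x)) = 7/12.25
#   array([ 0.57142857])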
#
# =============================================================================
#
# Inverse Transforms
#
# =============================================================================
#
class Mass1Mass2ToMchirpQ(MchirpQToMass1Mass2):
"""The inverse of MchirpQToMass1Mass2."""
name = "mass1_mass2_to_mchirp_q"
inverse = MchirpQToMass1Mass2
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
def __init__(
self, mass1_param=None, mass2_param=None, mchirp_param=None, q_param=None
):
if mass1_param is None:
mass1_param = parameters.mass1
if mass2_param is None:
mass2_param = parameters.mass2
if mchirp_param is None:
mchirp_param = parameters.mchirp
if q_param is None:
q_param = parameters.q
self.mass1_param = mass1_param
self.mass2_param = mass2_param
self.mchirp_param = mchirp_param
self.q_param = q_param
self._inputs = [self.mass1_param, self.mass2_param]
self._outputs = [self.mchirp_param, self.q_param]
BaseTransform.__init__(self)
class Mass1Mass2ToMchirpEta(MchirpEtaToMass1Mass2):
"""The inverse of MchirpEtaToMass1Mass2."""
name = "mass1_mass2_to_mchirp_eta"
inverse = MchirpEtaToMass1Mass2
_inputs = inverse._outputs
_outputs = inverse._inputs
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
class DistanceToChirpDistance(ChirpDistanceToDistance):
"""The inverse of ChirpDistanceToDistance."""
name = "distance_to_chirp_distance"
inverse = ChirpDistanceToDistance
_inputs = [parameters.distance, parameters.mchirp]
_outputs = [parameters.chirp_distance]
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
class CartesianToSpherical(SphericalToCartesian):
"""Converts spherical coordinates to cartesian.
Parameters
----------
x : str
The name of the x parameter.
y : str
The name of the y parameter.
z : str
The name of the z parameter.
radial : str
The name of the radial parameter.
azimuthal : str
The name of the azimuthal angle parameter.
polar : str
The name of the polar angle parameter.
"""
name = "cartesian_to_spherical"
inverse = SphericalToCartesian
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
def __init__(self, *args):
super(CartesianToSpherical, self).__init__(*args)
# swap inputs and outputs
outputs = self._inputs
inputs = self._outputs
self._inputs = inputs
self._outputs = outputs
self.inputs = set(self._inputs)
self.outputs = set(self._outputs)
class CartesianSpin1ToSphericalSpin1(CartesianToSpherical):
"""The inverse of SphericalSpin1ToCartesianSpin1.
**Deprecation Warning:** This will be removed in a future update. Use
:py:class:`CartesianToSpherical` with spin-parameter names passed in
instead.
"""
name = "cartesian_spin_1_to_spherical_spin_1"
def __init__(self):
logging.warning(
"Deprecation warning: the {} transform will be "
"removed in a future update. Please use {} instead, "
"passing spin1x, spin1y, spin1z, spin1_a, "
"spin1_azimuthal, spin1_polar as arguments.".format(
self.name, CartesianToSpherical.name
)
)
super(CartesianSpin1ToSphericalSpin1, self).__init__(
"spin1x", "spin1y", "spin1z",
"spin1_a", "spin1_azimuthal", "spin1_polar"
)
class CartesianSpin2ToSphericalSpin2(CartesianToSpherical):
"""The inverse of SphericalSpin2ToCartesianSpin2.
**Deprecation Warning:** This will be removed in a future update. Use
:py:class:`CartesianToSpherical` with spin-parameter names passed in
instead.
"""
name = "cartesian_spin_2_to_spherical_spin_2"
def __init__(self):
logging.warning(
"Deprecation warning: the {} transform will be "
"removed in a future update. Please use {} instead, "
"passing spin2x, spin2y, spin2z, spin2_a, "
"spin2_azimuthal, spin2_polar as arguments.".format(
self.name, CartesianToSpherical.name
)
)
super(CartesianSpin2ToSphericalSpin2, self).__init__(
"spin2x", "spin2y", "spin2z",
"spin2_a", "spin2_azimuthal", "spin2_polar"
)
class CartesianSpinToAlignedMassSpin(AlignedMassSpinToCartesianSpin):
"""The inverse of AlignedMassSpinToCartesianSpin."""
name = "cartesian_spin_to_aligned_mass_spin"
inverse = AlignedMassSpinToCartesianSpin
_inputs = inverse._outputs
_outputs = inverse._inputs
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
class CartesianSpinToPrecessionMassSpin(PrecessionMassSpinToCartesianSpin):
"""The inverse of PrecessionMassSpinToCartesianSpin."""
name = "cartesian_spin_to_precession_mass_spin"
inverse = PrecessionMassSpinToCartesianSpin
_inputs = inverse._outputs
_outputs = inverse._inputs
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
class ChiPToCartesianSpin(CartesianSpinToChiP):
"""The inverse of `CartesianSpinToChiP`."""
name = "cartesian_spin_to_chi_p"
inverse = CartesianSpinToChiP
_inputs = inverse._outputs
_outputs = inverse._inputs
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
class Exponent(Log):
"""Applies an exponent transform to an `inputvar` parameter.
This is the inverse of the log transform.
Parameters
----------
inputvar : str
The name of the parameter to transform.
outputvar : str
The name of the transformed parameter.
"""
name = "exponent"
inverse = Log
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
def __init__(self, inputvar, outputvar):
super(Exponent, self).__init__(outputvar, inputvar)
class Logistic(Logit):
"""Applies a logistic transform from an `input` parameter to an `output`
parameter. This is the inverse of the logit transform.
Typically, the output of the logistic function has range :math:`\in [0,1)`.
However, the `codomain` argument can be used to expand this to any
finite real interval.
Parameters
----------
inputvar : str
The name of the parameter to transform.
outputvar : str
The name of the transformed parameter.
frange : tuple or distributions.bounds.Bounds, optional
The range of the output parameter. Can be any finite
interval. Default is (0., 1.).
"""
name = "logistic"
inverse = Logit
transform = inverse.inverse_transform
inverse_transform = inverse.transform
jacobian = inverse.inverse_jacobian
inverse_jacobian = inverse.jacobian
def __init__(self, inputvar, outputvar, codomain=(0.0, 1.0)):
super(Logistic, self).__init__(outputvar, inputvar, domain=codomain)
@property
def bounds(self):
"""Returns the range of the output parameter."""
return self._bounds
@classmethod
def from_config(cls, cp, section, outputs,
skip_opts=None, additional_opts=None):
"""Initializes a Logistic transform from the given section.
The section must specify an input and output variable name. The
codomain of the output may be specified using `min-{output}`,
`max-{output}`. Example:
.. code-block:: ini
[{section}-q]
name = logistic
inputvar = logitq
outputvar = q
min-q = 1
max-q = 8
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
"""
# pull out the minimum, maximum values of the output variable
outputvar = cp.get_opt_tag(section, "output", outputs)
if skip_opts is None:
skip_opts = []
if additional_opts is None:
additional_opts = {}
else:
additional_opts = additional_opts.copy()
s = "-".join([section, outputs])
opt = "min-{}".format(outputvar)
if cp.has_option(s, opt):
a = cp.get_opt_tag(section, opt, outputs)
skip_opts.append(opt)
else:
a = None
opt = "max-{}".format(outputvar)
if cp.has_option(s, opt):
b = cp.get_opt_tag(section, opt, outputs)
skip_opts.append(opt)
else:
b = None
if a is None and b is not None or b is None and a is not None:
raise ValueError(
"if providing a min(max)-{}, must also provide "
"a max(min)-{}".format(outputvar, outputvar)
)
elif a is not None:
additional_opts.update({"codomain": (float(a), float(b))})
return super(Logistic, cls).from_config(
cp, section, outputs, skip_opts, additional_opts
)
# set the inverse of the forward transforms to the inverse transforms
MchirpQToMass1Mass2.inverse = Mass1Mass2ToMchirpQ
ChirpDistanceToDistance.inverse = DistanceToChirpDistance
SphericalToCartesian.inverse = CartesianToSpherical
SphericalSpin1ToCartesianSpin1.inverse = CartesianSpin1ToSphericalSpin1
SphericalSpin2ToCartesianSpin2.inverse = CartesianSpin2ToSphericalSpin2
AlignedMassSpinToCartesianSpin.inverse = CartesianSpinToAlignedMassSpin
PrecessionMassSpinToCartesianSpin.inverse = CartesianSpinToPrecessionMassSpin
ChiPToCartesianSpin.inverse = CartesianSpinToChiP
Log.inverse = Exponent
Logit.inverse = Logistic
#
# =============================================================================
#
# Collections of transforms
#
# =============================================================================
#
# dictionary of all transforms
transforms = {
CustomTransform.name: CustomTransform,
MchirpQToMass1Mass2.name: MchirpQToMass1Mass2,
Mass1Mass2ToMchirpQ.name: Mass1Mass2ToMchirpQ,
MchirpEtaToMass1Mass2.name: MchirpEtaToMass1Mass2,
Mass1Mass2ToMchirpEta.name: Mass1Mass2ToMchirpEta,
ChirpDistanceToDistance.name: ChirpDistanceToDistance,
DistanceToChirpDistance.name: DistanceToChirpDistance,
SphericalToCartesian.name: SphericalToCartesian,
CartesianToSpherical.name: CartesianToSpherical,
SphericalSpin1ToCartesianSpin1.name: SphericalSpin1ToCartesianSpin1,
CartesianSpin1ToSphericalSpin1.name: CartesianSpin1ToSphericalSpin1,
SphericalSpin2ToCartesianSpin2.name: SphericalSpin2ToCartesianSpin2,
CartesianSpin2ToSphericalSpin2.name: CartesianSpin2ToSphericalSpin2,
DistanceToRedshift.name: DistanceToRedshift,
AlignedMassSpinToCartesianSpin.name: AlignedMassSpinToCartesianSpin,
CartesianSpinToAlignedMassSpin.name: CartesianSpinToAlignedMassSpin,
PrecessionMassSpinToCartesianSpin.name: PrecessionMassSpinToCartesianSpin,
CartesianSpinToPrecessionMassSpin.name: CartesianSpinToPrecessionMassSpin,
ChiPToCartesianSpin.name: ChiPToCartesianSpin,
CartesianSpinToChiP.name: CartesianSpinToChiP,
Log.name: Log,
Exponent.name: Exponent,
Logit.name: Logit,
Logistic.name: Logistic,
LambdaFromTOVFile.name: LambdaFromTOVFile,
LambdaFromMultipleTOVFiles.name: LambdaFromMultipleTOVFiles,
AlignTotalSpin.name: AlignTotalSpin,
}
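# The registry above maps each transform's ``name`` attribute to its class;
# this is how read_transforms_from_config (at the bottom of this module)
# resolves the ``name`` option of a config section. A lookup sketch:
#   >>> from pycbc import transforms
#   >>> transforms.transforms['mchirp_q_to_mass1_mass2'] is transforms.MchirpQToMass1Mass2
#   True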
# standard CBC transforms: these are transforms that do not require input
# arguments; they are typically used in CBC parameter estimation to transform
# to coordinates understood by the waveform generator
common_cbc_forward_transforms = [
MchirpQToMass1Mass2(),
DistanceToRedshift(),
SphericalToCartesian(
parameters.spin1x,
parameters.spin1y,
parameters.spin1z,
parameters.spin1_a,
parameters.spin1_azimuthal,
parameters.spin1_polar,
),
SphericalToCartesian(
parameters.spin2x,
parameters.spin2y,
parameters.spin2z,
parameters.spin2_a,
parameters.spin2_azimuthal,
parameters.spin2_polar,
),
AlignedMassSpinToCartesianSpin(),
PrecessionMassSpinToCartesianSpin(),
ChiPToCartesianSpin(),
ChirpDistanceToDistance(),
]
common_cbc_inverse_transforms = [
_t.inverse()
for _t in common_cbc_forward_transforms
if not (_t.inverse is None or _t.name == "spherical_to_cartesian")
]
common_cbc_inverse_transforms.extend(
[
CartesianToSpherical(
parameters.spin1x,
parameters.spin1y,
parameters.spin1z,
parameters.spin1_a,
parameters.spin1_azimuthal,
parameters.spin1_polar,
),
CartesianToSpherical(
parameters.spin2x,
parameters.spin2y,
parameters.spin2z,
parameters.spin2_a,
parameters.spin2_azimuthal,
parameters.spin2_polar,
),
]
)
common_cbc_transforms = common_cbc_forward_transforms \
+ common_cbc_inverse_transforms
def get_common_cbc_transforms(requested_params, variable_args, valid_params=None):
"""Determines if any additional parameters from the InferenceFile are
needed to get derived parameters that user has asked for.
First it will try to add any base parameters that are required to calculate
the derived parameters. Then it will add any sampling parameters that are
required to calculate the base parameters needed.
Parameters
----------
requested_params : list
List of parameters that user wants.
variable_args : list
List of parameters that InferenceFile has.
valid_params : list
List of parameters that can be accepted.
Returns
-------
requested_params : list
Updated list of parameters that user wants.
all_c : list
List of BaseTransforms to apply.
"""
variable_args = (
set(variable_args) if not isinstance(variable_args, set) else variable_args
)
# try to parse any equations by putting all strings together
# this will get some garbage but ensures all alphanumeric/underscored
# parameter names are added
new_params = []
for opt in requested_params:
s = ""
for ch in opt:
s += ch if ch.isalnum() or ch == "_" else " "
new_params += s.split(" ")
requested_params = set(list(requested_params) + list(new_params))
# can pass a list of valid parameters to remove garbage from parsing above
if valid_params:
valid_params = set(valid_params)
requested_params = requested_params.intersection(valid_params)
# find all the transforms for the requested derived parameters
# calculated from base parameters
from_base_c = []
for converter in common_cbc_inverse_transforms:
if converter.outputs.issubset(variable_args) or \
converter.outputs.isdisjoint(requested_params):
continue
intersect = converter.outputs.intersection(requested_params)
if (
not intersect
or intersect.issubset(converter.inputs)
or intersect.issubset(variable_args)
):
continue
requested_params.update(converter.inputs)
from_base_c.append(converter)
# find all the transforms for the required base parameters
# calculated from sampling parameters
to_base_c = []
for converter in common_cbc_forward_transforms:
if (
converter.inputs.issubset(variable_args)
and len(converter.outputs.intersection(requested_params)) > 0
):
requested_params.update(converter.inputs)
to_base_c.append(converter)
variable_args.update(converter.outputs)
# get list of transforms that converts sampling parameters to the base
# parameters and then converts base parameters to the derived parameters
all_c = to_base_c + from_base_c
return list(requested_params), all_c
def apply_transforms(samples, transforms, inverse=False):
"""Applies a list of BaseTransform instances on a mapping object.
Parameters
----------
samples : {FieldArray, dict}
Mapping object to apply transforms to.
transforms : list
List of BaseTransform instances to apply. Nested transforms are assumed
to be in order for forward transforms.
inverse : bool, optional
Apply inverse transforms. In this case transforms will be applied in
the opposite order. Default is False.
Returns
-------
samples : {FieldArray, dict}
Mapping object with transforms applied. Same type as input.
"""
if inverse:
transforms = transforms[::-1]
for t in transforms:
try:
if inverse:
samples = t.inverse_transform(samples)
else:
samples = t.transform(samples)
except NotImplementedError:
continue
return samples
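# Usage sketch for apply_transforms with a single sampling-to-base transform;
# the transformed dict keeps the original parameters alongside the new ones:
#   >>> import numpy
#   >>> from pycbc import transforms
#   >>> samples = {'mchirp': numpy.array([10.]), 'q': numpy.array([2.])}
#   >>> samples = transforms.apply_transforms(samples, [transforms.MchirpQToMass1Mass2()])
#   >>> sorted(samples.keys())
#   ['mass1', 'mass2', 'mchirp', 'q']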
def compute_jacobian(samples, transforms, inverse=False):
"""Computes the jacobian of the list of transforms at the given sample
points.
Parameters
----------
samples : {FieldArray, dict}
Mapping object specifying points at which to compute jacobians.
transforms : list
List of BaseTransform instances to apply. Nested transforms are assumed
to be in order for forward transforms.
inverse : bool, optional
Compute inverse jacobians. Default is False.
Returns
-------
float :
The product of the jacobians of all of the transforms.
"""
j = 1.0
if inverse:
for t in transforms:
j *= t.inverse_jacobian(samples)
else:
for t in transforms:
j *= t.jacobian(samples)
return j
def order_transforms(transforms):
"""Orders transforms to ensure proper chaining.
For example, if `transforms = [B, A, C]`, and `A` produces outputs needed
by `B`, the transforms will be reordered to `[A, B, C]`.
Parameters
----------
transforms : list
List of transform instances to order.
Returns
-------
list :
List of transforms ordered such that forward transforms can be carried
out without error.
"""
# get a set of all inputs and all outputs
outputs = set().union(*[set(t.outputs)-set(t.inputs) for t in transforms])
out = []
remaining = [t for t in transforms]
while remaining:
# pull out transforms that have no inputs in the set of outputs
leftover = []
for t in remaining:
if t.inputs.isdisjoint(outputs):
out.append(t)
outputs -= t.outputs
else:
leftover.append(t)
remaining = leftover
return out
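# Ordering sketch: two chained Log transforms given in the "wrong" order are
# re-sorted so that the transform producing 'logx' runs before the one that
# consumes it (the parameter names here are arbitrary):
#   >>> from pycbc import transforms
#   >>> t1 = transforms.Log('x', 'logx')
#   >>> t2 = transforms.Log('logx', 'loglogx')
#   >>> [t.outputs for t in transforms.order_transforms([t2, t1])]
#   [{'logx'}, {'loglogx'}]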
def read_transforms_from_config(cp, section="transforms"):
"""Returns a list of PyCBC transform instances for a section in the
given configuration file.
If the transforms are nested (i.e., the output of one transform is the
input of another), the returned list will be sorted by the order of the
nests.
Parameters
----------
cp : WorkflowConfigParser
An open config file to read.
section : {"transforms", string}
Prefix on section names from which to retrieve the transforms.
Returns
-------
list
A list of the parsed transforms.
"""
trans = []
for subsection in cp.get_subsections(section):
name = cp.get_opt_tag(section, "name", subsection)
t = transforms[name].from_config(cp, section, subsection)
trans.append(t)
return order_transforms(trans)
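# Config sketch for read_transforms_from_config, following the section naming
# convention used throughout this module ([<section>-<outputs>], with '+'
# separating multiple output names in the tag). A hypothetical config:
#
#   [transforms-mchirp+q]
#   name = mass1_mass2_to_mchirp_q
#
# Reading this with a WorkflowConfigParser would be expected to return a
# single Mass1Mass2ToMchirpQ instance.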
84,293 | 32.663738 | 152 | py

pycbc | pycbc-master/pycbc/events/eventmgr.py
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This modules defines functions for clustering and thresholding timeseries to
produces event triggers
"""
import numpy, copy, os.path
import logging
import h5py
import pickle
from pycbc.types import Array
from pycbc.scheme import schemed
from pycbc.detector import Detector
from . import coinc, ranking
from .eventmgr_cython import findchirp_cluster_over_window_cython
@schemed("pycbc.events.threshold_")
def threshold(series, value):
"""Return list of values and indices values over threshold in series.
"""
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@schemed("pycbc.events.threshold_")
def threshold_only(series, value):
"""Return list of values and indices whose values in series are
larger (in absolute value) than value
"""
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
# FIXME: This should be under schemed, but I don't understand that yet!
def threshold_real_numpy(series, value):
arr = series.data
locs = numpy.where(arr > value)[0]
vals = arr[locs]
return locs, vals
@schemed("pycbc.events.threshold_")
def threshold_and_cluster(series, threshold, window):
"""Return list of values and indices values over threshold in series.
"""
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@schemed("pycbc.events.threshold_")
def _threshold_cluster_factory(series):
err_msg = "This class is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
class ThresholdCluster(object):
"""Create a threshold and cluster engine
Parameters
-----------
series : complex64
Input pycbc.types.Array (or subclass); it will be searched for
points above threshold that are then clustered
"""
def __new__(cls, *args, **kwargs):
real_cls = _threshold_cluster_factory(*args, **kwargs)
return real_cls(*args, **kwargs) # pylint:disable=not-callable
# The class below should serve as the parent for all schemed classes.
# The intention is that this class serves simply as the location for
# all documentation of the class and its methods, though that is not
# yet implemented. Perhaps something along the lines of:
#
# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance
#
# will work? Is there a better way?
class _BaseThresholdCluster(object):
def threshold_and_cluster(self, threshold, window):
"""
Threshold and cluster the memory specified at instantiation with the
threshold and window size specified at creation.
Parameters
-----------
threshold : float32
The minimum absolute value of the series given at object initialization
to return when thresholding and clustering.
window : uint32
The size (in number of samples) of the window over which to cluster
Returns:
--------
event_vals : complex64
Numpy array, complex values of the clustered events
event_locs : uint32
Numpy array, indices into series of location of events
"""
pass
def findchirp_cluster_over_window(times, values, window_length):
""" Reduce the events by clustering over a window using
the FindChirp clustering algorithm
Parameters
-----------
    times: Array
        The list of indices (sample points) of the SNR values
    values: Array
        The list of SNR values
    window_length: int
        The size of the window in integer samples. Must be positive.
Returns
-------
indices: Array
The reduced list of indices of the SNR values
"""
assert window_length > 0, 'Clustering window length is not positive'
indices = numpy.zeros(len(times), dtype=numpy.int32)
tlen = len(times)
absvalues = numpy.array(abs(values), copy=False)
times = numpy.array(times, dtype=numpy.int32, copy=False)
k = findchirp_cluster_over_window_cython(times, absvalues, window_length,
indices, tlen)
return indices[0:k+1]
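# An illustrative sketch (hypothetical trigger values, not from the original
# module): with a 10-sample window, only the loudest trigger in each group of
# nearby samples survives clustering.
def _findchirp_cluster_example():
    times = numpy.array([0, 3, 7, 25, 28], dtype=numpy.int32)
    snrs = numpy.array([5., 9., 6., 7.5, 7.], dtype=numpy.complex64)
    # samples 0-7 form one cluster and 25-28 another, so the indices of the
    # loudest triggers in each (1 and 3) are returned
    return findchirp_cluster_over_window(times, snrs, 10)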
def cluster_reduce(idx, snr, window_size):
""" Reduce the events by clustering over a window
Parameters
-----------
    idx: Array
        The list of indices of the SNR values
    snr: Array
        The list of SNR values
window_size: int
The size of the window in integer samples.
Returns
-------
indices: Array
The list of indices of the SNR values
snr: Array
The list of SNR values
"""
ind = findchirp_cluster_over_window(idx, snr, window_size)
return idx.take(ind), snr.take(ind)
class EventManager(object):
def __init__(self, opt, column, column_types, **kwds):
self.opt = opt
self.global_params = kwds
self.event_dtype = [('template_id', int)]
for col, coltype in zip(column, column_types):
self.event_dtype.append((col, coltype))
self.events = numpy.array([], dtype=self.event_dtype)
self.accumulate = [self.events]
self.template_params = []
self.template_index = -1
self.template_events = numpy.array([], dtype=self.event_dtype)
self.write_performance = False
def save_state(self, tnum_finished, filename):
"""Save the current state of the background buffers"""
from pycbc.io.hdf import dump_state
self.tnum_finished = tnum_finished
logging.info('Writing checkpoint file at template %s', tnum_finished)
fp = h5py.File(filename, 'w')
dump_state(self, fp, protocol=pickle.HIGHEST_PROTOCOL)
fp.close()
@staticmethod
def restore_state(filename):
"""Restore state of the background buffers from a file"""
from pycbc.io.hdf import load_state
fp = h5py.File(filename, 'r')
try:
mgr = load_state(fp)
except Exception as e:
fp.close()
raise e
fp.close()
next_template = mgr.tnum_finished + 1
logging.info('Restoring with checkpoint at template %s', next_template)
return mgr.tnum_finished + 1, mgr
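    # A hedged usage sketch (not in the original source): a filtering loop can
    # periodically checkpoint and later resume from the same file, e.g.
    #   mgr.save_state(tnum, 'checkpoint.hdf')
    #   ...
    #   next_template, mgr = EventManager.restore_state('checkpoint.hdf')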
@classmethod
def from_multi_ifo_interface(cls, opt, ifo, column, column_types, **kwds):
"""
To use this for a single ifo from the multi ifo interface requires
some small fixing of the opt structure. This does that. As we edit the
opt structure the process_params table will not be correct.
"""
opt = copy.deepcopy(opt)
opt_dict = vars(opt)
for arg, value in opt_dict.items():
if isinstance(value, dict):
setattr(opt, arg, getattr(opt, arg)[ifo])
return cls(opt, column, column_types, **kwds)
def chisq_threshold(self, value, num_bins, delta=0):
remove = []
for i, event in enumerate(self.events):
xi = event['chisq'] / (event['chisq_dof'] +
delta * event['snr'].conj() * event['snr'])
if xi > value:
remove.append(i)
self.events = numpy.delete(self.events, remove)
def newsnr_threshold(self, threshold):
""" Remove events with newsnr smaller than given threshold
"""
if not self.opt.chisq_bins:
raise RuntimeError('Chi-square test must be enabled in order to '
'use newsnr threshold')
nsnrs = ranking.newsnr(abs(self.events['snr']),
self.events['chisq'] / self.events['chisq_dof'])
remove_idxs = numpy.where(nsnrs < threshold)[0]
self.events = numpy.delete(self.events, remove_idxs)
def keep_near_injection(self, window, injections):
from pycbc.events.veto import indices_within_times
if len(self.events) == 0:
return
inj_time = numpy.array(injections.end_times())
gpstime = self.events['time_index'].astype(numpy.float64)
gpstime = gpstime / self.opt.sample_rate + self.opt.gps_start_time
i = indices_within_times(gpstime, inj_time - window, inj_time + window)
self.events = self.events[i]
def keep_loudest_in_interval(self, window, num_keep, statname="newsnr",
log_chirp_width=None):
if len(self.events) == 0:
return
e_copy = self.events.copy()
# Here self.events['snr'] is the complex SNR
e_copy['snr'] = abs(e_copy['snr'])
# Messy step because pycbc inspiral's internal 'chisq_dof' is 2p-2
# but stat.py / ranking.py functions use 'chisq_dof' = p
e_copy['chisq_dof'] = e_copy['chisq_dof'] / 2 + 1
statv = ranking.get_sngls_ranking_from_trigs(e_copy, statname)
# Convert trigger time to integer bin number
# NB time_index and window are in units of samples
wtime = (e_copy['time_index'] / window).astype(numpy.int32)
bins = numpy.unique(wtime)
if log_chirp_width:
from pycbc.conversions import mchirp_from_mass1_mass2
m1 = numpy.array([p['tmplt'].mass1 for p in self.template_params])
m2 = numpy.array([p['tmplt'].mass2 for p in self.template_params])
mc = mchirp_from_mass1_mass2(m1, m2)[e_copy['template_id']]
# convert chirp mass to integer bin number
imc = (numpy.log(mc) / log_chirp_width).astype(numpy.int32)
cbins = numpy.unique(imc)
keep = []
for b in bins:
if log_chirp_width:
for b2 in cbins:
bloc = numpy.where((wtime == b) & (imc == b2))[0]
bloudest = statv[bloc].argsort()[-num_keep:]
keep.append(bloc[bloudest])
else:
bloc = numpy.where((wtime == b))[0]
bloudest = statv[bloc].argsort()[-num_keep:]
keep.append(bloc[bloudest])
keep = numpy.concatenate(keep)
self.events = self.events[keep]
def add_template_events(self, columns, vectors):
""" Add a vector indexed """
# initialize with zeros - since vectors can be None, look for the
# longest one that isn't
new_events = None
for v in vectors:
if v is not None:
new_events = numpy.zeros(len(v), dtype=self.event_dtype)
break
# they shouldn't all be None
assert new_events is not None
new_events['template_id'] = self.template_index
for c, v in zip(columns, vectors):
if v is not None:
if isinstance(v, Array):
new_events[c] = v.numpy()
else:
new_events[c] = v
self.template_events = numpy.append(self.template_events, new_events)
def cluster_template_events(self, tcolumn, column, window_size):
""" Cluster the internal events over the named column
"""
cvec = self.template_events[column]
tvec = self.template_events[tcolumn]
if window_size == 0:
indices = numpy.arange(len(tvec))
else:
indices = findchirp_cluster_over_window(tvec, cvec, window_size)
self.template_events = numpy.take(self.template_events, indices)
def new_template(self, **kwds):
self.template_params.append(kwds)
self.template_index += 1
def add_template_params(self, **kwds):
self.template_params[-1].update(kwds)
def finalize_template_events(self):
self.accumulate.append(self.template_events)
self.template_events = numpy.array([], dtype=self.event_dtype)
def consolidate_events(self, opt, gwstrain=None):
self.events = numpy.concatenate(self.accumulate)
logging.info("We currently have %d triggers", len(self.events))
if opt.chisq_threshold and opt.chisq_bins:
logging.info("Removing triggers with poor chisq")
self.chisq_threshold(opt.chisq_threshold, opt.chisq_bins,
opt.chisq_delta)
logging.info("%d remaining triggers", len(self.events))
if opt.newsnr_threshold and opt.chisq_bins:
logging.info("Removing triggers with NewSNR below threshold")
self.newsnr_threshold(opt.newsnr_threshold)
logging.info("%d remaining triggers", len(self.events))
if opt.keep_loudest_interval:
logging.info("Removing triggers not within the top %s "
"loudest of a %s second interval by %s",
opt.keep_loudest_num, opt.keep_loudest_interval,
opt.keep_loudest_stat)
self.keep_loudest_in_interval\
(opt.keep_loudest_interval * opt.sample_rate,
opt.keep_loudest_num, statname=opt.keep_loudest_stat,
log_chirp_width=opt.keep_loudest_log_chirp_window)
logging.info("%d remaining triggers", len(self.events))
if opt.injection_window and hasattr(gwstrain, 'injections'):
logging.info("Keeping triggers within %s seconds of injection",
opt.injection_window)
self.keep_near_injection(opt.injection_window,
gwstrain.injections)
logging.info("%d remaining triggers", len(self.events))
self.accumulate = [self.events]
def finalize_events(self):
self.events = numpy.concatenate(self.accumulate)
def make_output_dir(self, outname):
path = os.path.dirname(outname)
if path != '':
if not os.path.exists(path) and path is not None:
os.makedirs(path)
def save_performance(self, ncores, nfilters, ntemplates, run_time,
setup_time):
"""
Calls variables from pycbc_inspiral to be used in a timing calculation
"""
self.run_time = run_time
self.setup_time = setup_time
self.ncores = ncores
self.nfilters = nfilters
self.ntemplates = ntemplates
self.write_performance = True
def write_events(self, outname):
""" Write the found events to a sngl inspiral table
"""
self.make_output_dir(outname)
if '.hdf' in outname:
self.write_to_hdf(outname)
else:
raise ValueError('Cannot write to this format')
def write_to_hdf(self, outname):
class fw(object):
def __init__(self, name, prefix):
self.f = h5py.File(name, 'w')
self.prefix = prefix
def __setitem__(self, name, data):
col = self.prefix + '/' + name
self.f.create_dataset(col, data=data,
compression='gzip',
compression_opts=9,
shuffle=True)
self.events.sort(order='template_id')
th = numpy.array([p['tmplt'].template_hash for p in
self.template_params])
tid = self.events['template_id']
f = fw(outname, self.opt.channel_name[0:2])
if len(self.events):
f['snr'] = abs(self.events['snr'])
try:
# Precessing
f['u_vals'] = self.events['u_vals']
f['coa_phase'] = self.events['coa_phase']
f['hplus_cross_corr'] = self.events['hplus_cross_corr']
except Exception:
# Not precessing
f['coa_phase'] = numpy.angle(self.events['snr'])
f['chisq'] = self.events['chisq']
f['bank_chisq'] = self.events['bank_chisq']
f['bank_chisq_dof'] = self.events['bank_chisq_dof']
f['cont_chisq'] = self.events['cont_chisq']
f['end_time'] = self.events['time_index'] / \
float(self.opt.sample_rate) \
+ self.opt.gps_start_time
try:
# Precessing
template_sigmasq_plus = numpy.array(
[t['sigmasq_plus'] for t in self.template_params],
dtype=numpy.float32)
f['sigmasq_plus'] = template_sigmasq_plus[tid]
template_sigmasq_cross = numpy.array(
[t['sigmasq_cross'] for t in self.template_params],
dtype=numpy.float32)
f['sigmasq_cross'] = template_sigmasq_cross[tid]
# FIXME: I want to put something here, but I haven't yet
# figured out what it should be. I think we would also
# need information from the plus and cross correlation
# (both real and imaginary(?)) to get this.
f['sigmasq'] = template_sigmasq_plus[tid]
except Exception:
# Not precessing
f['sigmasq'] = self.events['sigmasq']
template_durations = [p['tmplt'].template_duration for p in
self.template_params]
f['template_duration'] = numpy.array(template_durations,
dtype=numpy.float32)[tid]
# FIXME: Can we get this value from the autochisq instance?
cont_dof = self.opt.autochi_number_points
if self.opt.autochi_onesided is None:
cont_dof = cont_dof * 2
if self.opt.autochi_two_phase:
cont_dof = cont_dof * 2
if self.opt.autochi_max_valued_dof:
cont_dof = self.opt.autochi_max_valued_dof
f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(self.events))
if 'chisq_dof' in self.events.dtype.names:
f['chisq_dof'] = self.events['chisq_dof'] / 2 + 1
else:
f['chisq_dof'] = numpy.zeros(len(self.events))
f['template_hash'] = th[tid]
if 'sg_chisq' in self.events.dtype.names:
f['sg_chisq'] = self.events['sg_chisq']
if self.opt.psdvar_segment is not None:
f['psd_var_val'] = self.events['psd_var_val']
if self.opt.trig_start_time:
f['search/start_time'] = numpy.array([self.opt.trig_start_time])
search_start_time = float(self.opt.trig_start_time)
else:
f['search/start_time'] = numpy.array([self.opt.gps_start_time +
self.opt.segment_start_pad])
search_start_time = float(self.opt.gps_start_time +
self.opt.segment_start_pad)
if self.opt.trig_end_time:
f['search/end_time'] = numpy.array([self.opt.trig_end_time])
search_end_time = float(self.opt.trig_end_time)
else:
f['search/end_time'] = numpy.array([self.opt.gps_end_time -
self.opt.segment_end_pad])
search_end_time = float(self.opt.gps_end_time -
self.opt.segment_end_pad)
if self.write_performance:
self.analysis_time = search_end_time - search_start_time
time_ratio = numpy.array(
[float(self.analysis_time) / float(self.run_time)])
temps_per_core = float(self.ntemplates) / float(self.ncores)
filters_per_core = float(self.nfilters) / float(self.ncores)
f['search/templates_per_core'] = \
numpy.array([float(temps_per_core) * float(time_ratio)])
f['search/filter_rate_per_core'] = \
numpy.array([filters_per_core / float(self.run_time)])
f['search/setup_time_fraction'] = \
numpy.array([float(self.setup_time) / float(self.run_time)])
f['search/run_time'] = numpy.array([float(self.run_time)])
if 'q_trans' in self.global_params:
qtrans = self.global_params['q_trans']
for key in qtrans:
if key == 'qtiles':
for seg in qtrans[key]:
for q in qtrans[key][seg]:
f['qtransform/%s/%s/%s' % (key, seg, q)] = \
qtrans[key][seg][q]
elif key == 'qplanes':
for seg in qtrans[key]:
f['qtransform/%s/%s' % (key, seg)] = qtrans[key][seg]
if 'gating_info' in self.global_params:
gating_info = self.global_params['gating_info']
for gate_type in ['file', 'auto']:
if gate_type in gating_info:
f['gating/' + gate_type + '/time'] = \
numpy.array([float(g[0]) for g in gating_info[gate_type]])
f['gating/' + gate_type + '/width'] = \
numpy.array([g[1] for g in gating_info[gate_type]])
f['gating/' + gate_type + '/pad'] = \
numpy.array([g[2] for g in gating_info[gate_type]])
class EventManagerMultiDetBase(EventManager):
def __init__(self, opt, ifos, column, column_types, psd=None, **kwargs):
self.opt = opt
self.ifos = ifos
self.global_params = kwargs
if psd is not None:
self.global_params['psd'] = psd[ifos[0]]
# The events array does not like holding the ifo as string,
# so create a mapping dict and hold as an int
self.ifo_dict = {}
self.ifo_reverse = {}
for i, ifo in enumerate(ifos):
self.ifo_dict[ifo] = i
self.ifo_reverse[i] = ifo
self.event_dtype = [('template_id', int), ('event_id', int)]
for col, coltype in zip(column, column_types):
self.event_dtype.append((col, coltype))
self.events = numpy.array([], dtype=self.event_dtype)
self.event_id_map = {}
self.template_params = []
self.template_index = -1
self.template_event_dict = {}
self.coinc_list = []
self.write_performance = False
for ifo in ifos:
self.template_event_dict[ifo] = \
numpy.array([], dtype=self.event_dtype)
def add_template_events_to_ifo(self, ifo, columns, vectors):
""" Add a vector indexed """
# Just call through to the standard function
self.template_events = self.template_event_dict[ifo]
self.add_template_events(columns, vectors)
self.template_event_dict[ifo] = self.template_events
self.template_events = None
class EventManagerCoherent(EventManagerMultiDetBase):
def __init__(self, opt, ifos, column, column_types, network_column,
network_column_types, psd=None, **kwargs):
super(EventManagerCoherent, self).__init__(
opt, ifos, column, column_types, psd=None, **kwargs)
self.network_event_dtype = \
[(ifo + '_event_id', int) for ifo in self.ifos]
self.network_event_dtype.append(('template_id', int))
self.network_event_dtype.append(('event_id', int))
for col, coltype in zip(network_column, network_column_types):
self.network_event_dtype.append((col, coltype))
self.network_events = numpy.array([], dtype=self.network_event_dtype)
self.event_index = {}
for ifo in self.ifos:
self.event_index[ifo] = 0
self.event_index['network'] = 0
self.template_event_dict['network'] = numpy.array(
[], dtype=self.network_event_dtype)
def cluster_template_network_events(self, tcolumn, column, window_size,
slide=0):
""" Cluster the internal events over the named column
Parameters
----------------
tcolumn
Indicates which column contains the time.
column
The named column to cluster.
window_size
The size of the window.
slide
Default is 0.
"""
slide_indices = (
self.template_event_dict['network']['slide_id'] == slide)
cvec = self.template_event_dict['network'][column][slide_indices]
tvec = self.template_event_dict['network'][tcolumn][slide_indices]
if not window_size == 0:
# cluster events over the window
indices = findchirp_cluster_over_window(tvec, cvec, window_size)
            # if some events do not belong to this slide, keep them and merge
            # the clustered indices back into the full index list
if any(~slide_indices):
indices = numpy.concatenate((
numpy.flatnonzero(~slide_indices),
numpy.flatnonzero(slide_indices)[indices]))
indices.sort()
            # keep only the clustered indices for each key
for key in self.template_event_dict:
self.template_event_dict[key] = \
self.template_event_dict[key][indices]
else:
indices = numpy.arange(len(tvec))
def add_template_network_events(self, columns, vectors):
""" Add a vector indexed """
# initialize with zeros - since vectors can be None, look for the
# longest one that isn't
new_events = None
new_events = numpy.zeros(
max([len(v) for v in vectors if v is not None]),
dtype=self.network_event_dtype
)
# they shouldn't all be None
assert new_events is not None
new_events['template_id'] = self.template_index
for c, v in zip(columns, vectors):
if v is not None:
if isinstance(v, Array):
new_events[c] = v.numpy()
else:
new_events[c] = v
self.template_events = numpy.append(self.template_events, new_events)
def add_template_events_to_network(self, columns, vectors):
""" Add a vector indexed """
# Just call through to the standard function
self.template_events = self.template_event_dict['network']
self.add_template_network_events(columns, vectors)
self.template_event_dict['network'] = self.template_events
self.template_events = None
def write_to_hdf(self, outname):
class fw(object):
def __init__(self, name):
self.f = h5py.File(name, 'w')
def __setitem__(self, name, data):
col = self.prefix + '/' + name
self.f.create_dataset(
col, data=data, compression='gzip', compression_opts=9,
shuffle=True)
self.events.sort(order='template_id')
th = numpy.array(
[p['tmplt'].template_hash for p in self.template_params])
f = fw(outname)
# Output network stuff
f.prefix = 'network'
network_events = numpy.array(
[e for e in self.network_events], dtype=self.network_event_dtype)
for col in network_events.dtype.names:
if col == 'time_index':
f['end_time_gc'] = (
network_events[col]
/ float(self.opt.sample_rate[self.ifos[0].lower()])
+ self.opt.gps_start_time[self.ifos[0].lower()]
)
else:
f[col] = network_events[col]
# Individual ifo stuff
for i, ifo in enumerate(self.ifos):
tid = self.events['template_id'][self.events['ifo'] == i]
f.prefix = ifo
ifo_events = numpy.array([e for e in self.events
if e['ifo'] == self.ifo_dict[ifo]], dtype=self.event_dtype)
if len(ifo_events):
ifo_str = ifo.lower()[0] if ifo != 'H1' else ifo.lower()
f['snr_%s' % ifo_str] = abs(ifo_events['snr'])
f['event_id'] = ifo_events['event_id']
try:
# Precessing
f['u_vals'] = ifo_events['u_vals']
f['coa_phase'] = ifo_events['coa_phase']
f['hplus_cross_corr'] = ifo_events['hplus_cross_corr']
except Exception:
f['coa_phase'] = numpy.angle(ifo_events['snr'])
f['chisq'] = ifo_events['chisq']
f['bank_chisq'] = ifo_events['bank_chisq']
f['bank_chisq_dof'] = ifo_events['bank_chisq_dof']
f['cont_chisq'] = ifo_events['cont_chisq']
f['end_time'] = ifo_events['time_index'] / \
float(self.opt.sample_rate[ifo_str]) + \
self.opt.gps_start_time[ifo_str]
f['time_index'] = ifo_events['time_index']
try:
# Precessing
template_sigmasq_plus = numpy.array(
[t['sigmasq_plus'] for t in self.template_params],
dtype=numpy.float32
)
f['sigmasq_plus'] = template_sigmasq_plus[tid]
template_sigmasq_cross = numpy.array(
[t['sigmasq_cross'] for t in self.template_params],
dtype=numpy.float32
)
f['sigmasq_cross'] = template_sigmasq_cross[tid]
# FIXME: I want to put something here, but I haven't yet
# figured out what it should be. I think we would also
# need information from the plus and cross correlation
# (both real and imaginary(?)) to get this.
f['sigmasq'] = template_sigmasq_plus[tid]
except Exception:
# Not precessing
template_sigmasq = numpy.array(
[t['sigmasq'][ifo] for t in self.template_params],
dtype=numpy.float32)
f['sigmasq'] = template_sigmasq[tid]
template_durations = [p['tmplt'].template_duration for p in
self.template_params]
f['template_duration'] = numpy.array(template_durations,
dtype=numpy.float32)[tid]
# FIXME: Can we get this value from the autochisq instance?
# cont_dof = self.opt.autochi_number_points
# if self.opt.autochi_onesided is None:
# cont_dof = cont_dof * 2
# if self.opt.autochi_two_phase:
# cont_dof = cont_dof * 2
# if self.opt.autochi_max_valued_dof:
# cont_dof = self.opt.autochi_max_valued_dof
# f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(ifo_events))
if 'chisq_dof' in ifo_events.dtype.names:
f['chisq_dof'] = ifo_events['chisq_dof'] / 2 + 1
else:
f['chisq_dof'] = numpy.zeros(len(ifo_events))
f['template_hash'] = th[tid]
if self.opt.trig_start_time:
f['search/start_time'] = numpy.array([
self.opt.trig_start_time[ifo]], dtype=numpy.int32)
search_start_time = float(self.opt.trig_start_time[ifo])
else:
f['search/start_time'] = numpy.array([
self.opt.gps_start_time[ifo] +
self.opt.segment_start_pad[ifo]], dtype=numpy.int32)
search_start_time = float(self.opt.gps_start_time[ifo] +
self.opt.segment_start_pad[ifo])
if self.opt.trig_end_time:
f['search/end_time'] = numpy.array([
self.opt.trig_end_time[ifo]], dtype=numpy.int32)
search_end_time = float(self.opt.trig_end_time[ifo])
else:
f['search/end_time'] = numpy.array(
[self.opt.gps_end_time[ifo] -
self.opt.segment_end_pad[ifo]], dtype=numpy.int32)
search_end_time = float(self.opt.gps_end_time[ifo] -
self.opt.segment_end_pad[ifo])
if self.write_performance:
self.analysis_time = search_end_time - search_start_time
time_ratio = numpy.array([float(self.analysis_time) /
float(self.run_time)])
temps_per_core = float(self.ntemplates) / float(self.ncores)
filters_per_core = float(self.nfilters) / float(self.ncores)
f['search/templates_per_core'] = \
numpy.array([float(temps_per_core) * float(time_ratio)])
f['search/filter_rate_per_core'] = \
numpy.array([filters_per_core / float(self.run_time)])
f['search/setup_time_fraction'] = \
numpy.array([float(self.setup_time) / float(self.run_time)])
if 'gating_info' in self.global_params:
gating_info = self.global_params['gating_info']
for gate_type in ['file', 'auto']:
if gate_type in gating_info:
f['gating/' + gate_type + '/time'] = numpy.array(
[float(g[0]) for g in gating_info[gate_type]])
f['gating/' + gate_type + '/width'] = numpy.array(
[g[1] for g in gating_info[gate_type]])
f['gating/' + gate_type + '/pad'] = numpy.array(
[g[2] for g in gating_info[gate_type]])
def finalize_template_events(self):
# Check that none of the template events have the same time index as an
# existing event in events. I.e. don't list the same ifo event multiple
# times when looping over sky points and time slides.
existing_times = {}
new_times = {}
existing_template_id = {}
new_template_id = {}
existing_events_mask = {}
new_template_event_mask = {}
existing_template_event_mask = {}
for i, ifo in enumerate(self.ifos):
ifo_events = numpy.where(self.events['ifo'] == i)
existing_times[ifo] = self.events['time_index'][ifo_events]
new_times[ifo] = self.template_event_dict[ifo]['time_index']
existing_template_id[ifo] = self.events['template_id'][ifo_events]
new_template_id[ifo] = self.template_event_dict[ifo]['template_id']
# This is true for each existing event that has the same time index
# and template id as a template trigger.
existing_events_mask[ifo] = numpy.argwhere(
numpy.logical_and(
numpy.isin(existing_times[ifo], new_times[ifo]),
numpy.isin(existing_template_id[ifo], new_template_id[ifo])
)).reshape(-1,)
# This is true for each template event that has either a new
# trigger time or a new template id.
new_template_event_mask[ifo] = numpy.argwhere(
numpy.logical_or(
~numpy.isin(new_times[ifo], existing_times[ifo]),
~numpy.isin(new_template_id[ifo], existing_template_id[ifo])
)).reshape(-1,)
# This is true for each template event that has the same time index
            # and template id as an existing event trigger.
existing_template_event_mask[ifo] = numpy.argwhere(
numpy.logical_and(
numpy.isin(new_times[ifo], existing_times[ifo]),
numpy.isin(new_template_id[ifo], existing_template_id[ifo])
)).reshape(-1,)
# Set ids (These show how each trigger in the single ifo trigger
# list correspond to the network triggers)
num_events = len(new_template_event_mask[ifo])
new_event_ids = numpy.arange(self.event_index[ifo],
self.event_index[ifo] + num_events)
# Every template event that corresponds to a new trigger gets a new
# id. Triggers that have been found before are not saved.
self.template_event_dict[ifo]['event_id'][
new_template_event_mask[ifo]] = new_event_ids
self.template_event_dict['network'][ifo + '_event_id'][
new_template_event_mask[ifo]] = new_event_ids
# Template events that have been found before get the event id of
# the first time they were found.
self.template_event_dict['network'][ifo + '_event_id'][
existing_template_event_mask[ifo]] = \
self.events[self.events['ifo'] == i][
existing_events_mask[ifo]]['event_id']
self.event_index[ifo] = self.event_index[ifo] + num_events
# Add the network event ids for the events with this template.
num_events = len(self.template_event_dict['network'])
new_event_ids = numpy.arange(self.event_index['network'],
self.event_index['network'] + num_events)
self.event_index['network'] = self.event_index['network'] + num_events
self.template_event_dict['network']['event_id'] = new_event_ids
# Move template events for each ifo to the events list
for ifo in self.ifos:
self.events = numpy.append(
self.events,
self.template_event_dict[ifo][new_template_event_mask[ifo]]
)
self.template_event_dict[ifo] = \
numpy.array([], dtype=self.event_dtype)
# Move the template events for the network to the network events list
self.network_events = numpy.append(self.network_events,
self.template_event_dict['network'])
self.template_event_dict['network'] = \
numpy.array([], dtype=self.network_event_dtype)
class EventManagerMultiDet(EventManagerMultiDetBase):
def __init__(self, opt, ifos, column, column_types, psd=None, **kwargs):
super(EventManagerMultiDet, self).__init__(
opt, ifos, column, column_types, psd=None, **kwargs)
self.event_index = 0
def cluster_template_events_single_ifo(
self, tcolumn, column, window_size, ifo):
""" Cluster the internal events over the named column
"""
# Just call through to the standard function
self.template_events = self.template_event_dict[ifo]
self.cluster_template_events(tcolumn, column, window_size)
self.template_event_dict[ifo] = self.template_events
self.template_events = None
def finalize_template_events(self, perform_coincidence=True,
coinc_window=0.0):
# Set ids
for ifo in self.ifos:
num_events = len(self.template_event_dict[ifo])
new_event_ids = numpy.arange(self.event_index,
self.event_index+num_events)
self.template_event_dict[ifo]['event_id'] = new_event_ids
self.event_index = self.event_index+num_events
if perform_coincidence:
if not len(self.ifos) == 2:
err_msg = "Coincidence currently only supported for 2 ifos."
raise ValueError(err_msg)
ifo1 = self.ifos[0]
ifo2 = self.ifos[1]
end_times1 = self.template_event_dict[ifo1]['time_index'] /\
float(self.opt.sample_rate[ifo1]) + self.opt.gps_start_time[ifo1]
end_times2 = self.template_event_dict[ifo2]['time_index'] /\
float(self.opt.sample_rate[ifo2]) + self.opt.gps_start_time[ifo2]
light_travel_time = Detector(ifo1).light_travel_time_to_detector(
Detector(ifo2))
coinc_window = coinc_window + light_travel_time
# FIXME: Remove!!!
coinc_window = 2.0
if len(end_times1) and len(end_times2):
idx_list1, idx_list2, _ = \
coinc.time_coincidence(end_times1, end_times2,
coinc_window)
if len(idx_list1):
for idx1, idx2 in zip(idx_list1, idx_list2):
event1 = self.template_event_dict[ifo1][idx1]
event2 = self.template_event_dict[ifo2][idx2]
self.coinc_list.append((event1, event2))
for ifo in self.ifos:
self.events = numpy.append(self.events,
self.template_event_dict[ifo])
self.template_event_dict[ifo] = numpy.array([],
dtype=self.event_dtype)
def write_events(self, outname):
""" Write the found events to a sngl inspiral table
"""
self.make_output_dir(outname)
if '.hdf' in outname:
self.write_to_hdf(outname)
else:
raise ValueError('Cannot write to this format')
def write_to_hdf(self, outname):
class fw(object):
def __init__(self, name):
self.f = h5py.File(name, 'w')
def __setitem__(self, name, data):
col = self.prefix + '/' + name
self.f.create_dataset(col, data=data,
compression='gzip',
compression_opts=9,
shuffle=True)
self.events.sort(order='template_id')
th = numpy.array([p['tmplt'].template_hash for p in
self.template_params])
tid = self.events['template_id']
f = fw(outname)
for ifo in self.ifos:
f.prefix = ifo
ifo_events = numpy.array([e for e in self.events if
e['ifo'] == self.ifo_dict[ifo]],
dtype=self.event_dtype)
if len(ifo_events):
ifo_str = ifo.lower()[0] if ifo != 'H1' else ifo.lower()
f['snr_%s' % ifo_str] = abs(ifo_events['snr'])
try:
# Precessing
f['u_vals'] = ifo_events['u_vals']
f['coa_phase'] = ifo_events['coa_phase']
f['hplus_cross_corr'] = ifo_events['hplus_cross_corr']
except Exception:
f['coa_phase'] = numpy.angle(ifo_events['snr'])
f['chisq'] = ifo_events['chisq']
f['bank_chisq'] = ifo_events['bank_chisq']
f['bank_chisq_dof'] = ifo_events['bank_chisq_dof']
f['cont_chisq'] = ifo_events['cont_chisq']
f['end_time'] = ifo_events['time_index'] / \
float(self.opt.sample_rate[ifo_str]) + \
self.opt.gps_start_time[ifo_str]
try:
# Precessing
template_sigmasq_plus = numpy.array([t['sigmasq_plus'] for
t in self.template_params], dtype=numpy.float32)
f['sigmasq_plus'] = template_sigmasq_plus[tid]
template_sigmasq_cross = numpy.array([t['sigmasq_cross']
for t in self.template_params], dtype=numpy.float32)
f['sigmasq_cross'] = template_sigmasq_cross[tid]
# FIXME: I want to put something here, but I haven't yet
# figured out what it should be. I think we would also
# need information from the plus and cross correlation
# (both real and imaginary(?)) to get this.
f['sigmasq'] = template_sigmasq_plus[tid]
except Exception:
# Not precessing
template_sigmasq = numpy.array([t['sigmasq'][ifo] for t in
self.template_params],
dtype=numpy.float32)
f['sigmasq'] = template_sigmasq[tid]
template_durations = [p['tmplt'].template_duration for p in
self.template_params]
f['template_duration'] = \
numpy.array(template_durations, dtype=numpy.float32)[tid]
# FIXME: Can we get this value from the autochisq instance?
cont_dof = self.opt.autochi_number_points
if self.opt.autochi_onesided is None:
cont_dof = cont_dof * 2
# if self.opt.autochi_two_phase:
# cont_dof = cont_dof * 2
# if self.opt.autochi_max_valued_dof:
# cont_dof = self.opt.autochi_max_valued_dof
f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(ifo_events))
if 'chisq_dof' in ifo_events.dtype.names:
f['chisq_dof'] = ifo_events['chisq_dof'] / 2 + 1
else:
f['chisq_dof'] = numpy.zeros(len(ifo_events))
f['template_hash'] = th[tid]
if self.opt.psdvar_segment is not None:
f['psd_var_val'] = ifo_events['psd_var_val']
if self.opt.trig_start_time:
f['search/start_time'] = numpy.array(
[self.opt.trig_start_time[ifo]], dtype=numpy.int32)
search_start_time = float(self.opt.trig_start_time[ifo])
else:
f['search/start_time'] = numpy.array(
[self.opt.gps_start_time[ifo] +
self.opt.segment_start_pad[ifo]], dtype=numpy.int32)
search_start_time = float(self.opt.gps_start_time[ifo] +
self.opt.segment_start_pad[ifo])
if self.opt.trig_end_time:
f['search/end_time'] = numpy.array(
[self.opt.trig_end_time[ifo]], dtype=numpy.int32)
search_end_time = float(self.opt.trig_end_time[ifo])
else:
f['search/end_time'] = numpy.array(
[self.opt.gps_end_time[ifo] -
self.opt.segment_end_pad[ifo]], dtype=numpy.int32)
search_end_time = float(self.opt.gps_end_time[ifo] -
self.opt.segment_end_pad[ifo])
if self.write_performance:
self.analysis_time = search_end_time - search_start_time
time_ratio = numpy.array(
[float(self.analysis_time) / float(self.run_time)])
temps_per_core = float(self.ntemplates) / float(self.ncores)
filters_per_core = float(self.nfilters) / float(self.ncores)
f['search/templates_per_core'] = \
numpy.array([float(temps_per_core) * float(time_ratio)])
f['search/filter_rate_per_core'] = \
numpy.array([filters_per_core / float(self.run_time)])
f['search/setup_time_fraction'] = \
numpy.array([float(self.setup_time) / float(self.run_time)])
if 'gating_info' in self.global_params:
gating_info = self.global_params['gating_info']
for gate_type in ['file', 'auto']:
if gate_type in gating_info:
f['gating/' + gate_type + '/time'] = numpy.array(
[float(g[0]) for g in gating_info[gate_type]])
f['gating/' + gate_type + '/width'] = numpy.array(
[g[1] for g in gating_info[gate_type]])
f['gating/' + gate_type + '/pad'] = numpy.array(
[g[2] for g in gating_info[gate_type]])
__all__ = ['threshold_and_cluster', 'findchirp_cluster_over_window',
'threshold', 'cluster_reduce', 'ThresholdCluster',
'threshold_real_numpy', 'threshold_only',
'EventManager', 'EventManagerMultiDet', 'EventManagerCoherent']
| 49,942
| 44.526892
| 94
|
py
|
pycbc
|
pycbc-master/pycbc/events/trigger_fits.py
|
"""
Tools for maximum likelihood fits to single trigger statistic values
For some set of values above a threshold, e.g. trigger SNRs, the functions
in this module perform maximum likelihood fits with 1-sigma uncertainties
to various simple functional forms of PDF, all normalized to 1.
You can also obtain the fitted function and its (inverse) CDF and perform
a Kolmogorov-Smirnov test.
Usage:
# call the maximum likelihood estimator directly if the threshold is known
alpha = exponential_fitalpha(snrs, 5.5, numpy.ones_like(snrs))
# apply a threshold explicitly
alpha, sigma_alpha = fit_above_thresh('exponential', snrs, thresh=6.25)
# let the code work out the threshold from the smallest value via the default thresh=None
alpha, sigma_alpha = fit_above_thresh('exponential', snrs)
# or only fit the largest N values, i.e. tail fitting
thresh = tail_threshold(snrs, N=500)
alpha, sigma_alpha = fit_above_thresh('exponential', snrs, thresh)
# obtain the fitted function directly
xvals = numpy.linspace(5.5, 10.5, 20)
exponential_fit = expfit(xvals, alpha, thresh)
# or access function by name
exponential_fit_1 = fit_fn('exponential', xvals, alpha, thresh)
# Use weighting factors to e.g. take decimation into account
alpha, sigma_alpha = fit_above_thresh('exponential', snrs, weights=weights)
# get the KS test statistic and p-value - see scipy.stats.kstest
ks_stat, ks_pval = KS_test('exponential', snrs, alpha, thresh)
"""
# Copyright T. Dent 2015 (thomas.dent@aei.mpg.de)
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
import logging
import numpy
from scipy.stats import kstest
def exponential_fitalpha(vals, thresh, w):
"""
Maximum likelihood estimator for the fit factor for
an exponential decrease model
"""
return 1. / (numpy.average(vals, weights=w) - thresh)
def rayleigh_fitalpha(vals, thresh, w):
"""
Maximum likelihood estimator for the fit factor for
a Rayleigh distribution of events
"""
return 2. / (numpy.average(vals ** 2., weights=w) - thresh ** 2.)
def power_fitalpha(vals, thresh, w):
"""
Maximum likelihood estimator for the fit factor for
a power law model
"""
return numpy.average(numpy.log(vals/thresh), weights=w) ** -1. + 1.
fitalpha_dict = {
'exponential' : exponential_fitalpha,
'rayleigh' : rayleigh_fitalpha,
'power' : power_fitalpha
}
# measurement standard deviation = (-d^2 log L/d alpha^2)^(-1/2)
fitstd_dict = {
'exponential' : lambda weights, alpha : alpha / sum(weights) ** 0.5,
'rayleigh' : lambda weights, alpha : alpha / sum(weights) ** 0.5,
'power' : lambda weights, alpha : (alpha - 1.) / sum(weights) ** 0.5
}
def fit_above_thresh(distr, vals, thresh=None, weights=None):
"""
Maximum likelihood fit for the coefficient alpha
Fitting a distribution of discrete values above a given threshold.
Exponential p(x) = alpha exp(-alpha (x-x_t))
Rayleigh p(x) = alpha x exp(-alpha (x**2-x_t**2)/2)
Power p(x) = ((alpha-1)/x_t) (x/x_t)**-alpha
Values below threshold will be discarded.
If no threshold is specified the minimum sample value will be used.
Parameters
----------
distr : {'exponential', 'rayleigh', 'power'}
Name of distribution
vals : sequence of floats
Values to fit
thresh : float
Threshold to apply before fitting; if None, use min(vals)
weights: sequence of floats
Weighting factors to use for the values when fitting.
Default=None - all the same
Returns
-------
alpha : float
Fitted value
sigma_alpha : float
Standard error in fitted value
"""
vals = numpy.array(vals)
if thresh is None:
thresh = min(vals)
above_thresh = numpy.ones_like(vals, dtype=bool)
else:
above_thresh = vals >= thresh
if numpy.count_nonzero(above_thresh) == 0:
# Nothing is above threshold - warn and return -1
logging.warning("No values are above the threshold, %.2f, "
"maximum is %.2f.", thresh, vals.max())
return -1., -1.
vals = vals[above_thresh]
# Set up the weights
if weights is not None:
weights = numpy.array(weights)
w = weights[above_thresh]
else:
w = numpy.ones_like(vals)
alpha = fitalpha_dict[distr](vals, thresh, w)
return alpha, fitstd_dict[distr](w, alpha)
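# An illustrative sketch (hypothetical, simulated values rather than real
# triggers): fit an exponential tail to samples drawn above a threshold of 6
# and recover approximately the true decay constant alpha = 2.
def _fit_above_thresh_example(seed=0):
    rng = numpy.random.RandomState(seed)
    snrs = 6. + rng.exponential(scale=0.5, size=2000)
    alpha, sigma_alpha = fit_above_thresh('exponential', snrs, thresh=6.)
    return alpha, sigma_alpha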
# Variables:
# x: the trigger stat value(s) at which to evaluate the function
# a: slope parameter of the fit
# t: lower threshold stat value
fitfn_dict = {
'exponential' : lambda x, a, t : a * numpy.exp(-a * (x - t)),
'rayleigh' : lambda x, a, t : (a * x * \
numpy.exp(-a * (x ** 2 - t ** 2) / 2.)),
'power' : lambda x, a, t : (a - 1.) * x ** (-a) * t ** (a - 1.)
}
def fit_fn(distr, xvals, alpha, thresh):
"""
The fitted function normalized to 1 above threshold
To normalize to a given total count multiply by the count.
Parameters
----------
xvals : sequence of floats
Values where the function is to be evaluated
alpha : float
The fitted parameter
thresh : float
Threshold value applied to fitted values
Returns
-------
fit : array of floats
Fitted function at the requested xvals
"""
xvals = numpy.array(xvals)
fit = fitfn_dict[distr](xvals, alpha, thresh)
# set fitted values below threshold to 0
numpy.putmask(fit, xvals < thresh, 0.)
return fit
cum_fndict = {
'exponential' : lambda x, alpha, t : numpy.exp(-alpha * (x - t)),
'rayleigh' : lambda x, alpha, t : numpy.exp(-alpha * (x ** 2. - t ** 2.) / 2.),
'power' : lambda x, alpha, t : x ** (1. - alpha) * t ** (alpha - 1.)
}
def cum_fit(distr, xvals, alpha, thresh):
"""
Integral of the fitted function above a given value (reverse CDF)
The fitted function is normalized to 1 above threshold
Parameters
----------
xvals : sequence of floats
Values where the function is to be evaluated
alpha : float
The fitted parameter
thresh : float
Threshold value applied to fitted values
Returns
-------
cum_fit : array of floats
Reverse CDF of fitted function at the requested xvals
"""
xvals = numpy.array(xvals)
cum_fit = cum_fndict[distr](xvals, alpha, thresh)
# set fitted values below threshold to 0
numpy.putmask(cum_fit, xvals < thresh, 0.)
return cum_fit
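# A brief sketch (hypothetical parameter values) of evaluating the fitted PDF
# and its reverse CDF on a grid; both functions return zero below threshold.
def _evaluate_fit_example(alpha=2., thresh=6.):
    xvals = numpy.linspace(5., 10., 51)
    pdf_vals = fit_fn('exponential', xvals, alpha, thresh)
    rcdf_vals = cum_fit('exponential', xvals, alpha, thresh)
    return pdf_vals, rcdf_vals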
def tail_threshold(vals, N=1000):
"""Determine a threshold above which there are N louder values"""
vals = numpy.array(vals)
if len(vals) < N:
raise RuntimeError('Not enough input values to determine threshold')
vals.sort()
return min(vals[-N:])
def KS_test(distr, vals, alpha, thresh=None):
"""
Perform Kolmogorov-Smirnov test for fitted distribution
Compare the given set of discrete values above a given threshold to the
fitted distribution function.
If no threshold is specified, the minimum sample value will be used.
Returns the KS test statistic and its p-value: lower p means less
probable under the hypothesis of a perfect fit
Parameters
----------
distr : {'exponential', 'rayleigh', 'power'}
Name of distribution
vals : sequence of floats
Values to compare to fit
alpha : float
Fitted distribution parameter
thresh : float
Threshold to apply before fitting; if None, use min(vals)
Returns
-------
D : float
KS test statistic
p-value : float
p-value, assumed to be two-tailed
"""
vals = numpy.array(vals)
if thresh is None:
thresh = min(vals)
else:
vals = vals[vals >= thresh]
def cdf_fn(x):
return 1 - cum_fndict[distr](x, alpha, thresh)
return kstest(vals, cdf_fn)
def which_bin(par, minpar, maxpar, nbins, log=False):
"""
Helper function
Returns bin index where a parameter value belongs (from 0 through nbins-1)
when dividing the range between minpar and maxpar equally into bins.
Parameters
----------
par : float
Parameter value being binned
minpar : float
Minimum parameter value
maxpar : float
Maximum parameter value
nbins : int
Number of bins to use
log : boolean
If True, use log spaced bins
Returns
-------
binind : int
Bin index
"""
assert (par >= minpar and par <= maxpar)
if log:
par, minpar, maxpar = numpy.log(par), numpy.log(minpar), numpy.log(maxpar)
# par lies some fraction of the way between min and max
if minpar != maxpar:
frac = float(par - minpar) / float(maxpar - minpar)
else:
        # minpar == maxpar means there is only a single zero-width bin,
        # so the value must lie in that bin
frac = 0
# binind then lies between 0 and nbins - 1
binind = int(frac * nbins)
# corner case
if par == maxpar:
binind = nbins - 1
return binind
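# A small sketch (hypothetical numbers): with 4 linear bins spanning [10, 50],
# the value 27 lies about 42% of the way through the range and so falls in
# bin index 1; log-spaced bins shift the boundaries and give index 2.
def _which_bin_example():
    lin_bin = which_bin(27., 10., 50., 4)            # -> 1
    log_bin = which_bin(27., 10., 50., 4, log=True)  # -> 2
    return lin_bin, log_bin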
| 9,465
| 29.934641
| 89
|
py
|
pycbc
|
pycbc-master/pycbc/events/veto.py
|
""" This module contains utilities to manipulate trigger lists based on
segment.
"""
import numpy
from ligo.lw import table, lsctables, utils as ligolw_utils
from ligo.segments import segment, segmentlist
def start_end_to_segments(start, end):
return segmentlist([segment(s, e) for s, e in zip(start, end)])
def segments_to_start_end(segs):
segs.coalesce()
return (numpy.array([s[0] for s in segs]),
numpy.array([s[1] for s in segs]))
def start_end_from_segments(segment_file):
"""
Return the start and end time arrays from a segment file.
Parameters
----------
segment_file: xml segment file
Returns
-------
start: numpy.ndarray
end: numpy.ndarray
"""
from pycbc.io.ligolw import LIGOLWContentHandler as h
indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
segment_table = lsctables.SegmentTable.get_table(indoc)
start = numpy.array(segment_table.getColumnByName('start_time'))
start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
end = numpy.array(segment_table.getColumnByName('end_time'))
end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
return start + start_ns * 1e-9, end + end_ns * 1e-9
def indices_within_times(times, start, end):
"""
    Return an index array into times that lie within the durations defined by the start and end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
"""
# coalesce the start/end segments
start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())
tsort = times.argsort()
times_sorted = times[tsort]
left = numpy.searchsorted(times_sorted, start)
right = numpy.searchsorted(times_sorted, end)
if len(left) == 0:
return numpy.array([], dtype=numpy.uint32)
return tsort[numpy.hstack([numpy.r_[s:e] for s, e in zip(left, right)])]
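# An illustrative sketch (hypothetical times): select the triggers whose GPS
# times fall inside either of two short segments.
def _indices_within_times_example():
    times = numpy.array([100.5, 101.5, 105.0, 110.2])
    start = numpy.array([100.0, 110.0])
    end = numpy.array([102.0, 111.0])
    # triggers 0, 1 and 3 lie inside [100, 102) or [110, 111)
    return indices_within_times(times, start, end)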
def indices_outside_times(times, start, end):
"""
    Return an index array into times that lie outside the durations defined by the start and end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
"""
exclude = indices_within_times(times, start, end)
indices = numpy.arange(0, len(times))
return numpy.delete(indices, exclude)
def select_segments_by_definer(segment_file, segment_name=None, ifo=None):
""" Return the list of segments that match the segment name
Parameters
----------
segment_file: str
path to segment xml file
segment_name: str
Name of segment
ifo: str, optional
Returns
-------
seg: list of segments
"""
from pycbc.io.ligolw import LIGOLWContentHandler as h
indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
segment_table = table.Table.get_table(indoc, 'segment')
seg_def_table = table.Table.get_table(indoc, 'segment_definer')
def_ifos = seg_def_table.getColumnByName('ifos')
def_names = seg_def_table.getColumnByName('name')
def_ids = seg_def_table.getColumnByName('segment_def_id')
valid_id = []
for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids):
if ifo and ifo != def_ifo:
continue
if segment_name and segment_name != def_name:
continue
valid_id += [def_id]
start = numpy.array(segment_table.getColumnByName('start_time'))
start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
end = numpy.array(segment_table.getColumnByName('end_time'))
end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns
did = segment_table.getColumnByName('segment_def_id')
keep = numpy.array([d in valid_id for d in did])
if sum(keep) > 0:
return start_end_to_segments(start[keep], end[keep])
else:
return segmentlist([])
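# Usage sketch (the file and segment names below are hypothetical):
#   science_segs = select_segments_by_definer('H1-SCIENCE_SEGMENTS.xml',
#                                             segment_name='DATA_ANALYZED',
#                                             ifo='H1')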
def indices_within_segments(times, segment_files, ifo=None, segment_name=None):
""" Return the list of indices that should be vetoed by the segments in the
list of veto_files.
Parameters
----------
times: numpy.ndarray of integer type
Array of gps start times
segment_files: string or list of strings
A string or list of strings that contain the path to xml files that
contain a segment table
ifo: string, optional
The ifo to retrieve segments for from the segment files
segment_name: str, optional
name of segment
Returns
-------
indices: numpy.ndarray
The array of index values within the segments
segmentlist:
The segment list corresponding to the selected time.
"""
veto_segs = segmentlist([])
indices = numpy.array([], dtype=numpy.uint32)
for veto_file in segment_files:
veto_segs += select_segments_by_definer(veto_file, segment_name, ifo)
veto_segs.coalesce()
start, end = segments_to_start_end(veto_segs)
if len(start) > 0:
idx = indices_within_times(times, start, end)
indices = numpy.union1d(indices, idx)
return indices, veto_segs.coalesce()
def indices_outside_segments(times, segment_files, ifo=None, segment_name=None):
""" Return the list of indices that are outside the segments in the
list of segment files.
Parameters
----------
times: numpy.ndarray of integer type
Array of gps start times
segment_files: string or list of strings
A string or list of strings that contain the path to xml files that
contain a segment table
ifo: string, optional
The ifo to retrieve segments for from the segment files
segment_name: str, optional
name of segment
Returns
--------
indices: numpy.ndarray
The array of index values outside the segments
segmentlist:
The segment list corresponding to the selected time.
"""
exclude, segs = indices_within_segments(times, segment_files,
ifo=ifo, segment_name=segment_name)
indices = numpy.arange(0, len(times))
return numpy.delete(indices, exclude), segs
def get_segment_definer_comments(xml_file, include_version=True):
"""Returns a dict with the comment column as the value for each segment"""
from pycbc.io.ligolw import LIGOLWContentHandler as h
# read segment definer table
xmldoc = ligolw_utils.load_fileobj(xml_file,
compress='auto',
contenthandler=h)
seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
# put comment column into a dict
comment_dict = {}
for seg_def in seg_def_table:
if include_version:
full_channel_name = ':'.join([str(seg_def.ifos),
str(seg_def.name),
str(seg_def.version)])
else:
full_channel_name = ':'.join([str(seg_def.ifos),
str(seg_def.name)])
comment_dict[full_channel_name] = seg_def.comment
return comment_dict
| 7,572
| 32.361233
| 96
|
py
|
pycbc
|
pycbc-master/pycbc/events/threshold_cuda.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy, mako.template
from pycuda.tools import dtype_to_ctype
from pycuda.elementwise import ElementwiseKernel
from pycuda.compiler import SourceModule
from .eventmgr import _BaseThresholdCluster
import pycbc.scheme
threshold_op = """
if (i == 0)
bn[0] = 0;
pycuda::complex<float> val = in[i];
if ( abs(val) > threshold){
int n_w = atomicAdd(bn, 1);
outv[n_w] = val;
outl[n_w] = i;
}
"""
threshold_kernel = ElementwiseKernel(
" %(tp_in)s *in, %(tp_out1)s *outv, %(tp_out2)s *outl, %(tp_th)s threshold, %(tp_n)s *bn" % {
"tp_in": dtype_to_ctype(numpy.complex64),
"tp_out1": dtype_to_ctype(numpy.complex64),
"tp_out2": dtype_to_ctype(numpy.uint32),
"tp_th": dtype_to_ctype(numpy.float32),
"tp_n": dtype_to_ctype(numpy.uint32),
},
threshold_op,
"getstuff")
import pycuda.driver as drv
n = drv.pagelocked_empty((1), numpy.uint32, mem_flags=drv.host_alloc_flags.DEVICEMAP)
nptr = numpy.intp(n.base.get_device_pointer())
val = drv.pagelocked_empty((4096*256), numpy.complex64, mem_flags=drv.host_alloc_flags.DEVICEMAP)
vptr = numpy.intp(val.base.get_device_pointer())
loc = drv.pagelocked_empty((4096*256), numpy.int32, mem_flags=drv.host_alloc_flags.DEVICEMAP)
lptr = numpy.intp(loc.base.get_device_pointer())
class T():
pass
tn = T()
tv = T()
tl = T()
tn.gpudata = nptr
tv.gpudata = vptr
tl.gpudata = lptr
tn.flags = tv.flags = tl.flags = n.flags
tkernel1 = mako.template.Template("""
#include <stdio.h>
__global__ void threshold_and_cluster(float2* in, float2* outv, int* outl, int window, float threshold){
int s = window * blockIdx.x;
int e = s + window;
    // shared memory for chunk-size candidates
__shared__ float svr[${chunk}];
__shared__ float svi[${chunk}];
__shared__ int sl[${chunk}];
// shared memory for the warp size candidates
__shared__ float svv[32];
__shared__ int idx[32];
int ml = -1;
float mvr = 0;
float mvi = 0;
float re;
float im;
    // Iterate through the entire window-size chunk and find blockDim.x number
// of candidates
for (int i = s + threadIdx.x; i < e; i += blockDim.x){
re = in[i].x;
im = in[i].y;
if ((re * re + im * im) > (mvr * mvr + mvi * mvi)){
mvr = re;
mvi = im;
ml = i;
}
}
// Save the candidate from this thread to shared memory
svr[threadIdx.x] = mvr;
svi[threadIdx.x] = mvi;
sl[threadIdx.x] = ml;
__syncthreads();
if (threadIdx.x < 32){
int tl = threadIdx.x;
        // Now that we have all the candidates for this chunk in shared memory,
        // iterate through them a warp at a time to reduce to 32 candidates
for (int i = threadIdx.x; i < ${chunk}; i += 32){
re = svr[i];
im = svi[i];
if ((re * re + im * im) > (mvr * mvr + mvi * mvi)){
tl = i;
mvr = re;
mvi = im;
}
}
// Store the 32 candidates into shared memory
svv[threadIdx.x] = svr[tl] * svr[tl] + svi[tl] * svi[tl];
idx[threadIdx.x] = tl;
        // Find the single loudest candidate using a manual logarithmic reduction
if ((threadIdx.x < 16) && (svv[threadIdx.x] < svv[threadIdx.x + 16])){
svv[threadIdx.x] = svv[threadIdx.x + 16];
idx[threadIdx.x] = idx[threadIdx.x + 16];
}
if ((threadIdx.x < 8) && (svv[threadIdx.x] < svv[threadIdx.x + 8])){
svv[threadIdx.x] = svv[threadIdx.x + 8];
idx[threadIdx.x] = idx[threadIdx.x + 8];
}
if ((threadIdx.x < 4) && (svv[threadIdx.x] < svv[threadIdx.x + 4])){
svv[threadIdx.x] = svv[threadIdx.x + 4];
idx[threadIdx.x] = idx[threadIdx.x + 4];
}
if ((threadIdx.x < 2) && (svv[threadIdx.x] < svv[threadIdx.x + 2])){
svv[threadIdx.x] = svv[threadIdx.x + 2];
idx[threadIdx.x] = idx[threadIdx.x + 2];
}
// Save the 1 candidate maximum and location to the output vectors
if (threadIdx.x == 0){
if (svv[threadIdx.x] < svv[threadIdx.x + 1]){
idx[0] = idx[1];
svv[0] = svv[1];
}
if (svv[0] > threshold){
tl = idx[0];
outv[blockIdx.x].x = svr[tl];
outv[blockIdx.x].y = svi[tl];
outl[blockIdx.x] = sl[tl];
} else{
outl[blockIdx.x] = -1;
}
}
}
}
""")
tkernel2 = mako.template.Template("""
#include <stdio.h>
__global__ void threshold_and_cluster2(float2* outv, int* outl, float threshold, int window){
__shared__ int loc[${blocks}];
__shared__ float val[${blocks}];
int i = threadIdx.x;
int l = outl[i];
loc[i] = l;
if (l == -1)
return;
val[i] = outv[i].x * outv[i].x + outv[i].y * outv[i].y;
// Check right
if ( (i < (${blocks} - 1)) && (val[i + 1] > val[i]) ){
outl[i] = -1;
return;
}
// Check left
if ( (i > 0) && (val[i - 1] > val[i]) ){
outl[i] = -1;
return;
}
}
""")
tfn_cache = {}
def get_tkernel(slen, window):
if window < 32:
raise ValueError("GPU threshold kernel does not support a window smaller than 32 samples")
elif window <= 4096:
nt = 128
elif window <= 16384:
nt = 256
elif window <= 32768:
nt = 512
else:
nt = 1024
nb = int(numpy.ceil(slen / float(window)))
if nb > 1024:
raise ValueError("More than 1024 blocks not supported yet")
try:
return tfn_cache[(nt, nb)], nt, nb
except KeyError:
mod = SourceModule(tkernel1.render(chunk=nt))
mod2 = SourceModule(tkernel2.render(blocks=nb))
fn = mod.get_function("threshold_and_cluster")
fn.prepare("PPPif")
fn2 = mod2.get_function("threshold_and_cluster2")
fn2.prepare("PPfi")
tfn_cache[(nt, nb)] = (fn, fn2)
return tfn_cache[(nt, nb)], nt, nb
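# Illustrative note (hypothetical sizes, not from the original source): for a
# SNR series of 2**20 samples clustered with a 4096-sample window, the kernel
# is built with nt = 128 threads per block and nb = ceil(2**20 / 4096) = 256
# blocks, and the compiled kernel pair is cached under the key (nt, nb).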
def threshold_and_cluster(series, threshold, window):
outl = tl.gpudata
outv = tv.gpudata
slen = len(series)
series = series.data.gpudata
(fn, fn2), nt, nb = get_tkernel(slen, window)
threshold = numpy.float32(threshold * threshold)
window = numpy.int32(window)
cl = loc[0:nb]
cv = val[0:nb]
fn.prepared_call((nb, 1), (nt, 1, 1), series, outv, outl, window, threshold,)
fn2.prepared_call((1, 1), (nb, 1, 1), outv, outl, threshold, window)
pycbc.scheme.mgr.state.context.synchronize()
w = (cl != -1)
return cv[w], cl[w]
class CUDAThresholdCluster(_BaseThresholdCluster):
def __init__(self, series):
self.series = series.data.gpudata
self.outl = tl.gpudata
self.outv = tv.gpudata
self.slen = len(series)
def threshold_and_cluster(self, threshold, window):
threshold = numpy.float32(threshold * threshold)
window = numpy.int32(window)
(fn, fn2), nt, nb = get_tkernel(self.slen, window)
fn = fn.prepared_call
fn2 = fn2.prepared_call
cl = loc[0:nb]
cv = val[0:nb]
fn((nb, 1), (nt, 1, 1), self.series, self.outv, self.outl, window, threshold,)
fn2((1, 1), (nb, 1, 1), self.outv, self.outl, threshold, window)
pycbc.scheme.mgr.state.context.synchronize()
w = (cl != -1)
return cv[w], cl[w]
def _threshold_cluster_factory(series):
return CUDAThresholdCluster
| 8,580
| 29.108772
| 105
|
py
|
pycbc
|
pycbc-master/pycbc/events/significance.py
|
# Copyright (C) 2022 Gareth Cabourn Davies
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module contains functions to calculate the significance
through different estimation methods of the background, and functions that
read in the associated options to do so.
"""
import logging
import copy
import numpy as np
from pycbc.events import trigger_fits as trstats
def count_n_louder(bstat, fstat, dec, skip_background=False,
**kwargs): # pylint:disable=unused-argument
""" Calculate for each foreground event the number of background events
that are louder than it.
Parameters
----------
bstat: numpy.ndarray
Array of the background statistic values
fstat: numpy.ndarray or scalar
Array of the foreground statistic values or single value
dec: numpy.ndarray
Array of the decimation factors for the background statistics
skip_background: optional, {boolean, False}
Skip calculating cumulative numbers for background triggers
Returns
-------
cum_back_num: numpy.ndarray
The cumulative array of background triggers. Does not return this
argument if skip_background == True
fore_n_louder: numpy.ndarray
The number of background triggers above each foreground trigger
"""
sort = bstat.argsort()
bstat = bstat[sort]
dec = dec[sort]
# calculate cumulative number of triggers louder than the trigger in
# a given index. We need to subtract the decimation factor, as the cumsum
# includes itself in the first sum (it is inclusive of the first value)
n_louder = dec[::-1].cumsum()[::-1] - dec
# Determine how many values are louder than the foreground ones
# We need to subtract one from the index, to be consistent with definition
# of n_louder, as here we do want to include the background value at the
# found index
idx = np.searchsorted(bstat, fstat, side='left') - 1
# If the foreground are *quieter* than the background or at the same value
# then the search sorted algorithm will choose position -1, which does not
# exist. We force it back to zero.
if isinstance(idx, np.ndarray): # Case where our input is an array
idx[idx < 0] = 0
else: # Case where our input is just a scalar value
if idx < 0:
idx = 0
fore_n_louder = n_louder[idx]
if not skip_background:
unsort = sort.argsort()
back_cum_num = n_louder[unsort]
return back_cum_num, fore_n_louder
return fore_n_louder
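# A minimal worked example of the counting above (values invented purely for
# illustration; the helper name is not part of the original module):
def _example_count_n_louder():
    bstat = np.array([1.0, 2.0, 3.0, 4.0])
    dec = np.ones_like(bstat)
    back_cnum, fore_n = count_n_louder(bstat, np.array([2.5]), dec)
    # two background events (3.0 and 4.0) are louder than the 2.5 foreground
    assert fore_n[0] == 2
    return back_cnum, fore_n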
def n_louder_from_fit(back_stat, fore_stat, dec_facs,
fit_function='exponential', fit_threshold=0):
"""
Use a fit to events in back_stat in order to estimate the
distribution for use in recovering the estimate count of louder
background events. Below the fit threshold, use the n_louder
method for these triggers
Parameters
----------
back_stat: numpy.ndarray
Array of the background statistic values
fore_stat: numpy.ndarray or scalar
Array of the foreground statistic values or single value
dec_facs: numpy.ndarray
Array of the decimation factors for the background statistics
fit_function: str
Name of the function to be used for the fit to background
statistic values
fit_threshold: float
Threshold above which triggers use the fitted value, below this
the counted number of louder events will be used
Returns
-------
back_cnum: numpy.ndarray
The estimated number of background events louder than each
background event
fn_louder: numpy.ndarray
The estimated number of background events louder than each
foreground event
"""
# Calculate the fitting factor of the ranking statistic distribution
alpha, _ = trstats.fit_above_thresh(fit_function, back_stat,
thresh=fit_threshold,
weights=dec_facs)
# Count background events above threshold as the cum_fit is
# normalised to 1
bg_above = back_stat > fit_threshold
n_above = np.sum(dec_facs[bg_above])
fg_above = fore_stat > fit_threshold
# These will be overwritten, but just to silence a warning
# in the case where trstats.cum_fit returns zero
back_cnum = np.zeros_like(back_stat)
fnlouder = np.zeros_like(fore_stat)
# Use the fit above the threshold
back_cnum[bg_above] = n_above * trstats.cum_fit(fit_function,
back_stat[bg_above],
alpha,
fit_threshold)
fnlouder[fg_above] = n_above * trstats.cum_fit(fit_function,
fore_stat[fg_above],
alpha,
fit_threshold)
# Below the fit threshold, we expect there to be sufficient events
# to use the count_n_louder method, and the distribution may deviate
# from the fit function
fg_below = np.logical_not(fg_above)
bg_below = np.logical_not(bg_above)
# Count the number of below-threshold background events louder than the
# bg and foreground
back_cnum[bg_below], fnlouder[fg_below] = \
count_n_louder(back_stat[bg_below], fore_stat[fg_below], dec_facs)
# As we have only counted the louder below-threshold events, need to
# add the above threshold events, which by definition are louder than
# all the below-threshold events
back_cnum[bg_below] += n_above
fnlouder[fg_below] += n_above
return back_cnum, fnlouder
_significance_meth_dict = {
'trigger_fit': n_louder_from_fit,
'n_louder': count_n_louder
}
_default_opt_dict = {
'method': 'n_louder',
'fit_threshold': None,
'fit_function': None}
def get_n_louder(back_stat, fore_stat, dec_facs,
method=_default_opt_dict['method'],
**kwargs): # pylint:disable=unused-argument
"""
Wrapper to find the correct n_louder calculation method using standard
inputs
"""
return _significance_meth_dict[method](
back_stat,
fore_stat,
dec_facs,
**kwargs)
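# A minimal sketch of the dispatch above (values invented for illustration;
# the helper name is not part of the original module). With the default
# 'n_louder' method this is equivalent to calling count_n_louder directly.
def _example_get_n_louder():
    back = np.array([5.0, 6.0, 7.0])
    fore = np.array([6.5])
    dec = np.ones_like(back)
    back_cnum, fore_n = get_n_louder(back, fore, dec, method='n_louder')
    # only the 7.0 background event is louder than the 6.5 foreground event
    assert fore_n[0] == 1
    return back_cnum, fore_n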
def insert_significance_option_group(parser):
"""
Add some options for use when a significance is being estimated from
events or event distributions.
"""
parser.add_argument('--far-calculation-method', nargs='+',
default=[],
help="Method used for FAR calculation in each "
"detector combination, given as "
"combination:method pairs, i.e. "
"H1:trigger_fit H1L1:n_louder H1L1V1:n_louder "
"etc. Method options are ["
+ ",".join(_significance_meth_dict.keys()) +
"]. Default = n_louder for all not given")
parser.add_argument('--fit-threshold', nargs='+', default=[],
help="Trigger statistic fit thresholds for FAN "
"estimation, given as combination-value pairs "
"ex. H1:0 L1:0 V1:-4 for all combinations with "
"--far-calculation-method = trigger_fit")
parser.add_argument("--fit-function", nargs='+', default=[],
help="Functional form for the statistic slope fit if "
"--far-calculation-method is 'trigger_fit'. "
"Given as combination:function pairs, i.e. "
"H1:exponential H1L1:n_louder H1L1V1:n_louder. "
"Options: ["
+ ",".join(trstats.fitalpha_dict.keys()) + "]. "
"Default = exponential for all")
def check_significance_options(args, parser):
"""
Check the significance group options
"""
# Check that the combo:method/function/threshold are in the
# right format, and are in allowed combinations
lists_to_check = [(args.far_calculation_method, str,
_significance_meth_dict.keys()),
(args.fit_function, str,
trstats.fitalpha_dict.keys()),
(args.fit_threshold, float,
None)]
for list_to_check, type_to_convert, allowed_values in lists_to_check:
combo_list = []
for combo_value in list_to_check:
try:
combo, value = tuple(combo_value.split(':'))
except ValueError:
parser.error("Need combo:value format, got %s" % combo_value)
if combo in combo_list:
parser.error("Duplicate combo %s in a significance "
"option" % combo)
combo_list.append(combo)
try:
type_to_convert(value)
except ValueError:
err_fmat = "Value {} of combo {} can't be converted"
parser.error(err_fmat.format(value, combo))
if allowed_values is not None and \
type_to_convert(value) not in allowed_values:
err_fmat = "Value {} of combo {} is not in allowed values: {}"
parser.error(err_fmat.format(value, combo, allowed_values))
# Are the functions/thresholds appropriate for the methods given?
methods = {}
# A method has been specified
for combo_value in args.far_calculation_method:
combo, value = tuple(combo_value.split(':'))
methods[combo] = value
# A function or threshold has been specified
function_or_thresh_given = []
for combo_value in args.fit_function + args.fit_threshold:
combo, _ = tuple(combo_value.split(':'))
if combo not in methods:
# Assign the default method for use in further tests
methods[combo] = _default_opt_dict['method']
function_or_thresh_given.append(combo)
for combo, value in methods.items():
if value != 'trigger_fit' and combo in function_or_thresh_given:
# Function/Threshold given for combo not using trigger_fit method
parser.error("--fit-function and/or --fit-threshold given for "
+ combo + " which has method " + value)
elif value == 'trigger_fit' and combo not in function_or_thresh_given:
# Threshold not given for trigger_fit combo
parser.error("Threshold required for combo " + combo)
def digest_significance_options(combo_keys, args):
"""
Read in information from the significance option group and ensure
it makes sense before putting into a dictionary
Parameters
----------
combo_keys: list of strings
list of detector combinations for which options are needed
args: parsed arguments
from argparse ArgumentParser parse_args()
Returns
-------
significance_dict: dictionary
Dictionary containing method, threshold and function for trigger fits
as appropriate
"""
lists_to_unpack = [('method', args.far_calculation_method, str),
('fit_function', args.fit_function, str),
('fit_threshold', args.fit_threshold, float)]
significance_dict = {}
# Set everything as a default to start with:
for combo in combo_keys:
significance_dict[combo] = copy.deepcopy(_default_opt_dict)
# Unpack everything from the arguments into the dictionary
for argument_key, arg_to_unpack, conv_func in lists_to_unpack:
for combo_value in arg_to_unpack:
combo, value = tuple(combo_value.split(':'))
if combo not in significance_dict:
# Allow options for detector combos that are not actually
# used/required for a given job. Such options have
# no effect, but emit a warning for (e.g.) diagnostic checks
logging.warning("Key %s not used by this code, uses %s",
combo, combo_keys)
significance_dict[combo] = copy.deepcopy(_default_opt_dict)
significance_dict[combo][argument_key] = conv_func(value)
return significance_dict
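# A minimal sketch of how the option strings are digested into a dictionary
# (the combinations, values and helper name are invented for illustration;
# SimpleNamespace stands in for the argparse result):
def _example_digest_significance_options():
    from types import SimpleNamespace
    args = SimpleNamespace(
        far_calculation_method=['H1L1:trigger_fit'],
        fit_function=['H1L1:exponential'],
        fit_threshold=['H1L1:6'],
    )
    sig_dict = digest_significance_options(['H1L1', 'H1'], args)
    # 'H1L1' picks up the trigger_fit settings, 'H1' keeps the defaults
    assert sig_dict['H1L1']['method'] == 'trigger_fit'
    assert sig_dict['H1']['method'] == 'n_louder'
    return sig_dict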
| 13,225
| 38.957704
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/events/cuts.py
|
# Copyright (C) 2022 Gareth Cabourn Davies
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module contains functions for reading in command line options and
applying cuts to triggers or templates in the offline search
"""
import logging
import copy
import numpy as np
from pycbc.events import ranking
from pycbc.io import hdf
from pycbc.tmpltbank import bank_conversions as bank_conv
from pycbc.io import get_chisq_from_file_choice
# Only used to check isinstance:
from pycbc.io.hdf import ReadByTemplate
# sngl_rank_keys are the allowed names of reweighted SNR functions
sngl_rank_keys = ranking.sngls_ranking_function_dict.keys()
trigger_param_choices = list(sngl_rank_keys)
trigger_param_choices += [cc + '_chisq' for cc in hdf.chisq_choices]
trigger_param_choices += ['end_time', 'psd_var_val', 'sigmasq',
'sigma_multiple']
template_fit_param_choices = ['fit_by_fit_coeff', 'smoothed_fit_coeff',
'fit_by_count_above_thresh',
'smoothed_fit_count_above_thresh',
'fit_by_count_in_template',
'smoothed_fit_count_in_template']
template_param_choices = bank_conv.conversion_options + \
template_fit_param_choices
# What are the inequalities associated with the cuts?
# 'upper' means upper limit, and so requires value < threshold
# to keep a trigger
ineq_functions = {
'upper': np.less,
'lower': np.greater,
'upper_inc': np.less_equal,
'lower_inc': np.greater_equal
}
ineq_choices = list(ineq_functions.keys())
def insert_cuts_option_group(parser):
"""
Add options to the parser for cuts to the templates/triggers
"""
parser.add_argument('--trigger-cuts', nargs='+',
help="Cuts to apply to the triggers, supplied as "
"PARAMETER:VALUE:LIMIT, where, PARAMETER is the "
"parameter to be cut, VALUE is the value at "
"which it is cut, and LIMIT is one of '"
+ "', '".join(ineq_choices) +
"' to indicate the inequality needed. "
"PARAMETER is one of:'"
+ "', '".join(trigger_param_choices) +
"'. For example snr:6:LOWER removes triggers "
"with matched filter SNR < 6")
parser.add_argument('--template-cuts', nargs='+',
help="Cuts to apply to the triggers, supplied as "
"PARAMETER:VALUE:LIMIT. Format is the same as in "
"--trigger-cuts. PARAMETER can be one of '"
+ "', '".join(template_param_choices) + "'.")
def convert_inputstr(inputstr, choices):
"""
Convert the inputstr into a dictionary keyed on parameter
with a tuple of the function to be used in the cut, and
the float to compare to.
Do input checks
"""
try:
cut_param, cut_value_str, cut_limit = inputstr.split(':')
except ValueError as value_e:
logging.warning("ERROR: Cut string format not correct, please "
"supply as PARAMETER:VALUE:LIMIT")
raise value_e
if cut_param.lower() not in choices:
raise NotImplementedError("Cut parameter " + cut_param.lower() + " "
"not recognised, choose from "
+ ", ".join(choices))
if cut_limit.lower() not in ineq_choices:
raise NotImplementedError("Cut inequality " + cut_limit.lower() + " "
"not recognised, choose from "
+ ", ".join(ineq_choices))
try:
cut_value = float(cut_value_str)
except ValueError as value_e:
logging.warning("ERROR: Cut value must be convertible into a float, "
"got '%s'.", cut_value_str)
raise value_e
return {(cut_param, ineq_functions[cut_limit]): cut_value}
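# A minimal sketch of the cut-string parsing above (the parameter, GPS value
# and helper name are invented for illustration):
def _example_convert_inputstr():
    cut = convert_inputstr('end_time:1262304000:lower', trigger_param_choices)
    # a 'lower' limit keeps triggers with end_time > 1262304000, so the
    # stored comparison function is np.greater
    assert cut == {('end_time', np.greater): 1262304000.0}
    return cut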
def check_update_cuts(cut_dict, new_cut):
"""
Update a cuts dictionary, but check whether the cut exists already,
warn and only apply the strictest cuts
Parameters
----------
cut_dict: dictionary
Dictionary containing the cuts to be checked, will be updated
new_cut: single-entry dictionary
dictionary to define the new cut which is being considered to add
"""
new_cut_key = list(new_cut.keys())[0]
if new_cut_key in cut_dict:
# The cut has already been called
logging.warning("WARNING: Cut parameter %s and function %s have "
"already been used. Utilising the strictest cut.",
new_cut_key[0], new_cut_key[1].__name__)
# Extract the function and work out which is strictest
cut_function = new_cut_key[1]
value_new = list(new_cut.values())[0]
value_old = cut_dict[new_cut_key]
if cut_function(value_new, value_old):
# The new threshold would survive the cut of the
# old threshold, therefore the new threshold is stricter
# - update it
logging.warning("WARNING: New threshold of %.3f is "
"stricter than old threshold %.3f, "
"using cut at %.3f.",
value_new, value_old, value_new)
cut_dict.update(new_cut)
else:
# New cut would not make a difference, ignore it
logging.warning("WARNING: New threshold of %.3f is less "
"strict than old threshold %.3f, using "
"cut at %.3f.",
value_new, value_old, value_old)
else:
# This is a new cut - add it
cut_dict.update(new_cut)
def ingest_cuts_option_group(args):
"""
Return dictionaries for trigger and template cuts.
"""
# Deal with the case where no cuts are supplied:
if not args.trigger_cuts and not args.template_cuts:
return {}, {}
# Deal with the case where one set of cuts is supplied
# but not the other
trigger_cut_strs = args.trigger_cuts or []
template_cut_strs = args.template_cuts or []
# Handle trigger cuts
trigger_cut_dict = {}
for inputstr in trigger_cut_strs:
new_trigger_cut = convert_inputstr(inputstr, trigger_param_choices)
check_update_cuts(trigger_cut_dict, new_trigger_cut)
# Handle template cuts
template_cut_dict = {}
for inputstr in template_cut_strs:
new_template_cut = convert_inputstr(inputstr, template_param_choices)
check_update_cuts(template_cut_dict, new_template_cut)
return trigger_cut_dict, template_cut_dict
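# A minimal sketch of turning command-line style cut strings into the cut
# dictionaries used below (values and helper name invented for illustration;
# SimpleNamespace stands in for the argparse result):
def _example_ingest_cuts_option_group():
    from types import SimpleNamespace
    args = SimpleNamespace(trigger_cuts=['end_time:1262304000:lower'],
                           template_cuts=None)
    trig_cuts, tmplt_cuts = ingest_cuts_option_group(args)
    # each dictionary is keyed on (parameter, comparison function)
    assert trig_cuts[('end_time', np.greater)] == 1262304000.0
    assert tmplt_cuts == {}
    return trig_cuts, tmplt_cuts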
def sigma_multiple_cut_thresh(template_ids, statistic,
cut_thresh, ifo):
"""
Apply cuts based on a multiple of the median sigma value for the template
Parameters
----------
template_ids:
template_id values for each of the triggers to be considered,
this will be used to associate a sigma threshold for each trigger
statistic:
A PyCBC ranking statistic instance. Used to get the median_sigma
value for the cuts. If fits_by_tid does not exist for the specified
ifo (where median_sigma lives), an error will be raised.
cut_thresh: int or float
The multiple of median_sigma to compare triggers to
ifo:
The IFO for which we want to read median_sigma
Returns
-------
sigma_cut_thresh: numpy array
The sigma threshold for each trigger, i.e. cut_thresh multiplied by
the median_sigma of the trigger's template
"""
statistic_classname = statistic.__class__.__name__
if not hasattr(statistic, 'fits_by_tid'):
raise ValueError("Cut parameter 'sigma_muliple' cannot "
"be used when the ranking statistic " +
statistic_classname + " does not use "
"template fitting.")
tid_med_sigma = statistic.fits_by_tid[ifo]['median_sigma']
return cut_thresh * tid_med_sigma[template_ids]
def apply_trigger_cuts(triggers, trigger_cut_dict, statistic=None):
"""
Fetch/Calculate the parameter for triggers, and then
apply the cuts defined in template_cut_dict
Parameters
----------
triggers: ReadByTemplate object or dictionary
The triggers in this particular template. This
must have the correct datasets required to calculate
the values we cut on.
trigger_cut_dict: dictionary
Dictionary with tuples of (parameter, cut_function)
as keys, cut_thresholds as values
made using ingest_cuts_option_group function
Returns
-------
idx_out: numpy array
An array of the indices which meet the criteria
set by the dictionary
"""
idx_out = np.arange(len(triggers['snr']))
# Loop through the different cuts, and apply them
for parameter_cut_function, cut_thresh in trigger_cut_dict.items():
# The function and threshold are stored as a tuple so unpack it
parameter, cut_function = parameter_cut_function
# What kind of parameter is it?
if parameter.endswith('_chisq'):
# parameter is a chisq-type thing
chisq_choice = parameter.split('_')[0]
# Currently calculated for all triggers - this seems inefficient
value = get_chisq_from_file_choice(triggers, chisq_choice)
# Apply any previous cuts to the value for comparison
value = value[idx_out]
elif parameter == "sigma_multiple":
if isinstance(triggers, ReadByTemplate):
ifo_grp = triggers.file[triggers.ifo]
value = np.sqrt(ifo_grp['sigmasq'][idx_out])
template_ids = ifo_grp['template_id'][idx_out]
# Get a cut threshold value, this will be different
# depending on the template ID, so we rewrite cut_thresh
# as a value for each trigger, numpy comparison functions
# allow this
cut_thresh = sigma_multiple_cut_thresh(template_ids,
statistic,
cut_thresh,
triggers.ifo)
else:
err_msg = "Cuts on 'sigma_multiple' are only implemented for "
err_msg += "triggers in a ReadByTemplate format. This code "
err_msg += f"uses a {type(triggers).__name__} format."
raise NotImplementedError(err_msg)
elif ((not hasattr(triggers, "file") and parameter in triggers)
or (hasattr(triggers, "file")
and parameter in triggers.file[triggers.ifo])):
# parameter can be read direct from the trigger dictionary / file
if not hasattr(triggers, 'file') and parameter in triggers:
value = triggers[parameter]
else:
value = triggers.file[triggers.ifo][parameter]
# Apply any previous cuts to the value for comparison
value = value[idx_out]
elif parameter in sngl_rank_keys:
# parameter is a newsnr-type thing
# Currently calculated for all triggers - this seems inefficient
value = ranking.get_sngls_ranking_from_trigs(triggers, parameter)
# Apply any previous cuts to the value for comparison
value = value[idx_out]
else:
raise NotImplementedError("Parameter '" + parameter + "' not "
"recognised. Input sanitisation means "
"this shouldn't have happened?!")
idx_out = idx_out[cut_function(value, cut_thresh)]
return idx_out
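# A minimal sketch applying a single cut to a plain dictionary of triggers
# (the trigger values and helper name are invented for illustration):
def _example_apply_trigger_cuts():
    triggers = {'snr': np.array([4.0, 6.5, 8.0]),
                'end_time': np.array([100.0, 200.0, 300.0])}
    # keep triggers with end_time > 150
    cut_dict = {('end_time', np.greater): 150.0}
    idx = apply_trigger_cuts(triggers, cut_dict)
    assert list(idx) == [1, 2]
    return idx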
def apply_template_fit_cut(statistic, ifos, parameter_cut_function, cut_thresh,
template_ids):
"""
Apply cuts to template fit parameters, these have a few more checks
needed, so we separate out from apply_template_cuts defined later
Parameters
----------
statistic:
A PyCBC ranking statistic instance. Used for the template fit
cuts. If fits_by_tid does not exist for each ifo, then
template fit cuts will be skipped. If a fit cut has been specified
and fits_by_tid does not exist for all ifos, an error will be raised.
ifos: list of strings
List of IFOS used in this findtrigs instance.
Templates must pass cuts in all IFOs.
parameter_cut_function: tuple
First entry: Which parameter is being used for the cut?
Second entry: Cut function
cut_thresh: float or int
Cut threshold to the parameter according to the cut function
template_ids: numpy array
Array of template_ids which have passed previous cuts
Returns
-------
tids_out: numpy array
Array of template_ids which have passed this cut
"""
parameter, cut_function = parameter_cut_function
statistic_classname = statistic.__class__.__name__
# We can only apply template fit cuts if template fits have been done
if not hasattr(statistic, 'fits_by_tid'):
raise ValueError("Cut parameter " + parameter + " cannot "
"be used when the ranking statistic " +
statistic_classname + " does not use "
"template fitting.")
# Is the parameter actually in the fits dictionary?
if parameter not in statistic.fits_by_tid[ifos[0]]:
# Shouldn't get here due to input sanitisation
raise ValueError("Cut parameter " + parameter + " not "
"available in fits file.")
# Template IDs array to cut down in each IFO
tids_out = copy.copy(template_ids)
# Need to apply this cut to all IFOs
for ifo in ifos:
fits_dict = statistic.fits_by_tid[ifo]
values = fits_dict[parameter][tids_out]
# Only keep templates which pass this cut
tids_out = tids_out[cut_function(values, cut_thresh)]
return tids_out
def apply_template_cuts(bank, template_cut_dict, template_ids=None,
statistic=None, ifos=None):
"""
Fetch/calculate the parameter for the templates, possibly already
preselected by template_ids, and then apply the cuts defined
in template_cut_dict
As this is used to select templates for use in findtrigs codes,
we remove anything which does not pass
Parameters
----------
bank: h5py File object, or a dictionary
Must contain the usual template bank datasets
template_cut_dict: dictionary
Dictionary with tuples of (parameter, cut_function)
as keys, cut_thresholds as values
made using ingest_cuts_option_group function
Optional Parameters
-------------------
template_ids: list of indices
Indices of templates to consider within the bank, useful if
templates have already been down-selected
statistic:
A PyCBC ranking statistic instance. Used for the template fit
cuts. If fits_by_tid does not exist for each ifo, then
template fit cuts will be skipped. If a fit cut has been specified
and fits_by_tid does not exist for all ifos, an error will be raised.
If not supplied, no template fit cuts will be attempted.
ifos: list of strings
List of IFOS used in this findtrigs instance.
Templates must pass cuts in all IFOs. This is important
e.g. for template fit parameter cuts.
Returns
-------
tids_out: numpy array
Array of template_ids which have passed all cuts
"""
# Get the initial list of templates:
tids_out = np.arange(bank['mass1'].size) \
if template_ids is None else template_ids[:]
if (statistic is None) ^ (ifos is None):
raise NotImplementedError("Either both or neither of statistic and "
"ifos must be supplied.")
if not template_cut_dict:
# No cuts are defined in the dictionary: just return the
# list of all tids
return tids_out
# Loop through the different cuts, and apply them
for parameter_cut_function, cut_thresh in template_cut_dict.items():
# The function and threshold are stored as a tuple so unpack it
parameter, cut_function = parameter_cut_function
if parameter in bank_conv.conversion_options:
# Calculate the parameter values using the bank property helper
values = bank_conv.get_bank_property(parameter, bank, tids_out)
# Only keep templates which pass this cut
tids_out = tids_out[cut_function(values, cut_thresh)]
elif parameter in template_fit_param_choices:
if statistic and ifos:
tids_out = apply_template_fit_cut(statistic,
ifos,
parameter_cut_function,
cut_thresh,
tids_out)
else:
raise ValueError("Cut parameter " + parameter + " not recognised."
" This shouldn't happen with input sanitisation")
return tids_out
| 18,453
| 39.647577
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/events/coinc.py
|
# Copyright (C) 2015 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module contains functions for calculating and manipulating
coincident triggers.
"""
import numpy, logging, pycbc.pnutils, pycbc.conversions, copy, lal
from pycbc.detector import Detector, ppdets
from .eventmgr_cython import coincbuffer_expireelements
from .eventmgr_cython import coincbuffer_numgreater
from .eventmgr_cython import timecoincidence_constructidxs
from .eventmgr_cython import timecoincidence_constructfold
from .eventmgr_cython import timecoincidence_getslideint
from .eventmgr_cython import timecoincidence_findidxlen
from .eventmgr_cython import timecluster_cython
def background_bin_from_string(background_bins, data):
""" Return template ids for each bin as defined by the format string
Parameters
----------
background_bins: list of strings
List of strings which define how a background bin is taken from the
list of templates.
data: dict of numpy.ndarrays
Dict with parameter key values and numpy.ndarray values which define
the parameters of the template bank to bin up.
Returns
-------
bins: dict
Dictionary of location indices indexed by a bin name
"""
used = numpy.array([], dtype=numpy.uint32)
bins = {}
for mbin in background_bins:
locs = None
name, bin_type_list, boundary_list = tuple(mbin.split(':'))
bin_type_list = bin_type_list.split(',')
boundary_list = boundary_list.split(',')
for bin_type, boundary in zip(bin_type_list, boundary_list):
if boundary[0:2] == 'lt':
member_func = lambda vals, bd=boundary : vals < float(bd[2:])
elif boundary[0:2] == 'gt':
member_func = lambda vals, bd=boundary : vals > float(bd[2:])
else:
raise RuntimeError("Can't parse boundary condition! Must begin "
"with 'lt' or 'gt'")
if bin_type == 'component' and boundary[0:2] == 'lt':
# maximum component mass is less than boundary value
vals = numpy.maximum(data['mass1'], data['mass2'])
elif bin_type == 'component' and boundary[0:2] == 'gt':
# minimum component mass is greater than boundary value
vals = numpy.minimum(data['mass1'], data['mass2'])
elif bin_type == 'total':
vals = data['mass1'] + data['mass2']
elif bin_type == 'chirp':
vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta(
data['mass1'], data['mass2'])[0]
elif bin_type == 'ratio':
vals = pycbc.conversions.q_from_mass1_mass2(
data['mass1'], data['mass2'])
elif bin_type == 'eta':
vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta(
data['mass1'], data['mass2'])[1]
elif bin_type == 'chi_eff':
vals = pycbc.conversions.chi_eff(data['mass1'], data['mass2'],
data['spin1z'], data['spin2z'])
elif bin_type == 'SEOBNRv2Peak':
vals = pycbc.pnutils.get_freq('fSEOBNRv2Peak',
data['mass1'], data['mass2'],
data['spin1z'], data['spin2z'])
elif bin_type == 'SEOBNRv4Peak':
vals = pycbc.pnutils.get_freq('fSEOBNRv4Peak', data['mass1'],
data['mass2'], data['spin1z'],
data['spin2z'])
elif bin_type == 'SEOBNRv2duration':
vals = pycbc.pnutils.get_imr_duration(
data['mass1'], data['mass2'],
data['spin1z'], data['spin2z'],
data['f_lower'], approximant='SEOBNRv2')
elif bin_type == 'SEOBNRv4duration':
vals = pycbc.pnutils.get_imr_duration(
data['mass1'][:], data['mass2'][:],
data['spin1z'][:], data['spin2z'][:],
data['f_lower'][:], approximant='SEOBNRv4')
else:
raise ValueError('Invalid bin type %s' % bin_type)
sub_locs = member_func(vals)
del vals
sub_locs = numpy.where(sub_locs)[0]
if locs is not None:
# find intersection of boundary conditions
locs = numpy.intersect1d(locs, sub_locs)
else:
locs = sub_locs
# make sure we don't reuse anything from an earlier bin
locs = numpy.delete(locs, numpy.where(numpy.in1d(locs, used))[0])
used = numpy.concatenate([used, locs])
bins[name] = locs
return bins
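# A minimal sketch of the bin-string format handled above (the bank values,
# boundaries and helper name are invented for illustration):
def _example_background_bin_from_string():
    bank = {'mass1': numpy.array([1.4, 10.0, 30.0]),
            'mass2': numpy.array([1.4, 8.0, 25.0])}
    bins = background_bin_from_string(['bns:total:lt4', 'rest:total:gt4'],
                                      bank)
    # the first template (total mass 2.8) falls in 'bns', the rest in 'rest'
    assert list(bins['bns']) == [0]
    assert sorted(bins['rest']) == [1, 2]
    return bins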
def timeslide_durations(start1, start2, end1, end2, timeslide_offsets):
""" Find the coincident time for each timeslide.
Find the coincident time for each timeslide, where the first time vector
is slid to the right by the offset in the given timeslide_offsets vector.
Parameters
----------
start1: numpy.ndarray
Array of the start of valid analyzed times for detector 1
start2: numpy.ndarray
Array of the start of valid analyzed times for detector 2
end1: numpy.ndarray
Array of the end of valid analyzed times for detector 1
end2: numpy.ndarray
Array of the end of valid analyzed times for detector 2
timeslide_offsets: numpy.ndarray
Array of offsets (in seconds) for each timeslide
Returns
--------
durations: numpy.ndarray
Array of coincident time for each timeslide in the offset array
"""
from . import veto
durations = []
seg2 = veto.start_end_to_segments(start2, end2)
for offset in timeslide_offsets:
seg1 = veto.start_end_to_segments(start1 + offset, end1 + offset)
durations.append(abs((seg1 & seg2).coalesce()))
return numpy.array(durations)
def time_coincidence(t1, t2, window, slide_step=0):
""" Find coincidences by time window
Parameters
----------
t1 : numpy.ndarray
Array of trigger times from the first detector
t2 : numpy.ndarray
Array of trigger times from the second detector
window : float
Coincidence window maximum time difference, arbitrary units (usually s)
slide_step : float (default 0)
If calculating background coincidences, the interval between background
slides, arbitrary units (usually s)
Returns
-------
idx1 : numpy.ndarray
Array of indices into the t1 array for coincident triggers
idx2 : numpy.ndarray
Array of indices into the t2 array for coincident triggers
slide : numpy.ndarray
Array of slide ids
"""
if slide_step:
length1 = len(t1)
length2 = len(t2)
fold1 = numpy.zeros(length1, dtype=numpy.float64)
fold2 = numpy.zeros(length2, dtype=numpy.float64)
timecoincidence_constructfold(fold1, fold2, t1, t2, slide_step,
length1, length2)
else:
fold1 = t1
fold2 = t2
sort1 = fold1.argsort()
sort2 = fold2.argsort()
fold1 = fold1[sort1]
fold2 = fold2[sort2]
if slide_step:
# fold2 values lie in [0, slide_step); append copies shifted by
# +/- slide_step so that search windows around fold1 values near the
# fold boundary can wrap around
fold2 = numpy.concatenate([fold2 - slide_step, fold2,
fold2 + slide_step])
left = fold2.searchsorted(fold1 - window)
right = fold2.searchsorted(fold1 + window)
lenidx = timecoincidence_findidxlen(left, right, len(left))
idx1 = numpy.zeros(lenidx, dtype=numpy.uint32)
idx2 = numpy.zeros(lenidx, dtype=numpy.uint32)
timecoincidence_constructidxs(idx1, idx2, sort1, sort2, left, right,
len(left), len(sort2))
slide = numpy.zeros(lenidx, dtype=numpy.int32)
if slide_step:
timecoincidence_getslideint(slide, t1, t2, idx1, idx2, slide_step)
else:
slide = numpy.zeros(len(idx1))
return idx1, idx2, slide
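# A minimal zero-lag sketch of the function above (times and helper name
# invented for illustration; this relies on the compiled pycbc helpers
# imported at the top of this module):
def _example_time_coincidence():
    t1 = numpy.array([100.000, 200.000, 300.000])
    t2 = numpy.array([100.003, 250.000])
    idx1, idx2, slide = time_coincidence(t1, t2, window=0.005)
    # only the pair (100.000, 100.003) lies within the 5 ms window
    assert list(idx1) == [0] and list(idx2) == [0]
    return idx1, idx2, slide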
def time_multi_coincidence(times, slide_step=0, slop=.003,
pivot='H1', fixed='L1'):
""" Find multi detector coincidences.
Parameters
----------
times: dict of numpy.ndarrays
Dictionary keyed by ifo of single ifo trigger times
slide_step: float
Interval between time slides
slop: float
The amount of time to add to the TOF between detectors for coincidence
pivot: str
The ifo to which time shifts are applied in first stage coincidence
fixed: str
The other ifo used in first stage coincidence, subsequently used as a
time reference for additional ifos. All other ifos are not time shifted
relative to this ifo
Returns
-------
ids: dict of arrays of int
Dictionary keyed by ifo with ids of trigger times forming coincidences.
Coincidence is tested for every pair of ifos that can be formed from
the input dict: only those tuples of times passing all tests are
recorded
slide: array of int
Slide ids of coincident triggers in pivot ifo
"""
def win(ifo1, ifo2):
d1 = Detector(ifo1)
d2 = Detector(ifo2)
return d1.light_travel_time_to_detector(d2) + slop
# Find coincs between the 'pivot' and 'fixed' detectors as in 2-ifo case
pivot_id, fix_id, slide = time_coincidence(times[pivot], times[fixed],
win(pivot, fixed),
slide_step=slide_step)
# Additional detectors do not slide independently of the 'fixed' one
# Each trigger in an additional detector must be coincident with both
# triggers in an existing coincidence
# Slide 'pivot' trigger times to be coincident with trigger times in
# 'fixed' detector
fixed_time = times[fixed][fix_id]
pivot_time = times[pivot][pivot_id] - slide_step * slide
ctimes = {fixed: fixed_time, pivot: pivot_time}
ids = {fixed: fix_id, pivot: pivot_id}
dep_ifos = [ifo for ifo in times.keys() if ifo != fixed and ifo != pivot]
for ifo1 in dep_ifos:
# FIXME - make this loop into a function?
# otime is extra ifo time in original trigger order
otime = times[ifo1]
# tsort gives ordering from original order to time sorted order
tsort = otime.argsort()
time1 = otime[tsort]
# Find coincidences between dependent ifo triggers and existing coincs
# - Cycle over fixed and pivot
# - At the 1st iteration, the fixed and pivot triggers are reduced to
# those for which the first out of fixed/pivot forms a coinc with ifo1
# - At the 2nd iteration, we are left with triggers for which both
# fixed and pivot are coincident with ifo1
# - If there is more than 1 dependent ifo, ones that were previously
# tested against fixed and pivot are now present for testing with new
# dependent ifos
for ifo2 in ids:
logging.info('added ifo %s, testing against %s' % (ifo1, ifo2))
w = win(ifo1, ifo2)
left = time1.searchsorted(ctimes[ifo2] - w)
right = time1.searchsorted(ctimes[ifo2] + w)
# Any times within time1 coincident with the time in ifo2 have
# indices between 'left' and 'right'
# 'nz' indexes into times in ifo2 which have coincidences with ifo1
# times
nz = (right - left).nonzero()
if len(right - left):
rlmax = (right - left).max()
if len(nz[0]) and rlmax > 1:
# We expect at most one coincident time in ifo1, assuming
# trigger spacing in ifo1 > time window.
# However there are rare corner cases at starts/ends of inspiral
# jobs. For these, arbitrarily keep the first trigger and
# discard the second (and any subsequent ones).
logging.warning('Triggers in %s are closer than coincidence '
'window, 1 or more coincs will be discarded. '
'This is a warning, not an error.' % ifo1)
# identify indices of times in ifo1 that form coincs with ifo2
dep_ids = left[nz]
# slide is array of slide ids attached to pivot ifo
slide = slide[nz]
for ifo in ctimes:
# cycle over fixed and pivot & any previous additional ifos
# reduce times and IDs to just those forming a coinc with ifo1
ctimes[ifo] = ctimes[ifo][nz]
ids[ifo] = ids[ifo][nz]
# undo time sorting on indices of ifo1 triggers, add ifo1 ids and times
# to dicts for testing against any additional detectors
ids[ifo1] = tsort[dep_ids]
ctimes[ifo1] = otime[ids[ifo1]]
return ids, slide
def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, **kwargs):
"""Cluster coincident events for each timeslide separately, across
templates, based on the ranking statistic
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time1: numpy.ndarray
first time vector
time2: numpy.ndarray
second time vector
timeslide_id: numpy.ndarray
vector that determines the timeslide offset
slide: float
length of the timeslides offset interval
window: float
length to cluster over
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences.
"""
if len(time1) == 0 or len(time2) == 0:
logging.info('No coinc triggers in one, or both, ifos.')
return numpy.array([])
if numpy.isfinite(slide):
# for a time shifted coinc, time1 is greater than time2 by approximately timeslide_id*slide
# adding this quantity gives a mean coinc time located around time1
time = (time1 + time2 + timeslide_id * slide) / 2
else:
time = 0.5 * (time2 + time1)
tslide = timeslide_id.astype(numpy.longdouble)
time = time.astype(numpy.longdouble)
span = (time.max() - time.min()) + window * 10
time = time + span * tslide
logging.info('Clustering events over %s s window', window)
cidx = cluster_over_time(stat, time, window, **kwargs)
logging.info('%d triggers remaining', len(cidx))
return cidx
def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window,
**kwargs):
"""Cluster coincident events for each timeslide separately, across
templates, based on the ranking statistic
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time_coincs: tuple of numpy.ndarrays
trigger times for each ifo, or -1 if an ifo does not participate in a coinc
timeslide_id: numpy.ndarray
vector that determines the timeslide offset
slide: float
length of the timeslides offset interval
window: float
duration of clustering window in seconds
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences
"""
time_coinc_zip = list(zip(*time_coincs))
if len(time_coinc_zip) == 0:
logging.info('No coincident triggers.')
return numpy.array([])
time_avg_num = []
#find number of ifos and mean time over participating ifos for each coinc
for tc in time_coinc_zip:
time_avg_num.append(mean_if_greater_than_zero(tc))
time_avg, num_ifos = zip(*time_avg_num)
time_avg = numpy.array(time_avg)
num_ifos = numpy.array(num_ifos)
# shift all but the pivot ifo by (num_ifos-1) * timeslide_id * slide
# this leads to a mean coinc time located around pivot time
if numpy.isfinite(slide):
nifos_minusone = (num_ifos - numpy.ones_like(num_ifos))
time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos
tslide = timeslide_id.astype(numpy.longdouble)
time_avg = time_avg.astype(numpy.longdouble)
span = (time_avg.max() - time_avg.min()) + window * 10
time_avg = time_avg + span * tslide
logging.info('Clustering events over %s s window', window)
cidx = cluster_over_time(stat, time_avg, window, **kwargs)
logging.info('%d triggers remaining', len(cidx))
return cidx
def mean_if_greater_than_zero(vals):
""" Calculate mean over numerical values, ignoring values less than zero.
E.g. used for mean time over coincident triggers when timestamps are set
to -1 for ifos not included in the coincidence.
Parameters
----------
vals: iterator of numerical values
values to be mean averaged
Returns
-------
mean: float
The mean of the values in the original vector which are
greater than zero
num_above_zero: int
The number of entries in the vector which are above zero
"""
vals = numpy.array(vals)
above_zero = vals > 0
return vals[above_zero].mean(), above_zero.sum()
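# A minimal sketch (values and helper name invented for illustration): ifos
# not participating in a coinc are marked with -1 and excluded from the mean.
def _example_mean_if_greater_than_zero():
    mean_t, n_ifos = mean_if_greater_than_zero([100.0, 101.0, -1.0])
    assert n_ifos == 2 and abs(mean_t - 100.5) < 1e-9
    return mean_t, n_ifos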
def cluster_over_time(stat, time, window, method='python',
argmax=numpy.argmax):
"""Cluster generalized transient events over time via maximum stat over a
symmetric sliding window
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time: numpy.ndarray
time to use for clustering
window: float
length to cluster over
method: string
Either "cython" to use the cython implementation, or "python" to use
the pure python version.
argmax: function
the function used to calculate the maximum value
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences.
"""
indices = []
time_sorting = time.argsort()
stat = stat[time_sorting]
time = time[time_sorting]
left = time.searchsorted(time - window)
right = time.searchsorted(time + window)
indices = numpy.zeros(len(left), dtype=numpy.uint32)
logging.debug('%d triggers before clustering', len(time))
if method == 'cython':
j = timecluster_cython(indices, left, right, stat, len(left))
elif method == 'python':
# i is the index we are inspecting, j is the next one to save
i = 0
j = 0
while i < len(left):
l = left[i]
r = right[i]
# If there are no other points to compare it is obviously the max
if (r - l) == 1:
indices[j] = i
j += 1
i += 1
continue
# Find the location of the maximum within the time interval
# around i
max_loc = argmax(stat[l:r]) + l
# If this point is the max, we can skip to the right boundary
if max_loc == i:
indices[j] = i
i = r
j += 1
# If the max is later than i, we can skip to it
elif max_loc > i:
i = max_loc
elif max_loc < i:
i += 1
else:
raise ValueError(f'Do not recognize method {method}')
indices = indices[:j]
logging.debug('%d triggers remaining', len(indices))
return time_sorting[indices]
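# A minimal sketch of the pure-python clustering path (stat values, times and
# helper name invented for illustration):
def _example_cluster_over_time():
    stat = numpy.array([5.0, 8.0, 6.0, 9.0])
    time = numpy.array([10.0, 10.5, 11.0, 30.0])
    cidx = cluster_over_time(stat, time, window=2.0, method='python')
    # the first three events lie within one window of each other, so only
    # the loudest (stat 8.0) survives, along with the isolated event at t=30
    assert sorted(cidx) == [1, 3]
    return cidx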
class MultiRingBuffer(object):
"""Dynamic size n-dimensional ring buffer that can expire elements."""
def __init__(self, num_rings, max_time, dtype, min_buffer_size=16,
buffer_increment=8, resize_invalid_fraction=0.4):
"""
Parameters
----------
num_rings: int
The number of ring buffers to create. They all will have the same
intrinsic size and will expire at the same time.
max_time: int
The maximum "time" an element can exist in each ring.
dtype: numpy.dtype
The type of each element in the ring buffer.
min_buffer_size: int (optional: default=16)
All ring buffers will be initialized to this length. If a buffer is
made larger it will no smaller than this value. Buffers may become
smaller than this length at any given time as triggers are expired.
buffer_increment: int (optional: default=8)
When increasing ring buffers, add this many points. Be careful if
changing this and min_buffer_size from default values, it is
possible to get stuck in a mode where the buffers are always being
resized.
resize_invalid_fraction: float (optional:default=0.4)
If this fraction of any buffer contains unused data points then
resize it to contain only valid points. As with the previous two
options, be careful changing default values, it is
possible to get stuck in a mode where the buffers are always being
resized.
"""
self.max_time = max_time
self.buffer = []
self.buffer_expire = []
self.valid_ends = []
self.valid_starts = []
self.min_buffer_size = min_buffer_size
self.buffer_increment = buffer_increment
self.resize_invalid_fraction = resize_invalid_fraction
for _ in range(num_rings):
self.buffer.append(numpy.zeros(self.min_buffer_size, dtype=dtype))
self.buffer_expire.append(numpy.zeros(self.min_buffer_size,
dtype=int))
self.valid_ends.append(0)
self.valid_starts.append(0)
self.time = 0
@property
def filled_time(self):
return min(self.time, self.max_time)
def num_elements(self):
count = 0
for idx, a in enumerate(self.buffer):
vals = self.valid_starts[idx]
vale = self.valid_ends[idx]
count += len(a[vals:vale])
return count
@property
def nbytes(self):
return sum([a.nbytes for a in self.buffer])
def discard_last(self, indices):
"""Discard the triggers added in the latest update"""
for i in indices:
self.valid_ends[i] -= 1
def advance_time(self):
"""Advance the internal time increment by 1, expiring any triggers
that are now too old.
"""
self.time += 1
def add(self, indices, values):
"""Add triggers in 'values' to the buffers indicated by the indices
"""
for i, v in zip(indices, values):
# Expand ring buffer size if needed
if self.valid_ends[i] == len(self.buffer[i]):
# First clear out any old triggers before resizing
self.update_valid_start(i)
self.check_expired_triggers(i)
# Then increase arrays by buffer_increment
self.buffer[i] = numpy.resize(
self.buffer[i],
max(
len(self.buffer[i]) + self.buffer_increment,
self.min_buffer_size
)
)
self.buffer_expire[i] = numpy.resize(
self.buffer_expire[i],
max(
len(self.buffer[i]) + self.buffer_increment,
self.min_buffer_size
)
)
curr_pos = self.valid_ends[i]
self.buffer[i][curr_pos] = v
self.buffer_expire[i][curr_pos] = self.time
self.valid_ends[i] = self.valid_ends[i] + 1
self.advance_time()
def valid_slice(self, buffer_index):
"""Return the valid slice for this buffer index"""
ret_slice = slice(
self.valid_starts[buffer_index],
self.valid_ends[buffer_index]
)
return ret_slice
def expire_vector(self, buffer_index):
"""Return the expiration vector of a given ring buffer """
return self.buffer_expire[buffer_index][self.valid_slice(buffer_index)]
def update_valid_start(self, buffer_index):
"""Update the valid_start for the given buffer index"""
expired = self.time - self.max_time
exp = self.buffer_expire[buffer_index]
j = self.valid_starts[buffer_index]
while j < self.valid_ends[buffer_index]:
# Everything before this j must be expired
if exp[j] >= expired:
break
j += 1
self.valid_starts[buffer_index] = j
def check_expired_triggers(self, buffer_index):
"""Check if we should free memory for this buffer index.
Check what fraction of triggers are expired in the specified buffer
and if it is more than the allowed fraction (set by
self.resize_invalid_fraction) resize the array to remove them.
"""
val_start = self.valid_starts[buffer_index]
val_end = self.valid_ends[buffer_index]
buf_len = len(self.buffer[buffer_index])
invalid_limit = self.resize_invalid_fraction * buf_len
if (buf_len - val_end) + val_start > invalid_limit:
# If self.resize_invalid_fraction of stored triggers are expired
# or are not set, free up memory
self.buffer_expire[buffer_index] = self.buffer_expire[buffer_index][val_start:val_end].copy()
self.buffer[buffer_index] = self.buffer[buffer_index][val_start:val_end].copy()
self.valid_ends[buffer_index] -= val_start
self.valid_starts[buffer_index] = 0
def data(self, buffer_index):
"""Return the data vector for a given ring buffer"""
self.update_valid_start(buffer_index)
self.check_expired_triggers(buffer_index)
return self.buffer[buffer_index][self.valid_slice(buffer_index)]
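# A minimal usage sketch of the ring buffer above (sizes, values and helper
# name invented for illustration):
def _example_multi_ring_buffer():
    rb = MultiRingBuffer(num_rings=2, max_time=3, dtype=numpy.float32)
    # add one value to ring 0 on each of four consecutive time steps
    for step in range(4):
        rb.add([0], [float(step)])
    # with max_time=3, the value added at time 0 has expired
    assert list(rb.data(0)) == [1.0, 2.0, 3.0]
    return rb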
class CoincExpireBuffer(object):
"""Unordered dynamic sized buffer that handles
multiple expiration vectors.
"""
def __init__(self, expiration, ifos,
initial_size=2**20, dtype=numpy.float32):
"""
Parameters
----------
expiration: int
The 'time' in arbitrary integer units to allow to pass before
removing an element.
ifos: list of strs
List of strings to identify the multiple data expiration times.
initial_size: int, optional
The initial size of the buffer.
dtype: numpy.dtype
The dtype of each element of the buffer.
"""
self.expiration = expiration
self.buffer = numpy.zeros(initial_size, dtype=dtype)
self.index = 0
self.ifos = ifos
self.time = {}
self.timer = {}
for ifo in self.ifos:
self.time[ifo] = 0
self.timer[ifo] = numpy.zeros(initial_size, dtype=numpy.int32)
def __len__(self):
return self.index
@property
def nbytes(self):
"""Returns the approximate memory usage of self.
"""
nbs = [self.timer[ifo].nbytes for ifo in self.ifos]
nbs.append(self.buffer.nbytes)
return sum(nbs)
def increment(self, ifos):
"""Increment without adding triggers"""
self.add([], [], ifos)
def remove(self, num):
"""Remove the the last 'num' elements from the buffer"""
self.index -= num
def add(self, values, times, ifos):
"""Add values to the internal buffer
Parameters
----------
values: numpy.ndarray
Array of elements to add to the internal buffer.
times: dict of arrays
The current time to use for each element being added.
ifos: list of strs
The set of timers to be incremented.
"""
for ifo in ifos:
self.time[ifo] += 1
# Resize the internal buffer if we need more space
if self.index + len(values) >= len(self.buffer):
newlen = len(self.buffer) * 2
for ifo in self.ifos:
self.timer[ifo].resize(newlen)
self.buffer.resize(newlen, refcheck=False)
self.buffer[self.index:self.index+len(values)] = values
if len(values) > 0:
for ifo in self.ifos:
self.timer[ifo][self.index:self.index+len(values)] = times[ifo]
self.index += len(values)
# Remove the expired old elements
if len(ifos) == 2:
# Cython version for two ifo case
self.index = coincbuffer_expireelements(
self.buffer,
self.timer[ifos[0]],
self.timer[ifos[1]],
self.time[ifos[0]],
self.time[ifos[1]],
self.expiration,
self.index
)
else:
# Numpy version for >2 ifo case
keep = None
for ifo in ifos:
kt = self.timer[ifo][:self.index] >= self.time[ifo] - self.expiration
keep = numpy.logical_and(keep, kt) if keep is not None else kt
self.buffer[:keep.sum()] = self.buffer[:self.index][keep]
for ifo in self.ifos:
self.timer[ifo][:keep.sum()] = self.timer[ifo][:self.index][keep]
self.index = keep.sum()
def num_greater(self, value):
"""Return the number of elements larger than 'value'"""
return coincbuffer_numgreater(self.buffer, self.index, value)
@property
def data(self):
"""Return the array of elements"""
return self.buffer[:self.index]
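# A minimal usage sketch of the buffer above (values and helper name invented
# for illustration; num_greater relies on the compiled cython helper):
def _example_coinc_expire_buffer():
    buf = CoincExpireBuffer(expiration=10, ifos=['H1', 'L1'])
    stats = numpy.array([5.0, 7.5], dtype=numpy.float32)
    times = {'H1': numpy.array([1, 1]), 'L1': numpy.array([1, 1])}
    buf.add(stats, times, ['H1', 'L1'])
    assert len(buf) == 2 and buf.num_greater(6.0) == 1
    return buf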
class LiveCoincTimeslideBackgroundEstimator(object):
"""Rolling buffer background estimation."""
def __init__(self, num_templates, analysis_block, background_statistic,
sngl_ranking, stat_files, ifos,
ifar_limit=100,
timeslide_interval=.035,
coinc_threshold=.002,
return_background=False,
**kwargs):
"""
Parameters
----------
num_templates: int
The size of the template bank
analysis_block: int
The number of seconds in each analysis segment
background_statistic: str
The name of the statistic to rank coincident events.
sngl_ranking: str
The single detector ranking to use with the background statistic
stat_files: list of strs
List of filenames that contain information used to construct
various coincident statistics.
ifos: list of strs
List of ifo names that are being analyzed. At the moment this must
be two items such as ['H1', 'L1'].
ifar_limit: float
The largest inverse false alarm rate in years that we would like to
calculate.
timeslide_interval: float
The time in seconds between consecutive timeslide offsets.
coinc_threshold: float
Amount of time allowed to form a coincidence in addition to the
time of flight in seconds.
return_background: boolean
If true, background triggers will also be included in the file
output.
kwargs: dict
Additional options for the statistic to use. See stat.py
for more details on statistic options.
"""
from . import stat
self.num_templates = num_templates
self.analysis_block = analysis_block
stat_class = stat.get_statistic(background_statistic)
self.stat_calculator = stat_class(
sngl_ranking,
stat_files,
ifos=ifos,
**kwargs
)
self.timeslide_interval = timeslide_interval
self.return_background = return_background
self.ifos = ifos
if len(self.ifos) != 2:
raise ValueError("Only a two ifo analysis is supported at this time")
self.lookback_time = (ifar_limit * lal.YRJUL_SI * timeslide_interval) ** 0.5
self.buffer_size = int(numpy.ceil(self.lookback_time / analysis_block))
det0, det1 = Detector(ifos[0]), Detector(ifos[1])
self.time_window = det0.light_travel_time_to_detector(det1) + coinc_threshold
self.coincs = CoincExpireBuffer(self.buffer_size, self.ifos)
self.singles = {}
# temporary array used in `_find_coincs()` to turn `trig_stat`
# into an array much faster than using `numpy.resize()`
self.trig_stat_memory = None
@classmethod
def pick_best_coinc(cls, coinc_results):
"""Choose the best two-ifo coinc by ifar first, then statistic if needed.
This function picks which of the available double-ifo coincs to use.
It chooses the best (highest) ifar. The ranking statistic is used as
a tie-breaker.
A trials factor is applied if multiple types of coincs are possible
at this time given the active ifos.
Parameters
----------
coinc_results: list of coinc result dicts
Dictionary by detector pair of coinc result dicts.
Returns
-------
best: coinc results dict
If there is a coinc, this will contain the 'best' one. Otherwise
it will return the provided dict.
"""
mstat = 0
mifar = 0
mresult = None
# record the trials factor from the possible coincs we could
# maximize over
trials = 0
for result in coinc_results:
# Check that a coinc was possible. See the 'add_singles' method
# to see where this flag was added into the results dict
if 'coinc_possible' in result:
trials += 1
# Check that a coinc exists
if 'foreground/ifar' in result:
ifar = result['foreground/ifar']
stat = result['foreground/stat']
if ifar > mifar or (ifar == mifar and stat > mstat):
mifar = ifar
mstat = stat
mresult = result
# apply trials factor for the best coinc
if mresult:
mresult['foreground/ifar'] = mifar / float(trials)
logging.info('Found %s coinc with ifar %s',
mresult['foreground/type'],
mresult['foreground/ifar'])
return mresult
# If no coinc, just return one of the results dictionaries. They will
# all contain the same results (i.e. single triggers) in this case.
else:
return coinc_results[0]
@classmethod
def from_cli(cls, args, num_templates, analysis_chunk, ifos):
from . import stat
# Allow None inputs
stat_files = args.statistic_files or []
stat_keywords = args.statistic_keywords or []
# flatten the list of lists of filenames to a single list (may be empty)
stat_files = sum(stat_files, [])
kwargs = stat.parse_statistic_keywords_opt(stat_keywords)
return cls(num_templates, analysis_chunk,
args.ranking_statistic,
args.sngl_ranking,
stat_files,
return_background=args.store_background,
ifar_limit=args.background_ifar_limit,
timeslide_interval=args.timeslide_interval,
ifos=ifos,
**kwargs)
@staticmethod
def insert_args(parser):
from . import stat
stat.insert_statistic_option_group(parser)
group = parser.add_argument_group('Coincident Background Estimation')
group.add_argument('--store-background', action='store_true',
help="Return background triggers with zerolag coincidencs")
group.add_argument('--background-ifar-limit', type=float,
help="The limit on inverse false alarm rate to calculate "
"background in years", default=100.0)
group.add_argument('--timeslide-interval', type=float,
help="The interval between timeslides in seconds", default=0.1)
group.add_argument('--ifar-remove-threshold', type=float,
help="NOT YET IMPLEMENTED", default=100.0)
@property
def background_time(self):
"""Return the amount of background time that the buffers contain"""
time = 1.0 / self.timeslide_interval
for ifo in self.singles:
time *= self.singles[ifo].filled_time * self.analysis_block
return time
def save_state(self, filename):
"""Save the current state of the background buffers"""
import pickle
pickle.dump(self, filename)
@staticmethod
def restore_state(filename):
"""Restore state of the background buffers from a file"""
import pickle
return pickle.load(filename)
def ifar(self, coinc_stat):
"""Map a given value of the coincident ranking statistic to an inverse
false-alarm rate (IFAR) using the internally stored background sample.
Parameters
----------
coinc_stat: float
Value of the coincident ranking statistic to be converted.
Returns
-------
ifar: float
Inverse false-alarm rate in unit of years.
ifar_saturated: bool
True if `coinc_stat` is larger than all the available background,
in which case `ifar` is to be considered an upper limit.
"""
n = self.coincs.num_greater(coinc_stat)
ifar = self.background_time / lal.YRJUL_SI / (n + 1)
return ifar, n == 0
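# A worked example of the mapping above (numbers invented for illustration):
# with roughly one year of accumulated background time
# (background_time ~= 3.156e7 s) and n = 9 background coincs louder than the
# candidate, the candidate's inverse false-alarm rate is
# 3.156e7 / lal.YRJUL_SI / (9 + 1) ~= 0.1 yr.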
def set_singles_buffer(self, results):
"""Create the singles buffer
This creates the singles buffer for each ifo. The dtype is determined
by a representative sample of the single triggers in the results.
Parameters
----------
results: dict of dict
Dict indexed by ifo and then trigger column.
"""
# Determine the dtype from a sample of the data.
self.singles_dtype = []
data = False
for ifo in self.ifos:
if ifo in results and results[ifo] is not False \
and len(results[ifo]['snr']):
data = results[ifo]
break
if data is False:
return
for key in data:
self.singles_dtype.append((key, data[key].dtype))
if 'stat' not in data:
self.singles_dtype.append(('stat', self.stat_calculator.single_dtype))
# Create a ring buffer for each template ifo combination
for ifo in self.ifos:
self.singles[ifo] = MultiRingBuffer(self.num_templates,
self.buffer_size,
self.singles_dtype)
def _add_singles_to_buffer(self, results, ifos):
"""Add single detector triggers to the internal buffer
Parameters
----------
results: dict
Dictionary of dictionaries indexed by ifo and keys such as 'snr',
'chisq', etc. The specific format is determined by the
LiveBatchMatchedFilter class.
Returns
-------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
"""
if len(self.singles.keys()) == 0:
self.set_singles_buffer(results)
# If this *still* didn't work, no triggers in first set, try next time
if len(self.singles.keys()) == 0:
return {}
# convert to single detector trigger values
# FIXME Currently configured to use pycbc live output
# where chisq is the reduced chisq and chisq_dof is the actual DOF
logging.info("adding singles to the background estimate...")
updated_indices = {}
for ifo in ifos:
trigs = results[ifo]
            if len(trigs['snr']) > 0:
trigsc = copy.copy(trigs)
trigsc['chisq'] = trigs['chisq'] * trigs['chisq_dof']
trigsc['chisq_dof'] = (trigs['chisq_dof'] + 2) / 2
single_stat = self.stat_calculator.single(trigsc)
else:
single_stat = numpy.array([], ndmin=1,
dtype=self.stat_calculator.single_dtype)
trigs['stat'] = single_stat
            # add each single detector trigger to the buffer and advance it
data = numpy.zeros(len(single_stat), dtype=self.singles_dtype)
for key, value in trigs.items():
data[key] = value
self.singles[ifo].add(trigs['template_id'], data)
updated_indices[ifo] = trigs['template_id']
return updated_indices
def _find_coincs(self, results, valid_ifos):
"""Look for coincs within the set of single triggers
Parameters
----------
results: dict
Dictionary of dictionaries indexed by ifo and keys such as 'snr',
'chisq', etc. The specific format is determined by the
LiveBatchMatchedFilter class.
valid_ifos: list of strs
List of ifos for which new triggers might exist. This must be a
subset of self.ifos. If an ifo is in self.ifos but not in this list
either the ifo is down, or its data has been flagged as "bad".
Returns
-------
num_background: int
Number of time shifted coincidences found.
coinc_results: dict of arrays
A dictionary of arrays containing the coincident results.
"""
# For each new single detector trigger find the allowed coincidences
# Record the template and the index of the single trigger that forms
# each coincidence
# Initialize
cstat = [[]]
offsets = []
ctimes = {self.ifos[0]:[], self.ifos[1]:[]}
single_expire = {self.ifos[0]:[], self.ifos[1]:[]}
template_ids = [[]]
trigger_ids = {self.ifos[0]:[[]], self.ifos[1]:[[]]}
# Calculate all the permutations of coincident triggers for each
# new single detector trigger collected
# Currently only two detectors are supported.
# For each ifo, check its newly added triggers for (zerolag and time
# shift) coincs with all currently stored triggers in the other ifo.
# Do this by keeping the ifo with new triggers fixed and time shifting
# the other ifo. The list 'shift_vec' must be in the same order as
# self.ifos and contain -1 for the shift_ifo / 0 for the fixed_ifo.
for fixed_ifo, shift_ifo, shift_vec in zip(
[self.ifos[0], self.ifos[1]],
[self.ifos[1], self.ifos[0]],
[[0, -1], [-1, 0]]
):
if fixed_ifo not in valid_ifos:
# This ifo is not online now, so no new triggers or coincs
continue
# Find newly added triggers in fixed_ifo
trigs = results[fixed_ifo]
# Loop over them one trigger at a time
for i in range(len(trigs['end_time'])):
trig_stat = trigs['stat'][i]
trig_time = trigs['end_time'][i]
template = trigs['template_id'][i]
# Get current shift_ifo triggers in the same template
times = self.singles[shift_ifo].data(template)['end_time']
stats = self.singles[shift_ifo].data(template)['stat']
# Perform coincidence. i1 is the list of trigger indices in the
# shift_ifo which make coincs, slide is the corresponding slide
# index.
# (The second output would just be a list of zeroes as we only
# have one trigger in the fixed_ifo.)
i1, _, slide = time_coincidence(times,
numpy.array(trig_time, ndmin=1,
dtype=numpy.float64),
self.time_window,
self.timeslide_interval)
# Make a copy of the fixed ifo trig_stat for each coinc.
# NB for some statistics the "stat" entry holds more than just
# a ranking number. E.g. for the phase time consistency test,
# it must also contain the phase, time and sensitivity.
if self.trig_stat_memory is None:
self.trig_stat_memory = numpy.zeros(
1,
dtype=trig_stat.dtype
)
while len(self.trig_stat_memory) < len(i1):
self.trig_stat_memory = numpy.resize(
self.trig_stat_memory,
len(self.trig_stat_memory)*2
)
self.trig_stat_memory[:len(i1)] = trig_stat
# Force data into form needed by stat.py and then compute the
# ranking statistic values.
sngls_list = [[fixed_ifo, self.trig_stat_memory[:len(i1)]],
[shift_ifo, stats[i1]]]
c = self.stat_calculator.rank_stat_coinc(
sngls_list,
slide,
self.timeslide_interval,
shift_vec
)
# Store data about new triggers: slide index, stat value and
# times.
offsets.append(slide)
cstat.append(c)
ctimes[shift_ifo].append(times[i1])
ctimes[fixed_ifo].append(numpy.zeros(len(c),
dtype=numpy.float64))
ctimes[fixed_ifo][-1].fill(trig_time)
# As background triggers are removed after a certain time, we
# need to log when this will be for new background triggers.
single_expire[shift_ifo].append(
self.singles[shift_ifo].expire_vector(template)[i1]
)
single_expire[fixed_ifo].append(numpy.zeros(len(c),
dtype=numpy.int32))
single_expire[fixed_ifo][-1].fill(
self.singles[fixed_ifo].time - 1
)
# Save the template and trigger ids to keep association
# to singles. The trigger was just added so it must be in
# the last position: we mark this with -1 so the
# slicing picks the right point
template_ids.append(numpy.zeros(len(c)) + template)
trigger_ids[shift_ifo].append(i1)
trigger_ids[fixed_ifo].append(numpy.zeros(len(c)) - 1)
cstat = numpy.concatenate(cstat)
template_ids = numpy.concatenate(template_ids).astype(numpy.int32)
for ifo in valid_ifos:
trigger_ids[ifo] = numpy.concatenate(trigger_ids[ifo]).astype(numpy.int32)
logging.info(
"%s: %s background and zerolag coincs",
ppdets(self.ifos, "-"), len(cstat)
)
# Cluster the triggers we've found
# (both zerolag and shifted are handled together)
num_zerolag = 0
num_background = 0
if len(cstat) > 0:
offsets = numpy.concatenate(offsets)
ctime0 = numpy.concatenate(ctimes[self.ifos[0]]).astype(numpy.float64)
ctime1 = numpy.concatenate(ctimes[self.ifos[1]]).astype(numpy.float64)
logging.info("Clustering %s coincs", ppdets(self.ifos, "-"))
cidx = cluster_coincs(cstat, ctime0, ctime1, offsets,
self.timeslide_interval,
self.analysis_block + 2*self.time_window,
method='cython')
offsets = offsets[cidx]
zerolag_idx = (offsets == 0)
bkg_idx = (offsets != 0)
for ifo in self.ifos:
single_expire[ifo] = numpy.concatenate(single_expire[ifo])
single_expire[ifo] = single_expire[ifo][cidx][bkg_idx]
self.coincs.add(cstat[cidx][bkg_idx], single_expire, valid_ifos)
num_zerolag = zerolag_idx.sum()
num_background = bkg_idx.sum()
elif len(valid_ifos) > 0:
self.coincs.increment(valid_ifos)
# Collect coinc results for saving
coinc_results = {}
# Save information about zerolag triggers
if num_zerolag > 0:
idx = cidx[zerolag_idx][0]
zerolag_cstat = cstat[cidx][zerolag_idx]
ifar, ifar_sat = self.ifar(zerolag_cstat)
zerolag_results = {
'foreground/ifar': ifar,
'foreground/ifar_saturated': ifar_sat,
'foreground/stat': zerolag_cstat,
'foreground/type': '-'.join(self.ifos)
}
template = template_ids[idx]
for ifo in self.ifos:
trig_id = trigger_ids[ifo][idx]
single_data = self.singles[ifo].data(template)[trig_id]
for key in single_data.dtype.names:
path = f'foreground/{ifo}/{key}'
zerolag_results[path] = single_data[key]
coinc_results.update(zerolag_results)
# Save some summary statistics about the background
coinc_results['background/time'] = numpy.array([self.background_time])
coinc_results['background/count'] = len(self.coincs.data)
# Save all the background triggers
if self.return_background:
coinc_results['background/stat'] = self.coincs.data
return num_background, coinc_results
def backout_last(self, updated_singles, num_coincs):
"""Remove the recently added singles and coincs
Parameters
----------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
num_coincs: int
The number of coincs that were just added to the internal buffer
of coincident triggers
"""
for ifo in updated_singles:
self.singles[ifo].discard_last(updated_singles[ifo])
self.coincs.remove(num_coincs)
def add_singles(self, results):
"""Add singles to the background estimate and find candidates
Parameters
----------
results: dict
Dictionary of dictionaries indexed by ifo and keys such as 'snr',
'chisq', etc. The specific format is determined by the
LiveBatchMatchedFilter class.
Returns
-------
coinc_results: dict of arrays
A dictionary of arrays containing the coincident results.
"""
# Let's see how large everything is
logging.info(
"%s: %s coincs, %s bytes",
ppdets(self.ifos, "-"), len(self.coincs), self.coincs.nbytes
)
# If there are no results just return
valid_ifos = [k for k in results.keys() if results[k] and k in self.ifos]
if len(valid_ifos) == 0: return {}
# Add single triggers to the internal buffer
self._add_singles_to_buffer(results, ifos=valid_ifos)
# Calculate zerolag and background coincidences
_, coinc_results = self._find_coincs(results, valid_ifos=valid_ifos)
# record if a coinc is possible in this chunk
if len(valid_ifos) == 2:
coinc_results['coinc_possible'] = True
return coinc_results
__all__ = [
"background_bin_from_string",
"timeslide_durations",
"time_coincidence",
"time_multi_coincidence",
"cluster_coincs",
"cluster_coincs_multiifo",
"mean_if_greater_than_zero",
"cluster_over_time",
"MultiRingBuffer",
"CoincExpireBuffer",
"LiveCoincTimeslideBackgroundEstimator"
]
| 53,283
| 37.978786
| 105
|
py
|
pycbc
|
pycbc-master/pycbc/events/coherent.py
|
# Copyright (C) 2022 Andrew Williamson
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module contains functions for calculating and manipulating coherent
triggers.
"""
import numpy as np
def get_coinc_indexes(idx_dict, time_delay_idx):
"""Return the indexes corresponding to coincident triggers
Parameters
----------
idx_dict: dict
Dictionary of indexes of triggers above threshold in each
detector
time_delay_idx: dict
Dictionary giving time delay index (time_delay*sample_rate) for
each ifo
Returns
-------
coinc_idx: list
List of indexes for triggers in geocent time that appear in
multiple detectors
"""
coinc_list = np.array([], dtype=int)
for ifo in idx_dict.keys():
# Create list of indexes above threshold in single detector in geocent
# time. Can then search for triggers that appear in multiple detectors
# later.
if len(idx_dict[ifo]) != 0:
coinc_list = np.hstack(
[coinc_list, idx_dict[ifo] - time_delay_idx[ifo]]
)
    # Search through coinc_list for repeated indexes. These must have been loud
# in at least 2 detectors.
counts = np.unique(coinc_list, return_counts=True)
coinc_idx = counts[0][counts[1] > 1]
return coinc_idx
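# Editor's note: an illustrative, hypothetical usage of get_coinc_indexes;
# the trigger indices and time-delay offsets below are made up.
def _example_get_coinc_indexes():
    idx_dict = {'H1': np.array([105, 230]), 'L1': np.array([102, 400])}
    time_delay_idx = {'H1': 5, 'L1': 2}
    # Geocentric indices are H1: [100, 225] and L1: [100, 398], so only
    # index 100 is seen in both detectors and is returned as coincident.
    return get_coinc_indexes(idx_dict, time_delay_idx)  # -> array([100])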
def get_coinc_triggers(snrs, idx, t_delay_idx):
"""Returns the coincident triggers from the longer SNR timeseries
Parameters
----------
snrs: dict
Dictionary of single detector SNR time series
idx: list
List of geocentric time indexes of coincident triggers
t_delay_idx: dict
Dictionary of indexes corresponding to light travel time from
geocenter for each detector
Returns
-------
coincs: dict
Dictionary of coincident trigger SNRs in each detector
"""
# loops through snrs
# %len(snrs[ifo]) was included as part of a wrap-around solution
coincs = {
ifo: snrs[ifo][(idx + t_delay_idx[ifo]) % len(snrs[ifo])]
for ifo in snrs}
return coincs
def coincident_snr(snr_dict, index, threshold, time_delay_idx):
"""Calculate the coincident SNR for all coincident triggers above
threshold
Parameters
----------
snr_dict: dict
Dictionary of individual detector SNRs
index: list
List of indexes (geocentric) for which to calculate coincident
SNR
threshold: float
Coincident SNR threshold. Triggers below this are cut
time_delay_idx: dict
Dictionary of time delay from geocenter in indexes for each
detector
Returns
-------
rho_coinc: numpy.ndarray
Coincident SNR values for surviving triggers
index: list
The subset of input indexes corresponding to triggers that
survive the cuts
coinc_triggers: dict
Dictionary of individual detector SNRs for triggers that
survive cuts
"""
# Restrict the snr timeseries to just the interesting points
coinc_triggers = get_coinc_triggers(snr_dict, index, time_delay_idx)
# Calculate the coincident snr
snr_array = np.array(
[coinc_triggers[ifo] for ifo in coinc_triggers.keys()]
)
rho_coinc = abs(np.sqrt(np.sum(snr_array * snr_array.conj(), axis=0)))
# Apply threshold
thresh_indexes = rho_coinc > threshold
index = index[thresh_indexes]
coinc_triggers = get_coinc_triggers(snr_dict, index, time_delay_idx)
rho_coinc = rho_coinc[thresh_indexes]
return rho_coinc, index, coinc_triggers
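# Editor's note: an illustrative, hypothetical call of coincident_snr with
# zero time delays; trigger 0 survives the quadrature-sum threshold while
# trigger 1 is cut.
def _example_coincident_snr():
    snr_dict = {'H1': np.array([3 + 4j, 1 + 0j]),
                'L1': np.array([6 + 8j, 1 + 0j])}
    index = np.array([0, 1])
    time_delay_idx = {'H1': 0, 'L1': 0}
    # Trigger 0: sqrt(|3+4j|**2 + |6+8j|**2) = sqrt(125) ~ 11.2 (kept)
    # Trigger 1: sqrt(1 + 1) ~ 1.4 (below threshold, cut)
    return coincident_snr(snr_dict, index, 5.0, time_delay_idx)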
def get_projection_matrix(f_plus, f_cross, sigma, projection="standard"):
"""Calculate the matrix that projects the signal onto the network.
Definitions can be found in Fairhurst (2018) [arXiv:1712.04724].
For the standard projection see Eq. 8, and for left/right
circular projections see Eq. 21, with further discussion in
Appendix A. See also Williamson et al. (2014) [arXiv:1410.6042]
for discussion in context of the GRB search with restricted
binary inclination angles.
Parameters
----------
f_plus: dict
Dictionary containing the plus antenna response factors for
each IFO
f_cross: dict
Dictionary containing the cross antenna response factors for
each IFO
sigma: dict
Dictionary of the sensitivity weights for each IFO
    projection: str, optional
        The signal polarization to project. Choice of 'standard'
        (unrestricted; default), 'right' or 'left' (circular
        polarizations)
Returns
-------
projection_matrix: np.ndarray
The matrix that projects the signal onto the detector network
"""
# Calculate the weighted antenna responses
keys = sorted(sigma.keys())
w_p = np.array([sigma[ifo] * f_plus[ifo] for ifo in keys])
w_c = np.array([sigma[ifo] * f_cross[ifo] for ifo in keys])
# Get the projection matrix associated with the requested projection
if projection == "standard":
denom = np.dot(w_p, w_p) * np.dot(w_c, w_c) - np.dot(w_p, w_c) ** 2
projection_matrix = (
np.dot(w_c, w_c) * np.outer(w_p, w_p)
+ np.dot(w_p, w_p) * np.outer(w_c, w_c)
- np.dot(w_p, w_c) * (np.outer(w_p, w_c) + np.outer(w_c, w_p))
) / denom
elif projection == "left":
projection_matrix = (
np.outer(w_p, w_p)
+ np.outer(w_c, w_c)
+ (np.outer(w_p, w_c) - np.outer(w_c, w_p)) * 1j
) / (np.dot(w_p, w_p) + np.dot(w_c, w_c))
elif projection == "right":
projection_matrix = (
np.outer(w_p, w_p)
+ np.outer(w_c, w_c)
+ (np.outer(w_c, w_p) - np.outer(w_p, w_c)) * 1j
) / (np.dot(w_p, w_p) + np.dot(w_c, w_c))
else:
raise ValueError(
f'Unknown projection: {projection}. Allowed values are: '
'"standard", "left", and "right"')
return projection_matrix
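# Editor's note: a quick, self-contained check of the 'standard' projection
# matrix on made-up antenna responses and sensitivities; being an orthogonal
# projector onto the (w_p, w_c) plane, it should satisfy P @ P == P.
def _example_projection_matrix_is_idempotent():
    f_plus = {'H1': 0.3, 'L1': -0.5, 'V1': 0.1}
    f_cross = {'H1': 0.6, 'L1': 0.2, 'V1': -0.4}
    sigma = {'H1': 100.0, 'L1': 120.0, 'V1': 60.0}
    proj = get_projection_matrix(f_plus, f_cross, sigma)
    return np.allclose(np.dot(proj, proj), proj)  # -> True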
def coherent_snr(
snr_triggers, index, threshold, projection_matrix, coinc_snr=None
):
"""Calculate the coherent SNR for a given set of triggers. See
Eq. 2.26 of Harry & Fairhurst (2011) [arXiv:1012.4939].
Parameters
----------
snr_triggers: dict
Dictionary of the normalised complex snr time series for each
ifo
index: numpy.ndarray
Array of the indexes corresponding to triggers
threshold: float
Coherent SNR threshold. Triggers below this are cut
projection_matrix: numpy.ndarray
Matrix that projects the signal onto the network
    coinc_snr: numpy.ndarray or None, optional
        The coincident SNR for each trigger (default None)
Returns
-------
rho_coh: numpy.ndarray
Array of coherent SNR for the detector network
index: numpy.ndarray
Indexes that survive cuts
snrv: dict
        Dictionary of individual detector triggers that survive cuts
    coinc_snr: numpy.ndarray or list
        The coincident SNR values for triggers surviving the coherent
        cut (an empty list if no coincident SNRs were supplied)
"""
# Calculate rho_coh
snr_array = np.array(
[snr_triggers[ifo] for ifo in sorted(snr_triggers.keys())]
)
snr_proj = np.inner(snr_array.conj().transpose(), projection_matrix)
rho_coh2 = sum(snr_proj.transpose() * snr_array)
rho_coh = abs(np.sqrt(rho_coh2))
# Apply thresholds
above = rho_coh > threshold
index = index[above]
coinc_snr = [] if coinc_snr is None else coinc_snr
if len(coinc_snr) != 0:
coinc_snr = coinc_snr[above]
snrv = {
ifo: snr_triggers[ifo][above]
for ifo in snr_triggers.keys()
}
rho_coh = rho_coh[above]
return rho_coh, index, snrv, coinc_snr
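# Editor's note: an illustrative, hypothetical call of coherent_snr using the
# identity as projection matrix, in which case the coherent SNR reduces to
# the quadrature-sum (coincident) SNR.
def _example_coherent_snr():
    snr_triggers = {'H1': np.array([4 + 3j, 1 + 1j]),
                    'L1': np.array([5 + 0j, 1 - 1j])}
    index = np.array([10, 42])
    projection_matrix = np.identity(2)
    # Trigger at index 10: sqrt(25 + 25) ~ 7.1 > 6, kept;
    # trigger at index 42: sqrt(2 + 2) = 2 < 6, cut.
    return coherent_snr(snr_triggers, index, 6.0, projection_matrix)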
def network_chisq(chisq, chisq_dof, snr_dict):
"""Calculate the network chi-squared statistic. This is the sum of
SNR-weighted individual detector chi-squared values. See Eq. 5.4
of Dorrington (2019) [http://orca.cardiff.ac.uk/id/eprint/128124].
Parameters
----------
chisq: dict
Dictionary of individual detector chi-squared statistics
chisq_dof: dict
Dictionary of the number of degrees of freedom of the
chi-squared statistic
snr_dict: dict
Dictionary of complex individual detector SNRs
Returns
-------
net_chisq: list
Network chi-squared values
"""
ifos = sorted(snr_dict.keys())
chisq_per_dof = dict.fromkeys(ifos)
for ifo in ifos:
chisq_per_dof[ifo] = chisq[ifo] / chisq_dof[ifo]
chisq_per_dof[ifo][chisq_per_dof[ifo] < 1] = 1
snr2 = {
ifo: np.real(np.array(snr_dict[ifo]) * np.array(snr_dict[ifo]).conj())
for ifo in ifos
}
coinc_snr2 = sum(snr2.values())
snr2_ratio = {ifo: snr2[ifo] / coinc_snr2 for ifo in ifos}
net_chisq = sum([chisq_per_dof[ifo] * snr2_ratio[ifo] for ifo in ifos])
return net_chisq
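# Editor's note: a worked, hypothetical sketch of the SNR-weighted network
# chi-squared; each reduced chi-squared (floored at 1) is weighted by that
# detector's fractional SNR squared.
def _example_network_chisq():
    chisq = {'H1': np.array([18.0]), 'L1': np.array([5.0])}
    chisq_dof = {'H1': np.array([10.0]), 'L1': np.array([10.0])}
    snr_dict = {'H1': np.array([6 + 0j]), 'L1': np.array([8 + 0j])}
    # 1.8 * 36/100 + 1.0 * 64/100 = 0.648 + 0.64 = 1.288
    return network_chisq(chisq, chisq_dof, snr_dict)  # -> array([1.288])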
def null_snr(
rho_coh, rho_coinc, apply_cut=True, null_min=5.25, null_grad=0.2,
null_step=20.0, index=None, snrv=None
):
"""Calculate the null SNR and optionally apply threshold cut where
null SNR > null_min where coherent SNR < null_step
and null SNR > (null_grad * rho_coh + null_min) elsewhere. See
Eq. 3.1 of Harry & Fairhurst (2011) [arXiv:1012.4939] or
    Eqs. 11 and 12 of Williamson et al. (2014) [arXiv:1410.6042].
Parameters
----------
rho_coh: numpy.ndarray
Array of coherent snr triggers
rho_coinc: numpy.ndarray
Array of coincident snr triggers
apply_cut: bool
Apply a cut and downweight on null SNR determined by null_min,
null_grad, null_step (default True)
null_min: scalar
Any trigger with null SNR below this is retained
null_grad: scalar
Gradient of null SNR cut where coherent SNR > null_step
null_step: scalar
The threshold in coherent SNR rho_coh above which the null SNR
threshold increases as null_grad * rho_coh
index: dict or None (optional; default None)
Indexes of triggers. If given, will remove triggers that fail
cuts
    snrv: dict or None (optional; default None)
        Individual detector SNRs. If given, will remove triggers that
        fail the cut
Returns
-------
null: numpy.ndarray
Null SNR for surviving triggers
rho_coh: numpy.ndarray
Coherent SNR for surviving triggers
rho_coinc: numpy.ndarray
        Coincident SNR for surviving triggers
index: dict
Indexes for surviving triggers
snrv: dict
Single detector SNRs for surviving triggers
"""
index = {} if index is None else index
snrv = {} if snrv is None else snrv
# Calculate null SNRs
null2 = rho_coinc ** 2 - rho_coh ** 2
# Numerical errors may make this negative and break the sqrt, so set
# negative values to 0.
null2[null2 < 0] = 0
null = null2 ** 0.5
if apply_cut:
# Make cut on null.
keep = (
((null < null_min) & (rho_coh <= null_step))
| (
(null < (rho_coh * null_grad + null_min))
& (rho_coh > null_step)
)
)
index = index[keep]
rho_coh = rho_coh[keep]
snrv = {ifo: snrv[ifo][keep] for ifo in snrv}
rho_coinc = rho_coinc[keep]
null = null[keep]
return null, rho_coh, rho_coinc, index, snrv
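# Editor's note: a minimal, hypothetical sketch of the null-SNR construction
# and cut implemented above, using the default cut parameters.
def _example_null_snr():
    rho_coh = np.array([8.0, 30.0])
    rho_coinc = np.array([9.0, 33.0])
    index = np.array([0, 1])
    snrv = {'H1': np.array([5 + 1j, 20 + 9j])}
    # Trigger 0: null = sqrt(81 - 64) ~ 4.1 < 5.25, kept.
    # Trigger 1: null = sqrt(1089 - 900) ~ 13.7 > 0.2 * 30 + 5.25, cut.
    return null_snr(rho_coh, rho_coinc, index=index, snrv=snrv)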
def reweight_snr_by_null(
network_snr, null, coherent, null_min=5.25, null_grad=0.2,
null_step=20.0):
"""Re-weight the detection statistic as a function of the null SNR.
See Eq. 16 of Williamson et al. (2014) [arXiv:1410.6042].
Parameters
----------
network_snr: numpy.ndarray
Array containing SNR statistic to be re-weighted
null: numpy.ndarray
Null SNR array
    coherent: numpy.ndarray
        Coherent SNR array
Returns
-------
rw_snr: numpy.ndarray
Re-weighted SNR for each trigger
"""
downweight = (
((null > null_min - 1) & (coherent <= null_step))
| (
(null > (coherent * null_grad + null_min - 1))
& (coherent > null_step)
)
)
rw_fac = np.where(
coherent > null_step,
1 + null - (null_min - 1) - (coherent - null_step) * null_grad,
1 + null - (null_min - 1)
)
rw_snr = np.where(downweight, network_snr / rw_fac, network_snr)
return rw_snr
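# Editor's note: a short, hypothetical sketch of the null-SNR reweighting;
# below coherent SNR null_step the statistic is divided by
# 1 + null - (null_min - 1) whenever null exceeds null_min - 1.
def _example_reweight_snr_by_null():
    network_snr = np.array([10.0, 10.0])
    null = np.array([2.0, 6.25])
    coherent = np.array([10.0, 10.0])
    # Trigger 0: null <= 4.25, no downweighting -> 10.0
    # Trigger 1: rw_fac = 1 + 6.25 - 4.25 = 3, so 10 / 3 ~ 3.33
    return reweight_snr_by_null(network_snr, null, coherent)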
def reweightedsnr_cut(rw_snr, rw_snr_threshold):
"""
Performs a cut on reweighted snr based on a given threshold
Parameters
----------
rw_snr: array of reweighted snr
rw_snr_threshhold: any reweighted snr below this threshold is set to 0
Returns
-------
rw_snr: array of reweighted snr with cut values as 0
"""
if rw_snr_threshold is not None:
rw_snr = np.where(rw_snr < rw_snr_threshold, 0, rw_snr)
return rw_snr
| 13,726
| 32.644608
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/events/threshold_cpu.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy
from .simd_threshold_cython import parallel_thresh_cluster, parallel_threshold
from .eventmgr import _BaseThresholdCluster
from .. import opt
if opt.HAVE_GETCONF:
    default_segsize = opt.LEVEL2_CACHE_SIZE // numpy.dtype('complex64').itemsize
else:
# Seems to work for Sandy Bridge/Ivy Bridge/Haswell, for now?
default_segsize = 32768
def threshold_numpy(series, value):
arr = series.data
locs = numpy.where(arr.real**2 + arr.imag**2 > value**2)[0]
vals = arr[locs]
return locs, vals
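# Editor's note: a small, hypothetical sketch of the pure-numpy thresholding
# path, using a stand-in object with a '.data' attribute in place of a real
# pycbc TimeSeries.
def _example_threshold_numpy():
    import types
    series = types.SimpleNamespace(
        data=numpy.array([0.5 + 0.5j, 3 + 4j, 1 + 0j, 0 + 6j],
                         dtype=numpy.complex64))
    # |3+4j| = 5 and |6j| = 6 exceed the threshold of 2, so locs -> [1, 3]
    return threshold_numpy(series, 2.0)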
outl = None
outv = None
count = None
def threshold_inline(series, value):
arr = numpy.array(series.data, copy=False, dtype=numpy.complex64)
global outl, outv, count
if outl is None or len(outl) < len(series):
outl = numpy.zeros(len(series), dtype=numpy.uint32)
outv = numpy.zeros(len(series), dtype=numpy.complex64)
count = numpy.zeros(1, dtype=numpy.uint32)
N = len(series)
threshold = value**2.0
parallel_threshold(N, arr, outv, outl, count, threshold)
num = count[0]
if num > 0:
return outl[0:num], outv[0:num]
else:
        return numpy.array([], numpy.uint32), numpy.array([], numpy.complex64)
# threshold_numpy can also be used here, but for now we use the inline code
# in all instances. Not sure why we're defining threshold *and* threshold_only
# but we are, and I'm not going to change this at this point.
threshold = threshold_inline
threshold_only = threshold_inline
class CPUThresholdCluster(_BaseThresholdCluster):
def __init__(self, series):
self.series = numpy.array(series.data, copy=False,
dtype=numpy.complex64)
self.slen = numpy.uint32(len(series))
self.outv = numpy.zeros(self.slen, numpy.complex64)
self.outl = numpy.zeros(self.slen, numpy.uint32)
self.segsize = numpy.uint32(default_segsize)
def threshold_and_cluster(self, threshold, window):
self.count = parallel_thresh_cluster(self.series, self.slen,
self.outv, self.outl,
numpy.float32(threshold),
numpy.uint32(window),
self.segsize)
if self.count > 0:
return self.outv[0:self.count], self.outl[0:self.count]
else:
            return numpy.array([], dtype=numpy.complex64), numpy.array([], dtype=numpy.uint32)
def _threshold_cluster_factory(series):
return CPUThresholdCluster
| 3,511
| 37.593407
| 98
|
py
|
pycbc
|
pycbc-master/pycbc/events/triggers.py
|
# Copyright (C) 2017 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" This modules contains functions for reading single and coincident triggers
from the command line.
"""
import h5py
import numpy
from pycbc import conversions, pnutils
from pycbc.events import coinc
import pycbc.detector
def insert_bank_bins_option_group(parser):
""" Add options to the optparser object for selecting templates in bins.
Parameters
-----------
parser : object
OptionParser instance.
"""
bins_group = parser.add_argument_group(
"Options for selecting templates in bins.")
bins_group.add_argument("--bank-bins", nargs="+", default=None,
help="Ordered list of mass bin upper boundaries. "
"An ordered list of type-boundary pairs, "
"applied sequentially. Must provide a name "
"(can be any unique string for tagging "
"purposes), the parameter to bin "
"on, and the membership condition via "
"'lt' / 'gt' operators. "
"Ex. name1:component:lt2 name2:total:lt15")
bins_group.add_argument("--bank-file", default=None,
help="HDF format template bank file.")
bins_group.add_argument("--f-lower", default=None,
help="Low frequency cutoff in Hz.")
return bins_group
def bank_bins_from_cli(opts):
""" Parses the CLI options related to binning templates in the bank.
Parameters
----------
opts : object
Result of parsing the CLI with OptionParser.
    Returns
-------
bins_idx : dict
A dict with bin names as key and an array of their indices as value.
bank : dict
A dict of the datasets from the bank file.
"""
bank = {}
    fp = h5py.File(opts.bank_file, 'r')
for key in fp.keys():
bank[key] = fp[key][:]
bank["f_lower"] = float(opts.f_lower) if opts.f_lower else None
if opts.bank_bins:
bins_idx = coinc.background_bin_from_string(opts.bank_bins, bank)
else:
bins_idx = {"all" : numpy.arange(0, len(bank[tuple(fp.keys())[0]]))}
fp.close()
return bins_idx, bank
def get_mass_spin(bank, tid):
"""
Helper function
Parameters
----------
bank : h5py File object
Bank parameter file
tid : integer or array of int
Indices of the entries to be returned
Returns
-------
m1, m2, s1z, s2z : tuple of floats or arrays of floats
Parameter values of the bank entries
"""
m1 = bank['mass1'][:][tid]
m2 = bank['mass2'][:][tid]
s1z = bank['spin1z'][:][tid]
s2z = bank['spin2z'][:][tid]
return m1, m2, s1z, s2z
def get_param(par, args, m1, m2, s1z, s2z):
"""
Helper function
Parameters
----------
par : string
Name of parameter to calculate
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
m1 : float or array of floats
First binary component mass (etc.)
Returns
-------
parvals : float or array of floats
Calculated parameter values
"""
if par == 'mchirp':
parvals = conversions.mchirp_from_mass1_mass2(m1, m2)
elif par == 'mtotal':
parvals = m1 + m2
elif par == 'eta':
parvals = conversions.eta_from_mass1_mass2(m1, m2)
elif par in ['chi_eff', 'effective_spin']:
parvals = conversions.chi_eff(m1, m2, s1z, s2z)
elif par == 'template_duration':
# default to SEOBNRv4 duration function
if not hasattr(args, 'approximant') or args.approximant is None:
args.approximant = "SEOBNRv4"
parvals = pnutils.get_imr_duration(m1, m2, s1z, s2z, args.f_lower,
args.approximant)
if args.min_duration:
parvals += args.min_duration
elif par == 'tau0':
parvals = conversions.tau0_from_mass1_mass2(m1, m2, args.f_lower)
elif par == 'tau3':
parvals = conversions.tau3_from_mass1_mass2(m1, m2, args.f_lower)
elif par in pnutils.named_frequency_cutoffs.keys():
parvals = pnutils.frequency_cutoff_from_name(par, m1, m2, s1z, s2z)
else:
# try asking for a LALSimulation frequency function
parvals = pnutils.get_freq(par, m1, m2, s1z, s2z)
return parvals
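# Editor's note: an illustrative, hypothetical call of get_param for a
# derived mass parameter; 'args' may be None here because the chirp mass
# does not depend on a low-frequency cutoff.
def _example_get_param_mchirp():
    m1 = numpy.array([10.0])
    m2 = numpy.array([10.0])
    s1z = s2z = numpy.array([0.0])
    # Equal-mass system: mchirp = (m1 * m2)**0.6 / (m1 + m2)**0.2 ~ 8.71
    return get_param('mchirp', None, m1, m2, s1z, s2z)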
def get_found_param(injfile, bankfile, trigfile, param, ifo, args=None):
"""
Translates some popular trigger parameters into functions that calculate
them from an hdf found injection file
Parameters
----------
injfile: hdf5 File object
Injection file of format known to ANitz (DOCUMENTME)
bankfile: hdf5 File object or None
Template bank file
trigfile: hdf5 File object or None
Single-detector trigger file
param: string
Parameter to be calculated for the recovered triggers
ifo: string or None
Standard ifo name, ex. 'L1'
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
Returns
-------
[return value]: NumPy array of floats, array of boolean
The calculated parameter values and a Boolean mask indicating which
injections were found in the given ifo (if supplied)
"""
foundtmp = injfile["found_after_vetoes/template_id"][:]
# will record whether inj was found in the given ifo
found_in_ifo = numpy.ones_like(foundtmp, dtype=bool)
if trigfile is not None:
try: # old 2-ifo behaviour
# get the name of the ifo in the injection file, eg "detector_1"
# and the integer from that name
ifolabel = [name for name, val in injfile.attrs.items() if \
"detector" in name and val == ifo][0]
foundtrg = injfile["found_after_vetoes/trigger_id" + ifolabel[-1]]
except IndexError: # multi-ifo
foundtrg = injfile["found_after_vetoes/%s/trigger_id" % ifo]
# multi-ifo pipeline assigns -1 for inj not found in specific ifo
found_in_ifo = foundtrg[:] != -1
if bankfile is not None and param in bankfile.keys():
return bankfile[param][:][foundtmp], found_in_ifo
elif trigfile is not None and param in trigfile[ifo].keys():
return trigfile[ifo][param][:][foundtrg], found_in_ifo
else:
assert bankfile
b = bankfile
return get_param(param, args, b['mass1'][:], b['mass2'][:],
b['spin1z'][:], b['spin2z'][:])[foundtmp],\
found_in_ifo
def get_inj_param(injfile, param, ifo, args=None):
"""
Translates some popular injection parameters into functions that calculate
them from an hdf found injection file
Parameters
----------
injfile: hdf5 File object
Injection file of format known to ANitz (DOCUMENTME)
param: string
Parameter to be calculated for the injected signals
ifo: string
Standard detector name, ex. 'L1'
args: Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
Returns
-------
[return value]: NumPy array of floats
The calculated parameter values
"""
det = pycbc.detector.Detector(ifo)
inj = injfile["injections"]
if param in inj.keys():
return inj["injections/"+param]
if param == "end_time_"+ifo[0].lower():
return inj['end_time'][:] + det.time_delay_from_earth_center(
inj['longitude'][:],
inj['latitude'][:],
inj['end_time'][:])
else:
return get_param(param, args, inj['mass1'][:], inj['mass2'][:],
inj['spin1z'][:], inj['spin2z'][:])
| 8,750
| 35.924051
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/events/stat.py
|
# Copyright (C) 2016 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module contains functions for calculating coincident ranking statistic
values.
"""
import logging
import numpy
import h5py
from . import ranking
from . import coinc_rate
from .eventmgr_cython import logsignalrateinternals_computepsignalbins
from .eventmgr_cython import logsignalrateinternals_compute2detrate
class Stat(object):
"""Base class which should be extended to provide a coincident statistic"""
def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
"""
Create a statistic class instance
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed for some statistics
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, needed for some statistics
The list of detector names
"""
self.files = {}
files = files or []
for filename in files:
with h5py.File(filename, 'r') as f:
stat = f.attrs['stat']
if hasattr(stat, 'decode'):
stat = stat.decode()
if stat in self.files:
raise RuntimeError("We already have one file with stat attr ="
" %s. Can't provide more than one!" % stat)
logging.info("Found file %s for stat %s", filename, stat)
self.files[stat] = filename
# Provide the dtype of the single detector method's output
# This is used by background estimation codes that need to maintain
# a buffer of such values.
self.single_dtype = numpy.float32
# True if a larger single detector statistic will produce a larger
# coincident statistic
self.single_increasing = True
self.ifos = ifos or []
self.sngl_ranking = sngl_ranking
self.sngl_ranking_kwargs = {}
for key, value in kwargs.items():
if key.startswith('sngl_ranking_'):
self.sngl_ranking_kwargs[key[13:]] = value
def get_sngl_ranking(self, trigs):
"""
Returns the ranking for the single detector triggers.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
return ranking.get_sngls_ranking_from_trigs(
trigs,
self.sngl_ranking,
**self.sngl_ranking_kwargs
)
def single(self, trigs): # pylint:disable=unused-argument
"""
Calculate the necessary single detector information
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
err_msg = "This function is a stub that should be overridden by the "
err_msg += "sub-classes. You shouldn't be seeing this error!"
raise NotImplementedError(err_msg)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for a single detector candidate
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
err_msg = "This function is a stub that should be overridden by the "
err_msg += "sub-classes. You shouldn't be seeing this error!"
raise NotImplementedError(err_msg)
def rank_stat_coinc(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
"""
err_msg = "This function is a stub that should be overridden by the "
err_msg += "sub-classes. You shouldn't be seeing this error!"
raise NotImplementedError(err_msg)
def _check_coinc_lim_subclass(self, allowed_names):
"""
Check that we are not using coinc_lim_for_thresh when not valid.
coinc_lim_for_thresh is only defined for the statistic it is present
in. If we subclass, we must check explicitly that it is still valid and
indicate this in the code. If the code does not have this explicit
check you will see the failure message here.
Parameters
-----------
allowed_names : list
list of allowed classes for the specific sub-classed method.
"""
if type(self).__name__ not in allowed_names:
err_msg = "This is being called from a subclass which has not "
err_msg += "been checked for validity with this method. If it is "
err_msg += "valid for the subclass to come here, include in the "
err_msg += "list of allowed_names above."
raise NotImplementedError(err_msg)
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
"""
err_msg = "This function is a stub that should be overridden by the "
err_msg += "sub-classes. You shouldn't be seeing this error!"
raise NotImplementedError(err_msg)
class QuadratureSumStatistic(Stat):
"""Calculate the quadrature sum coincident detection statistic"""
def single(self, trigs):
"""
Calculate the necessary single detector information
Here just the ranking is computed and returned.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
return self.get_sngl_ranking(trigs)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for a single detector candidate
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
return self.single(single_info[1])
def rank_stat_coinc(self, sngls_list, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
Parameters
----------
sngls_list: list
List of (ifo, single detector statistic) tuples
slide: (unused in this statistic)
step: (unused in this statistic)
to_shift: list
List of integers indicating what multiples of the time shift will
be applied (unused in this statistic)
Returns
-------
numpy.ndarray
Array of coincident ranking statistic values
"""
cstat = sum(sngl[1] ** 2. for sngl in sngls_list) ** 0.5
# For single-detector "cuts" the single ranking is set to -1
for sngls in sngls_list:
cstat[sngls == -1] = 0
return cstat
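    # Editor's note: an illustrative, hypothetical usage sketch added for
    # clarity; `_example_rank_stat_coinc` is not part of the original class.
    # 'snr' is passed as the sngl_ranking name purely as a placeholder, since
    # this statistic needs no auxiliary files and the single-detector ranking
    # function is never invoked here.
    @staticmethod
    def _example_rank_stat_coinc():
        stat = QuadratureSumStatistic('snr')
        sngls_list = [('H1', numpy.array([5.0, 6.0])),
                      ('L1', numpy.array([12.0, 8.0]))]
        # Quadrature sums: sqrt(25 + 144) = 13 and sqrt(36 + 64) = 10
        return stat.rank_stat_coinc(sngls_list, None, None, None)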
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
Parameters
----------
s: list
List of (ifo, single detector statistic) tuples for all detectors
except limifo.
thresh: float
The threshold on the coincident statistic.
limifo: string
The ifo for which the limit is to be found.
Returns
-------
numpy.ndarray
Array of limits on the limifo single statistic to
exceed thresh.
"""
# Safety against subclassing and not rethinking this
allowed_names = ['QuadratureSumStatistic']
self._check_coinc_lim_subclass(allowed_names)
s0 = thresh ** 2. - sum(sngl[1] ** 2. for sngl in s)
s0[s0 < 0] = 0
return s0 ** 0.5
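    # Editor's note: a worked, hypothetical sketch of the threshold inversion
    # implemented in coinc_lim_for_thresh above: to reach a coincident
    # statistic of 13 when another detector already contributes 12, the
    # remaining detector needs sqrt(13**2 - 12**2) = 5.
    @staticmethod
    def _example_coinc_lim_arithmetic():
        s = [('L1', numpy.array([12.0, 20.0]))]
        thresh = 13.0
        s0 = thresh ** 2. - sum(sngl[1] ** 2. for sngl in s)
        s0[s0 < 0] = 0
        return s0 ** 0.5  # -> array([5., 0.])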
class PhaseTDStatistic(QuadratureSumStatistic):
"""
Statistic that re-weights combined newsnr using coinc parameters.
The weighting is based on the PDF of time delays, phase differences and
amplitude ratios between triggers in different ifos.
"""
def __init__(self, sngl_ranking, files=None, ifos=None,
pregenerate_hist=True, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, unused here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, needed here
The list of detector names
pregenerate_hist: bool, optional
If False, do not pregenerate histogram on class instantiation.
Default is True.
"""
QuadratureSumStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
self.single_dtype = [
('snglstat', numpy.float32),
('coa_phase', numpy.float32),
('end_time', numpy.float64),
('sigmasq', numpy.float32),
('snr', numpy.float32)
]
# Assign attribute so that it can be replaced with other functions
self.has_hist = False
self.hist_ifos = None
self.ref_snr = 5.0
self.relsense = {}
self.swidth = self.pwidth = self.twidth = None
self.srbmin = self.srbmax = None
self.max_penalty = None
self.pdtype = []
self.weights = {}
self.param_bin = {}
self.two_det_flag = (len(ifos) == 2)
self.two_det_weights = {}
# Some memory
self.pdif = numpy.zeros(128, dtype=numpy.float64)
self.tdif = numpy.zeros(128, dtype=numpy.float64)
self.sdif = numpy.zeros(128, dtype=numpy.float64)
self.tbin = numpy.zeros(128, dtype=numpy.int32)
self.pbin = numpy.zeros(128, dtype=numpy.int32)
self.sbin = numpy.zeros(128, dtype=numpy.int32)
if pregenerate_hist and not len(ifos) == 1:
self.get_hist()
def get_hist(self, ifos=None):
"""
Read in a signal density file for the ifo combination
Parameters
----------
ifos: list
The list of ifos. Needed if not given when initializing the class
instance.
"""
ifos = ifos or self.ifos
selected = None
for name in self.files:
# Pick out the statistic files that provide phase / time/ amp
# relationships and match to the ifos in use
if 'phasetd_newsnr' in name:
ifokey = name.split('_')[2]
num = len(ifokey) / 2
if num != len(ifos):
continue
match = [ifo in ifokey for ifo in ifos]
if False in match:
continue
selected = name
break
if selected is None and len(ifos) > 1:
raise RuntimeError("Couldn't figure out which stat file to use")
logging.info("Using signal histogram %s for ifos %s", selected, ifos)
weights = {}
param = {}
with h5py.File(self.files[selected], 'r') as histfile:
self.hist_ifos = histfile.attrs['ifos']
# Patch for pre-hdf5=3.0 histogram files
try:
logging.info("Decoding hist ifos ..")
self.hist_ifos = [i.decode('UTF-8') for i in self.hist_ifos]
except (UnicodeDecodeError, AttributeError):
pass
# Histogram bin attributes
self.twidth = histfile.attrs['twidth']
self.pwidth = histfile.attrs['pwidth']
self.swidth = histfile.attrs['swidth']
self.srbmin = histfile.attrs['srbmin']
self.srbmax = histfile.attrs['srbmax']
relfac = histfile.attrs['sensitivity_ratios']
for ifo in self.hist_ifos:
weights[ifo] = histfile[ifo]['weights'][:]
param[ifo] = histfile[ifo]['param_bin'][:]
n_ifos = len(self.hist_ifos)
bin_volume = (self.twidth * self.pwidth * self.swidth) ** (n_ifos - 1)
self.hist_max = - 1. * numpy.inf
# Read histogram for each ifo, to use if that ifo has smallest SNR in
# the coinc
for ifo in self.hist_ifos:
# renormalise to PDF
self.weights[ifo] = \
weights[ifo] / (weights[ifo].sum() * bin_volume)
if param[ifo].dtype == numpy.int8:
# Older style, incorrectly sorted histogram file
ncol = param[ifo].shape[1]
self.pdtype = [('c%s' % i, param[ifo].dtype) for i in range(ncol)]
self.param_bin[ifo] = numpy.zeros(len(self.weights[ifo]),
dtype=self.pdtype)
for i in range(ncol):
self.param_bin[ifo]['c%s' % i] = param[ifo][:, i]
lsort = self.param_bin[ifo].argsort()
self.param_bin[ifo] = self.param_bin[ifo][lsort]
self.weights[ifo] = self.weights[ifo][lsort]
else:
# New style, efficient histogram file
# param bin and weights have already been sorted
self.param_bin[ifo] = param[ifo]
self.pdtype = self.param_bin[ifo].dtype
            # Max_penalty is a small number assigned to any bins without
# histogram entries. All histograms in a given file have the same
# min entry by design, so use the min of the last one read in.
self.max_penalty = self.weights[ifo].min()
self.hist_max = max(self.hist_max, self.weights[ifo].max())
if self.two_det_flag:
# The density of signals is computed as a function of 3 binned
# parameters: time difference (t), phase difference (p) and
# SNR ratio (s). These are computed for each combination of
                # detectors, so for 3 detectors 6 differences are needed. However
# many combinations of these parameters are highly unlikely and
# no instances of these combinations occurred when generating
# the statistic files. Rather than storing a bunch of 0s, these
# values are just not stored at all. This reduces the size of
# the statistic file, but means we have to identify the correct
# value to read for every trigger. For 2 detectors we can
# expand the weights lookup table here, basically adding in all
# the "0" values. This makes looking up a value in the
# "weights" table a O(N) rather than O(NlogN) operation. It
# sacrifices RAM to do this, so is a good tradeoff for 2
# detectors, but not for 3!
if not hasattr(self, 'c0_size'):
self.c0_size = {}
self.c1_size = {}
self.c2_size = {}
self.c0_size[ifo] = numpy.int32(
2 * (abs(self.param_bin[ifo]['c0']).max() + 1)
)
self.c1_size[ifo] = numpy.int32(
2 * (abs(self.param_bin[ifo]['c1']).max() + 1)
)
self.c2_size[ifo] = numpy.int32(
2 * (abs(self.param_bin[ifo]['c2']).max() + 1)
)
array_size = [self.c0_size[ifo], self.c1_size[ifo],
self.c2_size[ifo]]
dtypec = self.weights[ifo].dtype
self.two_det_weights[ifo] = \
numpy.zeros(array_size, dtype=dtypec) + self.max_penalty
id0 = self.param_bin[ifo]['c0'].astype(numpy.int32) \
+ self.c0_size[ifo] // 2
id1 = self.param_bin[ifo]['c1'].astype(numpy.int32) \
+ self.c1_size[ifo] // 2
id2 = self.param_bin[ifo]['c2'].astype(numpy.int32) \
+ self.c2_size[ifo] // 2
self.two_det_weights[ifo][id0, id1, id2] = self.weights[ifo]
for ifo, sense in zip(self.hist_ifos, relfac):
self.relsense[ifo] = sense
self.has_hist = True
def logsignalrate(self, stats, shift, to_shift):
"""
Calculate the normalized log rate density of signals via lookup
Parameters
----------
stats: dict of dicts
Single-detector quantities for each detector
shift: numpy array of float
Time shift vector for each coinc to be ranked
to_shift: list of ints
Multiple of the time shift to apply, ordered as self.ifos
Returns
-------
value: log of coinc signal rate density for the given single-ifo
triggers and time shifts
"""
# Convert time shift vector to dict, as hist ifos and self.ifos may
# not be in same order
to_shift = {ifo: s for ifo, s in zip(self.ifos, to_shift)}
if not self.has_hist:
self.get_hist()
# Figure out which ifo of the contributing ifos has the smallest SNR,
# to use as reference for choosing the signal histogram.
snrs = numpy.array([numpy.array(stats[ifo]['snr'], ndmin=1)
for ifo in self.ifos])
smin = snrs.argmin(axis=0)
# Store a list of the triggers using each ifo as reference
rtypes = {ifo: numpy.where(smin == j)[0]
for j, ifo in enumerate(self.ifos)}
# Get reference ifo information
rate = numpy.zeros(len(shift), dtype=numpy.float32)
ps = {ifo: numpy.array(stats[ifo]['coa_phase'], ndmin=1)
for ifo in self.ifos}
ts = {ifo: numpy.array(stats[ifo]['end_time'], ndmin=1)
for ifo in self.ifos}
ss = {ifo: numpy.array(stats[ifo]['snr'], ndmin=1)
for ifo in self.ifos}
sigs = {ifo: numpy.array(stats[ifo]['sigmasq'], ndmin=1)
for ifo in self.ifos}
for ref_ifo in self.ifos:
rtype = rtypes[ref_ifo]
pref = ps[ref_ifo]
tref = ts[ref_ifo]
sref = ss[ref_ifo]
sigref = sigs[ref_ifo]
senseref = self.relsense[self.hist_ifos[0]]
binned = []
other_ifos = [ifo for ifo in self.ifos if ifo != ref_ifo]
for ifo in other_ifos:
# Assign cached memory
length = len(rtype)
while length > len(self.pdif):
newlen = len(self.pdif) * 2
self.pdif = numpy.zeros(newlen, dtype=numpy.float64)
self.tdif = numpy.zeros(newlen, dtype=numpy.float64)
self.sdif = numpy.zeros(newlen, dtype=numpy.float64)
self.pbin = numpy.zeros(newlen, dtype=numpy.int32)
self.tbin = numpy.zeros(newlen, dtype=numpy.int32)
self.sbin = numpy.zeros(newlen, dtype=numpy.int32)
# Calculate differences
logsignalrateinternals_computepsignalbins(
self.pdif,
self.tdif,
self.sdif,
self.pbin,
self.tbin,
self.sbin,
ps[ifo],
ts[ifo],
ss[ifo],
sigs[ifo],
pref,
tref,
sref,
sigref,
shift,
rtype,
self.relsense[ifo],
senseref,
self.twidth,
self.pwidth,
self.swidth,
to_shift[ref_ifo],
to_shift[ifo],
length
)
binned += [
self.tbin[:length],
self.pbin[:length],
self.sbin[:length]
]
# Read signal weight from precalculated histogram
if self.two_det_flag:
# High-RAM, low-CPU option for two-det
logsignalrateinternals_compute2detrate(
binned[0],
binned[1],
binned[2],
self.c0_size[ref_ifo],
self.c1_size[ref_ifo],
self.c2_size[ref_ifo],
rate,
rtype,
sref,
self.two_det_weights[ref_ifo],
self.max_penalty,
self.ref_snr,
len(rtype)
)
else:
# Low[er]-RAM, high[er]-CPU option for >two det
# Convert binned to same dtype as stored in hist
nbinned = numpy.zeros(len(binned[1]), dtype=self.pdtype)
for i, b in enumerate(binned):
nbinned[f'c{i}'] = b
loc = numpy.searchsorted(self.param_bin[ref_ifo], nbinned)
loc[loc == len(self.weights[ref_ifo])] = 0
rate[rtype] = self.weights[ref_ifo][loc]
# These weren't in our histogram so give them max penalty
# instead of random value
missed = numpy.where(
self.param_bin[ref_ifo][loc] != nbinned
)[0]
rate[rtype[missed]] = self.max_penalty
# Scale by signal population SNR
rate[rtype] *= (sref[rtype] / self.ref_snr) ** -4.0
return numpy.log(rate)
def single(self, trigs):
"""
Calculate the necessary single detector information
        Here the ranking is computed, as well as the phase, end time,
        sigma-squared and SNR values.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information. 'snr', 'chisq',
'chisq_dof', 'coa_phase', 'end_time', and 'sigmasq' are required
keys.
Returns
-------
numpy.ndarray
Array of single detector parameter values
"""
sngl_stat = self.get_sngl_ranking(trigs)
singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
singles['snglstat'] = sngl_stat
singles['coa_phase'] = trigs['coa_phase'][:]
singles['end_time'] = trigs['end_time'][:]
singles['sigmasq'] = trigs['sigmasq'][:]
singles['snr'] = trigs['snr'][:]
return numpy.array(singles, ndmin=1)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for a single detector candidate
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
return self.single(single_info[1])
def rank_stat_coinc(self, sngls_list, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic, defined in Eq 2 of
[Nitz et al, 2017](https://doi.org/10.3847/1538-4357/aa8f50).
"""
rstat = sum(s[1]['snglstat'] ** 2 for s in sngls_list)
cstat = rstat + 2. * self.logsignalrate(dict(sngls_list),
slide * step,
to_shift)
cstat[cstat < 0] = 0
return cstat ** 0.5
def coinc_lim_for_thresh(self, sngls_list, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest.
Calculate the required single detector statistic to exceed the
threshold for each of the input triggers.
"""
# Safety against subclassing and not rethinking this
allowed_names = ['PhaseTDStatistic']
self._check_coinc_lim_subclass(allowed_names)
if not self.has_hist:
self.get_hist()
lim_stat = [b['snglstat'] for a, b in sngls_list if a == limifo][0]
s1 = thresh ** 2. - lim_stat ** 2.
# Assume best case scenario and use maximum signal rate
s1 -= 2. * self.hist_max
s1[s1 < 0] = 0
return s1 ** 0.5
class ExpFitStatistic(QuadratureSumStatistic):
"""
Detection statistic using an exponential falloff noise model.
Statistic approximates the negative log noise coinc rate density per
template over single-ifo newsnr values.
"""
def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, not used here
The list of detector names
"""
if not files:
raise RuntimeError("Statistic files not specified")
QuadratureSumStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
# the stat file attributes are hard-coded as '%{ifo}-fit_coeffs'
parsed_attrs = [f.split('-') for f in self.files.keys()]
self.bg_ifos = [at[0] for at in parsed_attrs if
(len(at) == 2 and at[1] == 'fit_coeffs')]
if not len(self.bg_ifos):
raise RuntimeError("None of the statistic files has the required "
"attribute called {ifo}-fit_coeffs !")
self.fits_by_tid = {}
self.alphamax = {}
for i in self.bg_ifos:
self.fits_by_tid[i] = self.assign_fits(i)
self.get_ref_vals(i)
self.single_increasing = False
def assign_fits(self, ifo):
"""
Extract fits from fit files
Parameters
-----------
ifo: str
The detector to get fits for.
Returns
-------
rate_dict: dict
A dictionary containing the fit information in the `alpha`, `rate`
            and `thresh` keys.
"""
coeff_file = h5py.File(self.files[f'{ifo}-fit_coeffs'], 'r')
template_id = coeff_file['template_id'][:]
# the template_ids and fit coeffs are stored in an arbitrary order
# create new arrays in template_id order for easier recall
tid_sort = numpy.argsort(template_id)
fits_by_tid_dict = {}
fits_by_tid_dict['smoothed_fit_coeff'] = \
coeff_file['fit_coeff'][:][tid_sort]
fits_by_tid_dict['smoothed_rate_above_thresh'] = \
coeff_file['count_above_thresh'][:][tid_sort].astype(float)
fits_by_tid_dict['smoothed_rate_in_template'] = \
coeff_file['count_in_template'][:][tid_sort].astype(float)
# The by-template fits may have been stored in the smoothed fits file
if 'fit_by_template' in coeff_file:
coeff_fbt = coeff_file['fit_by_template']
fits_by_tid_dict['fit_by_fit_coeff'] = \
coeff_fbt['fit_coeff'][:][tid_sort]
fits_by_tid_dict['fit_by_rate_above_thresh'] = \
coeff_fbt['count_above_thresh'][:][tid_sort].astype(float)
fits_by_tid_dict['fit_by_rate_in_template'] = \
coeff_file['count_in_template'][:][tid_sort].astype(float)
# Keep the fit threshold in fits_by_tid
fits_by_tid_dict['thresh'] = coeff_file.attrs['stat_threshold']
coeff_file.close()
return fits_by_tid_dict
def get_ref_vals(self, ifo):
"""
Get the largest `alpha` value over all templates for given ifo.
This is stored in `self.alphamax[ifo]` in the class instance.
Parameters
-----------
ifo: str
The detector to get fits for.
"""
self.alphamax[ifo] = self.fits_by_tid[ifo]['smoothed_fit_coeff'].max()
def find_fits(self, trigs):
"""
Get fit coeffs for a specific ifo and template id(s)
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
The coincidence executable will always call this using a bunch of
            trigs from a single template; in that case template_num is stored
            as an attribute and we just return the single value for all
            triggers. If multiple templates are in play we must return arrays.
Returns
--------
alphai: float or numpy array
The alpha fit value(s)
ratei: float or numpy array
The rate fit value(s)
thresh: float or numpy array
The thresh fit value(s)
"""
try:
tnum = trigs.template_num # exists if accessed via coinc_findtrigs
ifo = trigs.ifo
except AttributeError:
tnum = trigs['template_id'] # exists for SingleDetTriggers
assert len(self.ifos) == 1
# Should be exactly one ifo provided
ifo = self.ifos[0]
# fits_by_tid is a dictionary of dictionaries of arrays
# indexed by ifo / coefficient name / template_id
alphai = self.fits_by_tid[ifo]['smoothed_fit_coeff'][tnum]
ratei = self.fits_by_tid[ifo]['smoothed_rate_above_thresh'][tnum]
thresh = self.fits_by_tid[ifo]['thresh']
return alphai, ratei, thresh
def lognoiserate(self, trigs):
"""
Calculate the log noise rate density over single-ifo ranking
Read in single trigger information, compute the ranking
and rescale by the fitted coefficients alpha and rate
Parameters
-----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
---------
lognoisel: numpy.array
Array of log noise rate density for each input trigger.
"""
alphai, ratei, thresh = self.find_fits(trigs)
sngl_stat = self.get_sngl_ranking(trigs)
# alphai is constant of proportionality between single-ifo newsnr and
# negative log noise likelihood in given template
# ratei is rate of trigs in given template compared to average
# thresh is stat threshold used in given ifo
lognoisel = - alphai * (sngl_stat - thresh) + numpy.log(alphai) + \
numpy.log(ratei)
return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)
def single(self, trigs):
"""
Calculate the necessary single detector information
        In this case this is the ranking, rescaled (see the lognoiserate
        method).
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
return self.lognoiserate(trigs)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for a single detector candidate
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
err_msg = "Sorry! No-one has implemented this method yet! "
raise NotImplementedError(err_msg)
def rank_stat_coinc(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
"""
err_msg = "Sorry! No-one has implemented this method yet! "
raise NotImplementedError(err_msg)
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
"""
err_msg = "Sorry! No-one has implemented this method yet! "
raise NotImplementedError(err_msg)
# Keeping this here to help write the new coinc method.
def coinc_OLD(self, s0, s1, slide, step): # pylint:disable=unused-argument
"""Calculate the final coinc ranking statistic"""
# Approximate log likelihood ratio by summing single-ifo negative
# log noise likelihoods
loglr = - s0 - s1
# add squares of threshold stat values via idealized Gaussian formula
threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos]
loglr += sum([t**2. / 2. for t in threshes])
# convert back to a coinc-SNR-like statistic
# via log likelihood ratio \propto rho_c^2 / 2
return (2. * loglr) ** 0.5
# Keeping this here to help write the new coinc_lim method
def coinc_lim_for_thresh_OLD(self, s0, thresh):
"""Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
Parameters
----------
s0: numpy.ndarray
Single detector ranking statistic for the first detector.
thresh: float
The threshold on the coincident statistic.
Returns
-------
numpy.ndarray
Array of limits on the second detector single statistic to
exceed thresh.
"""
s1 = - (thresh ** 2.) / 2. - s0
threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos]
s1 += sum([t**2. / 2. for t in threshes])
return s1
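# Minimal illustrative sketch (hypothetical fit values): it mirrors the
# lognoiserate formula used above, mapping a single-ifo ranking value to a
# log noise rate density via -alpha * (stat - thresh) + ln(alpha) + ln(rate).
def _sketch_exp_fit_lognoiserate():
    alphai, ratei, thresh = 3.5, 1.2, 6.0  # hypothetical fit coefficients
    sngl_stat = numpy.array([6.0, 8.0, 10.0])  # hypothetical ranking values
    lognoisel = - alphai * (sngl_stat - thresh) + numpy.log(alphai) + \
        numpy.log(ratei)
    # louder triggers map to more negative log noise rate densities
    return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)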
class ExpFitCombinedSNR(ExpFitStatistic):
"""
Reworking of ExpFitStatistic designed to resemble network SNR
Use a monotonic function of the negative log noise rate density which
approximates combined (new)snr for coincs with similar newsnr in each ifo
"""
def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, not used here
The list of detector names
"""
ExpFitStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos,
**kwargs)
# for low-mass templates the exponential slope alpha \approx 6
self.alpharef = 6.
self.single_increasing = True
def use_alphamax(self):
"""
Compute the reference alpha from the fit files.
Use the harmonic mean of the maximum individual ifo slopes as the
reference value of alpha.
"""
inv_alphas = [1. / self.alphamax[i] for i in self.bg_ifos]
self.alpharef = 1. / (sum(inv_alphas) / len(inv_alphas))
def single(self, trigs):
"""
Calculate the necessary single detector information
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
logr_n = self.lognoiserate(trigs)
_, _, thresh = self.find_fits(trigs)
# shift by log of reference slope alpha
logr_n += -1. * numpy.log(self.alpharef)
# add threshold and rescale by reference slope
stat = thresh - (logr_n / self.alpharef)
return numpy.array(stat, ndmin=1, dtype=numpy.float32)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for single detector candidates
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
if self.single_increasing:
sngl_multiifo = single_info[1]['snglstat']
else:
sngl_multiifo = -1.0 * single_info[1]['snglstat']
return sngl_multiifo
def rank_stat_coinc(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
Parameters
----------
        s: list
List of (ifo, single detector statistic) tuples
slide: (unused in this statistic)
step: (unused in this statistic)
to_shift: list
List of integers indicating what multiples of the time shift will
be applied (unused in this statistic)
Returns
-------
numpy.ndarray
Array of coincident ranking statistic values
"""
# scale by 1/sqrt(number of ifos) to resemble network SNR
return sum(sngl[1] for sngl in s) / len(s)**0.5
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
Parameters
----------
s: list
List of (ifo, single detector statistic) tuples for all detectors
except limifo.
thresh: float
The threshold on the coincident statistic.
limifo: string
The ifo for which the limit is to be found.
Returns
-------
numpy.ndarray
Array of limits on the limifo single statistic to
exceed thresh.
"""
# Safety against subclassing and not rethinking this
allowed_names = ['ExpFitCombinedSNR']
self._check_coinc_lim_subclass(allowed_names)
return thresh * ((len(s) + 1) ** 0.5) - sum(sngl[1] for sngl in s)
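# Minimal illustrative sketch (hypothetical values): ExpFitCombinedSNR combines
# single-detector values as sum / sqrt(n_ifos), and coinc_lim_for_thresh
# inverts that combination to bound the value needed from one more detector.
def _sketch_combined_snr_limit():
    s = [('H1', 9.0), ('L1', 8.0)]  # hypothetical (ifo, single stat) pairs
    coinc_stat = sum(val for _, val in s) / len(s) ** 0.5
    thresh = 10.0  # hypothetical coincident threshold
    # single-detector value a further ifo would need to reach thresh
    lim = thresh * ((len(s) + 1) ** 0.5) - sum(val for _, val in s)
    return coinc_stat, lim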
class PhaseTDExpFitStatistic(PhaseTDStatistic, ExpFitCombinedSNR):
"""
Statistic combining exponential noise model with signal histogram PDF
"""
def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, needed here
The list of detector names
"""
# read in both foreground PDF and background fit info
ExpFitCombinedSNR.__init__(self, sngl_ranking, files=files, ifos=ifos,
**kwargs)
# need the self.single_dtype value from PhaseTDStatistic
PhaseTDStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
def single(self, trigs):
"""
Calculate the necessary single detector information
        In this case this is the ranking, rescaled (see the lognoiserate
        method), with the phase, end time, sigma and SNR values added in.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
# same single-ifo stat as ExpFitCombinedSNR
sngl_stat = ExpFitCombinedSNR.single(self, trigs)
singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
singles['snglstat'] = sngl_stat
singles['coa_phase'] = trigs['coa_phase'][:]
singles['end_time'] = trigs['end_time'][:]
singles['sigmasq'] = trigs['sigmasq'][:]
singles['snr'] = trigs['snr'][:]
return numpy.array(singles, ndmin=1)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for a single detector candidate
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
err_msg = "Sorry! No-one has implemented this method yet! "
raise NotImplementedError(err_msg)
def rank_stat_coinc(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
"""
err_msg = "Sorry! No-one has implemented this method yet! "
raise NotImplementedError(err_msg)
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
"""
err_msg = "Sorry! No-one has implemented this method yet! "
raise NotImplementedError(err_msg)
# Keeping the old statistic code here for now to help with reimplementing
def coinc_OLD(self, s0, s1, slide, step):
# logsignalrate function inherited from PhaseTDStatistic
logr_s = self.logsignalrate(s0, s1, slide * step)
# rescale by ExpFitCombinedSNR reference slope as for sngl stat
cstat = s0['snglstat'] + s1['snglstat'] + logr_s / self.alpharef
# cut off underflowing and very small values
cstat[cstat < 8.] = 8.
# scale to resemble network SNR
return cstat / (2.**0.5)
def coinc_lim_for_thresh_OLD(self, s0, thresh):
# if the threshold is below this value all triggers will
# pass because of rounding in the coinc method
if thresh <= (8. / (2.**0.5)):
return -1. * numpy.ones(len(s0['snglstat'])) * numpy.inf
if not self.has_hist:
self.get_hist()
# Assume best case scenario and use maximum signal rate
logr_s = self.hist_max
s1 = (2 ** 0.5) * thresh - s0['snglstat'] - logr_s / self.alpharef
return s1
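# Minimal illustrative sketch (hypothetical inputs): the retained coinc_OLD
# combination adds the single statistics to the phase/time/amplitude log
# signal rate rescaled by the reference slope, floors the result at 8 and
# scales by 1/sqrt(2) to resemble network SNR.
def _sketch_phasetd_expfit_coinc_old():
    snglstat0 = numpy.array([7.0, 9.0])  # hypothetical single statistics
    snglstat1 = numpy.array([6.5, 8.5])
    logr_s = numpy.array([2.0, 4.0])  # hypothetical log signal rate values
    alpharef = 6.
    cstat = snglstat0 + snglstat1 + logr_s / alpharef
    cstat[cstat < 8.] = 8.
    return cstat / (2. ** 0.5)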
class ExpFitBgRateStatistic(ExpFitStatistic):
"""
Detection statistic using an exponential falloff noise model.
Statistic calculates the log noise coinc rate for each
template over single-ifo newsnr values.
"""
def __init__(self, sngl_ranking, files=None, ifos=None,
benchmark_lograte=-14.6, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, not used here
The list of detector names
benchmark_lograte: float, default=-14.6
benchmark_lograte is log of a representative noise trigger rate.
The default comes from H1L1 (O2) and is 4.5e-7 Hz.
"""
super(ExpFitBgRateStatistic, self).__init__(sngl_ranking,
files=files, ifos=ifos,
**kwargs)
self.benchmark_lograte = benchmark_lograte
# Reassign the rate to be number per time rather than an arbitrarily
# normalised number
for ifo in self.bg_ifos:
self.reassign_rate(ifo)
def reassign_rate(self, ifo):
"""
        Reassign the rate to be number per time rather than an arbitrarily
        normalised number.
Parameters
-----------
ifo: str
The ifo to consider.
"""
with h5py.File(self.files[f'{ifo}-fit_coeffs'], 'r') as coeff_file:
analysis_time = float(coeff_file.attrs['analysis_time'])
fbt = 'fit_by_template' in coeff_file
self.fits_by_tid[ifo]['smoothed_rate_above_thresh'] /= analysis_time
self.fits_by_tid[ifo]['smoothed_rate_in_template'] /= analysis_time
# The by-template fits may have been stored in the smoothed fits file
if fbt:
self.fits_by_tid[ifo]['fit_by_rate_above_thresh'] /= analysis_time
self.fits_by_tid[ifo]['fit_by_rate_in_template'] /= analysis_time
def rank_stat_coinc(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
Parameters
----------
        s: list
List of (ifo, single detector statistic) tuples
slide: (unused in this statistic)
step: (unused in this statistic)
to_shift: list
List of integers indicating what multiples of the time shift will
be applied (unused in this statistic)
Returns
-------
numpy.ndarray
Array of coincident ranking statistic values
"""
# ranking statistic is -ln(expected rate density of noise triggers)
# plus normalization constant
sngl_dict = {sngl[0]: sngl[1] for sngl in s}
ln_noise_rate = coinc_rate.combination_noise_lograte(
sngl_dict, kwargs['time_addition'])
loglr = - ln_noise_rate + self.benchmark_lograte
return loglr
def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs):
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
Parameters
----------
s: list
List of (ifo, single detector statistic) tuples for all detectors
except limifo.
thresh: float
The threshold on the coincident statistic.
limifo: string
The ifo for which the limit is to be found.
Returns
-------
numpy.ndarray
Array of limits on the limifo single statistic to
exceed thresh.
"""
# Safety against subclassing and not rethinking this
allowed_names = ['ExpFitBgRateStatistic']
self._check_coinc_lim_subclass(allowed_names)
sngl_dict = {sngl[0]: sngl[1] for sngl in s}
sngl_dict[limifo] = numpy.zeros(len(s[0][1]))
ln_noise_rate = coinc_rate.combination_noise_lograte(
sngl_dict, kwargs['time_addition'])
loglr = - thresh - ln_noise_rate + self.benchmark_lograte
return loglr
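# Minimal illustrative sketch (hypothetical values): the benchmark_lograte
# default of -14.6 is the natural log of a representative noise rate,
# exp(-14.6) ~ 4.5e-7 Hz, and the coincident statistic is the difference
# between it and the estimated log noise rate of the candidate.
def _sketch_benchmark_lograte():
    benchmark_lograte = -14.6
    rate_hz = numpy.exp(benchmark_lograte)  # ~4.5e-7 Hz
    ln_noise_rate = -16.0  # hypothetical coincident log noise rate
    loglr = - ln_noise_rate + benchmark_lograte
    return rate_hz, loglr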
class ExpFitFgBgNormStatistic(PhaseTDStatistic,
ExpFitBgRateStatistic):
"""
Statistic combining PhaseTD, ExpFitBg and additional foreground info.
"""
def __init__(self, sngl_ranking, files=None, ifos=None,
reference_ifos='H1,L1', **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs
The list of detector names
reference_ifos: string of comma separated ifo prefixes
Detectors to be used as the reference network for network
sensitivity comparisons. Each must be in fits_by_tid
"""
# read in background fit info and store it
ExpFitBgRateStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
# if ifos not already set, determine via background fit info
self.ifos = self.ifos or self.bg_ifos
# PhaseTD statistic single_dtype plus network sensitivity benchmark
PhaseTDStatistic.__init__(self, sngl_ranking, files=files,
ifos=self.ifos, **kwargs)
self.single_dtype.append(('benchmark_logvol', numpy.float32))
for ifo in self.bg_ifos:
self.assign_median_sigma(ifo)
ref_ifos = reference_ifos.split(',')
# benchmark_logvol is a benchmark sensitivity array over template id
hl_net_med_sigma = numpy.amin([self.fits_by_tid[ifo]['median_sigma']
for ifo in ref_ifos], axis=0)
self.benchmark_logvol = 3.0 * numpy.log(hl_net_med_sigma)
self.single_increasing = False
def assign_median_sigma(self, ifo):
"""
Read and sort the median_sigma values from input files.
Parameters
----------
ifo: str
The ifo to consider.
"""
with h5py.File(self.files[f'{ifo}-fit_coeffs'], 'r') as coeff_file:
template_id = coeff_file['template_id'][:]
tid_sort = numpy.argsort(template_id)
self.fits_by_tid[ifo]['median_sigma'] = \
coeff_file['median_sigma'][:][tid_sort]
def lognoiserate(self, trigs, alphabelow=6):
"""
Calculate the log noise rate density over single-ifo ranking
Read in single trigger information, make the newsnr statistic
and rescale by the fitted coefficients alpha and rate
Parameters
-----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
alphabelow: float, default=6
Use this slope to fit the noise triggers below the point at which
fits are present in the input files.
Returns
---------
lognoisel: numpy.array
Array of log noise rate density for each input trigger.
"""
alphai, ratei, thresh = self.find_fits(trigs)
newsnr = self.get_sngl_ranking(trigs)
# Above the threshold we use the usual fit coefficient (alpha)
# below threshold use specified alphabelow
bt = newsnr < thresh
lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \
numpy.log(ratei)
lognoiselbt = - alphabelow * (newsnr - thresh) + \
numpy.log(alphabelow) + numpy.log(ratei)
lognoisel[bt] = lognoiselbt[bt]
return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)
def single(self, trigs):
"""
Calculate the necessary single detector information
        In this case this is the ranking, rescaled (see the lognoiserate
        method), with the phase, end time, sigma, SNR, template_id and
        benchmark_logvol values added in.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
# single-ifo stat = log of noise rate
sngl_stat = self.lognoiserate(trigs)
# populate other fields to calculate phase/time/amp consistency
# and sigma comparison
singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
singles['snglstat'] = sngl_stat
singles['coa_phase'] = trigs['coa_phase'][:]
singles['end_time'] = trigs['end_time'][:]
singles['sigmasq'] = trigs['sigmasq'][:]
singles['snr'] = trigs['snr'][:]
try:
tnum = trigs.template_num # exists if accessed via coinc_findtrigs
except AttributeError:
tnum = trigs['template_id'] # exists for SingleDetTriggers
# Should only be one ifo fit file provided
assert len(self.ifos) == 1
# Store benchmark log volume as single-ifo information since the coinc
# method does not have access to template id
singles['benchmark_logvol'] = self.benchmark_logvol[tnum]
return numpy.array(singles, ndmin=1)
def rank_stat_single(self, single_info,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the statistic for single detector candidates
Parameters
----------
single_info: tuple
Tuple containing two values. The first is the ifo (str) and the
second is the single detector triggers.
Returns
-------
numpy.ndarray
The array of single detector statistics
"""
sngls = single_info[1]
ln_noise_rate = sngls['snglstat']
ln_noise_rate -= self.benchmark_lograte
network_sigmasq = sngls['sigmasq']
network_logvol = 1.5 * numpy.log(network_sigmasq)
benchmark_logvol = sngls['benchmark_logvol']
network_logvol -= benchmark_logvol
ln_s = -4 * numpy.log(sngls['snr'] / self.ref_snr)
loglr = network_logvol - ln_noise_rate + ln_s
# cut off underflowing and very small values
loglr[loglr < -30.] = -30.
return loglr
def rank_stat_coinc(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
"""
Calculate the coincident detection statistic.
Parameters
----------
        s: list
List of (ifo, single detector statistic) tuples
slide: (unused in this statistic)
step: (unused in this statistic)
to_shift: list
List of integers indicating what multiples of the time shift will
be applied (unused in this statistic)
Returns
-------
numpy.ndarray
Array of coincident ranking statistic values
"""
sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s}
ln_noise_rate = coinc_rate.combination_noise_lograte(
sngl_rates, kwargs['time_addition'])
ln_noise_rate -= self.benchmark_lograte
# Network sensitivity for a given coinc type is approximately
# determined by the least sensitive ifo
network_sigmasq = numpy.amin([sngl[1]['sigmasq'] for sngl in s],
axis=0)
# Volume \propto sigma^3 or sigmasq^1.5
network_logvol = 1.5 * numpy.log(network_sigmasq)
# Get benchmark log volume as single-ifo information :
# benchmark_logvol for a given template is not ifo-dependent, so
# choose the first ifo for convenience
benchmark_logvol = s[0][1]['benchmark_logvol']
network_logvol -= benchmark_logvol
# Use prior histogram to get Bayes factor for signal vs noise
# given the time, phase and SNR differences between IFOs
# First get signal PDF logr_s
stat = {ifo: st for ifo, st in s}
logr_s = self.logsignalrate(stat, slide * step, to_shift)
# Find total volume of phase-time-amplitude space occupied by noise
# coincs
# Extent of time-difference space occupied
noise_twindow = coinc_rate.multiifo_noise_coincident_area(
self.hist_ifos, kwargs['time_addition'])
# Volume is the allowed time difference window, multiplied by 2pi for
# each phase difference dimension and by allowed range of SNR ratio
# for each SNR ratio dimension : there are (n_ifos - 1) dimensions
# for both phase and SNR
n_ifos = len(self.hist_ifos)
hist_vol = noise_twindow * \
(2 * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \
(n_ifos - 1)
# Noise PDF is 1/volume, assuming a uniform distribution of noise
# coincs
logr_n = - numpy.log(hist_vol)
# Combine to get final statistic: log of
# ((rate of signals / rate of noise) * PTA Bayes factor)
loglr = network_logvol - ln_noise_rate + logr_s - logr_n
# cut off underflowing and very small values
loglr[loglr < -30.] = -30.
return loglr
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
Parameters
----------
s: list
List of (ifo, single detector statistic) tuples for all detectors
except limifo.
thresh: float
The threshold on the coincident statistic.
limifo: string
The ifo for which the limit is to be found.
Returns
-------
numpy.ndarray
Array of limits on the limifo single statistic to
exceed thresh.
"""
# Safety against subclassing and not rethinking this
allowed_names = ['ExpFitFgBgNormStatistic',
'ExpFitFgBgNormBBHStatistic',
'DQExpFitFgBgNormStatistic',
'ExpFitFgBgKDEStatistic']
self._check_coinc_lim_subclass(allowed_names)
if not self.has_hist:
self.get_hist()
# if the threshold is below this value all triggers will
# pass because of rounding in the coinc method
        if thresh <= -30.:
            return -1. * numpy.ones(len(s[0][1]['snglstat'])) * numpy.inf
sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s}
# Add limifo to singles dict so that overlap time is calculated correctly
sngl_rates[limifo] = numpy.zeros(len(s[0][1]))
ln_noise_rate = coinc_rate.combination_noise_lograte(
sngl_rates, kwargs['time_addition'])
ln_noise_rate -= self.benchmark_lograte
# Assume best case and use the maximum sigma squared from all triggers
network_sigmasq = numpy.ones(len(s[0][1])) * kwargs['max_sigmasq']
# Volume \propto sigma^3 or sigmasq^1.5
network_logvol = 1.5 * numpy.log(network_sigmasq)
# Get benchmark log volume as single-ifo information :
# benchmark_logvol for a given template is not ifo-dependent, so
# choose the first ifo for convenience
benchmark_logvol = s[0][1]['benchmark_logvol']
network_logvol -= benchmark_logvol
# Assume best case scenario and use maximum signal rate
logr_s = numpy.log(self.hist_max
* (kwargs['min_snr'] / self.ref_snr) ** -4.0)
# Find total volume of phase-time-amplitude space occupied by noise
# coincs
# Extent of time-difference space occupied
noise_twindow = coinc_rate.multiifo_noise_coincident_area(
self.hist_ifos, kwargs['time_addition'])
# Volume is the allowed time difference window, multiplied by 2pi for
# each phase difference dimension and by allowed range of SNR ratio
# for each SNR ratio dimension : there are (n_ifos - 1) dimensions
# for both phase and SNR
n_ifos = len(self.hist_ifos)
hist_vol = noise_twindow * \
(2 * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \
(n_ifos - 1)
# Noise PDF is 1/volume, assuming a uniform distribution of noise
# coincs
logr_n = - numpy.log(hist_vol)
loglr = - thresh + network_logvol - ln_noise_rate + logr_s - logr_n
return loglr
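# Minimal illustrative sketch (hypothetical histogram settings): the noise
# phase-time-amplitude volume used above is the allowed time-difference
# window times (2*pi * SNR-ratio range * phase width) per extra detector,
# with the noise PDF taken as uniform over that volume.
def _sketch_noise_pta_volume():
    noise_twindow = 0.015  # hypothetical coincident time window (seconds)
    srbmax, srbmin, swidth = 5, -5, 0.1  # hypothetical SNR-ratio bin settings
    n_ifos = 2
    hist_vol = noise_twindow * \
        (2 * numpy.pi * (srbmax - srbmin) * swidth) ** (n_ifos - 1)
    logr_n = - numpy.log(hist_vol)
    return logr_n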
class ExpFitFgBgNormBBHStatistic(ExpFitFgBgNormStatistic):
"""
The ExpFitFgBgNormStatistic with a mass weighting factor.
This is the same as the ExpFitFgBgNormStatistic except the likelihood
is multiplied by a signal rate prior modelled as uniform over chirp mass.
As templates are distributed roughly according to mchirp^(-11/3) we
weight by the inverse of this. This ensures that loud events at high mass
where template density is sparse are not swamped by events at lower masses
where template density is high.
"""
def __init__(self, sngl_ranking, files=None, ifos=None,
max_chirp_mass=None, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, not used here
The list of detector names
max_chirp_mass: float, default=None
If given, if a template's chirp mass is above this value it will
be reweighted as if it had this chirp mass. This is to avoid the
problem where the distribution fails to be accurate at high mass
and we can have a case where a single highest-mass template might
produce *all* the loudest background (and foreground) events.
"""
ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
self.mcm = max_chirp_mass
self.curr_mchirp = None
def logsignalrate(self, stats, shift, to_shift):
"""
Calculate the normalized log rate density of signals via lookup
This calls back to the Parent class and then applies the chirp mass
weighting factor.
Parameters
----------
stats: list of dicts giving single-ifo quantities, ordered as
self.ifos
shift: numpy array of float, size of the time shift vector for each
coinc to be ranked
to_shift: list of int, multiple of the time shift to apply ordered
as self.ifos
Returns
-------
value: log of coinc signal rate density for the given single-ifo
triggers and time shifts
"""
# Model signal rate as uniform over chirp mass, background rate is
# proportional to mchirp^(-11/3) due to density of templates
logr_s = ExpFitFgBgNormStatistic.logsignalrate(
self,
stats,
shift,
to_shift
)
logr_s += numpy.log((self.curr_mchirp / 20.0) ** (11./3.0))
return logr_s
def single(self, trigs):
"""
Calculate the necessary single detector information
        In this case this is the ranking, rescaled (see the lognoiserate
        method), with the phase, end time, sigma, SNR, template_id and
        benchmark_logvol values added in. This also stores the current chirp
        mass for use when computing the coinc statistic values.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
-------
numpy.ndarray
The array of single detector values
"""
from pycbc.conversions import mchirp_from_mass1_mass2
self.curr_mchirp = mchirp_from_mass1_mass2(trigs.param['mass1'],
trigs.param['mass2'])
if self.mcm is not None:
# Careful - input might be a str, so cast to float
self.curr_mchirp = min(self.curr_mchirp, float(self.mcm))
return ExpFitFgBgNormStatistic.single(self, trigs)
def coinc_lim_for_thresh(self, s, thresh, limifo,
**kwargs): # pylint:disable=unused-argument
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed
the threshold for each of the input triggers.
Parameters
----------
s: list
List of (ifo, single detector statistic) tuples for all detectors
except limifo.
thresh: float
The threshold on the coincident statistic.
limifo: string
The ifo for which the limit is to be found.
Returns
-------
numpy.ndarray
Array of limits on the limifo single statistic to
exceed thresh.
"""
loglr = ExpFitFgBgNormStatistic.coinc_lim_for_thresh(
self, s, thresh, limifo, **kwargs)
loglr += numpy.log((self.curr_mchirp / 20.0) ** (11./3.0))
return loglr
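# Minimal illustrative sketch (hypothetical masses): the chirp-mass weighting
# applied by ExpFitFgBgNormBBHStatistic shifts the log signal rate by
# ln((mchirp / 20)^(11/3)), optionally capping mchirp at max_chirp_mass.
def _sketch_mchirp_weighting(curr_mchirp=35.0, max_chirp_mass=40.0):
    if max_chirp_mass is not None:
        curr_mchirp = min(curr_mchirp, float(max_chirp_mass))
    return numpy.log((curr_mchirp / 20.0) ** (11. / 3.0))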
class ExpFitFgBgKDEStatistic(ExpFitFgBgNormStatistic):
"""
The ExpFitFgBgNormStatistic with an additional mass and spin weighting
factor determined by KDE statistic files.
This is the same as the ExpFitFgBgNormStatistic except the likelihood
ratio is multiplied by the ratio of signal KDE to template KDE over some
parameters covering the bank.
"""
def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, not used here
The list of detector names
"""
ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
# The stat file attributes are hard-coded as 'signal-kde_file'
# and 'template-kde_file'
parsed_attrs = [f.split('-') for f in self.files.keys()]
self.kde_names = [at[0] for at in parsed_attrs if
(len(at) == 2 and at[1] == 'kde_file')]
assert sorted(self.kde_names) == ['signal', 'template'], \
"Two stat files are required, they should have stat attr " \
"'signal-kde_file' and 'template-kde_file' respectively"
self.kde_by_tid = {}
for kname in self.kde_names:
self.assign_kdes(kname)
# This will hold the template ids of the events for the statistic
# calculation
self.curr_tnum = None
def assign_kdes(self, kname):
"""
Extract values from KDE files
Parameters
-----------
kname: str
Used to label the kde files.
"""
with h5py.File(self.files[kname+'-kde_file'], 'r') as kde_file:
self.kde_by_tid[kname+'_kdevals'] = kde_file['data_kde'][:]
def single(self, trigs):
"""
Calculate the necessary single detector information including getting
template ids from single detector triggers.
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information
Returns
-------
numpy.ndarray
The array of single detector values
"""
try:
# template_num exists if accessed via coinc_findtrigs
self.curr_tnum = trigs.template_num
except AttributeError:
# exists for SingleDetTriggers
self.curr_tnum = trigs['template_id']
return ExpFitFgBgNormStatistic.single(self, trigs)
def logsignalrate(self, stats, shift, to_shift):
"""
Calculate the normalized log rate density of signals via lookup.
This calls back to the parent class and then applies the ratio_kde
weighting factor.
Parameters
----------
stats: list of dicts giving single-ifo quantities, ordered as
self.ifos
shift: numpy array of float, size of the time shift vector for each
coinc to be ranked
to_shift: list of int, multiple of the time shift to apply ordered
as self.ifos
Returns
-------
value: log of coinc signal rate density for the given single-ifo
triggers and time shifts
"""
logr_s = ExpFitFgBgNormStatistic.logsignalrate(self, stats, shift,
to_shift)
signal_kde = self.kde_by_tid["signal_kdevals"][self.curr_tnum]
template_kde = self.kde_by_tid["template_kdevals"][self.curr_tnum]
logr_s += numpy.log(signal_kde / template_kde)
return logr_s
def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs):
"""
Optimization function to identify coincs too quiet to be of interest
Calculate the required single detector statistic to exceed the
        threshold for each of the input triggers.
Parameters
----------
s: list
List of (ifo, single detector statistic) tuples for all detectors
except limifo.
thresh: float
The threshold on the coincident statistic.
limifo: string
The ifo for which the limit is to be found.
Returns
-------
numpy.ndarray
Array of limits on the limifo single statistic to
exceed thresh.
"""
loglr = ExpFitFgBgNormStatistic.coinc_lim_for_thresh(
self, s, thresh, limifo, **kwargs)
signal_kde = self.kde_by_tid["signal_kdevals"][self.curr_tnum]
template_kde = self.kde_by_tid["template_kdevals"][self.curr_tnum]
loglr += numpy.log(signal_kde / template_kde)
return loglr
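# Minimal illustrative sketch: the KDE reweighting shifts the log signal rate
# by ln(signal KDE / template KDE) at the current template id. The arrays are
# hypothetical stand-ins for the values read from the 'signal-kde_file' and
# 'template-kde_file' statistic files.
def _sketch_kde_reweighting():
    signal_kde = numpy.array([0.8, 1.2, 0.5])  # hypothetical per-template
    template_kde = numpy.array([1.0, 1.0, 2.0])  # hypothetical per-template
    curr_tnum = 2
    return numpy.log(signal_kde[curr_tnum] / template_kde[curr_tnum])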
class DQExpFitFgBgNormStatistic(ExpFitFgBgNormStatistic):
"""
The ExpFitFgBgNormStatistic with DQ-based reranking.
This is the same as the ExpFitFgBgNormStatistic except the likelihood
ratio is corrected via estimating relative noise trigger rates based on
the DQ time series.
"""
def __init__(self, sngl_ranking, files=None, ifos=None,
**kwargs):
"""
Parameters
----------
sngl_ranking: str
The name of the ranking to use for the single-detector triggers.
files: list of strs, needed here
A list containing the filenames of hdf format files used to help
construct the coincident statistics. The files must have a 'stat'
attribute which is used to associate them with the appropriate
statistic class.
ifos: list of strs, not used here
The list of detector names
"""
ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files,
ifos=ifos, **kwargs)
self.dq_val_by_time = {}
self.dq_bin_by_id = {}
for k in self.files.keys():
parsed_attrs = k.split('-')
if len(parsed_attrs) < 3:
continue
if parsed_attrs[2] == 'dq_ts_reference':
ifo = parsed_attrs[0]
dq_type = parsed_attrs[1]
dq_vals = self.assign_dq_val(k)
dq_bins = self.assign_bin_id(k)
if ifo not in self.dq_val_by_time:
self.dq_val_by_time[ifo] = {}
self.dq_bin_by_id[ifo] = {}
self.dq_val_by_time[ifo][dq_type] = dq_vals
self.dq_bin_by_id[ifo][dq_type] = dq_bins
def assign_bin_id(self, key):
"""
Assign bin ID values
Assign each template id to a bin name based on a
referenced statistic file.
Parameters
----------
key: str
statistic file key string
Returns
---------
bin_dict: dict of strs
Dictionary containing the bin name for each template id
"""
ifo = key.split('-')[0]
with h5py.File(self.files[key], 'r') as dq_file:
bin_names = dq_file.attrs['names'][:]
locs = []
names = []
for bin_name in bin_names:
bin_locs = dq_file[ifo + '/locs/' + bin_name][:]
locs = list(locs)+list(bin_locs.astype(int))
names = list(names)+list([bin_name]*len(bin_locs))
bin_dict = dict(zip(locs, names))
return bin_dict
def assign_dq_val(self, key):
"""
Assign dq values to each time for every bin based on a
referenced statistic file.
Parameters
----------
key: str
statistic file key string
Returns
---------
dq_dict: dict of {time: dq_value} dicts for each bin
Dictionary containing the mapping between the time
and the dq value for each individual bin.
"""
ifo = key.split('-')[0]
with h5py.File(self.files[key], 'r') as dq_file:
times = dq_file[ifo+'/times'][:]
bin_names = dq_file.attrs['names'][:]
dq_dict = {}
for bin_name in bin_names:
dq_vals = dq_file[ifo+'/dq_vals/'+bin_name][:]
dq_dict[bin_name] = dict(zip(times, dq_vals))
return dq_dict
def find_dq_val(self, trigs):
"""Get dq values for a specific ifo and times"""
time = trigs['end_time'].astype(int)
try:
tnum = trigs.template_num
ifo = trigs.ifo
except AttributeError:
tnum = trigs['template_id']
assert len(self.ifos) == 1
# Should be exactly one ifo provided
ifo = self.ifos[0]
dq_val = numpy.zeros(len(time))
if ifo in self.dq_val_by_time:
for (i, t) in enumerate(time):
for k in self.dq_val_by_time[ifo].keys():
if isinstance(tnum, numpy.ndarray):
bin_name = self.dq_bin_by_id[ifo][k][tnum[i]]
else:
bin_name = self.dq_bin_by_id[ifo][k][tnum]
val = self.dq_val_by_time[ifo][k][bin_name][int(t)]
dq_val[i] = max(dq_val[i], val)
return dq_val
def lognoiserate(self, trigs):
"""
Calculate the log noise rate density over single-ifo ranking
Read in single trigger information, compute the ranking
and rescale by the fitted coefficients alpha and rate
Parameters
-----------
trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
Object holding single detector trigger information.
Returns
---------
lognoisel: numpy.array
Array of log noise rate density for each input trigger.
"""
logr_n = ExpFitFgBgNormStatistic.lognoiserate(
self, trigs)
logr_n += self.find_dq_val(trigs)
return logr_n
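# Minimal illustrative sketch (hypothetical values): the DQ reranking adds a
# per-trigger data-quality value, looked up by time and template bin, to the
# base log noise rate, so triggers from poor-quality times are penalised.
def _sketch_dq_reranking():
    base_lognoiserate = numpy.array([-5.0, -7.0], dtype=numpy.float32)
    dq_val = numpy.array([0.0, 1.5])  # hypothetical per-trigger dq penalties
    return base_lognoiserate + dq_val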
statistic_dict = {
'quadsum': QuadratureSumStatistic,
'single_ranking_only': QuadratureSumStatistic,
'phasetd': PhaseTDStatistic,
'exp_fit_stat': ExpFitStatistic,
'exp_fit_csnr': ExpFitCombinedSNR,
'phasetd_exp_fit_stat': PhaseTDExpFitStatistic,
'dq_phasetd_exp_fit_fgbg_norm': DQExpFitFgBgNormStatistic,
'exp_fit_bg_rate': ExpFitBgRateStatistic,
'phasetd_exp_fit_fgbg_norm': ExpFitFgBgNormStatistic,
'phasetd_exp_fit_fgbg_bbh_norm': ExpFitFgBgNormBBHStatistic,
'phasetd_exp_fit_fgbg_kde': ExpFitFgBgKDEStatistic,
}
def get_statistic(stat):
"""
Error-handling sugar around dict lookup for coincident statistics
Parameters
----------
stat : string
Name of the coincident statistic
Returns
-------
class
Subclass of Stat base class
Raises
------
RuntimeError
If the string is not recognized as corresponding to a Stat subclass
"""
try:
return statistic_dict[stat]
except KeyError:
raise RuntimeError('%s is not an available detection statistic' % stat)
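# Minimal illustrative usage sketch (hypothetical arguments): look up a Stat
# subclass by name and instantiate it with a single-detector ranking, a
# (possibly empty) list of statistic files and the detector names.
def _sketch_get_statistic_usage():
    stat_class = get_statistic('quadsum')
    return stat_class('newsnr', files=[], ifos=['H1', 'L1'])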
def insert_statistic_option_group(parser, default_ranking_statistic=None):
"""
Add ranking statistic options to the optparser object.
Adds the options used to initialize a PyCBC Stat class.
Parameters
-----------
    parser : object
        argparse.ArgumentParser instance.
    default_ranking_statistic : str
        Allows setting a default statistic for the '--ranking-statistic'
        option. The option is no longer required if a default is provided.
    Returns
    --------
    statistic_opt_group : argparse argument group
The argument group that is added to the parser.
"""
statistic_opt_group = parser.add_argument_group(
"Options needed to initialize a PyCBC Stat class for computing the "
"ranking of events from a PyCBC search."
)
statistic_opt_group.add_argument(
"--ranking-statistic",
default=default_ranking_statistic,
choices=statistic_dict.keys(),
required=True if default_ranking_statistic is None else False,
help="The coinc ranking statistic to calculate"
)
statistic_opt_group.add_argument(
"--sngl-ranking",
choices=ranking.sngls_ranking_function_dict.keys(),
required=True,
help="The single-detector trigger ranking to use."
)
statistic_opt_group.add_argument(
"--statistic-files",
nargs='*',
action='append',
default=[],
help="Files containing ranking statistic info"
)
statistic_opt_group.add_argument(
"--statistic-keywords",
nargs='*',
default=[],
help="Provide additional key-word arguments to be sent to "
"the statistic class when it is initialized. Should "
"be given in format --statistic-keywords "
"KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ..."
)
return statistic_opt_group
def parse_statistic_keywords_opt(stat_kwarg_list):
"""
Parse the list of statistic keywords into an appropriate dictionary.
Take input from the input argument ["KWARG1:VALUE1", "KWARG2:VALUE2",
"KWARG3:VALUE3"] and convert into a dictionary.
Parameters
----------
stat_kwarg_list : list
Statistic keywords in list format
Returns
-------
stat_kwarg_dict : dict
Statistic keywords in dict format
"""
stat_kwarg_dict = {}
for inputstr in stat_kwarg_list:
try:
key, value = inputstr.split(':')
stat_kwarg_dict[key] = value
except ValueError:
err_txt = "--statistic-keywords must take input in the " \
"form KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ... " \
"Received {}".format(' '.join(stat_kwarg_list))
raise ValueError(err_txt)
return stat_kwarg_dict
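# Minimal illustrative usage sketch (hypothetical keyword): statistic keyword
# strings of the form KWARG:VALUE are split into a dict; the values are kept
# as strings for the statistic class to interpret.
def _sketch_parse_statistic_keywords():
    kwarg_dict = parse_statistic_keywords_opt(['benchmark_lograte:-14.6'])
    # -> {'benchmark_lograte': '-14.6'}
    return kwarg_dict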
def get_statistic_from_opts(opts, ifos):
"""
Return a Stat class from an optparser object.
This will assume that the options in the statistic_opt_group are present
and will use these options to call stat.get_statistic and initialize the
appropriate Stat subclass with appropriate kwargs.
Parameters
----------
opts : optparse.OptParser instance
The command line options
ifos : list
The list of detector names
Returns
-------
class
Subclass of Stat base class
"""
# Allow None inputs
if opts.statistic_files is None:
opts.statistic_files = []
if opts.statistic_keywords is None:
opts.statistic_keywords = []
# flatten the list of lists of filenames to a single list (may be empty)
opts.statistic_files = sum(opts.statistic_files, [])
extra_kwargs = parse_statistic_keywords_opt(opts.statistic_keywords)
stat_class = get_statistic(opts.ranking_statistic)(
opts.sngl_ranking,
opts.statistic_files,
ifos=ifos,
**extra_kwargs
)
return stat_class
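# Minimal illustrative end-to-end sketch (hypothetical command line): build a
# parser with the statistic options, parse arguments and construct the Stat
# instance from them.
def _sketch_statistic_from_cli():
    import argparse
    parser = argparse.ArgumentParser()
    insert_statistic_option_group(parser)
    opts = parser.parse_args(['--ranking-statistic', 'quadsum',
                              '--sngl-ranking', 'newsnr'])
    return get_statistic_from_opts(opts, ['H1', 'L1'])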
| 84,871 | 36.587245 | 82 | py | pycbc | pycbc-master/pycbc/events/__init__.py |
"""
This package contains modules for clustering events
"""
from .eventmgr import *
from .veto import *
from .coinc import *
| 128 | 13.333333 | 52 | py | pycbc | pycbc-master/pycbc/events/ranking.py |
""" This module contains functions for calculating single-ifo ranking
statistic values
"""
import numpy
def effsnr(snr, reduced_x2, fac=250.):
"""Calculate the effective SNR statistic. See (S5y1 paper) for definition.
"""
snr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
rchisq = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
esnr = snr / (1 + snr ** 2 / fac) ** 0.25 / rchisq ** 0.25
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return esnr
else:
return esnr[0]
def newsnr(snr, reduced_x2, q=6., n=2.):
"""Calculate the re-weighted SNR statistic ('newSNR') from given SNR and
reduced chi-squared values. See http://arxiv.org/abs/1208.3491 for
definition. Previous implementation in glue/ligolw/lsctables.py
"""
nsnr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
reduced_x2 = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
# newsnr is only different from snr if reduced chisq > 1
ind = numpy.where(reduced_x2 > 1.)[0]
nsnr[ind] *= (0.5 * (1. + reduced_x2[ind] ** (q/n))) ** (-1./q)
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0]
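# Minimal illustrative sketch (hypothetical values): for reduced chisq <= 1
# newsnr equals the input SNR, while larger chisq values down-weight the SNR
# by the factor (0.5 * (1 + rchisq^(q/n)))^(-1/q).
def _sketch_newsnr_behaviour():
    unchanged = newsnr(10.0, 0.8)  # reduced chisq <= 1: returns 10.0
    downweighted = newsnr(10.0, 3.0)  # reduced chisq > 1: returns ~6.4
    return unchanged, downweighted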
def newsnr_sgveto(snr, brchisq, sgchisq):
""" Combined SNR derived from NewSNR and Sine-Gaussian Chisq"""
nsnr = numpy.array(newsnr(snr, brchisq), ndmin=1)
sgchisq = numpy.array(sgchisq, ndmin=1)
t = numpy.array(sgchisq > 4, ndmin=1)
if len(t):
nsnr[t] = nsnr[t] / (sgchisq[t] / 4.0) ** 0.5
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0]
def newsnr_sgveto_psdvar(snr, brchisq, sgchisq, psd_var_val,
min_expected_psdvar=0.65):
""" Combined SNR derived from SNR, reduced Allen chisq, sine-Gaussian chisq and
PSD variation statistic"""
# If PSD var is lower than the 'minimum usually expected value' stop this
# being used in the statistic. This low value might arise because a
# significant fraction of the "short" PSD period was gated (for instance).
psd_var_val = numpy.array(psd_var_val, copy=True)
psd_var_val[psd_var_val < min_expected_psdvar] = 1.
scaled_snr = snr * (psd_var_val ** -0.5)
scaled_brchisq = brchisq * (psd_var_val ** -1.)
nsnr = newsnr_sgveto(scaled_snr, scaled_brchisq, sgchisq)
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0]
def newsnr_sgveto_psdvar_threshold(snr, brchisq, sgchisq, psd_var_val,
min_expected_psdvar=0.65,
brchisq_threshold=10.0,
psd_var_val_threshold=10.0):
""" newsnr_sgveto_psdvar with thresholds applied.
This is the newsnr_sgveto_psdvar statistic with additional options
to threshold on chi-squared or PSD variation.
"""
nsnr = newsnr_sgveto_psdvar(snr, brchisq, sgchisq, psd_var_val,
min_expected_psdvar=min_expected_psdvar)
nsnr = numpy.array(nsnr, ndmin=1)
nsnr[brchisq > brchisq_threshold] = 1.
nsnr[psd_var_val > psd_var_val_threshold] = 1.
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0]
def newsnr_sgveto_psdvar_scaled(snr, brchisq, sgchisq, psd_var_val,
scaling=0.33, min_expected_psdvar=0.65):
""" Combined SNR derived from NewSNR, Sine-Gaussian Chisq and scaled PSD
variation statistic. """
nsnr = numpy.array(newsnr_sgveto(snr, brchisq, sgchisq), ndmin=1)
psd_var_val = numpy.array(psd_var_val, ndmin=1, copy=True)
psd_var_val[psd_var_val < min_expected_psdvar] = 1.
# Default scale is 0.33 as tuned from analysis of data from O2 chunks
nsnr = nsnr / psd_var_val ** scaling
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0]
def newsnr_sgveto_psdvar_scaled_threshold(snr, bchisq, sgchisq, psd_var_val,
threshold=2.0):
""" Combined SNR derived from NewSNR and Sine-Gaussian Chisq, and
scaled psd variation.
"""
nsnr = newsnr_sgveto_psdvar_scaled(snr, bchisq, sgchisq, psd_var_val)
nsnr = numpy.array(nsnr, ndmin=1)
nsnr[bchisq > threshold] = 1.
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0]
def get_snr(trigs):
"""
Return SNR from a trigs/dictionary object
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
Dictionary-like object holding single detector trigger information.
'snr' is a required key
Returns
-------
numpy.ndarray
Array of snr values
"""
return numpy.array(trigs['snr'][:], ndmin=1, dtype=numpy.float32)
def get_newsnr(trigs):
"""
Calculate newsnr ('reweighted SNR') for a trigs/dictionary object
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
Dictionary-like object holding single detector trigger information.
'chisq_dof', 'snr', and 'chisq' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'][:] - 2.
nsnr = newsnr(trigs['snr'][:], trigs['chisq'][:] / dof)
return numpy.array(nsnr, ndmin=1, dtype=numpy.float32)
def get_newsnr_sgveto(trigs):
"""
    Calculate newsnr re-weighted by the sine-gaussian veto
Parameters
----------
trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
Dictionary-like object holding single detector trigger information.
'chisq_dof', 'snr', 'sg_chisq' and 'chisq' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'][:] - 2.
nsnr_sg = newsnr_sgveto(trigs['snr'][:],
trigs['chisq'][:] / dof,
trigs['sg_chisq'][:])
return numpy.array(nsnr_sg, ndmin=1, dtype=numpy.float32)
def get_newsnr_sgveto_psdvar(trigs):
"""
Calculate snr re-weighted by Allen chisq, sine-gaussian veto and
psd variation statistic
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'][:] - 2.
nsnr_sg_psd = \
newsnr_sgveto_psdvar(trigs['snr'][:], trigs['chisq'][:] / dof,
trigs['sg_chisq'][:],
trigs['psd_var_val'][:])
return numpy.array(nsnr_sg_psd, ndmin=1, dtype=numpy.float32)
def get_newsnr_sgveto_psdvar_threshold(trigs):
"""
    Calculate newsnr re-weighted by the sine-gaussian veto and psd
    variation statistic, with thresholds applied to the reduced chisq
    and psd variation
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'][:] - 2.
nsnr_sg_psdt = newsnr_sgveto_psdvar_threshold(
trigs['snr'][:], trigs['chisq'][:] / dof,
trigs['sg_chisq'][:],
trigs['psd_var_val'][:]
)
return numpy.array(nsnr_sg_psdt, ndmin=1, dtype=numpy.float32)
def get_newsnr_sgveto_psdvar_scaled(trigs):
"""
Calculate newsnr re-weighted by the sine-gaussian veto and scaled
psd variation statistic
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'][:] - 2.
nsnr_sg_psdscale = \
newsnr_sgveto_psdvar_scaled(
trigs['snr'][:], trigs['chisq'][:] / dof,
trigs['sg_chisq'][:],
trigs['psd_var_val'][:])
return numpy.array(nsnr_sg_psdscale, ndmin=1, dtype=numpy.float32)
def get_newsnr_sgveto_psdvar_scaled_threshold(trigs):
"""
Calculate newsnr re-weighted by the sine-gaussian veto and scaled
psd variation statistic. A further threshold is applied to the
reduced chisq.
Parameters
----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys
Returns
-------
numpy.ndarray
Array of newsnr values
"""
dof = 2. * trigs['chisq_dof'][:] - 2.
nsnr_sg_psdt = \
newsnr_sgveto_psdvar_scaled_threshold(
trigs['snr'][:], trigs['chisq'][:] / dof,
trigs['sg_chisq'][:],
trigs['psd_var_val'][:])
return numpy.array(nsnr_sg_psdt, ndmin=1, dtype=numpy.float32)
sngls_ranking_function_dict = {
'snr': get_snr,
'newsnr': get_newsnr,
'new_snr': get_newsnr,
'newsnr_sgveto': get_newsnr_sgveto,
'newsnr_sgveto_psdvar': get_newsnr_sgveto_psdvar,
'newsnr_sgveto_psdvar_threshold': get_newsnr_sgveto_psdvar_threshold,
'newsnr_sgveto_psdvar_scaled': get_newsnr_sgveto_psdvar_scaled,
'newsnr_sgveto_psdvar_scaled_threshold': get_newsnr_sgveto_psdvar_scaled_threshold,
}
def get_sngls_ranking_from_trigs(trigs, statname, **kwargs):
"""
Return ranking for all trigs given a statname.
Compute the single-detector ranking for a list of input triggers for a
specific statname.
Parameters
-----------
trigs: dict of numpy.ndarrays
Dictionary holding single detector trigger information.
    statname: str
        The statistic to use.
    Returns
    -------
    numpy.ndarray
        Array of single-detector ranking values for the input triggers.
"""
# Identify correct function
try:
sngl_func = sngls_ranking_function_dict[statname]
except KeyError as exc:
err_msg = 'Single-detector ranking {} not recognized'.format(statname)
raise ValueError(err_msg) from exc
# NOTE: In the sngl_funcs all the kwargs are explicitly stated, so any
# kwargs sent here must be known to the function.
return sngl_func(trigs, **kwargs)
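# Minimal illustrative usage sketch (hypothetical trigger values): a small
# dictionary of trigger columns is enough to evaluate the simpler rankings
# through the common entry point.
def _sketch_sngls_ranking_usage():
    trigs = {
        'snr': numpy.array([9.0, 12.0]),
        'chisq': numpy.array([20.0, 60.0]),
        'chisq_dof': numpy.array([11, 11]),  # dof is taken as 2*chisq_dof - 2
    }
    return get_sngls_ranking_from_trigs(trigs, 'newsnr')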
| 10,863 | 32.223242 | 87 | py | pycbc | pycbc-master/pycbc/events/single.py |
""" utilities for assigning FAR to single detector triggers
"""
import logging
import h5py
import numpy as np
from pycbc.events import ranking, trigger_fits as fits
from pycbc.types import MultiDetOptionAction
from pycbc import conversions as conv
from pycbc import bin_utils
class LiveSingle(object):
def __init__(self, ifo,
newsnr_threshold=10.0,
reduced_chisq_threshold=5,
duration_threshold=0,
fit_file=None,
sngl_ifar_est_dist=None,
fixed_ifar=None):
self.ifo = ifo
self.fit_file = fit_file
self.sngl_ifar_est_dist = sngl_ifar_est_dist
self.fixed_ifar = fixed_ifar
self.thresholds = {
"newsnr": newsnr_threshold,
"reduced_chisq": reduced_chisq_threshold,
"duration": duration_threshold}
@staticmethod
def insert_args(parser):
parser.add_argument('--single-newsnr-threshold', nargs='+',
type=float, action=MultiDetOptionAction,
help='Newsnr min threshold for single triggers. '
'Can be given as a single value or as '
'detector-value pairs, e.g. H1:6 L1:7 V1:6.5')
parser.add_argument('--single-reduced-chisq-threshold', nargs='+',
type=float, action=MultiDetOptionAction,
help='Maximum reduced chi-squared threshold for '
'single triggers. Can be given as a single '
'value or as detector-value pairs, e.g. '
'H1:2 L1:2 V1:3')
parser.add_argument('--single-duration-threshold', nargs='+',
type=float, action=MultiDetOptionAction,
help='Minimum duration threshold for single '
'triggers. Can be given as a single value '
'or as detector-value pairs, e.g. H1:6 L1:6 '
'V1:8')
parser.add_argument('--single-fixed-ifar', nargs='+',
type=float, action=MultiDetOptionAction,
help='A fixed value for IFAR, still uses cuts '
'defined by command line. Can be given as '
'a single value or as detector-value pairs, '
'e.g. H1:0.001 L1:0.001 V1:0.0005')
parser.add_argument('--single-fit-file',
                            help='File which contains definitions of fit '
'coefficients and counts for specific '
'single trigger IFAR fitting.')
parser.add_argument('--sngl-ifar-est-dist', nargs='+',
action=MultiDetOptionAction,
help='Which trigger distribution to use when '
'calculating IFAR of single triggers. '
'Can be given as a single value or as '
'detector-value pairs, e.g. H1:mean '
'L1:mean V1:conservative')
@staticmethod
def verify_args(args, parser, ifos):
sngl_opts = [args.single_reduced_chisq_threshold,
args.single_duration_threshold,
args.single_newsnr_threshold,
args.sngl_ifar_est_dist]
sngl_opts_str = ("--single-reduced-chisq-threshold, "
"--single-duration-threshold, "
"--single-newsnr-threshold, "
"--sngl-ifar-est-dist")
if any(sngl_opts) and not all(sngl_opts):
parser.error(f"Single detector trigger options ({sngl_opts_str}) "
"must either all be given or none.")
if args.enable_single_detector_upload \
and not args.enable_gracedb_upload:
parser.error("--enable-single-detector-upload requires "
"--enable-gracedb-upload to be set.")
sngl_optional_opts = [args.single_fixed_ifar,
args.single_fit_file]
sngl_optional_opts_str = ("--single-fixed-ifar, "
"--single-fit-file")
if any(sngl_optional_opts) and not all(sngl_opts):
parser.error("Optional singles options "
f"({sngl_optional_opts_str}) given but no "
f"required options ({sngl_opts_str}) are.")
for ifo in ifos:
# Check which option(s) are needed for each IFO and if they exist:
# Notes for the logic here:
# args.sngl_ifar_est_dist.default_set is True if single value has
# been set to be the same for all values
# bool(args.sngl_ifar_est_dist) is True if option is given
if args.sngl_ifar_est_dist and \
not args.sngl_ifar_est_dist.default_set \
and not args.sngl_ifar_est_dist[ifo]:
# Option has been given, different for each IFO,
# and this one is not present
                parser.error("All IFOs required in --sngl-ifar-est-dist "
"if IFO-specific options are given.")
if not args.sngl_ifar_est_dist[ifo] == 'fixed':
if not args.single_fit_file:
                    # Fixed IFAR option doesn't need the fits file
                    parser.error(f"Single detector trigger fits file must be "
                                 "given if --sngl-ifar-est-dist is not "
f"fixed for all ifos (at least {ifo} has "
f"option {args.sngl_ifar_est_dist[ifo]}).")
if ifo in args.single_fixed_ifar:
                        parser.error(f"Value {args.single_fixed_ifar[ifo]} given "
                                     f"for {ifo} in --single-fixed-ifar, but "
                                     f"--sngl-ifar-est-dist for {ifo} "
                                     f"is {args.sngl_ifar_est_dist[ifo]}, not "
                                     "fixed.")
else:
# Check that the fixed IFAR value has actually been
# given if using this instead of a distribution
if not args.single_fixed_ifar[ifo]:
                        parser.error(f"--single-fixed-ifar must be "
                                     "given if --sngl-ifar-est-dist is fixed. "
                                     f"This is true for at least {ifo}.")
# Return value is a boolean whether we are analysing singles or not
# The checks already performed mean that all(sngl_opts) is okay
return all(sngl_opts)
@classmethod
def from_cli(cls, args, ifo):
return cls(
ifo, newsnr_threshold=args.single_newsnr_threshold[ifo],
reduced_chisq_threshold=args.single_reduced_chisq_threshold[ifo],
duration_threshold=args.single_duration_threshold[ifo],
fixed_ifar=args.single_fixed_ifar,
fit_file=args.single_fit_file,
sngl_ifar_est_dist=args.sngl_ifar_est_dist[ifo]
)
def check(self, trigs, data_reader):
""" Look for a single detector trigger that passes the thresholds in
the current data.
"""
# Apply cuts to trigs before clustering
# Cut on snr so that triggers which could not reach newsnr
# threshold do not have newsnr calculated
valid_idx = (trigs['template_duration'] >
self.thresholds['duration']) & \
(trigs['chisq'] <
self.thresholds['reduced_chisq']) & \
(trigs['snr'] >
self.thresholds['newsnr'])
if not np.any(valid_idx):
return None
cutdurchi_trigs = {k: trigs[k][valid_idx] for k in trigs}
# This uses the pycbc live convention of chisq always meaning the
# reduced chisq.
nsnr_all = ranking.newsnr(cutdurchi_trigs['snr'],
cutdurchi_trigs['chisq'])
nsnr_idx = nsnr_all > self.thresholds['newsnr']
if not np.any(nsnr_idx):
return None
cutall_trigs = {k: cutdurchi_trigs[k][nsnr_idx]
for k in trigs}
# 'cluster' by taking the maximal newsnr value over the trigger set
i = nsnr_all[nsnr_idx].argmax()
# calculate the (inverse) false-alarm rate
nsnr = nsnr_all[nsnr_idx][i]
dur = cutall_trigs['template_duration'][i]
ifar = self.calculate_ifar(nsnr, dur)
if ifar is None:
return None
# fill in a new candidate event
candidate = {
f'foreground/{self.ifo}/{k}': cutall_trigs[k][i] for k in trigs
}
candidate['foreground/stat'] = nsnr
candidate['foreground/ifar'] = ifar
candidate['HWINJ'] = data_reader.near_hwinj()
return candidate
def calculate_ifar(self, sngl_ranking, duration):
logging.info("Calculating IFAR")
if self.fixed_ifar and self.ifo in self.fixed_ifar:
return self.fixed_ifar[self.ifo]
try:
with h5py.File(self.fit_file, 'r') as fit_file:
bin_edges = fit_file['bins_edges'][:]
live_time = fit_file[self.ifo].attrs['live_time']
thresh = fit_file.attrs['fit_threshold']
dist_grp = fit_file[self.ifo][self.sngl_ifar_est_dist]
rates = dist_grp['counts'][:] / live_time
coeffs = dist_grp['fit_coeff'][:]
except FileNotFoundError:
logging.error(
'Single fit file %s not found; '
'dropping a potential single-detector candidate!',
self.fit_file
)
return None
bins = bin_utils.IrregularBins(bin_edges)
dur_bin = bins[duration]
rate = rates[dur_bin]
coeff = coeffs[dur_bin]
rate_louder = rate * fits.cum_fit('exponential', [sngl_ranking],
coeff, thresh)[0]
# apply a trials factor of the number of duration bins
rate_louder *= len(rates)
return conv.sec_to_year(1. / rate_louder)
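# Illustrative sketch, not part of the original module: the arithmetic used
# above to turn a trials-factored rate of louder single-detector events into
# an inverse false-alarm rate (IFAR) in years. All numbers are hypothetical.
def _example_ifar_arithmetic():
    rate_louder = 1e-7        # Hz, hypothetical rate of louder triggers
    n_duration_bins = 4       # hypothetical trials factor over duration bins
    rate_louder *= n_duration_bins
    seconds_per_year = 365.25 * 24 * 3600
    # IFAR in years, analogous to conv.sec_to_year(1. / rate_louder)
    return (1. / rate_louder) / seconds_per_year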
| 10,489
| 45.211454
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/events/coinc_rate.py
|
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module contains functions for calculating expected rates of noise
and signal coincidences.
"""
import itertools
import logging
import numpy
import pycbc.detector
def multiifo_noise_lograte(log_rates, slop):
"""
Calculate the expected rate of noise coincidences for multiple
combinations of detectors
Parameters
----------
log_rates: dict
Key: ifo string, Value: sequence of log single-detector trigger rates,
units assumed to be Hz
slop: float
time added to maximum time-of-flight between detectors to account
for timing error
Returns
-------
expected_log_rates: dict
Key: ifo combination string
Value: expected log coincidence rate in the combination, units log Hz
"""
expected_log_rates = {}
# Order of ifos must be stable in output dict keys, so sort them
ifos = sorted(list(log_rates.keys()))
ifostring = ' '.join(ifos)
# Calculate coincidence for all-ifo combination
expected_log_rates[ifostring] = \
combination_noise_lograte(log_rates, slop)
# If more than one possible coincidence type exists,
# calculate coincidence for subsets through recursion
if len(ifos) > 2:
# Calculate rate for each 'miss-one-out' detector combination
subsets = itertools.combinations(ifos, len(ifos) - 1)
for subset in subsets:
rates_subset = {}
for ifo in subset:
rates_subset[ifo] = log_rates[ifo]
sub_coinc_rates = multiifo_noise_lograte(rates_subset, slop)
# add these sub-coincidences to the overall dictionary
for sub_coinc in sub_coinc_rates:
expected_log_rates[sub_coinc] = sub_coinc_rates[sub_coinc]
return expected_log_rates
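# Illustrative sketch, not part of the original module: expected log noise
# coincidence rates for a hypothetical H1-L1-V1 network in which each
# detector produces triggers at 0.01 Hz, with 5 ms of allowed timing error.
def _example_multiifo_noise_lograte():
    log_rates = {ifo: numpy.log(numpy.array([1e-2]))
                 for ifo in ('H1', 'L1', 'V1')}
    # Returns a dict keyed by 'H1 L1 V1', 'H1 L1', 'H1 V1' and 'L1 V1'
    return multiifo_noise_lograte(log_rates, slop=0.005)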
def combination_noise_rate(rates, slop):
"""
Calculate the expected rate of noise coincidences for a combination of
detectors
WARNING: for high stat values, numerical underflow can occur
Parameters
----------
rates: dict
Key: ifo string, Value: sequence of single-detector trigger rates,
units assumed to be Hz
slop: float
time added to maximum time-of-flight between detectors to account
for timing error
Returns
-------
numpy array
Expected coincidence rate in the combination, units Hz
"""
logging.warning('combination_noise_rate() is liable to numerical '
'underflows, use combination_noise_lograte '
'instead')
log_rates = {k: numpy.log(r) for (k, r) in rates.items()}
# exp may underflow
return numpy.exp(combination_noise_lograte(log_rates, slop))
def combination_noise_lograte(log_rates, slop):
"""
Calculate the expected rate of noise coincidences for a combination of
detectors given log of single detector noise rates
Parameters
----------
log_rates: dict
Key: ifo string, Value: sequence of log single-detector trigger rates,
units assumed to be Hz
slop: float
time added to maximum time-of-flight between detectors to account
for timing error
Returns
-------
numpy array
        Expected log coincidence rate in the combination, units log Hz
"""
    # the sum of the log rates is the log of the product of trigger rates;
    # adding the log of the allowed 'area' multiplies by the coincidence
    # time window
allowed_area = multiifo_noise_coincident_area(list(log_rates), slop)
# list(dict.values()) is python-3-proof
rateprod = numpy.sum(list(log_rates.values()), axis=0)
return numpy.log(allowed_area) + rateprod
def multiifo_noise_coincident_area(ifos, slop):
"""
Calculate the total extent of time offset between 2 detectors,
or area of the 2d space of time offsets for 3 detectors, for
which a coincidence can be generated
Cannot yet handle more than 3 detectors.
Parameters
----------
ifos: list of strings
list of interferometers
slop: float
extra time to add to maximum time-of-flight for timing error
Returns
-------
allowed_area: float
area in units of seconds^(n_ifos-1) that coincident values can fall in
"""
# set up detector objects
dets = {}
for ifo in ifos:
dets[ifo] = pycbc.detector.Detector(ifo)
n_ifos = len(ifos)
if n_ifos == 2:
allowed_area = 2. * \
(dets[ifos[0]].light_travel_time_to_detector(dets[ifos[1]]) + slop)
elif n_ifos == 3:
tofs = numpy.zeros(n_ifos)
ifo2_num = []
# calculate travel time between detectors (plus extra for timing error)
# TO DO: allow for different timing errors between different detectors
for i, ifo in enumerate(ifos):
ifo2_num.append(int(numpy.mod(i + 1, n_ifos)))
det0 = dets[ifo]
det1 = dets[ifos[ifo2_num[i]]]
tofs[i] = det0.light_travel_time_to_detector(det1) + slop
# combine these to calculate allowed area
allowed_area = 0
for i, _ in enumerate(ifos):
allowed_area += 2 * tofs[i] * tofs[ifo2_num[i]] - tofs[i]**2
else:
raise NotImplementedError("Not able to deal with more than 3 ifos")
return allowed_area
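# Illustrative sketch, not part of the original module: for two detectors the
# allowed 'area' is just the length of the time-offset window, i.e.
# 2 * (light travel time + slop); for H1-L1 the light travel time is about
# 10 ms, so with a 2 ms slop this returns roughly 0.024 s.
def _example_noise_coincident_area():
    return multiifo_noise_coincident_area(['H1', 'L1'], slop=0.002)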
def multiifo_signal_coincident_area(ifos):
"""
Calculate the area in which signal time differences are physically allowed
Parameters
----------
ifos: list of strings
list of interferometers
Returns
-------
allowed_area: float
area in units of seconds^(n_ifos-1) that coincident signals will occupy
"""
n_ifos = len(ifos)
if n_ifos == 2:
det0 = pycbc.detector.Detector(ifos[0])
det1 = pycbc.detector.Detector(ifos[1])
allowed_area = 2 * det0.light_travel_time_to_detector(det1)
elif n_ifos == 3:
dets = {}
tofs = numpy.zeros(n_ifos)
ifo2_num = []
# set up detector objects
for ifo in ifos:
dets[ifo] = pycbc.detector.Detector(ifo)
# calculate travel time between detectors
for i, ifo in enumerate(ifos):
ifo2_num.append(int(numpy.mod(i + 1, n_ifos)))
det0 = dets[ifo]
det1 = dets[ifos[ifo2_num[i]]]
tofs[i] = det0.light_travel_time_to_detector(det1)
# calculate allowed area
phi_12 = numpy.arccos((tofs[0]**2 + tofs[1]**2 - tofs[2]**2)
/ (2 * tofs[0] * tofs[1]))
allowed_area = numpy.pi * tofs[0] * tofs[1] * numpy.sin(phi_12)
else:
raise NotImplementedError("Not able to deal with more than 3 ifos")
return allowed_area
| 6,808
| 31.117925
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/psd/estimate.py
|
# Copyright (C) 2012 Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilites to estimate PSDs from data.
"""
import numpy
from pycbc.types import Array, FrequencySeries, TimeSeries, zeros
from pycbc.types import real_same_precision_as, complex_same_precision_as
from pycbc.fft import fft, ifft
# Change to True in front-end if you want this function to use caching
# This is a mostly-hidden optimization option that most users will not want
# to use. It is used in PyCBC Live
USE_CACHING_FOR_WELCH_FFTS = False
USE_CACHING_FOR_INV_SPEC_TRUNC = False
# If using caching we want output to be unique if called at different places
# (and if called from different modules/functions), these unique IDs achieve
# that. The numbers are not significant, only that they are unique.
WELCH_UNIQUE_ID = 438716587
INVSPECTRUNC_UNIQUE_ID = 100257896
def median_bias(n):
"""Calculate the bias of the median average PSD computed from `n` segments.
Parameters
----------
n : int
Number of segments used in PSD estimation.
Returns
-------
ans : float
Calculated bias.
Raises
------
ValueError
For non-integer or non-positive `n`.
Notes
-----
See arXiv:gr-qc/0509116 appendix B for details.
"""
if type(n) is not int or n <= 0:
raise ValueError('n must be a positive integer')
if n >= 1000:
return numpy.log(2)
ans = 1
for i in range(1, (n - 1) // 2 + 1):
ans += 1.0 / (2*i + 1) - 1.0 / (2*i)
return ans
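# Illustrative check, not part of the original module: the median bias factor
# equals 1 for a single segment and approaches log(2) ~ 0.693 as the number
# of segments grows.
def _example_median_bias():
    assert median_bias(1) == 1
    return median_bias(15), numpy.log(2)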
def welch(timeseries, seg_len=4096, seg_stride=2048, window='hann',
avg_method='median', num_segments=None, require_exact_data_fit=False):
"""PSD estimator based on Welch's method.
Parameters
----------
timeseries : TimeSeries
Time series for which the PSD is to be estimated.
seg_len : int
Segment length in samples.
seg_stride : int
Separation between consecutive segments, in samples.
window : {'hann', numpy.ndarray}
Function used to window segments before Fourier transforming, or
a `numpy.ndarray` that specifies the window.
    avg_method : {'median', 'mean', 'median-mean'}
        Method used for averaging individual segment PSDs.
    num_segments : {None, int}
        If given, use only this many segments for the estimate; otherwise
        use as many segments as the data allows.
    require_exact_data_fit : {False, bool}
        If True, require the data length to exactly match the requested
        segmentation; if False, excess data is cropped symmetrically
        before estimation.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
Raises
------
ValueError
        For invalid choices of `seg_len`, `seg_stride`, `window` and
        `avg_method`, and for inconsistent combinations of len(`timeseries`),
        `seg_len` and `seg_stride`.
Notes
-----
See arXiv:gr-qc/0509116 for details.
"""
from pycbc.strain.strain import execute_cached_fft
window_map = {
'hann': numpy.hanning
}
# sanity checks
if isinstance(window, numpy.ndarray) and window.size != seg_len:
raise ValueError('Invalid window: incorrect window length')
if not isinstance(window, numpy.ndarray) and window not in window_map:
raise ValueError('Invalid window: unknown window {!r}'.format(window))
if avg_method not in ('mean', 'median', 'median-mean'):
raise ValueError('Invalid averaging method')
if type(seg_len) is not int or type(seg_stride) is not int \
or seg_len <= 0 or seg_stride <= 0:
raise ValueError('Segment length and stride must be positive integers')
if timeseries.precision == 'single':
fs_dtype = numpy.complex64
elif timeseries.precision == 'double':
fs_dtype = numpy.complex128
num_samples = len(timeseries)
if num_segments is None:
num_segments = int(num_samples // seg_stride)
# NOTE: Is this not always true?
if (num_segments - 1) * seg_stride + seg_len > num_samples:
num_segments -= 1
if not require_exact_data_fit:
data_len = (num_segments - 1) * seg_stride + seg_len
# Get the correct amount of data
if data_len < num_samples:
diff = num_samples - data_len
start = diff // 2
end = num_samples - diff // 2
# Want this to be integers so if diff is odd, catch it here.
if diff % 2:
start = start + 1
timeseries = timeseries[start:end]
num_samples = len(timeseries)
        if data_len > num_samples:
            err_msg = "I was asked to estimate a PSD on %d " % (data_len)
            err_msg += "data samples. However the data provided only contains "
            err_msg += "%d data samples." % (num_samples)
            raise ValueError(err_msg)
if num_samples != (num_segments - 1) * seg_stride + seg_len:
raise ValueError('Incorrect choice of segmentation parameters')
if not isinstance(window, numpy.ndarray):
window = window_map[window](seg_len)
w = Array(window.astype(timeseries.dtype))
# calculate psd of each segment
delta_f = 1. / timeseries.delta_t / seg_len
if not USE_CACHING_FOR_WELCH_FFTS:
segment_tilde = FrequencySeries(
numpy.zeros(int(seg_len / 2 + 1)),
delta_f=delta_f,
dtype=fs_dtype,
)
segment_psds = []
for i in range(num_segments):
segment_start = i * seg_stride
segment_end = segment_start + seg_len
segment = timeseries[segment_start:segment_end]
assert len(segment) == seg_len
if not USE_CACHING_FOR_WELCH_FFTS:
fft(segment * w, segment_tilde)
else:
segment_tilde = execute_cached_fft(segment * w,
uid=WELCH_UNIQUE_ID)
seg_psd = abs(segment_tilde * segment_tilde.conj()).numpy()
        # halve the DC and Nyquist components to be consistent with T010095
seg_psd[0] /= 2
seg_psd[-1] /= 2
segment_psds.append(seg_psd)
segment_psds = numpy.array(segment_psds)
if avg_method == 'mean':
psd = numpy.mean(segment_psds, axis=0)
elif avg_method == 'median':
psd = numpy.median(segment_psds, axis=0) / median_bias(num_segments)
elif avg_method == 'median-mean':
odd_psds = segment_psds[::2]
even_psds = segment_psds[1::2]
odd_median = numpy.median(odd_psds, axis=0) / \
median_bias(len(odd_psds))
even_median = numpy.median(even_psds, axis=0) / \
median_bias(len(even_psds))
psd = (odd_median + even_median) / 2
psd *= 2 * delta_f * seg_len / (w*w).sum()
return FrequencySeries(psd, delta_f=delta_f, dtype=timeseries.dtype,
epoch=timeseries.start_time)
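# Illustrative sketch, not part of the original module: estimate the PSD of
# 256 s of unit-variance white Gaussian noise sampled at 4096 Hz, using 2 s
# Hann-windowed segments with 50% overlap. For white noise the one-sided PSD
# should come out close to 2 * delta_t ~ 4.9e-4 Hz^-1 across the band.
def _example_welch_usage():
    delta_t = 1.0 / 4096
    noise = TimeSeries(numpy.random.normal(size=256 * 4096), delta_t=delta_t)
    return welch(noise, seg_len=8192, seg_stride=4096, avg_method='median')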
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None):
"""Modify a PSD such that the impulse response associated with its inverse
square root is no longer than `max_filter_len` time samples. In practice
this corresponds to a coarse graining or smoothing of the PSD.
Parameters
----------
psd : FrequencySeries
PSD whose inverse spectrum is to be truncated.
max_filter_len : int
Maximum length of the time-domain filter in samples.
low_frequency_cutoff : {None, int}
Frequencies below `low_frequency_cutoff` are zeroed in the output.
trunc_method : {None, 'hann'}
Function used for truncating the time-domain filter.
None produces a hard truncation at `max_filter_len`.
Returns
-------
psd : FrequencySeries
PSD whose inverse spectrum has been truncated.
Raises
------
ValueError
For invalid types or values of `max_filter_len` and `low_frequency_cutoff`.
Notes
-----
See arXiv:gr-qc/0509116 for details.
"""
from pycbc.strain.strain import execute_cached_fft, execute_cached_ifft
# sanity checks
if type(max_filter_len) is not int or max_filter_len <= 0:
raise ValueError('max_filter_len must be a positive integer')
if low_frequency_cutoff is not None and \
(low_frequency_cutoff < 0 or
low_frequency_cutoff > psd.sample_frequencies[-1]):
raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD')
N = (len(psd)-1)*2
inv_asd = FrequencySeries(zeros(len(psd)), delta_f=psd.delta_f, \
dtype=complex_same_precision_as(psd))
kmin = 1
if low_frequency_cutoff:
kmin = int(low_frequency_cutoff / psd.delta_f)
inv_asd[kmin:N//2] = (1.0 / psd[kmin:N//2]) ** 0.5
if not USE_CACHING_FOR_INV_SPEC_TRUNC:
q = TimeSeries(
numpy.zeros(N),
            delta_t=1.0 / (N * psd.delta_f),
dtype=real_same_precision_as(psd)
)
ifft(inv_asd, q)
else:
q = execute_cached_ifft(inv_asd, copy_output=False,
uid=INVSPECTRUNC_UNIQUE_ID)
trunc_start = max_filter_len // 2
trunc_end = N - max_filter_len // 2
if trunc_end < trunc_start:
raise ValueError('Invalid value in inverse_spectrum_truncation')
if trunc_method == 'hann':
trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype)
q[0:trunc_start] *= trunc_window[-trunc_start:]
q[trunc_end:N] *= trunc_window[0:max_filter_len//2]
if trunc_start < trunc_end:
q[trunc_start:trunc_end] = 0
if not USE_CACHING_FOR_INV_SPEC_TRUNC:
psd_trunc = FrequencySeries(
numpy.zeros(len(psd)),
delta_f=psd.delta_f,
dtype=complex_same_precision_as(psd)
)
fft(q, psd_trunc)
else:
psd_trunc = execute_cached_fft(q, copy_output=False,
uid=INVSPECTRUNC_UNIQUE_ID)
psd_trunc *= psd_trunc.conj()
psd_out = 1. / abs(psd_trunc)
return psd_out
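# Illustrative sketch, not part of the original module: smooth a PSD so that
# the impulse response of its inverse square root lasts at most 4 s. The PSD
# here is a flat stand-in with delta_f = 1/16 Hz, corresponding to a 4096 Hz
# sample rate and N = 65536 time samples, so max_filter_len = 4 * 4096.
def _example_inverse_spectrum_truncation_usage():
    flat_psd = FrequencySeries(numpy.ones(32769), delta_f=0.0625)
    return inverse_spectrum_truncation(flat_psd, max_filter_len=4 * 4096,
                                       low_frequency_cutoff=15.0,
                                       trunc_method='hann')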
def interpolate(series, delta_f):
"""Return a new PSD that has been interpolated to the desired delta_f.
Parameters
----------
series : FrequencySeries
Frequency series to be interpolated.
delta_f : float
The desired delta_f of the output
Returns
-------
interpolated series : FrequencySeries
A new FrequencySeries that has been interpolated.
"""
new_n = (len(series)-1) * series.delta_f / delta_f + 1
samples = numpy.arange(0, numpy.rint(new_n)) * delta_f
interpolated_series = numpy.interp(samples, series.sample_frequencies.numpy(), series.numpy())
return FrequencySeries(interpolated_series, epoch=series.epoch,
delta_f=delta_f, dtype=series.dtype)
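# Illustrative sketch, not part of the original module: refine a PSD from a
# frequency resolution of 0.25 Hz to 0.0625 Hz by linear interpolation.
def _example_interpolate_usage():
    coarse = FrequencySeries(numpy.arange(1.0, 1026.0), delta_f=0.25)
    return interpolate(coarse, 0.0625)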
def bandlimited_interpolate(series, delta_f):
"""Return a new PSD that has been interpolated to the desired delta_f.
Parameters
----------
series : FrequencySeries
Frequency series to be interpolated.
delta_f : float
The desired delta_f of the output
Returns
-------
interpolated series : FrequencySeries
A new FrequencySeries that has been interpolated.
"""
series = FrequencySeries(series, dtype=complex_same_precision_as(series), delta_f=series.delta_f)
N = (len(series) - 1) * 2
delta_t = 1.0 / series.delta_f / N
new_N = int(1.0 / (delta_t * delta_f))
new_n = new_N // 2 + 1
series_in_time = TimeSeries(zeros(N), dtype=real_same_precision_as(series), delta_t=delta_t)
ifft(series, series_in_time)
padded_series_in_time = TimeSeries(zeros(new_N), dtype=series_in_time.dtype, delta_t=delta_t)
padded_series_in_time[0:N//2] = series_in_time[0:N//2]
padded_series_in_time[new_N-N//2:new_N] = series_in_time[N//2:N]
interpolated_series = FrequencySeries(zeros(new_n), dtype=series.dtype, delta_f=delta_f)
fft(padded_series_in_time, interpolated_series)
return interpolated_series
| 12,318
| 34.399425
| 101
|
py
|
pycbc
|
pycbc-master/pycbc/psd/variation.py
|
""" PSD Variation """
import numpy
from numpy.fft import rfft, irfft
import scipy.signal as sig
import pycbc.psd
from pycbc.types import TimeSeries
from pycbc.filter import resample_to_delta_t
def mean_square(data, delta_t, srate, short_stride, stride):
""" Calculate mean square of given time series once per stride
    First of all, this function calculates the mean square of the given
    time series once per short_stride. This is used to find and remove
    outliers due to short glitches. Here an outlier is defined as any
    element greater than twice the average of its closest neighbours.
    Every outlier is replaced with the average of the corresponding
    adjacent elements.
    Then, once per second, the function computes the mean square of the
    smoothed time series within the stride.
Parameters
----------
    data : numpy.ndarray
        Time series data as a plain array.
    delta_t : float
        Duration of the time series, in seconds.
    srate : int
        Sample rate of the data, as if it were given as a TimeSeries.
    short_stride : float
        Stride duration for outlier removal, in seconds.
    stride : float
        Stride duration, in seconds.
Returns
-------
    m_s : list
        Mean square of the time series, computed once per second.
"""
# Calculate mean square of data once per short stride and replace
# outliers
short_ms = numpy.mean(data.reshape(-1, int(srate * short_stride)) ** 2,
axis=1)
# Define an array of averages that is used to substitute outliers
ave = 0.5 * (short_ms[2:] + short_ms[:-2])
outliers = short_ms[1:-1] > (2. * ave)
short_ms[1:-1][outliers] = ave[outliers]
# Calculate mean square of data every step within a window equal to
# stride seconds
m_s = []
inv_time = int(1. / short_stride)
for index in range(int(delta_t - stride + 1)):
m_s.append(numpy.mean(short_ms[inv_time * index:inv_time *
int(index+stride)]))
return m_s
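# Illustrative sketch, not part of the original module: mean square of 16 s
# of white noise sampled at 2048 Hz, using a 0.25 s short stride for outlier
# removal and an 8 s stride, giving one value per second (9 values here).
def _example_mean_square():
    srate = 2048
    data = numpy.random.normal(size=16 * srate)
    return mean_square(data, delta_t=16.0, srate=srate,
                       short_stride=0.25, stride=8.0)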
def calc_filt_psd_variation(strain, segment, short_segment, psd_long_segment,
psd_duration, psd_stride, psd_avg_method, low_freq,
high_freq):
""" Calculates time series of PSD variability
    This function first splits the segment up into 512 second chunks. It
    then calculates the PSD over each 512 second chunk. The PSD is used
    to create a filter that is the composition of three filters:
    1. Bandpass filter between f_low and f_high.
    2. Weighting filter which gives the rough response of a CBC template.
    3. Whitening filter.
    Next, this filter is convolved with the stretch of data. The resulting
    time series is given to the "mean_square" function, which computes the
    mean square of the time series within an 8 second window, once per
    second.
    The result, which by Parseval's theorem is the variance of the S/N in
    that stride, is then stored in a time series.
Parameters
----------
strain : TimeSeries
Input strain time series to estimate PSDs
segment : {float, 8}
Duration of the segments for the mean square estimation in seconds.
short_segment : {float, 0.25}
Duration of the short segments for the outliers removal.
psd_long_segment : {float, 512}
Duration of the long segments for PSD estimation in seconds.
psd_duration : {float, 8}
Duration of FFT segments for long term PSD estimation, in seconds.
psd_stride : {float, 4}
Separation between FFT segments for long term PSD estimation, in
seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq : {float, 20}
Minimum frequency to consider the comparison between PSDs.
high_freq : {float, 480}
Maximum frequency to consider the comparison between PSDs.
Returns
-------
psd_var : TimeSeries
Time series of the variability in the PSD estimation
"""
# Calculate strain precision
if strain.precision == 'single':
fs_dtype = numpy.float32
elif strain.precision == 'double':
fs_dtype = numpy.float64
# Convert start and end times immediately to floats
start_time = float(strain.start_time)
end_time = float(strain.end_time)
# Resample the data
strain = resample_to_delta_t(strain, 1.0 / 2048)
srate = int(strain.sample_rate)
# Fix the step for the PSD estimation and the time to remove at the
# edge of the time series.
step = 1.0
strain_crop = 8.0
# Find the times of the long segments
times_long = numpy.arange(start_time, end_time,
psd_long_segment - 2 * strain_crop
- segment + step)
# Create a bandpass filter between low_freq and high_freq
filt = sig.firwin(4 * srate, [low_freq, high_freq], pass_zero=False,
window='hann', nyq=srate / 2)
filt.resize(int(psd_duration * srate))
# Fourier transform the filter and take the absolute value to get
# rid of the phase.
filt = abs(rfft(filt))
psd_var_list = []
for tlong in times_long:
# Calculate PSD for long segment
if tlong + psd_long_segment <= float(end_time):
astrain = strain.time_slice(tlong, tlong + psd_long_segment)
plong = pycbc.psd.welch(
astrain,
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method)
else:
astrain = strain.time_slice(tlong, end_time)
plong = pycbc.psd.welch(
strain.time_slice(end_time - psd_long_segment,
end_time),
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method)
astrain = astrain.numpy()
freqs = numpy.array(plong.sample_frequencies, dtype=fs_dtype)
plong = plong.numpy()
# Make the weighting filter - bandpass, which weight by f^-7/6,
# and whiten. The normalization is chosen so that the variance
# will be one if this filter is applied to white noise which
# already has a variance of one.
fweight = freqs ** (-7./6.) * filt / numpy.sqrt(plong)
fweight[0] = 0.
norm = (sum(abs(fweight) ** 2) / (len(fweight) - 1.)) ** -0.5
fweight = norm * fweight
fwhiten = numpy.sqrt(2. / srate) / numpy.sqrt(plong)
fwhiten[0] = 0.
full_filt = sig.hann(int(psd_duration * srate)) * numpy.roll(
irfft(fwhiten * fweight), int(psd_duration / 2) * srate)
# Convolve the filter with long segment of data
wstrain = sig.fftconvolve(astrain, full_filt, mode='same')
wstrain = wstrain[int(strain_crop * srate):-int(strain_crop * srate)]
# compute the mean square of the chunk of data
delta_t = len(wstrain) * strain.delta_t
variation = mean_square(wstrain, delta_t, srate, short_segment, segment)
psd_var_list.append(numpy.array(variation, dtype=wstrain.dtype))
# Package up the time series to return
psd_var = TimeSeries(numpy.concatenate(psd_var_list), delta_t=step,
epoch=start_time + strain_crop + segment)
return psd_var
def find_trigger_value(psd_var, idx, start, sample_rate):
""" Find the PSD variation value at a particular time with the filter
method. If the time is outside the timeseries bound, 1. is given.
Parameters
----------
psd_var : TimeSeries
        Time series of the variability in the PSD estimation
idx : numpy.ndarray
Time indices of the triggers
start : float
GPS start time
sample_rate : float
Sample rate defined in ini file
Returns
-------
vals : Array
PSD variation value at a particular time
"""
# Find gps time of the trigger
time = start + idx / sample_rate
# Extract the PSD variation at trigger time through linear
# interpolation
if not hasattr(psd_var, 'cached_psd_var_interpolant'):
from scipy import interpolate
psd_var.cached_psd_var_interpolant = \
interpolate.interp1d(psd_var.sample_times.numpy(), psd_var.numpy(),
fill_value=1.0, bounds_error=False)
vals = psd_var.cached_psd_var_interpolant(time)
return vals
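# Illustrative sketch, not part of the original module: query the PSD
# variation statistic at two trigger indices, one falling inside the time
# series (returns the interpolated value) and one outside (returns 1).
def _example_find_trigger_value():
    psd_var = TimeSeries(numpy.ones(64) * 1.2, delta_t=1.0,
                         epoch=1000000000)
    idx = numpy.array([16 * 256, 1000 * 256])
    return find_trigger_value(psd_var, idx, start=1000000000,
                              sample_rate=256)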
| 8,554
| 37.886364
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/psd/analytical.py
|
#!/usr/bin/python
# Copyright (C) 2012-2016 Alex Nitz, Tito Dal Canton, Leo Singer
# 2022 Shichao Wu
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides reference PSDs from LALSimulation and pycbc.psd.analytical_space.
More information about how to use these ground-based detectors' PSD can be
found in the guide about :ref:`Analytic PSDs from lalsimulation`. For
space-borne ones, see `pycbc.psd.analytical_space` module.
"""
import numbers
from pycbc.types import FrequencySeries
from pycbc.psd.analytical_space import (
analytical_psd_lisa_tdi_1p5_XYZ, analytical_psd_lisa_tdi_2p0_XYZ,
analytical_psd_lisa_tdi_1p5_AE, analytical_psd_lisa_tdi_1p5_T,
sh_transformed_psd_lisa_tdi_XYZ, analytical_psd_lisa_tdi_AE_confusion)
import lal
import numpy
# build a list of usable PSD functions from lalsimulation
_name_prefix = 'SimNoisePSD'
_name_suffix = 'Ptr'
_name_blacklist = ('FromFile', 'MirrorTherm', 'Quantum', 'Seismic', 'Shot', 'SuspTherm')
_psd_list = []
try:
import lalsimulation
for _name in lalsimulation.__dict__:
if _name != _name_prefix and _name.startswith(_name_prefix) and not _name.endswith(_name_suffix):
_name = _name[len(_name_prefix):]
if _name not in _name_blacklist:
_psd_list.append(_name)
except ImportError:
pass
_psd_list = sorted(_psd_list)
# add functions wrapping lalsimulation PSDs
for _name in _psd_list:
exec("""
def %s(length, delta_f, low_freq_cutoff):
\"\"\"Return a FrequencySeries containing the %s PSD from LALSimulation.
\"\"\"
return from_string("%s", length, delta_f, low_freq_cutoff)
""" % (_name, _name, _name))
def get_psd_model_list():
""" Returns a list of available reference PSD functions.
Returns
-------
list
Returns a list of names of reference PSD functions.
"""
return get_lalsim_psd_list() + get_pycbc_psd_list()
def get_lalsim_psd_list():
"""Return a list of available reference PSD functions from LALSimulation.
"""
return _psd_list
def get_pycbc_psd_list():
""" Return a list of available reference PSD functions coded in PyCBC.
Returns
-------
list
Returns a list of names of all reference PSD functions coded in PyCBC.
"""
pycbc_analytical_psd_list = pycbc_analytical_psds.keys()
pycbc_analytical_psd_list = sorted(pycbc_analytical_psd_list)
return pycbc_analytical_psd_list
def from_string(psd_name, length, delta_f, low_freq_cutoff, **kwargs):
"""Generate a frequency series containing a LALSimulation or
built-in space-borne detectors' PSD specified by name.
Parameters
----------
psd_name : string
PSD name as found in LALSimulation (minus the SimNoisePSD prefix)
or pycbc.psd.analytical_space.
length : int
Length of the frequency series in samples.
delta_f : float
Frequency resolution of the frequency series.
low_freq_cutoff : float
Frequencies below this value are set to zero.
**kwargs :
All other keyword arguments are passed to the PSD model.
Returns
-------
psd : FrequencySeries
The generated frequency series.
"""
# check if valid PSD model
if psd_name not in get_psd_model_list():
raise ValueError(psd_name + ' not found among analytical '
'PSD functions.')
# make sure length has the right type for CreateREAL8FrequencySeries
if not isinstance(length, numbers.Integral) or length <= 0:
raise TypeError('length must be a positive integer')
length = int(length)
# if PSD model is in LALSimulation
if psd_name in get_lalsim_psd_list():
lalseries = lal.CreateREAL8FrequencySeries(
'', lal.LIGOTimeGPS(0), 0, delta_f, lal.DimensionlessUnit, length)
try:
func = lalsimulation.__dict__[
_name_prefix + psd_name + _name_suffix]
except KeyError:
func = lalsimulation.__dict__[_name_prefix + psd_name]
func(lalseries, low_freq_cutoff)
else:
lalsimulation.SimNoisePSD(lalseries, 0, func)
psd = FrequencySeries(lalseries.data.data, delta_f=delta_f)
# if PSD model is coded in PyCBC
else:
func = pycbc_analytical_psds[psd_name]
psd = func(length, delta_f, low_freq_cutoff, **kwargs)
# zero-out content below low-frequency cutoff
kmin = int(low_freq_cutoff / delta_f)
psd.data[:kmin] = 0
return psd
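# Illustrative sketch, not part of the original module: generate the
# aLIGOZeroDetHighPower design curve (assuming lalsimulation is installed)
# from 10 Hz up to 2048 Hz with a frequency resolution of 0.25 Hz.
def _example_from_string_usage():
    return from_string('aLIGOZeroDetHighPower', length=8193, delta_f=0.25,
                       low_freq_cutoff=10.0)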
def flat_unity(length, delta_f, low_freq_cutoff):
""" Returns a FrequencySeries of ones above the low_frequency_cutoff.
Parameters
----------
length : int
Length of output Frequencyseries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : int
Low-frequency cutoff for output FrequencySeries.
Returns
-------
FrequencySeries
Returns a FrequencySeries containing the unity PSD model.
"""
fseries = FrequencySeries(numpy.ones(length), delta_f=delta_f)
kmin = int(low_freq_cutoff / fseries.delta_f)
fseries.data[:kmin] = 0
return fseries
# dict of analytical PSDs coded in PyCBC
pycbc_analytical_psds = {
'flat_unity' : flat_unity,
'analytical_psd_lisa_tdi_1p5_XYZ' : analytical_psd_lisa_tdi_1p5_XYZ,
'analytical_psd_lisa_tdi_2p0_XYZ' : analytical_psd_lisa_tdi_2p0_XYZ,
'analytical_psd_lisa_tdi_1p5_AE' : analytical_psd_lisa_tdi_1p5_AE,
'analytical_psd_lisa_tdi_1p5_T' : analytical_psd_lisa_tdi_1p5_T,
'sh_transformed_psd_lisa_tdi_XYZ' : sh_transformed_psd_lisa_tdi_XYZ,
'analytical_psd_lisa_tdi_AE_confusion' : analytical_psd_lisa_tdi_AE_confusion,
}
| 6,384
| 34.870787
| 105
|
py
|
pycbc
|
pycbc-master/pycbc/psd/__init__.py
|
#!/usr/bin/python
# Copyright (C) 2014 Alex Nitz, Andrew Miller, Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import copy
from ligo import segments
from pycbc.psd.read import *
from pycbc.psd.analytical import *
from pycbc.psd.analytical_space import *
from pycbc.psd.estimate import *
from pycbc.psd.variation import *
from pycbc.types import float32,float64
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import DictOptionAction, MultiDetDictOptionAction
from pycbc.types import copy_opts_for_single_ifo
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
def from_cli(opt, length, delta_f, low_frequency_cutoff,
strain=None, dyn_range_factor=1, precision=None):
"""Parses the CLI options related to the noise PSD and returns a
FrequencySeries with the corresponding PSD. If necessary, the PSD is
linearly interpolated to achieve the resolution specified in the CLI.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length,
psd_output).
length : int
The length in samples of the output PSD.
delta_f : float
The frequency step of the output PSD.
low_frequency_cutoff: float
        The low frequency cutoff to use when calculating the PSD.
strain : {None, TimeSeries}
Time series containing the data from which the PSD should be measured,
when psd_estimation is in use.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
-------
psd : FrequencySeries
The frequency series containing the PSD.
"""
f_low = low_frequency_cutoff
sample_rate = (length -1) * 2 * delta_f
try:
psd_estimation = opt.psd_estimation is not None
except AttributeError:
psd_estimation = False
exclusive_opts = [opt.psd_model, opt.psd_file, opt.asd_file,
psd_estimation]
if sum(map(bool, exclusive_opts)) != 1:
err_msg = "You must specify exactly one of '--psd-file', "
err_msg += "'--psd-model', '--asd-file', '--psd-estimation'"
raise ValueError(err_msg)
if (opt.psd_model or opt.psd_file or opt.asd_file):
# PSD from lalsimulation or file
if opt.psd_model:
psd = from_string(opt.psd_model, length, delta_f, f_low,
**opt.psd_extra_args)
elif opt.psd_file or opt.asd_file:
if opt.asd_file:
psd_file_name = opt.asd_file
else:
psd_file_name = opt.psd_file
if psd_file_name.endswith(('.dat', '.txt')):
is_asd_file = bool(opt.asd_file)
psd = from_txt(psd_file_name, length,
delta_f, f_low, is_asd_file=is_asd_file)
            elif opt.asd_file:
                err_msg = "ASD files are only valid as ASCII files (.dat or "
                err_msg += ".txt). Supplied {}.".format(psd_file_name)
                raise ValueError(err_msg)
elif psd_file_name.endswith(('.xml', '.xml.gz')):
psd = from_xml(psd_file_name, length, delta_f, f_low,
ifo_string=opt.psd_file_xml_ifo_string,
root_name=opt.psd_file_xml_root_name)
# Set values < flow to the value at flow (if flow > 0)
kmin = int(low_frequency_cutoff / psd.delta_f)
if kmin > 0:
psd[0:kmin] = psd[kmin]
psd *= dyn_range_factor ** 2
elif psd_estimation:
# estimate PSD from data
psd = welch(strain, avg_method=opt.psd_estimation,
seg_len=int(opt.psd_segment_length * sample_rate + 0.5),
seg_stride=int(opt.psd_segment_stride * sample_rate + 0.5),
num_segments=opt.psd_num_segments,
require_exact_data_fit=False)
if delta_f != psd.delta_f:
psd = interpolate(psd, delta_f)
else:
# Shouldn't be possible to get here
raise ValueError("Shouldn't be possible to raise this!")
if opt.psd_inverse_length:
psd = inverse_spectrum_truncation(psd,
int(opt.psd_inverse_length * sample_rate),
low_frequency_cutoff=f_low,
trunc_method=opt.invpsd_trunc_method)
if hasattr(opt, 'psd_output') and opt.psd_output:
(psd.astype(float64) / (dyn_range_factor ** 2)).save(opt.psd_output)
if precision is None:
return psd
elif precision == 'single':
return psd.astype(float32)
elif precision == 'double':
return psd.astype(float64)
else:
err_msg = "If provided the precision kwarg must be either 'single' "
err_msg += "or 'double'. You provided %s." %(precision)
raise ValueError(err_msg)
def from_cli_single_ifo(opt, length, delta_f, low_frequency_cutoff, ifo,
**kwargs):
"""
Get the PSD for a single ifo when using the multi-detector CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
return from_cli(single_det_opt, length, delta_f, low_frequency_cutoff,
**kwargs)
def from_cli_multi_ifos(opt, length_dict, delta_f_dict,
low_frequency_cutoff_dict, ifos, strain_dict=None,
**kwargs):
"""
Get the PSD for all ifos when using the multi-detector CLI
"""
psd = {}
for ifo in ifos:
if strain_dict is not None:
strain = strain_dict[ifo]
else:
strain = None
psd[ifo] = from_cli_single_ifo(opt, length_dict[ifo], delta_f_dict[ifo],
low_frequency_cutoff_dict[ifo], ifo,
strain=strain, **kwargs)
return psd
def insert_psd_option_group(parser, output=True, include_data_options=True):
"""
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
psd_options = parser.add_argument_group(
"Options to select the method of PSD generation",
"The options --psd-model, --psd-file, --asd-file, "
"and --psd-estimation are mutually exclusive.")
psd_options.add_argument("--psd-model",
help="Get PSD from given analytical model. ",
choices=get_psd_model_list())
psd_options.add_argument("--psd-extra-args",
nargs='+', action=DictOptionAction,
metavar='PARAM:VALUE', default={}, type=float,
help="(optional) Extra arguments passed to "
"the PSD models.")
psd_options.add_argument("--psd-file",
help="Get PSD using given PSD ASCII file")
psd_options.add_argument("--asd-file",
help="Get PSD using given ASD ASCII file")
psd_options.add_argument("--psd-inverse-length", type=float,
help="(Optional) The maximum length of the "
"impulse response of the overwhitening "
"filter (s)")
psd_options.add_argument("--invpsd-trunc-method", default=None,
choices=["hann"],
help="(Optional) What truncation method to use "
"when applying psd-inverse-length. If not "
"provided, a hard truncation will be used.")
# Options specific to XML PSD files
psd_options.add_argument("--psd-file-xml-ifo-string",
help="If using an XML PSD file, use the PSD in "
"the file's PSD dictionary with this "
"ifo string. If not given and only one "
"PSD present in the file return that, if "
"not given and multiple (or zero) PSDs "
"present an exception will be raised.")
psd_options.add_argument("--psd-file-xml-root-name", default='psd',
help="If given use this as the root name for "
"the PSD XML file. If this means nothing "
"to you, then it is probably safe to "
"ignore this option.")
# Options for PSD variation
psd_options.add_argument("--psdvar-segment", type=float,
metavar="SECONDS", help="Length of segment "
"for mean square calculation of PSD variation.")
psd_options.add_argument("--psdvar-short-segment", type=float,
metavar="SECONDS", help="Length of short segment "
"for outliers removal in PSD variability "
"calculation.")
psd_options.add_argument("--psdvar-long-segment", type=float,
metavar="SECONDS", help="Length of long segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar-psd-duration", type=float,
metavar="SECONDS", help="Duration of short "
"segments for PSD estimation.")
psd_options.add_argument("--psdvar-psd-stride", type=float,
metavar="SECONDS", help="Separation between PSD "
"estimation segments.")
psd_options.add_argument("--psdvar-low-freq", type=float, metavar="HERTZ",
help="Minimum frequency to consider in strain "
"bandpass.")
psd_options.add_argument("--psdvar-high-freq", type=float, metavar="HERTZ",
help="Maximum frequency to consider in strain "
"bandpass.")
if include_data_options :
psd_options.add_argument("--psd-estimation",
help="Measure PSD from the data, using "
"given average method.",
choices=["mean", "median", "median-mean"])
psd_options.add_argument("--psd-segment-length", type=float,
help="(Required for --psd-estimation) The "
"segment length for PSD estimation (s)")
psd_options.add_argument("--psd-segment-stride", type=float,
help="(Required for --psd-estimation) "
"The separation between consecutive "
"segments (s)")
psd_options.add_argument("--psd-num-segments", type=int, default=None,
help="(Optional, used only with "
"--psd-estimation). If given, PSDs will "
"be estimated using only this number of "
"segments. If more data is given than "
"needed to make this number of segments "
"then excess data will not be used in "
"the PSD estimate. If not enough data "
"is given, the code will fail.")
if output:
psd_options.add_argument("--psd-output",
help="(Optional) Write PSD to specified file")
return psd_options
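# Illustrative sketch, not part of the original module: build a parser with
# the standard PSD options, parse a simulated command line that selects an
# analytical model (assuming lalsimulation provides aLIGOZeroDetHighPower),
# and generate the corresponding frequency series via from_cli.
def _example_psd_cli_usage():
    import argparse
    parser = argparse.ArgumentParser()
    insert_psd_option_group(parser)
    opt = parser.parse_args(['--psd-model', 'aLIGOZeroDetHighPower'])
    verify_psd_options(opt, parser)
    return from_cli(opt, length=8193, delta_f=0.25,
                    low_frequency_cutoff=10.0)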
def insert_psd_option_group_multi_ifo(parser):
"""
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
psd_options = parser.add_argument_group(
"Options to select the method of PSD generation",
"The options --psd-model, --psd-file, --asd-file, "
"and --psd-estimation are mutually exclusive.")
psd_options.add_argument("--psd-model", nargs="+",
action=MultiDetOptionAction, metavar='IFO:MODEL',
help="Get PSD from given analytical model. "
"Choose from %s" %(', '.join(get_psd_model_list()),))
psd_options.add_argument("--psd-extra-args",
nargs='+', action=MultiDetDictOptionAction,
metavar='DETECTOR:PARAM:VALUE', default={},
type=float, help="(optional) Extra arguments "
"passed to the PSD models.")
psd_options.add_argument("--psd-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Get PSD using given PSD ASCII file")
psd_options.add_argument("--asd-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Get PSD using given ASD ASCII file")
psd_options.add_argument("--psd-estimation", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Measure PSD from the data, using given "
"average method. Choose from "
"mean, median or median-mean.")
psd_options.add_argument("--psd-segment-length", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:LENGTH',
help="(Required for --psd-estimation) The segment "
"length for PSD estimation (s)")
psd_options.add_argument("--psd-segment-stride", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:STRIDE',
help="(Required for --psd-estimation) The separation"
" between consecutive segments (s)")
psd_options.add_argument("--psd-num-segments", type=int, nargs="+",
default=None,
action=MultiDetOptionAction, metavar='IFO:NUM',
help="(Optional, used only with --psd-estimation). "
"If given PSDs will be estimated using only "
"this number of segments. If more data is "
"given than needed to make this number of "
"segments than excess data will not be used in "
"the PSD estimate. If not enough data is given "
"the code will fail.")
psd_options.add_argument("--psd-inverse-length", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:LENGTH',
help="(Optional) The maximum length of the impulse"
" response of the overwhitening filter (s)")
psd_options.add_argument("--invpsd-trunc-method", default=None,
choices=["hann"],
help="(Optional) What truncation method to use "
"when applying psd-inverse-length. If not "
"provided, a hard truncation will be used.")
psd_options.add_argument("--psd-output", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="(Optional) Write PSD to specified file")
# Options for PSD variation
psd_options.add_argument("--psdvar-segment", type=float,
metavar="SECONDS", help="Length of segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar-short-segment", type=float,
metavar="SECONDS", help="Length of short segment "
"for outliers removal in PSD variability "
"calculation.")
psd_options.add_argument("--psdvar-long-segment", type=float,
metavar="SECONDS", help="Length of long segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar-psd-duration", type=float,
metavar="SECONDS", help="Duration of short "
"segments for PSD estimation.")
psd_options.add_argument("--psdvar-psd-stride", type=float,
metavar="SECONDS", help="Separation between PSD "
"estimation segments.")
psd_options.add_argument("--psdvar-low-freq", type=float, metavar="HERTZ",
help="Minimum frequency to consider in strain "
"bandpass.")
psd_options.add_argument("--psdvar-high-freq", type=float, metavar="HERTZ",
help="Maximum frequency to consider in strain "
"bandpass.")
return psd_options
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--psd-file', '--psd-model',
'--psd-estimation', '--asd-file'])
def verify_psd_options(opt, parser):
"""Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
"""
try:
psd_estimation = opt.psd_estimation is not None
except AttributeError:
psd_estimation = False
for opt_group in ensure_one_opt_groups:
ensure_one_opt(opt, parser, opt_group)
if psd_estimation:
required_opts(opt, parser,
['--psd-segment-stride', '--psd-segment-length'],
required_by = "--psd-estimation")
def verify_psd_options_multi_ifo(opt, parser, ifos):
"""Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
"""
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opt, parser, ifo, opt_group)
if opt.psd_estimation[ifo]:
required_opts_multi_ifo(opt, parser, ifo,
['--psd-segment-stride', '--psd-segment-length'],
required_by = "--psd-estimation")
def generate_overlapping_psds(opt, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs to cover a stretch of data. This
allows one to analyse a long stretch of data with PSD measurements that
change with time.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
        The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
--------
psd_and_times : list of (start, end, PSD) tuples
This is a list of tuples containing one entry for each PSD. The first
and second entries (start, end) in each tuple represent the index
range of the gwstrain data that was used to estimate that PSD. The
third entry (psd) contains the PSD estimate between that interval.
"""
if not opt.psd_estimation:
psd = from_cli(opt, flen, delta_f, flow, strain=gwstrain,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times = [ (0, len(gwstrain), psd) ]
return psds_and_times
# Figure out the data length used for PSD generation
seg_stride = int(opt.psd_segment_stride * gwstrain.sample_rate)
seg_len = int(opt.psd_segment_length * gwstrain.sample_rate)
input_data_len = len(gwstrain)
if opt.psd_num_segments is None:
# FIXME: Should we make --psd-num-segments mandatory?
# err_msg = "You must supply --num-segments."
# raise ValueError(err_msg)
num_segments = int(input_data_len // seg_stride) - 1
else:
num_segments = int(opt.psd_num_segments)
psd_data_len = (num_segments - 1) * seg_stride + seg_len
# How many unique PSD measurements is this?
psds_and_times = []
if input_data_len < psd_data_len:
err_msg = "Input data length must be longer than data length needed "
err_msg += "to estimate a PSD. You specified that a PSD should be "
err_msg += "estimated with %d seconds. " %(psd_data_len)
err_msg += "Input data length is %d seconds. " %(input_data_len)
raise ValueError(err_msg)
elif input_data_len == psd_data_len:
num_psd_measurements = 1
psd_stride = 0
else:
num_psd_measurements = int(2 * (input_data_len-1) / psd_data_len)
psd_stride = int((input_data_len - psd_data_len) / num_psd_measurements)
for idx in range(num_psd_measurements):
if idx == (num_psd_measurements - 1):
start_idx = input_data_len - psd_data_len
end_idx = input_data_len
else:
start_idx = psd_stride * idx
end_idx = psd_data_len + psd_stride * idx
strain_part = gwstrain[start_idx:end_idx]
psd = from_cli(opt, flen, delta_f, flow, strain=strain_part,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times.append( (start_idx, end_idx, psd) )
return psds_and_times
def associate_psds_to_segments(opt, fd_segments, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs covering the data in GWstrain.
Then associate these PSDs with the appropriate segment in strain_segments.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
fd_segments : StrainSegments.fourier_segments() object
The fourier transforms of the various analysis segments. The psd
attribute of each segment is updated to point to the appropriate PSD.
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
        The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
"""
psds_and_times = generate_overlapping_psds(opt, gwstrain, flen, delta_f,
flow, dyn_range_factor=dyn_range_factor,
precision=precision)
for fd_segment in fd_segments:
best_psd = None
psd_overlap = 0
inp_seg = segments.segment(fd_segment.seg_slice.start,
fd_segment.seg_slice.stop)
for start_idx, end_idx, psd in psds_and_times:
psd_seg = segments.segment(start_idx, end_idx)
if psd_seg.intersects(inp_seg):
curr_overlap = abs(inp_seg & psd_seg)
if curr_overlap > psd_overlap:
psd_overlap = curr_overlap
best_psd = psd
if best_psd is None:
err_msg = "No PSDs found intersecting segment!"
raise ValueError(err_msg)
fd_segment.psd = best_psd
def associate_psds_to_single_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifo,
dyn_range_factor=1., precision=None):
"""
Associate PSDs to segments for a single ifo when using the multi-detector
CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
associate_psds_to_segments(single_det_opt, fd_segments, gwstrain, flen,
delta_f, flow, dyn_range_factor=dyn_range_factor,
precision=precision)
def associate_psds_to_multi_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifos,
dyn_range_factor=1., precision=None):
"""
Associate PSDs to segments for all ifos when using the multi-detector CLI
"""
for ifo in ifos:
if gwstrain is not None:
strain = gwstrain[ifo]
else:
strain = None
if fd_segments is not None:
segments = fd_segments[ifo]
else:
segments = None
associate_psds_to_single_ifo_segments(opt, segments, strain, flen,
delta_f, flow, ifo, dyn_range_factor=dyn_range_factor,
precision=precision)
| 28,796
| 47.808475
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/psd/analytical_space.py
|
# Copyright (C) 2022 Shichao Wu, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides (semi-)analytical PSDs and sensitivity curves for space
borne detectors, such as LISA. Based on LISA technical note
<LISA-LCST-SGS-TN-001>, LDC manual <LISA-LCST-SGS-MAN-001>,
and paper <10.1088/1361-6382/ab1101>.
"""
import numpy as np
from astropy import constants
from pycbc.psd.read import from_numpy_arrays
def psd_lisa_acc_noise(f, acc_noise_level=3e-15):
""" The PSD of LISA's acceleration noise.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
acc_noise_level : float
The level of acceleration noise.
Returns
-------
s_acc_nu : float or numpy.array
The PSD value or array for acceleration noise.
Notes
-----
    Please see Eq.(11-13) in <LISA-LCST-SGS-TN-001> for more details.
"""
s_acc = acc_noise_level**2 * (1+(4e-4/f)**2)*(1+(f/8e-3)**4)
s_acc_d = s_acc * (2*np.pi*f)**(-4)
s_acc_nu = (2*np.pi*f/constants.c.value)**2 * s_acc_d
return s_acc_nu
def psd_lisa_oms_noise(f, oms_noise_level=15e-12):
""" The PSD of LISA's OMS noise.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
oms_noise_level : float
The level of OMS noise.
Returns
-------
s_oms_nu : float or numpy.array
The PSD value or array for OMS noise.
Notes
-----
    Please see Eq.(9-10) in <LISA-LCST-SGS-TN-001> for more details.
"""
s_oms_d = oms_noise_level**2 * (1+(2e-3/f)**4)
s_oms_nu = s_oms_d * (2*np.pi*f/constants.c.value)**2
return s_oms_nu
def lisa_psd_components(f, acc_noise_level=3e-15, oms_noise_level=15e-12):
""" The PSD of LISA's acceleration and OMS noise.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
[low_freq_component, high_freq_component] : list
The PSD value or array for acceleration and OMS noise.
"""
low_freq_component = psd_lisa_acc_noise(f, acc_noise_level)
high_freq_component = psd_lisa_oms_noise(f, oms_noise_level)
return [low_freq_component, high_freq_component]
def omega_length(f, len_arm=2.5e9):
""" The function to calculate 2*pi*f*LISA_arm_length.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
len_arm : float
The arm length of LISA.
Returns
-------
omega_len : float or numpy.array
The value of 2*pi*f*LISA_arm_length.
"""
omega_len = 2*np.pi*f * len_arm/constants.c.value
return omega_len
def analytical_psd_lisa_tdi_1p5_XYZ(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12):
""" The TDI-1.5 analytical PSD (X,Y,Z channel) for LISA.
Parameters
----------
length : int
Length of output Frequencyseries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
fseries : FrequencySeries
The TDI-1.5 PSD (X,Y,Z channel) for LISA.
Notes
-----
    Please see Eq.(19) in <LISA-LCST-SGS-TN-001> for more details.
"""
len_arm = np.float64(len_arm)
acc_noise_level = np.float64(acc_noise_level)
oms_noise_level = np.float64(oms_noise_level)
psd = []
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
for f in fr:
[s_acc_nu, s_oms_nu] = lisa_psd_components(
f, acc_noise_level, oms_noise_level)
omega_len = omega_length(f, len_arm)
psd.append(16*(np.sin(omega_len))**2 *
(s_oms_nu+s_acc_nu*(3+np.cos(omega_len))))
fseries = from_numpy_arrays(fr, np.array(psd),
length, delta_f, low_freq_cutoff)
return fseries
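# Illustrative usage sketch (not part of the original module): build a TDI-1.5
# X/Y/Z-channel PSD as a FrequencySeries. The grid parameters (length, delta_f,
# low_freq_cutoff) are hypothetical and chosen only for demonstration; the
# helper is never called at import time.
def _example_tdi_1p5_xyz_psd():
    delta_f = 1e-6
    length = int(1e-1 / delta_f) + 1
    return analytical_psd_lisa_tdi_1p5_XYZ(length, delta_f,
                                           low_freq_cutoff=1e-4)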
def analytical_psd_lisa_tdi_2p0_XYZ(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12):
""" The TDI-2.0 analytical PSD (X,Y,Z channel) for LISA.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
fseries : FrequencySeries
The TDI-2.0 PSD (X,Y,Z channel) for LISA.
Notes
-----
    Please see Eq.(20) in <LISA-LCST-SGS-TN-001> for more details.
"""
len_arm = np.float64(len_arm)
acc_noise_level = np.float64(acc_noise_level)
oms_noise_level = np.float64(oms_noise_level)
psd = []
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
for f in fr:
[s_acc_nu, s_oms_nu] = lisa_psd_components(
f, acc_noise_level, oms_noise_level)
omega_len = omega_length(f, len_arm)
psd.append(64*(np.sin(omega_len))**2 * (np.sin(2*omega_len))**2 *
(s_oms_nu+s_acc_nu*(3+np.cos(2*omega_len))))
fseries = from_numpy_arrays(fr, np.array(psd),
length, delta_f, low_freq_cutoff)
return fseries
def analytical_csd_lisa_tdi_1p5_XY(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12):
""" The cross-spectrum density between LISA's TDI channel X and Y.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
fseries : FrequencySeries
The CSD between LISA's TDI-1.5 channel X and Y.
Notes
-----
    Please see Eq.(56) in <LISA-LCST-SGS-MAN-001(Radler)> for more details.
"""
len_arm = np.float64(len_arm)
acc_noise_level = np.float64(acc_noise_level)
oms_noise_level = np.float64(oms_noise_level)
csd = []
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
for f in fr:
omega_len = omega_length(f, len_arm)
[s_acc_nu, s_oms_nu] = lisa_psd_components(
f, acc_noise_level, oms_noise_level)
csd.append(-8*np.sin(omega_len)**2 * np.cos(omega_len) *
(s_oms_nu+4*s_acc_nu))
fseries = from_numpy_arrays(fr, np.array(csd),
length, delta_f, low_freq_cutoff)
return fseries
def analytical_psd_lisa_tdi_1p5_AE(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12):
""" The PSD of LISA's TDI-1.5 channel A and E.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
fseries : FrequencySeries
The PSD of LISA's TDI-1.5 channel A and E.
Notes
-----
    Please see Eq.(58) in <LISA-LCST-SGS-MAN-001(Radler)> for more details.
"""
len_arm = np.float64(len_arm)
acc_noise_level = np.float64(acc_noise_level)
oms_noise_level = np.float64(oms_noise_level)
psd = []
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
for f in fr:
[s_acc_nu, s_oms_nu] = lisa_psd_components(
f, acc_noise_level, oms_noise_level)
omega_len = omega_length(f, len_arm)
psd.append(8*(np.sin(omega_len))**2 *
(4*(1+np.cos(omega_len)+np.cos(omega_len)**2)*s_acc_nu +
(2+np.cos(omega_len))*s_oms_nu))
fseries = from_numpy_arrays(fr, np.array(psd),
length, delta_f, low_freq_cutoff)
return fseries
def analytical_psd_lisa_tdi_1p5_T(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12):
""" The PSD of LISA's TDI-1.5 channel T.
Parameters
----------
length : int
Length of output Frequencyseries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
fseries : FrequencySeries
The PSD of LISA's TDI-1.5 channel T.
Notes
-----
    Please see Eq.(59) in <LISA-LCST-SGS-MAN-001(Radler)> for more details.
"""
len_arm = np.float64(len_arm)
acc_noise_level = np.float64(acc_noise_level)
oms_noise_level = np.float64(oms_noise_level)
psd = []
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
for f in fr:
[s_acc_nu, s_oms_nu] = lisa_psd_components(
f, acc_noise_level, oms_noise_level)
omega_len = omega_length(f, len_arm)
psd.append(32*np.sin(omega_len)**2 * np.sin(omega_len/2)**2 *
(4*s_acc_nu*np.sin(omega_len/2)**2 + s_oms_nu))
fseries = from_numpy_arrays(fr, np.array(psd),
length, delta_f, low_freq_cutoff)
return fseries
def averaged_lisa_fplus_sq_approx(f, len_arm=2.5e9):
""" An approximant for LISA's squared antenna response function,
averaged over sky and polarization angle.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
len_arm : float
The arm length of LISA, in the unit of "m".
Returns
-------
fp_sq_approx : float or numpy.array
The sky and polarization angle averaged squared antenna response.
Notes
-----
    Please see Eq.(36) in <LISA-LCST-SGS-TN-001> for more details.
"""
from scipy.interpolate import interp1d
from astropy.utils.data import download_file
if len_arm != 2.5e9:
raise Exception("Currently only support 'len_arm=2.5e9'.")
# Download the numerical LISA averaged response.
url = "https://zenodo.org/record/7497853/files/AvFXp2_Raw.npy"
file_path = download_file(url, cache=True)
freqs, fp_sq = np.load(file_path)
# Padding the end.
freqs = np.append(freqs, 2)
fp_sq = np.append(fp_sq, 0.0012712348970728724)
fp_sq_interp = interp1d(freqs, fp_sq, kind='linear',
fill_value="extrapolate")
fp_sq_approx = fp_sq_interp(f)/16
return fp_sq_approx
def averaged_response_lisa_tdi_1p5(f, len_arm=2.5e9):
""" LISA's TDI-1.5 response function to GW,
averaged over sky and polarization angle.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
len_arm : float
The arm length of LISA, in the unit of "m".
Returns
-------
response_tdi_1p5 : float or numpy.array
The sky and polarization angle averaged TDI-1.5 response to GW.
Notes
-----
    Please see Eq.(39) in <LISA-LCST-SGS-TN-001> for more details.
"""
omega_len = omega_length(f, len_arm)
ave_fp2 = averaged_lisa_fplus_sq_approx(f, len_arm)
response_tdi_1p5 = (4*omega_len)**2 * np.sin(omega_len)**2 * ave_fp2
return response_tdi_1p5
def averaged_response_lisa_tdi_2p0(f, len_arm=2.5e9):
""" LISA's TDI-2.0 response function to GW,
averaged over sky and polarization angle.
Parameters
----------
f : float or numpy.array
The frequency or frequency range, in the unit of "Hz".
len_arm : float
The arm length of LISA, in the unit of "m".
Returns
-------
response_tdi_2p0 : float or numpy.array
The sky and polarization angle averaged TDI-2.0 response to GW.
Notes
-----
    Please see Eq.(40) in <LISA-LCST-SGS-TN-001> for more details.
"""
omega_len = omega_length(f, len_arm)
response_tdi_1p5 = averaged_response_lisa_tdi_1p5(f, len_arm)
response_tdi_2p0 = response_tdi_1p5 * (2*np.sin(2*omega_len))**2
return response_tdi_2p0
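# Illustrative sketch (not part of the original module): by construction, the
# TDI-2.0 response is the TDI-1.5 response times (2*sin(2*omega*L/c))**2, so
# the ratio of the two should reproduce that factor. The helper name is
# hypothetical, the function is never called at import time, and running it
# requires network access because the tabulated response is downloaded.
def _example_tdi_response_ratio(f=1e-3):
    ratio = (averaged_response_lisa_tdi_2p0(f) /
             averaged_response_lisa_tdi_1p5(f))
    expected = (2 * np.sin(2 * omega_length(f)))**2
    return ratio, expected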
def sensitivity_curve_lisa_semi_analytical(length, delta_f, low_freq_cutoff,
len_arm=2.5e9,
acc_noise_level=3e-15,
oms_noise_level=15e-12):
""" The semi-analytical LISA's sensitivity curve (6-links),
averaged over sky and polarization angle.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
Returns
-------
fseries : FrequencySeries
        The sky and polarization angle averaged semi-analytical
        LISA sensitivity curve (6 links).
Notes
-----
    Please see Eq.(42-43) in <LISA-LCST-SGS-TN-001> for more details.
"""
sense_curve = []
len_arm = np.float64(len_arm)
acc_noise_level = np.float64(acc_noise_level)
oms_noise_level = np.float64(oms_noise_level)
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
fp_sq = averaged_lisa_fplus_sq_approx(fr, len_arm)
for i in range(len(fr)):
[s_acc_nu, s_oms_nu] = lisa_psd_components(
fr[i], acc_noise_level, oms_noise_level)
omega_len = 2*np.pi*fr[i] * len_arm/constants.c.value
sense_curve.append((s_oms_nu + s_acc_nu*(3+np.cos(2*omega_len))) /
(omega_len**2*fp_sq[i]))
fseries = from_numpy_arrays(fr, np.array(sense_curve)/2,
length, delta_f, low_freq_cutoff)
return fseries
def sensitivity_curve_lisa_SciRD(length, delta_f, low_freq_cutoff):
""" The analytical LISA's sensitivity curve in SciRD,
averaged over sky and polarization angle.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
Returns
-------
fseries : FrequencySeries
        The sky and polarization angle averaged analytical
        LISA sensitivity curve in SciRD.
Notes
-----
    Please see Eq.(114) in <LISA-LCST-SGS-TN-001> for more details.
"""
sense_curve = []
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
for f in fr:
s_I = 5.76e-48 * (1+(4e-4/f)**2)
s_II = 3.6e-41
R = 1 + (f/2.5e-2)**2
sense_curve.append(10/3 * (s_I/(2*np.pi*f)**4+s_II) * R)
fseries = from_numpy_arrays(fr, sense_curve,
length, delta_f, low_freq_cutoff)
return fseries
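# Illustrative usage sketch (not part of the original module): generate the
# SciRD sensitivity curve on a coarse, hypothetical frequency grid. The helper
# is never called at import time.
def _example_scird_sensitivity_curve():
    return sensitivity_curve_lisa_SciRD(length=10000, delta_f=1e-5,
                                        low_freq_cutoff=1e-4)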
def sensitivity_curve_lisa_confusion(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12,
base_model="semi", duration=1.0):
""" The LISA's sensitivity curve with Galactic confusion noise,
averaged over sky and polarization angle.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
base_model : string
The base model of sensitivity curve, chosen from "semi" or "SciRD".
duration : float
The duration of observation, between 0 and 10, in the unit of years.
Returns
-------
fseries : FrequencySeries
The sky and polarization angle averaged
        LISA sensitivity curve with Galactic confusion noise.
Notes
-----
    Please see Eq.(85-86) in <LISA-LCST-SGS-TN-001> for more details.
"""
if base_model == "semi":
base_curve = sensitivity_curve_lisa_semi_analytical(
length, delta_f, low_freq_cutoff,
len_arm, acc_noise_level, oms_noise_level)
elif base_model == "SciRD":
base_curve = sensitivity_curve_lisa_SciRD(
length, delta_f, low_freq_cutoff)
else:
raise Exception("Must choose from 'semi' or 'SciRD'.")
if duration < 0 or duration > 10:
raise Exception("Must between 0 and 10.")
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
sh_confusion = []
f1 = 10**(-0.25*np.log10(duration)-2.7)
fk = 10**(-0.27*np.log10(duration)-2.47)
for f in fr:
sh_confusion.append(0.5*1.14e-44*f**(-7/3)*np.exp(-(f/f1)**1.8) *
(1.0+np.tanh((fk-f)/(0.31e-3))))
fseries_confusion = from_numpy_arrays(fr, np.array(sh_confusion),
length, delta_f, low_freq_cutoff)
fseries = from_numpy_arrays(base_curve.sample_frequencies,
base_curve+fseries_confusion,
length, delta_f, low_freq_cutoff)
return fseries
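# Illustrative usage sketch (not part of the original module): quantify the
# effect of the Galactic confusion noise by comparing the SciRD curve with and
# without it for a 4-year observation. The grid parameters are hypothetical
# and the helper is never called at import time.
def _example_confusion_noise_effect():
    kwargs = dict(length=10000, delta_f=1e-5, low_freq_cutoff=1e-4)
    base = sensitivity_curve_lisa_SciRD(**kwargs)
    with_confusion = sensitivity_curve_lisa_confusion(
        base_model="SciRD", duration=4.0, **kwargs)
    return base, with_confusion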
def sh_transformed_psd_lisa_tdi_XYZ(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12,
base_model="semi", duration=1.0,
tdi="1.5"):
""" The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA
with Galactic confusion noise, transformed from LISA sensitivity curve.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
base_model : string
The base model of sensitivity curve, chosen from "semi" or "SciRD".
duration : float
The duration of observation, between 0 and 10, in the unit of years.
tdi : string
        The TDI version; currently only '1.5' and '2.0' are supported.
Returns
-------
fseries : FrequencySeries
The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA with Galactic confusion
noise, transformed from LISA sensitivity curve.
Notes
-----
    Please see Eq.(7,41-43) in <LISA-LCST-SGS-TN-001> for more details.
"""
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
if tdi == "1.5":
response = averaged_response_lisa_tdi_1p5(fr, len_arm)
elif tdi == "2.0":
response = averaged_response_lisa_tdi_2p0(fr, len_arm)
else:
raise Exception("The version of TDI, currently only for 1.5 or 2.0.")
fseries_response = from_numpy_arrays(fr, np.array(response),
length, delta_f, low_freq_cutoff)
sh = sensitivity_curve_lisa_confusion(length, delta_f, low_freq_cutoff,
len_arm, acc_noise_level,
oms_noise_level, base_model,
duration)
psd = 2*sh.data * fseries_response.data
fseries = from_numpy_arrays(sh.sample_frequencies, psd,
length, delta_f, low_freq_cutoff)
return fseries
def semi_analytical_psd_lisa_confusion_noise(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, duration=1.0,
tdi="1.5"):
""" The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA Galactic confusion noise,
no instrumental noise.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
duration : float
The duration of observation, between 0 and 10, in the unit of years.
tdi : string
        The TDI version; currently only '1.5' and '2.0' are supported.
Returns
-------
fseries : FrequencySeries
The TDI-1.5/2.0 PSD (X,Y,Z channel) for LISA Galactic confusion
noise, no instrumental noise.
"""
fr = np.linspace(low_freq_cutoff, (length-1)*2*delta_f, length)
if tdi == "1.5":
response = averaged_response_lisa_tdi_1p5(fr, len_arm)
elif tdi == "2.0":
response = averaged_response_lisa_tdi_2p0(fr, len_arm)
else:
raise Exception("The version of TDI, currently only for 1.5 or 2.0.")
fseries_response = from_numpy_arrays(fr, np.array(response),
length, delta_f, low_freq_cutoff)
sh_confusion = []
f1 = 10**(-0.25*np.log10(duration)-2.7)
fk = 10**(-0.27*np.log10(duration)-2.47)
for f in fr:
sh_confusion.append(0.5*1.14e-44*f**(-7/3)*np.exp(-(f/f1)**1.8) *
(1.0+np.tanh((fk-f)/(0.31e-3))))
fseries_confusion = from_numpy_arrays(fr, np.array(sh_confusion),
length, delta_f, low_freq_cutoff)
psd_confusion = 2*fseries_confusion.data * fseries_response.data
fseries = from_numpy_arrays(fseries_confusion.sample_frequencies,
psd_confusion, length, delta_f,
low_freq_cutoff)
return fseries
def analytical_psd_lisa_tdi_AE_confusion(length, delta_f, low_freq_cutoff,
len_arm=2.5e9, acc_noise_level=3e-15,
oms_noise_level=15e-12,
duration=1.0, tdi="1.5"):
""" The TDI-1.5 PSD (A,E channel) for LISA
with Galactic confusion noise.
Parameters
----------
length : int
        Length of output FrequencySeries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : float
Low-frequency cutoff for output FrequencySeries.
len_arm : float
The arm length of LISA, in the unit of "m".
acc_noise_level : float
The level of acceleration noise.
oms_noise_level : float
The level of OMS noise.
duration : float
The duration of observation, between 0 and 10, in the unit of years.
tdi : string
        The TDI version; currently only '1.5' is supported.
Returns
-------
fseries : FrequencySeries
The TDI-1.5 PSD (A,E channel) for LISA with Galactic confusion
noise.
"""
if tdi != "1.5":
raise Exception("The version of TDI, currently only for 1.5.")
psd_AE = analytical_psd_lisa_tdi_1p5_AE(length, delta_f, low_freq_cutoff,
len_arm, acc_noise_level,
oms_noise_level)
psd_X_confusion = semi_analytical_psd_lisa_confusion_noise(
length, delta_f, low_freq_cutoff,
len_arm, duration, tdi)
    # Here we assume the confusion noise's contribution to the CSD Sxy is
    # negligible at low frequencies, so Sxy is unchanged.
fseries = psd_AE + psd_X_confusion
return fseries
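# Illustrative usage sketch (not part of the original module): an A/E-channel
# PSD including the Galactic confusion background for a 1-year observation, on
# a hypothetical frequency grid. The helper is never called at import time and
# would download the tabulated LISA response if run.
def _example_ae_psd_with_confusion():
    return analytical_psd_lisa_tdi_AE_confusion(
        length=10000, delta_f=1e-5, low_freq_cutoff=1e-4, duration=1.0)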
| 26,286
| 34.331989
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/psd/read.py
|
#!/usr/bin/python
# Copyright (C) 2012 Alex Nitz, Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities to read PSDs from files.
"""
import logging
import numpy
import scipy.interpolate
from pycbc.types import FrequencySeries
def from_numpy_arrays(freq_data, noise_data, length, delta_f, low_freq_cutoff):
"""Interpolate n PSD (as two 1-dimensional arrays of frequency and data)
to the desired length, delta_f and low frequency cutoff.
Parameters
----------
freq_data : array
Array of frequencies.
noise_data : array
        PSD values corresponding to the frequencies in freq_data.
length : int
Length of the frequency series in samples.
delta_f : float
        Frequency resolution of the frequency series in Hertz.
low_freq_cutoff : float
Frequencies below this value are set to zero.
Returns
-------
psd : FrequencySeries
The generated frequency series.
"""
# Only include points above the low frequency cutoff
if freq_data[0] > low_freq_cutoff:
        raise ValueError('Lowest frequency in input data is higher than the '
                         'requested low-frequency cutoff '
                         + str(low_freq_cutoff))
kmin = int(low_freq_cutoff / delta_f)
flow = kmin * delta_f
data_start = (0 if freq_data[0]==low_freq_cutoff else numpy.searchsorted(freq_data, flow) - 1)
# If the cutoff is exactly in the file, start there
if freq_data[data_start+1] == low_freq_cutoff:
data_start += 1
freq_data = freq_data[data_start:]
noise_data = noise_data[data_start:]
if (length - 1) * delta_f > freq_data[-1]:
logging.warning('Requested number of samples exceeds the highest '
'available frequency in the input data, '
'will use max available frequency instead. '
'(requested %f Hz, available %f Hz)',
(length - 1) * delta_f, freq_data[-1])
length = int(freq_data[-1]/delta_f + 1)
flog = numpy.log(freq_data)
slog = numpy.log(noise_data)
psd_interp = scipy.interpolate.interp1d(
flog, slog, fill_value=(slog[0], slog[-1]), bounds_error=False)
psd = numpy.zeros(length, dtype=numpy.float64)
vals = numpy.log(numpy.arange(kmin, length) * delta_f)
psd[kmin:] = numpy.exp(psd_interp(vals))
return FrequencySeries(psd, delta_f=delta_f)
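# Illustrative usage sketch (not part of the original module): interpolate a
# hand-written three-point PSD onto a regular frequency grid. The numerical
# values are hypothetical and only demonstrate the calling convention; the
# helper is never called at import time.
def _example_from_numpy_arrays():
    freqs = numpy.array([10., 100., 1000.])
    psd_vals = numpy.array([1e-40, 1e-46, 1e-44])
    return from_numpy_arrays(freqs, psd_vals, length=2048, delta_f=0.25,
                             low_freq_cutoff=15.)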
def from_txt(filename, length, delta_f, low_freq_cutoff, is_asd_file=True):
"""Read an ASCII file containing one-sided ASD or PSD data and generate
a frequency series with the corresponding PSD. The ASD or PSD data is
interpolated in order to match the desired resolution of the
generated frequency series.
Parameters
----------
filename : string
Path to a two-column ASCII file. The first column must contain
the frequency (positive frequencies only) and the second column
must contain the amplitude density OR power spectral density.
length : int
Length of the frequency series in samples.
delta_f : float
        Frequency resolution of the frequency series in Hertz.
low_freq_cutoff : float
Frequencies below this value are set to zero.
is_asd_file : Boolean
If false assume that the second column holds power spectral density.
If true assume that the second column holds amplitude spectral density.
Default: True
Returns
-------
psd : FrequencySeries
The generated frequency series.
Raises
------
ValueError
If the ASCII file contains negative, infinite or NaN frequencies
or amplitude densities.
"""
file_data = numpy.loadtxt(filename)
if (file_data < 0).any() or \
numpy.logical_not(numpy.isfinite(file_data)).any():
raise ValueError('Invalid data in ' + filename)
freq_data = file_data[:, 0]
noise_data = file_data[:, 1]
if is_asd_file:
noise_data = noise_data ** 2
return from_numpy_arrays(freq_data, noise_data, length, delta_f,
low_freq_cutoff)
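# Illustrative usage sketch (not part of the original module): load a
# two-column ASD file and interpolate it to a 4096-sample PSD. The file name
# is hypothetical; the helper is never called at import time.
def _example_from_txt():
    return from_txt('H1_asd.txt', length=4096, delta_f=0.25,
                    low_freq_cutoff=20., is_asd_file=True)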
def from_xml(filename, length, delta_f, low_freq_cutoff, ifo_string=None,
root_name='psd'):
"""Read an ASCII file containing one-sided ASD or PSD data and generate
a frequency series with the corresponding PSD. The ASD or PSD data is
interpolated in order to match the desired resolution of the
generated frequency series.
Parameters
----------
    filename : string
        Path to a LIGOLW XML file containing the PSD(s), in the format
        read by lal.series.read_psd_xmldoc().
length : int
Length of the frequency series in samples.
delta_f : float
        Frequency resolution of the frequency series in Hertz.
low_freq_cutoff : float
Frequencies below this value are set to zero.
ifo_string : string
Use the PSD in the file's PSD dictionary with this ifo string.
If not given and only one PSD present in the file return that, if not
given and multiple (or zero) PSDs present an exception will be raised.
root_name : string (default='psd')
If given use this as the root name for the PSD XML file. If this means
nothing to you, then it is probably safe to ignore this option.
Returns
-------
psd : FrequencySeries
The generated frequency series.
"""
import lal.series
from ligo.lw import utils as ligolw_utils
with open(filename, 'rb') as fp:
ct_handler = lal.series.PSDContentHandler
xml_doc = ligolw_utils.load_fileobj(fp, compress='auto',
contenthandler=ct_handler)
psd_dict = lal.series.read_psd_xmldoc(xml_doc, root_name=root_name)
if ifo_string is not None:
psd_freq_series = psd_dict[ifo_string]
elif len(psd_dict.keys()) == 1:
psd_freq_series = psd_dict[tuple(psd_dict.keys())[0]]
else:
err_msg = "No ifo string given and input XML file contains not "
err_msg += "exactly one PSD. Specify which PSD you want to use."
raise ValueError(err_msg)
noise_data = psd_freq_series.data.data[:]
freq_data = numpy.arange(len(noise_data)) * psd_freq_series.deltaF
return from_numpy_arrays(freq_data, noise_data, length, delta_f,
low_freq_cutoff)
| 7,153
| 37.053191
| 98
|
py
|
pycbc
|
pycbc-master/pycbc/io/ligolw.py
|
# Copyright (C) 2020 Leo Singer, 2021 Tito Dal Canton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Tools for dealing with LIGOLW XML files."""
import os
import sys
import numpy
from ligo.lw import lsctables
from ligo.lw import ligolw
from ligo.lw.ligolw import Param, LIGOLWContentHandler \
as OrigLIGOLWContentHandler
from ligo.lw.lsctables import TableByName
from ligo.lw.table import Column, TableStream
from ligo.lw.types import FormatFunc, FromPyType, ToPyType
from ligo.lw.utils import process as ligolw_process
from ligo.lw.param import Param as LIGOLWParam
from ligo.lw.array import Array as LIGOLWArray
import pycbc.version as pycbc_version
__all__ = (
'default_null_value',
'return_empty_sngl',
'return_search_summary',
'create_process_table',
'legacy_row_id_converter',
'get_table_columns',
'LIGOLWContentHandler'
)
ROWID_PYTYPE = int
ROWID_TYPE = FromPyType[ROWID_PYTYPE]
ROWID_FORMATFUNC = FormatFunc[ROWID_TYPE]
IDTypes = set([u"ilwd:char", u"ilwd:char_u"])
def default_null_value(col_name, col_type):
"""
Associate a sensible "null" default value to a given LIGOLW column type.
"""
if col_type in ['real_4', 'real_8']:
return 0.
if col_type in ['int_4s', 'int_8s']:
# this case includes row IDs
return 0
if col_type == 'lstring':
return ''
raise NotImplementedError(('Do not know how to initialize column '
'{} of type {}').format(col_name, col_type))
def return_empty_sngl(nones=False):
"""
Function to create a SnglInspiral object where all columns are populated
    but all are set to values that test False (i.e. strings to '', floats/ints
to 0, ...). This avoids errors when you try to create a table containing
columns you don't care about, but which still need populating. NOTE: This
will also produce a process_id and event_id with 0 values. For most
applications these should be set to their correct values.
Parameters
----------
nones : bool (False)
If True, just set all columns to None.
Returns
--------
lsctables.SnglInspiral
The "empty" SnglInspiral object.
"""
sngl = lsctables.SnglInspiral()
cols = lsctables.SnglInspiralTable.validcolumns
for entry in cols:
col_name = Column.ColumnName(entry)
value = None if nones else default_null_value(col_name, cols[entry])
setattr(sngl, col_name, value)
return sngl
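# Illustrative usage sketch (not part of the original module): create an
# "empty" SnglInspiral row and fill in only the columns of interest. The
# values are hypothetical; the helper is never called at import time.
def _example_sngl_inspiral_row():
    row = return_empty_sngl()
    row.mass1 = 1.4
    row.mass2 = 1.4
    row.event_id = 1
    return row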
def return_search_summary(start_time=0, end_time=0, nevents=0, ifos=None):
"""
Function to create a SearchSummary object where all columns are populated
    but all are set to values that test False (i.e. strings to '', floats/ints
to 0, ...). This avoids errors when you try to create a table containing
columns you don't care about, but which still need populating. NOTE: This
    will also produce a process_id with a 0 value. For most applications this
    should be set to its correct value.
It then populates columns if given them as options.
Returns
--------
    lsctables.SearchSummary
The "empty" SearchSummary object.
"""
if ifos is None:
ifos = []
# create an empty search summary
search_summary = lsctables.SearchSummary()
cols = lsctables.SearchSummaryTable.validcolumns
for entry in cols:
col_name = Column.ColumnName(entry)
value = default_null_value(col_name, cols[entry])
setattr(search_summary, col_name, value)
# fill in columns
if ifos:
search_summary.instruments = ifos
if nevents:
search_summary.nevents = nevents
if start_time and end_time:
search_summary.in_start_time = int(start_time)
search_summary.in_start_time_ns = int(start_time % 1 * 1e9)
search_summary.in_end_time = int(end_time)
search_summary.in_end_time_ns = int(end_time % 1 * 1e9)
search_summary.out_start_time = int(start_time)
search_summary.out_start_time_ns = int(start_time % 1 * 1e9)
search_summary.out_end_time = int(end_time)
search_summary.out_end_time_ns = int(end_time % 1 * 1e9)
return search_summary
def create_process_table(document, program_name=None, detectors=None,
comment=None, options=None):
"""Create a LIGOLW process table with sane defaults, add it to a LIGOLW
document, and return it.
"""
if program_name is None:
program_name = os.path.basename(sys.argv[0])
if options is None:
options = {}
# ligo.lw does not like `cvs_entry_time` being an empty string
cvs_entry_time = pycbc_version.date or None
opts = options.copy()
key_del = []
for key, value in opts.items():
if type(value) not in tuple(FromPyType.keys()):
key_del.append(key)
if len(key_del) != 0:
for key in key_del:
opts.pop(key)
process = ligolw_process.register_to_xmldoc(
document, program_name, opts, version=pycbc_version.version,
cvs_repository='pycbc/'+pycbc_version.git_branch,
cvs_entry_time=cvs_entry_time, instruments=detectors,
comment=comment)
return process
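# Illustrative usage sketch (not part of the original module): register a
# process entry in a fresh LIGOLW document. The program name and options are
# hypothetical; the helper is never called at import time.
def _example_process_table():
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    return create_process_table(xmldoc, program_name='pycbc_example',
                                detectors=['H1', 'L1'],
                                options={'low-frequency-cutoff': 20.0})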
def legacy_row_id_converter(ContentHandler):
"""Convert from old-style to new-style row IDs on the fly.
This is loosely adapted from :func:`ligo.lw.utils.ilwd.strip_ilwdchar`.
Notes
-----
When building a ContentHandler, this must be the _outermost_ decorator,
outside of :func:`ligo.lw.lsctables.use_in`, :func:`ligo.lw.param.use_in`,
or :func:`ligo.lw.table.use_in`.
"""
def endElementNS(self, uri_localname, qname,
__orig_endElementNS=ContentHandler.endElementNS):
"""Convert values of <Param> elements from ilwdchar to int."""
if isinstance(self.current, Param) and self.current.Type in IDTypes:
old_type = ToPyType[self.current.Type]
old_val = str(old_type(self.current.pcdata))
new_value = ROWID_PYTYPE(old_val.split(":")[-1])
self.current.Type = ROWID_TYPE
self.current.pcdata = ROWID_FORMATFUNC(new_value)
__orig_endElementNS(self, uri_localname, qname)
remapped = {}
def startColumn(self, parent, attrs,
__orig_startColumn=ContentHandler.startColumn):
"""Convert types in <Column> elements from ilwdchar to int.
Notes
-----
This method is adapted from
:func:`ligo.lw.utils.ilwd.strip_ilwdchar`.
"""
result = __orig_startColumn(self, parent, attrs)
# If this is an ilwdchar column, then create a function to convert its
# rows' values for use in the startStream method below.
if result.Type in IDTypes:
old_type = ToPyType[result.Type]
def converter(old_value):
return ROWID_PYTYPE(str(old_type(old_value)).split(":")[-1])
remapped[(id(parent), result.Name)] = converter
result.Type = ROWID_TYPE
# If this is an ilwdchar column, then normalize the column name.
if parent.Name in TableByName:
validcolumns = TableByName[parent.Name].validcolumns
if result.Name not in validcolumns:
stripped_column_to_valid_column = {
Column.ColumnName(name): name for name in validcolumns}
if result.Name in stripped_column_to_valid_column:
result.setAttribute(
'Name', stripped_column_to_valid_column[result.Name])
return result
def startStream(self, parent, attrs,
__orig_startStream=ContentHandler.startStream):
"""Convert values in table <Stream> elements from ilwdchar to int.
Notes
-----
This method is adapted from
:meth:`ligo.lw.table.TableStream.config`.
"""
result = __orig_startStream(self, parent, attrs)
if isinstance(result, TableStream):
loadcolumns = set(parent.columnnames)
if parent.loadcolumns is not None:
# FIXME: convert loadcolumns attributes to sets to
# avoid the conversion.
loadcolumns &= set(parent.loadcolumns)
result._tokenizer.set_types([
(remapped.pop((id(parent), colname), pytype)
if colname in loadcolumns else None)
for pytype, colname
in zip(parent.columnpytypes, parent.columnnames)])
return result
ContentHandler.endElementNS = endElementNS
ContentHandler.startColumn = startColumn
ContentHandler.startStream = startStream
return ContentHandler
def _build_series(series, dim_names, comment, delta_name, delta_unit):
Attributes = ligolw.sax.xmlreader.AttributesImpl
elem = ligolw.LIGO_LW(
Attributes({'Name': str(series.__class__.__name__)}))
if comment is not None:
elem.appendChild(ligolw.Comment()).pcdata = comment
elem.appendChild(ligolw.Time.from_gps(series.epoch, 'epoch'))
elem.appendChild(LIGOLWParam.from_pyvalue('f0', series.f0, unit='s^-1'))
delta = getattr(series, delta_name)
if numpy.iscomplexobj(series.data.data):
data = numpy.row_stack((
numpy.arange(len(series.data.data)) * delta,
series.data.data.real,
series.data.data.imag
))
else:
data = numpy.row_stack((
numpy.arange(len(series.data.data)) * delta,
series.data.data
))
a = LIGOLWArray.build(series.name, data, dim_names=dim_names)
a.Unit = str(series.sampleUnits)
dim0 = a.getElementsByTagName(ligolw.Dim.tagName)[0]
dim0.Unit = delta_unit
dim0.Start = series.f0
dim0.Scale = delta
elem.appendChild(a)
return elem
def make_psd_xmldoc(psddict, xmldoc=None):
"""Add a set of PSDs to a LIGOLW XML document. If the document is not
given, a new one is created first.
"""
xmldoc = ligolw.Document() if xmldoc is None else xmldoc.childNodes[0]
# the PSDs must be children of a LIGO_LW with name "psd"
root_name = 'psd'
Attributes = ligolw.sax.xmlreader.AttributesImpl
lw = xmldoc.appendChild(
ligolw.LIGO_LW(Attributes({'Name': root_name})))
for instrument, psd in psddict.items():
xmlseries = _build_series(
psd,
('Frequency,Real', 'Frequency'),
None,
'deltaF',
's^-1'
)
fs = lw.appendChild(xmlseries)
fs.appendChild(LIGOLWParam.from_pyvalue('instrument', instrument))
return xmldoc
def snr_series_to_xml(snr_series, document, sngl_inspiral_id):
"""Save an SNR time series into an XML document, in a format compatible
with BAYESTAR.
"""
snr_lal = snr_series.lal()
snr_lal.name = 'snr'
snr_lal.sampleUnits = ''
snr_xml = _build_series(
snr_lal,
('Time', 'Time,Real,Imaginary'),
None,
'deltaT',
's'
)
snr_node = document.childNodes[-1].appendChild(snr_xml)
eid_param = LIGOLWParam.from_pyvalue('event_id', sngl_inspiral_id)
snr_node.appendChild(eid_param)
def get_table_columns(table):
"""Return a list of columns that are present in the given table, in a
format that can be passed to `lsctables.New()`.
The split on ":" is needed for columns like `process:process_id`, which
must be listed as `process:process_id` in `lsctables.New()`, but are
listed as just `process_id` in the `columnnames` attribute of the given
table.
"""
columns = []
for col in table.validcolumns:
att = col.split(':')[-1]
if att in table.columnnames:
columns.append(col)
return columns
@legacy_row_id_converter
@lsctables.use_in
class LIGOLWContentHandler(OrigLIGOLWContentHandler):
"Dummy class needed for loading LIGOLW files"
| 12,522
| 34.985632
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/io/record.py
|
# Copyright (C) 2015 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides definitions of, and helper functions for, FieldArray.
FieldArray is a wrapper of numpy recarrays with additional functionality
useful for storing and retrieving data created by a search for gravitational
waves.
"""
import types, re, copy, numpy, inspect
from ligo.lw import types as ligolw_types
from pycbc import coordinates, conversions, cosmology
from pycbc.population import population_models
from pycbc.waveform import parameters
# what functions are given to the eval in FieldArray's __getitem__:
_numpy_function_lib = {_x: _y for _x,_y in numpy.__dict__.items()
if isinstance(_y, (numpy.ufunc, float))}
#
# =============================================================================
#
# Data type mappings
#
# =============================================================================
#
# add ligolw_types to numpy sctypeDict
numpy.sctypeDict.update(ligolw_types.ToNumPyType)
# Annoyingly, numpy has no way to store NaNs in an integer field to indicate
# the equivalent of None. This can be problematic for fields that store ids:
# if an array has an id field with value 0, it isn't clear if this is because
# the id is the first element, or if no id was set. To clear up the ambiguity,
# we define here an integer to indicate 'id not set'.
ID_NOT_SET = -1
EMPTY_OBJECT = None
VIRTUALFIELD_DTYPE = 'VIRTUAL'
def set_default_empty(array):
if array.dtype.names is None:
# scalar dtype, just set
if array.dtype.str[1] == 'i':
# integer, set to ID_NOT_SET
array[:] = ID_NOT_SET
elif array.dtype.str[1] == 'O':
# object, set to EMPTY_OBJECT
array[:] = EMPTY_OBJECT
else:
for name in array.dtype.names:
set_default_empty(array[name])
def default_empty(shape, dtype):
"""Numpy's empty array can have random values in it. To prevent that, we
    define here a default empty array. This default empty is a numpy.zeros
array, except that objects are set to None, and all ints to ID_NOT_SET.
"""
default = numpy.zeros(shape, dtype=dtype)
set_default_empty(default)
return default
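# Illustrative usage sketch (not part of the original module): build a
# "default empty" structured array; integer fields are initialized to
# ID_NOT_SET (-1) and object fields to None rather than left uninitialized.
# The dtype is hypothetical; the helper is never called at import time.
def _example_default_empty():
    dt = numpy.dtype([('event_id', int), ('comment', object), ('snr', float)])
    return default_empty(3, dtype=dt)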
# set default data types
_default_types_status = {
'default_strlen': 50,
'ilwd_as_int': True,
'lstring_as_obj': False
}
def lstring_as_obj(true_or_false=None):
"""Toggles whether lstrings should be treated as strings or as objects.
When FieldArrays is first loaded, the default is True.
Parameters
----------
true_or_false : {None|bool}
Pass True to map lstrings to objects; False otherwise. If None
provided, just returns the current state.
Return
------
current_stat : bool
The current state of lstring_as_obj.
Examples
--------
>>> from pycbc.io import FieldArray
>>> FieldArray.lstring_as_obj()
True
>>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
FieldArray([(0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,),
(0.0,), (0.0,)],
dtype=[('foo', 'O')])
>>> FieldArray.lstring_as_obj(False)
False
>>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
FieldArray([('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',),
('0.0',), ('0.0',), ('0.0',), ('0.0',)],
dtype=[('foo', 'S50')])
"""
if true_or_false is not None:
_default_types_status['lstring_as_obj'] = true_or_false
# update the sctypeDict
numpy.sctypeDict[u'lstring'] = numpy.object_ \
if _default_types_status['lstring_as_obj'] \
else 'S%i' % _default_types_status['default_strlen']
return _default_types_status['lstring_as_obj']
def ilwd_as_int(true_or_false=None):
"""Similar to lstring_as_obj, sets whether or not ilwd:chars should be
treated as strings or as ints. Default is True.
"""
if true_or_false is not None:
_default_types_status['ilwd_as_int'] = true_or_false
numpy.sctypeDict[u'ilwd:char'] = int \
if _default_types_status['ilwd_as_int'] \
            else 'S%i' % _default_types_status['default_strlen']
return _default_types_status['ilwd_as_int']
def default_strlen(strlen=None):
"""Sets the default string length for lstring and ilwd:char, if they are
treated as strings. Default is 50.
"""
if strlen is not None:
_default_types_status['default_strlen'] = strlen
# update the sctypeDicts as needed
lstring_as_obj(_default_types_status['lstring_as_obj'])
ilwd_as_int(_default_types_status['ilwd_as_int'])
return _default_types_status['default_strlen']
# set the defaults
lstring_as_obj(True)
ilwd_as_int(True)
#
# =============================================================================
#
# Helper functions
#
# =============================================================================
#
#
# Argument syntax parsing
#
# this parser will pull out sufields as separate identifiers from their parent
# field; e.g., foo.bar --> ['foo', 'bar']
_pyparser = re.compile(r'(?P<identifier>[\w_][\w\d_]*)')
# this parser treats subfields as one identifier with their parent field;
# e.g., foo.bar --> ['foo.bar']
_fieldparser = re.compile(r'(?P<identifier>[\w_][.\w\d_]*)')
def get_vars_from_arg(arg):
"""Given a python string, gets the names of any identifiers use in it.
For example, if ``arg = '3*narf/foo.bar'``, this will return
``set(['narf', 'foo', 'bar'])``.
"""
return set(_pyparser.findall(arg))
def get_fields_from_arg(arg):
"""Given a python string, gets FieldArray field names used in it. This
differs from get_vars_from_arg in that any identifier with a '.' in it
will be treated as one identifier. For example, if
``arg = '3*narf/foo.bar'``, this will return ``set(['narf', 'foo.bar'])``.
"""
return set(_fieldparser.findall(arg))
# this parser looks for fields inside a class method function. This is done by
# looking for variables that start with self.{x} or self["{x}"]; e.g.,
# self.a.b*3 + self.c, self['a.b']*3 + self.c, self.a.b*3 + self["c"], all
# return set('a.b', 'c').
_instfieldparser = re.compile(
r'''self(?:\.|(?:\[['"]))(?P<identifier>[\w_][.\w\d_]*)''')
def get_instance_fields_from_arg(arg):
"""Given a python string definining a method function on an instance of an
FieldArray, returns the field names used in it. This differs from
get_fields_from_arg in that it looks for variables that start with 'self'.
"""
return set(_instfieldparser.findall(arg))
def get_needed_fieldnames(arr, names):
"""Given a FieldArray-like array and a list of names, determines what
fields are needed from the array so that using the names does not result
in an error.
Parameters
----------
arr : instance of a FieldArray or similar
The array from which to determine what fields to get.
names : (list of) strings
A list of the names that are desired. The names may be either a field,
a virtualfield, a property, a method of ``arr``, or any function of
these. If a virtualfield/property or a method, the source code of that
property/method will be analyzed to pull out what fields are used in
it.
Returns
-------
set
The set of the fields needed to evaluate the names.
"""
fieldnames = set([])
# we'll need the class that the array is an instance of to evaluate some
# things
cls = arr.__class__
if isinstance(names, str):
names = [names]
    # parse names for variables, in case some of them are functions of fields
parsed_names = set([])
for name in names:
parsed_names.update(get_fields_from_arg(name))
# only include things that are in the array's namespace
names = list(parsed_names & (set(dir(arr)) | set(arr.fieldnames)))
for name in names:
if name in arr.fieldnames:
# is a field, just add the name
fieldnames.update([name])
else:
# the name is either a virtualfield, a method, or some other
# property; we need to evaluate the source code to figure out what
# fields we need
try:
# the underlying functions of properties need to be retrieved
# using their fget attribute
func = getattr(cls, name).fget
except AttributeError:
# no fget attribute, assume is an instance method
func = getattr(arr, name)
# evaluate the source code of the function
try:
sourcecode = inspect.getsource(func)
except TypeError:
# not a function, just pass
continue
# evaluate the source code for the fields
possible_fields = get_instance_fields_from_arg(sourcecode)
# some of the variables returned by possible fields may themselves
# be methods/properties that depend on other fields. For instance,
# mchirp relies on eta and mtotal, which each use mass1 and mass2;
            # we therefore need to analyze each of the possible fields
fieldnames.update(get_needed_fieldnames(arr, possible_fields))
return fieldnames
def get_dtype_descr(dtype):
"""Numpy's ``dtype.descr`` will return empty void fields if a dtype has
offsets specified. This function tries to fix that by not including
fields that have no names and are void types.
"""
dts = []
for dt in dtype.descr:
if (dt[0] == '' and dt[1][1] == 'V'):
continue
# Downstream codes (numpy, etc) can't handle metadata in dtype
if isinstance(dt[1], tuple):
dt = (dt[0], dt[1][0])
dts.append(dt)
return dts
def combine_fields(dtypes):
"""Combines the fields in the list of given dtypes into a single dtype.
Parameters
----------
dtypes : (list of) numpy.dtype(s)
Either a numpy.dtype, or a list of numpy.dtypes.
Returns
-------
numpy.dtype
A new dtype combining the fields in the list of dtypes.
"""
if not isinstance(dtypes, list):
dtypes = [dtypes]
    # Note: in case any of the dtypes have offsets, we won't include any fields
# that have no names and are void
new_dt = numpy.dtype([dt for dtype in dtypes \
for dt in get_dtype_descr(dtype)])
return new_dt
def _ensure_array_list(arrays):
"""Ensures that every element in a list is an instance of a numpy array."""
# Note: the isinstance test is needed below so that instances of FieldArray
# are not converted to numpy arrays
return [numpy.array(arr, ndmin=1) if not isinstance(arr, numpy.ndarray)
else arr for arr in arrays]
def merge_arrays(merge_list, names=None, flatten=True, outtype=None):
"""Merges the given arrays into a single array. The arrays must all have
the same shape. If one or more of the given arrays has multiple fields,
all of the fields will be included as separate fields in the new array.
Parameters
----------
merge_list : list of arrays
The list of arrays to merge.
names : {None | sequence of strings}
Optional, the names of the fields in the output array. If flatten is
True, must be the same length as the total number of fields in
        merge_list. Otherwise, must be the same length as the number of
arrays in merge_list. If None provided, and flatten is True, names
used will be the same as the name of the fields in the given arrays.
If the datatype has no name, or flatten is False, the new field will
be `fi` where i is the index of the array in arrays.
flatten : bool
Make all of the fields in the given arrays separate fields in the
new array. Otherwise, each array will be added as a field. If an
array has fields, they will be subfields in the output array. Default
is True.
outtype : {None | class}
Cast the new array to the given type. Default is to return a
numpy structured array.
Returns
-------
new array : {numpy.ndarray | outtype}
A new array with all of the fields in all of the arrays merged into
a single array.
"""
# make sure everything in merge_list is an array
merge_list = _ensure_array_list(merge_list)
if not all(merge_list[0].shape == arr.shape for arr in merge_list):
raise ValueError("all of the arrays in merge_list must have the " +
"same shape")
if flatten:
new_dt = combine_fields([arr.dtype for arr in merge_list])
else:
new_dt = numpy.dtype([('f%i' %ii, arr.dtype.descr) \
for ii,arr in enumerate(merge_list)])
new_arr = merge_list[0].__class__(merge_list[0].shape, dtype=new_dt)
# ii is a counter to keep track of which fields from the new array
# go with which arrays in merge list
ii = 0
for arr in merge_list:
if arr.dtype.names is None:
new_arr[new_dt.names[ii]] = arr
ii += 1
else:
for field in arr.dtype.names:
new_arr[field] = arr[field]
ii += 1
# set the names if desired
if names is not None:
new_arr.dtype.names = names
# ditto the outtype
if outtype is not None:
new_arr = new_arr.view(type=outtype)
return new_arr
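# Illustrative usage sketch (not part of the original module): merge two plain
# arrays into a single structured array with named fields. The field names are
# hypothetical; the helper is never called at import time.
def _example_merge_arrays():
    masses = numpy.arange(5.)
    spins = numpy.linspace(0., 0.9, 5)
    return merge_arrays([masses, spins], names=['mass1', 'spin1z'],
                        flatten=True)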
def add_fields(input_array, arrays, names=None, assubarray=False):
"""Adds the given array(s) as new field(s) to the given input array.
Returns a new instance of the input_array with the new fields added.
Parameters
----------
input_array : instance of a numpy.ndarray or numpy recarray
        The array to add the fields to.
arrays : (list of) numpy array(s)
The arrays to add. If adding multiple arrays, must be a list;
if adding a single array, can just be that array.
names : (list of) strings
Optional, the name(s) of the new fields in the output array. If
adding multiple fields, must be a list of strings with the same
length as the list of arrays. If None provided, names used will
be the same as the name of the datatype in the given arrays.
If the datatype has no name, the new field will be ``'fi'`` where
i is the index of the array in arrays.
assubarray : bool
Add the list of arrays as a single subarray field. If True, and names
provided, names should be a string or a length-1 sequence. Default is
False, in which case each array will be added as a separate field.
Returns
-------
new_array : new instance of `input_array`
A copy of the `input_array` with the desired fields added.
"""
if not isinstance(arrays, list):
arrays = [arrays]
# ensure that all arrays in arrays are arrays
arrays = _ensure_array_list(arrays)
# set the names
if names is not None:
if isinstance(names, str):
names = [names]
# check if any names are subarray names; if so, we have to add them
# separately
subarray_names = [name for name in names if len(name.split('.')) > 1]
else:
subarray_names = []
if any(subarray_names):
subarrays = [arrays[ii] for ii,name in enumerate(names) \
if name in subarray_names]
# group together by subarray
groups = {}
for name,arr in zip(subarray_names, subarrays):
key = name.split('.')[0]
subkey = '.'.join(name.split('.')[1:])
try:
groups[key].append((subkey, arr))
except KeyError:
groups[key] = [(subkey, arr)]
# now cycle over the groups, adding all of the fields in each group
# as a subarray
for group_name in groups:
# we'll create a dictionary out of the subarray field names ->
# subarrays
thisdict = dict(groups[group_name])
# check if the input array has this field; if so, remove it, then
# add it back with the other new arrays
if group_name in input_array.fieldnames:
# get the data
new_subarray = input_array[group_name]
# add the new fields to the subarray
new_subarray = add_fields(new_subarray, thisdict.values(),
thisdict.keys())
# remove the original from the input array
input_array = input_array.without_fields(group_name)
else:
new_subarray = thisdict.values()
# add the new subarray to input_array as a subarray
input_array = add_fields(input_array, new_subarray,
names=group_name, assubarray=True)
# set the subarray names
input_array[group_name].dtype.names = thisdict.keys()
# remove the subarray names from names
keep_idx = [ii for ii,name in enumerate(names) \
if name not in subarray_names]
names = [names[ii] for ii in keep_idx]
# if there's nothing left, just return
if names == []:
return input_array
# also remove the subarray arrays
arrays = [arrays[ii] for ii in keep_idx]
if assubarray:
# merge all of the arrays into a single array
if len(arrays) > 1:
arrays = [merge_arrays(arrays, flatten=True)]
# now merge all the fields as a single subarray
merged_arr = numpy.empty(len(arrays[0]),
dtype=[('f0', arrays[0].dtype.descr)])
merged_arr['f0'] = arrays[0]
arrays = [merged_arr]
merge_list = [input_array] + arrays
if names is not None:
names = list(input_array.dtype.names) + names
# merge into a single array
return merge_arrays(merge_list, names=names, flatten=True,
outtype=type(input_array))
#
# =============================================================================
#
# Base FieldArray definitions
#
# =============================================================================
#
# We'll include functions in various pycbc modules in FieldArray's function
# library. All modules used must have an __all__ list defined.
_modules_for_functionlib = [conversions, coordinates, cosmology,
population_models]
_fieldarray_functionlib = {_funcname : getattr(_mod, _funcname)
for _mod in _modules_for_functionlib
for _funcname in getattr(_mod, '__all__')}
class FieldArray(numpy.recarray):
"""
Subclass of numpy.recarray that adds additional functionality.
Initialization is done the same way as numpy.recarray, with the addition
that a "name" attribute can be passed to name the output array. When you
initialize an array it creates a new zeroed array. This is similar to
numpy.recarray, except that ``numpy.recarray(shape)`` will create an empty
array, whereas here the default is to zero all of the elements (see
``default_zero`` for definition of zero for different data types). If you
prefer an empty array, set ``zero=False`` when initializing.
You cannot pass an array or sequence as input as you do with numpy.array.
    To initialize a FieldArray from already existing arrays, use the
``FieldArray.from_arrays`` class method. To initialize from a list of
tuples, use ``FieldArray.from_records``. See the docstring for those methods
    for details. For more information on initializing an empty array, see
``numpy.recarray`` help.
Some additional features:
* **Arbitrary functions**:
      You can retrieve functions on fields in the same manner that you access
individual fields. For example, if you have a FieldArray ``x`` with
fields ``a`` and ``b``, you can access each field with
``x['a'], x['b']``. You can also do ``x['a*b/(a+b)**2.']``,
      ``x['cos(a)*sin(b)']``, etc. Boolean operations are also possible, e.g.,
``x['(a < 3) & (b < 2)']``. Syntax for functions is python. Any numpy
ufunc, as well as all functions listed in the functionlib attribute, may
      be used. Note that while fields may be accessed as attributes (e.g.,
field ``a`` can be accessed via ``x['a']`` or ``x.a``), functions on
multiple fields may not (``x.a+b`` does not work, for obvious reasons).
* **Subfields and '.' indexing**:
Structured arrays, which are the base class for recarrays and, by
inheritance, FieldArray, allows for fields to themselves have fields. For
example, an array ``x`` may have fields ``a`` and ``b``, with ``b`` having
subfields ``c`` and ``d``. You can access subfields using other index
notation or attribute notation. So, the subfields ``d`` may be retrieved
via ``x['b']['d']``, ``x.b.d``, ``x['b'].d`` or ``x['b.d']``. Likewise,
functions can be carried out on the subfields, as they can on fields. If
``d`` is a float field, we could get the log of it via ``x['log(b.d)']``.
There is no limit to the number of subfields. So, ``c`` could also have
subfield ``c0``, which would be accessed via ``x.c.c0``, or any of the
other methods.
.. warning::
Record arrays also allow you to set values of a field using attribute
notation. However, this can lead to unexpected results if you
        accidentally misspell the attribute. For example, if ``x`` has field
``foo``, and you misspell this when setting, e.g., you try to do
``x.fooo = numpy.arange(x.size)``, ``foo`` will not be set, nor will
you get an error. Instead, the attribute ``fooo`` will be added to
``x``. If you tried to do this using index notation, however ---
``x['fooo'] = numpy.arange(x.size)`` --- you will
get an ``AttributeError`` as you might expect. For this reason, it is
recommended that you always use index notation when *setting* values;
you can use either index or attribute notation when *retrieving*
values.
* **Properties and methods as fields**:
      If a property or instance method is defined for a class that inherits from
FieldArray, those can be accessed in the same way as fields are. For
example, define ``Foo`` as:
.. code-block:: python
class Foo(FieldArray):
@property
def bar(self):
return self['a']**2.
def narf(self, y):
return self['a'] + y
Then if we have an instance: ``foo = Foo(100, dtype=[('a', float)])``.
The ``bar`` and ``narf`` attributes may be accessed via field notation:
``foo.bar``, ``foo['bar']``, ``foo.narf(10)`` and ``foo['narf(10)']``.
* **Virtual fields**:
Virtual fields are methods wrapped as properties that operate on one or
more fields, thus returning an array of values. To outside code virtual
      fields look the same as fields, and can be called similarly. Internally,
no additional data is stored; the operation is performed on the fly when
the virtual field is called. Virtual fields can be added to an array
instance with the add_virtualfields method. Alternatively, virtual fields
can be defined by sub-classing FieldArray:
.. code-block:: python
class Foo(FieldArray):
_virtualfields = ['bar']
@property
def bar(self):
return self['a']**2.
The fields property returns the names of both fields and virtual fields.
.. note::
It can happen that a field, virtual field, or function in the
functionlib have that same name. In that case, precedence is: field,
virtual field, function. For example, if a function called 'foo' is in
the function library, and a virtual field is added call 'foo', then
`a['foo']` will return the virtual field rather than the function.
Likewise, if the array is initialized with a field called `foo`, or a
field with that name is added, `a['foo']` will return that field
rather than the virtual field and/or the function.
Parameters
----------
shape : {int | tuple}
The shape of the new array.
name : {None | str}
Optional, what to name the new array. The array's ``name`` attribute
is set to this.
For details on other keyword arguments, see ``numpy.recarray`` help.
Attributes
----------
name : str
Instance attribute. The name of the array.
Examples
--------
.. note:: For some predefined arrays with default fields, see the other
array classes defined below.
Create an empty array with four rows and two fields called `foo` and
`bar`, both of which are floats:
>>> x = FieldArray(4, dtype=[('foo', float), ('bar', float)])
Set/retrieve a fields using index or attribute syntax:
>>> x['foo'] = [1.,2.,3.,4.]
>>> x['bar'] = [5.,6.,7.,8.]
>>> x
FieldArray([(1.0, 5.0), (2.0, 6.0), (3.0, 7.0), (4.0, 8.0)],
dtype=[('foo', '<f8'), ('bar', '<f8')])
>>> x.foo
array([ 1., 2., 3., 4.])
>>> x['bar']
array([ 5., 6., 7., 8.])
Get the names of the fields:
>>> x.fieldnames
('foo', 'bar')
Rename the fields to `a` and `b`:
>>> x.dtype.names = ['a', 'b']
>>> x.fieldnames
('a', 'b')
Retrieve a function of the fields as if it were a field:
>>> x['sin(a/b)']
array([ 0.19866933, 0.3271947 , 0.41557185, 0.47942554])
Add a virtual field:
>>> def c(self):
... return self['a'] + self['b']
...
>>> x = x.add_virtualfields('c', c)
>>> x.fields
('a', 'b', 'c')
>>> x['c']
array([ 6., 8., 10., 12.])
Create an array with subfields:
>>> x = FieldArray(4, dtype=[('foo', [('cat', float), ('hat', int)]), ('bar', float)])
>>> x.fieldnames
['foo.cat', 'foo.hat', 'bar']
Load from a list of arrays (in this case, from an hdf5 file):
>>> bankhdf = h5py.File('bank/H1L1-BANK2HDF-1117400416-928800.hdf')
>>> bankhdf.keys()
[u'mass1', u'mass2', u'spin1z', u'spin2z', u'template_hash']
>>> templates = FieldArray.from_arrays(bankhdf.values(), names=bankhdf.keys())
>>> templates.fieldnames
('mass1', 'mass2', 'spin1z', 'spin2z', 'template_hash')
>>> templates.mass1
array([ 1.71731389, 1.10231435, 2.99999857, ..., 1.67488706,
1.00531888, 2.11106491], dtype=float32)
Sort by a field without having to worry about also sorting the other
fields:
>>> templates[['mass1', 'mass2']]
array([(1.7173138856887817, 1.2124452590942383),
(1.1023143529891968, 1.0074082612991333),
(2.9999985694885254, 1.0578444004058838), ...,
(1.6748870611190796, 1.1758257150650024),
(1.0053188800811768, 1.0020891427993774),
(2.111064910888672, 1.0143394470214844)],
dtype=[('mass1', '<f4'), ('mass2', '<f4')])
>>> templates.sort(order='mass1')
>>> templates[['mass1', 'mass2']]
array([(1.000025987625122, 1.0000133514404297),
(1.0002814531326294, 1.0002814531326294),
(1.0005437135696411, 1.0005437135696411), ...,
(2.999999523162842, 1.371169090270996),
(2.999999523162842, 1.4072519540786743), (3.0, 1.4617927074432373)],
dtype=[('mass1', '<f4'), ('mass2', '<f4')])
Convert a LIGOLW xml table:
>>> type(sim_table)
ligo.lw.lsctables.SimInspiralTable
>>> sim_array = FieldArray.from_ligolw_table(sim_table)
>>> sim_array.mass1
array([ 2.27440691, 1.85058105, 1.61507106, ..., 2.0504961 ,
2.33554196, 2.02732205], dtype=float32)
>>> sim_array.waveform
array([u'SpinTaylorT2', u'SpinTaylorT2', u'SpinTaylorT2', ...,
u'SpinTaylorT2', u'SpinTaylorT2', u'SpinTaylorT2'], dtype=object)
>>> sim_array = FieldArray.from_ligolw_table(sim_table, columns=['simulation_id', 'mass1', 'mass2'])
>>> sim_array
FieldArray([(0, 2.274406909942627, 2.6340370178222656),
(1, 1.8505810499191284, 2.8336880207061768),
(2, 1.6150710582733154, 2.2336490154266357), ...,
(11607, 2.0504961013793945, 2.6019821166992188),
(11608, 2.3355419635772705, 1.2164380550384521),
(11609, 2.0273220539093018, 2.2453839778900146)],
dtype=[('simulation_id', '<i8'), ('mass1', '<f4'), ('mass2', '<f4')])
Add a field to the array:
>>> optimal_snrs = numpy.random.uniform(4.,40., size=len(sim_array))
>>> sim_array = sim_array.add_fields(optimal_snrs, 'optimal_snrs')
>>> sim_array.fieldnames
('simulation_id', 'mass1', 'mass2', 'optimal_snrs')
Notes
-----
Input arrays with variable-length strings in one or more fields can be
tricky to deal with. Numpy arrays are designed to use fixed-length
datasets, so that quick memory access can be achieved. To deal with
variable-length strings, there are two options: 1. set the data type to
object, or 2. set the data type to a string with a fixed length larger
than the longest string in the input array.
The first option, using objects, essentially causes the array to store a
pointer to the string. This is the most flexible option, as it allows
strings in the array to be updated to any length. However, operations on
object fields are slower, as numpy cannot take advantage of its fast
memory striding abilities (see `this question/answer on stackoverflow
<http://stackoverflow.com/a/14639568/1366472>`_ for details). Also,
numpy's support of object arrays is more limited. In particular, prior
to version 1.9.2, you cannot create a view of an array that changes the
dtype if the array has any fields that are objects, even if the view does
not affect the object fields. (This has since been relaxed.)
The second option, using strings of a fixed length, solves the issues
with object fields. However, if you try to change one of the strings
after the array is created, the string will be truncated at whatever
string length is used. Additionally, if you choose too large of a string
length, you can substantially increase the memory overhead for large
arrays.
"""
_virtualfields = []
_functionlib = _fieldarray_functionlib
__persistent_attributes__ = ['name', '_virtualfields', '_functionlib']
def __new__(cls, shape, name=None, zero=True, **kwargs):
"""Initializes a new empty array.
"""
obj = super(FieldArray, cls).__new__(cls, shape, **kwargs).view(
type=cls)
obj.name = name
obj.__persistent_attributes__ = [a
for a in cls.__persistent_attributes__]
obj._functionlib = {f: func for f,func in cls._functionlib.items()}
obj._virtualfields = [f for f in cls._virtualfields]
# zero out the array if desired
if zero:
default = default_empty(1, dtype=obj.dtype)
obj[:] = default
return obj
def __array_finalize__(self, obj):
"""Default values are set here.
See <https://docs.scipy.org/doc/numpy/user/basics.subclassing.html> for
details.
"""
if obj is None:
return
# copy persistent attributes
try:
obj.__copy_attributes__(self)
except AttributeError:
pass
def __copy_attributes__(self, other, default=None):
"""Copies the values of all of the attributes listed in
`self.__persistent_attributes__` to other.
"""
[setattr(other, attr, copy.deepcopy(getattr(self, attr, default))) \
for attr in self.__persistent_attributes__]
def __getattribute__(self, attr, no_fallback=False):
"""Allows fields to be accessed as attributes.
"""
# first try to get the attribute
try:
return numpy.ndarray.__getattribute__(self, attr)
except AttributeError as e:
# don't try getitem, which might get back here
if no_fallback:
raise(e)
            # might be a field, try to retrieve it using getitem
if attr in self.fields:
return self.__getitem__(attr)
# otherwise, unrecognized
raise AttributeError(e)
    def __setitem__(self, item, values):
        """Wraps recarray's setitem to allow attribute-like indexing when
setting values.
"""
if type(item) is int and type(values) is numpy.ndarray:
# numpy >=1.14 only accepts tuples
values = tuple(values)
try:
return super(FieldArray, self).__setitem__(item, values)
except ValueError:
# we'll get a ValueError if a subarray is being referenced using
# '.'; so we'll try to parse it out here
fields = item.split('.')
if len(fields) > 1:
for field in fields[:-1]:
self = self[field]
item = fields[-1]
# now try again
return super(FieldArray, self).__setitem__(item, values)
def __getbaseitem__(self, item):
"""Gets an item assuming item is either an index or a fieldname.
"""
# We cast to a ndarray to avoid calling array_finalize, which can be
# slow
out = self.view(numpy.ndarray)[item]
# if there are no fields, then we can just return
if out.dtype.fields is None:
return out
# if there are fields, but only a single entry, we'd just get a
# record by casting to self, so just cast immediately to recarray
elif out.ndim == 0:
return out.view(numpy.recarray)
# otherwise, cast back to an instance of self
else:
return out.view(type(self))
def __getsubitem__(self, item):
"""Gets a subfield using `field.subfield` notation.
"""
try:
return self.__getbaseitem__(item)
except ValueError as err:
subitems = item.split('.')
if len(subitems) > 1:
return self.__getbaseitem__(subitems[0]
).__getsubitem__('.'.join(subitems[1:]))
else:
raise ValueError(err)
def __getitem__(self, item):
"""Wraps recarray's `__getitem__` so that math functions on fields and
attributes can be retrieved. Any function in numpy's library may be
used.
"""
try:
return self.__getsubitem__(item)
except ValueError:
#
# arg isn't a simple argument of row, so we'll have to eval it
#
if not hasattr(self, '_code_cache'):
self._code_cache = {}
if item not in self._code_cache:
code = compile(item, '<string>', 'eval')
# get the function library
item_dict = dict(_numpy_function_lib.items())
item_dict.update(self._functionlib)
# parse to get possible fields
itemvars_raw = get_fields_from_arg(item)
itemvars = []
for it in itemvars_raw:
try:
float(it)
is_num = True
except ValueError:
is_num = False
if not is_num:
itemvars.append(it)
self._code_cache[item] = (code, itemvars, item_dict)
code, itemvars, item_dict = self._code_cache[item]
added = {}
for it in itemvars:
if it in self.fieldnames:
# pull out the fields: note, by getting the parent fields
# we also get the sub fields name
added[it] = self.__getbaseitem__(it)
elif (it in self.__dict__) or (it in self._virtualfields):
# pull out any needed attributes
added[it] = self.__getattribute__(it, no_fallback=True)
else:
# add any aliases
aliases = self.aliases
if it in aliases:
added[it] = self.__getbaseitem__(aliases[it])
if item_dict is not None:
item_dict.update(added)
ans = eval(code, {"__builtins__": None}, item_dict)
for k in added:
item_dict.pop(k)
return ans
def __contains__(self, field):
"""Returns True if the given field name is in self's fields."""
return field in self.fields
def sort(self, axis=-1, kind='quicksort', order=None):
"""Sort an array, in-place.
This function extends the standard numpy record array in-place sort
to allow the basic use of Field array virtual fields. Only a single
field is currently supported when referencing a virtual field.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
"""
try:
numpy.recarray.sort(self, axis=axis, kind=kind, order=order)
except ValueError:
if isinstance(order, list):
raise ValueError("Cannot process more than one order field")
self[:] = self[numpy.argsort(self[order])]
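    # Illustrative sketch (not part of the original source): sorting by a
    # virtual field exercises the argsort fallback above. This assumes the
    # field-lookup fallback catches the exception your numpy version raises
    # for unknown field names; 'absum' is a hypothetical virtual field.
    #
    #   >>> arr = FieldArray(3, dtype=[('a', float), ('b', float)])
    #   >>> arr['a'] = [3., 1., 2.]
    #   >>> arr['b'] = [1., 1., 1.]
    #   >>> arr = arr.add_virtualfields(
    #   ...     'absum', lambda self: self['a'] + self['b'])
    #   >>> arr.sort(order='absum')
    #   >>> arr['a']
    #   array([1., 2., 3.])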
def addattr(self, attrname, value=None, persistent=True):
"""Adds an attribute to self. If persistent is True, the attribute will
be made a persistent attribute. Persistent attributes are copied
whenever a view or copy of this array is created. Otherwise, new views
or copies of this will not have the attribute.
"""
setattr(self, attrname, value)
# add as persistent
if persistent and attrname not in self.__persistent_attributes__:
self.__persistent_attributes__.append(attrname)
def add_methods(self, names, methods):
"""Adds the given method(s) as instance method(s) of self. The
method(s) must take `self` as a first argument.
"""
if isinstance(names, str):
names = [names]
methods = [methods]
for name,method in zip(names, methods):
setattr(self, name, types.MethodType(method, self))
def add_properties(self, names, methods):
"""Returns a view of self with the given methods added as properties.
From: <http://stackoverflow.com/a/2954373/1366472>.
"""
cls = type(self)
cls = type(cls.__name__, (cls,), dict(cls.__dict__))
if isinstance(names, str):
names = [names]
methods = [methods]
for name,method in zip(names, methods):
setattr(cls, name, property(method))
return self.view(type=cls)
def add_virtualfields(self, names, methods):
"""Returns a view of this array with the given methods added as virtual
fields. Specifically, the given methods are added using add_properties
and their names are added to the list of virtual fields. Virtual fields
are properties that are assumed to operate on one or more of self's
fields, thus returning an array of values.
"""
if isinstance(names, str):
names = [names]
methods = [methods]
out = self.add_properties(names, methods)
if out._virtualfields is None:
out._virtualfields = []
out._virtualfields.extend(names)
return out
def add_functions(self, names, functions):
"""Adds the given functions to the function library.
Functions are added to this instance of the array; all copies of
and slices of this array will also have the new functions included.
Parameters
----------
names : (list of) string(s)
Name or list of names of the functions.
functions : (list of) function(s)
The function(s) to call.
"""
if isinstance(names, str):
names = [names]
functions = [functions]
if len(functions) != len(names):
raise ValueError("number of provided names must be same as number "
"of functions")
self._functionlib.update(dict(zip(names, functions)))
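    # Illustrative sketch (not part of the original source): once registered,
    # a library function can be used inside item strings alongside field
    # names. The function name 'double' here is hypothetical.
    #
    #   >>> arr = FieldArray(3, dtype=[('a', float)])
    #   >>> arr['a'] = [1., 2., 3.]
    #   >>> arr.add_functions('double', lambda x: 2*x)
    #   >>> arr['double(a)']
    #   array([2., 4., 6.])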
def del_functions(self, names):
"""Removes the specified function names from the function library.
Functions are removed from this instance of the array; all copies
and slices of this array will also have the functions removed.
Parameters
----------
names : (list of) string(s)
Name or list of names of the functions to remove.
"""
if isinstance(names, str):
names = [names]
for name in names:
self._functionlib.pop(name)
@classmethod
def from_arrays(cls, arrays, name=None, **kwargs):
"""Creates a new instance of self from the given (list of) array(s).
This is done by calling numpy.rec.fromarrays on the given arrays with
the given kwargs. The type of the returned array is cast to this
class, and the name (if provided) is set.
Parameters
----------
arrays : (list of) numpy array(s)
A list of the arrays to create the FieldArray from.
name : {None|str}
What the output array should be named.
For other keyword parameters, see the numpy.rec.fromarrays help.
Returns
-------
array : instance of this class
An array that is an instance of this class in which the field
data is from the given array(s).
"""
obj = numpy.rec.fromarrays(arrays, **kwargs).view(type=cls)
obj.name = name
return obj
@classmethod
def from_records(cls, records, name=None, **kwargs):
"""Creates a new instance of self from the given (list of) record(s).
A "record" is a tuple in which each element is the value of one field
in the resulting record array. This is done by calling
`numpy.rec.fromrecords` on the given records with the given kwargs.
The type of the returned array is cast to this class, and the name
(if provided) is set.
Parameters
----------
records : (list of) tuple(s)
A list of the tuples to create the FieldArray from.
name : {None|str}
What the output array should be named.
Other Parameters
----------------
For other keyword parameters, see the `numpy.rec.fromrecords` help.
Returns
-------
array : instance of this class
An array that is an instance of this class in which the field
data is from the given record(s).
"""
obj = numpy.rec.fromrecords(records, **kwargs).view(
type=cls)
obj.name = name
return obj
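    # Illustrative sketch (not part of the original source): building a small
    # array from a list of row tuples, letting numpy infer the dtypes.
    #
    #   >>> arr = FieldArray.from_records([(1., 2.), (3., 4.)],
    #   ...                               names=['a', 'b'])
    #   >>> arr['a'], arr['b']
    #   (array([1., 3.]), array([2., 4.]))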
@classmethod
def from_kwargs(cls, **kwargs):
"""Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
"""
arrays = []
names = []
for p,vals in kwargs.items():
if not isinstance(vals, numpy.ndarray):
if not isinstance(vals, list):
vals = [vals]
vals = numpy.array(vals)
arrays.append(vals)
names.append(p)
return cls.from_arrays(arrays, names=names)
@classmethod
    def from_ligolw_table(cls, table, columns=None, cast_to_dtypes=None):
        """Converts the given ligolw table into a FieldArray. The `tableName`
attribute is copied to the array's `name`.
Parameters
----------
table : LIGOLw table instance
The table to convert.
columns : {None|list}
Optionally specify a list of columns to retrieve. All of the
            columns must be in the table's validcolumns attribute. If None is
            provided, all of the columns in the table will be converted.
        cast_to_dtypes : {None | dict}
Override the columns' dtypes using the given dictionary. The
dictionary should be keyed by the column names, with the values
a tuple that can be understood by numpy.dtype. For example, to
cast a ligolw column called "foo" to a field called "bar" with
type float, cast_to_dtypes would be: ``{"foo": ("bar", float)}``.
Returns
-------
array : FieldArray
            The input table as a FieldArray.
"""
name = table.tableName.split(':')[0]
if columns is None:
# get all the columns
columns = table.validcolumns
else:
# note: this will raise a KeyError if one or more columns is
# not in the table's validcolumns
new_columns = {}
for col in columns:
new_columns[col] = table.validcolumns[col]
columns = new_columns
if cast_to_dtypes is not None:
dtype = [cast_to_dtypes[col] for col in columns]
else:
dtype = list(columns.items())
# get the values
if _default_types_status['ilwd_as_int']:
# columns like `process:process_id` have corresponding attributes
# with names that are only the part after the colon, so we split
input_array = \
[tuple(getattr(row, col.split(':')[-1]) if dt != 'ilwd:char'
else int(getattr(row, col))
for col,dt in columns.items())
for row in table]
else:
input_array = \
[tuple(getattr(row, col) for col in columns) for row in table]
# return the values as an instance of cls
return cls.from_records(input_array, dtype=dtype,
name=name)
def to_array(self, fields=None, axis=0):
"""Returns an `numpy.ndarray` of self in which the fields are included
as an extra dimension.
Parameters
----------
fields : {None, (list of) strings}
The fields to get. All of the fields must have the same datatype.
If None, will try to return all of the fields.
axis : {0, int}
Which dimension to put the fields in in the returned array. For
example, if `self` has shape `(l,m,n)` and `k` fields, the
returned array will have shape `(k,l,m,n)` if `axis=0`, `(l,k,m,n)`
if `axis=1`, etc. Setting `axis=-1` will put the fields in the
last dimension. Default is 0.
Returns
-------
numpy.ndarray
The desired fields as a numpy array.
"""
if fields is None:
fields = self.fieldnames
if isinstance(fields, str):
fields = [fields]
return numpy.stack([self[f] for f in fields], axis=axis)
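    # Illustrative sketch (not part of the original source): the ``axis``
    # argument controls where the field dimension ends up in the stacked
    # output.
    #
    #   >>> arr = FieldArray(3, dtype=[('a', float), ('b', float)])
    #   >>> arr.to_array().shape         # fields along the first axis
    #   (2, 3)
    #   >>> arr.to_array(axis=-1).shape  # fields along the last axis
    #   (3, 2)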
@property
def fieldnames(self):
"""Returns a tuple listing the field names in self. Equivalent to
`array.dtype.names`, where `array` is self.
"""
return self.dtype.names
@property
def virtualfields(self):
"""Returns a tuple listing the names of virtual fields in self.
"""
if self._virtualfields is None:
vfs = tuple()
else:
vfs = tuple(self._virtualfields)
return vfs
@property
def functionlib(self):
"""Returns the library of functions that are available when calling
items.
"""
return self._functionlib
@property
def fields(self):
"""Returns a tuple listing the names of fields and virtual fields in
self."""
return tuple(list(self.fieldnames) + list(self.virtualfields))
@property
def aliases(self):
"""Returns a dictionary of the aliases, or "titles", of the field names
in self. An alias can be specified by passing a tuple in the name
part of the dtype. For example, if an array is created with
``dtype=[(('foo', 'bar'), float)]``, the array will have a field
called `bar` that has alias `foo` that can be accessed using
either `arr['foo']` or `arr['bar']`. Note that the first string
in the dtype is the alias, the second the name. This function returns
a dictionary in which the aliases are the keys and the names are the
values. Only fields that have aliases are returned.
"""
return dict(c[0] for c in self.dtype.descr if isinstance(c[0], tuple))
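    # Illustrative sketch (not part of the original source): an alias is
    # declared by giving an (alias, name) tuple as the field name in the
    # dtype.
    #
    #   >>> arr = FieldArray(2, dtype=[(('foo', 'bar'), float)])
    #   >>> arr.fieldnames
    #   ('bar',)
    #   >>> arr.aliases
    #   {'foo': 'bar'}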
def add_fields(self, arrays, names=None, assubarray=False):
"""
Adds the given arrays as new fields to self. Returns a new instance
with the new fields added. Note: this array does not change; the
returned array is a new copy.
Parameters
----------
arrays : (list of) numpy array(s)
The arrays to add. If adding multiple arrays, must be a list;
if adding a single array, can just be that array.
names : (list of) strings
Optional, the name(s) of the new fields in the output array. If
adding multiple fields, must be a list of strings with the same
            length as the list of arrays. If None is provided, the names used will
be the same as the name of the datatype in the given arrays.
If the datatype has no name, the new field will be ``'fi'`` where
i is the index of the array in arrays.
assubarray : bool
Add the list of arrays as a single subarray field. If True, and
names provided, names should be a string or a length-1 sequence.
Default is False, in which case each array will be added as a
separate field.
Returns
-------
new_array : new instance of this array
A copy of this array with the desired fields added.
"""
newself = add_fields(self, arrays, names=names, assubarray=assubarray)
self.__copy_attributes__(newself)
return newself
def parse_boolargs(self, args):
"""Returns an array populated by given values, with the indices of
        those values dependent on the given boolean tests on self.
        The given `args` should be a list of tuples, with the first element the
        return value and the second element a string that evaluates to either
        True or False for each element in self.
Each boolean argument is evaluated on elements for which every prior
boolean argument was False. For example, if array `foo` has a field
`bar`, and `args = [(1, 'bar < 10'), (2, 'bar < 20'), (3, 'bar < 30')]`,
then the returned array will have `1`s at the indices for
which `foo.bar < 10`, `2`s where `foo.bar < 20 and not foo.bar < 10`,
and `3`s where `foo.bar < 30 and not (foo.bar < 10 or foo.bar < 20)`.
        The last argument in the list may use "else", an empty string, or None
        as its boolean argument, or may simply be a return value. In any of
        these cases, any element not yet populated will be assigned the last
        return value.
Parameters
----------
args : {(list of) tuples, value}
One or more return values and boolean argument determining where
they should go.
Returns
-------
return_values : array
An array with length equal to self, with values populated with the
return values.
leftover_indices : array
An array of indices that evaluated to False for all arguments.
            These indices will not have been populated with any value,
defaulting to whatever numpy uses for a zero for the return
values' dtype. If there are no leftovers, an empty array is
returned.
Examples
--------
Given the following array:
>>> arr = FieldArray(5, dtype=[('mtotal', float)])
>>> arr['mtotal'] = numpy.array([3., 5., 2., 1., 4.])
Return `"TaylorF2"` for all elements with `mtotal < 4` (note that the
elements 1 and 4 are leftover):
>>> arr.parse_boolargs(('TaylorF2', 'mtotal<4'))
(array(['TaylorF2', '', 'TaylorF2', 'TaylorF2', ''],
dtype='|S8'),
array([1, 4]))
Return `"TaylorF2"` for all elements with `mtotal < 4`,
`"SEOBNR_ROM_DoubleSpin"` otherwise:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', 'else')])
(array(['TaylorF2', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
'SEOBNRv2_ROM_DoubleSpin'],
dtype='|S23'),
array([], dtype=int64))
The following will also return the same:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin',)])
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', '')])
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
Return `"TaylorF2"` for all elements with `mtotal < 3`, `"IMRPhenomD"`
for all elements with `3 <= mtotal < 4`, `"SEOBNRv2_ROM_DoubleSpin"`
otherwise:
>>> arr.parse_boolargs([('TaylorF2', 'mtotal<3'), ('IMRPhenomD', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
(array(['IMRPhenomD', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
'SEOBNRv2_ROM_DoubleSpin'],
dtype='|S23'),
array([], dtype=int64))
Just return `"TaylorF2"` for all elements:
>>> arr.parse_boolargs('TaylorF2')
(array(['TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2'],
dtype='|S8'),
array([], dtype=int64))
"""
if not isinstance(args, list):
args = [args]
# format the arguments
return_vals = []
bool_args = []
for arg in args:
if not isinstance(arg, tuple):
return_val = arg
bool_arg = None
elif len(arg) == 1:
return_val = arg[0]
bool_arg = None
elif len(arg) == 2:
return_val, bool_arg = arg
else:
raise ValueError("argument not formatted correctly")
return_vals.append(return_val)
bool_args.append(bool_arg)
# get the output dtype
outdtype = numpy.array(return_vals).dtype
out = numpy.zeros(self.size, dtype=outdtype)
mask = numpy.zeros(self.size, dtype=bool)
leftovers = numpy.ones(self.size, dtype=bool)
for ii,(boolarg,val) in enumerate(zip(bool_args, return_vals)):
if boolarg is None or boolarg == '' or boolarg.lower() == 'else':
if ii+1 != len(bool_args):
raise ValueError("only the last item may not provide "
"any boolean arguments")
mask = leftovers
else:
mask = leftovers & self[boolarg]
out[mask] = val
leftovers &= ~mask
return out, numpy.where(leftovers)[0]
def append(self, other):
"""Appends another array to this array.
        The returned array will have all of the class methods and virtual
        fields of this array, including any that were added using `add_methods`
        or `add_virtualfields`. If this array and the other array have one or more
string fields, the dtype for those fields are updated to a string
length that can encompass the longest string in both arrays.
.. note::
Increasing the length of strings only works for fields, not
sub-fields.
Parameters
----------
other : array
The array to append values from. It must have the same fields and
dtype as this array, modulo the length of strings. If the other
array does not have the same dtype, a TypeError is raised.
Returns
-------
array
            An array with the other array's values appended to this array's values. The
returned array is an instance of the same class as this array,
including all methods and virtual fields.
"""
try:
return numpy.append(self, other).view(type=self.__class__)
except TypeError:
# see if the dtype error was due to string fields having different
# lengths; if so, we'll make the joint field the larger of the
# two
str_fields = [name for name in self.fieldnames
if _isstring(self.dtype[name])]
# get the larger of the two
new_strlens = dict(
[[name,
max(self.dtype[name].itemsize, other.dtype[name].itemsize)]
for name in str_fields]
)
# cast both to the new string lengths
new_dt = []
for dt in self.dtype.descr:
name = dt[0]
if name in new_strlens:
dt = (name, self.dtype[name].type, new_strlens[name])
new_dt.append(dt)
new_dt = numpy.dtype(new_dt)
return numpy.append(
self.astype(new_dt),
other.astype(new_dt)
).view(type=self.__class__)
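    # Illustrative sketch (not part of the original source): appending two
    # arrays with the same fields. String fields with different lengths are
    # widened automatically by the except branch above.
    #
    #   >>> a = FieldArray.from_kwargs(x=[1., 2.])
    #   >>> b = FieldArray.from_kwargs(x=[3.])
    #   >>> a.append(b)['x']
    #   array([1., 2., 3.])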
@classmethod
def parse_parameters(cls, parameters, possible_fields):
"""Parses a list of parameters to get the list of fields needed in
order to evaluate those parameters.
Parameters
----------
parameters : (list of) string(s)
The list of desired parameters. These can be (functions of) fields
or virtual fields.
possible_fields : (list of) string(s)
The list of possible fields.
Returns
-------
list :
The list of names of the fields that are needed in order to
evaluate the given parameters.
"""
if isinstance(possible_fields, str):
possible_fields = [possible_fields]
possible_fields = list(map(str, possible_fields))
# we'll just use float as the dtype, as we just need this for names
arr = cls(1, dtype=list(zip(possible_fields,
len(possible_fields)*[float])))
        # try to preserve order
return list(get_needed_fieldnames(arr, parameters))
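    # Illustrative sketch (not part of the original source): only the fields
    # that actually appear in the requested expression are returned; the
    # order of the returned names is not guaranteed.
    #
    #   >>> FieldArray.parse_parameters(['a + 2*b'], ['a', 'b', 'c'])
    #   ['a', 'b']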
def _isstring(dtype):
"""Given a numpy dtype, determines whether it is a string. Returns True
if the dtype is string or unicode.
"""
return dtype.type == numpy.unicode_ or dtype.type == numpy.string_
def aliases_from_fields(fields):
"""Given a dictionary of fields, will return a dictionary mapping the
aliases to the names.
"""
return dict(c for c in fields if isinstance(c, tuple))
def fields_from_names(fields, names=None):
"""Given a dictionary of fields and a list of names, will return a
dictionary consisting of the fields specified by names. Names can be
either the names of fields, or their aliases.
"""
if names is None:
return fields
if isinstance(names, str):
names = [names]
aliases_to_names = aliases_from_fields(fields)
names_to_aliases = dict(zip(aliases_to_names.values(),
aliases_to_names.keys()))
outfields = {}
for name in names:
try:
outfields[name] = fields[name]
except KeyError:
if name in aliases_to_names:
key = (name, aliases_to_names[name])
elif name in names_to_aliases:
key = (names_to_aliases[name], name)
else:
                raise KeyError('default fields have no field %s' % name)
outfields[key] = fields[key]
return outfields
#
# =============================================================================
#
# FieldArray with default fields
#
# =============================================================================
#
class _FieldArrayWithDefaults(FieldArray):
"""
Subclasses FieldArray, adding class attribute ``_staticfields``, and
class method ``default_fields``. The ``_staticfields`` should be a
dictionary that defines some field names and corresponding dtype. The
``default_fields`` method returns a dictionary of the static fields
and any default virtualfields that were added. A field array can then
be initialized in one of 3 ways:
1. With just a shape. In this case, the returned array will have all
of the default fields.
2. With a shape and a list of names, given by the ``names`` keyword
argument. The names may be default fields, virtual fields, a method or
property of the class, or any python function of these things. If a
virtual field, method, or property is in the names, the needed underlying
fields will be included in the return array. For example, if the class
has a virtual field called 'mchirp', which is a function of fields called
'mass1' and 'mass2', then 'mchirp' or any function of 'mchirp' may be
included in the list of names (e.g., names=['mchirp**(5/6)']). If so, the
returned array will have fields 'mass1' and 'mass2' even if these were
not specified in names, so that 'mchirp' may be used without error.
names must be names of either default fields or virtualfields, else a
KeyError is raised.
3. With a shape and a dtype. Any field specified by the dtype will be
used. The fields need not be in the list of default fields, and/or the
dtype can be different than that specified by the default fields.
If additional fields are desired beyond the default fields, these can
be specified using the ``additional_fields`` keyword argument; these should
be provided in the same way as ``dtype``; i.e, as a list of (name, dtype)
tuples.
This class does not define any static fields, and ``default_fields`` just
returns an empty dictionary. This class is mostly meant to be subclassed
by other classes, so they can add their own defaults.
"""
_staticfields = {}
@classmethod
def default_fields(cls, include_virtual=True, **kwargs):
"""The default fields and their dtypes. By default, this returns
whatever the class's ``_staticfields`` and ``_virtualfields`` is set
to as a dictionary of fieldname, dtype (the dtype of virtualfields is
given by VIRTUALFIELD_DTYPE). This function should be overridden by
subclasses to add dynamic fields; i.e., fields that require some input
parameters at initialization. Keyword arguments can be passed to this
to set such dynamic fields.
"""
output = cls._staticfields.copy()
if include_virtual:
output.update({name: VIRTUALFIELD_DTYPE
for name in cls._virtualfields})
return output
def __new__(cls, shape, name=None, additional_fields=None,
field_kwargs=None, **kwargs):
"""The ``additional_fields`` should be specified in the same way as
``dtype`` is normally given to FieldArray. The ``field_kwargs`` are
passed to the class's default_fields method as keyword arguments.
"""
if field_kwargs is None:
field_kwargs = {}
if 'names' in kwargs and 'dtype' in kwargs:
raise ValueError("Please provide names or dtype, not both")
default_fields = cls.default_fields(include_virtual=False,
**field_kwargs)
if 'names' in kwargs:
names = kwargs.pop('names')
if isinstance(names, str):
names = [names]
# evaluate the names to figure out what base fields are needed
# to do this, we'll create a small default instance of self (since
# no names are specified in the following initialization, this
# block of code is skipped)
arr = cls(1, field_kwargs=field_kwargs)
            # try to preserve order
sortdict = dict([[nm, ii] for ii,nm in enumerate(names)])
names = list(get_needed_fieldnames(arr, names))
names.sort(key=lambda x: sortdict[x] if x in sortdict
else len(names))
# add the fields as the dtype argument for initializing
kwargs['dtype'] = [(fld, default_fields[fld]) for fld in names]
if 'dtype' not in kwargs:
kwargs['dtype'] = list(default_fields.items())
# add the additional fields
if additional_fields is not None:
if not isinstance(additional_fields, list):
additional_fields = [additional_fields]
if not isinstance(kwargs['dtype'], list):
kwargs['dtype'] = [kwargs['dtype']]
kwargs['dtype'] += additional_fields
return super(_FieldArrayWithDefaults, cls).__new__(cls, shape,
name=name, **kwargs)
def add_default_fields(self, names, **kwargs):
"""
Adds one or more empty default fields to self.
Parameters
----------
names : (list of) string(s)
The names of the fields to add. Must be a field in self's default
fields.
Other keyword args are any arguments passed to self's default fields.
Returns
-------
new array : instance of this array
A copy of this array with the field added.
"""
if isinstance(names, str):
names = [names]
default_fields = self.default_fields(include_virtual=False, **kwargs)
# parse out any virtual fields
arr = self.__class__(1, field_kwargs=kwargs)
        # try to preserve order
sortdict = dict([[nm, ii] for ii,nm in enumerate(names)])
names = list(get_needed_fieldnames(arr, names))
names.sort(key=lambda x: sortdict[x] if x in sortdict
else len(names))
fields = [(name, default_fields[name]) for name in names]
arrays = []
names = []
for name,dt in fields:
arrays.append(default_empty(self.size, dtype=[(name, dt)]))
names.append(name)
return self.add_fields(arrays, names)
@classmethod
def parse_parameters(cls, parameters, possible_fields=None):
"""Parses a list of parameters to get the list of fields needed in
order to evaluate those parameters.
Parameters
----------
parameters : (list of) strings
The list of desired parameters. These can be (functions of) fields
or virtual fields.
possible_fields : {None, dict}
            Specify the list of possible fields. Must be a dictionary giving
            the name and dtype of each possible field. If None, will use this
class's `_staticfields`.
Returns
-------
list :
The list of names of the fields that are needed in order to
evaluate the given parameters.
"""
if possible_fields is not None:
# make sure field names are strings and not unicode
possible_fields = dict([[f, dt]
for f,dt in possible_fields.items()])
class ModifiedArray(cls):
_staticfields = possible_fields
cls = ModifiedArray
return cls(1, names=parameters).fieldnames
#
# =============================================================================
#
# WaveformArray
#
# =============================================================================
#
class WaveformArray(_FieldArrayWithDefaults):
"""
A FieldArray with some default fields and properties commonly used
by CBC waveforms. This may be initialized in one of 3 ways:
1. With just the size of the array. In this case, the returned array will
have all of the default field names. Example:
>>> warr = WaveformArray(10)
>>> warr.fieldnames
('distance',
'spin2x',
'mass1',
'mass2',
'lambda1',
'polarization',
'spin2y',
'spin2z',
'spin1y',
'spin1x',
'spin1z',
'inclination',
'coa_phase',
'dec',
'tc',
'lambda2',
'ra')
2. With some subset of the default field names. Example:
>>> warr = WaveformArray(10, names=['mass1', 'mass2'])
>>> warr.fieldnames
('mass1', 'mass2')
The list of names may include virtual fields, and methods, as well as
functions of these. If one or more virtual fields or methods are specified,
the source code is analyzed to pull out whatever underlying fields are
needed. Example:
>>> warr = WaveformArray(10, names=['mchirp**(5/6)', 'chi_eff', 'cos(coa_phase)'])
>>> warr.fieldnames
('spin2z', 'mass1', 'mass2', 'coa_phase', 'spin1z')
3. By specifying a dtype. In this case, only the provided fields will
be used, even if they are not in the defaults. Example:
>>> warr = WaveformArray(10, dtype=[('foo', float)])
>>> warr.fieldnames
('foo',)
Additional fields can also be specified using the additional_fields
keyword argument. Example:
>>> warr = WaveformArray(10, names=['mass1', 'mass2'], additional_fields=[('bar', float)])
>>> warr.fieldnames
('mass1', 'mass2', 'bar')
.. note::
If an array is initialized with all of the default fields (case 1,
above), then the names come from waveform.parameters; i.e., they
are actually Parameter instances, not just strings. This means that the
field names carry all of the metadata that a Parameter has. For
example:
>>> warr = WaveformArray(10)
>>> warr.fields[0]
'distance'
>>> warr.fields[0].description
'Luminosity distance to the binary (in Mpc).'
>>> warr.fields[0].label
'$d_L$ (Mpc)'
"""
_staticfields = (parameters.cbc_intrinsic_params +
parameters.extrinsic_params).dtype_dict
_virtualfields = [
parameters.mchirp, parameters.eta, parameters.mtotal,
parameters.q, parameters.primary_mass, parameters.secondary_mass,
parameters.chi_eff,
parameters.spin_px, parameters.spin_py, parameters.spin_pz,
parameters.spin_sx, parameters.spin_sy, parameters.spin_sz,
parameters.spin1_a, parameters.spin1_azimuthal, parameters.spin1_polar,
parameters.spin2_a, parameters.spin2_azimuthal, parameters.spin2_polar,
parameters.remnant_mass]
@property
def primary_mass(self):
"""Returns the larger of self.mass1 and self.mass2."""
return conversions.primary_mass(self.mass1, self.mass2)
@property
def secondary_mass(self):
"""Returns the smaller of self.mass1 and self.mass2."""
        return conversions.secondary_mass(self.mass1, self.mass2)
@property
def mtotal(self):
"""Returns the total mass."""
return conversions.mtotal_from_mass1_mass2(self.mass1, self.mass2)
@property
def q(self):
"""Returns the mass ratio m1/m2, where m1 >= m2."""
return conversions.q_from_mass1_mass2(self.mass1, self.mass2)
@property
def eta(self):
"""Returns the symmetric mass ratio."""
return conversions.eta_from_mass1_mass2(self.mass1, self.mass2)
@property
def mchirp(self):
"""Returns the chirp mass."""
return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2)
@property
def chi_eff(self):
"""Returns the effective spin."""
return conversions.chi_eff(self.mass1, self.mass2, self.spin1z,
self.spin2z)
@property
def spin_px(self):
"""Returns the x-component of the spin of the primary mass."""
return conversions.primary_spin(self.mass1, self.mass2, self.spin1x,
self.spin2x)
@property
def spin_py(self):
"""Returns the y-component of the spin of the primary mass."""
return conversions.primary_spin(self.mass1, self.mass2, self.spin1y,
self.spin2y)
@property
def spin_pz(self):
"""Returns the z-component of the spin of the primary mass."""
return conversions.primary_spin(self.mass1, self.mass2, self.spin1z,
self.spin2z)
@property
def spin_sx(self):
"""Returns the x-component of the spin of the secondary mass."""
return conversions.secondary_spin(self.mass1, self.mass2, self.spin1x,
self.spin2x)
@property
def spin_sy(self):
"""Returns the y-component of the spin of the secondary mass."""
return conversions.secondary_spin(self.mass1, self.mass2, self.spin1y,
self.spin2y)
@property
def spin_sz(self):
"""Returns the z-component of the spin of the secondary mass."""
return conversions.secondary_spin(self.mass1, self.mass2, self.spin1z,
self.spin2z)
@property
def spin1_a(self):
"""Returns the dimensionless spin magnitude of mass 1."""
return coordinates.cartesian_to_spherical_rho(
self.spin1x, self.spin1y, self.spin1z)
@property
def spin1_azimuthal(self):
"""Returns the azimuthal spin angle of mass 1."""
return coordinates.cartesian_to_spherical_azimuthal(
self.spin1x, self.spin1y)
@property
def spin1_polar(self):
"""Returns the polar spin angle of mass 1."""
return coordinates.cartesian_to_spherical_polar(
self.spin1x, self.spin1y, self.spin1z)
@property
def spin2_a(self):
"""Returns the dimensionless spin magnitude of mass 2."""
return coordinates.cartesian_to_spherical_rho(
            self.spin2x, self.spin2y, self.spin2z)
@property
def spin2_azimuthal(self):
"""Returns the azimuthal spin angle of mass 2."""
return coordinates.cartesian_to_spherical_azimuthal(
self.spin2x, self.spin2y)
@property
def spin2_polar(self):
"""Returns the polar spin angle of mass 2."""
return coordinates.cartesian_to_spherical_polar(
self.spin2x, self.spin2y, self.spin2z)
@property
def remnant_mass(self):
"""Returns the remnant mass for an NS-BH binary."""
return conversions.remnant_mass_from_mass1_mass2_cartesian_spin_eos(
self.mass1, self.mass2,
spin1x=self.spin1x,
spin1y=self.spin1y,
spin1z=self.spin1z)
__all__ = ['FieldArray', 'WaveformArray']
| 77,702
| 39.3233
| 113
|
py
|
pycbc
|
pycbc-master/pycbc/io/hdf.py
|
"""
Convenience classes for accessing hdf5 trigger files
"""
import h5py
import numpy as np
import logging
import inspect
import pickle
from itertools import chain
from io import BytesIO
from lal import LIGOTimeGPS, YRJUL_SI
from ligo.lw import ligolw
from ligo.lw import lsctables
from ligo.lw import utils as ligolw_utils
from ligo.lw.utils import process as ligolw_process
from pycbc import version as pycbc_version
from pycbc.io.ligolw import return_search_summary, return_empty_sngl
from pycbc import events, conversions, pnutils
from pycbc.events import ranking, veto
from pycbc.events import mean_if_greater_than_zero
from pycbc.pnutils import mass1_mass2_to_mchirp_eta
class HFile(h5py.File):
""" Low level extensions to the capabilities of reading an hdf5 File
"""
def select(self, fcn, *args, **kwds):
""" Return arrays from an hdf5 file that satisfy the given function
Parameters
----------
fcn : a function
A function that accepts the same number of argument as keys given
and returns a boolean array of the same length.
args : strings
A variable number of strings that are keys into the hdf5. These must
refer to arrays of equal length.
chunksize : {1e6, int}, optional
Number of elements to read and process at a time.
return_indices : bool, optional
If True, also return the indices of elements passing the function.
Returns
-------
values : np.ndarrays
A variable number of arrays depending on the number of keys into
the hdf5 file that are given. If return_indices is True, the first
element is an array of indices of elements passing the function.
>>> f = HFile(filename)
>>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
"""
# get references to each array
refs = {}
data = {}
for arg in args:
refs[arg] = self[arg]
data[arg] = []
return_indices = kwds.get('return_indices', False)
indices = np.array([], dtype=np.uint64)
# To conserve memory read the array in chunks
chunksize = kwds.get('chunksize', int(1e6))
size = len(refs[arg])
i = 0
while i < size:
r = i + chunksize if i + chunksize < size else size
# Read each chunk's worth of data and find where it passes the function
partial = [refs[arg][i:r] for arg in args]
keep = fcn(*partial)
if return_indices:
indices = np.concatenate([indices, np.flatnonzero(keep) + i])
# Store only the results that pass the function
for arg, part in zip(args, partial):
data[arg].append(part[keep])
i += chunksize
# Combine the partial results into full arrays
if len(args) == 1:
res = np.concatenate(data[args[0]])
if return_indices:
return indices.astype(np.uint64), res
else:
return res
else:
res = tuple(np.concatenate(data[arg]) for arg in args)
if return_indices:
return (indices.astype(np.uint64),) + res
else:
return res
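    # Illustrative sketch (not part of the original source; the file and
    # dataset names below are hypothetical): selecting several columns at
    # once and keeping the indices of the surviving rows.
    #
    #   >>> with HFile('H1-TRIGGERS.hdf', 'r') as f:
    #   ...     idx, snr, chisq = f.select(
    #   ...         lambda snr, chisq: (snr > 6) & (chisq < 10),
    #   ...         'H1/snr', 'H1/chisq', return_indices=True)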
class DictArray(object):
""" Utility for organizing sets of arrays of equal length.
Manages a dictionary of arrays of equal length. This can also
be instantiated with a set of hdf5 files and the key values. The full
data is always in memory and all operations create new instances of the
DictArray.
"""
def __init__(self, data=None, files=None, groups=None):
""" Create a DictArray
Parameters
----------
data: dict, optional
Dictionary of equal length numpy arrays
files: list of filenames, optional
            List of hdf5 filenames. Incompatible with the `data` option.
groups: list of strings
List of keys into each file. Required by the files option.
"""
# Check that input fits with how the DictArray is set up
if data and files:
raise RuntimeError('DictArray can only have data or files as '
'input, not both.')
if data is None and files is None:
            raise RuntimeError('DictArray needs either data or files at '
                               'initialization. To set up an empty instance, '
                               'use DictArray(data={})')
if files and not groups:
raise RuntimeError('If files are given then need groups.')
self.data = data
self.groups = groups
if files:
self.data = {}
for g in groups:
self.data[g] = []
for f in files:
d = HFile(f)
for g in groups:
if g in d:
self.data[g].append(d[g][:])
d.close()
for k in self.data:
if not len(self.data[k]) == 0:
self.data[k] = np.concatenate(self.data[k])
for k in self.data:
setattr(self, k, self.data[k])
def _return(self, data):
return self.__class__(data=data)
def __len__(self):
return len(self.data[tuple(self.data.keys())[0]])
def __add__(self, other):
if self.data == {}:
logging.debug('Adding data to a DictArray instance which'
' was initialized with an empty dict')
            return self._return(data=other.data)
data = {}
for k in self.data:
try:
data[k] = np.concatenate([self.data[k], other.data[k]])
except KeyError:
logging.info('%s does not exist in other data' % k)
return self._return(data=data)
def select(self, idx):
""" Return a new DictArray containing only the indexed values
"""
data = {}
for k in self.data:
# Make sure each entry is an array (not a scalar)
data[k] = np.array(self.data[k][idx])
return self._return(data=data)
def remove(self, idx):
""" Return a new DictArray that does not contain the indexed values
"""
data = {}
for k in self.data:
data[k] = np.delete(self.data[k], np.array(idx, dtype=int))
return self._return(data=data)
def save(self, outname):
f = HFile(outname, "w")
for k in self.attrs:
f.attrs[k] = self.attrs[k]
for k in self.data:
f.create_dataset(k, data=self.data[k],
compression='gzip',
compression_opts=9,
shuffle=True)
f.close()
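    # Illustrative sketch (not part of the original source): combining and
    # slicing DictArray instances built from in-memory dictionaries.
    #
    #   >>> d1 = DictArray(data={'stat': np.array([1., 2.]),
    #   ...                      'time': np.array([10., 11.])})
    #   >>> d2 = DictArray(data={'stat': np.array([3.]),
    #   ...                      'time': np.array([12.])})
    #   >>> combined = d1 + d2
    #   >>> len(combined)
    #   3
    #   >>> combined.select(np.array([0, 2])).time
    #   array([10., 12.])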
class StatmapData(DictArray):
def __init__(self, data=None, seg=None, attrs=None, files=None,
groups=('stat', 'time1', 'time2', 'trigger_id1',
'trigger_id2', 'template_id', 'decimation_factor',
'timeslide_id')):
super(StatmapData, self).__init__(data=data, files=files,
groups=groups)
if data:
self.seg=seg
self.attrs=attrs
elif files:
f = HFile(files[0], "r")
self.seg = f['segments']
self.attrs = f.attrs
def _return(self, data):
return self.__class__(data=data, attrs=self.attrs, seg=self.seg)
    def cluster(self, window):
        """ Cluster the dict array, assuming it has the relevant coinc columns:
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
if len(self.time1) == 0 or len(self.time2) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat, self.time1, self.time2,
self.timeslide_id, interval, window)
return self.select(cid)
def save(self, outname):
super(StatmapData, self).save(outname)
        with HFile(outname, "a") as f:  # append so the datasets written above are kept
for key in self.seg.keys():
f['segments/%s/start' % key] = self.seg[key]['start'][:]
f['segments/%s/end' % key] = self.seg[key]['end'][:]
class MultiifoStatmapData(StatmapData):
def __init__(self, data=None, seg=None, attrs=None,
files=None, ifos=None):
groups = ['decimation_factor', 'stat', 'template_id', 'timeslide_id']
for ifo in ifos:
groups += ['%s/time' % ifo]
groups += ['%s/trigger_id' % ifo]
super(MultiifoStatmapData, self).__init__(data=data, files=files,
groups=groups, attrs=attrs,
seg=seg)
def _return(self, data):
ifolist = self.attrs['ifos'].split(' ')
return self.__class__(data=data, attrs=self.attrs, seg=self.seg,
ifos=ifolist)
    def cluster(self, window):
        """ Cluster the dict array, assuming it has the relevant coinc columns:
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
pivot_ifo = self.attrs['pivot']
fixed_ifo = self.attrs['fixed']
if len(self.data['%s/time' % pivot_ifo]) == 0 or len(self.data['%s/time' % fixed_ifo]) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat,
self.data['%s/time' % pivot_ifo],
self.data['%s/time' % fixed_ifo],
self.timeslide_id,
interval,
window)
return self.select(cid)
class FileData(object):
def __init__(self, fname, group=None, columnlist=None, filter_func=None):
"""
Parameters
----------
group : string
Name of group to be read from the file
columnlist : list of strings
Names of columns to be read; if None, use all existing columns
filter_func : string
String should evaluate to a Boolean expression using attributes
of the class instance derived from columns: ex. 'self.snr < 6.5'
"""
if not fname: raise RuntimeError("Didn't get a file!")
self.fname = fname
self.h5file = HFile(fname, "r")
if group is None:
if len(self.h5file.keys()) == 1:
group, = self.h5file.keys()
else:
raise RuntimeError("Didn't get a group!")
self.group_key = group
self.group = self.h5file[group]
self.columns = columnlist if columnlist is not None \
else list(self.group.keys())
self.filter_func = filter_func
self._mask = None
def close(self):
self.h5file.close()
@property
def mask(self):
"""
Create a mask implementing the requested filter on the datasets
Returns
-------
array of Boolean
True for dataset indices to be returned by the get_column method
"""
if self.filter_func is None:
raise RuntimeError("Can't get a mask without a filter function!")
else:
# only evaluate if no previous calculation was done
if self._mask is None:
# get required columns into the namespace as numpy arrays
for column in self.columns:
if column in self.filter_func:
setattr(self, column, self.group[column][:])
self._mask = eval(self.filter_func)
return self._mask
def get_column(self, col):
"""
Method designed to be analogous to legacy pylal.SnglInspiralUtils
functionality
Parameters
----------
col : string
Name of the dataset to be returned
Returns
-------
numpy array
Values from the dataset, filtered if requested
"""
# catch corner case with an empty file (group with no datasets)
if not len(self.group.keys()):
return np.array([])
vals = self.group[col]
if self.filter_func:
return vals[self.mask]
else:
return vals[:]
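    # Illustrative sketch (not part of the original source; the file name and
    # column names are hypothetical): reading a filtered column from a file
    # with a single group.
    #
    #   >>> fd = FileData('H1-TRIGGERS.hdf', group='H1',
    #   ...               columnlist=['snr', 'end_time'],
    #   ...               filter_func='self.snr > 6.5')
    #   >>> loud_end_times = fd.get_column('end_time')
    #   >>> fd.close()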
class DataFromFiles(object):
def __init__(self, filelist, group=None, columnlist=None, filter_func=None):
self.files = filelist
self.group = group
self.columns = columnlist
self.filter_func = filter_func
def get_column(self, col):
"""
Loop over files getting the requested dataset values from each
Parameters
----------
col : string
Name of the dataset to be returned
Returns
-------
numpy array
Values from the dataset, filtered if requested and
concatenated in order of file list
"""
logging.info('getting %s' % col)
vals = []
for f in self.files:
d = FileData(f, group=self.group, columnlist=self.columns,
filter_func=self.filter_func)
vals.append(d.get_column(col))
# Close each file since h5py has an upper limit on the number of
# open file objects (approx. 1000)
d.close()
logging.info('- got %i values' % sum(len(v) for v in vals))
return np.concatenate(vals)
class SingleDetTriggers(object):
"""
Provides easy access to the parameters of single-detector CBC triggers.
"""
# FIXME: Some of these are optional and should be kwargs.
def __init__(self, trig_file, bank_file, veto_file,
segment_name, filter_func, detector, premask=None):
logging.info('Loading triggers')
self.trigs_f = HFile(trig_file, 'r')
self.trigs = self.trigs_f[detector]
self.ifo = detector # convenience attributes
self.detector = detector
if bank_file:
logging.info('Loading bank')
self.bank = HFile(bank_file, 'r')
else:
logging.info('No bank file given')
# empty dict in place of non-existent hdf file
self.bank = {}
if premask is not None:
self.mask = premask
else:
self.mask = np.ones(len(self.trigs['end_time']), dtype=bool)
if veto_file:
logging.info('Applying veto segments')
# veto_mask is an array of indices into the trigger arrays
# giving the surviving triggers
logging.info('%i triggers before vetoes', self.mask.sum())
self.veto_mask, _ = events.veto.indices_outside_segments(
self.end_time, [veto_file],
ifo=detector, segment_name=segment_name)
idx = np.flatnonzero(self.mask)[self.veto_mask]
self.mask[:] = False
self.mask[idx] = True
logging.info('%i triggers remain after vetoes',
len(self.veto_mask))
# FIXME this should use the hfile select interface to avoid
# memory and processing limitations.
if filter_func:
# get required columns into the namespace with dummy attribute
# names to avoid confusion with other class properties
logging.info('Setting up filter function')
for c in self.trigs.keys():
if c in filter_func:
setattr(self, '_'+c, self.trigs[c][:])
for c in self.bank.keys():
if c in filter_func:
# get template parameters corresponding to triggers
setattr(self, '_'+c,
np.array(self.bank[c])[self.trigs['template_id'][:]])
self.filter_mask = eval(filter_func.replace('self.', 'self._'))
# remove the dummy attributes
for c in chain(self.trigs.keys(), self.bank.keys()):
if c in filter_func: delattr(self, '_'+c)
self.mask = self.mask & self.filter_mask
logging.info('%i triggers remain after cut on %s',
sum(self.mask), filter_func)
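    # Illustrative sketch (not part of the original source; the file names
    # are hypothetical): loading triggers with a simple SNR cut and no vetoes,
    # then reading template parameters mapped onto the surviving triggers.
    #
    #   >>> trigs = SingleDetTriggers('H1-HDF_TRIGGER_MERGE.hdf',
    #   ...                           'H1L1-BANK2HDF.hdf', None, None,
    #   ...                           'self.snr > 8', 'H1')
    #   >>> trigs.snr.max(), trigs.mchirp[:3]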
def __getitem__(self, key):
# Is key in the TRIGGER_MERGE file?
try:
return self.get_column(key)
except KeyError:
pass
# Is key in the bank file?
try:
self.checkbank(key)
return self.bank[key][:][self.template_id]
except (RuntimeError, KeyError) as exc:
err_msg = "Cannot find {} in input files".format(key)
raise ValueError(err_msg) from exc
def checkbank(self, param):
if self.bank == {}:
            raise RuntimeError("Can't get %s values without a bank file"
% param)
    def trig_dict(self):
        """Returns dict of the masked trigger values"""
mtrigs = {}
for k in self.trigs:
if len(self.trigs[k]) == len(self.trigs['end_time']):
if self.mask is not None:
mtrigs[k] = self.trigs[k][self.mask]
else:
mtrigs[k] = self.trigs[k][:]
return mtrigs
@classmethod
def get_param_names(cls):
"""Returns a list of plottable CBC parameter variables"""
return [m[0] for m in inspect.getmembers(cls) \
if type(m[1]) == property]
def apply_mask(self, logic_mask):
"""Apply a boolean array to the set of triggers"""
if hasattr(self.mask, 'dtype') and (self.mask.dtype == 'bool'):
orig_indices = self.mask.nonzero()[0][logic_mask]
self.mask[:] = False
self.mask[orig_indices] = True
else:
self.mask = list(np.array(self.mask)[logic_mask])
def mask_to_n_loudest_clustered_events(self, rank_method,
n_loudest=10,
cluster_window=10):
"""Edits the mask property of the class to point to the N loudest
single detector events as ranked by ranking statistic. Events are
clustered so that no more than 1 event within +/- cluster-window will
be considered."""
# If this becomes memory intensive we can optimize
stat = rank_method.rank_stat_single((self.ifo, self.trig_dict()))
if len(stat) == 0:
# No triggers, so just return here
self.stat = np.array([])
return
times = self.end_time
index = stat.argsort()[::-1]
new_times = []
new_index = []
for curr_idx in index:
curr_time = times[curr_idx]
for time in new_times:
if abs(curr_time - time) < cluster_window:
break
else:
# Only get here if no other triggers within cluster window
new_index.append(curr_idx)
new_times.append(curr_time)
if len(new_index) >= n_loudest:
break
index = np.array(new_index)
index.sort()
self.stat = stat[index]
if hasattr(self.mask, 'dtype') and self.mask.dtype == 'bool':
orig_indices = np.flatnonzero(self.mask)[index]
self.mask = list(orig_indices)
elif isinstance(self.mask, list):
self.mask = list(np.array(self.mask)[index])
@property
def template_id(self):
return self.get_column('template_id').astype(int)
@property
def mass1(self):
self.checkbank('mass1')
return self.bank['mass1'][:][self.template_id]
@property
def mass2(self):
self.checkbank('mass2')
return self.bank['mass2'][:][self.template_id]
@property
def spin1z(self):
self.checkbank('spin1z')
return self.bank['spin1z'][:][self.template_id]
@property
def spin2z(self):
self.checkbank('spin2z')
return self.bank['spin2z'][:][self.template_id]
@property
def spin2x(self):
self.checkbank('spin2x')
return self.bank['spin2x'][:][self.template_id]
@property
def spin2y(self):
self.checkbank('spin2y')
return self.bank['spin2y'][:][self.template_id]
@property
def spin1x(self):
self.checkbank('spin1x')
return self.bank['spin1x'][:][self.template_id]
@property
def spin1y(self):
self.checkbank('spin1y')
return self.bank['spin1y'][:][self.template_id]
@property
def inclination(self):
self.checkbank('inclination')
return self.bank['inclination'][:][self.template_id]
@property
def f_lower(self):
self.checkbank('f_lower')
return self.bank['f_lower'][:][self.template_id]
@property
def approximant(self):
self.checkbank('approximant')
return self.bank['approximant'][:][self.template_id]
@property
def mtotal(self):
return self.mass1 + self.mass2
@property
def mchirp(self):
return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2)
@property
def eta(self):
return conversions.eta_from_mass1_mass2(self.mass1, self.mass2)
@property
def effective_spin(self):
# FIXME assumes aligned spins
return conversions.chi_eff(self.mass1, self.mass2,
self.spin1z, self.spin2z)
# IMPROVEME: would like to have a way to access all get_freq and/or
# other pnutils.* names rather than hard-coding each one
# - eg make this part of a fancy interface to the bank file ?
@property
def f_seobnrv2_peak(self):
return pnutils.get_freq('fSEOBNRv2Peak', self.mass1, self.mass2,
self.spin1z, self.spin2z)
@property
def f_seobnrv4_peak(self):
return pnutils.get_freq('fSEOBNRv4Peak', self.mass1, self.mass2,
self.spin1z, self.spin2z)
@property
def end_time(self):
return self.get_column('end_time')
@property
def template_duration(self):
return self.get_column('template_duration')
@property
def snr(self):
return self.get_column('snr')
@property
def sgchisq(self):
return self.get_column('sg_chisq')
@property
def u_vals(self):
return self.get_column('u_vals')
@property
def rchisq(self):
return self.get_column('chisq') \
/ (self.get_column('chisq_dof') * 2 - 2)
@property
def psd_var_val(self):
return self.get_column('psd_var_val')
@property
def newsnr(self):
return ranking.newsnr(self.snr, self.rchisq)
@property
def newsnr_sgveto(self):
return ranking.newsnr_sgveto(self.snr, self.rchisq, self.sgchisq)
@property
def newsnr_sgveto_psdvar(self):
return ranking.newsnr_sgveto_psdvar(self.snr, self.rchisq,
self.sgchisq, self.psd_var_val)
@property
def newsnr_sgveto_psdvar_threshold(self):
return ranking.newsnr_sgveto_psdvar_threshold(self.snr, self.rchisq,
self.sgchisq, self.psd_var_val)
def get_ranking(self, rank_name, **kwargs):
return ranking.get_sngls_ranking_from_trigs(self, rank_name, **kwargs)
def get_column(self, cname):
# Fiducial value that seems to work, not extensively tuned.
MFRAC = 0.3
# If the mask accesses few enough elements then directly use it
# This can be slower than reading in all the elements if most of them
# will be read.
if self.mask is not None and (isinstance(self.mask, list) or \
(len(self.mask.nonzero()[0]) < (len(self.mask) * MFRAC))):
return self.trigs[cname][self.mask]
        # We have a lot of elements to read, so we resort to reading the
        # entire array before masking.
elif self.mask is not None:
return self.trigs[cname][:][self.mask]
else:
return self.trigs[cname][:]
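# The helper below is an illustrative, standalone sketch (not used elsewhere
# in this module) of the read strategy implemented in get_column above: a
# sparse boolean mask is cheaper to serve via fancy indexing on just the
# selected rows, while a dense mask is cheaper to serve with one contiguous
# read followed by an in-memory mask. The function name, the default fraction
# and the file layout in the usage comment are all assumptions.
def _example_read_masked_column(dset, mask, mfrac=0.3):
    """Read a masked HDF5 column using the same density heuristic."""
    nsel = np.count_nonzero(mask)
    if nsel < len(mask) * mfrac:
        # Sparse mask: pull only the selected rows from disk
        return dset[np.flatnonzero(mask)]
    # Dense mask: read the whole dataset once, then mask in memory
    return dset[:][mask]
# Example usage, assuming 'triggers.hdf' contains an 'H1/snr' dataset:
#     with h5py.File('triggers.hdf', 'r') as hf:
#         mask = hf['H1/snr'][:] > 8
#         loud_snr = _example_read_masked_column(hf['H1/snr'], mask)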
class ForegroundTriggers(object):
# Injection files are expected to only have 'exclusive' IFAR/FAP values,
# should use has_inc=False for these.
def __init__(self, coinc_file, bank_file, sngl_files=None, n_loudest=None,
group='foreground', has_inc=True):
self.coinc_file = FileData(coinc_file, group=group)
if 'ifos' in self.coinc_file.h5file.attrs:
self.ifos = self.coinc_file.h5file.attrs['ifos'].split(' ')
else:
raise ValueError("File doesn't have an 'ifos' attribute!",
coinc_file)
self.sngl_files = {}
if sngl_files is not None:
for sngl_file in sngl_files:
curr_dat = FileData(sngl_file)
curr_ifo = curr_dat.group_key
self.sngl_files[curr_ifo] = curr_dat
if not all([ifo in self.sngl_files.keys() for ifo in self.ifos]):
print("sngl_files: {}".format(sngl_files))
print("self.ifos: {}".format(self.ifos))
raise RuntimeError("IFOs in statmap file not all represented "
"by single-detector trigger files.")
if not sorted(self.sngl_files.keys()) == sorted(self.ifos):
logging.warning("WARNING: Single-detector trigger files "
"given for IFOs not in the statmap file")
self.bank_file = HFile(bank_file, "r")
self.n_loudest = n_loudest
self._inclusive = has_inc
self._sort_arr = None
self._template_id = None
self._trig_ids = None
self.get_active_segments()
@property
def sort_arr(self):
if self._sort_arr is None:
if self._inclusive:
try:
ifar = self.coinc_file.get_column('ifar')
except KeyError:
                    logging.warning("WARNING: Can't find inclusive IFAR! "
                                    "Using exclusive IFAR instead ...")
ifar = self.coinc_file.get_column('ifar_exc')
self._inclusive = False
else:
ifar = self.coinc_file.get_column('ifar_exc')
sorting = ifar.argsort()[::-1]
if self.n_loudest:
sorting = sorting[:self.n_loudest]
self._sort_arr = sorting
return self._sort_arr
@property
def template_id(self):
if self._template_id is None:
template_id = self.get_coincfile_array('template_id')
self._template_id = template_id.astype(int)
return self._template_id
@property
def trig_id(self):
if self._trig_ids is not None:
return self._trig_ids
self._trig_ids = {}
for ifo in self.ifos:
self._trig_ids[ifo] = self.get_coincfile_array(ifo + '/trigger_id')
return self._trig_ids
def get_coincfile_array(self, variable):
return self.coinc_file.get_column(variable)[self.sort_arr]
def get_bankfile_array(self, variable):
try:
return self.bank_file[variable][:][self.template_id]
except IndexError:
if len(self.template_id) == 0:
return np.array([])
raise
def get_snglfile_array_dict(self, variable):
return_dict = {}
for ifo in self.ifos:
try:
tid = self.trig_id[ifo]
lgc = tid == -1
# Put in *some* value for the invalid points to avoid failure
# Make sure this doesn't change the cached internal array!
tid = np.copy(tid)
tid[lgc] = 0
# If small number of points don't read the full file
if len(tid) < 1000:
curr = []
hdf_dataset = self.sngl_files[ifo].group[variable]
for idx in tid:
curr.append(hdf_dataset[idx])
curr = np.array(curr)
else:
curr = self.sngl_files[ifo].get_column(variable)[tid]
except IndexError:
if len(self.trig_id[ifo]) == 0:
curr = np.array([])
lgc = curr == 0
else:
raise
return_dict[ifo] = (curr, np.logical_not(lgc))
return return_dict
def get_active_segments(self):
self.active_segments = {}
for ifo in self.ifos:
starts = self.sngl_files[ifo].get_column('search/start_time')
ends = self.sngl_files[ifo].get_column('search/end_time')
self.active_segments[ifo] = veto.start_end_to_segments(starts,
ends)
def get_end_time(self):
times_gen = (self.get_coincfile_array('{}/time'.format(ifo))
for ifo in self.ifos)
ref_times = np.array([mean_if_greater_than_zero(t)[0]
for t in zip(*times_gen)])
return ref_times
def get_ifos(self):
"""
Returns
-------
ifos_list
List of lists of ifo names involved in each foreground event.
Ifos will be listed in the same order as self.ifos
"""
# Ian thinks this could be coded more simply and efficiently
# Note also that effectively the same thing is done as part of the
# to_coinc_hdf_object method
ifo_or_minus = []
for ifo in self.ifos:
ifo_trigs = np.where(self.get_coincfile_array(ifo + '/time') < 0,
'-', ifo)
ifo_or_minus.append(ifo_trigs)
ifos_list = [list(trig[trig != '-'])
for trig in iter(np.array(ifo_or_minus).T)]
return ifos_list
def to_coinc_xml_object(self, file_name):
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
ifos = list(self.sngl_files.keys())
proc_id = ligolw_process.register_to_xmldoc(outdoc, 'pycbc',
{}, instruments=ifos, comment='', version=pycbc_version.git_hash,
cvs_repository='pycbc/'+pycbc_version.git_branch,
cvs_entry_time=pycbc_version.date).process_id
search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
coinc_h5file = self.coinc_file.h5file
try:
start_time = coinc_h5file['segments']['coinc']['start'][:].min()
end_time = coinc_h5file['segments']['coinc']['end'][:].max()
except KeyError:
start_times = []
end_times = []
for ifo_comb in coinc_h5file['segments']:
if ifo_comb == 'foreground_veto':
continue
seg_group = coinc_h5file['segments'][ifo_comb]
start_times.append(seg_group['start'][:].min())
end_times.append(seg_group['end'][:].max())
start_time = min(start_times)
end_time = max(end_times)
num_trigs = len(self.sort_arr)
search_summary = return_search_summary(start_time, end_time,
num_trigs, ifos)
search_summ_table.append(search_summary)
outdoc.childNodes[0].appendChild(search_summ_table)
sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
coinc_def_table = lsctables.New(lsctables.CoincDefTable)
coinc_event_table = lsctables.New(lsctables.CoincTable)
coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
time_slide_table = lsctables.New(lsctables.TimeSlideTable)
# Set up time_slide table
time_slide_id = lsctables.TimeSlideID(0)
for ifo in ifos:
time_slide_row = lsctables.TimeSlide()
time_slide_row.instrument = ifo
time_slide_row.time_slide_id = time_slide_id
time_slide_row.offset = 0
time_slide_row.process_id = proc_id
time_slide_table.append(time_slide_row)
# Set up coinc_definer table
coinc_def_id = lsctables.CoincDefID(0)
coinc_def_row = lsctables.CoincDef()
coinc_def_row.search = "inspiral"
coinc_def_row.description = \
"sngl_inspiral<-->sngl_inspiral coincidences"
coinc_def_row.coinc_def_id = coinc_def_id
coinc_def_row.search_coinc_type = 0
coinc_def_table.append(coinc_def_row)
bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
bank_col_vals = {}
for name in bank_col_names:
bank_col_vals[name] = self.get_bankfile_array(name)
coinc_event_names = ['ifar', 'time', 'fap', 'stat']
coinc_event_vals = {}
for name in coinc_event_names:
if name == 'time':
coinc_event_vals[name] = self.get_end_time()
else:
coinc_event_vals[name] = self.get_coincfile_array(name)
sngl_col_names = ['snr', 'chisq', 'chisq_dof', 'bank_chisq',
'bank_chisq_dof', 'cont_chisq', 'cont_chisq_dof',
'end_time', 'template_duration', 'coa_phase',
'sigmasq']
sngl_col_vals = {}
for name in sngl_col_names:
sngl_col_vals[name] = self.get_snglfile_array_dict(name)
sngl_event_count = 0
for idx in range(len(self.sort_arr)):
# Set up IDs and mapping values
coinc_id = lsctables.CoincID(idx)
# Set up sngls
sngl_mchirps = []
sngl_mtots = []
net_snrsq = 0
triggered_ifos = []
for ifo in ifos:
# If this ifo is not participating in this coincidence then
# ignore it and move on.
if not sngl_col_vals['snr'][ifo][1][idx]:
continue
triggered_ifos += [ifo]
event_id = lsctables.SnglInspiralID(sngl_event_count)
sngl_event_count += 1
sngl = return_empty_sngl()
sngl.event_id = event_id
sngl.ifo = ifo
net_snrsq += sngl_col_vals['snr'][ifo][0][idx]**2
for name in sngl_col_names:
val = sngl_col_vals[name][ifo][0][idx]
if name == 'end_time':
sngl.end = LIGOTimeGPS(val)
else:
setattr(sngl, name, val)
for name in bank_col_names:
val = bank_col_vals[name][idx]
setattr(sngl, name, val)
sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
sngl.mass1, sngl.mass2)
sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
sngl.mass1, sngl.mass2)
sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
# If exact match is not used, get masses from single triggers
sngl_mchirps += [sngl.mchirp]
sngl_mtots += [sngl.mtotal]
sngl_inspiral_table.append(sngl)
# Set up coinc_map entry
coinc_map_row = lsctables.CoincMap()
coinc_map_row.table_name = 'sngl_inspiral'
coinc_map_row.coinc_event_id = coinc_id
coinc_map_row.event_id = event_id
coinc_event_map_table.append(coinc_map_row)
# Take the mean if exact match is not used
sngl_combined_mchirp = np.mean(sngl_mchirps)
sngl_combined_mtot = np.mean(sngl_mtots)
# Set up coinc inspiral and coinc event tables
coinc_event_row = lsctables.Coinc()
coinc_inspiral_row = lsctables.CoincInspiral()
coinc_event_row.coinc_def_id = coinc_def_id
coinc_event_row.nevents = len(triggered_ifos)
            # NB: `coinc_event_row.instruments = triggered_ifos` does not give
            # a correct result with ligo.lw 1.7.1
coinc_event_row.instruments = ','.join(sorted(triggered_ifos))
coinc_inspiral_row.instruments = triggered_ifos
coinc_event_row.time_slide_id = time_slide_id
coinc_event_row.process_id = proc_id
coinc_event_row.coinc_event_id = coinc_id
coinc_inspiral_row.coinc_event_id = coinc_id
coinc_inspiral_row.mchirp = sngl_combined_mchirp
coinc_inspiral_row.mass = sngl_combined_mtot
coinc_inspiral_row.end = LIGOTimeGPS(coinc_event_vals['time'][idx])
coinc_inspiral_row.snr = net_snrsq**0.5
coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
coinc_inspiral_row.combined_far = 1./coinc_event_vals['ifar'][idx]
# Transform to Hz
coinc_inspiral_row.combined_far = \
coinc_inspiral_row.combined_far / YRJUL_SI
coinc_event_row.likelihood = coinc_event_vals['stat'][idx]
coinc_inspiral_row.minimum_duration = 0.
coinc_event_table.append(coinc_event_row)
coinc_inspiral_table.append(coinc_inspiral_row)
outdoc.childNodes[0].appendChild(coinc_def_table)
outdoc.childNodes[0].appendChild(coinc_event_table)
outdoc.childNodes[0].appendChild(coinc_event_map_table)
outdoc.childNodes[0].appendChild(time_slide_table)
outdoc.childNodes[0].appendChild(coinc_inspiral_table)
outdoc.childNodes[0].appendChild(sngl_inspiral_table)
ligolw_utils.write_filename(outdoc, file_name)
def to_coinc_hdf_object(self, file_name):
ofd = h5py.File(file_name,'w')
# Some fields are special cases
logging.info("Outputting search results")
time = self.get_end_time()
# time will be used later to determine active ifos
ofd['time'] = time
if self._inclusive:
ofd['ifar'] = self.get_coincfile_array('ifar')
ofd['p_value'] = self.get_coincfile_array('fap')
ofd['ifar_exclusive'] = self.get_coincfile_array('ifar_exc')
ofd['p_value_exclusive'] = self.get_coincfile_array('fap_exc')
# Coinc fields
for field in ['stat']:
ofd[field] = self.get_coincfile_array(field)
logging.info("Outputting template information")
# Bank fields
for field in ['mass1','mass2','spin1z','spin2z']:
ofd[field] = self.get_bankfile_array(field)
mass1 = self.get_bankfile_array('mass1')
mass2 = self.get_bankfile_array('mass2')
ofd['chirp_mass'], _ = mass1_mass2_to_mchirp_eta(mass1, mass2)
logging.info("Outputting single-trigger information")
logging.info("reduced chisquared")
chisq_vals_valid = self.get_snglfile_array_dict('chisq')
chisq_dof_vals_valid = self.get_snglfile_array_dict('chisq_dof')
for ifo in self.ifos:
chisq_vals = chisq_vals_valid[ifo][0]
chisq_valid = chisq_vals_valid[ifo][1]
chisq_dof_vals = chisq_dof_vals_valid[ifo][0]
rchisq = chisq_vals / (2. * chisq_dof_vals - 2.)
rchisq[np.logical_not(chisq_valid)] = -1.
ofd[ifo + '_chisq'] = rchisq
# Single-detector fields
for field in ['sg_chisq', 'end_time', 'sigmasq',
'psd_var_val']:
logging.info(field)
try:
vals_valid = self.get_snglfile_array_dict(field)
            except KeyError:
                logging.info(field + " is not present in the "
                             "single-detector files")
                # skip this field rather than reusing stale values
                continue
for ifo in self.ifos:
# Some of the values will not be valid for all IFOs,
# the `valid` parameter out of get_snglfile_array_dict
# tells us this, and we set the values to -1
vals = vals_valid[ifo][0]
valid = vals_valid[ifo][1]
vals[np.logical_not(valid)] = -1.
ofd[f'{ifo}_{field}'] = vals
snr_vals_valid = self.get_snglfile_array_dict('snr')
network_snr_sq = np.zeros_like(snr_vals_valid[self.ifos[0]][0])
for ifo in self.ifos:
vals = snr_vals_valid[ifo][0]
valid = snr_vals_valid[ifo][1]
vals[np.logical_not(valid)] = -1.
ofd[ifo + '_snr'] = vals
network_snr_sq[valid] += vals[valid] ** 2.0
ofd['network_snr'] = np.sqrt(network_snr_sq)
logging.info("Triggered detectors")
# Create a n_ifos by n_events matrix, with the ifo letter if the
# event contains a trigger from the ifo, empty string if not
triggered_matrix = [[ifo[0] if v else ''
for v in snr_vals_valid[ifo][1]]
for ifo in self.ifos]
# Combine the ifo letters to make a single string per event
triggered_detectors = [''.join(triggered).encode('ascii')
for triggered in zip(*triggered_matrix)]
ofd.create_dataset('trig', data=triggered_detectors,
dtype='<S3')
logging.info("active detectors")
# Create a n_ifos by n_events matrix, with the ifo letter if the
# ifo was active at the event time, empty string if not
active_matrix = [[ifo[0] if t in self.active_segments[ifo]
else '' for t in time]
for ifo in self.ifos]
# Combine the ifo letters to make a single string per event
active_detectors = [''.join(active_at_time).encode('ascii')
for active_at_time in zip(*active_matrix)]
ofd.create_dataset('obs', data=active_detectors,
dtype='<S3')
ofd.close()
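# Hedged usage sketch for ForegroundTriggers. The file names passed in are
# placeholders for whatever the search workflow produced (a coincident
# statmap file, the template bank and one merged trigger file per detector);
# the helper itself is illustrative and not part of the pycbc workflow.
def _example_foreground_summary(statmap_file, bank_file, sngl_files):
    """Write HDF and XML summaries of the 100 loudest foreground events."""
    trigs = ForegroundTriggers(statmap_file, bank_file,
                               sngl_files=sngl_files, n_loudest=100)
    trigs.to_coinc_hdf_object('loudest_events.hdf')
    trigs.to_coinc_xml_object('loudest_events.xml')
    return trigs.get_coincfile_array('ifar')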
class ReadByTemplate(object):
# Default assignment to {} is OK for a variable used only in __init__
def __init__(self, filename, bank=None, segment_name=None, veto_files=None,
gating_veto_windows={}):
self.filename = filename
self.file = h5py.File(filename, 'r')
self.ifo = tuple(self.file.keys())[0]
        self.valid = None
        self.template_num = None
self.bank = h5py.File(bank, 'r') if bank else {}
# Determine the segments which define the boundaries of valid times
# to use triggers
key = '%s/search/' % self.ifo
s, e = self.file[key + 'start_time'][:], self.file[key + 'end_time'][:]
self.segs = veto.start_end_to_segments(s, e).coalesce()
if segment_name is None:
segment_name = []
if veto_files is None:
veto_files = []
for vfile, name in zip(veto_files, segment_name):
veto_segs = veto.select_segments_by_definer(vfile, ifo=self.ifo,
segment_name=name)
self.segs = (self.segs - veto_segs).coalesce()
if self.ifo in gating_veto_windows:
gating_veto = gating_veto_windows[self.ifo].split(',')
gveto_before = float(gating_veto[0])
gveto_after = float(gating_veto[1])
if gveto_before > 0 or gveto_after < 0:
raise ValueError("Gating veto window values must be negative "
"before gates and positive after gates.")
if not (gveto_before == 0 and gveto_after == 0):
autogate_times = np.unique(
self.file[self.ifo + '/gating/auto/time'][:])
if self.ifo + '/gating/file' in self.file:
detgate_times = self.file[self.ifo + '/gating/file/time'][:]
else:
detgate_times = []
gate_times = np.concatenate((autogate_times, detgate_times))
gating_veto_segs = veto.start_end_to_segments(
gate_times + gveto_before,
gate_times + gveto_after
).coalesce()
self.segs = (self.segs - gating_veto_segs).coalesce()
self.valid = veto.segments_to_start_end(self.segs)
def get_data(self, col, num):
"""Get a column of data for template with id 'num'.
Parameters
----------
col: str
Name of column to read
num: int
The template id to read triggers for
Returns
-------
data: numpy.ndarray
The requested column of data
"""
ref = self.file['%s/%s_template' % (self.ifo, col)][num]
return self.file['%s/%s' % (self.ifo, col)][ref]
def set_template(self, num):
"""Set the active template to read from.
Parameters
----------
num: int
The template id to read triggers for.
Returns
-------
trigger_id: numpy.ndarray
            The indices of this template's triggers.
"""
self.template_num = num
times = self.get_data('end_time', num)
# Determine which of these template's triggers are kept after
# applying vetoes
if self.valid:
self.keep = veto.indices_within_times(times, self.valid[0],
self.valid[1])
# logging.info('applying vetoes')
else:
self.keep = np.arange(0, len(times))
if self.bank != {}:
self.param = {}
if 'parameters' in self.bank.attrs:
for col in self.bank.attrs['parameters']:
self.param[col] = self.bank[col][self.template_num]
else:
for col in self.bank:
self.param[col] = self.bank[col][self.template_num]
# Calculate the trigger id by adding the relative offset in self.keep
        # to the absolute beginning index of this template's triggers stored
        # in 'template_boundaries'
trigger_id = self.keep + \
self.file['%s/template_boundaries' % self.ifo][num]
return trigger_id
    def __getitem__(self, col):
        """ Return the column of data for the currently active template
        after applying vetoes
Parameters
----------
col: str
Name of column to read
Returns
-------
data: numpy.ndarray
The requested column of data
"""
if self.template_num is None:
raise ValueError('You must call set_template to first pick the '
'template to read data from')
data = self.get_data(col, self.template_num)
data = data[self.keep] if self.valid else data
return data
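# Hedged sketch of looping over a bank one template at a time with
# ReadByTemplate, which avoids holding a full trigger file in memory. It
# assumes the usual merged single-detector layout ('<ifo>/snr',
# '<ifo>/snr_template', ...); the function name and return value are
# illustrative only.
def _example_loudest_snr_per_template(trigger_file, bank_file, n_templates):
    """Return the loudest surviving SNR for each template after vetoes."""
    reader = ReadByTemplate(trigger_file, bank=bank_file)
    loudest = []
    for tnum in range(n_templates):
        reader.set_template(tnum)
        snr = reader['snr']
        loudest.append(snr.max() if len(snr) else 0.)
    return np.array(loudest)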
chisq_choices = ['traditional', 'cont', 'bank', 'max_cont_trad', 'sg',
'max_bank_cont', 'max_bank_trad', 'max_bank_cont_trad']
def get_chisq_from_file_choice(hdfile, chisq_choice):
f = hdfile
if chisq_choice in ['traditional','max_cont_trad', 'max_bank_trad',
'max_bank_cont_trad']:
trad_chisq = f['chisq'][:]
# We now need to handle the case where chisq is not actually calculated
# 0 is used as a sentinel value
trad_chisq_dof = f['chisq_dof'][:]
trad_chisq /= (trad_chisq_dof * 2 - 2)
if chisq_choice in ['cont', 'max_cont_trad', 'max_bank_cont',
'max_bank_cont_trad']:
cont_chisq = f['cont_chisq'][:]
cont_chisq_dof = f['cont_chisq_dof'][:]
cont_chisq /= cont_chisq_dof
if chisq_choice in ['bank', 'max_bank_cont', 'max_bank_trad',
'max_bank_cont_trad']:
bank_chisq = f['bank_chisq'][:]
bank_chisq_dof = f['bank_chisq_dof'][:]
bank_chisq /= bank_chisq_dof
if chisq_choice == 'sg':
chisq = f['sg_chisq'][:]
elif chisq_choice == 'traditional':
chisq = trad_chisq
elif chisq_choice == 'cont':
chisq = cont_chisq
elif chisq_choice == 'bank':
chisq = bank_chisq
elif chisq_choice == 'max_cont_trad':
chisq = np.maximum(trad_chisq, cont_chisq)
elif chisq_choice == 'max_bank_cont':
chisq = np.maximum(bank_chisq, cont_chisq)
elif chisq_choice == 'max_bank_trad':
chisq = np.maximum(bank_chisq, trad_chisq)
elif chisq_choice == 'max_bank_cont_trad':
chisq = np.maximum(np.maximum(bank_chisq, cont_chisq), trad_chisq)
else:
err_msg = "Do not recognize --chisq-choice %s" % chisq_choice
raise ValueError(err_msg)
return chisq
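# Hedged usage sketch for get_chisq_from_file_choice: the file name is
# supplied by the caller and the '<ifo>' group is assumed to follow the usual
# single-detector trigger layout with 'snr', 'chisq' and 'chisq_dof' datasets.
def _example_newsnr_from_choice(trigger_file, ifo='H1'):
    """Compute newSNR from the chisq variant selected by --chisq-choice."""
    with h5py.File(trigger_file, 'r') as hdfile:
        reduced_chisq = get_chisq_from_file_choice(hdfile[ifo], 'traditional')
        return ranking.newsnr(hdfile[ifo]['snr'][:], reduced_chisq)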
def save_dict_to_hdf5(dic, filename):
    """
    Save a (possibly nested) dictionary to an HDF5 file.
Parameters
----------
dic:
python dictionary to be converted to hdf5 format
filename:
desired name of hdf5 file
"""
with h5py.File(filename, 'w') as h5file:
recursively_save_dict_contents_to_group(h5file, '/', dic)
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
"""
for key, item in dic.items():
if isinstance(item, (np.ndarray, np.int64, np.float64, str, int, float,
bytes, tuple, list)):
h5file[path + str(key)] = item
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
else:
raise ValueError('Cannot save %s type' % type(item))
def load_hdf5_to_dict(h5file, path):
"""
Parameters
----------
h5file:
h5py file to be loaded as a dictionary
path:
path within h5py file to load: '/' for the whole h5py file
Returns
-------
dic:
dictionary with hdf5 file group content
"""
dic = {}
for key, item in h5file[path].items():
if isinstance(item, h5py.Dataset):
dic[key] = item[()]
elif isinstance(item, h5py.Group):
dic[key] = load_hdf5_to_dict(h5file, path + key + '/')
else:
raise ValueError('Cannot load %s type' % type(item))
return dic
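# Minimal round-trip sketch for save_dict_to_hdf5 / load_hdf5_to_dict. The
# output file name and the toy dictionary are made up; any nesting of the
# supported types handled by the functions above works the same way.
def _example_dict_roundtrip():
    """Save a nested dictionary to HDF5 and read it back."""
    data = {'ifo': 'H1', 'snr': np.arange(5.), 'nested': {'threshold': 5.5}}
    save_dict_to_hdf5(data, 'example_dict.hdf')
    with h5py.File('example_dict.hdf', 'r') as h5file:
        restored = load_hdf5_to_dict(h5file, '/')
    return restored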
def combine_and_copy(f, files, group):
""" Combine the same column from multiple files and save to a third"""
# ensure that the files input is stable for iteration order
assert isinstance(files, (list, tuple))
f[group] = np.concatenate([fi[group][:] if group in fi else \
np.array([], dtype=np.uint32) for fi in files])
def name_all_datasets(files):
assert isinstance(files, (list, tuple))
datasets = []
for fi in files:
datasets += get_all_subkeys(fi, '/')
return set(datasets)
def get_all_subkeys(grp, key):
subkey_list = []
subkey_start = key
if key == '':
grpk = grp
else:
grpk = grp[key]
for sk in grpk.keys():
path = subkey_start + '/' + sk
if isinstance(grp[path], h5py.Dataset):
subkey_list.append(path.lstrip('/'))
else:
subkey_list += get_all_subkeys(grp, path)
# returns an empty list if there is no dataset or subgroup within the group
return subkey_list
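# Hedged sketch combining the helpers above: collect every dataset name that
# appears in any of several HDF files and concatenate them into one merged
# output file. The file names are supplied by the caller and the helper is
# illustrative rather than part of the pycbc workflow.
def _example_merge_files(input_names, output_name):
    """Concatenate all datasets found in the inputs into a single file."""
    inputs = [h5py.File(name, 'r') for name in input_names]
    with h5py.File(output_name, 'w') as out:
        for dset in name_all_datasets(inputs):
            combine_and_copy(out, inputs, dset)
    for fi in inputs:
        fi.close()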
#
# =============================================================================
#
# Checkpointing utilities
#
# =============================================================================
#
def dump_state(state, fp, path=None, dsetname='state',
protocol=pickle.HIGHEST_PROTOCOL):
"""Dumps the given state to an hdf5 file handler.
The state is stored as a raw binary array to ``{path}/{dsetname}`` in the
given hdf5 file handler. If a dataset with the same name and path is
already in the file, the dataset will be resized and overwritten with the
new state data.
Parameters
----------
state : any picklable object
The sampler state to dump to file. Can be the object returned by
any of the samplers' `.state` attribute (a dictionary of dictionaries),
or any picklable object.
fp : h5py.File
An open hdf5 file handler. Must have write capability enabled.
path : str, optional
The path (group name) to store the state dataset to. Default (None)
will result in the array being stored to the top level.
dsetname : str, optional
The name of the dataset to store the binary array to. Default is
``state``.
protocol : int, optional
The protocol version to use for pickling. See the :py:mod:`pickle`
module for more details.
"""
memfp = BytesIO()
pickle.dump(state, memfp, protocol=protocol)
dump_pickle_to_hdf(memfp, fp, path=path, dsetname=dsetname)
def dump_pickle_to_hdf(memfp, fp, path=None, dsetname='state'):
"""Dumps pickled data to an hdf5 file object.
Parameters
----------
memfp : file object
Bytes stream of pickled data.
fp : h5py.File
An open hdf5 file handler. Must have write capability enabled.
path : str, optional
The path (group name) to store the state dataset to. Default (None)
will result in the array being stored to the top level.
dsetname : str, optional
The name of the dataset to store the binary array to. Default is
``state``.
"""
memfp.seek(0)
bdata = np.frombuffer(memfp.read(), dtype='S1')
if path is not None:
dsetname = path + '/' + dsetname
if dsetname not in fp:
fp.create_dataset(dsetname, shape=bdata.shape, maxshape=(None,),
dtype=bdata.dtype)
elif bdata.size != fp[dsetname].shape[0]:
fp[dsetname].resize((bdata.size,))
fp[dsetname][:] = bdata
def load_state(fp, path=None, dsetname='state'):
"""Loads a sampler state from the given hdf5 file object.
The sampler state is expected to be stored as a raw bytes array which can
be loaded by pickle.
Parameters
----------
fp : h5py.File
An open hdf5 file handler.
path : str, optional
The path (group name) that the state data is stored to. Default (None)
is to read from the top level.
dsetname : str, optional
The name of the dataset that the state data is stored to. Default is
``state``.
"""
if path is not None:
fp = fp[path]
bdata = fp[dsetname][()].tobytes()
return pickle.load(BytesIO(bdata))
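# Minimal checkpoint round-trip sketch for dump_state / load_state. The file
# name, group name and toy state below are made up; any picklable object can
# be stored the same way.
def _example_checkpoint_roundtrip():
    """Dump a toy sampler state to an HDF5 file and restore it."""
    state = {'iteration': 42, 'positions': np.random.uniform(size=(8, 2))}
    with h5py.File('example_checkpoint.hdf', 'w') as fp:
        dump_state(state, fp, path='sampler_info')
    with h5py.File('example_checkpoint.hdf', 'r') as fp:
        restored = load_state(fp, path='sampler_info')
    return restored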
__all__ = ('HFile', 'DictArray', 'StatmapData', 'MultiifoStatmapData',
'FileData', 'DataFromFiles', 'SingleDetTriggers',
'ForegroundTriggers', 'ReadByTemplate', 'chisq_choices',
'get_chisq_from_file_choice', 'save_dict_to_hdf5',
'recursively_save_dict_contents_to_group', 'load_hdf5_to_dict',
'combine_and_copy', 'name_all_datasets', 'get_all_subkeys',
'dump_state', 'dump_pickle_to_hdf', 'load_state')
| 55,538
| 36.628049
| 100
|
py
|
pycbc
|
pycbc-master/pycbc/io/__init__.py
|
import logging
from astropy.utils.data import download_file
from .hdf import *
from .record import *
def get_file(url, retry=5, **args):
""" Retrieve file with retry upon failure
Uses the astropy download_file but adds a retry feature for flaky
connections. See astropy for full options
"""
i = 0
while True:
i += 1
try:
return download_file(url, **args)
except Exception as e:
logging.warning("Failed on attempt %d to download %s", i, url)
if i >= retry:
logging.error("Giving up on %s", url)
raise e
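# Hedged usage sketch for get_file: the URL is a placeholder, and the extra
# keyword arguments are passed straight through to astropy's download_file
# (for example, cache=True keeps a local copy between calls).
def _example_download(url='https://example.org/data/example_psd.hdf'):
    """Download a remote file with up to 3 retries; return the local path."""
    return get_file(url, retry=3, cache=True)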
| 621
| 26.043478
| 74
|
py
|
pycbc
|
pycbc-master/pycbc/io/live.py
|
import logging
import os
import pycbc
import numpy
import lal
import json
import copy
from multiprocessing.dummy import threading
from ligo.lw import ligolw
from ligo.lw import lsctables
from ligo.lw import utils as ligolw_utils
from pycbc import version as pycbc_version
from pycbc import pnutils
from pycbc.io.ligolw import (
return_empty_sngl,
create_process_table,
make_psd_xmldoc,
snr_series_to_xml
)
from pycbc.results import generate_asd_plot
from pycbc.results import ifo_color
from pycbc.results import source_color
from pycbc.mchirp_area import calc_probabilities
class CandidateForGraceDB(object):
"""This class provides an interface for uploading candidates to GraceDB.
"""
def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs):
"""Initialize a representation of a zerolag candidate for upload to
GraceDB.
Parameters
----------
coinc_ifos: list of strs
A list of the originally triggered ifos with SNR above threshold
for this candidate, before possible significance followups.
ifos: list of strs
A list of ifos which may have triggers identified in coinc_results
for this candidate: ifos potentially contributing to significance
coinc_results: dict of values
A dictionary of values. The format is defined in
`pycbc/events/coinc.py` and matches the on-disk representation in
the hdf file for this time.
psds: dict of FrequencySeries
Dictionary providing PSD estimates for all detectors observing.
low_frequency_cutoff: float
Minimum valid frequency for the PSD estimates.
high_frequency_cutoff: float, optional
Maximum frequency considered for the PSD estimates. Default None.
skyloc_data: dict of dicts, optional
Dictionary providing SNR time series for each detector, to be used
in sky localization with BAYESTAR. The format should be
`skyloc_data['H1']['snr_series']`. More detectors can be present
than in `ifos`; if so, extra detectors will only be used for sky
localization.
channel_names: dict of strings, optional
Strain channel names for each detector. Will be recorded in the
`sngl_inspiral` table.
padata: PAstroData instance
Organizes info relevant to the astrophysical probability of the
candidate.
mc_area_args: dict of dicts, optional
Dictionary providing arguments to be used in source probability
estimation with `pycbc/mchirp_area.py`.
"""
self.coinc_results = coinc_results
self.psds = kwargs['psds']
self.basename = None
if kwargs.get('gracedb'):
self.gracedb = kwargs['gracedb']
# Determine if the candidate should be marked as HWINJ
self.is_hardware_injection = ('HWINJ' in coinc_results
and coinc_results['HWINJ'])
# We may need to apply a time offset for premerger search
self.time_offset = 0
rtoff = f'foreground/{ifos[0]}/time_offset'
if rtoff in coinc_results:
self.time_offset = coinc_results[rtoff]
# Check for ifos with SNR peaks in coinc_results
self.et_ifos = [i for i in ifos if f'foreground/{i}/end_time' in
coinc_results]
if 'skyloc_data' in kwargs:
sld = kwargs['skyloc_data']
assert len({sld[ifo]['snr_series'].delta_t for ifo in sld}) == 1, \
"delta_t for all ifos do not match"
snr_ifos = sld.keys() # Ifos with SNR time series calculated
self.snr_series = {ifo: sld[ifo]['snr_series'] for ifo in snr_ifos}
# Extra ifos have SNR time series but not sngl inspiral triggers
for ifo in snr_ifos:
# Ifos used for sky loc must have a PSD
assert ifo in self.psds
self.snr_series[ifo].start_time += self.time_offset
else:
self.snr_series = None
snr_ifos = self.et_ifos
# Set up the bare structure of the xml document
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
proc_id = create_process_table(outdoc, program_name='pycbc',
detectors=snr_ifos).process_id
# Set up coinc_definer table
coinc_def_table = lsctables.New(lsctables.CoincDefTable)
coinc_def_id = lsctables.CoincDefID(0)
coinc_def_row = lsctables.CoincDef()
coinc_def_row.search = "inspiral"
coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincs"
coinc_def_row.coinc_def_id = coinc_def_id
coinc_def_row.search_coinc_type = 0
coinc_def_table.append(coinc_def_row)
outdoc.childNodes[0].appendChild(coinc_def_table)
# Set up coinc inspiral and coinc event tables
coinc_id = lsctables.CoincID(0)
coinc_event_table = lsctables.New(lsctables.CoincTable)
coinc_event_row = lsctables.Coinc()
coinc_event_row.coinc_def_id = coinc_def_id
coinc_event_row.nevents = len(snr_ifos)
coinc_event_row.instruments = ','.join(snr_ifos)
coinc_event_row.time_slide_id = lsctables.TimeSlideID(0)
coinc_event_row.process_id = proc_id
coinc_event_row.coinc_event_id = coinc_id
coinc_event_row.likelihood = 0.
coinc_event_table.append(coinc_event_row)
outdoc.childNodes[0].appendChild(coinc_event_table)
# Set up sngls
sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
# Marker variable recording template info from a valid sngl trigger
sngl_populated = None
network_snrsq = 0
for sngl_id, ifo in enumerate(snr_ifos):
sngl = return_empty_sngl(nones=True)
sngl.event_id = lsctables.SnglInspiralID(sngl_id)
sngl.process_id = proc_id
sngl.ifo = ifo
names = [n.split('/')[-1] for n in coinc_results
if f'foreground/{ifo}' in n]
for name in names:
val = coinc_results[f'foreground/{ifo}/{name}']
if name == 'end_time':
val += self.time_offset
sngl.end = lal.LIGOTimeGPS(val)
else:
# Sngl inspirals have a restricted set of attributes
try:
setattr(sngl, name, val)
except AttributeError:
pass
if sngl.mass1 and sngl.mass2:
sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
sngl.mass1, sngl.mass2)
sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
sngl.mass1, sngl.mass2)
sngl_populated = sngl
if sngl.snr:
sngl.eff_distance = sngl.sigmasq ** 0.5 / sngl.snr
network_snrsq += sngl.snr ** 2.0
if 'channel_names' in kwargs and ifo in kwargs['channel_names']:
sngl.channel = kwargs['channel_names'][ifo]
sngl_inspiral_table.append(sngl)
# Set up coinc_map entry
coinc_map_row = lsctables.CoincMap()
coinc_map_row.table_name = 'sngl_inspiral'
coinc_map_row.coinc_event_id = coinc_id
coinc_map_row.event_id = sngl.event_id
coinc_event_map_table.append(coinc_map_row)
if self.snr_series is not None:
snr_series_to_xml(self.snr_series[ifo], outdoc, sngl.event_id)
# Set merger time to the mean of trigger peaks over coinc_results ifos
self.merger_time = \
numpy.mean([coinc_results[f'foreground/{ifo}/end_time'] for ifo in
self.et_ifos]) \
+ self.time_offset
outdoc.childNodes[0].appendChild(coinc_event_map_table)
outdoc.childNodes[0].appendChild(sngl_inspiral_table)
# Set up the coinc inspiral table
coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
coinc_inspiral_row = lsctables.CoincInspiral()
# This seems to be used as FAP, which should not be in gracedb
coinc_inspiral_row.false_alarm_rate = 0.
coinc_inspiral_row.minimum_duration = 0.
coinc_inspiral_row.instruments = tuple(snr_ifos)
coinc_inspiral_row.coinc_event_id = coinc_id
coinc_inspiral_row.mchirp = sngl_populated.mchirp
coinc_inspiral_row.mass = sngl_populated.mtotal
coinc_inspiral_row.end_time = sngl_populated.end_time
coinc_inspiral_row.end_time_ns = sngl_populated.end_time_ns
coinc_inspiral_row.snr = network_snrsq ** 0.5
far = 1.0 / (lal.YRJUL_SI * coinc_results['foreground/ifar'])
coinc_inspiral_row.combined_far = far
coinc_inspiral_table.append(coinc_inspiral_row)
outdoc.childNodes[0].appendChild(coinc_inspiral_table)
# Append the PSDs
psds_lal = {}
for ifo, psd in self.psds.items():
kmin = int(kwargs['low_frequency_cutoff'] / psd.delta_f)
fseries = lal.CreateREAL8FrequencySeries(
"psd", psd.epoch, kwargs['low_frequency_cutoff'], psd.delta_f,
lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
psds_lal[ifo] = fseries
make_psd_xmldoc(psds_lal, outdoc)
# P astro calculation
if 'padata' in kwargs:
if 'p_terr' in kwargs:
raise RuntimeError("Both p_astro calculation data and a "
"previously calculated p_terr value were provided, this "
"doesn't make sense!")
assert len(coinc_ifos) < 3, \
f"p_astro can't handle {coinc_ifos} coinc ifos!"
trigger_data = {
'mass1': sngl_populated.mass1,
'mass2': sngl_populated.mass2,
'spin1z': sngl_populated.spin1z,
'spin2z': sngl_populated.spin2z,
'network_snr': network_snrsq ** 0.5,
'far': far,
'triggered': coinc_ifos,
# Consider all ifos potentially relevant to detection,
# ignore those that only contribute to sky loc
'sensitive': self.et_ifos}
horizons = {i: self.psds[i].dist for i in self.et_ifos}
self.p_astro, self.p_terr = \
kwargs['padata'].do_pastro_calc(trigger_data, horizons)
elif 'p_terr' in kwargs:
self.p_astro, self.p_terr = 1 - kwargs['p_terr'], kwargs['p_terr']
else:
self.p_astro, self.p_terr = None, None
# Source probabilities and hasmassgap estimation
self.probabilities = None
self.hasmassgap = None
if 'mc_area_args' in kwargs:
eff_distances = [sngl.eff_distance for sngl in sngl_inspiral_table]
self.probabilities = calc_probabilities(coinc_inspiral_row.mchirp,
coinc_inspiral_row.snr,
min(eff_distances),
kwargs['mc_area_args'])
if 'embright_mg_max' in kwargs['mc_area_args']:
hasmg_args = copy.deepcopy(kwargs['mc_area_args'])
hasmg_args['mass_gap'] = True
hasmg_args['mass_bdary']['gap_max'] = \
kwargs['mc_area_args']['embright_mg_max']
self.hasmassgap = calc_probabilities(
coinc_inspiral_row.mchirp,
coinc_inspiral_row.snr,
min(eff_distances),
hasmg_args)['Mass Gap']
# Combine p astro and source probs
if self.p_astro is not None and self.probabilities is not None:
self.astro_probs = {cl: pr * self.p_astro for
cl, pr in self.probabilities.items()}
self.astro_probs['Terrestrial'] = self.p_terr
else:
self.astro_probs = None
self.outdoc = outdoc
self.time = sngl_populated.end
def save(self, fname):
"""Write a file representing this candidate in a LIGOLW XML format
compatible with GraceDB.
Parameters
----------
fname: str
Name of file to write to disk.
"""
kwargs = {}
if threading.current_thread() is not threading.main_thread():
# avoid an error due to no ability to do signal handling in threads
kwargs['trap_signals'] = None
ligolw_utils.write_filename(self.outdoc, fname, \
compress='auto', **kwargs)
save_dir = os.path.dirname(fname)
# Save EMBright properties info as json
if self.hasmassgap is not None:
self.embright_file = os.path.join(save_dir, 'pycbc.em_bright.json')
with open(self.embright_file, 'w') as embrightf:
json.dump({'HasMassGap': self.hasmassgap}, embrightf)
logging.info('EM Bright file saved as %s', self.embright_file)
# Save multi-cpt p astro as json
if self.astro_probs is not None:
self.multipa_file = os.path.join(save_dir, 'pycbc.p_astro.json')
with open(self.multipa_file, 'w') as multipaf:
json.dump(self.astro_probs, multipaf)
logging.info('Multi p_astro file saved as %s', self.multipa_file)
# Save source probabilities in a json file
if self.probabilities is not None:
self.prob_file = os.path.join(save_dir, 'src_probs.json')
with open(self.prob_file, 'w') as probf:
json.dump(self.probabilities, probf)
logging.info('Source probabilities file saved as %s', self.prob_file)
# Don't save any other files!
return
# Save p astro / p terr as json
if self.p_astro is not None:
self.pastro_file = os.path.join(save_dir, 'pa_pterr.json')
with open(self.pastro_file, 'w') as pastrof:
json.dump({'p_astro': self.p_astro, 'p_terr': self.p_terr},
pastrof)
logging.info('P_astro file saved as %s', self.pastro_file)
def upload(self, fname, gracedb_server=None, testing=True,
extra_strings=None, search='AllSky', labels=None):
"""Upload this candidate to GraceDB, and annotate it with a few useful
plots and comments.
Parameters
----------
fname: str
The name to give the xml file associated with this trigger
gracedb_server: string, optional
URL to the GraceDB web API service for uploading the event.
If omitted, the default will be used.
testing: bool
Switch to determine if the upload should be sent to gracedb as a
test trigger (True) or a production trigger (False).
search: str
String going into the "search" field of the GraceDB event.
labels: list
Optional list of labels to tag the new event with.
"""
import matplotlib
matplotlib.use('Agg')
import pylab as pl
if fname.endswith('.xml.gz'):
self.basename = fname.replace('.xml.gz', '')
elif fname.endswith('.xml'):
self.basename = fname.replace('.xml', '')
else:
raise ValueError("Upload filename must end in .xml or .xml.gz, got"
" %s" % fname)
# First make sure the event is saved on disk
# as GraceDB operations can fail later
self.save(fname)
        # hardware injections need to be marked with the INJ tag
if self.is_hardware_injection:
labels = (labels or []) + ['INJ']
# connect to GraceDB if we are not reusing a connection
if not hasattr(self, 'gracedb'):
logging.info('Connecting to GraceDB')
gdbargs = {'reload_certificate': True, 'reload_buffer': 300}
if gracedb_server:
gdbargs['service_url'] = gracedb_server
try:
from ligo.gracedb.rest import GraceDb
self.gracedb = GraceDb(**gdbargs)
except Exception as exc:
logging.error('Failed to create GraceDB client')
logging.error(exc)
# create GraceDB event
logging.info('Uploading %s to GraceDB', fname)
group = 'Test' if testing else 'CBC'
gid = None
try:
response = self.gracedb.create_event(
group,
"pycbc",
fname,
search=search,
labels=labels
)
gid = response.json()["graceid"]
logging.info("Uploaded event %s", gid)
except Exception as exc:
logging.error('Failed to create GraceDB event')
logging.error(str(exc))
# Upload em_bright properties JSON
if self.hasmassgap is not None and gid is not None:
try:
self.gracedb.write_log(
gid, 'EM Bright properties JSON file upload',
filename=self.embright_file,
tag_name=['em_bright']
)
logging.info('Uploaded em_bright properties for %s', gid)
except Exception as exc:
logging.error('Failed to upload em_bright properties file '
'for %s', gid)
logging.error(str(exc))
# Upload multi-cpt p_astro JSON
if self.astro_probs is not None and gid is not None:
try:
self.gracedb.write_log(
gid, 'Multi-component p_astro JSON file upload',
filename=self.multipa_file,
tag_name=['p_astro'],
label='PASTRO_READY'
)
logging.info('Uploaded multi p_astro for %s', gid)
except Exception as exc:
logging.error(
'Failed to upload multi p_astro file for %s',
gid
)
logging.error(str(exc))
# If there is p_astro but no probabilities, upload p_astro JSON
if hasattr(self, 'pastro_file') and gid is not None:
try:
self.gracedb.write_log(
gid, '2-component p_astro JSON file upload',
filename=self.pastro_file,
tag_name=['sig_info']
)
logging.info('Uploaded p_astro for %s', gid)
except Exception as exc:
logging.error('Failed to upload p_astro file for %s', gid)
logging.error(str(exc))
# plot the SNR timeseries and noise PSDs
if self.snr_series is not None:
snr_series_fname = self.basename + '.hdf'
snr_series_plot_fname = self.basename + '_snr.png'
asd_series_plot_fname = self.basename + '_asd.png'
pl.figure()
ref_time = int(self.merger_time)
for ifo in sorted(self.snr_series):
curr_snrs = self.snr_series[ifo]
curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
pl.plot(curr_snrs.sample_times - ref_time, abs(curr_snrs),
c=ifo_color(ifo), label=ifo)
if ifo in self.et_ifos:
base = 'foreground/{}/'.format(ifo)
snr = self.coinc_results[base + 'snr']
mt = (self.coinc_results[base + 'end_time']
+ self.time_offset)
pl.plot([mt - ref_time], [snr], c=ifo_color(ifo),
marker='x')
pl.legend()
pl.xlabel('GPS time from {:d} (s)'.format(ref_time))
pl.ylabel('SNR')
pl.savefig(snr_series_plot_fname)
pl.close()
generate_asd_plot(self.psds, asd_series_plot_fname)
# Additionally save the PSDs into the snr_series file
for ifo in sorted(self.psds):
# Undo dynamic range factor
curr_psd = self.psds[ifo].astype(numpy.float64)
curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0
curr_psd.save(snr_series_fname, group='%s/psd' % ifo)
# Upload SNR series in HDF format and plots
if self.snr_series is not None and gid is not None:
try:
self.gracedb.write_log(
gid, 'SNR timeseries HDF file upload',
filename=snr_series_fname
)
self.gracedb.write_log(
gid, 'SNR timeseries plot upload',
filename=snr_series_plot_fname,
tag_name=['background'],
displayName=['SNR timeseries']
)
self.gracedb.write_log(
gid, 'ASD plot upload',
filename=asd_series_plot_fname,
tag_name=['psd'], displayName=['ASDs']
)
except Exception as exc:
logging.error('Failed to upload SNR timeseries and ASD for %s',
gid)
logging.error(str(exc))
# If 'self.prob_file' exists, make pie plot and do uploads.
# The pie plot only shows relative astrophysical source
# probabilities, not p_astro vs p_terrestrial
if hasattr(self, 'prob_file'):
self.prob_plotf = self.prob_file.replace('.json', '.png')
# Don't try to plot zero probabilities
prob_plot = {k: v for (k, v) in self.probabilities.items()
if v != 0.0}
labels, sizes = zip(*prob_plot.items())
colors = [source_color(label) for label in labels]
fig, ax = pl.subplots()
ax.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%',
textprops={'fontsize': 15})
ax.axis('equal')
fig.savefig(self.prob_plotf)
pl.close()
if gid is not None:
try:
self.gracedb.write_log(
gid,
'Source probabilities JSON file upload',
filename=self.prob_file,
tag_name=['pe']
)
logging.info('Uploaded source probabilities for %s', gid)
self.gracedb.write_log(
gid,
'Source probabilities plot upload',
filename=self.prob_plotf,
tag_name=['pe']
)
logging.info(
'Uploaded source probabilities pie chart for %s',
gid
)
except Exception as exc:
logging.error(
'Failed to upload source probability results for %s',
gid
)
logging.error(str(exc))
if gid is not None:
try:
# Add code version info
gracedb_tag_with_version(self.gracedb, gid)
# Add any annotations to the event log
for text in (extra_strings or []):
self.gracedb.write_log(
gid, text, tag_name=['analyst_comments'])
except Exception as exc:
logging.error('Something failed during annotation of analyst'
' comments for event %s on GraceDB.', fname)
logging.error(str(exc))
return gid
def gracedb_tag_with_version(gracedb, event_id):
"""Add a GraceDB log entry reporting PyCBC's version and install location.
"""
version_str = 'Using PyCBC version {}{} at {}'
version_str = version_str.format(
pycbc_version.version,
' (release)' if pycbc_version.release else '',
os.path.dirname(pycbc.__file__))
gracedb.write_log(event_id, version_str)
__all__ = ['CandidateForGraceDB', 'gracedb_tag_with_version']
| 24,753
| 42.352014
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/spins.py
|
# Copyright (C) 2017 Collin Capano, Chris Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides spin distributions of CBCs.
"""
import numpy
from pycbc import conversions
from pycbc.distributions.uniform import Uniform
from pycbc.distributions.angular import UniformAngle
from pycbc.distributions.power_law import UniformPowerLaw
from pycbc.distributions.arbitrary import Arbitrary
from pycbc.distributions.bounded import get_param_bounds_from_config, \
VARARGS_DELIM, BoundedDist
class IndependentChiPChiEff(Arbitrary):
r"""A distribution such that :math:`\chi_{\mathrm{eff}}` and
:math:`\chi_p` are uniform and independent of each other.
To ensure constraints are applied correctly, this distribution produces all
three components of both spins as well as the component masses.
Parameters
----------
mass1 : BoundedDist, Bounds, or tuple
The distribution or bounds to use for mass1. Must be either a
BoundedDist giving the distribution on mass1, or bounds (as
either a Bounds instance or a tuple) giving the minimum and maximum
values to use for mass1. If the latter, a Uniform distribution will
be used.
mass2 : BoundedDist, Bounds, or tuple
The distribution or bounds to use for mass2. Syntax is the same as
mass1.
chi_eff : BoundedDist, Bounds, or tuple; optional
        The distribution or bounds to use for :math:`\chi_{\mathrm{eff}}`.
        Syntax is the same as mass1, except that None may also be passed. In
        that case, `(-1, 1)` will be used for the bounds. Default is None.
chi_a : BoundedDist, Bounds, or tuple; optional
        The distribution or bounds to use for :math:`\chi_a`. Syntax is the
same as mass1, except that None may also be passed. In that case,
`(-1, 1)` will be used for the bounds. Default is None.
xi_bounds : Bounds or tuple, optional
The bounds to use for :math:`\xi_1` and :math:`\xi_2`. Must be
:math:`\in (0, 1)`. If None (the default), will be `(0, 1)`.
nsamples : int, optional
The number of samples to use for the internal kde. The larger the
number of samples, the more accurate the pdf will be, but the longer
it will take to evaluate. Default is 10000.
seed : int, optional
Seed value to use for the number generator for the kde. The current
random state of numpy will be saved prior to setting the seed. After
the samples are generated, the state will be set back to what it was.
If None provided, will use 0.
"""
name = "independent_chip_chieff"
_params = ['mass1', 'mass2', 'xi1', 'xi2', 'chi_eff', 'chi_a',
'phi_a', 'phi_s']
def __init__(self, mass1=None, mass2=None, chi_eff=None, chi_a=None,
xi_bounds=None, nsamples=None, seed=None):
if isinstance(mass1, BoundedDist):
self.mass1_distr = mass1
else:
self.mass1_distr = Uniform(mass1=mass1)
if isinstance(mass2, BoundedDist):
self.mass2_distr = mass2
else:
self.mass2_distr = Uniform(mass2=mass2)
# chi eff
if isinstance(chi_eff, BoundedDist):
self.chieff_distr = chi_eff
else:
if chi_eff is None:
chi_eff = (-1., 1.)
self.chieff_distr = Uniform(chi_eff=chi_eff)
if isinstance(chi_a, BoundedDist):
self.chia_distr = chi_a
else:
if chi_a is None:
chi_a = (-1., 1.)
self.chia_distr = Uniform(chi_a=chi_a)
# xis
if xi_bounds is None:
xi_bounds = (0, 1.)
if (xi_bounds[0] > 1. or xi_bounds[0] < 0.) or (
xi_bounds[1] > 1. or xi_bounds[1] < 0.):
            raise ValueError("xi bounds must be in [0, 1]")
self.xi1_distr = UniformPowerLaw(dim=0.5, xi1=xi_bounds)
self.xi2_distr = UniformPowerLaw(dim=0.5, xi2=xi_bounds)
# the angles
self.phia_distr = UniformAngle(phi_a=(0,2))
self.phis_distr = UniformAngle(phi_s=(0,2))
self.distributions = {'mass1': self.mass1_distr,
'mass2': self.mass2_distr,
'xi1': self.xi1_distr,
'xi2': self.xi2_distr,
'chi_eff': self.chieff_distr,
'chi_a': self.chia_distr,
'phi_a': self.phia_distr,
'phi_s': self.phis_distr}
# create random variables for the kde
if nsamples is None:
nsamples = 1e4
# save the current random state
rstate = numpy.random.get_state()
# set the seed
if seed is None:
seed = 0
numpy.random.seed(seed)
rvals = self.rvs(size=int(nsamples))
# reset the random state back to what it was
numpy.random.set_state(rstate)
bounds = dict(b for distr in self.distributions.values()
for b in distr.bounds.items())
super(IndependentChiPChiEff, self).__init__(mass1=rvals['mass1'],
mass2=rvals['mass2'], xi1=rvals['xi1'], xi2=rvals['xi2'],
chi_eff=rvals['chi_eff'], chi_a=rvals['chi_a'],
phi_a=rvals['phi_a'], phi_s=rvals['phi_s'],
bounds=bounds)
def _constraints(self, values):
"""Applies physical constraints to the given parameter values.
Parameters
----------
values : {arr or dict}
A dictionary or structured array giving the values.
Returns
-------
bool
Whether or not the values satisfy physical
"""
mass1, mass2, phi_a, phi_s, chi_eff, chi_a, xi1, xi2, _ = \
conversions.ensurearray(values['mass1'], values['mass2'],
values['phi_a'], values['phi_s'],
values['chi_eff'], values['chi_a'],
values['xi1'], values['xi2'])
s1x = conversions.spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s)
s2x = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2,
xi2, phi_a, phi_s)
s1y = conversions.spin1y_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s)
s2y = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2,
xi2, phi_a, phi_s)
s1z = conversions.spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2,
chi_eff, chi_a)
s2z = conversions.spin2z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2,
chi_eff, chi_a)
test = ((s1x**2. + s1y**2. + s1z**2.) < 1.) & \
((s2x**2. + s2y**2. + s2z**2.) < 1.)
return test
def __contains__(self, params):
"""Determines whether the given values are in each parameter's bounds
and satisfy the constraints.
"""
isin = all([params in dist for dist in self.distributions.values()])
if not isin:
return False
        # values are in each distribution's bounds; now apply the constraints
return self._constraints(params)
    def _draw(self, size=1, **kwargs):
        """Draws random samples without applying physical constraints.
"""
# draw masses
try:
mass1 = kwargs['mass1']
except KeyError:
mass1 = self.mass1_distr.rvs(size=size)['mass1']
try:
mass2 = kwargs['mass2']
except KeyError:
mass2 = self.mass2_distr.rvs(size=size)['mass2']
# draw angles
try:
phi_a = kwargs['phi_a']
except KeyError:
phi_a = self.phia_distr.rvs(size=size)['phi_a']
try:
phi_s = kwargs['phi_s']
except KeyError:
phi_s = self.phis_distr.rvs(size=size)['phi_s']
# draw chi_eff, chi_a
try:
chi_eff = kwargs['chi_eff']
except KeyError:
chi_eff = self.chieff_distr.rvs(size=size)['chi_eff']
try:
chi_a = kwargs['chi_a']
except KeyError:
chi_a = self.chia_distr.rvs(size=size)['chi_a']
# draw xis
try:
xi1 = kwargs['xi1']
except KeyError:
xi1 = self.xi1_distr.rvs(size=size)['xi1']
try:
xi2 = kwargs['xi2']
except KeyError:
xi2 = self.xi2_distr.rvs(size=size)['xi2']
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
arr['mass1'] = mass1
arr['mass2'] = mass2
arr['phi_a'] = phi_a
arr['phi_s'] = phi_s
arr['chi_eff'] = chi_eff
arr['chi_a'] = chi_a
arr['xi1'] = xi1
arr['xi2'] = xi2
return arr
def apply_boundary_conditions(self, **kwargs):
return kwargs
def rvs(self, size=1, **kwargs):
"""Returns random values for all of the parameters.
"""
size = int(size)
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
remaining = size
keepidx = 0
while remaining:
draws = self._draw(size=remaining, **kwargs)
mask = self._constraints(draws)
addpts = mask.sum()
arr[keepidx:keepidx+addpts] = draws[mask]
keepidx += addpts
remaining = size - keepidx
return arr
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
IndependentChiPChiEff
A distribution instance.
"""
tag = variable_args
variable_args = variable_args.split(VARARGS_DELIM)
if not set(variable_args) == set(cls._params):
raise ValueError("Not all parameters used by this distribution "
"included in tag portion of section name")
# get the bounds for the setable parameters
mass1 = get_param_bounds_from_config(cp, section, tag, 'mass1')
mass2 = get_param_bounds_from_config(cp, section, tag, 'mass2')
chi_eff = get_param_bounds_from_config(cp, section, tag, 'chi_eff')
chi_a = get_param_bounds_from_config(cp, section, tag, 'chi_a')
xi_bounds = get_param_bounds_from_config(cp, section, tag, 'xi_bounds')
if cp.has_option('-'.join([section, tag]), 'nsamples'):
nsamples = int(cp.get('-'.join([section, tag]), 'nsamples'))
else:
nsamples = None
return cls(mass1=mass1, mass2=mass2, chi_eff=chi_eff, chi_a=chi_a,
xi_bounds=xi_bounds, nsamples=nsamples)
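# Hedged usage sketch for IndependentChiPChiEff: the mass bounds are arbitrary
# example values and nsamples is kept small so the internal kde builds
# quickly. Samples returned by rvs already satisfy the |spin| < 1 constraint
# applied in _constraints above.
def _example_chip_chieff_draws(ndraws=100):
    """Draw samples with uniform, independent chi_p and chi_eff."""
    dist = IndependentChiPChiEff(mass1=(10., 50.), mass2=(10., 50.),
                                 nsamples=1000)
    return dist.rvs(size=ndraws)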
| 12,002
| 40.247423
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/uniform.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating uniform distributions.
"""
import numpy
from pycbc.distributions import bounded
class Uniform(bounded.BoundedDist):
"""
A uniform distribution on the given parameters. The parameters are
independent of each other. Instances of this class can be called like
a function. By default, logpdf will be called, but this can be changed
by setting the class's __call__ method to its pdf method.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and their
corresponding bounds, as either tuples or a `boundaries.Bounds`
instance.
Examples
--------
Create a 2 dimensional uniform distribution:
>>> from pycbc import distributions
>>> dist = distributions.Uniform(mass1=(10.,50.), mass2=(10.,50.))
Get the log of the pdf at a particular value:
>>> dist.logpdf(mass1=25., mass2=10.)
-7.3777589082278725
Do the same by calling the distribution:
>>> dist(mass1=25., mass2=10.)
-7.3777589082278725
Generate some random values:
>>> dist.rvs(size=3)
array([(36.90885758394699, 51.294212757995254),
(39.109058546060346, 13.36220145743631),
(34.49594465315212, 47.531953033719454)],
dtype=[('mass1', '<f8'), ('mass2', '<f8')])
Initialize a uniform distribution using a boundaries.Bounds instance,
with cyclic bounds:
>>> dist = distributions.Uniform(phi=Bounds(10, 50, cyclic=True))
Apply boundary conditions to a value:
>>> dist.apply_boundary_conditions(phi=60.)
    {'phi': array(20.0)}
The boundary conditions are applied to the value before evaluating the pdf;
note that the following returns a non-zero pdf. If the bounds were not
cyclic, the following would return 0:
>>> dist.pdf(phi=60.)
0.025
"""
name = 'uniform'
def __init__(self, **params):
super(Uniform, self).__init__(**params)
# compute the norm and save
# temporarily suppress numpy divide by 0 warning
with numpy.errstate(divide="ignore"):
self._lognorm = -sum([numpy.log(abs(bnd[1]-bnd[0]))
for bnd in self._bounds.values()])
self._norm = numpy.exp(self._lognorm)
@property
def norm(self):
"""float: The normalization of the multi-dimensional pdf."""
return self._norm
@property
def lognorm(self):
"""float: The log of the normalization"""
return self._lognorm
def _cdfinv_param(self, param, value):
"""Return the inverse cdf to map the unit interval to parameter bounds.
"""
lower_bound = self._bounds[param][0]
upper_bound = self._bounds[param][1]
return (upper_bound - lower_bound) * value + lower_bound
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized arguments are
ignored.
"""
if kwargs in self:
return self._norm
else:
return 0.
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params. Unrecognized
arguments are ignored.
"""
if kwargs in self:
return self._lognorm
else:
return -numpy.inf
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
``VARARGS_DELIM``. These must appear in the "tag" part
of the section header.
Returns
-------
Uniform
            A distribution instance from the pycbc.distributions module.
"""
return super(Uniform, cls).from_config(cp, section, variable_args,
bounds_required=True)
__all__ = ['Uniform']
| 5,286
| 33.555556
| 79
|
py
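As a quick check of the Uniform class above, the following minimal sketch (assuming pycbc is installed) maps unit-interval draws through cdfinv, which is exactly what rvs() does internally:
import numpy
from pycbc import distributions

dist = distributions.Uniform(mass1=(10., 50.))
u = numpy.random.uniform(0, 1, size=4)
# cdfinv rescales the unit interval onto [10, 50); rvs() wraps this
print(dist.cdfinv(mass1=u))
print(dist.rvs(size=4)['mass1'])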
|
pycbc
|
pycbc-master/pycbc/distributions/mass.py
|
# Copyright (C) 2021 Yifan Wang
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides classes for evaluating distributions for mchirp and
q (i.e., mass ratio) from uniform component mass.
"""
import numpy
from scipy.interpolate import interp1d
from scipy.special import hyp2f1
from pycbc.distributions import power_law
from pycbc.distributions import bounded
class MchirpfromUniformMass1Mass2(power_law.UniformPowerLaw):
r"""A distribution for chirp mass from uniform component mass +
    constraints given by chirp mass. This is a special case of UniformPowerLaw
with index 1. For more details see UniformPowerLaw.
The parameters (i.e. `**params`) are independent of each other. Instances
of this class can be called like a function. By default, `logpdf` will be
called, but this can be changed by setting the class's `__call__` method
to its pdf method.
Derivation for the probability density function:
.. math::
P(m_1,m_2)dm_1dm_2 = P(\mathcal{M}_c,q)d\mathcal{M}_cdq
Where :math:`\mathcal{M}_c` is chirp mass and :math:`q` is mass ratio,
:math:`m_1` and :math:`m_2` are component masses. The jacobian to transform
chirp mass and mass ratio to component masses is
.. math::
\frac{\partial(m_1,m_2)}{\partial(\mathcal{M}_c,q)} = \
\mathcal{M}_c \left(\frac{1+q}{q^3}\right)^{2/5}
(https://github.com/gwastro/pycbc/blob/master/pycbc/transforms.py#L416.)
    Because :math:`P(m_1,m_2)` is constant,
    .. math::
        P(\mathcal{M}_c,q) = P(\mathcal{M}_c)P(q)\propto
        \mathcal{M}_c \left(\frac{1+q}{q^3}\right)^{2/5}.
Therefore,
.. math::
P(\mathcal{M}_c) \propto \mathcal{M}_c
and
.. math::
P(q) \propto \left(\frac{1+q}{q^3}\right)^{2/5}
Examples
--------
Generate 10000 random numbers from this distribution in [5,100]
>>> from pycbc import distributions as dist
    >>> minmc, maxmc, size = 5, 100, 10000
>>> mc = dist.MchirpfromUniformMass1Mass2(value=(minmc,maxmc)).rvs(size)
The settings in the configuration file for pycbc_inference should be
.. code-block:: ini
[variable_params]
mchirp =
[prior-mchirp]
name = mchirp_from_uniform_mass1_mass2
min-mchirp = 10
max-mchirp = 80
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and their
corresponding bounds, as either tuples or a `boundaries.Bounds`
instance.
"""
name = "mchirp_from_uniform_mass1_mass2"
def __init__(self, dim=2, **params):
super(MchirpfromUniformMass1Mass2, self).__init__(dim=2, **params)
class QfromUniformMass1Mass2(bounded.BoundedDist):
r"""A distribution for mass ratio (i.e., q) from uniform component mass
+ constraints given by q.
The parameters (i.e. `**params`) are independent of each other. Instances
of this class can be called like a function. By default, `logpdf` will be
called, but this can be changed by setting the class's `__call__` method
to its pdf method.
For mathematical derivation see the documentation above in the class
`MchirpfromUniformMass1Mass2`.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and their
corresponding bounds, as either tuples or a `boundaries.Bounds`
instance.
Examples
--------
Generate 10000 random numbers from this distribution in [1,8]
>>> from pycbc import distributions as dist
    >>> minq, maxq, size = 1, 8, 10000
>>> q = dist.QfromUniformMass1Mass2(value=(minq,maxq)).rvs(size)
"""
name = 'q_from_uniform_mass1_mass2'
def __init__(self, **params):
super(QfromUniformMass1Mass2, self).__init__(**params)
self._norm = 1.0
self._lognorm = 0.0
for p in self._params:
self._norm /= self._cdf_param(p, self._bounds[p][1]) - \
self._cdf_param(p, self._bounds[p][0])
self._lognorm = numpy.log(self._norm)
@property
def norm(self):
"""float: The normalization of the multi-dimensional pdf."""
return self._norm
@property
def lognorm(self):
"""float: The log of the normalization."""
return self._lognorm
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized arguments are
ignored.
"""
for p in self._params:
if p not in kwargs.keys():
raise ValueError(
'Missing parameter {} to construct pdf.'.format(p))
if kwargs in self:
pdf = self._norm * \
numpy.prod([(1.+kwargs[p])**(2./5)/kwargs[p]**(6./5)
for p in self._params])
return float(pdf)
else:
return 0.0
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params. Unrecognized
arguments are ignored.
"""
for p in self._params:
if p not in kwargs.keys():
raise ValueError(
'Missing parameter {} to construct logpdf.'.format(p))
if kwargs in self:
return numpy.log(self._pdf(**kwargs))
else:
return -numpy.inf
def _cdf_param(self, param, value):
r""">>> from sympy import *
>>> x = Symbol('x')
>>> integrate((1+x)**(2/5)/x**(6/5))
Output:
_
-0.2 |_ /-0.4, -0.2 | I*pi\
-5.0*x * | | | x*e |
2 1 \ 0.8 | /
"""
if param in self._params:
return -5. * value**(-1./5) * hyp2f1(-2./5, -1./5, 4./5, -value)
else:
            raise ValueError('{} is not constructed yet.'.format(param))
def _cdfinv_param(self, param, value):
"""Return the inverse cdf to map the unit interval to parameter bounds.
Note that value should be uniform in [0,1]."""
if (numpy.array(value) < 0).any() or (numpy.array(value) > 1).any():
raise ValueError(
'q_from_uniform_m1_m2 cdfinv requires input in [0,1].')
if param in self._params:
lower_bound = self._bounds[param][0]
upper_bound = self._bounds[param][1]
q_array = numpy.linspace(
lower_bound, upper_bound, num=1000, endpoint=True)
q_invcdf_interp = interp1d(self._cdf_param(param, q_array),
q_array, kind='cubic',
bounds_error=True)
return q_invcdf_interp(
(self._cdf_param(param, upper_bound) -
self._cdf_param(param, lower_bound)) * value +
self._cdf_param(param, lower_bound))
else:
            raise ValueError('{} is not constructed yet.'.format(param))
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from this distribution.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params.
"""
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
for (p, _) in dtype:
uniformcdfvalue = numpy.random.uniform(0, 1, size=size)
arr[p] = self._cdfinv_param(p, uniformcdfvalue)
return arr
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Example:
.. code-block:: ini
[variable_params]
q =
[prior-q]
name = q_from_uniform_mass1_mass2
min-q = 1
max-q = 8
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
``VARARGS_DELIM``. These must appear in the "tag" part
of the section header.
Returns
-------
QfromUniformMass1Mass2
A distribution instance from the pycbc.distributions.bounded
module.
"""
return super(QfromUniformMass1Mass2, cls).from_config(
cp, section, variable_args, bounds_required=True)
__all__ = ["MchirpfromUniformMass1Mass2", "QfromUniformMass1Mass2"]
| 10,257
| 34.010239
| 79
|
py
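A minimal sketch (assuming pycbc is installed) of drawing chirp-mass and mass-ratio samples with the two classes above; the parameter names 'mchirp' and 'q' are arbitrary keyword choices for this example:
from pycbc import distributions

mc_dist = distributions.MchirpfromUniformMass1Mass2(mchirp=(5., 100.))
q_dist = distributions.QfromUniformMass1Mass2(q=(1., 8.))
# p(mchirp) rises linearly; p(q) falls off as ((1+q)/q^3)^(2/5)
mc = mc_dist.rvs(size=10000)['mchirp']
q = q_dist.rvs(size=10000)['q']
print(mc.mean(), q.mean())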
|
pycbc
|
pycbc-master/pycbc/distributions/bounded.py
|
# Copyright (C) 2016 Collin Capano, Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating distributions with bounds.
"""
import warnings
from configparser import Error
import numpy
from pycbc import boundaries
from pycbc import VARARGS_DELIM
#
# Distributions for priors
#
def get_param_bounds_from_config(cp, section, tag, param):
"""Gets bounds for the given parameter from a section in a config file.
Minimum and maximum values for bounds are specified by adding
`min-{param}` and `max-{param}` options, where `{param}` is the name of
the parameter. The types of boundary (open, closed, or reflected) to create
may also be specified by adding options `btype-min-{param}` and
    `btype-max-{param}`. Cyclic conditions can be applied by adding the option
`cyclic-{param}`. If no `btype` arguments are provided, the
left bound will be closed and the right open.
For example, the following will create right-open bounds for parameter
`foo`:
.. code-block:: ini
[{section}-{tag}]
min-foo = -1
max-foo = 1
This would make the boundaries cyclic:
.. code-block:: ini
[{section}-{tag}]
min-foo = -1
max-foo = 1
cyclic-foo =
For more details on boundary types and their meaning, see
`boundaries.Bounds`.
    If the parameter is not found in the section, None will be returned (in
this case, all `btype` and `cyclic` arguments are ignored for that
parameter). If bounds are specified, both a minimum and maximum must be
provided, else a Value or Type Error will be raised.
Parameters
----------
cp : ConfigParser instance
The config file.
section : str
The name of the section.
tag : str
Any tag in the section name. The full section name searched for in
the config file is `{section}(-{tag})`.
param : str
The name of the parameter to retrieve bounds for.
Returns
-------
bounds : {Bounds instance | None}
If bounds were provided, a `boundaries.Bounds` instance
representing the bounds. Otherwise, `None`.
"""
try:
minbnd = float(cp.get_opt_tag(section, 'min-'+param, tag))
except Error:
minbnd = None
try:
maxbnd = float(cp.get_opt_tag(section, 'max-'+param, tag))
except Error:
maxbnd = None
if minbnd is None and maxbnd is None:
bnds = None
elif minbnd is None or maxbnd is None:
raise ValueError("if specifying bounds for %s, " %(param) +
"you must provide both a minimum and a maximum")
else:
bndargs = {'min_bound': minbnd, 'max_bound': maxbnd}
# try to get any other conditions, if provided
try:
minbtype = cp.get_opt_tag(section, 'btype-min-{}'.format(param),
tag)
except Error:
minbtype = 'closed'
try:
maxbtype = cp.get_opt_tag(section, 'btype-max-{}'.format(param),
tag)
except Error:
maxbtype = 'open'
bndargs.update({'btype_min': minbtype, 'btype_max': maxbtype})
cyclic = cp.has_option_tag(section, 'cyclic-{}'.format(param), tag)
bndargs.update({'cyclic': cyclic})
bnds = boundaries.Bounds(**bndargs)
return bnds
def bounded_from_config(cls, cp, section, variable_args,
bounds_required=False, additional_opts=None):
"""Returns a bounded distribution based on a configuration file. The
parameters for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cls : pycbc.prior class
The class to initialize with.
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
bounds_required : {False, bool}
If True, raise a ValueError if a min and max are not provided for
every parameter. Otherwise, the prior will be initialized with the
parameter set to None. Even if bounds are not required, a
ValueError will be raised if only one bound is provided; i.e.,
        either both bounds need to be provided or no bounds.
additional_opts : {None, dict}
Provide additional options to be passed to the distribution class;
should be a dictionary specifying option -> value. If an option is
provided that also exists in the config file, the value provided will
be used instead of being read from the file.
Returns
-------
cls
An instance of the given class.
"""
tag = variable_args
variable_args = variable_args.split(VARARGS_DELIM)
if additional_opts is None:
additional_opts = {}
# list of args that are used to construct distribution
special_args = ["name"] + \
['min-{}'.format(arg) for arg in variable_args] + \
['max-{}'.format(arg) for arg in variable_args] + \
['btype-min-{}'.format(arg) for arg in variable_args] + \
['btype-max-{}'.format(arg) for arg in variable_args] + \
['cyclic-{}'.format(arg) for arg in variable_args] + \
list(additional_opts.keys())
# get a dict with bounds as value
dist_args = {}
for param in variable_args:
bounds = get_param_bounds_from_config(cp, section, tag, param)
if bounds_required and bounds is None:
raise ValueError("min and/or max missing for parameter %s"%(
param))
dist_args[param] = bounds
# add any additional options that user put in that section
for key in cp.options("-".join([section, tag])):
# ignore options that are already included
if key in special_args:
continue
# check if option can be cast as a float
val = cp.get_opt_tag(section, key, tag)
try:
val = float(val)
except ValueError:
pass
# add option
dist_args.update({key:val})
dist_args.update(additional_opts)
    # construct the distribution and add it to the list
return cls(**dist_args)
class BoundedDist(object):
"""
A generic class for storing common properties of distributions in which
each parameter has a minimum and maximum value.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and their
corresponding bounds, as either tuples or a `boundaries.Bounds`
instance.
"""
def __init__(self, **params):
# convert input bounds to Bounds class, if necessary
for param,bnds in params.items():
if bnds is None:
params[param] = boundaries.Bounds()
elif not isinstance(bnds, boundaries.Bounds):
params[param] = boundaries.Bounds(bnds[0], bnds[1])
# warn the user about reflected boundaries
if isinstance(bnds, boundaries.Bounds) and (
bnds.min.name == 'reflected' or
bnds.max.name == 'reflected'):
warnings.warn("Param {} has one or more ".format(param) +
"reflected boundaries. Reflected boundaries "
"can cause issues when used in an MCMC.")
self._bounds = params
self._params = sorted(list(params.keys()))
@property
def params(self):
"""list of strings: The list of parameter names."""
return self._params
@property
def bounds(self):
"""dict: A dictionary of the parameter names and their bounds."""
return self._bounds
def __contains__(self, params):
try:
return all(self._bounds[p].contains_conditioned(params[p])
for p in self._params)
except KeyError:
raise ValueError("must provide all parameters [%s]" %(
', '.join(self._params)))
def apply_boundary_conditions(self, **kwargs):
"""Applies any boundary conditions to the given values (e.g., applying
cyclic conditions, and/or reflecting values off of boundaries). This
is done by running `apply_conditions` of each bounds in self on the
corresponding value. See `boundaries.Bounds.apply_conditions` for
details.
Parameters
----------
\**kwargs :
The keyword args should be the name of a parameter and value to
apply its boundary conditions to. The arguments need not include
all of the parameters in self. Any unrecognized arguments are
ignored.
Returns
-------
dict
A dictionary of the parameter names and the conditioned values.
"""
return dict([[p, self._bounds[p].apply_conditions(val)]
for p,val in kwargs.items() if p in self._bounds])
def pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized arguments are
ignored. Any boundary conditions are applied to the values before the
pdf is evaluated.
"""
return self._pdf(**self.apply_boundary_conditions(**kwargs))
def _pdf(self, **kwargs):
"""The underlying pdf function called by `self.pdf`. This must be set
by any class that inherits from this class. Otherwise, a
`NotImplementedError` is raised.
"""
raise NotImplementedError("pdf function not set")
def logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params.
Unrecognized arguments are ignored. Any boundary conditions are
applied to the values before the pdf is evaluated.
"""
return self._logpdf(**self.apply_boundary_conditions(**kwargs))
def _logpdf(self, **kwargs):
"""The underlying log pdf function called by `self.logpdf`. This must
be set by any class that inherits from this class. Otherwise, a
`NotImplementedError` is raised.
"""
raise NotImplementedError("pdf function not set")
__call__ = logpdf
def _cdfinv_param(self, param, value):
"""Return the cdfinv for a single given parameter """
raise NotImplementedError("inverse cdf not set")
def cdfinv(self, **kwds):
"""Return the inverse cdf to map the unit interval to parameter bounds.
You must provide a keyword for every parameter.
"""
updated = {}
for param in self.params:
updated[param] = self._cdfinv_param(param, kwds[param])
return updated
def rvs(self, size=1, **kwds):
"Draw random value"
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
draw = {}
for param in self.params:
draw[param] = numpy.random.uniform(0, 1, size=size)
exp = self.cdfinv(**draw)
for param in self.params:
arr[param] = exp[param]
return arr
@classmethod
def from_config(cls, cp, section, variable_args, bounds_required=False):
"""Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
bounds_required : {False, bool}
If True, raise a ValueError if a min and max are not provided for
every parameter. Otherwise, the prior will be initialized with the
parameter set to None. Even if bounds are not required, a
ValueError will be raised if only one bound is provided; i.e.,
            either both bounds need to be provided or no bounds.
Returns
-------
BoundedDist
A distribution instance from the pycbc.distribution subpackage.
"""
return bounded_from_config(cls, cp, section, variable_args,
bounds_required=bounds_required)
| 13,606
| 37.00838
| 79
|
py
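To illustrate the hooks BoundedDist expects from subclasses (_pdf, _logpdf and _cdfinv_param), here is a minimal, hypothetical subclass; the LinearRamp name and its density are inventions for illustration only, not part of pycbc:
import numpy
from pycbc.distributions import bounded

class LinearRamp(bounded.BoundedDist):
    """p(x) proportional to x on each parameter's bounds [a, b)."""
    name = 'linear_ramp'

    def _pdf(self, **kwargs):
        if kwargs in self:
            return numpy.prod([2. * kwargs[p] /
                               (self._bounds[p][1]**2 - self._bounds[p][0]**2)
                               for p in self._params])
        return 0.

    def _logpdf(self, **kwargs):
        p = self._pdf(**kwargs)
        return numpy.log(p) if p > 0. else -numpy.inf

    def _cdfinv_param(self, param, value):
        # invert F(x) = (x**2 - a**2) / (b**2 - a**2)
        a, b = self._bounds[param]
        return numpy.sqrt(a**2 + (b**2 - a**2) * value)

ramp = LinearRamp(x=(0., 2.))
print(ramp.rvs(size=3))     # uses BoundedDist.rvs -> cdfinv -> _cdfinv_param
print(ramp(x=1.5))          # __call__ is logpdf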
|
pycbc
|
pycbc-master/pycbc/distributions/power_law.py
|
# Copyright (C) 2016 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating distributions where the
probability density function is a power law.
"""
import numpy
from pycbc.distributions import bounded
class UniformPowerLaw(bounded.BoundedDist):
r"""
    A power-law distribution, corresponding to a uniform distribution over
    the volume element of the given dimension. The parameters are
independent of each other. Instances of this class can be called like
a function. By default, logpdf will be called, but this can be changed
by setting the class's __call__ method to its pdf method.
The cumulative distribution function (CDF) will be the ratio of volumes:
.. math::
F(r) = \frac{V(r)}{V(R)}
Where :math:`R` is the radius of the sphere. So we can write our
probability density function (PDF) as:
.. math::
f(r) = c r^n
    For generality we use :math:`n` for the exponent of the volume element,
    e.g. :math:`n=2` for a 3-dimensional sphere, and use
    :math:`c` as a general constant.
So now we calculate the CDF in general for this type of PDF:
.. math::
F(r) = \int f(r) dr = \int c r^n dr = \frac{1}{n + 1} c r^{n + 1} + k
    Requiring the CDF to equal 0 at radius :math:`r_{l}` and 1 at radius
    :math:`r_{h}` fixes the constants :math:`c` and :math:`k`. Subtracting
    the two conditions gives:
    .. math::
        1 = \frac{1}{n + 1} c \left((r_{h})^{n + 1} - (r_{l})^{n + 1}\right)
    so that :math:`c = (n + 1) / ((r_{h})^{n + 1} - (r_{l})^{n + 1})`.
    And :math:`k` is:
    .. math::
        k = - \frac{r_{l}^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
    (For :math:`r_{l} = 0` this reduces to :math:`c = (n + 1) / (r_{h})^{n + 1}`.)
    The CDF is therefore:
.. math::
F(r) = \frac{1}{(r_{h})^{n + 1} - (r_{l})^{n + 1}} r^{n + 1} - \frac{r_{l}^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
And the PDF is the derivative of the CDF:
.. math::
f(r) = \frac{(n + 1)}{(r_{h})^{n + 1} - (r_{l})^{n + 1}} (r)^n
    Now we use the probability integral transform to sample the distribution
    from uniform random numbers. To do this we set the CDF equal to a uniform
    number :math:`u`:
    .. math::
        u = F(r) = \frac{1}{(r_{h})^{n + 1} - (r_{l})^{n + 1}} r^{n + 1} - \frac{r_{l}^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
    Equivalently,
    .. math::
        u = \frac{r^{n + 1} - (r_{l})^{n + 1}}{(r_{h})^{n + 1} - (r_{l})^{n + 1}}
And solving for :math:`r` gives:
.. math::
r = ( ((r_{h})^{n + 1} - (r_{l})^{n + 1}) u + (r_{l})^{n + 1})^{\frac{1}{n + 1}}
    Therefore the radius can be sampled by drawing :math:`u` uniformly in
    :math:`[0, 1)` and evaluating the expression above.
    Parameters
    ----------
    \**params :
The keyword arguments should provide the names of parameters and their
corresponding bounds, as either tuples or a `boundaries.Bounds`
instance.
Attributes
----------
dim : int
The dimension of volume space. In the notation above `dim`
is :math:`n+1`. For a 3-dimensional sphere this is 3.
"""
name = "uniform_power_law"
def __init__(self, dim=None, **params):
super(UniformPowerLaw, self).__init__(**params)
self.dim = dim
self._norm = 1.0
self._lognorm = 0.0
for p in self._params:
self._norm *= self.dim / \
(self._bounds[p][1]**(self.dim) -
self._bounds[p][0]**(self.dim))
self._lognorm = numpy.log(self._norm)
@property
def norm(self):
"""float: The normalization of the multi-dimensional pdf."""
return self._norm
@property
def lognorm(self):
"""float: The log of the normalization."""
return self._lognorm
def _cdfinv_param(self, param, value):
"""Return inverse of cdf to map unit interval to parameter bounds.
"""
n = self.dim - 1
r_l = self._bounds[param][0]
r_h = self._bounds[param][1]
new_value = ((r_h**(n+1) - r_l**(n+1))*value + r_l**(n+1))**(1./(n+1))
return new_value
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized arguments are
ignored.
"""
for p in self._params:
if p not in kwargs.keys():
raise ValueError(
'Missing parameter {} to construct pdf.'.format(p))
if kwargs in self:
pdf = self._norm * \
numpy.prod([(kwargs[p])**(self.dim - 1)
for p in self._params])
return float(pdf)
else:
return 0.0
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params. Unrecognized
arguments are ignored.
"""
for p in self._params:
if p not in kwargs.keys():
raise ValueError(
'Missing parameter {} to construct pdf.'.format(p))
if kwargs in self:
log_pdf = self._lognorm + \
(self.dim - 1) * \
numpy.log([kwargs[p] for p in self._params]).sum()
return log_pdf
else:
return -numpy.inf
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file. The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
        UniformPowerLaw
            A distribution instance from the pycbc.distributions module.
"""
return super(UniformPowerLaw, cls).from_config(cp, section,
variable_args,
bounds_required=True)
class UniformRadius(UniformPowerLaw):
""" For a uniform distribution in volume using spherical coordinates, this
    is the distribution to use for the radius.
For more details see UniformPowerLaw.
"""
name = "uniform_radius"
def __init__(self, dim=3, **params):
super(UniformRadius, self).__init__(dim=3, **params)
__all__ = ["UniformPowerLaw", "UniformRadius"]
| 7,732
| 34.472477
| 130
|
py
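A small sketch (assuming pycbc is installed and UniformRadius is exposed in the pycbc.distributions namespace like the other classes here) of sampling a radius uniform in a 3-dimensional volume:
import numpy
from pycbc import distributions

r_dist = distributions.UniformRadius(radius=(0., 100.))
r = r_dist.rvs(size=100000)['radius']
# CDF(r) = (r/100)**3, so the median should be close to 100 * 0.5**(1./3) ~ 79.4
print(numpy.median(r))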
|
pycbc
|
pycbc-master/pycbc/distributions/fixedsamples.py
|
# Copyright (C) 2020 Alexander Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating distributions based on a fixed
set of points
"""
import logging
import numpy
import numpy.random
from pycbc import VARARGS_DELIM
class FixedSamples(object):
"""
A distribution consisting of a collection of a large number of fixed points.
Only these values can be drawn from, so the number of points may need to be
    large to properly cover the parameter space. This distribution is intended
    to aid in using nested samplers for semi-arbitrary or complicated
distributions where it is possible to provide or draw samples but less
straightforward to provide an analytic invcdf. This class numerically
approximates the invcdf for 1 or 2 dimensional distributions
(but no higher).
Parameters
----------
    params : list
        The list of parameters this distribution should use.
samples : dict of arrays or FieldArray
Sampled points of the distribution. May contain transformed parameters
which are different from the original distribution. If so, an inverse
mapping is provided to associate points with other parameters provided.
"""
name = "fixed_samples"
def __init__(self, params, samples):
self.params = params
self.samples = samples
self.p1 = self.samples[params[0]]
self.frac = len(self.p1)**0.5 / len(self.p1)
self.sort = self.p1.argsort()
self.p1sorted = self.p1[self.sort]
assert len(numpy.unique(self.p1)) == len(self.p1)
if len(params) > 2:
raise ValueError("Only one or two parameters supported "
"for fixed sample distribution")
def rvs(self, size=1, **kwds):
"Draw random value"
i = numpy.random.randint(0, high=len(self.p1), size=size)
return {p: self.samples[p][i] for p in self.params}
def cdfinv(self, **original):
"""Map unit cube to parameters in the space"""
new = {}
#First dimension
u1 = original[self.params[0]]
i1 = int(round(u1 * len(self.p1)))
if i1 >= len(self.p1):
i1 = len(self.p1) - 1
if i1 < 0:
i1 = 0
new[self.params[0]] = p1v = self.p1sorted[i1]
if len(self.params) == 1:
return new
# possible second dimension, probably shouldn't
# do more dimensions than this
u2 = original[self.params[1]]
l = numpy.searchsorted(self.p1sorted, p1v * (1 - self.frac))
r = numpy.searchsorted(self.p1sorted, p1v * (1 + self.frac))
if r < l:
l, r = r, l
region = numpy.array(self.sort[l:r], ndmin=1)
p2 = self.samples[self.params[1]]
p2part = numpy.array(p2[region], ndmin=1)
l = p2part.argsort()
p2part = numpy.array(p2part[l], ndmin=1)
i2 = int(round(u2 * len(p2part)))
if i2 >= len(p2part):
i2 = len(p2part) - 1
if i2 < 0:
i2 = 0
new[self.params[1]] = p2part[i2]
p1part = numpy.array(self.p1[region[l]], ndmin=1)
new[self.params[0]] = p1part[i2]
return new
def apply_boundary_conditions(self, **params):
""" Apply boundary conditions (none here) """
return params
def __call__(self, **kwds):
""" Dummy function, not the actual pdf """
return 0
@classmethod
def from_config(cls, cp, section, tag):
""" Return instance based on config file
Return a new instance based on the config file. This will draw from
a single distribution section provided in the config file and
apply a single transformation section if desired. If a transformation
is applied, an inverse mapping is also provided for use in the config
file.
"""
from pycbc.distributions import read_distributions_from_config
from pycbc.transforms import (read_transforms_from_config,
apply_transforms, BaseTransform)
from pycbc.transforms import transforms as global_transforms
params = tag.split(VARARGS_DELIM)
subname = cp.get_opt_tag(section, 'subname', tag)
size = cp.get_opt_tag(section, 'sample-size', tag)
distsec = '{}_sample'.format(subname)
dist = read_distributions_from_config(cp, section=distsec)
if len(dist) > 1:
raise ValueError("Fixed sample distrubtion only supports a single"
" distribution to sample from.")
logging.info('Drawing samples for fixed sample distribution:%s', params)
samples = dist[0].rvs(size=int(float(size)))
samples = {p: samples[p] for p in samples.dtype.names}
transec = '{}_transform'.format(subname)
trans = read_transforms_from_config(cp, section=transec)
if len(trans) > 0:
trans = trans[0]
samples = apply_transforms(samples, [trans])
p1 = samples[params[0]]
# We have transformed parameters, so automatically provide the
# inverse transform for use in passing to waveform approximants
class Thook(BaseTransform):
name = subname
_inputs = trans.outputs
_outputs = trans.inputs
p1name = params[0]
sort = p1.argsort()
p1sorted = p1[sort]
def transform(self, maps):
idx = numpy.searchsorted(self.p1sorted, maps[self.p1name])
out = {p: samples[p][self.sort[idx]] for p in self.outputs}
return self.format_output(maps, out)
global_transforms[Thook.name] = Thook
return cls(params, samples)
__all__ = ['FixedSamples']
| 6,510
| 37.755952
| 80
|
py
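A minimal sketch of using FixedSamples directly (bypassing from_config); the points here are made up on the spot, and note that the class asserts the first parameter's values are unique:
import numpy
from pycbc.distributions.fixedsamples import FixedSamples

pts = {'x': numpy.random.normal(size=10000)}   # unique with near certainty
dist = FixedSamples(['x'], pts)
print(dist.rvs(size=5))          # dict of arrays drawn from the stored points
print(dist.cdfinv(x=0.5))        # unit-cube coordinate mapped to ~the median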
|
pycbc
|
pycbc-master/pycbc/distributions/utils.py
|
# Copyright (C) 2021 Shichao Wu
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for drawing samples from a standalone .ini file
in a Python script, rather than in the command line.
"""
import numpy as np
from pycbc.types.config import InterpolatingConfigParser
from pycbc import transforms
from pycbc import distributions
def prior_from_config(cp, prior_section='prior'):
"""Loads a prior distribution from the given config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
The config file to read.
    prior_section : str, optional
        The section prefix to retrieve the prior from. Default is 'prior'.
Returns
-------
distributions.JointDistribution
The prior distribution.
"""
# Read variable and static parameters from the config file
variable_params, static_params = distributions.read_params_from_config(
cp, prior_section=prior_section, vargs_section='variable_params',
sargs_section='static_params')
# Read waveform_transforms to apply to priors from the config file
if any(cp.get_subsections('waveform_transforms')):
waveform_transforms = transforms.read_transforms_from_config(
cp, 'waveform_transforms')
else:
waveform_transforms = None
# Read constraints to apply to priors from the config file
constraints = distributions.read_constraints_from_config(
cp, transforms=waveform_transforms, static_args=static_params)
# Get PyCBC distribution instances for each variable parameter in the
# config file
dists = distributions.read_distributions_from_config(cp, prior_section)
# construct class that will return draws from the prior
return distributions.JointDistribution(variable_params, *dists,
**{"constraints": constraints})
def draw_samples_from_config(path, num=1, seed=150914):
r""" Generate sampling points from a standalone .ini file.
Parameters
----------
path : str
The path to the .ini file.
num : int
The number of samples.
seed: int
The random seed for sampling.
Returns
--------
samples : pycbc.io.record.FieldArray
The parameter values and names of sample(s).
Examples
--------
Draw a sample from the distribution defined in the .ini file:
>>> import numpy as np
>>> from pycbc.distributions.utils import draw_samples_from_config
>>> # A path to the .ini file.
>>> CONFIG_PATH = "./pycbc_bbh_prior.ini"
>>> random_seed = np.random.randint(low=0, high=2**32-1)
>>> sample = draw_samples_from_config(
>>> path=CONFIG_PATH, num=1, seed=random_seed)
>>> # Print all parameters.
>>> print(sample.fieldnames)
>>> print(sample)
>>> # Print a certain parameter, for example 'mass1'.
>>> print(sample[0]['mass1'])
"""
np.random.seed(seed)
# Initialise InterpolatingConfigParser class.
config_parser = InterpolatingConfigParser()
# Read the file
file = open(path, 'r')
config_parser.read_file(file)
file.close()
# Construct class that will draw the samples.
prior_dists = prior_from_config(cp=config_parser)
# Draw samples from prior distribution.
samples = prior_dists.rvs(size=int(num))
# Apply parameter transformation.
if any(config_parser.get_subsections('waveform_transforms')):
waveform_transforms = transforms.read_transforms_from_config(
config_parser, 'waveform_transforms')
samples = transforms.apply_transforms(samples, waveform_transforms)
return samples
| 4,617
| 33.207407
| 79
|
py
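A sketch of driving draw_samples_from_config end to end by writing a small .ini file first; the exact sections a given pycbc version requires may differ, so treat the config contents below as an assumption rather than a canonical template:
import tempfile
from pycbc.distributions.utils import draw_samples_from_config

ini_text = """
[variable_params]
mass1 =

[static_params]

[prior-mass1]
name = uniform
min-mass1 = 10
max-mass1 = 50
"""
with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
    f.write(ini_text)
    path = f.name
samples = draw_samples_from_config(path=path, num=10, seed=1)
print(samples['mass1'])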
|
pycbc
|
pycbc-master/pycbc/distributions/sky_location.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides classes for evaluating sky distributions in
right ascension and declination.
"""
import logging
import numpy
from scipy.spatial.transform import Rotation
from pycbc.distributions import angular
from pycbc import VARARGS_DELIM
from pycbc.io import FieldArray
class UniformSky(angular.UniformSolidAngle):
"""A distribution that is uniform on the sky. This is the same as
UniformSolidAngle, except that the polar angle varies from pi/2 (the north
pole) to -pi/2 (the south pole) instead of 0 to pi. Also, the default
names are "dec" (declination) for the polar angle and "ra" (right
ascension) for the azimuthal angle, instead of "theta" and "phi".
"""
name = 'uniform_sky'
_polardistcls = angular.CosAngle
_default_polar_angle = 'dec'
_default_azimuthal_angle = 'ra'
class FisherSky():
"""A distribution that returns a random angle drawn from an approximate
`Von_Mises-Fisher distribution`_. Assumes that the Fisher concentration
parameter is large, so that we can draw the samples from a simple
rotationally-invariant distribution centered at the North Pole (which
factors as a uniform distribution for the right ascension, and a Rayleigh
distribution for the declination, as described in
    `Fabrycky and Winn 2009 ApJ 696 1230`_) and then rotate the samples to be
centered around the specified mean position. As in UniformSky, the
declination varies from π/2 to -π/2 and the right ascension varies from
0 to 2π.
.. _Von_Mises-Fisher distribution:
http://en.wikipedia.org/wiki/Von_Mises-Fisher_distribution
.. _Fabrycky and Winn 2009 ApJ 696 1230:
https://doi.org/10.1088/0004-637X/696/2/1230
.. _Briggs et al 1999 ApJS 122 503:
https://doi.org/10.1086/313221
Parameters
----------
mean_ra: float
RA of the center of the distribution.
mean_dec: float
Declination of the center of the distribution.
sigma: float
Spread of the distribution. For the precise interpretation, see Eq 8
of `Briggs et al 1999 ApJS 122 503`_. This should be smaller than
about 20 deg for the approximation to be valid.
angle_unit: str
Unit for the angle parameters: either "deg" or "rad".
"""
name = 'fisher_sky'
_params = ['ra', 'dec']
def __init__(self, **params):
if params['angle_unit'] not in ['deg', 'rad']:
raise ValueError("Only deg or rad is allowed as angle unit")
mean_ra = params['mean_ra']
mean_dec = params['mean_dec']
sigma = params['sigma']
if params['angle_unit'] == 'deg':
mean_ra = numpy.deg2rad(mean_ra)
mean_dec = numpy.deg2rad(mean_dec)
sigma = numpy.deg2rad(sigma)
if mean_ra < 0 or mean_ra > 2 * numpy.pi:
raise ValueError(
f'The mean RA must be between 0 and 2π, {mean_ra} rad given'
)
if mean_dec < -numpy.pi/2 or mean_dec > numpy.pi/2:
raise ValueError(
'The mean declination must be between '
f'-π/2 and π/2, {mean_dec} rad given'
)
if sigma <= 0 or sigma > 2 * numpy.pi:
raise ValueError(
'Sigma must be positive and smaller than 2π '
'(preferably much smaller)'
)
if sigma > 0.35:
logging.warning(
'Warning: sigma = %s rad is probably too large for the '
'Fisher approximation to be valid', sigma
)
self.rayleigh_scale = 0.66 * sigma
# Prepare a rotation that puts the North Pole at the mean position
self.rotation = Rotation.from_euler(
'yz',
[numpy.pi / 2 - mean_dec, mean_ra]
)
@property
def params(self):
return self._params
@classmethod
def from_config(cls, cp, section, variable_args):
tag = variable_args
variable_args = variable_args.split(VARARGS_DELIM)
if set(variable_args) != set(cls._params):
raise ValueError("Not all parameters used by this distribution "
"included in tag portion of section name")
mean_ra = float(cp.get_opt_tag(section, 'mean_ra', tag))
mean_dec = float(cp.get_opt_tag(section, 'mean_dec', tag))
sigma = float(cp.get_opt_tag(section, 'sigma', tag))
angle_unit = cp.get_opt_tag(section, 'angle_unit', tag)
return cls(
mean_ra=mean_ra,
mean_dec=mean_dec,
sigma=sigma,
angle_unit=angle_unit
)
def rvs(self, size):
# Draw samples from a distribution centered on the North pole
np_ra = numpy.random.uniform(
low=0,
high=(2*numpy.pi),
size=size
)
np_dec = numpy.random.rayleigh(
scale=self.rayleigh_scale,
size=size
)
# Convert the samples to intermediate cartesian representation
np_cart = numpy.empty(shape=(size, 3))
np_cart[:, 0] = numpy.cos(np_ra) * numpy.sin(np_dec)
np_cart[:, 1] = numpy.sin(np_ra) * numpy.sin(np_dec)
np_cart[:, 2] = numpy.cos(np_dec)
# Rotate the samples according to our pre-built rotation
rot_cart = self.rotation.apply(np_cart)
# Convert the samples back to spherical coordinates.
# Some unpleasant conditional operations are needed
# to get the correct angle convention.
rot_radec = FieldArray(
size,
dtype=[
('ra', '<f8'),
('dec', '<f8')
]
)
rot_radec['ra'] = numpy.arctan2(rot_cart[:, 1], rot_cart[:, 0])
neg_mask = rot_radec['ra'] < 0
rot_radec['ra'][neg_mask] += 2 * numpy.pi
rot_radec['dec'] = numpy.arcsin(rot_cart[:, 2])
return rot_radec
__all__ = ['UniformSky', 'FisherSky']
| 6,709
| 37.342857
| 78
|
py
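A minimal sketch (assuming pycbc is installed) of drawing sky positions clustered around a chosen point with FisherSky; the returned 'ra' and 'dec' fields are in radians:
from pycbc.distributions.sky_location import FisherSky

dist = FisherSky(mean_ra=30., mean_dec=-10., sigma=2., angle_unit='deg')
pts = dist.rvs(size=1000)
# samples cluster near ra ~ 0.52 rad, dec ~ -0.17 rad
print(pts['ra'].mean(), pts['dec'].mean())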
|
pycbc
|
pycbc-master/pycbc/distributions/external.py
|
# Copyright (C) 2020 Alexander Nitz, 2022 Shichao Wu
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating PDF, logPDF, CDF and inverse CDF
from external arbitrary distributions, and drawing samples from them.
"""
import importlib
import numpy as np
import scipy.integrate as scipy_integrate
import scipy.interpolate as scipy_interpolate
from pycbc import VARARGS_DELIM
class External(object):
""" Distribution defined by external cdfinv and logpdf functions
To add to an inference configuration file:
.. code-block:: ini
[prior-param1+param2]
name = external
module = custom_mod
logpdf = custom_function_name
cdfinv = custom_function_name2
Or call `DistributionFunctionFromFile` in the .ini file:
.. code-block:: ini
[prior-param]
name = external_func_fromfile
module = pycbc.distributions.external
file_path = path
column_index = index
logpdf = _logpdf
cdfinv = _cdfinv
Parameters
----------
params : list
list of parameter names
custom_mod : module
module from which logpdf and cdfinv functions can be imported
logpdf : function
function which returns the logpdf
cdfinv : function
function which applies the invcdf
Examples
--------
    To instantiate by hand, an example of the function format is given below.
    You must provide the logpdf function, and you may provide either the rvs
    or the cdfinv function.
If the cdfinv is provided, but not the rvs, the random values will
be calculated using the cdfinv function.
>>> import numpy
>>> params = ['x', 'y']
>>> def logpdf(x=None, y=None):
... p = numpy.ones(len(x))
... return p
>>>
>>> def cdfinv(**kwds):
... return kwds
>>> e = External(['x', 'y'], logpdf, cdfinv=cdfinv)
>>> e.rvs(size=10)
"""
name = "external"
def __init__(self, params=None, logpdf=None,
rvs=None, cdfinv=None, **kwds):
self.params = params
self.logpdf = logpdf
self.cdfinv = cdfinv
self._rvs = rvs
if not (rvs or cdfinv):
raise ValueError("Must provide either rvs or cdfinv")
def rvs(self, size=1, **kwds):
"Draw random value"
if self._rvs:
return self._rvs(size=size)
samples = {param: np.random.uniform(0, 1, size=size)
for param in self.params}
return self.cdfinv(**samples)
def apply_boundary_conditions(self, **params):
return params
def __call__(self, **kwds):
return self.logpdf(**kwds)
@classmethod
def from_config(cls, cp, section, variable_args):
tag = variable_args
params = variable_args.split(VARARGS_DELIM)
modulestr = cp.get_opt_tag(section, 'module', tag)
if modulestr == "pycbc.distributions.external":
file_path = cp.get_opt_tag(section, 'file_path', tag)
mod = DistributionFunctionFromFile(
file_path=file_path,
column_index=cp.get_opt_tag(section, 'column_index', tag))
else:
mod = importlib.import_module(modulestr)
logpdfstr = cp.get_opt_tag(section, 'logpdf', tag)
logpdf = getattr(mod, logpdfstr)
cdfinv = rvs = None
if cp.has_option_tag(section, 'cdfinv', tag):
cdfinvstr = cp.get_opt_tag(section, 'cdfinv', tag)
cdfinv = getattr(mod, cdfinvstr)
if cp.has_option_tag(section, 'rvs', tag):
rvsstr = cp.get_opt_tag(section, 'rvs', tag)
rvs = getattr(mod, rvsstr)
if modulestr == "pycbc.distributions.external":
return cls(params=params, file_path=file_path,
column_index=mod.column_index, rvs=rvs, cdfinv=cdfinv)
return cls(params=params, logpdf=logpdf, rvs=rvs, cdfinv=cdfinv)
class DistributionFunctionFromFile(External):
r"""Evaluating PDF, logPDF, CDF and inverse CDF from the external
density function.
Instances of this class can be called like a distribution in the .ini file,
when used with `pycbc.distributions.external.External`. Please see the
example in the `External` class.
Parameters
----------
parameter : {'file_path', 'column_index'}
The path of the external density function's .txt file, and the
column index of the density distribution. By default, the first column
should be the values of a certain parameter, such as "mass", other
columns should be the corresponding density values (as a function of
that parameter). If you add the name of the parameter in the first
row, please add the '#' at the beginning.
\**kwargs :
All other keyword args are passed to `scipy.integrate.quad` to control
the numerical accuracy of the inverse CDF.
If not be provided, will use the default values in `self.__init__`.
Notes
-----
This class is different from `pycbc.distributions.arbitrary.FromFile`,
which needs samples from the hdf file to construct the PDF by using KDE.
This class reads in any continuous functions of the parameter.
"""
name = "external_func_fromfile"
def __init__(self, params=None, file_path=None,
column_index=None, **kwargs):
        if 'cdfinv' in kwargs:
super().__init__(cdfinv=kwargs['cdfinv'])
else:
super().__init__(cdfinv=not None)
self.params = params
self.data = np.loadtxt(fname=file_path, unpack=True, comments='#')
self.column_index = int(column_index)
self.epsabs = kwargs.get('epsabs', 1.49e-05)
self.epsrel = kwargs.get('epsrel', 1.49e-05)
self.x_list = np.linspace(self.data[0][0], self.data[0][-1], 1000)
self.interp = {'pdf': callable, 'cdf': callable, 'cdfinv': callable}
if not file_path:
raise ValueError("Must provide the path to density function file.")
def _pdf(self, x, **kwargs):
"""Calculate and interpolate the PDF by using the given density
function, then return the corresponding value at the given x."""
if self.interp['pdf'] == callable:
func_unnorm = scipy_interpolate.interp1d(
self.data[0], self.data[self.column_index])
norm_const = scipy_integrate.quad(
func_unnorm, self.data[0][0], self.data[0][-1],
epsabs=self.epsabs, epsrel=self.epsrel, limit=500,
**kwargs)[0]
self.interp['pdf'] = scipy_interpolate.interp1d(
self.data[0], self.data[self.column_index]/norm_const)
pdf_val = np.float64(self.interp['pdf'](x))
return pdf_val
def _logpdf(self, x, **kwargs):
"""Calculate the logPDF by calling `pdf` function."""
return np.log(self._pdf(x, **kwargs))
def _cdf(self, x, **kwargs):
"""Calculate and interpolate the CDF, then return the corresponding
value at the given x."""
if self.interp['cdf'] == callable:
cdf_list = []
for x_val in self.x_list:
cdf_x = scipy_integrate.quad(
self._pdf, self.data[0][0], x_val, epsabs=self.epsabs,
epsrel=self.epsrel, limit=500, **kwargs)[0]
cdf_list.append(cdf_x)
self.interp['cdf'] = \
scipy_interpolate.interp1d(self.x_list, cdf_list)
cdf_val = np.float64(self.interp['cdf'](x))
return cdf_val
def _cdfinv(self, **kwargs):
"""Calculate and interpolate the inverse CDF, then return the
corresponding parameter value at the given CDF value."""
if self.interp['cdfinv'] == callable:
cdf_list = []
for x_value in self.x_list:
cdf_list.append(self._cdf(x_value))
self.interp['cdfinv'] = \
scipy_interpolate.interp1d(cdf_list, self.x_list)
cdfinv_val = {list(kwargs.keys())[0]: np.float64(
self.interp['cdfinv'](list(kwargs.values())[0]))}
return cdfinv_val
__all__ = ['External', 'DistributionFunctionFromFile']
| 8,901
| 37.042735
| 79
|
py
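As a concrete, made-up example of the function signatures External expects, the sketch below wires up a unit-rate exponential in one parameter via a logpdf and a cdfinv:
import numpy
from pycbc.distributions.external import External

def logpdf(x=None):
    # log density of a unit-rate exponential, valid for x >= 0
    return -numpy.asarray(x)

def cdfinv(x=None):
    # map uniform draws in [0, 1) to exponential samples
    return {'x': -numpy.log(1. - numpy.asarray(x))}

dist = External(params=['x'], logpdf=logpdf, cdfinv=cdfinv)
print(dist.rvs(size=5)['x'])
print(dist(x=numpy.array([0.5, 1.0])))   # __call__ evaluates logpdf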
|
pycbc
|
pycbc-master/pycbc/distributions/uniform_log.py
|
# Copyright (C) 2017 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" This modules provides classes for evaluating distributions whose logarithm
are uniform.
"""
import numpy
from pycbc.distributions import uniform
class UniformLog10(uniform.Uniform):
""" A uniform distribution on the log base 10 of the given parameters.
The parameters are independent of each other. Instances of this class can
be called like a function. By default, logpdf will be called.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and their
corresponding bounds, as either tuples or a `boundaries.Bounds`
instance.
"""
name = "uniform_log10"
def __init__(self, **params):
super(UniformLog10, self).__init__(**params)
self._norm = numpy.prod([numpy.log10(bnd[1]) - numpy.log10(bnd[0])
for bnd in self._bounds.values()])
self._lognorm = numpy.log(self._norm)
def _cdfinv_param(self, param, value):
"""Return the cdfinv for a single given parameter """
lower_bound = numpy.log10(self._bounds[param][0])
upper_bound = numpy.log10(self._bounds[param][1])
return 10. ** ((upper_bound - lower_bound) * value + lower_bound)
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized arguments are
ignored.
"""
        if kwargs in self:
            # per parameter the density is 1 / (ln(10) * x), normalized by
            # the total log10 volume stored in self._norm
            vals = numpy.array([numpy.log(10) * kwargs[param]
                                for param in self._params])
            return 1.0 / (self._norm * numpy.prod(vals))
else:
return 0.
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params. Unrecognized
arguments are ignored.
"""
if kwargs in self:
return numpy.log(self._pdf(**kwargs))
else:
return -numpy.inf
__all__ = ["UniformLog10"]
| 2,814
| 38.647887
| 79
|
py
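A short sketch (assuming pycbc is installed and UniformLog10 is exposed in the pycbc.distributions namespace like the others here): the log10 of the samples should come out roughly uniform:
import numpy
from pycbc import distributions

dist = distributions.UniformLog10(distance=(10., 1000.))
d = dist.rvs(size=100000)['distance']
# log10(d) should be approximately uniform on [1, 3)
print(numpy.log10(d).min(), numpy.log10(d).mean(), numpy.log10(d).max())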
|
pycbc
|
pycbc-master/pycbc/distributions/gaussian.py
|
# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating Gaussian distributions.
"""
import numpy
from scipy.special import erf, erfinv
import scipy.stats
from pycbc.distributions import bounded
class Gaussian(bounded.BoundedDist):
r"""A Gaussian distribution on the given parameters; the parameters are
independent of each other.
Bounds can be provided on each parameter, in which case the distribution
will be a truncated Gaussian distribution. The PDF of a truncated
Gaussian distribution is given by:
.. math::
p(x|a, b, \mu,\sigma) = \frac{1}{\sqrt{2 \pi \sigma^2}}\frac{e^{- \frac{\left( x - \mu \right)^2}{2 \sigma^2}}}{\Phi(b|\mu, \sigma) - \Phi(a|\mu, \sigma)},
where :math:`\mu` is the mean, :math:`\sigma^2` is the variance,
:math:`a,b` are the bounds, and :math:`\Phi` is the cumulative distribution
of an unbounded normal distribution, given by:
.. math::
\Phi(x|\mu, \sigma) = \frac{1}{2}\left[1 + \mathrm{erf}\left(\frac{x-\mu}{\sigma \sqrt{2}}\right)\right].
Note that if :math:`[a,b) = [-\infty, \infty)`, this reduces to a standard
Gaussian distribution.
Instances of this class can be called like a function. By default, logpdf
will be called, but this can be changed by setting the class's __call__
method to its pdf method.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and
(optionally) some bounds, as either a tuple or a
`boundaries.Bounds` instance. The mean and variance of each
parameter can be provided by additional keyword arguments that have
        `_mean` and `_var` appended to the parameter name. For example,
`foo=(-2,10), foo_mean=3, foo_var=2` would create a truncated Gaussian
with mean 3 and variance 2, bounded between :math:`[-2, 10)`. If no
mean or variance is provided, the distribution will have 0 mean and
unit variance. If None is provided for the bounds, the distribution
will be a normal, unbounded Gaussian (equivalent to setting the bounds
to `[-inf, inf)`).
Examples
--------
Create an unbounded Gaussian distribution with zero mean and unit variance:
>>> dist = distributions.Gaussian(mass1=None)
Create a bounded Gaussian distribution on :math:`[1,10)` with a mean of 3
and a variance of 2:
>>> dist = distributions.Gaussian(mass1=(1,10), mass1_mean=3, mass1_var=2)
Create a bounded Gaussian distribution with the same parameters, but with
cyclic boundary conditions:
>>> dist = distributions.Gaussian(mass1=Bounds(1,10, cyclic=True), mass1_mean=3, mass1_var=2)
"""
name = "gaussian"
def __init__(self, **params):
# save distribution parameters as dict
# calculate the norm and exponential norm ahead of time
# and save to self._norm, self._lognorm, and self._expnorm
self._bounds = {}
self._mean = {}
self._var = {}
self._norm = {}
self._lognorm = {}
self._expnorm = {}
# pull out specified means, variance
mean_args = [p for p in params if p.endswith('_mean')]
var_args = [p for p in params if p.endswith('_var')]
self._mean = dict([[p[:-5], params.pop(p)] for p in mean_args])
self._var = dict([[p[:-4], params.pop(p)] for p in var_args])
# initialize the bounds
super(Gaussian, self).__init__(**params)
# check that there are no params in mean/var that are not in params
missing = set(self._mean.keys()) - set(params.keys())
if any(missing):
raise ValueError("means provided for unknow params {}".format(
', '.join(missing)))
missing = set(self._var.keys()) - set(params.keys())
if any(missing):
raise ValueError("vars provided for unknow params {}".format(
', '.join(missing)))
# set default mean/var for params not specified
self._mean.update(dict([[p, 0.]
for p in params if p not in self._mean]))
self._var.update(dict([[p, 1.]
for p in params if p not in self._var]))
# compute norms
for p,bnds in self._bounds.items():
sigmasq = self._var[p]
mu = self._mean[p]
a,b = bnds
invnorm = scipy.stats.norm.cdf(b, loc=mu, scale=sigmasq**0.5) \
- scipy.stats.norm.cdf(a, loc=mu, scale=sigmasq**0.5)
invnorm *= numpy.sqrt(2*numpy.pi*sigmasq)
self._norm[p] = 1./invnorm
self._lognorm[p] = numpy.log(self._norm[p])
self._expnorm[p] = -1./(2*sigmasq)
@property
def mean(self):
return self._mean
@property
def var(self):
return self._var
def _normalcdf(self, param, value):
"""The CDF of the normal distribution, without bounds."""
mu = self._mean[param]
var = self._var[param]
return 0.5*(1. + erf((value - mu)/(2*var)**0.5))
def cdf(self, param, value):
"""Returns the CDF of the given parameter value."""
a, b = self._bounds[param]
if a != -numpy.inf:
phi_a = self._normalcdf(param, a)
else:
phi_a = 0.
if b != numpy.inf:
phi_b = self._normalcdf(param, b)
else:
phi_b = 1.
phi_x = self._normalcdf(param, value)
return (phi_x - phi_a)/(phi_b - phi_a)
def _normalcdfinv(self, param, p):
"""The inverse CDF of the normal distribution, without bounds."""
mu = self._mean[param]
var = self._var[param]
return mu + (2*var)**0.5 * erfinv(2*p - 1.)
def _cdfinv_param(self, param, p):
"""Return inverse of the CDF.
"""
a, b = self._bounds[param]
if a != -numpy.inf:
phi_a = self._normalcdf(param, a)
else:
phi_a = 0.
if b != numpy.inf:
phi_b = self._normalcdf(param, b)
else:
phi_b = 1.
adjusted_p = phi_a + p * (phi_b - phi_a)
return self._normalcdfinv(param, adjusted_p)
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized
        arguments are ignored.
"""
return numpy.exp(self._logpdf(**kwargs))
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params.
        Unrecognized arguments are ignored.
"""
if kwargs in self:
return sum([self._lognorm[p] +
self._expnorm[p]*(kwargs[p]-self._mean[p])**2.
for p in self._params])
else:
return -numpy.inf
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a Gaussian distribution based on a configuration file. The
parameters for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Boundary arguments should be provided in the same way as described in
`get_param_bounds_from_config`. In addition, the mean and variance of
each parameter can be specified by setting `{param}_mean` and
`{param}_var`, respectively. For example, the following would create a
truncated Gaussian distribution between 0 and 6.28 for a parameter
called `phi` with mean 3.14 and variance 0.5 that is cyclic:
.. code-block:: ini
[{section}-{tag}]
min-phi = 0
max-phi = 6.28
phi_mean = 3.14
phi_var = 0.5
cyclic =
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
Gaussian
A distribution instance from the pycbc.inference.prior module.
"""
return bounded.bounded_from_config(cls, cp, section, variable_args,
bounds_required=False)
__all__ = ['Gaussian']
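# Example (illustrative sketch): cross-check the truncated-Gaussian pdf given
# in the class docstring against scipy.stats.truncnorm. The mean, variance,
# bounds, and evaluation point are hypothetical values chosen for illustration.
if __name__ == "__main__":
    mu, var, a, b = 3., 2., -2., 10.
    sigma = var ** 0.5
    x = 4.5

    def phi(v):
        # Phi(v|mu, sigma) as defined in the class docstring
        return 0.5 * (1. + erf((v - mu) / (2. * var) ** 0.5))

    # p(x|a, b, mu, sigma) written out directly from the docstring formula
    pdf = numpy.exp(-(x - mu) ** 2 / (2. * var)) / (
        numpy.sqrt(2. * numpy.pi * var) * (phi(b) - phi(a)))
    # scipy's truncnorm takes bounds standardized by the mean and sigma
    ref = scipy.stats.truncnorm.pdf(x, (a - mu) / sigma, (b - mu) / sigma,
                                    loc=mu, scale=sigma)
    print(pdf, ref)  # the two values should agree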
| 9,342
| 37.767635
| 163
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/joint.py
|
# Copyright (C) 2017 Collin Capano, Christopher M. Biwer, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" This module provides classes to describe joint distributions
"""
import logging
import numpy
from pycbc.io.record import FieldArray
class JointDistribution(object):
"""
Callable class that calculates the joint distribution built from a set of
distributions.
Parameters
----------
variable_args : list
A list of strings that contain the names of the variable parameters and
the order they are expected when the class is called.
\*distributions :
The rest of the arguments must be instances of distributions describing
the individual distributions on the variable parameters.
A single distribution may contain
multiple parameters. The set of all params across the distributions
(retrieved from the distributions' params attribute) must be the same
as the set of variable_args provided.
\*\*kwargs :
Valid keyword arguments include:
`constraints` : a list of functions that accept a dict of parameters
with the parameter name as the key. If the constraint is satisfied the
function should return True, if the constraint is violated, then the
function should return False.
`n_test_samples` : number of random draws used to fix pdf normalization
factor after applying constraints.
Attributes
----------
variable_args : tuple
The parameters expected when the evaluator is called.
distributions : list
The distributions for the parameters.
constraints : list
A list of functions to test if parameter values obey multi-dimensional
constraints.
Examples
--------
    An example of creating a joint distribution with a constraint that the
    total mass must be below 30:
>>> from pycbc.distributions import Uniform, JointDistribution
>>> def mtotal_lt_30(params):
... return params["mass1"] + params["mass2"] < 30
>>> mass_lim = (2, 50)
>>> uniform_prior = Uniform(mass1=mass_lim, mass2=mass_lim)
>>> prior_eval = JointDistribution(["mass1", "mass2"], uniform_prior,
... constraints=[mtotal_lt_30])
>>> print(prior_eval(mass1=20, mass2=1))
"""
name = 'joint'
def __init__(self, variable_args, *distributions, **kwargs):
# store the names of the parameters defined in the distributions
self.variable_args = tuple(variable_args)
# store the distributions
self.distributions = distributions
# store the constraints on the parameters defined inside the
# distributions list
self._constraints = kwargs["constraints"] \
if "constraints" in kwargs.keys() else []
# store kwargs
self.kwargs = kwargs
# check that all of the supplied parameters are described by the given
# distributions
distparams = set()
for dist in distributions:
distparams.update(set(dist.params))
varset = set(self.variable_args)
missing_params = distparams - varset
if missing_params:
raise ValueError("provided variable_args do not include "
"parameters %s" %(','.join(missing_params)) + " which are "
"required by the provided distributions")
extra_params = varset - distparams
if extra_params:
raise ValueError("variable_args %s " %(','.join(extra_params)) +
"are not in any of the provided distributions")
# if there are constraints then find the renormalization factor
# since a constraint will cut out part of the space
# do this by random sampling the full space and find the percent
# of samples rejected
n_test_samples = kwargs["n_test_samples"] \
if "n_test_samples" in kwargs else int(1e6)
if self._constraints:
logging.info("Renormalizing distribution for constraints")
# draw samples
samples = {}
for dist in self.distributions:
draw = dist.rvs(n_test_samples)
for param in dist.params:
samples[param] = draw[param]
samples = FieldArray.from_kwargs(**samples)
# evaluate constraints
result = self.within_constraints(samples)
# set new scaling factor for prior to be
# the fraction of acceptances in random sampling of entire space
self._pdf_scale = result.sum() / float(n_test_samples)
if self._pdf_scale == 0.0:
raise ValueError("None of the random draws for pdf "
"renormalization satisfied the constraints. "
" You can try increasing the 'n_test_samples' keyword.")
else:
self._pdf_scale = 1.0
# since Distributions will return logpdf we keep the scale factor
# in log scale as well for self.__call__
self._logpdf_scale = numpy.log(self._pdf_scale)
def apply_boundary_conditions(self, **params):
"""Applies each distributions' boundary conditions to the given list
of parameters, returning a new list with the conditions applied.
Parameters
----------
**params :
Keyword arguments should give the parameters to apply the
conditions to.
Returns
-------
dict
A dictionary of the parameters after each distribution's
`apply_boundary_conditions` function has been applied.
"""
for dist in self.distributions:
params.update(dist.apply_boundary_conditions(**params))
return params
@staticmethod
def _return_atomic(params):
"""Determines if an array or atomic value should be returned given a
set of input params.
Parameters
----------
params : dict, numpy.record, array, or FieldArray
The input to evaluate.
Returns
-------
bool :
Whether or not functions run on the parameters should be returned
as atomic types or not.
"""
if isinstance(params, dict):
return not any(isinstance(val, numpy.ndarray)
for val in params.values())
elif isinstance(params, numpy.record):
return True
elif isinstance(params, numpy.ndarray):
            return False
elif isinstance(params, FieldArray):
return False
else:
raise ValueError("params must be either dict, FieldArray, "
"record, or structured array")
@staticmethod
def _ensure_fieldarray(params):
"""Ensures the given params are a ``FieldArray``.
Parameters
----------
params : dict, FieldArray, numpy.record, or numpy.ndarray
If the given object is a dict, it will be converted to a
FieldArray.
Returns
-------
FieldArray
The given values as a FieldArray.
"""
if isinstance(params, dict):
return FieldArray.from_kwargs(**params)
elif isinstance(params, numpy.record):
return FieldArray.from_records(tuple(params),
names=params.dtype.names)
elif isinstance(params, numpy.ndarray):
return params.view(type=FieldArray)
elif isinstance(params, FieldArray):
return params
else:
raise ValueError("params must be either dict, FieldArray, "
"record, or structured array")
def within_constraints(self, params):
"""Evaluates whether the given parameters satisfy the constraints.
Parameters
----------
params : dict, FieldArray, numpy.record, or numpy.ndarray
The parameter values to evaluate.
Returns
-------
(array of) bool :
If params was an array, or if params a dictionary and one or more
of the parameters are arrays, will return an array of booleans.
Otherwise, a boolean.
"""
params = self._ensure_fieldarray(params)
return_atomic = self._return_atomic(params)
# convert params to a field array if it isn't one
result = numpy.ones(params.shape, dtype=bool)
for constraint in self._constraints:
result &= constraint(params)
if return_atomic:
result = result.item()
return result
def contains(self, params):
"""Evaluates whether the given parameters satisfy the boundary
        conditions, boundaries, and constraints. This method differs from
        `within_constraints`, which only checks the constraints.
Parameters
----------
params : dict, FieldArray, numpy.record, or numpy.ndarray
The parameter values to evaluate.
Returns
-------
(array of) bool :
If params was an array, or if params a dictionary and one or more
of the parameters are arrays, will return an array of booleans.
Otherwise, a boolean.
"""
params = self.apply_boundary_conditions(**params)
result = True
for dist in self.distributions:
param_name = dist.params[0]
contain_array = numpy.ones(len(params[param_name]), dtype=bool)
# note: enable `__contains__` in `pycbc.distributions.bounded`
# to handle array-like input, it doesn't work now.
for index, k in enumerate(params[param_name]):
contain_array[index] = {param_name: k} in dist
result &= numpy.array(contain_array)
result &= self.within_constraints(params)
return result
def __call__(self, **params):
"""Evaluate joint distribution for parameters.
"""
return_atomic = self._return_atomic(params)
        # check if the parameters satisfy the constraints
if len(self._constraints) != 0:
parray = self._ensure_fieldarray(params)
isin = self.within_constraints(parray)
if not isin.any():
if return_atomic:
out = -numpy.inf
else:
out = numpy.full(parray.shape, -numpy.inf)
return out
# evaluate
# note: this step may fail if arrays of values were provided, as
# not all distributions are vectorized currently
logps = numpy.array([d(**params) for d in self.distributions])
logp = logps.sum(axis=0)
if len(self._constraints) != 0:
logp += numpy.log(isin.astype(float))
if return_atomic:
logp = logp.item()
return logp - self._logpdf_scale
def rvs(self, size=1):
""" Rejection samples the parameter space.
"""
# create output FieldArray
dtype = [(arg, float) for arg in self.variable_args]
out = FieldArray(size, dtype=dtype)
# loop until enough samples accepted
remaining = size
ndraw = size
while remaining:
# scratch space for evaluating constraints
scratch = FieldArray(ndraw, dtype=dtype)
for dist in self.distributions:
# drawing samples from the distributions is generally faster
                # than evaluating constraints, so we'll always draw the full
# size, even if that gives us more points than we need
draw = dist.rvs(size=ndraw)
for param in dist.params:
scratch[param] = draw[param]
# apply any constraints
keep = self.within_constraints(scratch)
nkeep = keep.sum()
kmin = size - remaining
kmax = min(nkeep, remaining)
out[kmin:kmin+kmax] = scratch[keep][:kmax]
remaining = max(0, remaining - nkeep)
            # to try to speed up the next iteration, scale up the draw size
            # by the inverse of the fraction of values kept, capping at 1e6
ndraw = int(min(1e6, ndraw * numpy.ceil(ndraw / (nkeep + 1.))))
return out
@property
def well_reflected(self):
""" Get list of which parameters are well reflected
"""
reflect = []
bounds = self.bounds
for param in bounds:
if bounds[param].reflected == 'well':
reflect.append(param)
return reflect
@property
def cyclic(self):
""" Get list of which parameters are cyclic
"""
cyclic = []
bounds = self.bounds
for param in bounds:
if bounds[param].cyclic:
cyclic.append(param)
return cyclic
@property
def bounds(self):
""" Get the dict of boundaries
"""
bnds = {}
for dist in self.distributions:
if hasattr(dist, 'bounds'):
bnds.update(dist.bounds)
return bnds
def cdfinv(self, **original):
""" Apply the inverse cdf to the array of values [0, 1]. Every
variable parameter must be given as a keyword argument.
"""
updated = {}
for dist in self.distributions:
updated.update(dist.cdfinv(**original))
return updated
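# Example (illustrative sketch, following the class docstring): build a joint
# distribution with a total-mass constraint and draw from it by rejection
# sampling. The mass limits and the cut below are hypothetical values chosen
# only for illustration.
if __name__ == "__main__":
    from pycbc.distributions import Uniform, JointDistribution

    def mtotal_lt_30(params):
        return params["mass1"] + params["mass2"] < 30

    mass_lim = (2., 50.)
    prior = JointDistribution(["mass1", "mass2"],
                              Uniform(mass1=mass_lim, mass2=mass_lim),
                              constraints=[mtotal_lt_30],
                              n_test_samples=10000)
    draws = prior.rvs(size=100)
    assert (draws["mass1"] + draws["mass2"] < 30).all()
    # evaluate the (renormalized) log pdf at a point that satisfies the cut
    print(prior(mass1=20., mass2=1.))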
| 14,281
| 37.085333
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/angular.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating angular distributions.
"""
from configparser import Error
import numpy
from pycbc import VARARGS_DELIM
from pycbc import boundaries
from pycbc.distributions import bounded
from pycbc.distributions import uniform
class UniformAngle(uniform.Uniform):
"""A uniform distribution in which the dependent variable is between
`[0,2pi)`.
The domain of the distribution may optionally be made cyclic using the
`cyclic_domain` parameter.
Bounds may be provided to limit the range for which the pdf has support.
If provided, the parameter bounds are in radians.
Parameters
----------
cyclic_domain : {False, bool}
If True, cyclic bounds on [0, 2pi) are applied to all values when
        evaluating the pdf. This is done before any additional bounds
specified for a parameter are applied. Default is False.
\**params :
The keyword arguments should provide the names of parameters and
(optionally) their corresponding bounds, as either
`boundaries.Bounds` instances or tuples. The bounds must be
        in [0, 2pi), given in radians. None may also
be passed; in that case, the domain bounds will be used.
Notes
------
For more information, see Uniform.
"""
name = 'uniform_angle'
_domainbounds = (0, 2*numpy.pi)
def __init__(self, cyclic_domain=False, **params):
# _domain is a bounds instance used to apply cyclic conditions; this is
# applied first, before any bounds specified in the initialization
# are used
self._domain = boundaries.Bounds(self._domainbounds[0],
self._domainbounds[1], cyclic=cyclic_domain)
for p,bnds in params.items():
if bnds is None:
bnds = self._domain
elif isinstance(bnds, boundaries.Bounds):
                # the bounds are already in radians; rebuild to preserve types
bnds._min = bnds._min.__class__(bnds._min)
bnds._max = bnds._max.__class__(bnds._max)
else:
# create a Bounds instance from the given tuple
bnds = boundaries.Bounds(bnds[0], bnds[1])
# check that the bounds are in the domain
if bnds.min < self._domain.min or bnds.max > self._domain.max:
raise ValueError("bounds must be in [{x},{y}); "
"got [{a},{b})".format(x=self._domain.min,
y=self._domain.max, a=bnds.min,
b=bnds.max))
# update
params[p] = bnds
super(UniformAngle, self).__init__(**params)
@property
def domain(self):
"""Returns the domain of the distribution."""
return self._domain
def apply_boundary_conditions(self, **kwargs):
"""Maps values to be in [0, 2pi) (the domain) first, before applying
any additional boundary conditions.
Parameters
----------
\**kwargs :
The keyword args should be the name of a parameter and value to
apply its boundary conditions to. The arguments need not include
all of the parameters in self.
Returns
-------
dict
A dictionary of the parameter names and the conditioned values.
"""
# map values to be within the domain
kwargs = dict([[p, self._domain.apply_conditions(val)]
for p,val in kwargs.items() if p in self._bounds])
# now apply additional conditions
return super(UniformAngle, self).apply_boundary_conditions(**kwargs)
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file.
The parameters for the distribution are retrieved from the section
titled "[`section`-`variable_args`]" in the config file. By default,
only the name of the distribution (`uniform_angle`) needs to be
        specified. This will result in a uniform prior on `[0, 2pi)`. To
make the domain cyclic, add `cyclic_domain =`. To specify boundaries
that are not `[0, 2pi)`, add `(min|max)-var` arguments, where `var`
is the name of the variable.
For example, this will initialize a variable called `theta` with a
uniform distribution on `[0, 2pi)` without cyclic boundaries:
.. code-block:: ini
[{section}-theta]
name = uniform_angle
This will make the domain cyclic on `[0, 2pi)`:
.. code-block:: ini
[{section}-theta]
name = uniform_angle
cyclic_domain =
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
``VARARGS_DELIM``. These must appear in the "tag" part
of the section header.
Returns
-------
UniformAngle
A distribution instance from the pycbc.inference.prior module.
"""
# we'll retrieve the setting for cyclic_domain directly
additional_opts = {'cyclic_domain': cp.has_option_tag(section,
'cyclic_domain', variable_args)}
return bounded.bounded_from_config(cls, cp, section, variable_args,
bounds_required=False,
additional_opts=additional_opts)
class SinAngle(UniformAngle):
r"""A sine distribution; the pdf of each parameter `\theta` is given by:
..math::
p(\theta) = \frac{\sin \theta}{\cos\theta_0 - \cos\theta_1}, \theta_0 \leq \theta < \theta_1,
and 0 otherwise. Here, :math:`\theta_0, \theta_1` are the bounds of the
parameter.
The domain of this distribution is `[0, pi]`. This is accomplished by
putting hard boundaries at `[0, pi]`. Bounds may be provided to further
limit the range for which the pdf has support. As with `UniformAngle`,
these are initialized in radians.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and
(optionally) their corresponding bounds, as either
`boundaries.Bounds` instances or tuples. The bounds must be
        in [0, pi], given in radians. None may also
be passed; in that case, the domain bounds will be used.
"""
name = 'sin_angle'
_func = numpy.cos
_dfunc = numpy.sin
_arcfunc = numpy.arccos
_domainbounds = (0, numpy.pi)
def __init__(self, **params):
super(SinAngle, self).__init__(**params)
# replace the domain
self._domain = boundaries.Bounds(self._domainbounds[0],
self._domainbounds[1], btype_min='closed', btype_max='closed',
cyclic=False)
self._lognorm = -sum([numpy.log(
abs(self._func(bnd[1]) - self._func(bnd[0]))) \
for bnd in self._bounds.values()])
self._norm = numpy.exp(self._lognorm)
def _cdfinv_param(self, arg, value):
"""Return inverse of cdf for mapping unit interval to parameter bounds.
"""
scale = (numpy.cos(self._bounds[arg][0])
- numpy.cos(self._bounds[arg][1]))
offset = 1. + numpy.cos(self._bounds[arg][1]) / scale
new_value = numpy.arccos(-scale * (value - offset))
return new_value
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
        contain all of the parameters in self's params. Unrecognized
        arguments are ignored.
"""
if kwargs not in self:
return 0.
return self._norm * \
self._dfunc(numpy.array([kwargs[p] for p in self._params])).prod()
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params.
        Unrecognized arguments are ignored.
"""
if kwargs not in self:
return -numpy.inf
return self._lognorm + \
numpy.log(self._dfunc(
numpy.array([kwargs[p] for p in self._params]))).sum()
class CosAngle(SinAngle):
r"""A cosine distribution. This is the same thing as a sine distribution,
but with the domain shifted to `[-pi/2, pi/2]`. See SinAngle for more
details.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and
(optionally) their corresponding bounds, as either
`boundaries.Bounds` instances or tuples. The bounds must be
in [-PI/2, PI/2].
"""
name = 'cos_angle'
_func = numpy.sin
_dfunc = numpy.cos
_arcfunc = numpy.arcsin
_domainbounds = (-numpy.pi/2, numpy.pi/2)
def _cdfinv_param(self, param, value):
a = self._bounds[param][0]
b = self._bounds[param][1]
scale = numpy.sin(b) - numpy.sin(a)
offset = 1. - numpy.sin(b)/(numpy.sin(b) - numpy.sin(a))
new_value = numpy.arcsin((value - offset) * scale)
return new_value
class UniformSolidAngle(bounded.BoundedDist):
"""A distribution that is uniform in the solid angle of a sphere. The names
    of the two angular parameters can be specified at initialization.
Parameters
----------
polar_angle : {'theta', str}
The name of the polar angle.
azimuthal_angle : {'phi', str}
The name of the azimuthal angle.
polar_bounds : {None, tuple}
Limit the polar angle to the given bounds. If None provided, the polar
angle will vary from 0 (the north pole) to pi (the south pole). The
bounds should be specified as factors of pi. For example, to limit
the distribution to the northern hemisphere, set
`polar_bounds=(0,0.5)`.
azimuthal_bounds : {None, tuple}
Limit the azimuthal angle to the given bounds. If None provided, the
azimuthal angle will vary from 0 to 2pi. The
bounds should be specified as factors of pi. For example, to limit
        the distribution to one hemisphere, set `azimuthal_bounds=(0,1)`.
azimuthal_cyclic_domain : {False, bool}
Make the domain of the azimuthal angle be cyclic; i.e., azimuthal
values are constrained to be in [0, 2pi) using cyclic boundaries prior
to applying any other boundary conditions and prior to evaluating the
pdf. Default is False.
"""
name = 'uniform_solidangle'
_polardistcls = SinAngle
_azimuthaldistcls = UniformAngle
_default_polar_angle = 'theta'
_default_azimuthal_angle = 'phi'
def __init__(self, polar_angle=None, azimuthal_angle=None,
polar_bounds=None, azimuthal_bounds=None,
azimuthal_cyclic_domain=False):
if polar_angle is None:
polar_angle = self._default_polar_angle
if azimuthal_angle is None:
azimuthal_angle = self._default_azimuthal_angle
self._polardist = self._polardistcls(**{
polar_angle: polar_bounds})
self._azimuthaldist = self._azimuthaldistcls(**{
azimuthal_angle: azimuthal_bounds,
'cyclic_domain': azimuthal_cyclic_domain})
self._polar_angle = polar_angle
self._azimuthal_angle = azimuthal_angle
self._bounds = self._polardist.bounds.copy()
self._bounds.update(self._azimuthaldist.bounds)
self._params = sorted(self._bounds.keys())
@property
def bounds(self):
"""dict: The bounds on each angle. The keys are the names of the polar
and azimuthal angles, the values are the minimum and maximum of each,
in radians. For example, if the distribution was initialized with
`polar_angle='theta', polar_bounds=(0,0.5)` then the bounds will have
        `'theta': (0, 1.5707963267948966)` as an entry."""
return self._bounds
@property
def polar_angle(self):
"""str: The name of the polar angle."""
return self._polar_angle
@property
def azimuthal_angle(self):
"""str: The name of the azimuthal angle."""
return self._azimuthal_angle
def _cdfinv_param(self, param, value):
""" Return the cdfinv for a single given parameter """
if param == self.polar_angle:
return self._polardist._cdfinv_param(param, value)
elif param == self.azimuthal_angle:
return self._azimuthaldist._cdfinv_param(param, value)
def apply_boundary_conditions(self, **kwargs):
"""Maps the given values to be within the domain of the azimuthal and
polar angles, before applying any other boundary conditions.
Parameters
----------
\**kwargs :
The keyword args must include values for both the azimuthal and
            polar angle, using the names they were initialized with. For
            example, if `polar_angle='theta'` and `azimuthal_angle='phi'`, then
the keyword args must be `theta={val1}, phi={val2}`.
Returns
-------
dict
A dictionary of the parameter names and the conditioned values.
"""
polarval = kwargs[self._polar_angle]
azval = kwargs[self._azimuthal_angle]
# constrain each angle to its domain
polarval = self._polardist._domain.apply_conditions(polarval)
azval = self._azimuthaldist._domain.apply_conditions(azval)
# apply any other boundary conditions
polarval = self._bounds[self._polar_angle].apply_conditions(polarval)
azval = self._bounds[self._azimuthal_angle].apply_conditions(azval)
return {self._polar_angle: polarval, self._azimuthal_angle: azval}
def _pdf(self, **kwargs):
"""
Returns the pdf at the given angles.
Parameters
----------
\**kwargs:
The keyword arguments should specify the value for each angle,
using the names of the polar and azimuthal angles as the keywords.
Unrecognized arguments are ignored.
Returns
-------
float
The value of the pdf at the given values.
"""
return self._polardist._pdf(**kwargs) * \
self._azimuthaldist._pdf(**kwargs)
def _logpdf(self, **kwargs):
"""
Returns the logpdf at the given angles.
Parameters
----------
\**kwargs:
The keyword arguments should specify the value for each angle,
using the names of the polar and azimuthal angles as the keywords.
Unrecognized arguments are ignored.
Returns
-------
float
The value of the pdf at the given values.
"""
return self._polardist._logpdf(**kwargs) +\
self._azimuthaldist._logpdf(**kwargs)
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file.
The section must have the names of the polar and azimuthal angles in
the tag part of the section header. For example:
.. code-block:: ini
[prior-theta+phi]
name = uniform_solidangle
If nothing else is provided, the default names and bounds of the polar
and azimuthal angles will be used. To specify a different name for
each angle, set the `polar-angle` and `azimuthal-angle` attributes. For
example:
.. code-block:: ini
[prior-foo+bar]
name = uniform_solidangle
polar-angle = foo
azimuthal-angle = bar
Note that the names of the variable args in the tag part of the section
name must match the names of the polar and azimuthal angles.
Bounds may also be specified for each angle, as factors of pi. For
example:
.. code-block:: ini
[prior-theta+phi]
polar-angle = theta
azimuthal-angle = phi
min-theta = 0
max-theta = 0.5
This will return a distribution that is uniform in the upper
hemisphere.
By default, the domain of the azimuthal angle is `[0, 2pi)`. To make
this domain cyclic, add `azimuthal_cyclic_domain =`.
Parameters
----------
cp : ConfigParser instance
The config file.
section : str
The name of the section.
variable_args : str
The names of the parameters for this distribution, separated by
``VARARGS_DELIM``. These must appear in the "tag" part
of the section header.
Returns
-------
UniformSolidAngle
A distribution instance from the pycbc.inference.prior module.
"""
tag = variable_args
variable_args = variable_args.split(VARARGS_DELIM)
# get the variables that correspond to the polar/azimuthal angles
try:
polar_angle = cp.get_opt_tag(section, 'polar-angle', tag)
except Error:
polar_angle = cls._default_polar_angle
try:
azimuthal_angle = cp.get_opt_tag(section, 'azimuthal-angle', tag)
except Error:
azimuthal_angle = cls._default_azimuthal_angle
if polar_angle not in variable_args:
raise Error("polar-angle %s is not one of the variable args (%s)"%(
polar_angle, ', '.join(variable_args)))
if azimuthal_angle not in variable_args:
raise Error("azimuthal-angle %s is not one of the variable args "%(
azimuthal_angle) + "(%s)"%(', '.join(variable_args)))
# get the bounds, if provided
polar_bounds = bounded.get_param_bounds_from_config(
cp, section, tag,
polar_angle)
azimuthal_bounds = bounded.get_param_bounds_from_config(
cp, section, tag,
azimuthal_angle)
        # see if a cyclic domain is desired for the azimuthal angle
azimuthal_cyclic_domain = cp.has_option_tag(section,
'azimuthal_cyclic_domain', tag)
return cls(polar_angle=polar_angle, azimuthal_angle=azimuthal_angle,
polar_bounds=polar_bounds,
azimuthal_bounds=azimuthal_bounds,
azimuthal_cyclic_domain=azimuthal_cyclic_domain)
__all__ = ['UniformAngle', 'SinAngle', 'CosAngle', 'UniformSolidAngle']
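# Example (illustrative sketch): the sine-angle inverse CDF used in
# SinAngle._cdfinv_param, written with plain numpy for the full [0, pi]
# domain. The sample size is arbitrary.
if __name__ == "__main__":
    u = numpy.random.uniform(size=100000)
    theta = numpy.arccos(1. - 2. * u)  # sin-distributed samples on [0, pi]
    # cos(theta) should be approximately uniform on (-1, 1]
    print(numpy.histogram(numpy.cos(theta), bins=5)[0])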
| 19,645
| 37.59725
| 101
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/constraints.py
|
# Copyright (C) 2017 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating multi-dimensional constraints.
"""
import re
import scipy.spatial
import numpy
import h5py
from pycbc import transforms
from pycbc.io import record
class Constraint(object):
"""Creates a constraint that evaluates to True if parameters obey
the constraint and False if they do not.
"""
name = "custom"
def __init__(self, constraint_arg, static_args=None, transforms=None,
**kwargs):
static_args = (
{} if static_args is None
else dict(sorted(
static_args.items(), key=lambda x: len(x[0]), reverse=True))
)
for arg, val in static_args.items():
swp = f"'{val}'" if isinstance(val, str) else str(val)
# Substitute static arg name for value if it appears in the
# constraint_arg string at the beginning of a word and is not
# followed by an underscore or equals sign.
# This ensures that static_args that are also kwargs in function calls are
# handled correctly, i.e., the kwarg is not touched while its value is replaced
# with the static_arg value.
constraint_arg = re.sub(
r'\b{}(?!\_|\=)'.format(arg), swp, constraint_arg)
self.constraint_arg = constraint_arg
self.transforms = transforms
for kwarg in kwargs.keys():
setattr(self, kwarg, kwargs[kwarg])
def __call__(self, params):
"""Evaluates constraint.
"""
# cast to FieldArray
if isinstance(params, dict):
params = record.FieldArray.from_kwargs(**params)
elif not isinstance(params, record.FieldArray):
raise ValueError("params must be dict or FieldArray instance")
# try to evaluate; this will assume that all of the needed parameters
# for the constraint exists in params
try:
out = self._constraint(params)
except NameError:
# one or more needed parameters don't exist; try applying the
# transforms
params = transforms.apply_transforms(params, self.transforms) \
if self.transforms else params
out = self._constraint(params)
if isinstance(out, record.FieldArray):
out = out.item() if params.size == 1 else out
return out
def _constraint(self, params):
""" Evaluates constraint function.
"""
return params[self.constraint_arg]
class SupernovaeConvexHull(Constraint):
"""Pre defined constraint for core-collapse waveforms that checks
whether a given set of coefficients lie within the convex hull of
the coefficients of the principal component basis vectors.
"""
name = "supernovae_convex_hull"
required_parameters = ["coeff_0", "coeff_1"]
def __init__(self, constraint_arg, transforms=None, **kwargs):
super(SupernovaeConvexHull,
self).__init__(constraint_arg, transforms=transforms, **kwargs)
if 'principal_components_file' in kwargs:
pc_filename = kwargs['principal_components_file']
hull_dimention = numpy.array(kwargs['hull_dimention'])
self.hull_dimention = int(hull_dimention)
pc_file = h5py.File(pc_filename, 'r')
pc_coefficients = numpy.array(pc_file.get('coefficients'))
pc_file.close()
hull_points = []
for dim in range(self.hull_dimention):
hull_points.append(pc_coefficients[:, dim])
hull_points = numpy.array(hull_points).T
pc_coeffs_hull = scipy.spatial.Delaunay(hull_points)
self._hull = pc_coeffs_hull
def _constraint(self, params):
output_array = []
points = numpy.array([params["coeff_0"],
params["coeff_1"],
params["coeff_2"]])
for coeff_index in range(len(params["coeff_0"])):
point = points[:, coeff_index][:self.hull_dimention]
output_array.append(self._hull.find_simplex(point) >= 0)
return numpy.array(output_array)
# list of all constraints
constraints = {
Constraint.name : Constraint,
SupernovaeConvexHull.name : SupernovaeConvexHull,
}
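# Example (illustrative sketch): evaluate a custom constraint on a small set
# of points. The parameter names, values, and expressions below are
# hypothetical, chosen only for illustration.
if __name__ == "__main__":
    points = record.FieldArray.from_kwargs(
        mass1=numpy.array([10., 25.]), mass2=numpy.array([5., 20.]))
    cons = Constraint("mass1 + mass2 < 30")
    print(cons(points))  # one point satisfies the constraint, one does not
    # static_args are substituted into the constraint string by name
    cons2 = Constraint("mass1 + mass2 < mtotal_max",
                       static_args={"mtotal_max": 30.})
    print(cons2(points))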
| 5,056
| 38.818898
| 91
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/__init__.py
|
# Copyright (C) 2016 Collin Capano, Christopher M. Biwer, Alex Nitz,
# 2021 Yifan Wang, Shichao Wu
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes and functions for drawing and calculating the
probability density function of distributions.
"""
# imports needed for functions below
import configparser as _ConfigParser
from pycbc.distributions import constraints
from pycbc import VARARGS_DELIM as _VARARGS_DELIM
# Promote some classes/functions to the distributions name space
from pycbc.distributions.utils import draw_samples_from_config
from pycbc.distributions.angular import UniformAngle, SinAngle, CosAngle, \
UniformSolidAngle
from pycbc.distributions.arbitrary import Arbitrary, FromFile
from pycbc.distributions.gaussian import Gaussian
from pycbc.distributions.power_law import UniformPowerLaw, UniformRadius
from pycbc.distributions.sky_location import UniformSky, FisherSky
from pycbc.distributions.uniform import Uniform
from pycbc.distributions.uniform_log import UniformLog10
from pycbc.distributions.spins import IndependentChiPChiEff
from pycbc.distributions.qnm import UniformF0Tau
from pycbc.distributions.joint import JointDistribution
from pycbc.distributions.external import External, DistributionFunctionFromFile
from pycbc.distributions.fixedsamples import FixedSamples
from pycbc.distributions.mass import MchirpfromUniformMass1Mass2, \
QfromUniformMass1Mass2
# a dict of all available distributions
distribs = {
IndependentChiPChiEff.name : IndependentChiPChiEff,
Arbitrary.name : Arbitrary,
FromFile.name : FromFile,
Gaussian.name : Gaussian,
UniformPowerLaw.name : UniformPowerLaw,
UniformRadius.name : UniformRadius,
Uniform.name : Uniform,
UniformAngle.name : UniformAngle,
CosAngle.name : CosAngle,
SinAngle.name : SinAngle,
UniformSolidAngle.name : UniformSolidAngle,
UniformSky.name : UniformSky,
UniformLog10.name : UniformLog10,
UniformF0Tau.name : UniformF0Tau,
External.name: External,
DistributionFunctionFromFile.name: DistributionFunctionFromFile,
FixedSamples.name: FixedSamples,
MchirpfromUniformMass1Mass2.name: MchirpfromUniformMass1Mass2,
QfromUniformMass1Mass2.name: QfromUniformMass1Mass2,
FisherSky.name: FisherSky
}
def read_distributions_from_config(cp, section="prior"):
"""Returns a list of PyCBC distribution instances for a section in the
given configuration file.
Parameters
----------
cp : WorflowConfigParser
An open config file to read.
section : {"prior", string}
Prefix on section names from which to retrieve the distributions.
Returns
-------
list
A list of the parsed distributions.
"""
dists = []
variable_args = []
for subsection in cp.get_subsections(section):
name = cp.get_opt_tag(section, "name", subsection)
dist = distribs[name].from_config(cp, section, subsection)
if set(dist.params).isdisjoint(variable_args):
dists.append(dist)
variable_args += dist.params
else:
raise ValueError("Same parameter in more than one distribution.")
return dists
def _convert_liststring_to_list(lstring):
"""Checks if an argument of the configuration file is a string of a list
and returns the corresponding list (of strings).
The argument is considered to be a list if it starts with '[' and ends
with ']'. List elements should be comma separated. For example, passing
`'[foo bar, cat]'` will result in `['foo bar', 'cat']` being returned. If
the argument does not start and end with '[' and ']', the argument will
just be returned as is.
"""
if lstring[0]=='[' and lstring[-1]==']':
lstring = [str(lstring[1:-1].split(',')[n].strip().strip("'"))
for n in range(len(lstring[1:-1].split(',')))]
return lstring
def read_params_from_config(cp, prior_section='prior',
vargs_section='variable_args',
sargs_section='static_args'):
"""Loads static and variable parameters from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
prior_section : str, optional
        Check that priors exist in the given section. Default is 'prior'.
vargs_section : str, optional
The section to get the parameters that will be varied/need priors
defined for them. Default is 'variable_args'.
sargs_section : str, optional
The section to get the parameters that will remain fixed. Default is
'static_args'.
Returns
-------
variable_args : list
The names of the parameters to vary in the PE run.
static_args : dict
Dictionary of names -> values giving the parameters to keep fixed.
"""
# sanity check that each parameter in [variable_args] has a priors section
variable_args = cp.options(vargs_section)
subsections = cp.get_subsections(prior_section)
tags = set([p for tag in subsections for p in tag.split('+')])
missing_prior = set(variable_args) - tags
if any(missing_prior):
raise KeyError("You are missing a priors section in the config file "
"for parameter(s): {}".format(', '.join(missing_prior)))
# sanity check that each parameter with a priors section is in
# [variable_args]
missing_variable = tags - set(variable_args)
if any(missing_variable):
raise KeyError("Prior section found for parameter(s) {} but not "
"listed as variable parameter(s)."
.format(', '.join(missing_variable)))
# get static args
try:
static_args = dict([(key, cp.get_opt_tags(sargs_section, key, []))
for key in cp.options(sargs_section)])
except _ConfigParser.NoSectionError:
static_args = {}
# sanity check that each parameter in [variable_args]
# is not repeated in [static_args]
for arg in variable_args:
if arg in static_args:
raise KeyError("Parameter {} found both in static_args and in "
"variable_args sections.".format(arg))
# try converting values to float
for key in static_args:
val = static_args[key]
try:
# the following will raise a ValueError if it cannot be cast to
# float (as we would expect for string arguments)
static_args[key] = float(val)
except ValueError:
# try converting to a list of strings; this function will just
# return val if it does not begin (end) with [ (])
static_args[key] = _convert_liststring_to_list(val)
return variable_args, static_args
def read_constraints_from_config(cp, transforms=None, static_args=None,
constraint_section='constraint'):
"""Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
static_args : dict, optional
Dictionary of static parameters and their values to be applied
to constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
"""
cons = []
for subsection in cp.get_subsections(constraint_section):
name = cp.get_opt_tag(constraint_section, "name", subsection)
constraint_arg = cp.get_opt_tag(
constraint_section, "constraint_arg", subsection)
# get any other keyword arguments
kwargs = {}
section = constraint_section + "-" + subsection
extra_opts = [key for key in cp.options(section)
if key not in ["name", "constraint_arg"]]
for key in extra_opts:
val = cp.get(section, key)
if key == "required_parameters":
val = val.split(_VARARGS_DELIM)
else:
try:
val = float(val)
except ValueError:
pass
kwargs[key] = val
cons.append(constraints.constraints[name](
constraint_arg, static_args=static_args, transforms=transforms,
**kwargs))
return cons
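# Example (illustrative sketch): distributions can be looked up by name in the
# ``distribs`` registry, which is how ``read_distributions_from_config``
# builds them from a parsed config file. The parameter name, bounds, mean,
# and variance below are hypothetical values chosen only for illustration.
if __name__ == "__main__":
    dist_class = distribs[Gaussian.name]
    dist = dist_class(mass1=(1., 10.), mass1_mean=3., mass1_var=2.)
    print(dist.rvs(size=5))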
| 9,321
| 40.616071
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/qnm.py
|
# Copyright (C) 2018 Miriam Cabero, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
import numpy
import pycbc
from pycbc import conversions, boundaries
from . import uniform, bounded
class UniformF0Tau(uniform.Uniform):
"""A distribution uniform in QNM frequency and damping time.
Constraints may be placed to exclude frequencies and damping times
corresponding to specific masses and spins.
To ensure a properly normalized pdf that accounts for the constraints
on final mass and spin, a renormalization factor is calculated upon
initialization. This is calculated numerically: f0 and tau are drawn
randomly, then the norm is scaled by the fraction of points that yield
final masses and spins within the constraints. The `norm_tolerance` keyword
    argument sets the error on the estimate of the norm from this numerical
method. If this value is too large, such that no points are found in
the allowed region, a ValueError is raised.
Parameters
----------
f0 : tuple or boundaries.Bounds
The range of QNM frequencies (in Hz).
tau : tuple or boundaries.Bounds
The range of QNM damping times (in s).
final_mass : tuple or boundaries.Bounds, optional
The range of final masses to allow. Default is [0,inf).
final_spin : tuple or boundaries.Bounds, optional
        The range of final spins to allow. Must be in [-0.996, 0.996], which
        is the default.
rdfreq : str, optional
Use the given string as the name for the f0 parameter. Default is 'f0'.
damping_time : str, optional
Use the given string as the name for the tau parameter. Default is
'tau'.
norm_tolerance : float, optional
The tolerance on the estimate of the normalization. Default is 1e-3.
norm_seed : int, optional
Seed to use for the random number generator when estimating the norm.
Default is 0. After the norm is estimated, the random number generator
is set back to the state it was in upon initialization.
Examples
--------
Create a distribution:
>>> dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4,1e-2))
Check that all random samples drawn from the distribution yield final
masses > 1:
>>> from pycbc import conversions
>>> samples = dist.rvs(size=1000)
>>> (conversions.final_mass_from_f0_tau(samples['f0'],
samples['tau']) > 1.).all()
True
Create a distribution with tighter bounds on final mass and spin:
>>> dist = UniformF0Tau(f0=(10., 2048.), tau=(1e-4,1e-2),
final_mass=(20., 200.), final_spin=(0,0.996))
Check that all random samples drawn from the distribution are in the
final mass and spin constraints:
>>> samples = dist.rvs(size=1000)
>>> (conversions.final_mass_from_f0_tau(samples['f0'],
samples['tau']) >= 20.).all()
True
>>> (conversions.final_mass_from_f0_tau(samples['f0'],
samples['tau']) < 200.).all()
True
>>> (conversions.final_spin_from_f0_tau(samples['f0'],
samples['tau']) >= 0.).all()
True
>>> (conversions.final_spin_from_f0_tau(samples['f0'],
samples['tau']) < 0.996).all()
True
"""
name = 'uniform_f0_tau'
def __init__(self, f0=None, tau=None, final_mass=None, final_spin=None,
rdfreq='f0', damping_time='tau', norm_tolerance=1e-3,
norm_seed=0):
if f0 is None:
raise ValueError("must provide a range for f0")
if tau is None:
raise ValueError("must provide a range for tau")
self.rdfreq = rdfreq
self.damping_time = damping_time
parent_args = {rdfreq: f0, damping_time: tau}
super(UniformF0Tau, self).__init__(**parent_args)
if final_mass is None:
final_mass = (0., numpy.inf)
if final_spin is None:
final_spin = (-0.996, 0.996)
self.final_mass_bounds = boundaries.Bounds(
min_bound=final_mass[0], max_bound=final_mass[1])
self.final_spin_bounds = boundaries.Bounds(
min_bound=final_spin[0], max_bound=final_spin[1])
# Re-normalize to account for cuts: we'll do this by just sampling
        # a large number of (f0, tau) points, and seeing how many are in the
# desired range.
        # preserve the current random state
s = numpy.random.get_state()
numpy.random.seed(norm_seed)
nsamples = int(1./norm_tolerance**2)
draws = super(UniformF0Tau, self).rvs(size=nsamples)
# reset the random state
numpy.random.set_state(s)
num_in = self._constraints(draws).sum()
        # if num_in is 0, then the requested tolerance is too large
        if num_in == 0:
            raise ValueError("the normalization is less than the "
                             "norm_tolerance; try again with a smaller "
                             "norm_tolerance")
self._lognorm += numpy.log(num_in) - numpy.log(nsamples)
self._norm = numpy.exp(self._lognorm)
def __contains__(self, params):
isin = super(UniformF0Tau, self).__contains__(params)
if isin:
isin &= self._constraints(params)
return isin
def _constraints(self, params):
f0 = params[self.rdfreq]
tau = params[self.damping_time]
# check if we need to specify a particular mode (l,m) != (2,2)
if re.match(r'f_\d{3}', self.rdfreq):
mode = self.rdfreq.strip('f_')
l, m = int(mode[0]), int(mode[1])
else:
l, m = 2, 2
# temporarily silence invalid warnings... these will just be ruled out
# automatically
with numpy.errstate(invalid="ignore"):
mf = conversions.final_mass_from_f0_tau(f0, tau, l=l, m=m)
sf = conversions.final_spin_from_f0_tau(f0, tau, l=l, m=m)
isin = (self.final_mass_bounds.__contains__(mf)) & (
self.final_spin_bounds.__contains__(sf))
return isin
def rvs(self, size=1):
"""Draw random samples from this distribution.
Parameters
----------
size : int, optional
The number of draws to do. Default is 1.
Returns
-------
array
A structured array of the random draws.
"""
size = int(size)
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
remaining = size
keepidx = 0
while remaining:
draws = super(UniformF0Tau, self).rvs(size=remaining)
mask = self._constraints(draws)
addpts = mask.sum()
arr[keepidx:keepidx+addpts] = draws[mask]
keepidx += addpts
remaining = size - keepidx
return arr
@classmethod
def from_config(cls, cp, section, variable_args):
"""Initialize this class from a config file.
Bounds on ``f0``, ``tau``, ``final_mass`` and ``final_spin`` should
be specified by providing ``min-{param}`` and ``max-{param}``. If
the ``f0`` or ``tau`` param should be renamed, ``rdfreq`` and
``damping_time`` should be provided; these must match
``variable_args``. If ``rdfreq`` and ``damping_time`` are not
provided, ``variable_args`` are expected to be ``f0`` and ``tau``.
Only ``min/max-f0`` and ``min/max-tau`` need to be provided.
Example:
.. code-block:: ini
[{section}-f0+tau]
name = uniform_f0_tau
min-f0 = 10
max-f0 = 2048
min-tau = 0.0001
max-tau = 0.010
min-final_mass = 10
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
WorkflowConfigParser instance to read.
section : str
The name of the section to read.
variable_args : str
The name of the variable args. These should be separated by
``pycbc.VARARGS_DELIM``.
Returns
-------
UniformF0Tau :
This class initialized with the parameters provided in the config
file.
"""
tag = variable_args
variable_args = set(variable_args.split(pycbc.VARARGS_DELIM))
# get f0 and tau
f0 = bounded.get_param_bounds_from_config(cp, section, tag, 'f0')
tau = bounded.get_param_bounds_from_config(cp, section, tag, 'tau')
# see if f0 and tau should be renamed
if cp.has_option_tag(section, 'rdfreq', tag):
rdfreq = cp.get_opt_tag(section, 'rdfreq', tag)
else:
rdfreq = 'f0'
if cp.has_option_tag(section, 'damping_time', tag):
damping_time = cp.get_opt_tag(section, 'damping_time', tag)
else:
damping_time = 'tau'
        # check that they match what is in the variable args
if not variable_args == set([rdfreq, damping_time]):
raise ValueError("variable args do not match rdfreq and "
"damping_time names")
# get the final mass and spin values, if provided
final_mass = bounded.get_param_bounds_from_config(
cp, section, tag, 'final_mass')
final_spin = bounded.get_param_bounds_from_config(
cp, section, tag, 'final_spin')
extra_opts = {}
if cp.has_option_tag(section, 'norm_tolerance', tag):
extra_opts['norm_tolerance'] = float(
cp.get_opt_tag(section, 'norm_tolerance', tag))
if cp.has_option_tag(section, 'norm_seed', tag):
extra_opts['norm_seed'] = int(
cp.get_opt_tag(section, 'norm_seed', tag))
return cls(f0=f0, tau=tau,
final_mass=final_mass, final_spin=final_spin,
rdfreq=rdfreq, damping_time=damping_time,
**extra_opts)
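# Example (illustrative sketch): the renormalization done in __init__ is a
# Monte-Carlo acceptance fraction. The toy constraint below (points inside
# the unit disk) is hypothetical and stands in for the final-mass/final-spin
# cuts applied by this distribution.
if __name__ == "__main__":
    norm_tolerance = 1e-2
    nsamples = int(1. / norm_tolerance ** 2)
    x = numpy.random.uniform(-1., 1., size=nsamples)
    y = numpy.random.uniform(-1., 1., size=nsamples)
    num_in = (x ** 2 + y ** 2 < 1.).sum()
    # the correction added to the parent distribution's log norm
    delta_lognorm = numpy.log(num_in) - numpy.log(nsamples)
    print(numpy.exp(delta_lognorm))  # should be close to pi / 4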
| 10,564
| 38.569288
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/distributions/arbitrary.py
|
# Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating arbitrary distributions from
a file.
"""
import h5py
import numpy
import scipy.stats
from pycbc.distributions import bounded
import pycbc.transforms
class Arbitrary(bounded.BoundedDist):
r"""A distribution constructed from a set of parameter values using a kde.
Bounds may be optionally provided to limit the range.
Parameters
----------
bounds : dict, optional
Independent bounds on one or more parameters may be provided to limit
the range of the kde.
bandwidth : str, optional
Set the bandwidth method for the KDE. See
:py:func:`scipy.stats.gaussian_kde` for details. Default is "scott".
\**params :
The keyword arguments should provide the names of the parameters and
a list of their parameter values. If multiple parameters are provided,
a single kde will be produced with dimension equal to the number of
parameters.
"""
name = 'arbitrary'
def __init__(self, bounds=None, bandwidth="scott", **kwargs):
# initialize the bounds
if bounds is None:
bounds = {}
bounds.update({p: None for p in kwargs if p not in bounds})
super(Arbitrary, self).__init__(**bounds)
# check that all parameters specified in bounds have samples
if set(self.params) != set(kwargs.keys()):
raise ValueError("Must provide samples for all parameters given "
"in the bounds dictionary")
# if bounds are provided use logit transform to move the points
        # to +/- infinity
self._transforms = {}
self._tparams = {}
for param,bnds in self.bounds.items():
if numpy.isfinite(bnds[1] - bnds[0]):
tparam = 'logit'+param
samples = kwargs[param]
t = pycbc.transforms.Logit(param, tparam, domain=bnds)
self._transforms[tparam] = t
self._tparams[param] = tparam
                # keep only the sample points that fall inside the bounds
                inside = bnds.__contains__(samples)
                if inside.any():
                    samples = samples[inside]
# transform the sample points
kwargs[param] = t.transform({param: samples})[tparam]
elif not (~numpy.isfinite(bnds[0]) and ~numpy.isfinite(bnds[1])):
raise ValueError("if specifying bounds, both bounds must "
"be finite")
# build the kde
self._kde = self.get_kde_from_arrays(*[kwargs[p] for p in self.params])
self.set_bandwidth(bandwidth)
@property
def params(self):
return self._params
@property
def kde(self):
return self._kde
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
contain all of parameters in self's params. Unrecognized arguments are
ignored.
"""
for p in self._params:
if p not in kwargs.keys():
raise ValueError('Missing parameter {} to construct pdf.'
.format(p))
if kwargs in self:
# transform into the kde space
jacobian = 1.
for param, tparam in self._tparams.items():
t = self._transforms[tparam]
try:
samples = t.transform({param: kwargs[param]})
except ValueError as e:
# can get a value error if the value is exactly == to
# the bounds, in which case, just return 0.
if kwargs[param] in self.bounds[param]:
return 0.
else:
raise ValueError(e)
kwargs[param] = samples[tparam]
# update the jacobian for the transform; if p is the pdf
# in the params frame (the one we want) and p' is the pdf
# in the transformed frame (the one that's calculated) then:
# p = J * p', where J is the Jacobian of going from p to p'
jacobian *= t.jacobian(samples)
# for scipy < 0.15.0, gaussian_kde.pdf = gaussian_kde.evaluate
this_pdf = jacobian * self._kde.evaluate([kwargs[p]
for p in self._params])
if len(this_pdf) == 1:
return float(this_pdf)
else:
return this_pdf
else:
return 0.
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params.
Unrecognized arguments are ignored.
"""
if kwargs not in self:
return -numpy.inf
else:
return numpy.log(self._pdf(**kwargs))
def set_bandwidth(self, set_bw="scott"):
self._kde.set_bandwidth(set_bw)
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from the kde.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params.
"""
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
size = int(size)
arr = numpy.zeros(size, dtype=dtype)
draws = self._kde.resample(size)
draws = {param: draws[ii,:] for ii,param in enumerate(self.params)}
for (param,_) in dtype:
try:
# transform back to param space
tparam = self._tparams[param]
tdraws = {tparam: draws[param]}
draws[param] = self._transforms[tparam].inverse_transform(
tdraws)[param]
except KeyError:
pass
arr[param] = draws[param]
return arr
@staticmethod
def get_kde_from_arrays(*arrays):
"""Constructs a KDE from the given arrays.
\*arrays :
Each argument should be a 1D numpy array to construct the kde from.
The resulting KDE will have dimension given by the number of
parameters.
"""
return scipy.stats.gaussian_kde(numpy.vstack(arrays))
@classmethod
def from_config(cls, cp, section, variable_args):
"""Raises a NotImplementedError; to load from a config file, use
`FromFile`.
"""
raise NotImplementedError("This class does not support loading from a "
"config file. Use `FromFile` instead.")
class FromFile(Arbitrary):
r"""A distribution that reads the values of the parameter(s) from an hdf
file, computes the kde to construct the pdf, and draws random variables
from it.
Parameters
----------
filename : str
        The path to an hdf file containing the values of the parameters to be
        used to construct the distribution. Each parameter should
be a separate dataset in the hdf file, and all datasets should have
the same size. For example, to give a prior for mass1 and mass2 from
file f, f['mass1'] and f['mass2'] contain the n values for each
parameter.
datagroup : str, optional
The name of the group to look in for the samples. For example, if
        ``datagroup = 'samples'``, then parameter ``param`` will be retrieved
        from ``f['samples'][param]``. If none is provided (the default), the
        datasets are assumed to be in the top level of the file.
\**params :
The keyword arguments should provide the names of the parameters to be
read from the file and (optionally) their bounds. If no parameters are
provided, it will use all the parameters found in the file. To provide
bounds, specify e.g. mass1=[10,100]. Otherwise, mass1=None.
Attributes
----------
norm : float
The normalization of the multi-dimensional pdf.
lognorm : float
The log of the normalization.
kde :
The kde obtained from the values in the file.
"""
name = 'fromfile'
def __init__(self, filename=None, datagroup=None, **params):
if filename is None:
raise ValueError('A file must be specified for this distribution.')
self._filename = filename
self.datagroup = datagroup
# Get the parameter names to pass to get_kde_from_file
if len(params) == 0:
ps = None
else:
ps = list(params.keys())
param_vals, bw = self.get_arrays_from_file(filename, params=ps)
super(FromFile, self).__init__(bounds=params, bandwidth=bw,
**param_vals)
@property
def filename(self):
"""str: The path to the file containing values for the parameter(s).
"""
return self._filename
def get_arrays_from_file(self, params_file, params=None):
"""Reads the values of one or more parameters from an hdf file and
returns as a dictionary.
Parameters
----------
params_file : str
The hdf file that contains the values of the parameters.
params : {None, list}
If provided, will just retrieve the given parameter names.
Returns
-------
dict
A dictionary of the parameters mapping `param_name -> array`.
"""
        try:
            f = h5py.File(params_file, 'r')
        except OSError as e:
            raise ValueError('Unable to open {}: {}'.format(params_file, e))
if self.datagroup is not None:
get = f[self.datagroup]
else:
get = f
if params is not None:
if not isinstance(params, list):
params = [params]
for p in params:
if p not in get.keys():
raise ValueError('Parameter {} is not in {}'
.format(p, params_file))
else:
params = [str(k) for k in get.keys()]
params_values = {p: get[p][()] for p in params}
try:
bandwidth = f.attrs["bandwidth"]
except KeyError:
bandwidth = "scott"
f.close()
return params_values, bandwidth
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file.
The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
The file to construct the distribution from must be provided by setting
`filename`. Boundary arguments can be provided in the same way as
described in `get_param_bounds_from_config`.
.. code-block:: ini
[{section}-{tag}]
name = fromfile
filename = ra_prior.hdf
min-ra = 0
max-ra = 6.28
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
BoundedDist
A distribution instance from the pycbc.inference.prior module.
"""
return bounded.bounded_from_config(cls, cp, section, variable_args,
bounds_required=False)
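# A minimal usage sketch (not part of the original module): write a small hdf
# file of samples and build a ``FromFile`` distribution from it. The file name
# 'example_samples.hdf' and the parameter name 'mass1' are placeholders.
def _example_fromfile_usage(filename='example_samples.hdf'):
    with h5py.File(filename, 'w') as fp:
        fp['mass1'] = numpy.random.uniform(20., 50., size=1000)
    # with no parameters given, every dataset in the file is used
    dist = FromFile(filename=filename)
    return dist.rvs(size=10)['mass1']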
__all__ = ['Arbitrary', 'FromFile']
| 13,126
| 37.952522
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/catalog/__init__.py
|
# Copyright (C) 2017 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This package provides information about LIGO/Virgo detections of
compact binary mergers
"""
import numpy
from . import catalog
_aliases = {}
_aliases['mchirp'] = 'chirp_mass_source'
_aliases['mass1'] = 'mass_1_source'
_aliases['mass2'] = 'mass_2_source'
_aliases['snr'] = 'network_matched_filter_snr'
_aliases['z'] = _aliases['redshift'] = 'redshift'
_aliases['distance'] = 'luminosity_distance'
class Merger(object):
"""Informaton about a specific compact binary merger"""
def __init__(self, name, source='gwtc-1'):
""" Return the information of a merger
Parameters
----------
name: str
The name (GW prefixed date) of the merger event.
"""
try:
self.data = catalog.get_source(source)[name]
except KeyError:
# Try common name
data = catalog.get_source(source)
for mname in data:
cname = data[mname]['commonName']
if cname == name:
name = mname
self.data = data[name]
break
else:
raise ValueError('Did not find merger matching'
' name: {}'.format(name))
# Set some basic params from the dataset
for key in self.data:
setattr(self, '_raw_' + key, self.data[key])
for key in _aliases:
setattr(self, key, self.data[_aliases[key]])
self.common_name = self.data['commonName']
self.time = self.data['GPS']
self.frame = 'source'
def median1d(self, name, return_errors=False):
""" Return median 1d marginalized parameters
Parameters
----------
name: str
The name of the parameter requested
return_errors: Optional, {bool, False}
If true, return a second and third parameter that represents the
lower and upper 90% error on the parameter.
Returns
-------
param: float or tuple
The requested parameter
"""
if name in _aliases:
name = _aliases[name]
try:
if return_errors:
mid = self.data[name]
high = self.data[name + '_upper']
low = self.data[name + '_lower']
return (mid, low, high)
else:
return self.data[name]
except KeyError as e:
print(e)
raise RuntimeError("Cannot get parameter {}".format(name))
def strain(self, ifo, duration=32, sample_rate=4096):
""" Return strain around the event
Currently this will return the strain around the event in the smallest
format available. Selection of other data is not yet available.
Parameters
----------
        ifo: str
            The name of the observatory you want strain for. Ex. H1, L1, V1
        duration: int, optional
            The duration in seconds of strain to return around the event.
            Default is 32.
        sample_rate: int, optional
            The sample rate in Hz of the returned strain. Default is 4096.
Returns
-------
strain: pycbc.types.TimeSeries
Strain around the event.
"""
from pycbc.io import get_file
from pycbc.frame import read_frame
for fdict in self.data['strain']:
if (fdict['detector'] == ifo and fdict['duration'] == duration and
fdict['sampling_rate'] == sample_rate and
fdict['format'] == 'gwf'):
url = fdict['url']
break
else:
raise ValueError('no strain data is available as requested '
'for ' + self.common_name)
ver = url.split('/')[-1].split('-')[1].split('_')[-1]
sampling_map = {4096: "4KHZ",
16384: "16KHZ"}
channel = "{}:GWOSC-{}_{}_STRAIN".format(
ifo, sampling_map[sample_rate], ver)
filename = get_file(url, cache=True)
return read_frame(str(filename), str(channel))
class Catalog(object):
"""Manage a set of binary mergers"""
def __init__(self, source='gwtc-1'):
""" Return the set of detected mergers
The set of detected mergers. At some point this may have some selection
abilities.
"""
self.data = catalog.get_source(source=source)
self.mergers = {name: Merger(name,
source=source) for name in self.data}
self.names = self.mergers.keys()
def __len__(self):
return len(self.mergers)
def __getitem__(self, key):
try:
return self.mergers[key]
except KeyError:
# Try common name
for m in self.mergers:
if key == self.mergers[m].common_name:
break
else:
raise ValueError('Did not find merger matching'
' name: {}'.format(key))
return self.mergers[m]
def __setitem__(self, key, value):
self.mergers[key] = value
def __delitem__(self, key):
del self.mergers[key]
def __iter__(self):
return iter(self.mergers)
def median1d(self, param, return_errors=False):
""" Return median 1d marginalized parameters
Parameters
----------
        param: str
            The name of the parameter requested
return_errors: Optional, {bool, False}
If true, return a second and third parameter that represents the
lower and upper 90% error on the parameter.
Returns
-------
        param: numpy.ndarray or tuple
The requested parameter
"""
v = [self.mergers[m].median1d(param, return_errors=return_errors)
for m in self.mergers]
if return_errors:
value, merror, perror = zip(*v)
return numpy.array(value), numpy.array(merror), numpy.array(perror)
else:
return numpy.array(v)
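# A minimal usage sketch (not part of the original module): load the GWTC-1
# catalog and query median parameters. This downloads the catalog json from
# GWOSC on first use, so it needs network access; the file is cached after.
def _example_catalog_usage():
    c = Catalog(source='gwtc-1')
    # median chirp masses of every event in the catalog
    all_mchirp = c.median1d('mchirp')
    # events can also be looked up by their common name
    m = c['GW150914']
    mass1, low, high = m.median1d('mass1', return_errors=True)
    return all_mchirp, mass1, low, high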
| 6,843
| 31.903846
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/catalog/catalog.py
|
# Copyright (C) 2017 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This modules contains information about the announced LIGO/Virgo
compact binary mergers
"""
import json
from pycbc.io import get_file
# For the time being all quantities are the 1-d median value
# FIXME with posteriors when available and we can just post-process that
# LVC catalogs
base_lvc_url = "https://www.gwosc.org/eventapi/jsonfull/{}/"
_catalogs = {'GWTC-1-confident': 'LVC',
'GWTC-1-marginal': 'LVC',
'Initial_LIGO_Virgo': 'LVC',
'O1_O2-Preliminary': 'LVC',
'O3_Discovery_Papers': 'LVC',
'GWTC-2': 'LVC',
'GWTC-2.1-confident': 'LVC',
'GWTC-2.1-marginal': 'LVC',
'GWTC-3-confident': 'LVC',
'GWTC-3-marginal': 'LVC'}
# add some aliases
_aliases = {}
_aliases['gwtc-1'] = 'GWTC-1-confident'
_aliases['gwtc-2'] = 'GWTC-2'
_aliases['gwtc-2.1'] = 'GWTC-2.1-confident'
_aliases['gwtc-3'] = 'GWTC-3-confident'
def list_catalogs():
"""Return a list of possible GW catalogs to query"""
return list(_catalogs.keys())
def get_source(source):
"""Get the source data for a particular GW catalog
"""
if source in _aliases:
source = _aliases[source]
    if source in _catalogs:
        catalog_type = _catalogs[source]
        if catalog_type == 'LVC':
            fname = get_file(base_lvc_url.format(source), cache=True)
            with open(fname, 'r') as fp:
                data = json.load(fp)
        else:
            raise ValueError('Unsupported catalog type {}'.format(
                catalog_type))
    else:
        raise ValueError('Unknown catalog source {}'.format(source))
    return data['events']
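# A minimal usage sketch (not part of the original module): list the available
# catalogs and load the event dictionary for one of them. The json catalog is
# fetched from GWOSC, so network access is needed on first use.
def _example_get_source():
    available = list_catalogs()
    events = get_source('gwtc-1')
    return available, sorted(events)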
| 2,479
| 32.513514
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/results/psd.py
|
# Copyright (C) 2022
# Tito Dal Canton, Gareth Cabourn Davies, Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate PSD figures
"""
from pycbc.results import ifo_color
from pycbc import DYN_RANGE_FAC
def generate_asd_plot(psddict, output_filename):
"""
Generate an ASD plot as used for upload to GraceDB.
Parameters
----------
psddict: dictionary
A dictionary keyed on ifo containing the PSDs as
FrequencySeries objects
output_filename: string
The filename for the plot to be saved to
Returns
-------
None
"""
from matplotlib import pyplot as plt
asd_fig, asd_ax = plt.subplots(1)
for ifo in sorted(psddict.keys()):
curr_psd = psddict[ifo]
# Can't plot log(0) so start from point 1
asd_ax.loglog(curr_psd.sample_frequencies[1:],
curr_psd[1:] ** 0.5 / DYN_RANGE_FAC,
c=ifo_color(ifo), label=ifo)
asd_ax.legend()
asd_ax.set_xlim([10, 1300])
asd_ax.set_ylim([3E-24, 1E-20])
asd_ax.set_xlabel('Frequency (Hz)')
    asd_ax.set_ylabel('Strain ASD (1 / Hz$^{1/2}$)')
asd_fig.savefig(output_filename)
__all__ = ["generate_asd_plot"]
| 2,081
| 29.173913
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/results/versioning.py
|
#!/usr/bin/python
# Copyright (C) 2015 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import subprocess
import urllib.parse
import lal, lalframe
import pycbc.version, glue.git_version
def get_library_version_info():
"""This will return a list of dictionaries containing versioning
information about the various LIGO libraries that PyCBC will use in an
analysis run."""
library_list = []
def add_info_new_version(info_dct, curr_module, extra_str):
vcs_object = getattr(curr_module, extra_str +'VCSInfo')
info_dct['ID'] = vcs_object.vcsId
info_dct['Status'] = vcs_object.vcsStatus
info_dct['Version'] = vcs_object.version
info_dct['Tag'] = vcs_object.vcsTag
info_dct['Author'] = vcs_object.vcsAuthor
info_dct['Branch'] = vcs_object.vcsBranch
info_dct['Committer'] = vcs_object.vcsCommitter
info_dct['Date'] = vcs_object.vcsDate
lalinfo = {}
lalinfo['Name'] = 'LAL'
try:
lalinfo['ID'] = lal.VCSId
lalinfo['Status'] = lal.VCSStatus
lalinfo['Version'] = lal.VCSVersion
lalinfo['Tag'] = lal.VCSTag
lalinfo['Author'] = lal.VCSAuthor
lalinfo['Branch'] = lal.VCSBranch
lalinfo['Committer'] = lal.VCSCommitter
lalinfo['Date'] = lal.VCSDate
except AttributeError:
add_info_new_version(lalinfo, lal, '')
library_list.append(lalinfo)
lalframeinfo = {}
try:
lalframeinfo['Name'] = 'LALFrame'
lalframeinfo['ID'] = lalframe.FrameVCSId
lalframeinfo['Status'] = lalframe.FrameVCSStatus
lalframeinfo['Version'] = lalframe.FrameVCSVersion
lalframeinfo['Tag'] = lalframe.FrameVCSTag
lalframeinfo['Author'] = lalframe.FrameVCSAuthor
lalframeinfo['Branch'] = lalframe.FrameVCSBranch
lalframeinfo['Committer'] = lalframe.FrameVCSCommitter
lalframeinfo['Date'] = lalframe.FrameVCSDate
except AttributeError:
add_info_new_version(lalframeinfo, lalframe, 'Frame')
library_list.append(lalframeinfo)
lalsimulationinfo = {}
lalsimulationinfo['Name'] = 'LALSimulation'
try:
import lalsimulation
lalsimulationinfo['ID'] = lalsimulation.SimulationVCSId
lalsimulationinfo['Status'] = lalsimulation.SimulationVCSStatus
lalsimulationinfo['Version'] = lalsimulation.SimulationVCSVersion
lalsimulationinfo['Tag'] = lalsimulation.SimulationVCSTag
lalsimulationinfo['Author'] = lalsimulation.SimulationVCSAuthor
lalsimulationinfo['Branch'] = lalsimulation.SimulationVCSBranch
lalsimulationinfo['Committer'] = lalsimulation.SimulationVCSCommitter
lalsimulationinfo['Date'] = lalsimulation.SimulationVCSDate
except AttributeError:
add_info_new_version(lalsimulationinfo, lalsimulation, 'Simulation')
except ImportError:
pass
library_list.append(lalsimulationinfo)
glueinfo = {}
glueinfo['Name'] = 'LSCSoft-Glue'
glueinfo['ID'] = glue.git_version.id
glueinfo['Status'] = glue.git_version.status
glueinfo['Version'] = glue.git_version.version
glueinfo['Tag'] = glue.git_version.tag
glueinfo['Author'] = glue.git_version.author
glueinfo['Builder'] = glue.git_version.builder
glueinfo['Branch'] = glue.git_version.branch
glueinfo['Committer'] = glue.git_version.committer
glueinfo['Date'] = glue.git_version.date
library_list.append(glueinfo)
pycbcinfo = {}
pycbcinfo['Name'] = 'PyCBC'
pycbcinfo['ID'] = pycbc.version.version
pycbcinfo['Status'] = pycbc.version.git_status
pycbcinfo['Version'] = pycbc.version.release or ''
pycbcinfo['Tag'] = pycbc.version.git_tag
pycbcinfo['Author'] = pycbc.version.git_author
pycbcinfo['Builder'] = pycbc.version.git_builder
pycbcinfo['Branch'] = pycbc.version.git_branch
pycbcinfo['Committer'] = pycbc.version.git_committer
pycbcinfo['Date'] = pycbc.version.git_build_date
library_list.append(pycbcinfo)
return library_list
def get_code_version_numbers(executable_names, executable_files):
"""Will extract the version information from the executables listed in
the executable section of the supplied ConfigParser object.
Returns
--------
dict
A dictionary keyed by the executable name with values giving the
version string for each executable.
"""
code_version_dict = {}
for exe_name, value in zip(executable_names, executable_files):
value = urllib.parse.urlparse(value)
logging.info("Getting version info for %s", exe_name)
version_string = None
if value.scheme in ['gsiftp', 'http', 'https']:
code_version_dict[exe_name] = "Using bundle downloaded from %s" % value
elif value.scheme == 'singularity':
txt = (
"Executable run from a singularity image. See config file "
"and site catalog for details of what image was used."
)
code_version_dict[exe_name] = txt
else:
try:
version_string = subprocess.check_output(
[value.path, '--version'],
stderr=subprocess.STDOUT
).decode()
except subprocess.CalledProcessError:
version_string = "Executable fails on {} --version"
version_string = version_string.format(value.path)
except OSError:
version_string = "Executable doesn't seem to exist(!)"
code_version_dict[exe_name] = version_string
return code_version_dict
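# A minimal usage sketch (not part of the original module): query the version
# string of an executable on the local system. The executable name and path
# below are placeholders; any program that understands '--version' will do.
def _example_get_code_version_numbers():
    return get_code_version_numbers(['python3'], ['/usr/bin/python3'])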
| 6,293
| 39.606452
| 83
|
py
|
pycbc
|
pycbc-master/pycbc/results/metadata.py
|
"""
This Module contains generic utility functions for creating plots within
PyCBC.
"""
import os.path, pycbc.version
import configparser as ConfigParser
from html.parser import HTMLParser
from xml.sax.saxutils import escape, unescape
escape_table = {
'"': """,
"'": "'",
"@": "@",
}
unescape_table = {
"@" : "@",
}
for k, v in escape_table.items():
unescape_table[v] = k
def html_escape(text):
""" Sanitize text for html parsing """
return escape(text, escape_table)
class MetaParser(HTMLParser):
def __init__(self):
self.metadata = {}
HTMLParser.__init__(self)
def handle_data(self, data):
pass
def handle_starttag(self, tag, attrs):
attr= {}
for key, value in attrs:
attr[key] = value
if tag == 'div' and 'class' in attr and attr['class'] == 'pycbc-meta':
self.metadata[attr['key']] = unescape(attr['value'], unescape_table)
def save_html_with_metadata(fig, filename, fig_kwds, kwds):
""" Save a html output to file with metadata """
if isinstance(fig, str):
text = fig
else:
from mpld3 import fig_to_html
text = fig_to_html(fig, **fig_kwds)
    with open(filename, 'w') as f:
        for key, value in kwds.items():
            value = escape(value, escape_table)
            line = "<div class=pycbc-meta key=\"%s\" value=\"%s\"></div>" \
                % (str(key), value)
            f.write(line)
        f.write(text)
def load_html_metadata(filename):
""" Get metadata from html file """
parser = MetaParser()
    with open(filename, 'r') as fp:
        data = fp.read()
if 'pycbc-meta' in data:
print("LOADING HTML FILE %s" % filename)
parser.feed(data)
cp = ConfigParser.ConfigParser(parser.metadata)
cp.add_section(os.path.basename(filename))
return cp
def save_png_with_metadata(fig, filename, fig_kwds, kwds):
""" Save a matplotlib figure to a png with metadata
"""
from PIL import Image, PngImagePlugin
fig.savefig(filename, **fig_kwds)
im = Image.open(filename)
meta = PngImagePlugin.PngInfo()
for key in kwds:
meta.add_text(str(key), str(kwds[key]))
im.save(filename, "png", pnginfo=meta)
def save_pdf_with_metadata(fig, filename, fig_kwds, kwds):
"""Save a matplotlib figure to a PDF file with metadata.
"""
# https://stackoverflow.com/a/17462125
from matplotlib.backends.backend_pdf import PdfPages
with PdfPages(filename) as pdfp:
fig.savefig(pdfp, format='pdf', **fig_kwds)
metadata = pdfp.infodict()
for key in kwds:
if str(key).lower() == 'title':
# map the title to the official PDF keyword (capitalized)
metadata['Title'] = str(kwds[key])
else:
metadata[str(key)] = str(kwds[key])
def load_png_metadata(filename):
from PIL import Image
data = Image.open(filename).info
cp = ConfigParser.ConfigParser(data)
cp.add_section(os.path.basename(filename))
return cp
_metadata_saver = {'.png': save_png_with_metadata,
'.html': save_html_with_metadata,
'.pdf': save_pdf_with_metadata,
}
_metadata_loader = {'.png': load_png_metadata,
'.html': load_html_metadata,
}
def save_fig_with_metadata(fig, filename, fig_kwds=None, **kwds):
""" Save plot to file with metadata included. Kewords translate to metadata
that is stored directly in the plot file. Limited format types available.
Parameters
----------
fig: matplotlib figure
The matplotlib figure to save to the file
filename: str
Name of file to store the plot.
"""
if fig_kwds is None:
fig_kwds = {}
try:
extension = os.path.splitext(filename)[1]
kwds['version'] = pycbc.version.git_verbose_msg
_metadata_saver[extension](fig, filename, fig_kwds, kwds)
except KeyError:
raise TypeError('Cannot save file %s with metadata, extension %s not '
'supported at this time' % (filename, extension))
def load_metadata_from_file(filename):
""" Load the plot related metadata saved in a file
Parameters
----------
filename: str
Name of file load metadata from.
Returns
-------
cp: ConfigParser
A configparser object containing the metadata
"""
try:
extension = os.path.splitext(filename)[1]
return _metadata_loader[extension](filename)
except KeyError:
raise TypeError('Cannot read metadata from file %s, extension %s not '
'supported at this time' % (filename, extension))
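# A minimal usage sketch (not part of the original module): save a figure to
# png with metadata attached and read the metadata back. The file name and the
# 'caption' keyword are placeholders; any keyword arguments are stored as text
# entries in the png.
def _example_png_metadata(filename='example_meta.png'):
    from matplotlib import pyplot
    fig, ax = pyplot.subplots()
    ax.plot([0, 1], [0, 1])
    save_fig_with_metadata(fig, filename, title='Example plot',
                           caption='A straight line')
    # returns a ConfigParser with a section named after the file
    return load_metadata_from_file(filename)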
| 4,769
| 29.974026
| 89
|
py
|
pycbc
|
pycbc-master/pycbc/results/render.py
|
#!/usr/bin/python
# Copyright (C) 2015 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path, types
import codecs
from configparser import ConfigParser
from jinja2 import Environment, FileSystemLoader
from xml.sax.saxutils import unescape
import pycbc.results
from pycbc.results import unescape_table
from pycbc.results.metadata import save_html_with_metadata
from pycbc.workflow.core import SegFile, makedir
def render_workflow_html_template(filename, subtemplate, filelists, **kwargs):
""" Writes a template given inputs from the workflow generator. Takes
a list of tuples. Each tuple is a pycbc File object. Also the name of the
subtemplate to render and the filename of the output.
"""
dirnam = os.path.dirname(filename)
makedir(dirnam)
try:
filenames = [f.name for filelist in filelists for f in filelist if f is not None]
except TypeError:
filenames = []
# render subtemplate
subtemplate_dir = pycbc.results.__path__[0] + '/templates/wells'
env = Environment(loader=FileSystemLoader(subtemplate_dir))
env.globals.update(get_embedded_config=get_embedded_config)
env.globals.update(path_exists=os.path.exists)
env.globals.update(len=len)
subtemplate = env.get_template(subtemplate)
context = {'filelists' : filelists,
'dir' : dirnam}
context.update(kwargs)
output = subtemplate.render(context)
# save as html page
kwds = {'render-function' : 'render_tmplt',
'filenames' : ','.join(filenames)}
kwds.update(kwargs)
for key in kwds:
kwds[key] = str(kwds[key])
save_html_with_metadata(str(output), filename, None, kwds)
def get_embedded_config(filename):
""" Attempt to load config data attached to file
"""
def check_option(self, section, name):
return (self.has_section(section) and
(self.has_option(section, name) or (name in self.defaults())))
try:
cp = pycbc.results.load_metadata_from_file(filename)
except TypeError:
cp = ConfigParser()
cp.check_option = types.MethodType(check_option, cp)
return cp
def setup_template_render(path, config_path):
""" This function is the gateway for rendering a template for a file.
"""
# initialization
cp = get_embedded_config(path)
output = ''
filename = os.path.basename(path)
# use meta-data if not empty for rendering
if cp.has_option(filename, 'render-function'):
render_function_name = cp.get(filename, 'render-function')
render_function = eval(render_function_name)
output = render_function(path, cp)
# read configuration file for rendering
elif os.path.exists(config_path):
cp.read(config_path)
# render template
if cp.has_option(filename, 'render-function'):
render_function_name = cp.get(filename, 'render-function')
render_function = eval(render_function_name)
output = render_function(path, cp)
else:
output = render_default(path, cp)
# if no configuration file is present
# then render the default template
else:
output = render_default(path, cp)
return output
def render_default(path, cp):
""" This is the default function that will render a template to a string of HTML. The
string will be for a drop-down tab that contains a link to the file.
If the file extension requires information to be read, then that is passed to the
content variable (eg. a segmentlistdict).
"""
# define filename and slug from path
filename = os.path.basename(path)
slug = filename.replace('.', '_')
# initializations
content = None
if path.endswith('.xml') or path.endswith('.xml.gz'):
# segment or veto file return a segmentslistdict instance
try:
wf_file = SegFile.from_segment_xml(path)
# FIXME: This is a dictionary, but the code wants a segmentlist
# for now I just coalesce.
wf_file.return_union_seglist()
except Exception as e:
print('No segment table found in %s : %s' % (path, e))
# render template
template_dir = pycbc.results.__path__[0] + '/templates/files'
env = Environment(loader=FileSystemLoader(template_dir))
env.globals.update(abs=abs)
env.globals.update(open=open)
env.globals.update(path_exists=os.path.exists)
template = env.get_template('file_default.html')
context = {'path' : path,
'filename' : filename,
'slug' : slug,
'cp' : cp,
'content' : content}
output = template.render(context)
return output
def render_glitchgram(path, cp):
""" Render a glitchgram file template.
"""
# define filename and slug from path
filename = os.path.basename(path)
slug = filename.replace('.', '_')
# render template
template_dir = pycbc.results.__path__[0] + '/templates/files'
env = Environment(loader=FileSystemLoader(template_dir))
env.globals.update(abs=abs)
template = env.get_template(cp.get(filename, 'template'))
context = {'filename' : filename,
'slug' : slug,
'cp' : cp}
output = template.render(context)
return output
def render_text(path, cp):
""" Render a file as text.
"""
# define filename and slug from path
filename = os.path.basename(path)
slug = filename.replace('.', '_')
# initializations
content = None
# read file as a string
with codecs.open(path, 'r', encoding='utf-8', errors='replace') as fp:
content = fp.read()
# replace all the escaped characters
content = unescape(content, unescape_table)
# render template
template_dir = pycbc.results.__path__[0] + '/templates/files'
env = Environment(loader=FileSystemLoader(template_dir))
env.globals.update(abs=abs)
env.globals.update(path_exists=os.path.exists)
template = env.get_template('file_pre.html')
context = {'filename' : filename,
'slug' : slug,
'cp' : cp,
'content' : content}
output = template.render(context)
return output
def render_ignore(path, cp):
""" Does not render anything.
"""
return ''
def render_tmplt(path, cp):
""" Render a file as text.
"""
# define filename and slug from path
filename = os.path.basename(path)
slug = filename.replace('.', '_')
# initializations
content = None
# read file as a string
with open(path, 'r') as fp:
content = fp.read()
# replace all the escaped characters
content = unescape(content, unescape_table)
# render template
template_dir = '/'.join(path.split('/')[:-1])
env = Environment(loader=FileSystemLoader(template_dir))
env.globals.update(setup_template_render=setup_template_render)
env.globals.update(get_embedded_config=get_embedded_config)
env.globals.update(path_exists=os.path.exists)
template = env.get_template(filename)
context = {'filename' : filename,
'slug' : slug,
'cp' : cp,
'content' : content}
output = template.render(context)
return output
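# A minimal usage sketch (not part of the original module): render the
# drop-down html snippet for a result file. Both paths below are placeholders;
# the plot file must exist, and the config file is optional (the default
# template is used if it is absent or has no entry for this file).
def _example_setup_template_render(path='example_plot.png',
                                   config_path='example_render.ini'):
    return setup_template_render(path, config_path)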
| 7,996
| 31.245968
| 89
|
py
|
pycbc
|
pycbc-master/pycbc/results/color.py
|
""" Utilities for managing matplotlib colors and mapping ifos to color
"""
_ifo_color_map = {
'G1': '#222222', # dark gray
'K1': '#ffb200', # yellow/orange
'H1': '#ee0000', # red
'I1': '#b0dd8b', # light green
'L1': '#4ba6ff', # blue
'V1': '#9b59b6', # magenta/purple
}
_source_color_map = {
'BNS': '#A2C8F5', # light blue
'NSBH': '#FFB482', # light orange
'BBH': '#FE9F9B', # light red
'Mass Gap': '#8EE5A1', # light green
'GNS': '#98D6CB', # turquoise
'GG': '#79BB87', # green
'BHG': '#C6C29E' # dark khaki
}
def ifo_color(ifo):
return _ifo_color_map[ifo]
def source_color(source):
return _source_color_map[source]
| 703
| 22.466667
| 70
|
py
|
pycbc
|
pycbc-master/pycbc/results/mpld3_utils.py
|
""" This module provides functionality to extend mpld3
"""
import mpld3, mpld3.plugins, mpld3.utils
class ClickLink(mpld3.plugins.PluginBase):
"""Plugin for following a link on click"""
JAVASCRIPT = """
mpld3.register_plugin("clicklink", ClickLink);
ClickLink.prototype = Object.create(mpld3.Plugin.prototype);
ClickLink.prototype.constructor = ClickLink;
ClickLink.prototype.requiredProps = ["id"];
ClickLink.prototype.defaultProps = {
links: null
}
function ClickLink(fig, props){
mpld3.Plugin.call(this, fig, props);
};
ClickLink.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var links = this.props.links;
obj.elements().on("mousedown",
function(d, i){
window.open(links[i]);
}
);
}
"""
def __init__(self, points, links):
self.dict_ = {"type": "clicklink",
"id": mpld3.utils.get_id(points),
"links": links,
}
class MPLSlide(mpld3.plugins.PluginBase):
JAVASCRIPT = """
mpld3.Axes.prototype.zoomed = function(propagate) {
propagate = typeof propagate == "undefined" ? true : propagate;
if (propagate) {
var dt0 = this.zoom.translate()[0] - this.zoom.last_t[0];
var dt1 = this.zoom.translate()[1] - this.zoom.last_t[1];
var ds = this.zoom.scale() / this.zoom.last_s;
this.zoom_x.translate([ this.zoom_x.translate()[0] + dt0, 0 ]);
this.zoom_x.scale(this.zoom_x.scale() * ds);
this.zoom.last_t = this.zoom.translate();
this.zoom.last_s = this.zoom.scale();
this.sharex.forEach(function(ax) {
ax.zoom_x.translate(this.zoom_x.translate()).scale(this.zoom_x.scale());
}.bind(this));
this.sharex.forEach(function(ax) {
ax.zoomed(false);
});
}
for (var i = 0; i < this.elements.length; i++) {
this.elements[i].zoomed();
}
};
mpld3.ZoomPlugin = mpld3_ZoomPlugin;
mpld3.register_plugin("zoom", mpld3_ZoomPlugin);
mpld3_ZoomPlugin.prototype = Object.create(mpld3.Plugin.prototype);
mpld3_ZoomPlugin.prototype.constructor = mpld3_ZoomPlugin;
mpld3_ZoomPlugin.prototype.requiredProps = [];
mpld3_ZoomPlugin.prototype.defaultProps = {
button: true,
enabled: null
};
function mpld3_ZoomPlugin(fig, props) {
mpld3.Plugin.call(this, fig, props);
if (this.props.enabled === null) {
this.props.enabled = !this.props.button;
}
var enabled = this.props.enabled;
if (this.props.button) {
var ZoomButton = mpld3.ButtonFactory({
buttonID: "zoom",
sticky: true,
actions: [ "scroll", "drag" ],
onActivate: this.activate.bind(this),
onDeactivate: this.deactivate.bind(this),
onDraw: function() {
this.setState(enabled);
},
icon: function() {
return mpld3.icons["move"];
}
});
this.fig.buttons.push(ZoomButton);
}
}
mpld3_ZoomPlugin.prototype.activate = function() {
this.fig.enable_zoom();
};
mpld3_ZoomPlugin.prototype.deactivate = function() {
this.fig.disable_zoom();
};
mpld3_ZoomPlugin.prototype.draw = function() {
if (this.props.enabled) this.fig.enable_zoom(); else this.fig.disable_zoom();
};
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "zoom",
"button": button,
"enabled": enabled}
class Tooltip(mpld3.plugins.PointHTMLTooltip):
JAVASCRIPT = ""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
super(Tooltip, self).__init__(points, labels, hoffset, voffset, "")
class LineTooltip(mpld3.plugins.LineHTMLTooltip):
JAVASCRIPT = ""
def __init__(self, line, label=None, hoffset=0, voffset=10, css=None):
super(LineTooltip, self).__init__(line, label, hoffset, voffset, "")
| 4,810
| 38.434426
| 93
|
py
|
pycbc
|
pycbc-master/pycbc/results/layout.py
|
# Copyright (C) 2015 Alexander Harvey Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" This module contains result page layout and numbering helper functions
"""
import os.path
from itertools import zip_longest
def two_column_layout(path, cols, unique='', **kwargs):
""" Make a well layout in a two column format
Parameters
----------
path: str
Location to make the well html file
unique: str
String to add to end of well name. Used if you want more than one well.
cols: list of tuples
The format of the items on the well result section. Each tuple
contains the two files that are shown in the left and right hand
side of a row in the well.html page.
"""
path = os.path.join(os.getcwd(), path, 'well{}.html'.format(unique))
from pycbc.results.render import render_workflow_html_template
render_workflow_html_template(path, 'two_column.html', cols, **kwargs)
def single_layout(path, files, **kwargs):
""" Make a well layout in single column format
path: str
Location to make the well html file
files: list of pycbc.workflow.core.Files
This list of images to show in order within the well layout html file.
"""
two_column_layout(path, [(f,) for f in files], **kwargs)
def grouper(iterable, n, fillvalue=None):
""" Group items into chunks of n length
"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def group_layout(path, files, **kwargs):
""" Make a well layout in chunks of two from a list of files
path: str
Location to make the well html file
files: list of pycbc.workflow.core.Files
This list of images to show in order within the well layout html file.
Every two are placed on the same row.
"""
if len(files) > 0:
two_column_layout(path, list(grouper(files, 2)), **kwargs)
class SectionNumber(object):
""" Class to help with numbering sections in an output page.
"""
def __init__(self, base, secs):
""" Create section numbering instance
Parameters
----------
base: path
            The path of the output html results directory
secs: list of strings
List of the subsections of the output html page
"""
self.base = base
self.secs = secs
self.name = {}
self.count = {}
self.num = {}
for num, sec in enumerate(secs):
self.name[sec] = '%s._%s' % (num + 1, sec)
self.num[sec] = num
self.count[sec] = 1
    def __getitem__(self, path):
""" Return the path to use for the given subsection with numbering
included. The numbering increments for each new subsection request. If
a section is re-requested, it gets the original numbering.
"""
if path in self.name:
name = self.name[path]
else:
sec, subsec = path.split('/')
subnum = self.count[sec]
num = self.num[sec]
name = '%s/%s.%02d_%s' % (self.name[sec], num + 1, subnum, subsec)
self.count[sec] += 1
self.name[path] = name
path = os.path.join(os.getcwd(), self.base, name)
return path
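# A minimal usage sketch (not part of the original module): number the
# subsections of a results page. The base directory and section names below
# are placeholders.
def _example_section_numbering():
    secs = SectionNumber('results', ['injections', 'detectors'])
    first = secs['injections/found']
    # re-requesting the same subsection returns the original numbered path
    again = secs['injections/found']
    return first, first == again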
| 3,937
| 35.803738
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/results/table_utils.py
|
# Copyright (C) 2014 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides functions to generate sortable html tables
"""
import mako.template, uuid
google_table_template = mako.template.Template("""
<script type='text/javascript' src='https://www.google.com/jsapi'></script>
<script type='text/javascript'>
google.load('visualization', '1', {packages:['table']});
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable();
% for type, name in column_descriptions:
data.addColumn('${str(type)}', '${str(name)}');
% endfor
data.addRows(${data});
% if format_strings is not None:
% for i, format_string in enumerate(format_strings):
% if format_string is not None:
var formatter = new google.visualization.NumberFormat({pattern:'${format_string}'});
formatter.format(data, ${i});
% endif
% endfor
% endif
var table = new google.visualization.Table(document.getElementById('${div_id}'));
table.draw(data, {showRowNumber: 'true',
page: '${page_enable}',
allowHtml: 'true',
pageSize: ${page_size}});
}
</script>
<div id='${div_id}'></div>
""")
def html_table(columns, names, page_size=None, format_strings=None):
""" Return an html table of this data
Parameters
----------
columns : list of numpy arrays
names : list of strings
The list of columns names
page_size : {int, None}, optional
The number of items to show on each page of the table
format_strings : {lists of strings, None}, optional
The ICU format string for this column, None for no formatting. All
columns must have a format string if provided.
Returns
-------
html_table : str
A str containing the html code to display a table of this data
"""
if page_size is None:
page = 'disable'
else:
page = 'enable'
div_id = uuid.uuid4()
column_descriptions = []
for column, name in zip(columns, names):
if column.dtype.kind == 'S' or column.dtype.kind == 'U':
ctype = 'string'
else:
ctype = 'number'
column_descriptions.append((ctype, name))
data = []
for item in zip(*columns):
data.append(list(item))
return google_table_template.render(div_id=div_id,
page_enable=page,
column_descriptions = column_descriptions,
page_size=page_size,
data=data,
format_strings=format_strings,
)
static_table_template = mako.template.Template("""
<table class="table">
% if titles is not None:
<tr>
% for i in range(len(titles)):
<th>
${titles[i]}
</th>
% endfor
</tr>
% endif
% for i in range(len(data)):
<tr>
% for j in range(len(data[i])):
<td>
${data[i][j]}
</td>
% endfor
</tr>
% endfor
</table>
""")
def static_table(data, titles=None):
""" Return an html tableo of this data
Parameters
----------
data : two-dimensional numpy string array
Array containing the cell values
    titles : numpy array, optional
        Vector of title strings, one per column
Returns
-------
html_table : str
A string containing the html table.
"""
return static_table_template.render(data=data, titles=titles)
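# A minimal usage sketch (not part of the original module): render a small
# static html table. The detector names and values are placeholders.
def _example_static_table():
    import numpy
    data = numpy.array([['H1', '10.3'], ['L1', '9.8']])
    titles = numpy.array(['Detector', 'SNR'])
    return static_table(data, titles=titles)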
| 4,726
| 31.6
| 104
|
py
|
pycbc
|
pycbc-master/pycbc/results/pygrb_postprocessing_utils.py
|
# Copyright (C) 2019 Francesco Pannarale, Gino Contestabile, Cameron Mills
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# =============================================================================
# Preamble
# =============================================================================
"""
Module to generate PyGRB figures: scatter plots and timeseries.
"""
import os
import logging
import argparse
import copy
import numpy
import h5py
from scipy import stats
from pycbc.detector import Detector
# All/most of these final imports will become obsolete with hdf5 switch
try:
from ligo import segments
from ligo.lw import utils, lsctables
from ligo.lw.table import Table
from ligo.segments.utils import fromsegwizard
# Handle MultiInspiral xml-tables with glue,
# as ligo.lw no longer supports them
from glue.ligolw import lsctables as glsctables
# from glue.ligolw.ilwd import ilwdchar as gilwdchar
from glue.ligolw.ligolw import LIGOLWContentHandler
except ImportError:
pass
# =============================================================================
# Arguments functions:
# * Initialize a parser object with arguments shared by all plotting scripts
# * Add to the parser object the arguments used for Monte-Carlo on distance
# * Add to the parser object the arguments used for BestNR calculation
# * Add to the parser object the arguments for found/missed injection files
# =============================================================================
def pygrb_initialize_plot_parser(description=None, version=None):
"""Sets up a basic argument parser object for PyGRB plotting scripts"""
formatter_class = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(description=description,
formatter_class=formatter_class)
parser.add_argument("--version", action="version", version=version)
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="Verbose output")
parser.add_argument("-o", "--output-file", default=None,
help="Output file.")
parser.add_argument("--x-lims", action="store", default=None,
help="Comma separated minimum and maximum values " +
"for the horizontal axis. When using negative " +
"values an equal sign after --x-lims is necessary.")
parser.add_argument("--y-lims", action="store", default=None,
help="Comma separated minimum and maximum values " +
"for the vertical axis. When using negative values " +
"an equal sign after --y-lims is necessary.")
parser.add_argument("--use-logs", default=False, action="store_true",
help="Produce a log-log plot")
parser.add_argument("-i", "--ifo", default=None, help="IFO used for IFO " +
"specific plots")
parser.add_argument("-a", "--seg-files", nargs="+", action="store",
default=None, help="The location of the buffer, " +
"onsource and offsource segment files.")
parser.add_argument("-V", "--veto-files", nargs="+", action="store",
default=None, help="The location of the CATX veto " +
"files provided as a list of space-separated values.")
parser.add_argument("-b", "--veto-category", action="store", type=int,
default=None, help="Apply vetoes up to this level " +
"inclusive.")
parser.add_argument('--plot-title', default=None,
help="If provided, use the given string as the plot " +
"title.")
parser.add_argument('--plot-caption', default=None,
help="If provided, use the given string as the plot " +
"caption")
return parser
def pygrb_add_injmc_opts(parser):
"""Add to parser object the arguments used for Monte-Carlo on distance."""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument("-M", "--num-mc-injections", action="store",
type=int, default=100, help="Number of Monte " +
"Carlo injection simulations to perform.")
parser.add_argument("-S", "--seed", action="store", type=int,
default=1234, help="Seed to initialize Monte Carlo.")
parser.add_argument("-U", "--upper-inj-dist", action="store",
type=float, default=1000, help="The upper distance " +
"of the injections in Mpc, if used.")
parser.add_argument("-L", "--lower-inj-dist", action="store",
type=float, default=0, help="The lower distance of " +
"the injections in Mpc, if used.")
parser.add_argument("-n", "--num-bins", action="store", type=int,
default=0, help="The number of bins used to " +
"calculate injection efficiency.")
parser.add_argument("-w", "--waveform-error", action="store",
type=float, default=0, help="The standard deviation " +
"to use when calculating the waveform error.")
for ifo in ["g1", "h1", "k1", "l1", "v1"]:
parser.add_argument(f"--{ifo}-cal-error", action="store", type=float,
default=0, help="The standard deviation to use " +
f"when calculating the {ifo.upper()} " +
"calibration amplitude error.")
parser.add_argument(f"--{ifo}-dc-cal-error", action="store",
type=float, default=1.0, help="The scaling " +
"factor to use when calculating the " +
f"{ifo.upper()} calibration amplitude error.")
def pygrb_add_bestnr_opts(parser):
"""Add to the parser object the arguments used for BestNR calculation."""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument("-Q", "--chisq-index", action="store", type=float,
default=6.0, help="chisq_index for newSNR calculation")
parser.add_argument("-N", "--chisq-nhigh", action="store", type=float,
default=2.0, help="nhigh for newSNR calculation")
parser.add_argument("-B", "--sngl-snr-threshold", action="store",
type=float, default=4.0, help="Single detector SNR " +
"threshold, the two most sensitive detectors " +
"should have SNR above this.")
parser.add_argument("-d", "--snr-threshold", action="store", type=float,
default=6.0, help="SNR threshold for recording " +
"triggers.")
parser.add_argument("-c", "--newsnr-threshold", action="store", type=float,
default=6.0, help="NewSNR threshold for " +
"calculating the chisq of triggers (based on value " +
"of auto and bank chisq values. By default will " +
"take the same value as snr-threshold.")
parser.add_argument("-A", "--null-snr-threshold", action="store",
default="3.5,5.25",
help="Comma separated lower,higher null SNR " +
"threshold for null SNR cut")
parser.add_argument("-T", "--null-grad-thresh", action="store", type=float,
default=20., help="Threshold above which to " +
"increase the values of the null SNR cut")
parser.add_argument("-D", "--null-grad-val", action="store", type=float,
default=0.2, help="Rate the null SNR cut will " +
"increase above the threshold")
# =============================================================================
# Wrapper to read segments files
# =============================================================================
def _read_seg_files(seg_files):
"""Read segments txt files"""
if len(seg_files) != 3:
err_msg = "The location of three segment files is necessary."
err_msg += "[bufferSeg.txt, offSourceSeg.txt, onSourceSeg.txt]"
raise RuntimeError(err_msg)
times = {}
keys = ["buffer", "off", "on"]
for key, seg_file in zip(keys, seg_files):
segs = fromsegwizard(open(seg_file, 'r'))
if len(segs) > 1:
            err_msg = 'More than one segment, an error has occurred.'
raise RuntimeError(err_msg)
times[key] = segs[0]
return times
# =============================================================================
# Function to load a table from an xml file
# =============================================================================
def load_xml_table(file_name, table_name):
"""Load xml table from file."""
xml_doc = utils.load_filename(
file_name,
compress='auto',
contenthandler=glsctables.use_in(LIGOLWContentHandler)
)
return Table.get_table(xml_doc, table_name)
# ==============================================================================
# Function to load segments from an xml file
# ==============================================================================
def _load_segments_from_xml(xml_doc, return_dict=False, select_id=None):
"""Read a ligo.segments.segmentlist from the file object file containing an
xml segment table.
Parameters
----------
xml_doc: name of segment xml file
Keyword Arguments:
return_dict : [ True | False ]
return a ligo.segments.segmentlistdict containing coalesced
ligo.segments.segmentlists keyed by seg_def.name for each entry
in the contained segment_def_table. Default False
select_id : int
return a ligo.segments.segmentlist object containing only
those segments matching the given segment_def_id integer
"""
# Load SegmentDefTable and SegmentTable
seg_def_table = load_xml_table(xml_doc,
glsctables.SegmentDefTable.tableName)
seg_table = load_xml_table(xml_doc, glsctables.SegmentTable.tableName)
if return_dict:
segs = segments.segmentlistdict()
else:
segs = segments.segmentlist()
seg_id = {}
for seg_def in seg_def_table:
seg_id[int(seg_def.segment_def_id)] = str(seg_def.name)
if return_dict:
segs[str(seg_def.name)] = segments.segmentlist()
for seg in seg_table:
if return_dict:
segs[seg_id[int(seg.segment_def_id)]]\
.append(segments.segment(seg.start_time, seg.end_time))
continue
if select_id and int(seg.segment_def_id) == select_id:
segs.append(segments.segment(seg.start_time, seg.end_time))
continue
segs.append(segments.segment(seg.start_time, seg.end_time))
if return_dict:
for seg_name in seg_id.values():
segs[seg_name] = segs[seg_name].coalesce()
else:
segs = segs.coalesce()
return segs
# =============================================================================
# Function to extract vetoes
# =============================================================================
def _extract_vetoes(all_veto_files, ifos, veto_cat):
"""Extracts vetoes from veto filelist"""
if all_veto_files and (veto_cat is None):
err_msg = "Must supply veto category to apply vetoes."
raise RuntimeError(err_msg)
# Initialize veto containers
vetoes = segments.segmentlistdict()
for ifo in ifos:
vetoes[ifo] = segments.segmentlist()
veto_files = []
veto_cats = range(2, veto_cat+1)
for cat in veto_cats:
veto_files += [vf for vf in all_veto_files if "CAT"+str(cat) in vf]
n_found = len(veto_files)
n_expected = len(ifos)*len(veto_cats)
if n_found != n_expected:
err_msg = f"Found {n_found} veto files instead of the expected "
err_msg += f"{n_expected}; check the options."
raise RuntimeError(err_msg)
# Construct veto list from veto filelist
if veto_files:
for veto_file in veto_files:
ifo = os.path.basename(veto_file)[:2]
if ifo in ifos:
# This returns a coalesced list of the vetoes
tmp_veto_segs = _load_segments_from_xml(veto_file)
for entry in tmp_veto_segs:
vetoes[ifo].append(entry)
for ifo in ifos:
vetoes[ifo].coalesce()
return vetoes
# =============================================================================
# Function to get the ID numbers from a LIGO-LW table
# =============================================================================
def _get_id_numbers(ligolw_table, column):
"""Grab the IDs of a LIGO-LW table"""
ids = [int(getattr(row, column)) for row in ligolw_table]
return ids
# =============================================================================
# Function to build a dictionary (indexed by ifo) of time-slid vetoes
# =============================================================================
def _slide_vetoes(vetoes, slide_dict_or_list, slide_id):
"""Build a dictionary (indexed by ifo) of time-slid vetoes"""
# Copy vetoes
slid_vetoes = copy.deepcopy(vetoes)
# Slide them
ifos = vetoes.keys()
for ifo in ifos:
slid_vetoes[ifo].shift(-slide_dict_or_list[slide_id][ifo])
return slid_vetoes
#
# Used (also) in executables
#
# =============================================================================
# Function to load triggers
# =============================================================================
def load_triggers(input_file, vetoes):
"""Loads triggers from PyGRB output file"""
trigs = h5py.File(input_file, 'r')
if vetoes is not None:
# Developers: see PR 3972 for previous implementation
raise NotImplementedError
return trigs
# =============================================================================
# Detector utils:
# * Function to calculate the antenna response F+^2 + Fx^2
# * Function to calculate the antenna distance factor
# =============================================================================
def _get_antenna_single_response(antenna, ra, dec, geocent_time):
"""Returns the antenna response F+^2 + Fx^2 of an IFO (passed as pycbc
Detector type) at a given sky location and time."""
fp, fc = antenna.antenna_pattern(ra, dec, 0, geocent_time)
return fp**2 + fc**2
# Vectorize the function above on all but the first argument
get_antenna_responses = numpy.vectorize(_get_antenna_single_response,
otypes=[float])
get_antenna_responses.excluded.add(0)
def get_antenna_dist_factor(antenna, ra, dec, geocent_time, inc=0.0):
"""Returns the antenna factors (defined as eq. 4.3 on page 57 of
Duncan Brown's Ph.D.) for an IFO (passed as pycbc Detector type) at
a given sky location and time."""
fp, fc = antenna.antenna_pattern(ra, dec, 0, geocent_time)
return numpy.sqrt(fp ** 2 * (1 + numpy.cos(inc)) ** 2 / 4 + fc ** 2)
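# The following helper is an illustrative sketch added for clarity; it is not
# part of the original module. It shows how the vectorized antenna response
# (which excludes the Detector argument from vectorization) and the distance
# factor above could be evaluated; all sky locations and GPS times are made up.
def _example_antenna_factors():  # pragma: no cover - illustrative only
    """Evaluate F+^2 + Fx^2 and the distance factor for H1 at mock points."""
    from pycbc.detector import Detector
    det = Detector('H1')
    ra = numpy.array([1.5, 2.0])
    dec = numpy.array([-0.3, 0.7])
    gps = numpy.array([1187008882.4, 1187008882.4])
    # Vectorized over the sky/time arrays, single Detector instance
    responses = get_antenna_responses(det, ra, dec, gps)
    # Distance factor at zero inclination for the first sky point
    dist_fac = get_antenna_dist_factor(det, ra[0], dec[0], gps[0], inc=0.0)
    return responses, dist_fac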
# =============================================================================
# Function to calculate the detection statistic of a list of triggers
# =============================================================================
def get_bestnrs(trigs, q=4.0, n=3.0, null_thresh=(4.25, 6), snr_threshold=6.,
sngl_snr_threshold=4., chisq_threshold=None,
null_grad_thresh=20., null_grad_val=0.2):
"""Calculate BestNR (coh_PTF detection statistic) of triggers through
    signal-based vetoes. The (default) signal-based vetoes are:
* Coherent SNR < 6
* Bank chi-squared reduced (new) SNR < 6
* Auto veto reduced (new) SNR < 6
* Single-detector SNR (from two most sensitive IFOs) < 4
* Null SNR (CoincSNR^2 - CohSNR^2)^(1/2) < null_thresh
Returns Numpy array of BestNR values.
"""
if not trigs:
return numpy.array([])
# Grab sky position and timing
ra = trigs.get_column('ra')
dec = trigs.get_column('dec')
time = trigs.get_end()
# Initialize BestNRs
snr = trigs.get_column('snr')
bestnr = numpy.ones(len(snr))
# Coherent SNR cut
bestnr[numpy.asarray(snr) < snr_threshold] = 0
# Bank and auto chi-squared cuts
if not chisq_threshold:
chisq_threshold = snr_threshold
for chisq in ['bank_chisq', 'cont_chisq']:
bestnr[numpy.asarray(trigs.get_new_snr(index=q, nhigh=n,
column=chisq))
< chisq_threshold] = 0
# Define IFOs for sngl cut
ifos = list(map(str, trigs[0].get_ifos()))
# Single detector SNR cut
sens = {}
sigmasqs = trigs.get_sigmasqs()
ifo_snr = dict((ifo, trigs.get_sngl_snr(ifo)) for ifo in ifos)
for ifo in ifos:
antenna = Detector(ifo)
sens[ifo] = sigmasqs[ifo] * get_antenna_responses(antenna, ra,
dec, time)
# Apply this cut only if there is more than 1 IFO
if len(ifos) > 1:
for i_trig, _ in enumerate(trigs):
# Apply only to triggers that were not already cut previously
if bestnr[i_trig] != 0:
ifos.sort(key=lambda ifo, j=i_trig: sens[ifo][j], reverse=True)
if (ifo_snr[ifos[0]][i_trig] < sngl_snr_threshold or
ifo_snr[ifos[1]][i_trig] < sngl_snr_threshold):
bestnr[i_trig] = 0
for i_trig, trig in enumerate(trigs):
# Get chisq reduced (new) SNR for triggers that were not cut so far
# NOTE: .get_bestnr is in glue.ligolw.lsctables.MultiInspiralTable
if bestnr[i_trig] != 0:
bestnr[i_trig] = trig.get_bestnr(index=q, nhigh=n,
null_snr_threshold=null_thresh[0],
null_grad_thresh=null_grad_thresh,
null_grad_val=null_grad_val)
# If we got this far and the bestNR is non-zero, verify that chisq
# was actually calculated for the trigger, otherwise raise an
# error with info useful to figure out why this happened.
if bestnr[i_trig] != 0 and trig.chisq == 0:
err_msg = "Chisq not calculated for trigger with end time "
err_msg += f"{trig.get_end()} and SNR {trig.snr}."
raise RuntimeError(err_msg)
return bestnr
# =============================================================================
# Construct sorted triggers from trials
# =============================================================================
def sort_trigs(trial_dict, trigs, slide_dict, seg_dict):
"""Constructs sorted triggers from a trials dictionary"""
sorted_trigs = {}
# Begin by sorting the triggers into each slide
    # glsctables.New() seems pretty slow, so call it once and then deepcopy
tmp_table = glsctables.New(glsctables.MultiInspiralTable)
for slide_id in slide_dict:
sorted_trigs[slide_id] = copy.deepcopy(tmp_table)
for trig in trigs:
sorted_trigs[int(trig.time_slide_id)].append(trig)
for slide_id in slide_dict:
# These can only *reduce* the analysis time
curr_seg_list = seg_dict[slide_id]
# Check the triggers are all in the analysed segment lists
for trig in sorted_trigs[slide_id]:
if trig.end_time not in curr_seg_list:
                # This can happen if the trigger lies on a segment boundary,
                # so check whether a time within 1/100 of a second of the
                # trigger end is in the list
if trig.get_end() + 0.01 in curr_seg_list:
continue
if trig.get_end() - 0.01 in curr_seg_list:
continue
err_msg = "Triggers found in input files not in the list of "
err_msg += "analysed segments. This should not happen."
raise RuntimeError(err_msg)
# END OF CHECK #
# The below line works like the inverse of .veto and only returns trigs
# that are within the segment specified by trial_dict[slide_id]
sorted_trigs[slide_id] = \
sorted_trigs[slide_id].vetoed(trial_dict[slide_id])
return sorted_trigs
# =============================================================================
# Extract basic trigger properties and store them as dictionaries
# =============================================================================
def extract_basic_trig_properties(trial_dict, trigs, slide_dict, seg_dict,
opts):
"""Extract and store as dictionaries time, SNR, and BestNR of
time-slid triggers"""
# Sort the triggers into each slide
sorted_trigs = sort_trigs(trial_dict, trigs, slide_dict, seg_dict)
logging.info("Triggers sorted.")
# Local copies of variables entering the BestNR definition
chisq_index = opts.chisq_index
chisq_nhigh = opts.chisq_nhigh
null_thresh = list(map(float, opts.null_snr_threshold.split(',')))
snr_thresh = opts.snr_threshold
sngl_snr_thresh = opts.sngl_snr_threshold
new_snr_thresh = opts.newsnr_threshold
null_grad_thresh = opts.null_grad_thresh
null_grad_val = opts.null_grad_val
# Build the 3 dictionaries
trig_time = {}
trig_snr = {}
trig_bestnr = {}
for slide_id in slide_dict:
slide_trigs = sorted_trigs[slide_id]
if slide_trigs:
trig_time[slide_id] = numpy.asarray(slide_trigs.get_end()).\
astype(float)
trig_snr[slide_id] = numpy.asarray(slide_trigs.get_column('snr'))
else:
trig_time[slide_id] = numpy.asarray([])
trig_snr[slide_id] = numpy.asarray([])
trig_bestnr[slide_id] = get_bestnrs(slide_trigs,
q=chisq_index,
n=chisq_nhigh,
null_thresh=null_thresh,
snr_threshold=snr_thresh,
sngl_snr_threshold=sngl_snr_thresh,
chisq_threshold=new_snr_thresh,
null_grad_thresh=null_grad_thresh,
null_grad_val=null_grad_val)
logging.info("Time, SNR, and BestNR of triggers extracted.")
return trig_time, trig_snr, trig_bestnr
# =============================================================================
# Function to extract ifos from hdfs
# =============================================================================
def extract_ifos(trig_file):
"""Extracts IFOs from hdf file"""
# Load hdf file
hdf_file = h5py.File(trig_file, 'r')
# Extract IFOs
ifos = sorted(list(hdf_file.keys()))
# Remove 'network' key from list of ifos
if 'network' in ifos:
ifos.remove('network')
return ifos
# =============================================================================
# Function to extract IFOs and vetoes
# =============================================================================
def extract_ifos_and_vetoes(trig_file, veto_files, veto_cat):
"""Extracts IFOs from HDF files and vetoes from a directory"""
logging.info("Extracting IFOs and vetoes.")
# Extract IFOs
ifos = extract_ifos(trig_file)
# Extract vetoes
if veto_files is not None:
vetoes = _extract_vetoes(veto_files, ifos, veto_cat)
else:
vetoes = None
return ifos, vetoes
# =============================================================================
# Function to load injections
# =============================================================================
def load_injections(inj_file, vetoes, sim_table=False, label=None):
"""Loads injections from PyGRB output file"""
if label is None:
logging.info("Loading injections...")
else:
logging.info("Loading %s...", label)
insp_table = glsctables.MultiInspiralTable
if sim_table:
insp_table = glsctables.SimInspiralTable
# Load injections in injection file
inj_table = load_xml_table(inj_file, insp_table.tableName)
# Extract injections in time-slid non-vetoed data
injs = lsctables.New(insp_table, columns=insp_table.loadcolumns)
injs.extend(inj for inj in inj_table if inj.get_end() not in vetoes)
if label is None:
logging.info("%d injections found.", len(injs))
else:
logging.info("%d %s found.", len(injs), label)
return injs
# =============================================================================
# Function to load timeslides
# =============================================================================
def load_time_slides(xml_file):
"""Loads timeslides from PyGRB output file as a dictionary"""
# Get all timeslides: these are number_of_ifos * number_of_timeslides
time_slide = load_xml_table(xml_file, glsctables.TimeSlideTable.tableName)
# Get a list of unique timeslide dictionaries
time_slide_list = [dict(i) for i in time_slide.as_dict().values()]
# Turn it into a dictionary indexed by the timeslide ID
time_slide_dict = {int(time_slide.get_time_slide_id(ov)): ov
for ov in time_slide_list}
# Check time_slide_ids are ordered correctly.
ids = _get_id_numbers(time_slide,
"time_slide_id")[::len(time_slide_dict[0].keys())]
if not (numpy.all(ids[1:] == numpy.array(ids[:-1])+1) and ids[0] == 0):
err_msg = "time_slide_ids list should start at zero and increase by "
err_msg += "one for every element"
raise RuntimeError(err_msg)
# Check that the zero-lag slide has time_slide_id == 0.
if not numpy.all(numpy.array(list(time_slide_dict[0].values())) == 0):
err_msg = "The slide with time_slide_id == 0 should be the "
err_msg += "zero-lag-slide but it has non-zero slide values: "
err_msg += f"{time_slide_dict[0]}."
raise RuntimeError(err_msg)
return time_slide_dict
# =============================================================================
# Function to load the segment dictionary
# =============================================================================
def load_segment_dict(xml_file):
"""Loads the segment dictionary """
# Get the mapping table
time_slide_map_table = \
load_xml_table(xml_file, glsctables.TimeSlideSegmentMapTable.tableName)
    # Perhaps unnecessary as segment_def_id and time_slide_id seem to always
    # be identical
segment_map = {
int(entry.segment_def_id): int(entry.time_slide_id)
for entry in time_slide_map_table
}
# Extract the segment table
segment_table = load_xml_table(
xml_file, glsctables.SegmentTable.tableName)
segment_dict = {}
for entry in segment_table:
curr_slid_id = segment_map[int(entry.segment_def_id)]
curr_seg = entry.get()
if curr_slid_id not in segment_dict:
segment_dict[curr_slid_id] = segments.segmentlist()
segment_dict[curr_slid_id].append(curr_seg)
segment_dict[curr_slid_id].coalesce()
return segment_dict
# =============================================================================
# Construct the trials from the timeslides, segments, and vetoes
# =============================================================================
def construct_trials(seg_files, seg_dict, ifos, slide_dict, vetoes):
"""Constructs trials from triggers, timeslides, segments and vetoes"""
trial_dict = {}
# Get segments
segs = _read_seg_files(seg_files)
# Separate segments
trial_time = abs(segs['on'])
for slide_id in slide_dict:
# These can only *reduce* the analysis time
curr_seg_list = seg_dict[slide_id]
# Construct the buffer segment list
seg_buffer = segments.segmentlist()
for ifo in ifos:
slide_offset = slide_dict[slide_id][ifo]
seg_buffer.append(segments.segment(segs['buffer'][0] -
slide_offset,
segs['buffer'][1] -
slide_offset))
seg_buffer.coalesce()
        # Construct the ifo-indexed dictionary of slid vetoes
slid_vetoes = _slide_vetoes(vetoes, slide_dict, slide_id)
# Construct trial list and check against buffer
trial_dict[slide_id] = segments.segmentlist()
for curr_seg in curr_seg_list:
iter_int = 1
while 1:
trial_end = curr_seg[0] + trial_time*iter_int
if trial_end > curr_seg[1]:
break
curr_trial = segments.segment(trial_end - trial_time,
trial_end)
if not seg_buffer.intersects_segment(curr_trial):
intersect = numpy.any([slid_vetoes[ifo].
intersects_segment(curr_trial)
for ifo in ifos])
if not intersect:
trial_dict[slide_id].append(curr_trial)
iter_int += 1
return trial_dict
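# Illustrative sketch (not part of the original module): the inner loop of
# construct_trials() above tiles each analysed segment with back-to-back
# trials of length trial_time, discarding any partial trial at the end.
# The helper below reproduces just that tiling for a single made-up segment.
def _example_trial_tiling(seg_start=100, seg_end=131, trial_time=10):
    """Return the back-to-back trials that fit inside the given segment."""
    trials = segments.segmentlist()
    iter_int = 1
    while 1:
        trial_end = seg_start + trial_time * iter_int
        if trial_end > seg_end:
            break
        trials.append(segments.segment(trial_end - trial_time, trial_end))
        iter_int += 1
    # With the defaults this yields (100, 110), (110, 120), (120, 130)
    return trials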
# =============================================================================
# Find max and median of loudest SNRs or BestNRs
# =============================================================================
def sort_stat(time_veto_max_stat):
"""Sort a dictionary of loudest SNRs/BestNRs"""
full_time_veto_max_stat = list(time_veto_max_stat.values())
full_time_veto_max_stat = numpy.concatenate(full_time_veto_max_stat)
full_time_veto_max_stat.sort()
return full_time_veto_max_stat
# =============================================================================
# Find max and median of loudest SNRs or BestNRs
# =============================================================================
def max_median_stat(slide_dict, time_veto_max_stat, trig_stat, total_trials):
"""Deterine the maximum and median of the loudest SNRs/BestNRs"""
max_stat = max([trig_stat[slide_id].max() if trig_stat[slide_id].size
else 0 for slide_id in slide_dict])
full_time_veto_max_stat = sort_stat(time_veto_max_stat)
if total_trials % 2:
median_stat = full_time_veto_max_stat[(total_trials - 1) // 2]
else:
median_stat = numpy.mean((full_time_veto_max_stat)
[total_trials//2 - 1: total_trials//2 + 1])
return max_stat, median_stat, full_time_veto_max_stat
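# Illustrative sketch (not part of the original module) of max_median_stat()
# with made-up loudest-statistic values for two slides: with four trials in
# total (an even number), the median is the mean of the two middle values of
# the sorted list and the maximum is the loudest value over all slides.
def _example_max_median_stat():  # pragma: no cover - illustrative only
    """Return (6.0, 4.5, array([3., 4., 5., 6.])) for the mock inputs below."""
    slide_dict = [0, 1]
    time_veto_max_stat = {0: numpy.array([3.0, 5.0]),
                          1: numpy.array([4.0, 6.0])}
    trig_stat = time_veto_max_stat
    return max_median_stat(slide_dict, time_veto_max_stat, trig_stat,
                           total_trials=4)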
# =============================================================================
# Function to determine calibration and waveform errors for injection sets
# =============================================================================
def mc_cal_wf_errs(num_mc_injs, inj_dists, cal_err, wf_err, max_dc_cal_err):
"""Includes calibration and waveform errors by running an MC"""
# The efficiency calculations include calibration and waveform
# errors incorporated by running over each injection num_mc_injs times,
# where each time we draw a random value of distance.
num_injs = len(inj_dists)
inj_dist_mc = numpy.ndarray((num_mc_injs+1, num_injs))
inj_dist_mc[0, :] = inj_dists
for i in range(num_mc_injs):
cal_dist_red = stats.norm.rvs(size=num_injs) * cal_err
wf_dist_red = numpy.abs(stats.norm.rvs(size=num_injs) * wf_err)
inj_dist_mc[i+1, :] = inj_dists / (max_dc_cal_err *
(1 + cal_dist_red) *
(1 + wf_dist_red))
return inj_dist_mc
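# Illustrative sketch (not part of the original module): jitter three made-up
# injection distances with 5% calibration and 2% waveform errors over 10
# Monte Carlo realisations. The first row of the returned (11 x 3) array is
# always the unperturbed distances.
def _example_mc_cal_wf_errs():  # pragma: no cover - illustrative only
    inj_dists = numpy.array([50.0, 120.0, 300.0])
    return mc_cal_wf_errs(num_mc_injs=10, inj_dists=inj_dists,
                          cal_err=0.05, wf_err=0.02, max_dc_cal_err=1.0)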
# =============================================================================
# Function to calculate the coincident SNR
# =============================================================================
def get_coinc_snr(trigs_or_injs, ifos):
""" Calculate coincident SNR using single IFO SNRs"""
num_trigs_or_injs = len(trigs_or_injs['network/end_time_gc'][:])
# Calculate coincident SNR
single_snr_sq = dict((ifo, None) for ifo in ifos)
snr_sum_square = numpy.zeros(num_trigs_or_injs)
for ifo in ifos:
key = ifo + '/snr_' + ifo.lower()
if ifo.lower() != 'h1':
key = key[:-1]
# Square the individual SNRs
single_snr_sq[ifo] = numpy.square(
trigs_or_injs[key][:])
# Add them
snr_sum_square = numpy.add(snr_sum_square, single_snr_sq[ifo])
# Obtain the square root
coinc_snr = numpy.sqrt(snr_sum_square)
return coinc_snr
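# Illustrative sketch (not part of the original module): get_coinc_snr() adds
# the single-IFO SNRs in quadrature. Note the dataset naming it expects: H1
# keeps the full 'snr_h1' suffix while other IFOs are trimmed by one character
# (e.g. 'L1/snr_l'). The dictionary below mimics that HDF layout with made-up
# values.
def _example_coinc_snr():  # pragma: no cover - illustrative only
    fake_trigs = {
        'network/end_time_gc': numpy.array([0.0, 1.0]),
        'H1/snr_h1': numpy.array([5.0, 6.0]),
        'L1/snr_l': numpy.array([4.0, 8.0]),
    }
    # -> sqrt(5**2 + 4**2) and sqrt(6**2 + 8**2), i.e. ~6.4 and 10.0
    return get_coinc_snr(fake_trigs, ifos=['H1', 'L1'])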
| 33,850
| 40.843016
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/results/plot.py
|
""" Plotting utilities and premade plot configurations
"""
def hist_overflow(val, val_max, **kwds):
""" Make a histogram with an overflow bar above val_max """
import pylab
overflow = len(val[val>=val_max])
pylab.hist(val[val<val_max], **kwds)
if 'color' in kwds:
color = kwds['color']
else:
color = None
if overflow > 0:
rect = pylab.bar(val_max+0.05, overflow, .5, color=color)[0]
pylab.text(rect.get_x(),
1.10*rect.get_height(), '%s+' % val_max)
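# Illustrative sketch (not part of the original module): histogram 1000 mock
# values, lumping everything at or above 10 into a single overflow bar.
def _example_hist_overflow():  # pragma: no cover - illustrative only
    import numpy
    vals = numpy.random.exponential(scale=3.0, size=1000)
    hist_overflow(vals, 10, bins=20, color='C0')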
def add_style_opt_to_parser(parser, default=None):
"""Adds an option to set the matplotlib style to a parser.
Parameters
----------
parser : argparse.ArgumentParser
The parser to add the option to.
default : str, optional
The default style to use. Default, None, will result in the default
matplotlib style to be used.
"""
from matplotlib import pyplot
parser.add_argument('--mpl-style', default=default,
choices=['default']+pyplot.style.available+['xkcd'],
help='Set the matplotlib style to use.')
def set_style_from_cli(opts):
"""Uses the mpl-style option to set the style for plots.
Note: This will change the global rcParams.
"""
from matplotlib import pyplot
if opts.mpl_style == 'xkcd':
# this is treated differently for some reason
pyplot.xkcd()
elif opts.mpl_style is not None:
pyplot.style.use(opts.mpl_style)
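# Illustrative sketch (not part of the original module): wire the style option
# into an argparse parser and apply the chosen style, as a plotting executable
# would.
def _example_style_cli():  # pragma: no cover - illustrative only
    import argparse
    parser = argparse.ArgumentParser()
    add_style_opt_to_parser(parser, default='default')
    opts = parser.parse_args(['--mpl-style', 'default'])
    set_style_from_cli(opts)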
| 1,509
| 29.2
| 76
|
py
|
pycbc
|
pycbc-master/pycbc/results/__init__.py
|
from pycbc.results.table_utils import *
from pycbc.results.metadata import *
from pycbc.results.versioning import *
from pycbc.results.color import *
from pycbc.results.plot import *
from pycbc.results.psd import *
from pycbc.results.layout import *
from pycbc.results.dq import *
from pycbc.results.str_utils import *
from pycbc.results.pygrb_postprocessing_utils import *
from pycbc.results.pygrb_plotting_utils import *
| 423
| 34.333333
| 54
|
py
|
pycbc
|
pycbc-master/pycbc/results/followup.py
|
# Copyright (C) 2014 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides functions to generate followup plots and trigger
time series.
"""
import h5py, numpy, matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependent. If this is a problem then remove this and control from
# the executables directly.
import sys
if 'matplotlib.backends' not in sys.modules:
matplotlib.use('agg')
import pylab, mpld3, mpld3.plugins
from ligo.segments import segment
def columns_from_file_list(file_list, columns, ifo, start, end):
""" Return columns of information stored in single detector trigger
files.
Parameters
----------
    file_list : FileList
        Workflow file list containing the single-detector trigger files
        (used via `find_output_with_ifo` and `find_all_output_in_range`).
ifo : string
The ifo to return triggers for.
columns : list of strings
The list of columns to read from the trigger files.
start : int
The start time to get triggers from
end : int
The end time to get triggers from
Returns
-------
trigger_dict : dict
A dictionary of column vectors with column names as keys.
"""
file_list = file_list.find_output_with_ifo(ifo)
file_list = file_list.find_all_output_in_range(ifo, segment(start, end))
trig_dict = {}
for trig_file in file_list:
f = h5py.File(trig_file.storage_path, 'r')
time = f['end_time'][:]
pick = numpy.logical_and(time < end, time > start)
pick_loc = numpy.where(pick)[0]
for col in columns:
if col not in trig_dict:
trig_dict[col] = []
trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][:][pick_loc]])
return trig_dict
ifo_color = {'H1': 'blue', 'L1':'red', 'V1':'green'}
def coinc_timeseries_plot(coinc_file, start, end):
fig = pylab.figure()
f = h5py.File(coinc_file, 'r')
stat1 = f['foreground/stat1']
stat2 = f['foreground/stat2']
time1 = f['foreground/time1']
time2 = f['foreground/time2']
ifo1 = f.attrs['detector_1']
ifo2 = f.attrs['detector_2']
pylab.scatter(time1, stat1, label=ifo1, color=ifo_color[ifo1])
pylab.scatter(time2, stat2, label=ifo2, color=ifo_color[ifo2])
fmt = '.12g'
mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt))
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('NewSNR')
pylab.grid()
return mpld3.fig_to_html(fig)
def trigger_timeseries_plot(file_list, ifos, start, end):
fig = pylab.figure()
for ifo in ifos:
trigs = columns_from_file_list(file_list,
['snr', 'end_time'],
ifo, start, end)
print(trigs)
pylab.scatter(trigs['end_time'], trigs['snr'], label=ifo,
color=ifo_color[ifo])
fmt = '.12g'
mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt))
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('SNR')
pylab.grid()
return mpld3.fig_to_html(fig)
def times_to_urls(times, window, tag):
base = '/../followup/%s/%s/%s'
return times_to_links(times, window, tag, base=base)
def times_to_links(times, window, tag, base=None):
if base is None:
base = "<a href='/../followup/%s/%s/%s' target='_blank'>followup</a>"
urls = []
for time in times:
start = time - window
end = time + window
urls.append(base % (tag, start, end))
return urls
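# Illustrative sketch (not part of the original module): build followup URLs
# and HTML links for two made-up GPS times with a 10 s window around each.
def _example_followup_links():  # pragma: no cover - illustrative only
    gps_times = [1126259462.4, 1187008882.4]
    urls = times_to_urls(gps_times, window=10, tag='H1')
    links = times_to_links(gps_times, window=10, tag='H1')
    return urls, links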
def get_gracedb_search_link(time):
# Set up a search string for a 3s window around the coincidence
gdb_search_query = '%.0f+..+%.0f' % (numpy.floor(time) - 1,
numpy.ceil(time) + 1)
gdb_search_url = ('https://gracedb.ligo.org/search/?query='
'{}&query_type=S'.format(gdb_search_query))
gdb_search_link = '<a href="' + gdb_search_url + '">Search</a>'
return gdb_search_link
| 5,023
| 33.410959
| 85
|
py
|
pycbc
|
pycbc-master/pycbc/results/scatter_histograms.py
|
# Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate figures with scatter plots and histograms.
"""
import itertools
import sys
import numpy
import scipy.stats
import matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependent. If this is a problem then remove this and control from
# the executables directly.
if 'matplotlib.backends' not in sys.modules: # nopep8
matplotlib.use('agg')
from matplotlib import (offsetbox, pyplot, gridspec)
from pycbc.results import str_utils
from pycbc.io import FieldArray
def create_axes_grid(parameters, labels=None, height_ratios=None,
width_ratios=None, no_diagonals=False):
"""Given a list of parameters, creates a figure with an axis for
every possible combination of the parameters.
Parameters
----------
parameters : list
Names of the variables to be plotted.
labels : {None, dict}, optional
A dictionary of parameters -> parameter labels.
height_ratios : {None, list}, optional
Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
width_ratios : {None, list}, optional
Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
no_diagonals : {False, bool}, optional
Do not produce axes for the same parameter on both axes.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
elif any(p not in labels for p in parameters):
raise ValueError("labels must be provided for all parameters")
# Create figure with adequate size for number of parameters.
ndim = len(parameters)
if no_diagonals:
ndim -= 1
if ndim < 3:
fsize = (8, 7)
else:
fsize = (ndim*3 - 1, ndim*3 - 2)
fig = pyplot.figure(figsize=fsize)
# create the axis grid
gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios,
height_ratios=height_ratios,
wspace=0.05, hspace=0.05)
# create grid of axis numbers to easily create axes in the right locations
axes = numpy.arange(ndim**2).reshape((ndim, ndim))
# Select possible combinations of plots and establish rows and columns.
combos = list(itertools.combinations(parameters, 2))
# add the diagonals
if not no_diagonals:
combos += [(p, p) for p in parameters]
# create the mapping between parameter combos and axes
axis_dict = {}
    # cycle over all the axes, setting things as needed
for nrow in range(ndim):
for ncolumn in range(ndim):
ax = pyplot.subplot(gs[axes[nrow, ncolumn]])
# map to a parameter index
px = parameters[ncolumn]
if no_diagonals:
py = parameters[nrow+1]
else:
py = parameters[nrow]
if (px, py) in combos:
axis_dict[px, py] = (ax, nrow, ncolumn)
# x labels only on bottom
if nrow + 1 == ndim:
ax.set_xlabel('{}'.format(labels[px]), fontsize=18)
else:
pyplot.setp(ax.get_xticklabels(), visible=False)
# y labels only on left
if ncolumn == 0:
ax.set_ylabel('{}'.format(labels[py]), fontsize=18)
else:
pyplot.setp(ax.get_yticklabels(), visible=False)
else:
# make non-used axes invisible
ax.axis('off')
return fig, axis_dict
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
"""Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
"""
width, height = fig.get_size_inches()
return (width*height/(fiducial_width*fiducial_height))**0.5
def construct_kde(samples_array, use_kombine=False, kdeargs=None):
"""Constructs a KDE from the given samples.
Parameters
----------
samples_array : array
Array of values to construct the KDE for.
use_kombine : bool, optional
Use kombine's clustered KDE instead of scipy's. Default is False.
kdeargs : dict, optional
Additional arguments to pass to the KDE. Can be any argument recognized
by :py:func:`scipy.stats.gaussian_kde` or
:py:func:`kombine.clustered_kde.optimized_kde`. In either case, you can
also set ``max_kde_samples`` to limit the number of samples that are
used for KDE construction.
Returns
-------
kde :
The KDE.
"""
    # make sure the samples are randomly shuffled
numpy.random.seed(0)
numpy.random.shuffle(samples_array)
# if kde arg specifies a maximum number of samples, limit them
if kdeargs is None:
kdeargs = {}
else:
kdeargs = kdeargs.copy()
max_nsamples = kdeargs.pop('max_kde_samples', None)
samples_array = samples_array[:max_nsamples]
if use_kombine:
try:
import kombine
except ImportError:
raise ImportError("kombine is not installed.")
if kdeargs is None:
kdeargs = {}
# construct the kde
if use_kombine:
kde = kombine.clustered_kde.optimized_kde(samples_array, **kdeargs)
else:
kde = scipy.stats.gaussian_kde(samples_array.T, **kdeargs)
return kde
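# Illustrative sketch (not part of the original module): build a scipy
# Gaussian KDE from mock 2D posterior samples, capping the number of samples
# used for the KDE at 500 via the 'max_kde_samples' option.
def _example_construct_kde():  # pragma: no cover - illustrative only
    samples = numpy.random.normal(size=(1000, 2))
    return construct_kde(samples, use_kombine=False,
                         kdeargs={'max_kde_samples': 500})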
def create_density_plot(xparam, yparam, samples, plot_density=True,
plot_contours=True, percentiles=None, cmap='viridis',
contour_color=None, label_contours=True,
contour_linestyles=None,
xmin=None, xmax=None,
ymin=None, ymax=None, exclude_region=None,
fig=None, ax=None, use_kombine=False,
kdeargs=None):
"""Computes and plots posterior density and confidence intervals using the
given samples.
Parameters
----------
xparam : string
The parameter to plot on the x-axis.
yparam : string
The parameter to plot on the y-axis.
samples : dict, numpy structured array, or FieldArray
The samples to plot.
plot_density : {True, bool}
Plot a color map of the density.
plot_contours : {True, bool}
Plot contours showing the n-th percentiles of the density.
percentiles : {None, float or array}
What percentile contours to draw. If None, will plot the 50th
and 90th percentiles.
cmap : {'viridis', string}
The name of the colormap to use for the density plot.
contour_color : {None, string}
What color to make the contours. Default is white for density
plots and black for other plots.
label_contours : bool, optional
Whether to label the contours. Default is True.
contour_linestyles : list, optional
Linestyles to use for the contours. Default (None) will use solid.
xmin : {None, float}
Minimum value to plot on x-axis.
xmax : {None, float}
Maximum value to plot on x-axis.
ymin : {None, float}
Minimum value to plot on y-axis.
ymax : {None, float}
Maximum value to plot on y-axis.
    exclude_region : {None, str}
Exclude the specified region when plotting the density or contours.
Must be a string in terms of `xparam` and `yparam` that is
understandable by numpy's logical evaluation. For example, if
`xparam = m_1` and `yparam = m_2`, and you want to exclude the region
for which `m_2` is greater than `m_1`, then exclude region should be
`'m_2 > m_1'`.
fig : {None, pyplot.figure}
Add the plot to the given figure. If None and ax is None, will create
a new figure.
ax : {None, pyplot.axes}
Draw plot on the given axis. If None, will create a new axis from
`fig`.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
kdeargs : dict, optional
Pass the given keyword arguments to the KDE.
Returns
-------
fig : pyplot.figure
The figure the plot was made on.
ax : pyplot.axes
The axes the plot was drawn on.
"""
if percentiles is None:
percentiles = numpy.array([50., 90.])
percentiles = 100. - numpy.array(percentiles)
percentiles.sort()
if ax is None and fig is None:
fig = pyplot.figure()
if ax is None:
ax = fig.add_subplot(111)
# convert samples to array and construct kde
xsamples = samples[xparam]
ysamples = samples[yparam]
arr = numpy.vstack((xsamples, ysamples)).T
kde = construct_kde(arr, use_kombine=use_kombine, kdeargs=kdeargs)
# construct grid to evaluate on
if xmin is None:
xmin = xsamples.min()
if xmax is None:
xmax = xsamples.max()
if ymin is None:
ymin = ysamples.min()
if ymax is None:
ymax = ysamples.max()
npts = 100
X, Y = numpy.mgrid[
xmin:xmax:complex(0, npts), # pylint:disable=invalid-slice-index
ymin:ymax:complex(0, npts)] # pylint:disable=invalid-slice-index
pos = numpy.vstack([X.ravel(), Y.ravel()])
if use_kombine:
Z = numpy.exp(kde(pos.T).reshape(X.shape))
draw = kde.draw
else:
Z = kde(pos).T.reshape(X.shape)
draw = kde.resample
if exclude_region is not None:
        # convert X,Y to a single FieldArray so we can use its ability to
# evaluate strings
farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y})
Z[farr[exclude_region]] = 0.
if plot_density:
ax.imshow(numpy.rot90(Z), extent=[xmin, xmax, ymin, ymax],
aspect='auto', cmap=cmap, zorder=1)
if contour_color is None:
contour_color = 'w'
if plot_contours:
# compute the percentile values
resamps = kde(draw(int(npts**2)))
if use_kombine:
resamps = numpy.exp(resamps)
s = numpy.percentile(resamps, percentiles)
if contour_color is None:
contour_color = 'k'
# make linewidths thicker if not plotting density for clarity
if plot_density:
lw = 1
else:
lw = 2
ct = ax.contour(X, Y, Z, s, colors=contour_color, linewidths=lw,
linestyles=contour_linestyles, zorder=3)
# label contours
if label_contours:
lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)]
fmt = dict(zip(ct.levels, lbls))
fs = 12
ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs)
return fig, ax
def create_marginalized_hist(ax, values, label, percentiles=None,
color='k', fillcolor='gray', linecolor='navy',
linestyle='-', plot_marginal_lines=True,
title=True, expected_value=None,
expected_color='red', rotated=False,
plot_min=None, plot_max=None):
"""Plots a 1D marginalized histogram of the given param from the given
samples.
Parameters
----------
ax : pyplot.Axes
The axes on which to draw the plot.
values : array
The parameter values to plot.
label : str
A label to use for the title.
percentiles : {None, float or array}
What percentiles to draw lines at. If None, will draw lines at
`[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the
median).
color : {'k', string}
What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
What color to fill the histogram with. Set to None to not fill the
histogram. Default is 'gray'.
plot_marginal_lines : bool, optional
Put vertical lines at the marginal percentiles. Default is True.
linestyle : str, optional
What line style to use for the histogram. Default is '-'.
linecolor : {'navy', string}
What color to use for the percentile lines. Default is 'navy'.
title : bool, optional
        Add a title with an estimated value +/- uncertainty. The estimated value
        is the percentile halfway between the max/min of ``percentiles``, while
the uncertainty is given by the max/min of the ``percentiles``. If no
percentiles are specified, defaults to quoting the median +/- 95/5
percentiles.
rotated : {False, bool}
Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
The minimum value to plot. If None, will default to whatever `pyplot`
creates.
plot_max : {None, float}
The maximum value to plot. If None, will default to whatever `pyplot`
creates.
"""
if fillcolor is None:
htype = 'step'
else:
htype = 'stepfilled'
if rotated:
orientation = 'horizontal'
else:
orientation = 'vertical'
ax.hist(values, bins=50, histtype=htype, orientation=orientation,
facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
density=True)
if percentiles is None:
percentiles = [5., 50., 95.]
if len(percentiles) > 0:
plotp = numpy.percentile(values, percentiles)
else:
plotp = []
if plot_marginal_lines:
for val in plotp:
if rotated:
ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
else:
ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
# plot expected
if expected_value is not None:
if rotated:
ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
else:
ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
if title:
if len(percentiles) > 0:
minp = min(percentiles)
maxp = max(percentiles)
medp = (maxp + minp) / 2.
else:
minp = 5
medp = 50
maxp = 95
values_min = numpy.percentile(values, minp)
values_med = numpy.percentile(values, medp)
values_max = numpy.percentile(values, maxp)
negerror = values_med - values_min
poserror = values_max - values_med
fmt = '${0}$'.format(str_utils.format_value(
values_med, negerror, plus_error=poserror))
if rotated:
ax.yaxis.set_label_position("right")
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color,
label=label, rotated=rotated)
else:
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color, label=label)
# remove ticks and set limits
if rotated:
# Remove x-ticks
ax.set_xticks([])
# turn off x-labels
ax.set_xlabel('')
# set limits
ymin, ymax = ax.get_ylim()
if plot_min is not None:
ymin = plot_min
if plot_max is not None:
ymax = plot_max
ax.set_ylim(ymin, ymax)
else:
# Remove y-ticks
ax.set_yticks([])
# turn off y-label
ax.set_ylabel('')
# set limits
xmin, xmax = ax.get_xlim()
if plot_min is not None:
xmin = plot_min
if plot_max is not None:
xmax = plot_max
ax.set_xlim(xmin, xmax)
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False):
""" Sets the title of the marginal histograms.
Parameters
----------
ax : Axes
The `Axes` instance for the plot.
fmt : str
The string to add to the title.
color : str
The color of the text to add to the title.
label : str
If title does not exist, then include label at beginning of the string.
rotated : bool
If `True` then rotate the text 270 degrees for sideways title.
"""
# get rotation angle of the title
rotation = 270 if rotated else 0
# get how much to displace title on axes
xscale = 1.05 if rotated else 0.0
if rotated:
yscale = 1.0
elif len(ax.get_figure().axes) > 1:
yscale = 1.15
else:
yscale = 1.05
    # get the class that packs text boxes vertically or horizontally
packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker
# if no title exists
if not hasattr(ax, "title_boxes"):
# create a text box
title = "{} = {}".format(label, fmt)
tbox1 = offsetbox.TextArea(
title,
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
# save a list of text boxes as attribute for later
ax.title_boxes = [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
# else append existing title
else:
# delete old title
ax.title_anchor.remove()
# add new text box to list
tbox1 = offsetbox.TextArea(
" {}".format(fmt),
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
ax.title_boxes = ax.title_boxes + [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
# add new title and keep reference to instance as an attribute
anchored_ybox = offsetbox.AnchoredOffsetbox(
loc=2, child=ybox, pad=0.,
frameon=False, bbox_to_anchor=(xscale, yscale),
bbox_transform=ax.transAxes, borderpad=0.)
ax.title_anchor = ax.add_artist(anchored_ybox)
def create_multidim_plot(parameters, samples, labels=None,
mins=None, maxs=None, expected_parameters=None,
expected_parameters_color='r',
plot_marginal=True, plot_scatter=True,
plot_maxl=False,
plot_marginal_lines=True,
marginal_percentiles=None, contour_percentiles=None,
marginal_title=True, marginal_linestyle='-',
zvals=None, show_colorbar=True, cbar_label=None,
vmin=None, vmax=None, scatter_cmap='plasma',
plot_density=False, plot_contours=True,
density_cmap='viridis',
contour_color=None, label_contours=True,
contour_linestyles=None,
hist_color='black',
line_color=None, fill_color='gray',
use_kombine=False, kdeargs=None,
fig=None, axis_dict=None):
"""Generate a figure with several plots and histograms.
Parameters
----------
parameters: list
Names of the variables to be plotted.
samples : FieldArray
A field array of the samples to plot.
labels: dict, optional
A dictionary mapping parameters to labels. If none provided, will just
use the parameter strings as the labels.
mins : {None, dict}, optional
Minimum value for the axis of each variable in `parameters`.
If None, it will use the minimum of the corresponding variable in
`samples`.
maxs : {None, dict}, optional
Maximum value for the axis of each variable in `parameters`.
If None, it will use the maximum of the corresponding variable in
`samples`.
expected_parameters : {None, dict}, optional
Expected values of `parameters`, as a dictionary mapping parameter
names -> values. A cross will be plotted at the location of the
expected parameters on axes that plot any of the expected parameters.
expected_parameters_color : {'r', string}, optional
What color to make the expected parameters cross.
plot_marginal : {True, bool}
Plot the marginalized distribution on the diagonals. If False, the
diagonal axes will be turned off.
plot_scatter : {True, bool}
Plot each sample point as a scatter plot.
marginal_percentiles : {None, array}
What percentiles to draw lines at on the 1D histograms.
If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
upper 90th percentile and the median).
marginal_title : bool, optional
Add a title over the 1D marginal plots that gives an estimated value
        +/- uncertainty. The estimated value is the percentile halfway between
        the max/min of ``marginal_percentiles``, while the uncertainty is given
        by the max/min of the ``marginal_percentiles``. If no
``marginal_percentiles`` are specified, the median +/- 95/5 percentiles
will be quoted.
marginal_linestyle : str, optional
What line style to use for the marginal histograms.
contour_percentiles : {None, array}
What percentile contours to draw on the scatter plots. If None,
will plot the 50th and 90th percentiles.
zvals : {None, array}
An array to use for coloring the scatter plots. If None, scatter points
will be the same color.
show_colorbar : {True, bool}
Show the colorbar of zvalues used for the scatter points. A ValueError
will be raised if zvals is None and this is True.
cbar_label : {None, str}
Specify a label to add to the colorbar.
vmin: {None, float}, optional
Minimum value for the colorbar. If None, will use the minimum of zvals.
vmax: {None, float}, optional
Maximum value for the colorbar. If None, will use the maxmimum of
zvals.
scatter_cmap : {'plasma', string}
The color map to use for the scatter points. Default is 'plasma'.
plot_density : {False, bool}
Plot the density of points as a color map.
plot_contours : {True, bool}
Draw contours showing the 50th and 90th percentile confidence regions.
density_cmap : {'viridis', string}
The color map to use for the density plot.
contour_color : {None, string}
The color to use for the contour lines. Defaults to white for
density plots, navy for scatter plots without zvals, and black
otherwise.
label_contours : bool, optional
Whether to label the contours. Default is True.
contour_linestyles : list, optional
Linestyles to use for the contours. Default (None) will use solid.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
kdeargs : dict, optional
Pass the given keyword arguments to the KDE.
fig : pyplot.figure
Use the given figure instead of creating one.
axis_dict : dict
Use the given dictionary of axes instead of creating one.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
# set up the figure with a grid of axes
# if only plotting 2 parameters, make the marginal plots smaller
nparams = len(parameters)
if nparams == 2:
width_ratios = [3, 1]
height_ratios = [1, 3]
else:
width_ratios = height_ratios = None
if plot_maxl:
        # make sure loglikelihood is provided
if 'loglikelihood' not in samples.fieldnames:
raise ValueError("plot-maxl requires loglikelihood")
maxidx = samples['loglikelihood'].argmax()
# only plot scatter if more than one parameter
plot_scatter = plot_scatter and nparams > 1
# Sort zvals to get higher values on top in scatter plots
if plot_scatter:
if zvals is not None:
sort_indices = zvals.argsort()
zvals = zvals[sort_indices]
samples = samples[sort_indices]
if contour_color is None:
contour_color = 'k'
elif show_colorbar:
raise ValueError("must provide z values to create a colorbar")
else:
# just make all scatter points same color
zvals = 'gray'
if plot_contours and contour_color is None:
contour_color = 'navy'
# create the axis grid
if fig is None and axis_dict is None:
fig, axis_dict = create_axes_grid(
parameters, labels=labels,
width_ratios=width_ratios, height_ratios=height_ratios,
no_diagonals=not plot_marginal)
# convert samples to a dictionary to avoid re-computing derived parameters
# every time they are needed
# only try to plot what's available
sd = {}
for p in parameters:
try:
sd[p] = samples[p]
except (ValueError, TypeError, IndexError):
continue
samples = sd
parameters = list(sd.keys())
# values for axis bounds
if mins is None:
mins = {p: samples[p].min() for p in parameters}
else:
# copy the dict
mins = {p: val for p, val in mins.items()}
if maxs is None:
maxs = {p: samples[p].max() for p in parameters}
else:
# copy the dict
maxs = {p: val for p, val in maxs.items()}
# Diagonals...
if plot_marginal:
for pi, param in enumerate(parameters):
ax, _, _ = axis_dict[param, param]
# if only plotting 2 parameters and on the second parameter,
# rotate the marginal plot
rotated = nparams == 2 and pi == nparams-1
# see if there are expected values
if expected_parameters is not None:
try:
expected_value = expected_parameters[param]
except KeyError:
expected_value = None
else:
expected_value = None
create_marginalized_hist(
ax, samples[param], label=labels[param],
color=hist_color, fillcolor=fill_color,
plot_marginal_lines=plot_marginal_lines,
linestyle=marginal_linestyle, linecolor=line_color,
title=marginal_title, expected_value=expected_value,
expected_color=expected_parameters_color,
rotated=rotated, plot_min=mins[param], plot_max=maxs[param],
percentiles=marginal_percentiles)
# Off-diagonals...
for px, py in axis_dict:
if px == py or px not in parameters or py not in parameters:
continue
ax, _, _ = axis_dict[px, py]
if plot_scatter:
if plot_density:
alpha = 0.3
else:
alpha = 1.
plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5,
edgecolors='none', vmin=vmin, vmax=vmax,
cmap=scatter_cmap, alpha=alpha, zorder=2)
if plot_contours or plot_density:
# Exclude out-of-bound regions
# this is a bit kludgy; should probably figure out a better
# solution to eventually allow for more than just m_p m_s
if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'):
exclude_region = 'm_s > m_p'
else:
exclude_region = None
create_density_plot(
px, py, samples, plot_density=plot_density,
plot_contours=plot_contours, cmap=density_cmap,
percentiles=contour_percentiles,
contour_color=contour_color, label_contours=label_contours,
contour_linestyles=contour_linestyles,
xmin=mins[px], xmax=maxs[px],
ymin=mins[py], ymax=maxs[py],
exclude_region=exclude_region, ax=ax,
use_kombine=use_kombine, kdeargs=kdeargs)
if plot_maxl:
maxlx = samples[px][maxidx]
maxly = samples[py][maxidx]
ax.scatter(maxlx, maxly, marker='x', s=20, c=contour_color,
zorder=5)
if expected_parameters is not None:
try:
ax.axvline(expected_parameters[px], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
try:
ax.axhline(expected_parameters[py], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
ax.set_xlim(mins[px], maxs[px])
ax.set_ylim(mins[py], maxs[py])
# adjust tick number for large number of plots
if len(parameters) > 3:
for px, py in axis_dict:
ax, _, _ = axis_dict[px, py]
ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3))
if plot_scatter and show_colorbar:
# compute font size based on fig size
scale_fac = get_scale_fac(fig)
fig.subplots_adjust(right=0.85, wspace=0.03)
cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
cb = fig.colorbar(plt, cax=cbar_ax)
if cbar_label is not None:
cb.set_label(cbar_label, fontsize=12*scale_fac)
cb.ax.tick_params(labelsize=8*scale_fac)
return fig, axis_dict
def remove_common_offset(arr):
"""Given an array of data, removes a common offset > 1000, returning the
removed value.
"""
offset = 0
isneg = (arr <= 0).all()
# make sure all values have the same sign
if isneg or (arr >= 0).all():
# only remove offset if the minimum and maximum values are the same
        # order of magnitude and > O(1000)
minpwr = numpy.log10(abs(arr).min())
maxpwr = numpy.log10(abs(arr).max())
if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3:
offset = numpy.floor(10**minpwr)
if isneg:
offset *= -1
arr = arr - offset
return arr, int(offset)
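# Illustrative sketch (not part of the original module): strip the common
# offset from mock GPS-like values so that tick labels only show the part
# that actually varies.
def _example_remove_common_offset():  # pragma: no cover - illustrative only
    times = numpy.array([1126259462.1, 1126259463.7, 1126259465.2])
    shifted, offset = remove_common_offset(times)
    # offset is the common integer part subtracted; shifted = times - offset
    return shifted, offset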
def reduce_ticks(ax, which, maxticks=3):
"""Given a pyplot axis, resamples its `which`-axis ticks such that are at most
`maxticks` left.
Parameters
----------
ax : axis
The axis to adjust.
which : {'x' | 'y'}
Which axis to adjust.
maxticks : {3, int}
Maximum number of ticks to use.
Returns
-------
array
An array of the selected ticks.
"""
ticks = getattr(ax, 'get_{}ticks'.format(which))()
if len(ticks) > maxticks:
# make sure the left/right value is not at the edge
minax, maxax = getattr(ax, 'get_{}lim'.format(which))()
dw = abs(maxax-minax)/10.
start_idx, end_idx = 0, len(ticks)
if ticks[0] < minax + dw:
start_idx += 1
if ticks[-1] > maxax - dw:
end_idx -= 1
# get reduction factor
fac = int(len(ticks) / maxticks)
ticks = ticks[start_idx:end_idx:fac]
return ticks
| 33,143
| 37.184332
| 82
|
py
|
pycbc
|
pycbc-master/pycbc/results/dq.py
|
'''This module contains utilities for following up search triggers'''
# JavaScript for searching the aLOG
redirect_javascript = """<script type="text/javascript">
function redirect(form,way)
{
// Set location to form and submit.
if(form != '')
{
document.forms[form].action=way;
document.forms[form].submit();
}
else
{
window.top.location = way;
}
}
</script>"""
search_form_string="""<form name="%s_alog_search" id="%s_alog_search" method="post">
<input type="hidden" name="srcDateFrom" id="srcDateFrom" value="%s" size="20"/>
<input type="hidden" name="srcDateTo" id="srcDateTo" value="%s" size="20"/>
</form>"""
data_h1_string = """H1
<a href=https://ldas-jobs.ligo-wa.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('h1_alog_search',
'https://alog.ligo-wa.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
data_l1_string="""L1
<a href=https://ldas-jobs.ligo-la.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('l1_alog_search',
'https://alog.ligo-la.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
def get_summary_page_link(ifo, utc_time):
"""Return a string that links to the summary page and aLOG for this ifo
Parameters
----------
ifo : string
The detector name
utc_time : sequence
        First three elements must be numbers giving year, month, day resp.
Returns
-------
return_string : string
String containing HTML for links to summary page and aLOG search
"""
search_form = search_form_string
data = {'H1': data_h1_string, 'L1': data_l1_string}
if ifo not in data:
return ifo
else:
# alog format is day-month-year
alog_utc = '%02d-%02d-%4d' % (utc_time[2], utc_time[1], utc_time[0])
# summary page is exactly the reverse
ext = '%4d%02d%02d' % (utc_time[0], utc_time[1], utc_time[2])
return_string = search_form % (ifo.lower(), ifo.lower(), alog_utc, alog_utc)
return return_string + data[ifo] % ext
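# Illustrative sketch (not part of the original module): build the
# summary-page/aLOG HTML snippet for H1 on a made-up UTC date.
def _example_summary_link():  # pragma: no cover - illustrative only
    return get_summary_page_link('H1', (2015, 9, 14))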
| 2,187
| 29.816901
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/results/pygrb_plotting_utils.py
|
# Copyright (C) 2019 Francesco Pannarale, Gino Contestabile, Cameron Mills
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# =============================================================================
# Preamble
# =============================================================================
"""
Module to generate PyGRB figures: scatter plots and timeseries.
"""
import copy
import numpy
from ligo import segments
from pycbc.results import save_fig_with_metadata
# =============================================================================
# Used locally: plot contours in a scatter plot with SNR as horizontal axis
# =============================================================================
def contour_plotter(axis, snr_vals, contours, colors, vert_spike=False):
"""Plot contours in a scatter plot where SNR is on the horizontal axis"""
for i, _ in enumerate(contours):
plot_vals_x = []
plot_vals_y = []
if vert_spike:
for j, _ in enumerate(snr_vals):
# Workaround to ensure vertical spike is shown on veto plots
if contours[i][j] > 1E-15 and not plot_vals_x:
plot_vals_x.append(snr_vals[j])
plot_vals_y.append(0.1)
if contours[i][j] > 1E-15 and plot_vals_x:
plot_vals_x.append(snr_vals[j])
plot_vals_y.append(contours[i][j])
else:
plot_vals_x = snr_vals
plot_vals_y = contours[i]
axis.plot(plot_vals_x, plot_vals_y, colors[i])
#
# Functions used in executables
#
# =============================================================================
# Plot trigger time and offsource extent over segments
# Courtesy of Alex Dietz
# =============================================================================
def make_grb_segments_plot(wkflow, science_segs, trigger_time, trigger_name,
out_dir, coherent_seg=None, fail_criterion=None):
"""Plot trigger time and offsource extent over segments"""
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from pycbc.results.color import ifo_color
ifos = wkflow.ifos
if len(science_segs.keys()) == 0:
extent = segments.segment(int(wkflow.cp.get("workflow", "start-time")),
int(wkflow.cp.get("workflow", "end-time")))
else:
pltpad = [science_segs.extent_all()[1] - trigger_time,
trigger_time - science_segs.extent_all()[0]]
extent = segments.segmentlist([science_segs.extent_all(),
segments.segment(trigger_time
- pltpad[0],
trigger_time
+ pltpad[1])]).extent()
ifo_colors = {}
for ifo in ifos:
ifo_colors[ifo] = ifo_color(ifo)
if ifo not in science_segs.keys():
science_segs[ifo] = segments.segmentlist([])
# Make plot
fig, subs = plt.subplots(len(ifos), sharey=True)
if len(ifos) == 1:
subs = [subs]
plt.xticks(rotation=20, ha='right')
for sub, ifo in zip(subs, ifos):
for seg in science_segs[ifo]:
sub.add_patch(Rectangle((seg[0], 0.1), abs(seg), 0.8,
facecolor=ifo_colors[ifo],
edgecolor='none'))
if coherent_seg:
if len(science_segs[ifo]) > 0 and \
coherent_seg in science_segs[ifo]:
sub.plot([trigger_time, trigger_time], [0, 1], '-',
c='orange')
sub.add_patch(Rectangle((coherent_seg[0], 0),
abs(coherent_seg), 1, alpha=0.5,
facecolor='orange', edgecolor='none'))
else:
sub.plot([trigger_time, trigger_time], [0, 1], ':',
c='orange')
sub.plot([coherent_seg[0], coherent_seg[0]], [0, 1], '--',
c='orange', alpha=0.5)
sub.plot([coherent_seg[1], coherent_seg[1]], [0, 1], '--',
c='orange', alpha=0.5)
else:
sub.plot([trigger_time, trigger_time], [0, 1], ':k')
if fail_criterion:
if len(science_segs[ifo]) > 0:
style_str = '--'
else:
style_str = '-'
sub.plot([fail_criterion[0], fail_criterion[0]], [0, 1], style_str,
c='black', alpha=0.5)
sub.plot([fail_criterion[1], fail_criterion[1]], [0, 1], style_str,
c='black', alpha=0.5)
sub.set_frame_on(False)
sub.set_yticks([])
sub.set_ylabel(ifo, rotation=45)
sub.set_ylim([0, 1])
sub.set_xlim([float(extent[0]), float(extent[1])])
sub.get_xaxis().get_major_formatter().set_useOffset(False)
sub.get_xaxis().get_major_formatter().set_scientific(False)
sub.get_xaxis().tick_bottom()
if sub is subs[-1]:
sub.tick_params(labelsize=10, pad=1)
else:
sub.get_xaxis().set_ticks([])
sub.get_xaxis().set_ticklabels([])
xmin, xmax = fig.axes[-1].get_xaxis().get_view_interval()
ymin, _ = fig.axes[-1].get_yaxis().get_view_interval()
fig.axes[-1].add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black',
linewidth=2))
fig.axes[-1].set_xlabel('GPS Time')
fig.axes[0].set_title('Science Segments for GRB%s' % trigger_name)
plt.tight_layout()
fig.subplots_adjust(hspace=0)
plot_name = 'GRB%s_segments.png' % trigger_name
plot_url = 'file://localhost%s/%s' % (out_dir, plot_name)
fig.savefig('%s/%s' % (out_dir, plot_name))
return [ifos, plot_name, extent, plot_url]
# =============================================================================
# Given the trigger and injection values of a quantity, determine the maximum
# =============================================================================
def axis_max_value(trig_values, inj_values, inj_file):
"""Deterime the maximum of a quantity in the trigger and injection data"""
axis_max = trig_values.max()
if inj_file and inj_values.size and inj_values.max() > axis_max:
axis_max = inj_values.max()
return axis_max
# =============================================================================
# Master plotting function: fits all plotting needs for PyGRB results
# =============================================================================
def pygrb_plotter(trigs, injs, xlabel, ylabel, opts,
snr_vals=None, conts=None, shade_cont_value=None,
colors=None, vert_spike=False, cmd=None):
"""Master function to plot PyGRB results"""
from matplotlib import pyplot as plt
# Set up plot
fig = plt.figure()
cax = fig.gca()
# Plot trigger-related and (if present) injection-related quantities
cax_plotter = cax.loglog if opts.use_logs else cax.plot
cax_plotter(trigs[0], trigs[1], 'bx')
if not (injs[0] is None and injs[1] is None):
cax_plotter(injs[0], injs[1], 'r+')
cax.grid()
# Plot contours
if conts is not None:
contour_plotter(cax, snr_vals, conts, colors, vert_spike=vert_spike)
# Add shading above a specific contour (typically used for vetoed area)
if shade_cont_value is not None:
limy = cax.get_ylim()[1]
polyx = copy.deepcopy(snr_vals)
polyy = copy.deepcopy(conts[shade_cont_value])
polyx = numpy.append(polyx, [max(snr_vals), min(snr_vals)])
polyy = numpy.append(polyy, [limy, limy])
cax.fill(polyx, polyy, color='#dddddd')
# Axes: labels and limits
cax.set_xlabel(xlabel)
cax.set_ylabel(ylabel)
if opts.x_lims:
        x_lims = [float(x) for x in opts.x_lims.split(',')]
cax.set_xlim(x_lims)
if opts.y_lims:
        y_lims = [float(y) for y in opts.y_lims.split(',')]
cax.set_ylim(y_lims)
# Wrap up
plt.tight_layout()
save_fig_with_metadata(fig, opts.output_file, cmd=cmd,
title=opts.plot_title,
caption=opts.plot_caption)
plt.close()
| 9,071
| 40.806452
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/results/str_utils.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for formatting values into strings for display.
"""
import numpy
mjax_header = """
<script type="text/x-mathjax-config">
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$']]}});
</script>
<script type="text/javascript"
src="//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>
"""
def mathjax_html_header():
"""Standard header to use for html pages to display latex math.
Returns
-------
header: str
The necessary html head needed to use latex on an html page.
"""
return mjax_header
def drop_trailing_zeros(num):
"""
Drops the trailing zeros in a float that is printed.
"""
txt = '%f' %(num)
txt = txt.rstrip('0')
if txt.endswith('.'):
txt = txt[:-1]
return txt
def get_signum(val, err, max_sig=numpy.inf):
"""
    Given an error, returns a string for val formatted to the appropriate
number of significant figures.
"""
coeff, pwr = ('%e' % err).split('e')
if pwr.startswith('-'):
pwr = int(pwr[1:])
if round(float(coeff)) == 10.:
pwr -= 1
pwr = min(pwr, max_sig)
tmplt = '%.' + str(pwr+1) + 'f'
return tmplt % val
else:
pwr = int(pwr[1:])
if round(float(coeff)) == 10.:
pwr += 1
# if the error is large, we can sometimes get 0;
# adjust the round until we don't get 0 (assuming the actual
# value isn't 0)
return_val = round(val, -pwr+1)
if val != 0.:
loop_count = 0
max_recursion = 100
while return_val == 0.:
pwr -= 1
return_val = round(val, -pwr+1)
loop_count += 1
if loop_count > max_recursion:
raise ValueError("Maximum recursion depth hit! Input " +\
"values are: val = %f, err = %f" %(val, err))
return drop_trailing_zeros(return_val)
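# Illustrative sketch (added for clarity, not part of the original module):
# the exponent of the error decides how many digits of ``val`` survive.
# The inputs below are made-up values.
#
# >>> get_signum(3.14159, 0.01)
# '3.142'
# >>> get_signum(123.456, 20.)
# '123'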
def format_value(value, error, plus_error=None, use_scientific_notation=3,
include_error=True, use_relative_error=False, ndecs=None):
"""Given a numerical value and some bound on it, formats the number into a
string such that the value is rounded to the nearest significant figure,
which is determined by the error = abs(value-bound).
Note: if either use_scientific_notation or include_error are True, the
returned string will include LaTeX characters.
Parameters
----------
value : float
The value to format.
error : float
The uncertainty in the value. This is used to determine the
number of significant figures to print. If the value has no
uncertainty, you can just do value*1e-k, where k+1 is the number
of significant figures you want.
plus_error : {None, float}
The upper uncertainty on the value; i.e., what you need to add to the
value to get its upper bound. If provided, ``error`` is assumed to be
the negative; i.e., value +plus_error -error. The number of
significant figures printed is determined from min(error,
plus_error).
use_scientific_notation : int, optional
        If ``abs(log10(value))`` is greater than the given value, the return string
        will be formatted to "\%.1f \\times 10^{p}", where p is the power of 10
needed for the leading number in the value to be in the singles spot.
Otherwise will return "\%.(p+1)f". Default is 3. To turn off, set to
``numpy.inf``. Note: using scientific notation assumes that the
returned value will be enclosed in LaTeX math mode.
include_error : {True, bool}
        Include the error in the return string; the output will be formatted
val \\pm err, where err is the error rounded to the same
power of 10 as val. Otherwise, just the formatted value will
be returned. If plus_error is provided then the return text will be
formatted as ``val^{+plus_error}_{-error}``.
use_relative_error : {False, bool}
If include_error, the error will be formatted as a percentage of the
        value.
ndecs: {None, int}
Number of values after the decimal point. If not provided,
it will default to the number of values in the error.
Returns
-------
string
The value (and error, if include_error is True) formatted as a string.
Examples
--------
Given a value and its uncertainty:
>>> val, err
(3.9278372067613837e-22, 2.2351435286500487e-23)
Format with error quoted:
>>> format_value(val, err)
'3.93 \\pm 0.22\\times 10^{-22}'
Quote error as a relative error:
>>> format_value(val, err, use_relative_error=True)
'3.93 \\times 10^{-22} \\pm5.6\\%'
Format without the error and without scientific notation:
>>> format_value(val, err, use_scientific_notation=float('inf'),
include_error=False)
'0.000000000000000000000393'
    Given a plus error:
>>> err_plus
8.2700310560051804e-24
Format with both bounds quoted:
>>> format_value(val, err, plus_error=err_plus)
'3.928^{+0.083}_{-0.224}\\times 10^{-22}'
Format with both bounds quoted as a relative error:
>>> format_value(val, err, plus_error=err_plus, use_relative_error=True)
'3.928\\times 10^{-22}\\,^{+2.1\\%}_{-5.7\\%}'
"""
minus_sign = '-' if value < 0. else ''
value = abs(value)
minus_err = abs(error)
if plus_error is None:
plus_err = minus_err
else:
plus_err = abs(plus_error)
error = min(minus_err, plus_err)
if value == 0. or abs(numpy.log10(value)) < use_scientific_notation:
conversion_factor = 0.
else:
conversion_factor = numpy.floor(numpy.log10(value))
value = value * 10**(-conversion_factor)
error = error * 10**(-conversion_factor)
if conversion_factor == 0.:
powfactor = ''
elif conversion_factor == 1.:
powfactor = r'\times 10'
else:
powfactor = r'\times 10^{%i}' %(int(conversion_factor))
if ndecs is not None:
decs = value * 10**(-ndecs)
else:
decs = error
    # now round to the appropriate number of sig figs
valtxt = get_signum(value, decs)
valtxt = '{}{}'.format(minus_sign, valtxt)
if include_error:
if plus_error is None:
errtxt = get_signum(error, error)
if use_relative_error and float(valtxt) != 0.:
relative_err = 100.*float(errtxt)/float(valtxt)
# we round the relative error to the nearest 1% using
# get_signum; Note that if the relative error is < 1%,
# get_signum will automatically increase the number of values
# after the decimal until it gets to the first non-zero value
relative_err = get_signum(relative_err, 1.)
txt = r'%s %s \pm%s\%%' %(valtxt, powfactor, relative_err)
else:
txt = r'%s \pm %s%s' %(valtxt, errtxt, powfactor)
else:
plus_err = plus_err * 10**(-conversion_factor)
minus_err = minus_err * 10**(-conversion_factor)
minus_err_txt = get_signum(minus_err, decs)
plus_err_txt = get_signum(plus_err, decs)
if use_relative_error and float(valtxt) != 0.:
# same as above, but with plus and minus
rel_plus_err = get_signum(
100.*float(plus_err_txt)/float(valtxt), 1.)
rel_minus_err = get_signum(
100.*float(minus_err_txt)/float(valtxt), 1.)
txt = r'%s%s\,^{+%s\%%}_{-%s\%%}' %(valtxt, powfactor,
rel_plus_err, rel_minus_err)
else:
txt = r'%s^{+%s}_{-%s}%s' %(valtxt, plus_err_txt,
minus_err_txt, powfactor)
else:
txt = r'%s%s' %(valtxt, powfactor)
return txt
__all__ = [
"mathjax_html_header",
"drop_trailing_zeros",
"get_signum",
"format_value"
]
| 9,027
| 34.968127
| 83
|
py
|
pycbc
|
pycbc-master/pycbc/frame/frame.py
|
# Copyright (C) 2014 Andrew Miller, Alex Nitz, Tito Dal Canton, Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions for reading in data from frame files or caches
"""
import lalframe, logging
import lal
import numpy
import math
import os.path, glob, time
from gwdatafind import find_urls as find_frame_urls
import pycbc
from urllib.parse import urlparse
from pycbc.types import TimeSeries, zeros
# map LAL series types to corresponding functions and Numpy types
_fr_type_map = {
lal.S_TYPE_CODE: [
lalframe.FrStreamReadREAL4TimeSeries, numpy.float32,
lal.CreateREAL4TimeSeries,
lalframe.FrStreamGetREAL4TimeSeriesMetadata,
lal.CreateREAL4Sequence,
lalframe.FrameAddREAL4TimeSeriesProcData
],
lal.D_TYPE_CODE: [
lalframe.FrStreamReadREAL8TimeSeries, numpy.float64,
lal.CreateREAL8TimeSeries,
lalframe.FrStreamGetREAL8TimeSeriesMetadata,
lal.CreateREAL8Sequence,
lalframe.FrameAddREAL8TimeSeriesProcData
],
lal.C_TYPE_CODE: [
lalframe.FrStreamReadCOMPLEX8TimeSeries, numpy.complex64,
lal.CreateCOMPLEX8TimeSeries,
lalframe.FrStreamGetCOMPLEX8TimeSeriesMetadata,
lal.CreateCOMPLEX8Sequence,
lalframe.FrameAddCOMPLEX8TimeSeriesProcData
],
lal.Z_TYPE_CODE: [
lalframe.FrStreamReadCOMPLEX16TimeSeries, numpy.complex128,
lal.CreateCOMPLEX16TimeSeries,
lalframe.FrStreamGetCOMPLEX16TimeSeriesMetadata,
lal.CreateCOMPLEX16Sequence,
lalframe.FrameAddCOMPLEX16TimeSeriesProcData
],
lal.U4_TYPE_CODE: [
lalframe.FrStreamReadUINT4TimeSeries, numpy.uint32,
lal.CreateUINT4TimeSeries,
lalframe.FrStreamGetUINT4TimeSeriesMetadata,
lal.CreateUINT4Sequence,
lalframe.FrameAddUINT4TimeSeriesProcData
],
lal.I4_TYPE_CODE: [
lalframe.FrStreamReadINT4TimeSeries, numpy.int32,
lal.CreateINT4TimeSeries,
lalframe.FrStreamGetINT4TimeSeriesMetadata,
lal.CreateINT4Sequence,
lalframe.FrameAddINT4TimeSeriesProcData
],
}
def _read_channel(channel, stream, start, duration):
""" Get channel using lalframe """
channel_type = lalframe.FrStreamGetTimeSeriesType(channel, stream)
read_func = _fr_type_map[channel_type][0]
d_type = _fr_type_map[channel_type][1]
data = read_func(stream, channel, start, duration, 0)
return TimeSeries(data.data.data, delta_t=data.deltaT, epoch=start,
dtype=d_type)
def _is_gwf(file_path):
"""Test if a file is a frame file by checking if its contents begins with
the magic string 'IGWD'."""
try:
with open(file_path, 'rb') as f:
if f.read(4) == b'IGWD':
return True
except IOError:
pass
return False
def locations_to_cache(locations, latest=False):
""" Return a cumulative cache file build from the list of locations
Parameters
----------
locations : list
A list of strings containing files, globs, or cache files used to
build a combined lal cache file object.
latest : Optional, {False, Boolean}
Only return a cache with the most recent frame in the locations.
If false, all results are returned.
Returns
-------
cache : lal.Cache
A cumulative lal cache object containing the files derived from the
list of locations.
"""
cum_cache = lal.Cache()
for source in locations:
flist = glob.glob(source)
if latest:
def relaxed_getctime(fn):
# when building a cache from a directory of temporary
# low-latency frames, files might disappear between
# the glob() and getctime() calls
try:
return os.path.getctime(fn)
except OSError:
return 0
if not flist:
raise ValueError('no frame or cache files found in ' + source)
flist = [max(flist, key=relaxed_getctime)]
for file_path in flist:
dir_name, file_name = os.path.split(file_path)
_, file_extension = os.path.splitext(file_name)
if file_extension in [".lcf", ".cache"]:
cache = lal.CacheImport(file_path)
elif file_extension == ".gwf" or _is_gwf(file_path):
cache = lalframe.FrOpen(str(dir_name), str(file_name)).cache
else:
raise TypeError("Invalid location name")
cum_cache = lal.CacheMerge(cum_cache, cache)
return cum_cache
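# Illustrative usage (a sketch, not part of the original module); the paths
# below are hypothetical:
#
# >>> cache = locations_to_cache(['/data/frames/H1/*.gwf',
# ...                             '/data/segments.lcf'])
# >>> newest = locations_to_cache(['/data/frames/H1/*.gwf'], latest=True)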
def read_frame(location, channels, start_time=None,
end_time=None, duration=None, check_integrity=False,
sieve=None):
"""Read time series from frame data.
Using the `location`, which can either be a frame file ".gwf" or a
frame cache ".gwf", read in the data for the given channel(s) and output
as a TimeSeries or list of TimeSeries.
Parameters
----------
location : string
A source of gravitational wave frames. Either a frame filename
(can include pattern), a list of frame files, or frame cache file.
channels : string or list of strings
Either a string that contains the channel name or a list of channel
name strings.
start_time : {None, LIGOTimeGPS}, optional
The gps start time of the time series. Defaults to reading from the
beginning of the available frame(s).
end_time : {None, LIGOTimeGPS}, optional
The gps end time of the time series. Defaults to the end of the frame.
Note, this argument is incompatible with `duration`.
duration : {None, float}, optional
The amount of data to read in seconds. Note, this argument is
        incompatible with `end_time`.
    check_integrity : {False, bool}, optional
Test the frame files for internal integrity.
sieve : string, optional
Selects only frames where the frame URL matches the regular
expression sieve
Returns
-------
Frame Data: TimeSeries or list of TimeSeries
A TimeSeries or a list of TimeSeries, corresponding to the data from
the frame file/cache for a given channel or channels.
"""
if end_time and duration:
raise ValueError("end time and duration are mutually exclusive")
if type(location) is list:
locations = location
else:
locations = [location]
cum_cache = locations_to_cache(locations)
if sieve:
logging.info("Using frames that match regexp: %s", sieve)
lal.CacheSieve(cum_cache, 0, 0, None, None, sieve)
if start_time is not None and end_time is not None:
# Before sieving, check if this is sane. Otherwise it will fail later.
if (int(math.ceil(end_time)) - int(start_time)) <= 0:
raise ValueError("Negative or null duration")
lal.CacheSieve(cum_cache, int(start_time), int(math.ceil(end_time)),
None, None, None)
stream = lalframe.FrStreamCacheOpen(cum_cache)
stream.mode = lalframe.FR_STREAM_VERBOSE_MODE
if check_integrity:
stream.mode = (stream.mode | lalframe.FR_STREAM_CHECKSUM_MODE)
lalframe.FrStreamSetMode(stream, stream.mode)
# determine duration of data
if type(channels) is list:
first_channel = channels[0]
else:
first_channel = channels
data_length = lalframe.FrStreamGetVectorLength(first_channel, stream)
channel_type = lalframe.FrStreamGetTimeSeriesType(first_channel, stream)
create_series_func = _fr_type_map[channel_type][2]
get_series_metadata_func = _fr_type_map[channel_type][3]
series = create_series_func(first_channel, stream.epoch, 0, 0,
lal.ADCCountUnit, 0)
get_series_metadata_func(series, stream)
data_duration = (data_length + 0.5) * series.deltaT
if start_time is None:
start_time = stream.epoch*1
if end_time is None:
end_time = start_time + data_duration
if type(start_time) is not lal.LIGOTimeGPS:
start_time = lal.LIGOTimeGPS(start_time)
if type(end_time) is not lal.LIGOTimeGPS:
end_time = lal.LIGOTimeGPS(end_time)
if duration is None:
duration = float(end_time - start_time)
else:
duration = float(duration)
# lalframe behaves dangerously with invalid duration so catch it here
if duration <= 0:
raise ValueError("Negative or null duration")
#if duration > data_duration:
# raise ValueError("Requested duration longer than available data")
if type(channels) is list:
all_data = []
for channel in channels:
channel_data = _read_channel(channel, stream, start_time, duration)
lalframe.FrStreamSeek(stream, start_time)
all_data.append(channel_data)
return all_data
else:
return _read_channel(channels, stream, start_time, duration)
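# Illustrative usage (a sketch, not part of the original module); the file
# name and channel names below are hypothetical:
#
# >>> ts = read_frame('H-H1_TEST-968995968-2048.gwf', 'H1:LDAS-STRAIN',
# ...                 start_time=968995968, end_time=968996000)
# >>> ts_list = read_frame('H-H1_TEST-968995968-2048.gwf',
# ...                      ['H1:LDAS-STRAIN', 'H1:TEST-CHANNEL'])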
def frame_paths(frame_type, start_time, end_time, server=None, url_type='file'):
"""Return the paths to a span of frame files
Parameters
----------
frame_type : string
The string representation of the frame type (ex. 'H1_ER_C00_L1')
start_time : int
The start time that we need the frames to span.
end_time : int
The end time that we need the frames to span.
server : {None, SERVER:PORT string}, optional
Optional string to specify the datafind server to use. By default an
attempt is made to use a local datafind server.
url_type : string
Returns only frame URLs with a particular scheme or head such
as "file" or "https". Default is "file", which queries locally
stored frames. Option can be disabled if set to None.
Returns
-------
paths : list of paths
The list of paths to the frame files.
Examples
--------
>>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048)
"""
site = frame_type[0]
cache = find_frame_urls(site, frame_type, start_time, end_time,
urltype=url_type, host=server)
return [urlparse(entry).path for entry in cache]
def query_and_read_frame(frame_type, channels, start_time, end_time,
sieve=None, check_integrity=False):
"""Read time series from frame data.
    Query for the location of physical frames matching the frame type. Return
a time series containing the channel between the given start and end times.
Parameters
----------
frame_type : string
The type of frame file that we are looking for.
channels : string or list of strings
Either a string that contains the channel name or a list of channel
name strings.
start_time : LIGOTimeGPS or int
        The gps start time of the time series.
    end_time : LIGOTimeGPS or int
        The gps end time of the time series.
sieve : string, optional
Selects only frames where the frame URL matches the regular
expression sieve
check_integrity : boolean
Do an expensive checksum of the file before returning.
Returns
-------
Frame Data: TimeSeries or list of TimeSeries
A TimeSeries or a list of TimeSeries, corresponding to the data from
the frame file/cache for a given channel or channels.
Examples
--------
>>> ts = query_and_read_frame('H1_LDAS_C02_L2', 'H1:LDAS-STRAIN',
>>> 968995968, 968995968+2048)
"""
# Allows compatibility with our standard tools
# We may want to place this into a higher level frame getting tool
if frame_type in ['LOSC_STRAIN', 'GWOSC_STRAIN']:
from pycbc.frame.gwosc import read_strain_gwosc
if not isinstance(channels, list):
channels = [channels]
data = [read_strain_gwosc(c[:2], start_time, end_time)
for c in channels]
return data if len(data) > 1 else data[0]
if frame_type in ['LOSC', 'GWOSC']:
from pycbc.frame.gwosc import read_frame_gwosc
return read_frame_gwosc(channels, start_time, end_time)
logging.info('querying datafind server')
paths = frame_paths(frame_type, int(start_time), int(numpy.ceil(end_time)))
logging.info('found files: %s' % (' '.join(paths)))
return read_frame(paths, channels,
start_time=start_time,
end_time=end_time,
sieve=sieve,
check_integrity=check_integrity)
__all__ = ['read_frame', 'frame_paths',
'query_and_read_frame']
def write_frame(location, channels, timeseries):
"""Write a list of time series to a single frame file.
Parameters
----------
location : string
A frame filename.
channels : string or list of strings
Either a string that contains the channel name or a list of channel
name strings.
timeseries: TimeSeries
A TimeSeries or list of TimeSeries, corresponding to the data to be
written to the frame file for a given channel.
"""
# check if a single channel or a list of channels
if type(channels) is list and type(timeseries) is list:
channels = channels
timeseries = timeseries
else:
channels = [channels]
timeseries = [timeseries]
# check that timeseries have the same start and end time
gps_start_times = {series.start_time for series in timeseries}
gps_end_times = {series.end_time for series in timeseries}
if len(gps_start_times) != 1 or len(gps_end_times) != 1:
raise ValueError("Start and end times of TimeSeries must be identical.")
# check that start, end time, and duration are integers
gps_start_time = gps_start_times.pop()
gps_end_time = gps_end_times.pop()
duration = int(gps_end_time - gps_start_time)
if gps_start_time % 1 or gps_end_time % 1:
raise ValueError("Start and end times of TimeSeries must be integer seconds.")
# create frame
frame = lalframe.FrameNew(epoch=gps_start_time, duration=duration,
project='', run=1, frnum=1,
detectorFlags=lal.LALDETECTORTYPE_ABSENT)
for i,tseries in enumerate(timeseries):
# get data type
for seriestype in _fr_type_map.keys():
if _fr_type_map[seriestype][1] == tseries.dtype:
create_series_func = _fr_type_map[seriestype][2]
create_sequence_func = _fr_type_map[seriestype][4]
add_series_func = _fr_type_map[seriestype][5]
break
# add time series to frame
series = create_series_func(channels[i], tseries.start_time,
0, tseries.delta_t, lal.ADCCountUnit,
len(tseries.numpy()))
series.data = create_sequence_func(len(tseries.numpy()))
series.data.data = tseries.numpy()
add_series_func(frame, series)
# write frame
lalframe.FrameWrite(frame, location)
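# Illustrative usage (a sketch, not part of the original module); the output
# path and channel name are hypothetical. Note that write_frame requires the
# time series to start and end on integer GPS seconds.
#
# >>> import numpy
# >>> from pycbc.types import TimeSeries
# >>> ts = TimeSeries(numpy.zeros(4 * 16), delta_t=1.0/16,
# ...                 epoch=1000000000)
# >>> write_frame('/tmp/H-TEST-1000000000-4.gwf', 'H1:TEST-CHANNEL', ts)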
class DataBuffer(object):
"""A linear buffer that acts as a FILO for reading in frame data
"""
def __init__(self, frame_src,
channel_name,
start_time,
max_buffer=2048,
force_update_cache=True,
increment_update_cache=None,
dtype=numpy.float64):
""" Create a rolling buffer of frame data
Parameters
        ----------
        frame_src: str or list of strings
            Strings that indicate where to read frame files from. This can be a
list of frame files, a glob, etc.
channel_name: str
Name of the channel to read from the frame files
start_time:
Time to start reading from.
max_buffer: {int, 2048}, Optional
Length of the buffer in seconds
        dtype: {dtype, numpy.float64}, Optional
            Data type to use for the internal buffer
"""
self.frame_src = frame_src
self.channel_name = channel_name
self.read_pos = start_time
self.force_update_cache = force_update_cache
self.increment_update_cache = increment_update_cache
self.detector = channel_name.split(':')[0]
self.update_cache()
self.channel_type, self.raw_sample_rate = self._retrieve_metadata(self.stream, self.channel_name)
raw_size = self.raw_sample_rate * max_buffer
self.raw_buffer = TimeSeries(zeros(raw_size, dtype=dtype),
copy=False,
epoch=start_time - max_buffer,
delta_t=1.0/self.raw_sample_rate)
def update_cache(self):
"""Reset the lal cache. This can be used to update the cache if the
result may change due to more files being added to the filesystem,
for example.
"""
cache = locations_to_cache(self.frame_src, latest=True)
stream = lalframe.FrStreamCacheOpen(cache)
self.stream = stream
@staticmethod
def _retrieve_metadata(stream, channel_name):
"""Retrieve basic metadata by reading the first file in the cache
Parameters
----------
stream: lal stream object
Stream containing a channel we want to learn about
channel_name: str
The name of the channel we want to know the dtype and sample rate of
Returns
-------
channel_type: lal type enum
Enum value which indicates the dtype of the channel
sample_rate: int
The sample rate of the data within this channel
"""
lalframe.FrStreamGetVectorLength(channel_name, stream)
channel_type = lalframe.FrStreamGetTimeSeriesType(channel_name, stream)
create_series_func = _fr_type_map[channel_type][2]
get_series_metadata_func = _fr_type_map[channel_type][3]
series = create_series_func(channel_name, stream.epoch, 0, 0,
lal.ADCCountUnit, 0)
get_series_metadata_func(series, stream)
return channel_type, int(1.0/series.deltaT)
def _read_frame(self, blocksize):
"""Try to read the block of data blocksize seconds long
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
data: TimeSeries
            TimeSeries containing 'blocksize' seconds of frame data
Raises
------
RuntimeError:
If data cannot be read for any reason
"""
try:
read_func = _fr_type_map[self.channel_type][0]
dtype = _fr_type_map[self.channel_type][1]
data = read_func(self.stream, self.channel_name,
self.read_pos, int(blocksize), 0)
return TimeSeries(data.data.data, delta_t=data.deltaT,
epoch=self.read_pos,
dtype=dtype)
except Exception:
raise RuntimeError('Cannot read {0} frame data'.format(self.channel_name))
def null_advance(self, blocksize):
"""Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
self.raw_buffer.roll(-int(blocksize * self.raw_sample_rate))
self.read_pos += blocksize
self.raw_buffer.start_time += blocksize
def advance(self, blocksize):
"""Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
ts = self._read_frame(blocksize)
self.raw_buffer.roll(-len(ts))
self.raw_buffer[-len(ts):] = ts[:]
self.read_pos += blocksize
self.raw_buffer.start_time += blocksize
return ts
def update_cache_by_increment(self, blocksize):
"""Update the internal cache by starting from the first frame
and incrementing.
Guess the next frame file name by incrementing from the first found
one. This allows a pattern to be used for the GPS folder of the file,
        which is indicated by `GPSX`, where X is the number of digits to use.
Parameters
----------
blocksize: int
Number of seconds to increment the next frame file.
"""
start = float(self.raw_buffer.end_time)
end = float(start + blocksize)
if not hasattr(self, 'dur'):
fname = glob.glob(self.frame_src[0])[0]
fname = os.path.splitext(os.path.basename(fname))[0].split('-')
self.beg = '-'.join([fname[0], fname[1]])
self.ref = int(fname[2])
self.dur = int(fname[3])
fstart = int(self.ref + numpy.floor((start - self.ref) / float(self.dur)) * self.dur)
starts = numpy.arange(fstart, end, self.dur).astype(int)
keys = []
for s in starts:
pattern = self.increment_update_cache
if 'GPS' in pattern:
n = int(pattern[int(pattern.index('GPS') + 3)])
pattern = pattern.replace('GPS%s' % n, str(s)[0:n])
name = f'{pattern}/{self.beg}-{s}-{self.dur}.gwf'
# check that file actually exists, else abort now
if not os.path.exists(name):
                raise RuntimeError('Expected frame file {} not found'.format(name))
keys.append(name)
cache = locations_to_cache(keys)
stream = lalframe.FrStreamCacheOpen(cache)
self.stream = stream
self.channel_type, self.raw_sample_rate = \
self._retrieve_metadata(self.stream, self.channel_name)
def attempt_advance(self, blocksize, timeout=10):
""" Attempt to advance the frame buffer. Retry upon failure, except
if the frame file is beyond the timeout limit.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
timeout: {int, 10}, Optional
Number of seconds before giving up on reading a frame
Returns
-------
data: TimeSeries
            TimeSeries containing 'blocksize' seconds of frame data
"""
if self.force_update_cache:
self.update_cache()
while True:
try:
if self.increment_update_cache:
self.update_cache_by_increment(blocksize)
return DataBuffer.advance(self, blocksize)
except RuntimeError:
if pycbc.gps_now() > timeout + self.raw_buffer.end_time:
# The frame is not there and it should be by now,
# so we give up and treat it as zeros
DataBuffer.null_advance(self, blocksize)
return None
# I am too early to give up on this frame,
# so we should try again
time.sleep(0.1)
class StatusBuffer(DataBuffer):
""" Read state vector or DQ information from a frame file """
def __init__(self, frame_src,
channel_name,
start_time,
max_buffer=2048,
valid_mask=3,
force_update_cache=False,
increment_update_cache=None,
valid_on_zero=False):
""" Create a rolling buffer of status data from a frame
Parameters
        ----------
        frame_src: str or list of strings
            Strings that indicate where to read frame files from. This can be a
list of frame files, a glob, etc.
channel_name: str
Name of the channel to read from the frame files
start_time:
Time to start reading from.
max_buffer: {int, 2048}, Optional
Length of the buffer in seconds
valid_mask: {int, HOFT_OK | SCIENCE_INTENT}, Optional
Set of flags that must be on to indicate valid frame data.
valid_on_zero: bool
If True, `valid_mask` is ignored and the status is considered
"good" simply when the channel is zero.
"""
DataBuffer.__init__(self, frame_src, channel_name, start_time,
max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache,
dtype=numpy.int32)
self.valid_mask = valid_mask
self.valid_on_zero = valid_on_zero
def check_valid(self, values, flag=None):
"""Check if the data contains any non-valid status information
Parameters
----------
values: pycbc.types.Array
Array of status information
        flag: int, optional
Override the default valid mask with a user defined mask.
Returns
-------
status: boolean
            Returns True if all of the status information is valid,
False if any is not.
"""
if self.valid_on_zero:
valid = values.numpy() == 0
else:
if flag is None:
flag = self.valid_mask
valid = numpy.bitwise_and(values.numpy(), flag) == flag
return bool(numpy.all(valid))
def is_extent_valid(self, start_time, duration, flag=None):
"""Check if the duration contains any non-valid frames
Parameters
----------
start_time: int
Beginning of the duration to check in gps seconds
duration: int
Number of seconds after the start_time to check
        flag: int, optional
Override the default valid mask with a user defined mask.
Returns
-------
status: boolean
            Returns True if all of the status information is valid,
False if any is not.
"""
sr = self.raw_buffer.sample_rate
s = int((start_time - self.raw_buffer.start_time) * sr)
e = s + int(duration * sr) + 1
data = self.raw_buffer[s:e]
return self.check_valid(data, flag=flag)
def indices_of_flag(self, start_time, duration, times, padding=0):
""" Return the indices of the times lying in the flagged region
Parameters
----------
start_time: int
Beginning time to request for
duration: int
            Number of seconds to check.
        times: numpy.ndarray
            Trigger times to check against the flag.
        padding: float
            Number of seconds of padding added around flag-inactive times,
            which are then also treated as inactive.
Returns
-------
indices: numpy.ndarray
Array of indices marking the location of triggers within valid
time.
"""
from pycbc.events.veto import indices_outside_times
sr = self.raw_buffer.sample_rate
s = int((start_time - self.raw_buffer.start_time - padding) * sr) - 1
e = s + int((duration + padding) * sr) + 1
data = self.raw_buffer[s:e]
stamps = data.sample_times.numpy()
if self.valid_on_zero:
invalid = data.numpy() != 0
else:
invalid = numpy.bitwise_and(data.numpy(), self.valid_mask) \
!= self.valid_mask
starts = stamps[invalid] - padding
ends = starts + 1.0 / sr + padding * 2.0
idx = indices_outside_times(times, starts, ends)
return idx
def advance(self, blocksize):
""" Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
status: boolean
            Returns True if all of the status information is valid,
False if any is not.
"""
try:
if self.increment_update_cache:
self.update_cache_by_increment(blocksize)
ts = DataBuffer.advance(self, blocksize)
return self.check_valid(ts)
except RuntimeError:
self.null_advance(blocksize)
return False
class iDQBuffer(object):
""" Read iDQ timeseries from a frame file """
def __init__(self, frame_src,
idq_channel_name,
idq_status_channel_name,
idq_threshold,
start_time,
max_buffer=512,
force_update_cache=False,
increment_update_cache=None):
"""
Parameters
----------
        frame_src: str or list of strings
            Strings that indicate where to read frame files from. This can be a
list of frame files, a glob, etc.
idq_channel_name: str
Name of the channel to read the iDQ statistic from
idq_status_channel_name: str
Name of the channel to read the iDQ status from
idq_threshold: float
            A veto is applied when the iDQ channel falls at or below this threshold
start_time:
Time to start reading from.
max_buffer: {int, 512}, Optional
Length of the buffer in seconds
        force_update_cache: {boolean, False}, Optional
Re-check the filesystem for frame files on every attempt to
read more data.
increment_update_cache: {str, None}, Optional
Pattern to look for frame files in a GPS dependent directory. This
            is an alternative to the forced update of the frame cache, and
            attempts to predict the next frame file name without probing the
filesystem.
"""
self.threshold = idq_threshold
self.idq = DataBuffer(frame_src, idq_channel_name, start_time,
max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
self.idq_state = DataBuffer(frame_src, idq_status_channel_name, start_time,
max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
def indices_of_flag(self, start_time, duration, times, padding=0):
""" Return the indices of the times lying in the flagged region
Parameters
----------
start_time: int
Beginning time to request for
duration: int
            Number of seconds to check.
        times: numpy.ndarray
            Trigger times to check against the flag.
        padding: float
            Number of seconds of padding added around flag-inactive times,
            which are then also treated as inactive.
Returns
-------
indices: numpy.ndarray
Array of indices marking the location of triggers within valid
time.
"""
from pycbc.events.veto import indices_outside_times
sr = self.idq.raw_buffer.sample_rate
s = int((start_time - self.idq.raw_buffer.start_time - padding) * sr) - 1
e = s + int((duration + padding) * sr) + 1
idq_fap = self.idq.raw_buffer[s:e]
stamps = idq_fap.sample_times.numpy()
low_fap = idq_fap.numpy() <= self.threshold
idq_valid = self.idq_state.raw_buffer[s:e]
idq_valid = idq_valid.numpy().astype(bool)
valid_low_fap = numpy.logical_and(idq_valid, low_fap)
glitch_idx = numpy.flatnonzero(valid_low_fap)
glitch_times = stamps[glitch_idx]
starts = glitch_times - padding
ends = starts + 1.0 / sr + padding * 2.0
idx = indices_outside_times(times, starts, ends)
return idx
def advance(self, blocksize):
""" Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read
Returns
-------
status: boolean
            Returns True if advance is successful,
False if not.
"""
idq_ts = self.idq.attempt_advance(blocksize)
idq_state_ts = self.idq_state.attempt_advance(blocksize)
return (idq_ts is not None) and (idq_state_ts is not None)
def null_advance(self, blocksize):
"""Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to advance the buffers
"""
self.idq.null_advance(blocksize)
self.idq_state.null_advance(blocksize)
| 33,651
| 36.811236
| 105
|
py
|
pycbc
|
pycbc-master/pycbc/frame/gwosc.py
|
# Copyright (C) 2017 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions for getting data from the Gravitational Wave
Open Science Center (GWOSC).
"""
import json
from urllib.request import urlopen
from pycbc.io import get_file
from pycbc.frame import read_frame
_GWOSC_URL = "https://www.gwosc.org/archive/links/%s/%s/%s/%s/json/"
def get_run(time, ifo=None):
"""Return the run name for a given time.
Parameters
----------
time: int
The GPS time.
ifo: str
The interferometer prefix string. Optional and normally unused,
except for some special times where data releases were made for a
single detector under unusual circumstances. For example, to get
the data around GW170608 in the Hanford detector.
"""
cases = [
(
# ifo is only needed in this special case, otherwise,
# the run name is the same for all ifos
1180911618 <= time <= 1180982427 and ifo == 'H1',
'BKGW170608_16KHZ_R1'
),
(1253977219 <= time <= 1320363336, 'O3b_16KHZ_R1'),
(1238166018 <= time <= 1253977218, 'O3a_16KHZ_R1'),
(1164556817 <= time <= 1187733618, 'O2_16KHZ_R1'),
(1126051217 <= time <= 1137254417, 'O1'),
(815011213 <= time <= 875318414, 'S5'),
(930787215 <= time <= 971568015, 'S6')
]
for condition, name in cases:
if condition:
return name
raise ValueError(f'Time {time} not available in a public dataset')
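# Quick sanity checks (a sketch, not part of the original module):
#
# >>> get_run(1126259462)  # around GW150914
# 'O1'
# >>> get_run(1187008882)  # around GW170817
# 'O2_16KHZ_R1'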
def _get_channel(time):
if time < 1164556817:
return 'LOSC-STRAIN'
return 'GWOSC-16KHZ_R1_STRAIN'
def gwosc_frame_json(ifo, start_time, end_time):
"""Get the information about the public data files in a duration of time.
Parameters
----------
ifo: str
The name of the interferometer to find the information about.
start_time: int
The start time in GPS seconds.
end_time: int
The end time in GPS seconds.
Returns
-------
info: dict
A dictionary containing information about the files that span the
requested times.
"""
run = get_run(start_time)
run2 = get_run(end_time)
if run != run2:
raise ValueError(
'Spanning multiple runs is not currently supported. '
f'You have requested data that uses both {run} and {run2}'
)
url = _GWOSC_URL % (run, ifo, int(start_time), int(end_time))
try:
return json.loads(urlopen(url).read().decode())
except Exception as exc:
msg = ('Failed to find gwf files for '
f'ifo={ifo}, run={run}, between {start_time}-{end_time}')
raise ValueError(msg) from exc
def gwosc_frame_urls(ifo, start_time, end_time):
"""Get a list of URLs to GWOSC frame files.
Parameters
----------
ifo: str
The name of the interferometer to find the information about.
start_time: int
The start time in GPS seconds.
end_time: int
The end time in GPS seconds.
Returns
-------
frame_files: list
        A list of URLs to the gwf files that span the requested times.
"""
data = gwosc_frame_json(ifo, start_time, end_time)['strain']
return [d['url'] for d in data if d['format'] == 'gwf']
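# Illustrative usage (a sketch, not part of the original module; requires
# network access to the GWOSC archive):
#
# >>> urls = gwosc_frame_urls('L1', 1126259446, 1126259478)
# >>> len(urls) > 0
# True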
def read_frame_gwosc(channels, start_time, end_time):
"""Read channels from GWOSC data.
Parameters
----------
channels: str or list
The channel name to read or list of channel names.
start_time: int
The start time in GPS seconds.
end_time: int
The end time in GPS seconds.
Returns
-------
ts: TimeSeries
Returns a timeseries or list of timeseries with the requested data.
"""
if not isinstance(channels, list):
channels = [channels]
ifos = [c[0:2] for c in channels]
urls = {}
for ifo in ifos:
urls[ifo] = gwosc_frame_urls(ifo, start_time, end_time)
if len(urls[ifo]) == 0:
raise ValueError("No data found for %s so we "
"can't produce a time series" % ifo)
fnames = {ifo: [] for ifo in ifos}
for ifo in ifos:
for url in urls[ifo]:
fname = get_file(url, cache=True)
fnames[ifo].append(fname)
ts_list = [read_frame(fnames[channel[0:2]], channel,
start_time=start_time, end_time=end_time)
for channel in channels]
if len(ts_list) == 1:
return ts_list[0]
return ts_list
def read_strain_gwosc(ifo, start_time, end_time):
"""Get the strain data from the GWOSC data.
Parameters
----------
ifo: str
The name of the interferometer to read data for. Ex. 'H1', 'L1', 'V1'.
start_time: int
The start time in GPS seconds.
end_time: int
The end time in GPS seconds.
Returns
-------
ts: TimeSeries
Returns a timeseries with the strain data.
"""
channel = _get_channel(start_time)
return read_frame_gwosc(f'{ifo}:{channel}', start_time, end_time)
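# Illustrative usage (a sketch, not part of the original module; downloads
# public GWOSC data, so network access and local cache space are required):
#
# >>> strain = read_strain_gwosc('H1', 1126259446, 1126259478)
# >>> float(strain.duration)
# 32.0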
| 5,783
| 30.096774
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/frame/__init__.py
|
from . frame import (locations_to_cache, read_frame,
query_and_read_frame, frame_paths, write_frame,
DataBuffer, StatusBuffer, iDQBuffer)
from . store import (read_store)
# Status flags for the calibration state vector
# See e.g. https://dcc.ligo.org/LIGO-G1700234
# https://wiki.ligo.org/DetChar/DataQuality/O3Flags
HOFT_OK = 1
SCIENCE_INTENT = 2
SCIENCE_QUALITY = 4
HOFT_PROD = 8
FILTERS_OK = 16
NO_STOCH_HW_INJ = 32
NO_CBC_HW_INJ = 64
NO_BURST_HW_INJ = 128
NO_DETCHAR_HW_INJ = 256
KAPPA_A_OK = 512
KAPPA_PU_OK = 1024
KAPPA_TST_OK = 2048
KAPPA_C_OK = 4096
FCC_OK = 8192
NO_GAP = 16384
NO_HWINJ = NO_STOCH_HW_INJ | NO_CBC_HW_INJ | \
NO_BURST_HW_INJ | NO_DETCHAR_HW_INJ
# relevant bits in the LIGO O2/O3 low-latency DQ vector
# If the bit is 0 then we should veto
# https://wiki.ligo.org/DetChar/DmtDqVector
# https://wiki.ligo.org/DetChar/DataQuality/O3Flags
OMC_DCPD_ADC_OVERFLOW = 2
ETMY_ESD_DAC_OVERFLOW = 4
ETMX_ESD_DAC_OVERFLOW = 16
# CAT1 bit in the Virgo state vector
# https://wiki.virgo-gw.eu/DetChar/DetCharVirgoStateVector
VIRGO_GOOD_DQ = 1 << 10
def flag_names_to_bitmask(flags):
"""Takes a list of flag names corresponding to bits in a status channel
and returns the corresponding bit mask.
"""
mask = 0
for flag in flags:
mask |= globals()[flag]
return mask
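# Example (a sketch, not part of the original module): build the bitmask used
# by e.g. StatusBuffer from the flag names defined above.
#
# >>> flag_names_to_bitmask(['HOFT_OK', 'SCIENCE_INTENT'])
# 3
# >>> flag_names_to_bitmask(['NO_CBC_HW_INJ', 'NO_BURST_HW_INJ'])
# 192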
| 1,365
| 26.32
| 75
|
py
|
pycbc
|
pycbc-master/pycbc/frame/store.py
|
# Copyright (C) 2019 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions for reading in data from hdf stores
"""
import h5py
import numpy
from pycbc.types import TimeSeries
def read_store(fname, channel, start_time, end_time):
""" Read time series data from hdf store
Parameters
----------
fname: str
Name of hdf store file
channel: str
Channel name to read
start_time: int
GPS time to start reading from
end_time: int
GPS time to end time series
Returns
-------
ts: pycbc.types.TimeSeries
Time series containing the requested data
"""
fhandle = h5py.File(fname, 'r')
if channel not in fhandle:
raise ValueError('Could not find channel name {}'.format(channel))
# Determine which segment data lies in (can only read contiguous data now)
starts = fhandle[channel]['segments']['start'][:]
ends = fhandle[channel]['segments']['end'][:]
diff = start_time - starts
loc = numpy.where(diff >= 0)[0]
sidx = loc[diff[loc].argmin()]
stime = starts[sidx]
etime = ends[sidx]
if stime > start_time:
raise ValueError("Cannot read data segment before {}".format(stime))
if etime < end_time:
raise ValueError("Cannot read data segment past {}".format(etime))
data = fhandle[channel][str(sidx)]
sample_rate = len(data) / (etime - stime)
start = int((start_time - stime) * sample_rate)
end = int((end_time - stime) * sample_rate)
return TimeSeries(data[start:end], delta_t=1.0/sample_rate,
epoch=start_time)
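# Illustrative usage (a sketch, not part of the original module); the store
# file and channel name below are hypothetical:
#
# >>> ts = read_store('/data/strain_store.hdf', 'H1:GDS-CALIB_STRAIN',
# ...                 1186740000, 1186740064)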
| 2,303
| 31
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/filter/resample.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import functools
import lal
import numpy
import scipy.signal
from pycbc.types import TimeSeries, Array, zeros, FrequencySeries, real_same_precision_as
from pycbc.types import complex_same_precision_as
from pycbc.fft import ifft, fft
_resample_func = {numpy.dtype('float32'): lal.ResampleREAL4TimeSeries,
numpy.dtype('float64'): lal.ResampleREAL8TimeSeries}
@functools.lru_cache(maxsize=20)
def cached_firwin(*args, **kwargs):
"""Cache the FIR filter coefficients.
This is mostly done for PyCBC Live, which rapidly and repeatedly resamples data.
"""
return scipy.signal.firwin(*args, **kwargs)
# Change to True in front-end if you want this function to use caching
# This is a mostly-hidden optimization option that most users will not want
# to use. It is used in PyCBC Live
USE_CACHING_FOR_LFILTER = False
# If using caching we want output to be unique if called at different places
# (and if called from different modules/functions), these unique IDs achieve
# that. The numbers are not significant, only that they are unique.
LFILTER_UNIQUE_ID_1 = 651273657
LFILTER_UNIQUE_ID_2 = 154687641
LFILTER_UNIQUE_ID_3 = 548946442
def lfilter(coefficients, timeseries):
""" Apply filter coefficients to a time series
Parameters
----------
coefficients: numpy.ndarray
Filter coefficients to apply
timeseries: numpy.ndarray
Time series to be filtered.
Returns
-------
tseries: numpy.ndarray
filtered array
"""
from pycbc.filter import correlate
fillen = len(coefficients)
# If there aren't many points just use the default scipy method
if len(timeseries) < 2**7:
series = scipy.signal.lfilter(coefficients, 1.0, timeseries)
return TimeSeries(series,
epoch=timeseries.start_time,
delta_t=timeseries.delta_t)
elif (len(timeseries) < fillen * 10) or (len(timeseries) < 2**18):
from pycbc.strain.strain import create_memory_and_engine_for_class_based_fft
from pycbc.strain.strain import execute_cached_fft
cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype)
cseries.resize(len(timeseries))
cseries.roll(len(timeseries) - fillen + 1)
flen = len(cseries) // 2 + 1
ftype = complex_same_precision_as(timeseries)
if not USE_CACHING_FOR_LFILTER:
cfreq = zeros(flen, dtype=ftype)
tfreq = zeros(flen, dtype=ftype)
fft(Array(cseries), cfreq)
fft(Array(timeseries), tfreq)
cout = zeros(flen, ftype)
correlate(cfreq, tfreq, cout)
            out = zeros(len(timeseries), dtype=timeseries.dtype)
ifft(cout, out)
else:
npoints = len(cseries)
# NOTE: This function is cached!
ifftouts = create_memory_and_engine_for_class_based_fft(
npoints,
timeseries.dtype,
ifft=True,
uid=LFILTER_UNIQUE_ID_1
)
# FFT contents of cseries into cfreq
cfreq = execute_cached_fft(cseries, uid=LFILTER_UNIQUE_ID_2,
copy_output=False,
normalize_by_rate=False)
# FFT contents of timeseries into tfreq
tfreq = execute_cached_fft(timeseries, uid=LFILTER_UNIQUE_ID_3,
copy_output=False,
normalize_by_rate=False)
cout, out, fft_class = ifftouts
# Correlate cfreq and tfreq
correlate(cfreq, tfreq, cout)
# IFFT correlation output into out
fft_class.execute()
return TimeSeries(out.numpy() / len(out), epoch=timeseries.start_time,
delta_t=timeseries.delta_t)
else:
        # recursively filter in chunks, which saves a bit on memory usage,
# but must keep within recursion limit
chunksize = max(fillen * 5, len(timeseries) // 128)
part1 = lfilter(coefficients, timeseries[0:chunksize])
part2 = lfilter(coefficients, timeseries[chunksize - fillen:])
out = timeseries.copy()
out[:len(part1)] = part1
out[len(part1):] = part2[fillen:]
return out
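# Minimal sketch (not part of the original module) showing that, away from the
# first len(coefficients) - 1 samples (which are affected by the circular
# wrap-around of the FFT-based path), lfilter agrees with the direct scipy
# implementation. The data below are arbitrary.
#
# >>> import numpy, scipy.signal
# >>> from pycbc.types import TimeSeries
# >>> taps = cached_firwin(31, 0.25)
# >>> data = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0/256)
# >>> ref = scipy.signal.lfilter(taps, 1.0, data.numpy())
# >>> numpy.allclose(lfilter(taps, data).numpy()[30:], ref[30:], atol=1e-7)
# True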
def fir_zero_filter(coeff, timeseries):
"""Filter the timeseries with a set of FIR coefficients
Parameters
----------
coeff: numpy.ndarray
        FIR coefficients. Should be an odd length and symmetric.
timeseries: pycbc.types.TimeSeries
Time series to be filtered.
Returns
-------
filtered_series: pycbc.types.TimeSeries
Return the filtered timeseries, which has been properly shifted to account
for the FIR filter delay and the corrupted regions zeroed out.
"""
# apply the filter
series = lfilter(coeff, timeseries)
# reverse the time shift caused by the filter,
# corruption regions contain zeros
# If the number of filter coefficients is odd, the central point *should*
# be included in the output so we only zero out a region of len(coeff) - 1
series[:(len(coeff) // 2) * 2] = 0
series.roll(-len(coeff)//2)
return series
def resample_to_delta_t(timeseries, delta_t, method='butterworth'):
"""Resmple the time_series to delta_t
Resamples the TimeSeries instance time_series to the given time step,
delta_t. Only powers of two and real valued time series are supported
at this time. Additional restrictions may apply to particular filter
methods.
Parameters
----------
time_series: TimeSeries
The time series to be resampled
delta_t: float
The desired time step
Returns
-------
Time Series: TimeSeries
A TimeSeries that has been resampled to delta_t.
Raises
------
TypeError:
time_series is not an instance of TimeSeries.
TypeError:
time_series is not real valued
Examples
--------
>>> h_plus_sampled = resample_to_delta_t(h_plus, 1.0/2048)
"""
if not isinstance(timeseries,TimeSeries):
raise TypeError("Can only resample time series")
if timeseries.kind != 'real':
raise TypeError("Time series must be real")
if timeseries.sample_rate_close(1.0 / delta_t):
return timeseries * 1
if method == 'butterworth':
lal_data = timeseries.lal()
_resample_func[timeseries.dtype](lal_data, delta_t)
data = lal_data.data.data
elif method == 'ldas':
factor = int(round(delta_t / timeseries.delta_t))
numtaps = factor * 20 + 1
        # The Kaiser window has been tested against the LDAS implementation
# and is in the same configuration as used in the original lalinspiral
filter_coefficients = cached_firwin(numtaps, 1.0 / factor,
window=('kaiser', 5))
# apply the filter and decimate
data = fir_zero_filter(filter_coefficients, timeseries)[::factor]
else:
raise ValueError('Invalid resampling method: %s' % method)
ts = TimeSeries(data, delta_t = delta_t,
dtype=timeseries.dtype,
epoch=timeseries._epoch)
# From the construction of the LDAS FIR filter there will be 10 corrupted samples
# explanation here http://software.ligo.org/docs/lalsuite/lal/group___resample_time_series__c.html
ts.corrupted_samples = 10
return ts
_highpass_func = {numpy.dtype('float32'): lal.HighPassREAL4TimeSeries,
numpy.dtype('float64'): lal.HighPassREAL8TimeSeries}
_lowpass_func = {numpy.dtype('float32'): lal.LowPassREAL4TimeSeries,
numpy.dtype('float64'): lal.LowPassREAL8TimeSeries}
def notch_fir(timeseries, f1, f2, order, beta=5.0):
""" notch filter the time series using an FIR filtered generated from
the ideal response passed through a time-domain kaiser window (beta = 5.0)
The suppression of the notch filter is related to the bandwidth and
the number of samples in the filter length. For a few Hz bandwidth,
a length corresponding to a few seconds is typically
required to create significant suppression in the notched band.
To achieve frequency resolution df at sampling frequency fs,
order should be at least fs/df.
Parameters
----------
Time Series: TimeSeries
The time series to be notched.
f1: float
The start of the frequency suppression.
f2: float
The end of the frequency suppression.
order: int
Number of corrupted samples on each side of the time series
(Extent of the filter on either side of zero)
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
"""
k1 = f1 / float((int(1.0 / timeseries.delta_t) / 2))
k2 = f2 / float((int(1.0 / timeseries.delta_t) / 2))
coeff = cached_firwin(order * 2 + 1, [k1, k2], window=('kaiser', beta))
return fir_zero_filter(coeff, timeseries)
def lowpass_fir(timeseries, frequency, order, beta=5.0):
""" Lowpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be low-passed.
frequency: float
        The frequency above which is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
"""
k = frequency / float((int(1.0 / timeseries.delta_t) / 2))
coeff = cached_firwin(order * 2 + 1, k, window=('kaiser', beta))
return fir_zero_filter(coeff, timeseries)
def highpass_fir(timeseries, frequency, order, beta=5.0):
""" Highpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be high-passed.
frequency: float
The frequency below which is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
"""
k = frequency / float((int(1.0 / timeseries.delta_t) / 2))
coeff = cached_firwin(order * 2 + 1, k, window=('kaiser', beta), pass_zero=False)
return fir_zero_filter(coeff, timeseries)
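# Illustrative usage (a sketch, not part of the original module): suppress
# content below 15 Hz in white noise sampled at 256 Hz; the numbers are
# arbitrary.
#
# >>> import numpy
# >>> from pycbc.types import TimeSeries
# >>> noise = TimeSeries(numpy.random.normal(size=16 * 256), delta_t=1.0/256)
# >>> hp = highpass_fir(noise, 15.0, 128)
# >>> len(hp) == len(noise)
# True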
def highpass(timeseries, frequency, filter_order=8, attenuation=0.1):
"""Return a new timeseries that is highpassed.
Return a new time series that is highpassed above the `frequency`.
Parameters
----------
Time Series: TimeSeries
The time series to be high-passed.
frequency: float
The frequency below which is suppressed.
filter_order: {8, int}, optional
The order of the filter to use when high-passing the time series.
attenuation: {0.1, float}, optional
The attenuation of the filter.
Returns
-------
Time Series: TimeSeries
A new TimeSeries that has been high-passed.
Raises
------
TypeError:
time_series is not an instance of TimeSeries.
TypeError:
time_series is not real valued
"""
if not isinstance(timeseries, TimeSeries):
raise TypeError("Can only resample time series")
if timeseries.kind != 'real':
raise TypeError("Time series must be real")
lal_data = timeseries.lal()
_highpass_func[timeseries.dtype](lal_data, frequency,
1-attenuation, filter_order)
return TimeSeries(lal_data.data.data, delta_t = lal_data.deltaT,
dtype=timeseries.dtype, epoch=timeseries._epoch)
def lowpass(timeseries, frequency, filter_order=8, attenuation=0.1):
"""Return a new timeseries that is lowpassed.
Return a new time series that is lowpassed below the `frequency`.
Parameters
----------
Time Series: TimeSeries
The time series to be low-passed.
frequency: float
The frequency above which is suppressed.
filter_order: {8, int}, optional
The order of the filter to use when low-passing the time series.
attenuation: {0.1, float}, optional
The attenuation of the filter.
Returns
-------
Time Series: TimeSeries
A new TimeSeries that has been low-passed.
Raises
------
TypeError:
time_series is not an instance of TimeSeries.
TypeError:
time_series is not real valued
"""
if not isinstance(timeseries, TimeSeries):
raise TypeError("Can only resample time series")
if timeseries.kind != 'real':
raise TypeError("Time series must be real")
lal_data = timeseries.lal()
_lowpass_func[timeseries.dtype](lal_data, frequency,
1-attenuation, filter_order)
return TimeSeries(lal_data.data.data, delta_t = lal_data.deltaT,
dtype=timeseries.dtype, epoch=timeseries._epoch)
def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'):
"""Interpolate complex frequency series to desired delta_f.
Return a new complex frequency series that has been interpolated to the
desired delta_f.
Parameters
----------
series : FrequencySeries
Frequency series to be interpolated.
delta_f : float
The desired delta_f of the output
zeros_offset : optional, {0, int}
        Number of samples to delay the start of the zero padding
side : optional, {'right', str}
The side of the vector to zero pad
Returns
-------
interpolated series : FrequencySeries
A new FrequencySeries that has been interpolated.
"""
new_n = int( (len(series)-1) * series.delta_f / delta_f + 1)
old_N = int( (len(series)-1) * 2 )
new_N = int( (new_n - 1) * 2 )
time_series = TimeSeries(zeros(old_N), delta_t =1.0/(series.delta_f*old_N),
dtype=real_same_precision_as(series))
ifft(series, time_series)
time_series.roll(-zeros_offset)
time_series.resize(new_N)
if side == 'left':
time_series.roll(zeros_offset + new_N - old_N)
elif side == 'right':
time_series.roll(zeros_offset)
out_series = FrequencySeries(zeros(new_n), epoch=series.epoch,
delta_f=delta_f, dtype=series.dtype)
fft(time_series, out_series)
return out_series
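# A minimal sketch of interpolating the FFT of a short real time series onto
# a grid with half the original delta_f; only numpy and pycbc.types are
# assumed, both already used in this module.
def _example_interpolate():
    import numpy
    ts = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0 / 4096)
    fs = ts.to_frequencyseries()       # delta_f = 1 Hz for 1 s of data
    return interpolate_complex_frequency(fs, fs.delta_f / 2.0)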
__all__ = ['resample_to_delta_t', 'highpass', 'lowpass',
'interpolate_complex_frequency', 'highpass_fir',
'lowpass_fir', 'notch_fir', 'fir_zero_filter']
| 15,735
| 34.361798
| 102
|
py
|
pycbc
|
pycbc-master/pycbc/filter/matchedfilter_cuda.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
from pycuda.elementwise import ElementwiseKernel
from pycuda.tools import context_dependent_memoize
from pycuda.tools import dtype_to_ctype
from pycuda.gpuarray import _get_common_dtype
from .matchedfilter import _BaseCorrelator
@context_dependent_memoize
def get_correlate_kernel(dtype_x, dtype_y, dtype_out):
return ElementwiseKernel(
"%(tp_x)s *x, %(tp_y)s *y, %(tp_z)s *z" % {
"tp_x": dtype_to_ctype(dtype_x),
"tp_y": dtype_to_ctype(dtype_y),
"tp_z": dtype_to_ctype(dtype_out),
},
"z[i] = conj(x[i]) * y[i]",
"correlate")
def correlate(a, b, out, stream=None):
dtype_out = _get_common_dtype(a,b)
krnl = get_correlate_kernel(a.dtype, b.dtype, dtype_out)
krnl(a.data, b.data, out.data)
class CUDACorrelator(_BaseCorrelator):
def __init__(self, x, y, z):
self.x = x.data
self.y = y.data
self.z = z.data
dtype_out = _get_common_dtype(x, y)
self.krnl = get_correlate_kernel(x.dtype, y.dtype, dtype_out)
def correlate(self):
self.krnl(self.x, self.y, self.z)
def _correlate_factory(x, y, z):
return CUDACorrelator
| 2,147
| 34.213115
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/filter/matchedfilter_numpy.py
|
# Copyright (C) 2017 Ian Harry
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import numpy
def correlate(x, y, z):
z.data[:] = numpy.conjugate(x.data)[:]
z *= y
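# A plain-numpy sketch of what the function above computes; the real function
# operates on pycbc Array-like objects through their .data attribute.
def _example_correlate():
    x = numpy.array([1 + 2j, 3 - 1j], dtype=numpy.complex64)
    y = numpy.array([2 + 0j, 1 + 1j], dtype=numpy.complex64)
    z = numpy.conjugate(x) * y         # z[i] = conj(x[i]) * y[i]
    return z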
| 830
| 38.571429
| 75
|
py
|
pycbc
|
pycbc-master/pycbc/filter/autocorrelation.py
|
# Copyright (C) 2016 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for calculating the autocorrelation function
and length of a data series.
"""
import numpy
from pycbc.filter.matchedfilter import correlate
from pycbc.types import FrequencySeries, TimeSeries, zeros
def calculate_acf(data, delta_t=1.0, unbiased=False):
r"""Calculates the one-sided autocorrelation function.
Calculates the autocorrelation function (ACF) and returns the one-sided
ACF. The ACF is defined as the autocovariance divided by the variance. The
ACF can be estimated using
.. math::
\hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right)
Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at
time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is
the variance of :math:`X_{t}`.
Parameters
-----------
data : TimeSeries or numpy.array
A TimeSeries or numpy.array of data.
delta_t : float
The time step of the data series if it is not a TimeSeries instance.
unbiased : bool
If True the normalization of the autocovariance function is n-k
instead of n. This is called the unbiased estimation of the
autocovariance. Note that this does not mean the ACF is unbiased.
Returns
-------
acf : numpy.array
If data is a TimeSeries then acf will be a TimeSeries of the
one-sided ACF. Else acf is a numpy.array.
"""
# if given a TimeSeries instance then get numpy.array
if isinstance(data, TimeSeries):
y = data.numpy()
delta_t = data.delta_t
else:
y = data
# Zero mean
y = y - y.mean()
ny_orig = len(y)
npad = 1
while npad < 2*ny_orig:
npad = npad << 1
ypad = numpy.zeros(npad)
ypad[:ny_orig] = y
# FFT data minus the mean
fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries()
# correlate
# do not need to take the conjugate since the correlate function does it
cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype),
delta_f=fdata.delta_f, copy=False)
correlate(fdata, fdata, cdata)
# IFFT correlated data to get unnormalized autocovariance time series
acf = cdata.to_timeseries()
acf = acf[:ny_orig]
# normalize the autocovariance
# note that dividing by acf[0] is the same as ( y.var() * len(acf) )
if unbiased:
acf /= ( y.var() * numpy.arange(len(acf), 0, -1) )
else:
acf /= acf[0]
# return input datatype
if isinstance(data, TimeSeries):
return TimeSeries(acf, delta_t=delta_t)
else:
return acf
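# A short usage sketch: the ACF of white noise is ~1 at zero lag and ~0 at
# other lags. Only numpy is assumed, which is already imported above.
def _example_acf():
    noise = numpy.random.normal(size=4096)
    acf = calculate_acf(noise, delta_t=1.0)
    return acf[0], acf[1:10]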
def calculate_acl(data, m=5, dtype=int):
r"""Calculates the autocorrelation length (ACL).
Given a normalized autocorrelation function :math:`\rho[i]` (by normalized,
we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is:
.. math::
\tau = 1 + 2 \sum_{i=1}^{K} \rho[i].
The number of samples used :math:`K` is found by using the first point
such that:
.. math::
m \tau[K] \leq K,
where :math:`m` is a tuneable parameter (default = 5). If no such point
exists, then the given data set is too short to estimate the ACL; in this
case ``inf`` is returned.
This algorithm for computing the ACL is taken from:
N. Madras and A.D. Sokal, J. Stat. Phys. 50, 109 (1988).
Parameters
-----------
data : TimeSeries or array
A TimeSeries of data.
m : int
The number of autocorrelation lengths to use for determining the window
size :math:`K` (see above).
dtype : int or float
The datatype of the output. If the dtype was set to int, then the
ceiling is returned.
Returns
-------
acl : int or float
The autocorrelation length. If the ACL cannot be estimated, returns
``numpy.inf``.
"""
# sanity check output data type
if dtype not in [int, float]:
raise ValueError("The dtype must be either int or float.")
# if we have only a single point, just return 1
if len(data) < 2:
return 1
# calculate ACF that is normalized by the zero-lag value
acf = calculate_acf(data)
cacf = 2 * acf.numpy().cumsum() - 1
win = m * cacf <= numpy.arange(len(cacf))
if win.any():
acl = cacf[numpy.where(win)[0][0]]
if dtype == int:
acl = int(numpy.ceil(acl))
else:
acl = numpy.inf
return acl
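# A short usage sketch: a correlated AR(1) chain has a much longer ACL than
# white noise. Only numpy is assumed, which is already imported above.
def _example_acl():
    noise = numpy.random.normal(size=8192)
    chain = numpy.zeros(8192)
    for i in range(1, 8192):
        chain[i] = 0.9 * chain[i - 1] + noise[i]
    return calculate_acl(noise), calculate_acl(chain)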
| 5,472
| 30.635838
| 116
|
py
|
pycbc
|
pycbc-master/pycbc/filter/simd_correlate.py
|
# Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from pycbc.types import float32, complex64
import numpy as _np
from .. import opt
from .simd_correlate_cython import ccorrf_simd, ccorrf_parallel
"""
This module interfaces to C functions for multiplying
the complex conjugate of one vector by a second vector, writing the output
to a third vector. They do this multi-threaded and with SIMD vectorization.
The functions defined here, and the code that calls them,
are imported and used in the CPUCorrelator class defined in
matchedfilter_cpu.py.
Two functions are defined in the 'support' C/Cython module:
ccorrf_simd: Runs on a single core, but vectorized
ccorrf_parallel: Runs multicore, but not explicitly vectorized.
Parallelized using OpenMP, and calls ccorrf_simd
"""
def correlate_simd(ht, st, qt):
htilde = _np.array(ht.data, copy=False, dtype=float32)
stilde = _np.array(st.data, copy=False, dtype=float32)
qtilde = _np.array(qt.data, copy=False, dtype=float32)
arrlen = len(htilde)
ccorrf_simd(htilde, stilde, qtilde, arrlen)
# We need a segment size (number of complex elements) such that *three* segments
# of that size will fit in the L2 cache. We also want it to be a power of two.
# We are dealing with single-precision complex numbers, which each require 8 bytes.
#
# Our kernel is written to assume a complex correlation of single-precision vectors,
# so that's all we support here. Note that we are assuming that the correct target
# is that the vectors should fit in L2 cache. Figuring out cache topology dynamically
# is a harder problem than we attempt to solve here.
if opt.HAVE_GETCONF:
# Since we need 3 vectors fitting in L2 cache, divide by 3
# We find the nearest power-of-two that fits, and the length
# of the single-precision complex array that fits into that size.
pow2 = int(_np.log(opt.LEVEL2_CACHE_SIZE/3.0)/_np.log(2.0))
default_segsize = pow(2, pow2)/_np.dtype(_np.complex64).itemsize
else:
# Seems to work for Sandy Bridge/Ivy Bridge/Haswell, for now?
default_segsize = 8192
def correlate_parallel(ht, st, qt):
htilde = _np.array(ht.data, copy=False, dtype=complex64)
stilde = _np.array(st.data, copy=False, dtype=complex64)
qtilde = _np.array(qt.data, copy=False, dtype=complex64)
arrlen = len(htilde)
segsize = default_segsize
ccorrf_parallel(htilde, stilde, qtilde, arrlen, segsize)
| 3,116
| 42.291667
| 86
|
py
|
pycbc
|
pycbc-master/pycbc/filter/__init__.py
|
from .matchedfilter import *
from .resample import *
| 53
| 17
| 28
|
py
|
pycbc
|
pycbc-master/pycbc/filter/matchedfilter.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for matched filtering along with associated
utilities.
"""
import logging
from math import sqrt
from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
from pycbc.types import complex_same_precision_as, real_same_precision_as
from pycbc.fft import fft, ifft, IFFT
import pycbc.scheme
from pycbc import events
from pycbc.events import ranking
import pycbc
import numpy
BACKEND_PREFIX="pycbc.filter.matchedfilter_"
@pycbc.scheme.schemed(BACKEND_PREFIX)
def correlate(x, y, z):
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
class BatchCorrelator(object):
""" Create a batch correlation engine
"""
def __init__(self, xs, zs, size):
""" Correlate x and y, store in z. Arrays need not be equal length, but
must be at least size long and of the same dtype. No error checking
will be performed, so be careful. All dtypes must be complex64.
Note, must be created within the processing context that it will be used in.
"""
self.size = int(size)
self.dtype = xs[0].dtype
self.num_vectors = len(xs)
# keep reference to arrays
self.xs = xs
self.zs = zs
# Store each pointer as an integer array
self.x = Array([v.ptr for v in xs], dtype=int)
self.z = Array([v.ptr for v in zs], dtype=int)
@pycbc.scheme.schemed(BACKEND_PREFIX)
def batch_correlate_execute(self, y):
pass
execute = batch_correlate_execute
@pycbc.scheme.schemed(BACKEND_PREFIX)
def _correlate_factory(x, y, z):
err_msg = "This class is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
class Correlator(object):
""" Create a correlator engine
Parameters
----------
x : complex64
Input pycbc.types.Array (or subclass); it will be conjugated
y : complex64
Input pycbc.types.Array (or subclass); it will not be conjugated
z : complex64
Output pycbc.types.Array (or subclass).
It will contain conj(x) * y, element by element
The addresses in memory of the data of all three parameter vectors
must be the same modulo pycbc.PYCBC_ALIGNMENT
"""
def __new__(cls, *args, **kwargs):
real_cls = _correlate_factory(*args, **kwargs)
return real_cls(*args, **kwargs) # pylint:disable=not-callable
# The class below should serve as the parent for all schemed classes.
# The intention is that this class serves simply as the location for
# all documentation of the class and its methods, though that is not
# yet implemented. Perhaps something along the lines of:
#
# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance
#
# will work? Is there a better way?
class _BaseCorrelator(object):
def correlate(self):
"""
Compute the correlation of the vectors specified at object
instantiation, writing into the output vector given when the
object was instantiated. The intention is that this method
should be called many times, with the contents of those vectors
changing between invocations, but not their locations in memory
or length.
"""
pass
class MatchedFilterControl(object):
def __init__(self, low_frequency_cutoff, high_frequency_cutoff, snr_threshold, tlen,
delta_f, dtype, segment_list, template_output, use_cluster,
downsample_factor=1, upsample_threshold=1, upsample_method='pruned_fft',
gpu_callback_method='none', cluster_function='symmetric'):
""" Create a matched filter engine.
Parameters
----------
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
Nyquist frequency.
snr_threshold : float
The minimum snr to return when filtering
segment_list : list
List of FrequencySeries that are the Fourier-transformed data segments
template_output : complex64
Array of memory given as the 'out' parameter to waveform.FilterBank
use_cluster : boolean
If true, cluster triggers above threshold using a window; otherwise,
only apply a threshold.
downsample_factor : {1, int}, optional
The factor by which to reduce the sample rate when doing a hierarchical
matched filter
upsample_threshold : {1, float}, optional
The fraction of the snr_threshold to trigger on the subsampled filter.
upsample_method : {pruned_fft, str}
The method to upsample or interpolate the reduced rate filter.
cluster_function : {symmetric, str}, optional
Which method is used to cluster triggers over time. If 'findchirp', a
sliding forward window; if 'symmetric', each window's peak is compared
to the windows before and after it, and only kept as a trigger if larger
than both.
"""
# Assuming analysis time is constant across templates and segments, also
# delta_f is constant across segments.
self.tlen = tlen
self.flen = self.tlen / 2 + 1
self.delta_f = delta_f
self.delta_t = 1.0/(self.delta_f * self.tlen)
self.dtype = dtype
self.snr_threshold = snr_threshold
self.flow = low_frequency_cutoff
self.fhigh = high_frequency_cutoff
self.gpu_callback_method = gpu_callback_method
if cluster_function not in ['symmetric', 'findchirp']:
raise ValueError("MatchedFilter: 'cluster_function' must be either 'symmetric' or 'findchirp'")
self.cluster_function = cluster_function
self.segments = segment_list
self.htilde = template_output
if downsample_factor == 1:
self.snr_mem = zeros(self.tlen, dtype=self.dtype)
self.corr_mem = zeros(self.tlen, dtype=self.dtype)
if use_cluster and (cluster_function == 'symmetric'):
self.matched_filter_and_cluster = self.full_matched_filter_and_cluster_symm
# set up the thresholding/clustering operations for each segment
self.threshold_and_clusterers = []
for seg in self.segments:
thresh = events.ThresholdCluster(self.snr_mem[seg.analyze])
self.threshold_and_clusterers.append(thresh)
elif use_cluster and (cluster_function == 'findchirp'):
self.matched_filter_and_cluster = self.full_matched_filter_and_cluster_fc
else:
self.matched_filter_and_cluster = self.full_matched_filter_thresh_only
# Assuming analysis time is constant across templates and segments, also
# delta_f is constant across segments.
self.kmin, self.kmax = get_cutoff_indices(self.flow, self.fhigh,
self.delta_f, self.tlen)
# Set up the correlation operations for each analysis segment
corr_slice = slice(self.kmin, self.kmax)
self.correlators = []
for seg in self.segments:
corr = Correlator(self.htilde[corr_slice],
seg[corr_slice],
self.corr_mem[corr_slice])
self.correlators.append(corr)
# set up the ifft we will do
self.ifft = IFFT(self.corr_mem, self.snr_mem)
elif downsample_factor >= 1:
self.matched_filter_and_cluster = self.hierarchical_matched_filter_and_cluster
self.downsample_factor = downsample_factor
self.upsample_method = upsample_method
self.upsample_threshold = upsample_threshold
N_full = self.tlen
N_red = N_full / downsample_factor
self.kmin_full, self.kmax_full = get_cutoff_indices(self.flow,
self.fhigh, self.delta_f, N_full)
self.kmin_red, _ = get_cutoff_indices(self.flow,
self.fhigh, self.delta_f, N_red)
if self.kmax_full < N_red:
self.kmax_red = self.kmax_full
else:
self.kmax_red = N_red - 1
self.snr_mem = zeros(N_red, dtype=self.dtype)
self.corr_mem_full = FrequencySeries(zeros(N_full, dtype=self.dtype), delta_f=self.delta_f)
self.corr_mem = Array(self.corr_mem_full[0:N_red], copy=False)
self.inter_vec = zeros(N_full, dtype=self.dtype)
else:
raise ValueError("Invalid downsample factor")
def full_matched_filter_and_cluster_symm(self, segnum, template_norm, window, epoch=None):
""" Returns the complex snr timeseries, normalization of the complex snr,
the correlation vector frequency series, the list of indices of the
triggers, and the snr values at the trigger locations. Returns empty
lists for these for points that are not above the threshold.
Calculates the matched filter, thresholds, and clusters.
Parameters
----------
segnum : int
Index into the list of segments at MatchedFilterControl construction
against which to filter.
template_norm : float
The htilde, template normalization factor.
window : int
Size of the window over which to cluster triggers, in samples
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
norm : float
The normalization of the complex snr.
correlation: FrequencySeries
A frequency series containing the correlation vector.
idx : Array
List of indices of the triggers.
snrv : Array
The snr values at the trigger locations.
"""
norm = (4.0 * self.delta_f) / sqrt(template_norm)
self.correlators[segnum].correlate()
self.ifft.execute()
snrv, idx = self.threshold_and_clusterers[segnum].threshold_and_cluster(self.snr_threshold / norm, window)
if len(idx) == 0:
return [], [], [], [], []
logging.info("%s points above threshold" % str(len(idx)))
snr = TimeSeries(self.snr_mem, epoch=epoch, delta_t=self.delta_t, copy=False)
corr = FrequencySeries(self.corr_mem, delta_f=self.delta_f, copy=False)
return snr, norm, corr, idx, snrv
def full_matched_filter_and_cluster_fc(self, segnum, template_norm, window, epoch=None):
""" Returns the complex snr timeseries, normalization of the complex snr,
the correlation vector frequency series, the list of indices of the
triggers, and the snr values at the trigger locations. Returns empty
lists for these for points that are not above the threshold.
Calculates the matched filter, thresholds, and clusters.
Parameters
----------
segnum : int
Index into the list of segments at MatchedFilterControl construction
against which to filter.
template_norm : float
The htilde, template normalization factor.
window : int
Size of the window over which to cluster triggers, in samples
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
norm : float
The normalization of the complex snr.
correlation: FrequencySeries
A frequency series containing the correlation vector.
idx : Array
List of indices of the triggers.
snrv : Array
The snr values at the trigger locations.
"""
norm = (4.0 * self.delta_f) / sqrt(template_norm)
self.correlators[segnum].correlate()
self.ifft.execute()
idx, snrv = events.threshold(self.snr_mem[self.segments[segnum].analyze],
self.snr_threshold / norm)
idx, snrv = events.cluster_reduce(idx, snrv, window)
if len(idx) == 0:
return [], [], [], [], []
logging.info("%s points above threshold" % str(len(idx)))
snr = TimeSeries(self.snr_mem, epoch=epoch, delta_t=self.delta_t, copy=False)
corr = FrequencySeries(self.corr_mem, delta_f=self.delta_f, copy=False)
return snr, norm, corr, idx, snrv
def full_matched_filter_thresh_only(self, segnum, template_norm, window=None, epoch=None):
""" Returns the complex snr timeseries, normalization of the complex snr,
the correlation vector frequency series, the list of indices of the
triggers, and the snr values at the trigger locations. Returns empty
lists for these for points that are not above the threshold.
Calculates the matched filter, thresholds, and clusters.
Parameters
----------
segnum : int
Index into the list of segments at MatchedFilterControl construction
against which to filter.
template_norm : float
The htilde, template normalization factor.
window : int
Size of the window over which to cluster triggers, in samples.
This is IGNORED by this function, and provided only for API compatibility.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
norm : float
The normalization of the complex snr.
correlation: FrequencySeries
A frequency series containing the correlation vector.
idx : Array
List of indices of the triggers.
snrv : Array
The snr values at the trigger locations.
"""
norm = (4.0 * self.delta_f) / sqrt(template_norm)
self.correlators[segnum].correlate()
self.ifft.execute()
idx, snrv = events.threshold_only(self.snr_mem[self.segments[segnum].analyze],
self.snr_threshold / norm)
logging.info("%s points above threshold" % str(len(idx)))
snr = TimeSeries(self.snr_mem, epoch=epoch, delta_t=self.delta_t, copy=False)
corr = FrequencySeries(self.corr_mem, delta_f=self.delta_f, copy=False)
return snr, norm, corr, idx, snrv
def hierarchical_matched_filter_and_cluster(self, segnum, template_norm, window):
""" Returns the complex snr timeseries, normalization of the complex snr,
the correlation vector frequency series, the list of indices of the
triggers, and the snr values at the trigger locations. Returns empty
lists for these for points that are not above the threshold.
Calculates the matched filter, thresholds, and clusters.
Parameters
----------
segnum : int
Index into the list of segments at MatchedFilterControl construction
template_norm : float
The htilde, template normalization factor.
window : int
Size of the window over which to cluster triggers, in samples
Returns
-------
snr : TimeSeries
A time series containing the complex snr at the reduced sample rate.
norm : float
The normalization of the complex snr.
correlation: FrequencySeries
A frequency series containing the correlation vector.
idx : Array
List of indices of the triggers.
snrv : Array
The snr values at the trigger locations.
"""
from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose
htilde = self.htilde
stilde = self.segments[segnum]
norm = (4.0 * stilde.delta_f) / sqrt(template_norm)
correlate(htilde[self.kmin_red:self.kmax_red],
stilde[self.kmin_red:self.kmax_red],
self.corr_mem[self.kmin_red:self.kmax_red])
ifft(self.corr_mem, self.snr_mem)
if not hasattr(stilde, 'red_analyze'):
stilde.red_analyze = \
slice(stilde.analyze.start/self.downsample_factor,
stilde.analyze.stop/self.downsample_factor)
idx_red, snrv_red = events.threshold(self.snr_mem[stilde.red_analyze],
self.snr_threshold / norm * self.upsample_threshold)
if len(idx_red) == 0:
return [], None, [], [], []
idx_red, _ = events.cluster_reduce(idx_red, snrv_red, window / self.downsample_factor)
logging.info("%s points above threshold at reduced resolution"\
%(str(len(idx_red)),))
# The fancy upsampling is here
if self.upsample_method=='pruned_fft':
idx = (idx_red + stilde.analyze.start/self.downsample_factor)\
* self.downsample_factor
idx = smear(idx, self.downsample_factor)
# cache transposed versions of htilde and stilde
if not hasattr(self.corr_mem_full, 'transposed'):
self.corr_mem_full.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
if not hasattr(htilde, 'transposed'):
htilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
htilde.transposed[self.kmin_full:self.kmax_full] = htilde[self.kmin_full:self.kmax_full]
htilde.transposed = fft_transpose(htilde.transposed)
if not hasattr(stilde, 'transposed'):
stilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
stilde.transposed[self.kmin_full:self.kmax_full] = stilde[self.kmin_full:self.kmax_full]
stilde.transposed = fft_transpose(stilde.transposed)
correlate(htilde.transposed, stilde.transposed, self.corr_mem_full.transposed)
snrv = pruned_c2cifft(self.corr_mem_full.transposed, self.inter_vec, idx, pretransposed=True)
idx = idx - stilde.analyze.start
idx2, snrv = events.threshold(Array(snrv, copy=False), self.snr_threshold / norm)
if len(idx2) > 0:
correlate(htilde[self.kmax_red:self.kmax_full],
stilde[self.kmax_red:self.kmax_full],
self.corr_mem_full[self.kmax_red:self.kmax_full])
idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
else:
idx, snrv = [], []
logging.info("%s points at full rate and clustering" % len(idx))
return self.snr_mem, norm, self.corr_mem_full, idx, snrv
else:
raise ValueError("Invalid upsample method")
def compute_max_snr_over_sky_loc_stat(hplus, hcross, hphccorr,
hpnorm=None, hcnorm=None,
out=None, thresh=0,
analyse_slice=None):
"""
Matched filter maximised over polarization and orbital phase.
This implements the statistic derived in 1603.02444. It is encouraged
to read that work to understand the limitations and assumptions implicit
in this statistic before using it.
Parameters
-----------
hplus : TimeSeries
This is the IFFTed complex SNR time series of (h+, data). If not
normalized, supply the normalization factor so this can be done!
It is recommended to normalize this before sending through this
function
hcross : TimeSeries
This is the IFFTed complex SNR time series of (hx, data). If not
normalized, supply the normalization factor so this can be done!
hphccorr : float
The real component of the overlap between the two polarizations
Re[(h+, hx)]. Note that the imaginary component does not enter the
detection statistic. This must be normalized and is sign-sensitive.
thresh : float
Used for optimization. If we do not care about the value of SNR
values below thresh we can calculate a quick statistic that will
always overestimate SNR and then only calculate the proper, more
expensive, statistic at points where the quick SNR is above thresh.
hpnorm : float
The normalization factor to apply to hplus. Default = None (=1,
already normalized)
hcnorm : float
The normalization factor to apply to hcross. Default = None (=1,
already normalized)
out : TimeSeries (optional, default=None)
If given, use this array to store the output.
Returns
--------
det_stat : TimeSeries
The SNR maximized over sky location
"""
# NOTE: Not much optimization has been done here! This may need to be
# Cythonized.
if out is None:
out = zeros(len(hplus))
out.non_zero_locs = numpy.array([], dtype=out.dtype)
else:
if not hasattr(out, 'non_zero_locs'):
# Doing this every time is not a zero-cost operation
out.data[:] = 0
out.non_zero_locs = numpy.array([], dtype=out.dtype)
else:
# Only set non zero locations to zero
out.data[out.non_zero_locs] = 0
# If threshold is given we can limit the points at which to compute the
# full statistic
if thresh:
# This is the statistic that always overestimates the SNR...
# It allows some unphysical freedom that the full statistic does not
idx_p, _ = events.threshold_only(hplus[analyse_slice],
thresh / (2**0.5 * hpnorm))
idx_c, _ = events.threshold_only(hcross[analyse_slice],
thresh / (2**0.5 * hcnorm))
idx_p = idx_p + analyse_slice.start
idx_c = idx_c + analyse_slice.start
hp_red = hplus[idx_p] * hpnorm
hc_red = hcross[idx_p] * hcnorm
stat_p = hp_red.real**2 + hp_red.imag**2 + \
hc_red.real**2 + hc_red.imag**2
locs_p = idx_p[stat_p > (thresh*thresh)]
hp_red = hplus[idx_c] * hpnorm
hc_red = hcross[idx_c] * hcnorm
stat_c = hp_red.real**2 + hp_red.imag**2 + \
hc_red.real**2 + hc_red.imag**2
locs_c = idx_c[stat_c > (thresh*thresh)]
locs = numpy.unique(numpy.concatenate((locs_p, locs_c)))
hplus = hplus[locs]
hcross = hcross[locs]
hplus = hplus * hpnorm
hcross = hcross * hcnorm
# Calculate and sanity check the denominator
denom = 1 - hphccorr*hphccorr
if denom < 0:
if hphccorr > 1:
err_msg = "Overlap between hp and hc is given as %f. " %(hphccorr)
err_msg += "How can an overlap be bigger than 1?"
raise ValueError(err_msg)
else:
err_msg = "There really is no way to raise this error!?! "
err_msg += "If you're seeing this, it is bad."
raise ValueError(err_msg)
if denom == 0:
# This case, of hphccorr==1, makes the statistic degenerate
# This case should not physically be possible luckily.
err_msg = "You have supplied a real overlap between hp and hc of 1. "
err_msg += "Ian is reasonably certain this is physically impossible "
err_msg += "so why are you seeing this?"
raise ValueError(err_msg)
assert(len(hplus) == len(hcross))
# Now the stuff where comp. cost may be a problem
hplus_magsq = numpy.real(hplus) * numpy.real(hplus) + \
numpy.imag(hplus) * numpy.imag(hplus)
hcross_magsq = numpy.real(hcross) * numpy.real(hcross) + \
numpy.imag(hcross) * numpy.imag(hcross)
rho_pluscross = numpy.real(hplus) * numpy.real(hcross) + numpy.imag(hplus)*numpy.imag(hcross)
sqroot = (hplus_magsq - hcross_magsq)**2
sqroot += 4 * (hphccorr * hplus_magsq - rho_pluscross) * \
(hphccorr * hcross_magsq - rho_pluscross)
# Sometimes this can be less than 0 due to numeric imprecision, catch this.
if (sqroot < 0).any():
indices = numpy.arange(len(sqroot))[sqroot < 0]
# This should not be *much* smaller than 0 due to numeric imprecision
if (sqroot[indices] < -0.0001).any():
err_msg = "Square root has become negative. Something wrong here!"
raise ValueError(err_msg)
sqroot[indices] = 0
sqroot = numpy.sqrt(sqroot)
det_stat_sq = 0.5 * (hplus_magsq + hcross_magsq - \
2 * rho_pluscross*hphccorr + sqroot) / denom
det_stat = numpy.sqrt(det_stat_sq)
if thresh:
out.data[locs] = det_stat
out.non_zero_locs = locs
return out
else:
return Array(det_stat, copy=False)
def compute_u_val_for_sky_loc_stat(hplus, hcross, hphccorr,
hpnorm=None, hcnorm=None, indices=None):
"""The max-over-sky location detection statistic maximizes over a phase,
an amplitude and the ratio of F+ and Fx, encoded in a variable called u.
Here we return the value of u for the given indices.
"""
if indices is not None:
hplus = hplus[indices]
hcross = hcross[indices]
if hpnorm is not None:
hplus = hplus * hpnorm
if hcnorm is not None:
hcross = hcross * hcnorm
# Sanity checking in func. above should already have identified any points
# which are bad, and should be used to construct indices for input here
hplus_magsq = numpy.real(hplus) * numpy.real(hplus) + \
numpy.imag(hplus) * numpy.imag(hplus)
hcross_magsq = numpy.real(hcross) * numpy.real(hcross) + \
numpy.imag(hcross) * numpy.imag(hcross)
rho_pluscross = numpy.real(hplus) * numpy.real(hcross) + \
numpy.imag(hplus)*numpy.imag(hcross)
a = hphccorr * hplus_magsq - rho_pluscross
b = hplus_magsq - hcross_magsq
c = rho_pluscross - hphccorr * hcross_magsq
sq_root = b*b - 4*a*c
sq_root = sq_root**0.5
sq_root = -sq_root
# Catch the a->0 case
bad_lgc = (a == 0)
dbl_bad_lgc = numpy.logical_and(c == 0, b == 0)
dbl_bad_lgc = numpy.logical_and(bad_lgc, dbl_bad_lgc)
# Initialize u
u = sq_root * 0.
# In this case u is completely degenerate, so set it to 1
u[dbl_bad_lgc] = 1.
# If a->0 avoid overflow by just setting to a large value
u[bad_lgc & ~dbl_bad_lgc] = 1E17
# Otherwise normal statistic
u[~bad_lgc] = (-b[~bad_lgc] + sq_root[~bad_lgc]) / (2*a[~bad_lgc])
snr_cplx = hplus * u + hcross
coa_phase = numpy.angle(snr_cplx)
return u, coa_phase
def compute_max_snr_over_sky_loc_stat_no_phase(hplus, hcross, hphccorr,
hpnorm=None, hcnorm=None,
out=None, thresh=0,
analyse_slice=None):
"""
Matched filter maximised over polarization phase.
This implements the statistic derived in 1709.09181. It is encouraged
to read that work to understand the limitations and assumptions implicit
in this statistic before using it.
In contrast to compute_max_snr_over_sky_loc_stat this function
performs no maximization over orbital phase, treating that as an intrinsic
parameter. In the case of aligned-spin 2,2-mode only waveforms, this
collapses to the normal statistic (at twice the computational cost!)
Parameters
-----------
hplus : TimeSeries
This is the IFFTed complex SNR time series of (h+, data). If not
normalized, supply the normalization factor so this can be done!
It is recommended to normalize this before sending through this
function
hcross : TimeSeries
This is the IFFTed complex SNR time series of (hx, data). If not
normalized, supply the normalization factor so this can be done!
hphccorr : float
The real component of the overlap between the two polarizations
Re[(h+, hx)]. Note that the imaginary component does not enter the
detection statistic. This must be normalized and is sign-sensitive.
thresh : float
Used for optimization. If we do not care about the value of SNR
values below thresh we can calculate a quick statistic that will
always overestimate SNR and then only calculate the proper, more
expensive, statistic at points where the quick SNR is above thresh.
hpnorm : float
The normalization factor to apply to hplus. Default = None (=1,
already normalized)
hcnorm : float
The normalization factor to apply to hcross. Default = None (=1,
already normalized)
out : TimeSeries (optional, default=None)
If given, use this array to store the output.
Returns
--------
det_stat : TimeSeries
The SNR maximized over sky location
"""
# NOTE: Not much optimization has been done here! This may need to be
# Cythonized.
if out is None:
out = zeros(len(hplus))
out.non_zero_locs = numpy.array([], dtype=out.dtype)
else:
if not hasattr(out, 'non_zero_locs'):
# Doing this every time is not a zero-cost operation
out.data[:] = 0
out.non_zero_locs = numpy.array([], dtype=out.dtype)
else:
# Only set non zero locations to zero
out.data[out.non_zero_locs] = 0
# If threshold is given we can limit the points at which to compute the
# full statistic
if thresh:
# This is the statistic that always overestimates the SNR...
# It allows some unphysical freedom that the full statistic does not
#
# For now this is copied from the max-over-phase statistic. One could
# probably make this faster by removing the imaginary components of
# the matched filter, as these are not used here.
idx_p, _ = events.threshold_only(hplus[analyse_slice],
thresh / (2**0.5 * hpnorm))
idx_c, _ = events.threshold_only(hcross[analyse_slice],
thresh / (2**0.5 * hcnorm))
idx_p = idx_p + analyse_slice.start
idx_c = idx_c + analyse_slice.start
hp_red = hplus[idx_p] * hpnorm
hc_red = hcross[idx_p] * hcnorm
stat_p = hp_red.real**2 + hp_red.imag**2 + \
hc_red.real**2 + hc_red.imag**2
locs_p = idx_p[stat_p > (thresh*thresh)]
hp_red = hplus[idx_c] * hpnorm
hc_red = hcross[idx_c] * hcnorm
stat_c = hp_red.real**2 + hp_red.imag**2 + \
hc_red.real**2 + hc_red.imag**2
locs_c = idx_c[stat_c > (thresh*thresh)]
locs = numpy.unique(numpy.concatenate((locs_p, locs_c)))
hplus = hplus[locs]
hcross = hcross[locs]
hplus = hplus * hpnorm
hcross = hcross * hcnorm
# Calculate and sanity check the denominator
denom = 1 - hphccorr*hphccorr
if denom < 0:
if hphccorr > 1:
err_msg = "Overlap between hp and hc is given as %f. " %(hphccorr)
err_msg += "How can an overlap be bigger than 1?"
raise ValueError(err_msg)
else:
err_msg = "There really is no way to raise this error!?! "
err_msg += "If you're seeing this, it is bad."
raise ValueError(err_msg)
if denom == 0:
# This case, of hphccorr==1, makes the statistic degenerate
# This case should not physically be possible luckily.
err_msg = "You have supplied a real overlap between hp and hc of 1. "
err_msg += "Ian is reasonably certain this is physically impossible "
err_msg += "so why are you seeing this?"
raise ValueError(err_msg)
assert(len(hplus) == len(hcross))
# Now the stuff where comp. cost may be a problem
hplus_magsq = numpy.real(hplus) * numpy.real(hplus)
hcross_magsq = numpy.real(hcross) * numpy.real(hcross)
rho_pluscross = numpy.real(hplus) * numpy.real(hcross)
det_stat_sq = (hplus_magsq + hcross_magsq - 2 * rho_pluscross*hphccorr)
det_stat = numpy.sqrt(det_stat_sq / denom)
if thresh:
out.data[locs] = det_stat
out.non_zero_locs = locs
return out
else:
return Array(det_stat, copy=False)
def compute_u_val_for_sky_loc_stat_no_phase(hplus, hcross, hphccorr,
hpnorm=None , hcnorm=None, indices=None):
"""The max-over-sky location (no phase) detection statistic maximizes over
an amplitude and the ratio of F+ and Fx, encoded in a variable called u.
Here we return the value of u for the given indices.
"""
if indices is not None:
hplus = hplus[indices]
hcross = hcross[indices]
if hpnorm is not None:
hplus = hplus * hpnorm
if hcnorm is not None:
hcross = hcross * hcnorm
rhoplusre = numpy.real(hplus)
rhocrossre = numpy.real(hcross)
overlap = numpy.real(hphccorr)
denom = (-rhocrossre + overlap * rhoplusre)
# Initialize tan_kappa array
u_val = denom * 0.
# Catch the denominator -> 0 case
numpy.putmask(u_val, denom == 0, 1E17)
# Otherwise do normal statistic
numpy.putmask(u_val, denom != 0, (-rhoplusre+overlap*rhocrossre)/(-rhocrossre+overlap*rhoplusre))
coa_phase = numpy.zeros(len(indices), dtype=numpy.float32)
return u_val, coa_phase
class MatchedFilterSkyMaxControl(object):
# FIXME: This seems much more simplistic than the aligned-spin class.
# E.g. no correlators. Is this worth updating?
def __init__(self, low_frequency_cutoff, high_frequency_cutoff,
snr_threshold, tlen, delta_f, dtype):
"""
Create a matched filter engine.
Parameters
----------
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin
at the first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue
to the Nyquist frequency.
snr_threshold : float
The minimum snr to return when filtering
"""
self.tlen = tlen
self.delta_f = delta_f
self.dtype = dtype
self.snr_threshold = snr_threshold
self.flow = low_frequency_cutoff
self.fhigh = high_frequency_cutoff
self.matched_filter_and_cluster = \
self.full_matched_filter_and_cluster
self.snr_plus_mem = zeros(self.tlen, dtype=self.dtype)
self.corr_plus_mem = zeros(self.tlen, dtype=self.dtype)
self.snr_cross_mem = zeros(self.tlen, dtype=self.dtype)
self.corr_cross_mem = zeros(self.tlen, dtype=self.dtype)
self.snr_mem = zeros(self.tlen, dtype=self.dtype)
self.cached_hplus_hcross_correlation = None
self.cached_hplus_hcross_hplus = None
self.cached_hplus_hcross_hcross = None
self.cached_hplus_hcross_psd = None
def full_matched_filter_and_cluster(self, hplus, hcross, hplus_norm,
hcross_norm, psd, stilde, window):
"""
Return the complex snr and normalization.
Calculates the matched filter, thresholds, and clusters.
Parameters
----------
hplus : FrequencySeries
The plus-polarization template waveform.
hcross : FrequencySeries
The cross-polarization template waveform.
hplus_norm : float
The sigmasq normalization of hplus.
hcross_norm : float
The sigmasq normalization of hcross.
psd : FrequencySeries
The power spectral density used to compute the overlap between the
two polarizations.
stilde : FrequencySeries
The strain data to be filtered.
window : int
The size of the cluster window in samples.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
norm : float
The normalization of the complex snr.
correlation: FrequencySeries
A frequency series containing the correlation vector.
idx : Array
List of indices of the triggers.
snrv : Array
The snr values at the trigger locations.
"""
I_plus, Iplus_corr, Iplus_norm = matched_filter_core(hplus, stilde,
h_norm=hplus_norm,
low_frequency_cutoff=self.flow,
high_frequency_cutoff=self.fhigh,
out=self.snr_plus_mem,
corr_out=self.corr_plus_mem)
I_cross, Icross_corr, Icross_norm = matched_filter_core(hcross,
stilde, h_norm=hcross_norm,
low_frequency_cutoff=self.flow,
high_frequency_cutoff=self.fhigh,
out=self.snr_cross_mem,
corr_out=self.corr_cross_mem)
# The information on the complex side of this overlap is important
# we may want to use this in the future.
if not id(hplus) == self.cached_hplus_hcross_hplus:
self.cached_hplus_hcross_correlation = None
if not id(hcross) == self.cached_hplus_hcross_hcross:
self.cached_hplus_hcross_correlation = None
if not id(psd) == self.cached_hplus_hcross_psd:
self.cached_hplus_hcross_correlation = None
if self.cached_hplus_hcross_correlation is None:
hplus_cross_corr = overlap_cplx(hplus, hcross, psd=psd,
low_frequency_cutoff=self.flow,
high_frequency_cutoff=self.fhigh,
normalized=False)
hplus_cross_corr = numpy.real(hplus_cross_corr)
hplus_cross_corr = hplus_cross_corr / (hcross_norm*hplus_norm)**0.5
self.cached_hplus_hcross_correlation = hplus_cross_corr
self.cached_hplus_hcross_hplus = id(hplus)
self.cached_hplus_hcross_hcross = id(hcross)
self.cached_hplus_hcross_psd = id(psd)
else:
hplus_cross_corr = self.cached_hplus_hcross_correlation
snr = self._maximized_snr(I_plus,I_cross,
hplus_cross_corr,
hpnorm=Iplus_norm,
hcnorm=Icross_norm,
out=self.snr_mem,
thresh=self.snr_threshold,
analyse_slice=stilde.analyze)
# FIXME: This should live further down
# Convert output to pycbc TimeSeries
delta_t = 1.0 / (self.tlen * stilde.delta_f)
snr = TimeSeries(snr, epoch=stilde.start_time, delta_t=delta_t,
copy=False)
idx, snrv = events.threshold_real_numpy(snr[stilde.analyze],
self.snr_threshold)
if len(idx) == 0:
return [], 0, 0, [], [], [], [], 0, 0, 0
logging.info("%s points above threshold", str(len(idx)))
idx, snrv = events.cluster_reduce(idx, snrv, window)
logging.info("%s clustered points", str(len(idx)))
# erased self.
u_vals, coa_phase = self._maximized_extrinsic_params\
(I_plus.data, I_cross.data, hplus_cross_corr,
indices=idx+stilde.analyze.start, hpnorm=Iplus_norm,
hcnorm=Icross_norm)
return snr, Iplus_corr, Icross_corr, idx, snrv, u_vals, coa_phase,\
hplus_cross_corr, Iplus_norm, Icross_norm
def _maximized_snr(self, hplus, hcross, hphccorr, **kwargs):
return compute_max_snr_over_sky_loc_stat(hplus, hcross, hphccorr,
**kwargs)
def _maximized_extrinsic_params(self, hplus, hcross, hphccorr, **kwargs):
return compute_u_val_for_sky_loc_stat(hplus, hcross, hphccorr,
**kwargs)
class MatchedFilterSkyMaxControlNoPhase(MatchedFilterSkyMaxControl):
# Basically the same as normal SkyMaxControl, except we use a slight
# variation in the internal SNR functions.
def _maximized_snr(self, hplus, hcross, hphccorr, **kwargs):
return compute_max_snr_over_sky_loc_stat_no_phase(hplus, hcross,
hphccorr, **kwargs)
def _maximized_extrinsic_params(self, hplus, hcross, hphccorr, **kwargs):
return compute_u_val_for_sky_loc_stat_no_phase(hplus, hcross, hphccorr,
**kwargs)
def make_frequency_series(vec):
"""Return a frequency series of the input vector.
If the input is a frequency series it is returned, else if the input
vector is a real time series it is Fourier transformed and returned as a
frequency series.
Parameters
----------
vec : TimeSeries or FrequencySeries
Returns
-------
Frequency Series: FrequencySeries
A frequency domain version of the input vector.
"""
if isinstance(vec, FrequencySeries):
return vec
if isinstance(vec, TimeSeries):
N = len(vec)
n = N // 2 + 1
delta_f = 1.0 / N / vec.delta_t
vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
delta_f=delta_f, copy=False)
fft(vec, vectilde)
return vectilde
else:
raise TypeError("Can only convert a TimeSeries to a FrequencySeries")
def sigmasq_series(htilde, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None):
"""Return a cumulative sigmasq frequency series.
Return a frequency series containing the accumulated power in the input
up to that frequency.
Parameters
----------
htilde : TimeSeries or FrequencySeries
The input vector
psd : {None, FrequencySeries}, optional
The psd used to weight the accumulated power.
low_frequency_cutoff : {None, float}, optional
The frequency to begin accumulating power. If None, start at the beginning
of the vector.
high_frequency_cutoff : {None, float}, optional
The frequency to stop considering accumulated power. If None, continue
until the end of the input vector.
Returns
-------
Frequency Series: FrequencySeries
A frequency series containing the cumulative sigmasq.
"""
htilde = make_frequency_series(htilde)
N = (len(htilde)-1) * 2
norm = 4.0 * htilde.delta_f
kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
high_frequency_cutoff, htilde.delta_f, N)
sigma_vec = FrequencySeries(zeros(len(htilde), dtype=real_same_precision_as(htilde)),
delta_f = htilde.delta_f, copy=False)
mag = htilde.squared_norm()
if psd is not None:
mag /= psd
sigma_vec[kmin:kmax] = mag[kmin:kmax].cumsum()
return sigma_vec*norm
def sigmasq(htilde, psd = None, low_frequency_cutoff=None,
high_frequency_cutoff=None):
"""Return the loudness of the waveform. This is defined (see Duncan
Brown's thesis) as the unnormalized matched-filter of the input waveform,
htilde, with itself. This quantity is usually referred to as (sigma)^2
and is then used to normalize matched-filters with the data.
Parameters
----------
htilde : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : {None, FrequencySeries}, optional
The psd used to weight the accumulated power.
low_frequency_cutoff : {None, float}, optional
The frequency to begin considering waveform power.
high_frequency_cutoff : {None, float}, optional
The frequency to stop considering waveform power.
Returns
-------
sigmasq: float
"""
htilde = make_frequency_series(htilde)
N = (len(htilde)-1) * 2
norm = 4.0 * htilde.delta_f
kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
high_frequency_cutoff, htilde.delta_f, N)
ht = htilde[kmin:kmax]
if psd:
try:
numpy.testing.assert_almost_equal(ht.delta_f, psd.delta_f)
except AssertionError:
raise ValueError('Waveform does not have same delta_f as psd')
if psd is None:
sq = ht.inner(ht)
else:
sq = ht.weighted_inner(ht, psd[kmin:kmax])
return sq.real * norm
def sigma(htilde, psd = None, low_frequency_cutoff=None,
high_frequency_cutoff=None):
""" Return the sigma of the waveform. See sigmasq for more details.
Parameters
----------
htilde : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : {None, FrequencySeries}, optional
The psd used to weight the accumulated power.
low_frequency_cutoff : {None, float}, optional
The frequency to begin considering waveform power.
high_frequency_cutoff : {None, float}, optional
The frequency to stop considering waveform power.
Returns
-------
sigmasq: float
"""
return sqrt(sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff))
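# A worked sketch of the sigmasq/sigma normalisation against a flat unit PSD;
# only pycbc.types and numpy are assumed, both imported at the top of this
# module.
def _example_sigma():
    flen, delta_f = 2049, 1.0 / 16
    htilde = FrequencySeries(numpy.ones(flen, dtype=numpy.complex128),
                             delta_f=delta_f)
    flat_psd = FrequencySeries(numpy.ones(flen, dtype=numpy.float64),
                               delta_f=delta_f)
    # equals 4 * delta_f * (number of bins between kmin and kmax) here
    s2 = sigmasq(htilde, psd=flat_psd, low_frequency_cutoff=20.0)
    return s2, sigma(htilde, psd=flat_psd, low_frequency_cutoff=20.0)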
def get_cutoff_indices(flow, fhigh, df, N):
"""
Gets the indices of a frequency series at which to stop an overlap
calculation.
Parameters
----------
flow: float
The frequency (in Hz) of the lower index.
fhigh: float
The frequency (in Hz) of the upper index.
df: float
The frequency step (in Hz) of the frequency series.
N: int
The number of points in the **time** series. Can be odd
or even.
Returns
-------
kmin: int
kmax: int
"""
if flow:
kmin = int(flow / df)
if kmin < 0:
err_msg = "Start frequency cannot be negative. "
err_msg += "Supplied value and kmin {} and {}".format(flow, kmin)
raise ValueError(err_msg)
else:
kmin = 1
if fhigh:
kmax = int(fhigh / df)
if kmax > int((N + 1)/2.):
kmax = int((N + 1)/2.)
else:
# int() truncates towards 0, so this is
# equivalent to the floor of the float
kmax = int((N + 1)/2.)
if kmax <= kmin:
err_msg = "Kmax cannot be less than or equal to kmin. "
err_msg += "Provided values of freqencies (min,max) were "
err_msg += "{} and {} ".format(flow, fhigh)
err_msg += "corresponding to (kmin, kmax) of "
err_msg += "{} and {}.".format(kmin, kmax)
raise ValueError(err_msg)
return kmin,kmax
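# A worked example of the index arithmetic above: 16 s of data sampled at
# 4096 Hz gives N = 65536 time samples and delta_f = 1/16 Hz.
def _example_cutoff_indices():
    kmin, kmax = get_cutoff_indices(20.0, None, 1.0 / 16, 65536)
    # kmin = int(20 / (1/16)) = 320, kmax = int((65536 + 1) / 2) = 32768
    return kmin, kmax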
def matched_filter_core(template, data, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, h_norm=None, out=None, corr_out=None):
""" Return the complex snr and normalization.
Return the complex snr, along with its associated normalization of the template,
matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : {FrequencySeries}, optional
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
Nyquist frequency.
h_norm : {None, float}, optional
The template normalization. If None, this value is calculated internally.
out : {None, Array}, optional
An array to use as memory for snr storage. If None, memory is allocated
internally.
corr_out : {None, Array}, optional
An array to use as memory for correlation storage. If None, memory is allocated
internally. If provided, management of the vector is handled externally by the
caller. No zeroing is done internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
correlation: FrequencySeries
A frequency series containing the correlation vector.
norm : float
The normalization of the complex snr.
"""
htilde = make_frequency_series(template)
stilde = make_frequency_series(data)
if len(htilde) != len(stilde):
raise ValueError("Length of template and data must match")
N = (len(stilde)-1) * 2
kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
high_frequency_cutoff, stilde.delta_f, N)
if corr_out is not None:
qtilde = corr_out
else:
qtilde = zeros(N, dtype=complex_same_precision_as(data))
if out is None:
_q = zeros(N, dtype=complex_same_precision_as(data))
elif (len(out) == N) and type(out) is Array and out.kind =='complex':
_q = out
else:
raise TypeError('Invalid Output Vector: wrong length or dtype')
correlate(htilde[kmin:kmax], stilde[kmin:kmax], qtilde[kmin:kmax])
if psd is not None:
if isinstance(psd, FrequencySeries):
try:
numpy.testing.assert_almost_equal(stilde.delta_f, psd.delta_f)
except AssertionError:
raise ValueError("PSD delta_f does not match data")
qtilde[kmin:kmax] /= psd[kmin:kmax]
else:
raise TypeError("PSD must be a FrequencySeries")
ifft(qtilde, _q)
if h_norm is None:
h_norm = sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff)
norm = (4.0 * stilde.delta_f) / sqrt( h_norm)
return (TimeSeries(_q, epoch=stilde._epoch, delta_t=stilde.delta_t, copy=False),
FrequencySeries(qtilde, epoch=stilde._epoch, delta_f=stilde.delta_f, copy=False),
norm)
def smear(idx, factor):
"""
This function will take as input an array of indexes and return every
unique index within the specified factor of the inputs.
E.g.: smear([5,7,100],2) = [3,4,5,6,7,8,9,98,99,100,101,102]
Parameters
-----------
idx : numpy.array of ints
The indexes to be smeared.
factor : int
The factor by which to smear out the input array.
Returns
--------
new_idx : numpy.array of ints
The smeared array of indexes.
"""
s = [idx]
for i in range(factor+1):
a = i - factor/2
s += [idx + a]
return numpy.unique(numpy.concatenate(s))
def matched_filter(template, data, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, sigmasq=None):
""" Return the complex snr.
Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : FrequencySeries
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
Nyquist frequency.
sigmasq : {None, float}, optional
The template normalization. If None, this value is calculated
internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
"""
snr, _, norm = matched_filter_core(template, data, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq)
return snr * norm
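# An end-to-end usage sketch of matched_filter against simulated Gaussian
# noise. The waveform, psd and noise helpers are assumed to be available with
# these standard pycbc signatures, and the named approximant is assumed to be
# provided by the installed lalsimulation.
def _example_matched_filter():
    import pycbc.noise
    import pycbc.psd
    from pycbc.waveform import get_td_waveform
    delta_t = 1.0 / 4096
    tlen = 4096 * 16                       # 16 s of data
    delta_f = 1.0 / 16
    psd = pycbc.psd.aLIGOZeroDetHighPower(tlen // 2 + 1, delta_f, 20.0)
    data = pycbc.noise.noise_from_psd(tlen, delta_t, psd, seed=1)
    hp, _ = get_td_waveform(approximant="SEOBNRv4_opt", mass1=25, mass2=25,
                            f_lower=25.0, delta_t=delta_t)
    hp.resize(tlen)                        # template must match the data length
    snr = matched_filter(hp, data, psd=psd, low_frequency_cutoff=25.0)
    return abs(snr).max()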
_snr = None
def match(
vec1,
vec2,
psd=None,
low_frequency_cutoff=None,
high_frequency_cutoff=None,
v1_norm=None,
v2_norm=None,
subsample_interpolation=False,
return_phase=False,
):
"""Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase.
The maximization is only performed with discrete time-shifts,
or a quadratic interpolation of them if the subsample_interpolation
option is turned on; for a more precise computation
of the match between two waveforms, use the optimized_match function.
The accuracy of this function is guaranteed up to the fourth decimal place.
Parameters
----------
vec1 : TimeSeries or FrequencySeries
The input vector containing a waveform.
vec2 : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
v1_norm : {None, float}, optional
The normalization of the first waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
v2_norm : {None, float}, optional
The normalization of the second waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
subsample_interpolation : {False, bool}, optional
If True the peak will be interpolated between samples using a simple
quadratic fit. This can be important if measuring matches very close to
1 and can cause discontinuities if you don't use it as matches move
between discrete samples. If True the index returned will be a float.
return_phase : {False, bool}, optional
If True, also return the phase shift that gives the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
phi: float
Phase to rotate complex waveform to get the match, if desired.
"""
htilde = make_frequency_series(vec1)
stilde = make_frequency_series(vec2)
N = (len(htilde) - 1) * 2
global _snr
if _snr is None or _snr.dtype != htilde.dtype or len(_snr) != N:
_snr = zeros(N, dtype=complex_same_precision_as(vec1))
snr, _, snr_norm = matched_filter_core(
htilde,
stilde,
psd,
low_frequency_cutoff,
high_frequency_cutoff,
v1_norm,
out=_snr,
)
maxsnr, max_id = snr.abs_max_loc()
if v2_norm is None:
v2_norm = sigmasq(stilde, psd, low_frequency_cutoff, high_frequency_cutoff)
if subsample_interpolation:
# This uses the implementation coded up in sbank. Thanks Nick!
# The maths for this is well summarized here:
# https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
# We use adjacent points to interpolate, but wrap off the end if needed
left = abs(snr[-1]) if max_id == 0 else abs(snr[max_id - 1])
middle = maxsnr
right = abs(snr[0]) if max_id == (len(snr) - 1) else abs(snr[max_id + 1])
# Get derivatives
id_shift, maxsnr = quadratic_interpolate_peak(left, middle, right)
max_id = max_id + id_shift
if return_phase:
rounded_max_id = int(round(max_id))
phi = numpy.angle(snr[rounded_max_id])
return maxsnr * snr_norm / sqrt(v2_norm), max_id, phi
else:
return maxsnr * snr_norm / sqrt(v2_norm), max_id
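# A minimal illustrative sketch (not part of the original module): the match
# between a waveform and a time-shifted copy of itself should be very close to
# 1, with the returned index recovering the applied shift. Assumes
# ``get_fd_waveform`` and the IMRPhenomD approximant are available.
def _example_match():
    from pycbc.waveform import get_fd_waveform
    flow, delta_f = 20.0, 1.0 / 16
    hp, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=25, mass2=25,
                            delta_f=delta_f, f_lower=flow)
    shifted = hp.cyclic_time_shift(0.01)
    m, idx = match(hp, shifted, low_frequency_cutoff=flow,
                   subsample_interpolation=True)
    return m, idx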
def overlap(vec1, vec2, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, normalized=True):
""" Return the overlap between the two TimeSeries or FrequencySeries.
Parameters
----------
vec1 : TimeSeries or FrequencySeries
The input vector containing a waveform.
vec2 : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the overlap.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the overlap.
normalized : {True, boolean}, optional
Set if the overlap is normalized. If true, it will range from 0 to 1.
Returns
-------
overlap: float
"""
return overlap_cplx(vec1, vec2, psd=psd, \
low_frequency_cutoff=low_frequency_cutoff,\
high_frequency_cutoff=high_frequency_cutoff,\
normalized=normalized).real
def overlap_cplx(vec1, vec2, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, normalized=True):
"""Return the complex overlap between the two TimeSeries or FrequencySeries.
Parameters
----------
vec1 : TimeSeries or FrequencySeries
The input vector containing a waveform.
vec2 : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the overlap.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the overlap.
normalized : {True, boolean}, optional
Set if the overlap is normalized. If true, it will range from 0 to 1.
Returns
-------
overlap: complex
"""
htilde = make_frequency_series(vec1)
stilde = make_frequency_series(vec2)
kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
high_frequency_cutoff, stilde.delta_f, (len(stilde)-1) * 2)
if psd:
inner = (htilde[kmin:kmax]).weighted_inner(stilde[kmin:kmax], psd[kmin:kmax])
else:
inner = (htilde[kmin:kmax]).inner(stilde[kmin:kmax])
if normalized:
sig1 = sigma(vec1, psd=psd, low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
sig2 = sigma(vec2, psd=psd, low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
norm = 1 / sig1 / sig2
else:
norm = 1
return 4 * htilde.delta_f * inner * norm
def quadratic_interpolate_peak(left, middle, right):
""" Interpolate the peak and offset using a quadratic approximation
Parameters
----------
left : numpy array
Values at a relative bin value of [-1]
middle : numpy array
Values at a relative bin value of [0]
right : numpy array
Values at a relative bin value of [1]
Returns
-------
bin_offset : numpy array
        Array of bin offsets, each in the range [-1/2, 1/2]
peak_values : numpy array
Array of the estimated peak values at the interpolated offset
"""
bin_offset = 1.0/2.0 * (left - right) / (left - 2 * middle + right)
peak_value = middle - 0.25 * (left - right) * bin_offset
return bin_offset, peak_value
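# A worked example (not part of the original module): for the samples
# (left, middle, right) = (3, 5, 4) the parabola through the three points
# peaks slightly to the right of the central bin.
def _example_quadratic_interpolate_peak():
    offset, peak = quadratic_interpolate_peak(3.0, 5.0, 4.0)
    # offset = 0.5 * (3 - 4) / (3 - 10 + 4) = 1/6 ~ 0.167
    # peak   = 5 - 0.25 * (3 - 4) * offset  ~ 5.042
    return offset, peak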
class LiveBatchMatchedFilter(object):
"""Calculate SNR and signal consistency tests in a batched progression"""
def __init__(self, templates, snr_threshold, chisq_bins, sg_chisq,
maxelements=2**27,
snr_abort_threshold=None,
newsnr_threshold=None,
max_triggers_in_batch=None):
"""Create a batched matchedfilter instance
Parameters
----------
templates: list of `FrequencySeries`
List of templates from the FilterBank class.
snr_threshold: float
Minimum value to record peaks in the SNR time series.
chisq_bins: str
Str that determines how the number of chisq bins varies as a
function of the template bank parameters.
sg_chisq: pycbc.vetoes.SingleDetSGChisq
Instance of the sg_chisq class to calculate sg_chisq with.
maxelements: {int, 2**27}
Maximum size of a batched fourier transform.
snr_abort_threshold: {float, None}
If the SNR is above this threshold, do not record any triggers.
newsnr_threshold: {float, None}
Only record triggers that have a re-weighted NewSNR above this
threshold.
        max_triggers_in_batch: {int, None}
            Record only this many of the loudest triggers, ranked by SNR, in
            each MPI process. Signal consistency values will also only be
            calculated for these triggers.
"""
self.snr_threshold = snr_threshold
self.snr_abort_threshold = snr_abort_threshold
self.newsnr_threshold = newsnr_threshold
self.max_triggers_in_batch = max_triggers_in_batch
from pycbc import vetoes
self.power_chisq = vetoes.SingleDetPowerChisq(chisq_bins, None)
self.sg_chisq = sg_chisq
durations = numpy.array([1.0 / t.delta_f for t in templates])
lsort = durations.argsort()
durations = durations[lsort]
templates = [templates[li] for li in lsort]
# Figure out how to chunk together the templates into groups to process
_, counts = numpy.unique(durations, return_counts=True)
tsamples = [(len(t) - 1) * 2 for t in templates]
grabs = maxelements / numpy.unique(tsamples)
chunks = numpy.array([])
num = 0
for count, grab in zip(counts, grabs):
chunks = numpy.append(chunks, numpy.arange(num, count + num, grab))
chunks = numpy.append(chunks, [count + num])
num += count
chunks = numpy.unique(chunks).astype(numpy.uint32)
# We now have how many templates to grab at a time.
self.chunks = chunks[1:] - chunks[0:-1]
self.out_mem = {}
self.cout_mem = {}
self.ifts = {}
chunk_durations = [durations[i] for i in chunks[:-1]]
self.chunk_tsamples = [tsamples[int(i)] for i in chunks[:-1]]
samples = self.chunk_tsamples * self.chunks
# Create workspace memory for correlate and snr
mem_ids = [(a, b) for a, b in zip(chunk_durations, self.chunks)]
mem_types = set(zip(mem_ids, samples))
self.tgroups, self.mids = [], []
for i, size in mem_types:
dur, count = i
self.out_mem[i] = zeros(size, dtype=numpy.complex64)
self.cout_mem[i] = zeros(size, dtype=numpy.complex64)
self.ifts[i] = IFFT(self.cout_mem[i], self.out_mem[i],
nbatch=count,
size=len(self.cout_mem[i]) // count)
# Split the templates into their processing groups
for dur, count in mem_ids:
tgroup = templates[0:count]
self.tgroups.append(tgroup)
self.mids.append((dur, count))
templates = templates[count:]
# Associate the snr and corr memory block to each template
self.corr = []
for i, tgroup in enumerate(self.tgroups):
psize = self.chunk_tsamples[i]
s = 0
e = psize
mid = self.mids[i]
for htilde in tgroup:
htilde.out = self.out_mem[mid][s:e]
htilde.cout = self.cout_mem[mid][s:e]
s += psize
e += psize
self.corr.append(BatchCorrelator(tgroup, [t.cout for t in tgroup], len(tgroup[0])))
def set_data(self, data):
"""Set the data reader object to use"""
self.data = data
self.block_id = 0
def combine_results(self, results):
"""Combine results from different batches of filtering"""
result = {}
for key in results[0]:
result[key] = numpy.concatenate([r[key] for r in results])
return result
def process_data(self, data_reader):
"""Process the data for all of the templates"""
self.set_data(data_reader)
return self.process_all()
def process_all(self):
"""Process every batch group and return as single result"""
results = []
veto_info = []
while 1:
result, veto = self._process_batch()
if result is False: return False
if result is None: break
results.append(result)
veto_info += veto
result = self.combine_results(results)
if self.max_triggers_in_batch:
sort = result['snr'].argsort()[::-1][:self.max_triggers_in_batch]
for key in result:
result[key] = result[key][sort]
tmp = veto_info
veto_info = [tmp[i] for i in sort]
result = self._process_vetoes(result, veto_info)
return result
def _process_vetoes(self, results, veto_info):
"""Calculate signal based vetoes"""
chisq = numpy.array(numpy.zeros(len(veto_info)), numpy.float32, ndmin=1)
dof = numpy.array(numpy.zeros(len(veto_info)), numpy.uint32, ndmin=1)
sg_chisq = numpy.array(numpy.zeros(len(veto_info)), numpy.float32,
ndmin=1)
results['chisq'] = chisq
results['chisq_dof'] = dof
results['sg_chisq'] = sg_chisq
keep = []
for i, (snrv, norm, l, htilde, stilde) in enumerate(veto_info):
correlate(htilde, stilde, htilde.cout)
c, d = self.power_chisq.values(htilde.cout, snrv,
norm, stilde.psd, [l], htilde)
chisq[i] = c[0] / d[0]
dof[i] = d[0]
sgv = self.sg_chisq.values(stilde, htilde, stilde.psd,
snrv, norm, c, d, [l])
if sgv is not None:
sg_chisq[i] = sgv[0]
if self.newsnr_threshold:
newsnr = ranking.newsnr(results['snr'][i], chisq[i])
if newsnr >= self.newsnr_threshold:
keep.append(i)
if self.newsnr_threshold:
keep = numpy.array(keep, dtype=numpy.uint32)
for key in results:
results[key] = results[key][keep]
return results
def _process_batch(self):
"""Process only a single batch group of data"""
if self.block_id == len(self.tgroups):
return None, None
tgroup = self.tgroups[self.block_id]
psize = self.chunk_tsamples[self.block_id]
mid = self.mids[self.block_id]
stilde = self.data.overwhitened_data(tgroup[0].delta_f)
psd = stilde.psd
valid_end = int(psize - self.data.trim_padding)
valid_start = int(valid_end - self.data.blocksize * self.data.sample_rate)
seg = slice(valid_start, valid_end)
self.corr[self.block_id].execute(stilde)
self.ifts[mid].execute()
self.block_id += 1
snr = numpy.zeros(len(tgroup), dtype=numpy.complex64)
time = numpy.zeros(len(tgroup), dtype=numpy.float64)
templates = numpy.zeros(len(tgroup), dtype=numpy.uint64)
sigmasq = numpy.zeros(len(tgroup), dtype=numpy.float32)
time[:] = self.data.start_time
result = {}
tkeys = tgroup[0].params.dtype.names
for key in tkeys:
result[key] = []
veto_info = []
# Find the peaks in our SNR times series from the various templates
i = 0
for htilde in tgroup:
if hasattr(htilde, 'time_offset'):
if 'time_offset' not in result:
result['time_offset'] = []
l = htilde.out[seg].abs_arg_max()
sgm = htilde.sigmasq(psd)
norm = 4.0 * htilde.delta_f / (sgm ** 0.5)
l += valid_start
snrv = numpy.array([htilde.out[l]])
# If nothing is above threshold we can exit this template
s = abs(snrv[0]) * norm
if s < self.snr_threshold:
continue
time[i] += float(l - valid_start) / self.data.sample_rate
# We have an SNR so high that we will drop the entire analysis
# of this chunk of time!
if self.snr_abort_threshold is not None and s > self.snr_abort_threshold:
logging.info("We are seeing some *really* high SNRs, lets"
" assume they aren't signals and just give up")
return False, []
veto_info.append((snrv, norm, l, htilde, stilde))
snr[i] = snrv[0] * norm
sigmasq[i] = sgm
templates[i] = htilde.id
if not hasattr(htilde, 'dict_params'):
htilde.dict_params = {}
for key in tkeys:
htilde.dict_params[key] = htilde.params[key]
for key in tkeys:
result[key].append(htilde.dict_params[key])
if hasattr(htilde, 'time_offset'):
result['time_offset'].append(htilde.time_offset)
i += 1
result['snr'] = abs(snr[0:i])
result['coa_phase'] = numpy.angle(snr[0:i])
result['end_time'] = time[0:i]
result['template_id'] = templates[0:i]
result['sigmasq'] = sigmasq[0:i]
for key in tkeys:
result[key] = numpy.array(result[key])
if 'time_offset' in result:
result['time_offset'] = numpy.array(result['time_offset'])
return result, veto_info
def followup_event_significance(ifo, data_reader, bank,
template_id, coinc_times,
coinc_threshold=0.005,
lookback=150, duration=0.095):
"""Given a detector, a template waveform and a set of candidate event
times in different detectors, perform an on-source/off-source analysis
to determine if the SNR in the first detector has a significant peak
in the on-source window. The significance is given in terms of a
p-value. See Dal Canton et al. 2021 (https://arxiv.org/abs/2008.07494)
for details.
"""
from pycbc.waveform import get_waveform_filter_length_in_time
tmplt = bank.table[template_id]
length_in_time = get_waveform_filter_length_in_time(tmplt['approximant'],
tmplt)
# calculate onsource time range
from pycbc.detector import Detector
onsource_start = -numpy.inf
onsource_end = numpy.inf
fdet = Detector(ifo)
for cifo in coinc_times:
time = coinc_times[cifo]
dtravel = Detector(cifo).light_travel_time_to_detector(fdet)
if time - dtravel > onsource_start:
onsource_start = time - dtravel
if time + dtravel < onsource_end:
onsource_end = time + dtravel
# Source must be within this time window to be considered a possible
# coincidence
onsource_start -= coinc_threshold
onsource_end += coinc_threshold
# Calculate how much time needed to calculate significance
trim_pad = (data_reader.trim_padding * data_reader.strain.delta_t)
bdur = int(lookback + 2.0 * trim_pad + length_in_time)
if bdur > data_reader.strain.duration * .75:
bdur = data_reader.strain.duration * .75
# Require all strain be valid within lookback time
if data_reader.state is not None:
state_start_time = data_reader.strain.end_time \
- data_reader.reduced_pad * data_reader.strain.delta_t - bdur
if not data_reader.state.is_extent_valid(state_start_time, bdur):
return None
# We won't require that all DQ checks be valid for now, except at
# onsource time.
if data_reader.dq is not None:
dq_start_time = onsource_start - duration / 2.0
dq_duration = onsource_end - onsource_start + duration
if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration):
return None
# Calculate SNR time series for this duration
htilde = bank.get_template(template_id, min_buffer=bdur)
stilde = data_reader.overwhitened_data(htilde.delta_f)
sigma2 = htilde.sigmasq(stilde.psd)
snr, _, norm = matched_filter_core(htilde, stilde, h_norm=sigma2)
# Find peak in on-source and determine p-value
onsrc = snr.time_slice(onsource_start, onsource_end)
peak = onsrc.abs_arg_max()
peak_time = peak * snr.delta_t + onsrc.start_time
peak_value = abs(onsrc[peak])
bstart = float(snr.start_time) + length_in_time + trim_pad
bkg = abs(snr.time_slice(bstart, onsource_start)).numpy()
window = int((onsource_end - onsource_start) * snr.sample_rate)
nsamples = int(len(bkg) / window)
peaks = bkg[:nsamples*window].reshape(nsamples, window).max(axis=1)
num_louder_bg = (peaks >= peak_value).sum()
pvalue = (1 + num_louder_bg) / float(1 + nsamples)
pvalue_saturated = num_louder_bg == 0
# Return recentered source SNR for bayestar, along with p-value, and trig
peak_full = int((peak_time - snr.start_time) / snr.delta_t)
half_dur_samples = int(snr.sample_rate * duration / 2)
snr_slice = slice(peak_full - half_dur_samples,
peak_full + half_dur_samples + 1)
baysnr = snr[snr_slice]
logging.info('Adding %s to candidate, pvalue %s, %s samples', ifo,
pvalue, nsamples)
return {
'snr_series': baysnr * norm,
'peak_time': peak_time,
'pvalue': pvalue,
'pvalue_saturated': pvalue_saturated,
'sigma2': sigma2
}
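# A minimal illustrative sketch (not part of the original module): the p-value
# above is the standard (1 + k) / (1 + N) estimator, where k of the N
# off-source window maxima are at least as loud as the on-source peak. For
# example, 3 louder peaks out of 199 windows gives (1 + 3) / (1 + 199) = 0.02.
def _example_followup_pvalue(peak_value, background_peaks):
    background_peaks = numpy.asarray(background_peaks)
    num_louder_bg = (background_peaks >= peak_value).sum()
    return (1 + num_louder_bg) / float(1 + len(background_peaks))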
def compute_followup_snr_series(data_reader, htilde, trig_time,
duration=0.095, check_state=True,
coinc_window=0.05):
"""Given a StrainBuffer, a template frequency series and a trigger time,
compute a portion of the SNR time series centered on the trigger for its
rapid sky localization and followup.
If the trigger time is too close to the boundary of the valid data segment
the SNR series is calculated anyway and might be slightly contaminated by
filter and wrap-around effects. For reasonable durations this will only
affect a small fraction of the triggers and probably in a negligible way.
Parameters
----------
data_reader : StrainBuffer
The StrainBuffer object to read strain data from.
htilde : FrequencySeries
The frequency series containing the template waveform.
trig_time : {float, lal.LIGOTimeGPS}
The trigger time.
duration : float (optional)
Duration of the computed SNR series in seconds. If omitted, it defaults
to twice the Earth light travel time plus 10 ms of timing uncertainty.
check_state : boolean
If True, and the detector was offline or flagged for bad data quality
at any point during the inspiral, then return (None, None) instead.
coinc_window : float (optional)
Maximum possible time between coincident triggers at different
detectors. This is needed to properly determine data padding.
Returns
-------
snr : TimeSeries
The portion of SNR around the trigger. None if the detector is offline
or has bad data quality, and check_state is True.
"""
if check_state:
# was the detector observing for the full amount of involved data?
state_start_time = trig_time - duration / 2 - htilde.length_in_time
state_end_time = trig_time + duration / 2
state_duration = state_end_time - state_start_time
if data_reader.state is not None:
if not data_reader.state.is_extent_valid(state_start_time,
state_duration):
return None
# was the data quality ok for the full amount of involved data?
dq_start_time = state_start_time - data_reader.dq_padding
dq_duration = state_duration + 2 * data_reader.dq_padding
if data_reader.dq is not None:
if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration):
return None
stilde = data_reader.overwhitened_data(htilde.delta_f)
snr, _, norm = matched_filter_core(htilde, stilde,
h_norm=htilde.sigmasq(stilde.psd))
valid_end = int(len(snr) - data_reader.trim_padding)
valid_start = int(valid_end - data_reader.blocksize * snr.sample_rate)
half_dur_samples = int(snr.sample_rate * duration / 2)
coinc_samples = int(snr.sample_rate * coinc_window)
valid_start -= half_dur_samples + coinc_samples
valid_end += half_dur_samples
if valid_start < 0 or valid_end > len(snr)-1:
raise ValueError(('Requested SNR duration ({0} s)'
' too long').format(duration))
# Onsource slice for Bayestar followup
onsource_idx = float(trig_time - snr.start_time) * snr.sample_rate
onsource_idx = int(round(onsource_idx))
onsource_slice = slice(onsource_idx - half_dur_samples,
onsource_idx + half_dur_samples + 1)
return snr[onsource_slice] * norm
def optimized_match(
vec1,
vec2,
psd=None,
low_frequency_cutoff=None,
high_frequency_cutoff=None,
v1_norm=None,
v2_norm=None,
return_phase=False,
):
"""Given two waveforms (as numpy arrays),
compute the optimized match between them, making use
of scipy.minimize_scalar.
This function computes the same quantities as "match";
it is more accurate and slower.
Parameters
----------
vec1 : TimeSeries or FrequencySeries
The input vector containing a waveform.
vec2 : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : FrequencySeries
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
v1_norm : {None, float}, optional
The normalization of the first waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
v2_norm : {None, float}, optional
The normalization of the second waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
return_phase : {False, bool}, optional
If True, also return the phase shift that gives the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
phi: float
Phase to rotate complex waveform to get the match, if desired.
"""
from scipy.optimize import minimize_scalar
htilde = make_frequency_series(vec1)
stilde = make_frequency_series(vec2)
assert numpy.isclose(htilde.delta_f, stilde.delta_f)
delta_f = stilde.delta_f
assert numpy.isclose(htilde.delta_t, stilde.delta_t)
delta_t = stilde.delta_t
# a first time shift to get in the nearby region;
# then the optimization is only used to move to the
    # correct subsample-timeshift within (-delta_t, delta_t)
# of this
_, max_id, _ = match(
htilde,
stilde,
psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff,
return_phase=True,
)
stilde = stilde.cyclic_time_shift(-max_id * delta_t)
frequencies = stilde.sample_frequencies.numpy()
waveform_1 = htilde.numpy()
waveform_2 = stilde.numpy()
N = (len(stilde) - 1) * 2
kmin, kmax = get_cutoff_indices(
low_frequency_cutoff, high_frequency_cutoff, delta_f, N
)
mask = slice(kmin, kmax)
waveform_1 = waveform_1[mask]
waveform_2 = waveform_2[mask]
frequencies = frequencies[mask]
if psd is not None:
psd_arr = psd.numpy()[mask]
else:
psd_arr = numpy.ones_like(waveform_1)
def product(a, b):
integral = numpy.sum(numpy.conj(a) * b / psd_arr) * delta_f
return 4 * abs(integral), numpy.angle(integral)
def product_offset(dt):
offset = numpy.exp(2j * numpy.pi * frequencies * dt)
return product(waveform_1, waveform_2 * offset)
def to_minimize(dt):
return -product_offset(dt)[0]
norm_1 = (
sigmasq(htilde, psd, low_frequency_cutoff, high_frequency_cutoff)
if v1_norm is None
else v1_norm
)
norm_2 = (
sigmasq(stilde, psd, low_frequency_cutoff, high_frequency_cutoff)
if v2_norm is None
else v2_norm
)
norm = numpy.sqrt(norm_1 * norm_2)
res = minimize_scalar(
to_minimize,
method="brent",
bracket=(-delta_t, delta_t)
)
m, angle = product_offset(res.x)
if return_phase:
return m / norm, res.x / delta_t + max_id, -angle
else:
return m / norm, res.x / delta_t + max_id
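# A minimal illustrative sketch (not part of the original module): comparing
# ``match`` and ``optimized_match`` on two slightly different waveforms. The
# optimized version is slower, but is not limited to discrete (or
# quadratically interpolated) time shifts. Assumes ``get_fd_waveform`` and the
# IMRPhenomD approximant are available.
def _example_optimized_match():
    from pycbc.waveform import get_fd_waveform
    flow, delta_f = 20.0, 1.0 / 16
    hp1, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=29,
                             delta_f=delta_f, f_lower=flow)
    hp2, _ = get_fd_waveform(approximant="IMRPhenomD", mass1=30, mass2=28,
                             delta_f=delta_f, f_lower=flow)
    flen = max(len(hp1), len(hp2))
    hp1.resize(flen)
    hp2.resize(flen)
    m_fast, _ = match(hp1, hp2, low_frequency_cutoff=flow,
                      subsample_interpolation=True)
    m_opt, _ = optimized_match(hp1, hp2, low_frequency_cutoff=flow)
    return m_fast, m_opt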
__all__ = ['match', 'optimized_match', 'matched_filter', 'sigmasq', 'sigma', 'get_cutoff_indices',
'sigmasq_series', 'make_frequency_series', 'overlap',
'overlap_cplx', 'matched_filter_core', 'correlate',
'MatchedFilterControl', 'LiveBatchMatchedFilter',
'MatchedFilterSkyMaxControl', 'MatchedFilterSkyMaxControlNoPhase',
'compute_max_snr_over_sky_loc_stat_no_phase',
'compute_max_snr_over_sky_loc_stat',
'compute_followup_snr_series',
'compute_u_val_for_sky_loc_stat_no_phase',
'compute_u_val_for_sky_loc_stat',
'followup_event_significance']
| 83,046
| 38.247164
| 114
|
py
|
pycbc
|
pycbc-master/pycbc/filter/zpk.py
|
# Copyright (C) 2014 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy as np
from scipy.signal import zpk2sos, sosfilt
from pycbc.types import TimeSeries
def filter_zpk(timeseries, z, p, k):
"""Return a new timeseries that was filtered with a zero-pole-gain filter.
The transfer function in the s-domain looks like:
.. math::
    H(s) = \\frac{(s - s_1) * (s - s_3) * ... * (s - s_n)}{(s - s_2) * (s - s_4) * ... * (s - s_m)}, m >= n
    The zeroes and poles, entered in Hz, are converted to angular frequency
    along the imaginary axis in the s-domain, s = i*omega. The zeroes and
    poles are then bilinearly transformed via:
.. math::
z(s) = \\frac{(1 + s*T/2)}{(1 - s*T/2)}
    where z is the z-domain value, s is the s-domain value, and T is the
    sampling period. After the poles and zeroes have been bilinearly
    transformed, the second-order sections are found and used to filter the
    data with scipy.
Parameters
----------
timeseries: TimeSeries
The TimeSeries instance to be filtered.
z: array
Array of zeros to include in zero-pole-gain filter design.
In units of Hz.
p: array
Array of poles to include in zero-pole-gain filter design.
In units of Hz.
k: float
Gain to include in zero-pole-gain filter design. This gain is a
constant multiplied to the transfer function.
Returns
-------
Time Series: TimeSeries
A new TimeSeries that has been filtered.
Examples
--------
    To apply 5 zeroes at 100 Hz, 5 poles at 1 Hz, and a gain of 1e-10
    to a TimeSeries instance, do:
    >>> filtered_data = filter_zpk(timeseries, [100.]*5, [1.]*5, 1e-10)
"""
# sanity check type
if not isinstance(timeseries, TimeSeries):
raise TypeError("Can only filter TimeSeries instances.")
    # sanity check causal filter
degree = len(p) - len(z)
if degree < 0:
raise TypeError("May not have more zeroes than poles. \
Filter is not casual.")
# cast zeroes and poles as arrays and gain as a float
z = np.array(z)
p = np.array(p)
k = float(k)
# put zeroes and poles in the s-domain
# convert from frequency to angular frequency
z *= -2 * np.pi
p *= -2 * np.pi
# get denominator of bilinear transform
fs = 2.0 * timeseries.sample_rate
# zeroes in the z-domain
z_zd = (1 + z/fs) / (1 - z/fs)
# any zeros that were at infinity are moved to the Nyquist frequency
z_zd = z_zd[np.isfinite(z_zd)]
z_zd = np.append(z_zd, -np.ones(degree))
# poles in the z-domain
p_zd = (1 + p/fs) / (1 - p/fs)
# gain change in z-domain
k_zd = k * np.prod(fs - z) / np.prod(fs - p)
# get second-order sections
sos = zpk2sos(z_zd, p_zd, k_zd)
# filter
filtered_data = sosfilt(sos, timeseries.numpy())
return TimeSeries(filtered_data, delta_t = timeseries.delta_t,
dtype=timeseries.dtype,
epoch=timeseries._epoch)
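# A minimal illustrative sketch (not part of the original module): applying
# the zero-pole-gain filter from the docstring example to a short white-noise
# TimeSeries. Float zero/pole values are used so the in-place conversion to
# angular frequency stays in floating point.
def _example_filter_zpk():
    ts = TimeSeries(np.random.normal(size=16384), delta_t=1.0 / 4096)
    # 5 zeroes at 100 Hz, 5 poles at 1 Hz, and a gain of 1e-10
    return filter_zpk(ts, [100.] * 5, [1.] * 5, 1e-10)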
| 3,947
| 32.457627
| 107
|
py
|
pycbc
|
pycbc-master/pycbc/filter/qtransform.py
|
# Copyright (C) 2017 Hunter A. Gabbard, Andrew Lundgren,
# Duncan Macleod, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module retrieves a time series and then calculates
the q-transform of that time series
"""
import numpy
from numpy import ceil, log, exp
from pycbc.types.timeseries import FrequencySeries, TimeSeries
from pycbc.fft import ifft
from pycbc.types import zeros
def qplane(qplane_tile_dict, fseries, return_complex=False):
"""Performs q-transform on each tile for each q-plane and selects
tile with the maximum energy. Q-transform can then
be interpolated to a desired frequency and time resolution.
Parameters
----------
qplane_tile_dict:
        Dictionary containing a list of q-tile tuples for each q-plane
fseries: 'pycbc FrequencySeries'
frequency-series data set
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
q : float
The q of the maximum q plane
    times : numpy.ndarray
        The times at which the qtransform is sampled.
    freqs : numpy.ndarray
        The frequencies at which the qtransform is sampled.
qplane : numpy.ndarray (2d)
The two dimensional interpolated qtransform of this time series.
"""
# store q-transforms for each q in a dict
qplanes = {}
max_energy, max_key = None, None
for i, q in enumerate(qplane_tile_dict):
energies = []
for f0 in qplane_tile_dict[q]:
energy = qseries(fseries, q, f0, return_complex=return_complex)
menergy = abs(energy).max()
energies.append(energy)
if i == 0 or menergy > max_energy:
max_energy = menergy
max_key = q
qplanes[q] = energies
# record q-transform output for peak q
plane = qplanes[max_key]
frequencies = qplane_tile_dict[max_key]
times = plane[0].sample_times.numpy()
plane = numpy.array([v.numpy() for v in plane])
return max_key, times, frequencies, numpy.array(plane)
def qtiling(fseries, qrange, frange, mismatch=0.2):
"""Iterable constructor of QTile tuples
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
qrange:
upper and lower bounds of q range
frange:
upper and lower bounds of frequency range
mismatch:
percentage of desired fractional mismatch
Returns
-------
qplane_tile_dict: 'dict'
dictionary containing Q-tile tuples for a set of Q-planes
"""
qplane_tile_dict = {}
qs = list(_iter_qs(qrange, deltam_f(mismatch)))
for q in qs:
qtilefreq = _iter_frequencies(q, frange, mismatch, fseries.duration)
qplane_tile_dict[q] = numpy.array(list(qtilefreq))
return qplane_tile_dict
def deltam_f(mismatch):
"""Fractional mismatch between neighbouring tiles
Parameters
----------
mismatch: 'float'
percentage of desired fractional mismatch
Returns
-------
:type: 'float'
"""
return 2 * (mismatch / 3.) ** (1/2.)
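# A worked example (not part of the original module): for the default
# mismatch of 0.2 the spacing between neighbouring tiles is
# deltam_f(0.2) = 2 * (0.2 / 3) ** 0.5 ~ 0.516.
def _example_deltam_f():
    return deltam_f(0.2)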
def _iter_qs(qrange, deltam):
"""Iterate over the Q values
Parameters
----------
qrange:
upper and lower bounds of q range
deltam:
Fractional mismatch between neighbouring tiles
Returns
-------
Q-value:
Q value for Q-tile
"""
# work out how many Qs we need
cumum = log(float(qrange[1]) / qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / deltam), 1))
dq = cumum / nplanes
for i in range(nplanes):
yield qrange[0] * exp(2**(1/2.) * dq * (i + .5))
return
def _iter_frequencies(q, frange, mismatch, dur):
"""Iterate over the frequencies of this 'QPlane'
Parameters
----------
q:
q value
frange: 'list'
upper and lower bounds of frequency range
mismatch:
percentage of desired fractional mismatch
dur:
duration of timeseries in seconds
Returns
-------
frequencies:
Q-Tile frequency
"""
# work out how many frequencies we need
minf, maxf = frange
fcum_mismatch = log(float(maxf) / minf) * (2 + q**2)**(1/2.) / 2.
nfreq = int(max(1, ceil(fcum_mismatch / deltam_f(mismatch))))
fstep = fcum_mismatch / nfreq
fstepmin = 1. / dur
# for each frequency, yield a QTile
for i in range(nfreq):
yield (float(minf) *
exp(2 / (2 + q**2)**(1/2.) * (i + .5) * fstep) //
fstepmin * fstepmin)
return
def qseries(fseries, Q, f0, return_complex=False):
"""Calculate the energy 'TimeSeries' for the given fseries
Parameters
----------
fseries: 'pycbc FrequencySeries'
frequency-series data set
Q:
q value
f0:
central frequency
return_complex: {False, bool}
Return the raw complex series instead of the normalized power.
Returns
-------
energy: '~pycbc.types.TimeSeries'
A 'TimeSeries' of the normalized energy from the Q-transform of
this tile against the data.
"""
# normalize and generate bi-square window
qprime = Q / 11**(1/2.)
norm = numpy.sqrt(315. * qprime / (128. * f0))
window_size = 2 * int(f0 / qprime * fseries.duration) + 1
xfrequencies = numpy.linspace(-1., 1., window_size)
start = int((f0 - (f0 / qprime)) * fseries.duration)
end = int(start + window_size)
center = (start + end) // 2
windowed = fseries[start:end] * (1 - xfrequencies ** 2) ** 2 * norm
tlen = (len(fseries)-1) * 2
windowed.resize(tlen)
windowed.roll(-center)
# calculate the time series for this q -value
windowed = FrequencySeries(windowed, delta_f=fseries.delta_f,
epoch=fseries.start_time)
ctseries = TimeSeries(zeros(tlen, dtype=numpy.complex128),
delta_t=fseries.delta_t)
ifft(windowed, ctseries)
if return_complex:
return ctseries
else:
energy = ctseries.squared_norm()
medianenergy = numpy.median(energy.numpy())
return energy / float(medianenergy)
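# A minimal illustrative sketch (not part of the original module): an
# end-to-end q-transform of a short white-noise TimeSeries using the helpers
# above. Assumes TimeSeries.to_frequencyseries is available, as elsewhere in
# PyCBC.
def _example_qtransform():
    ts = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0 / 1024)
    fseries = ts.to_frequencyseries()
    tiles = qtiling(fseries, qrange=(20, 64), frange=(30, 200), mismatch=0.2)
    q, times, freqs, plane = qplane(tiles, fseries)
    # plane has shape (number of frequency rows, number of time samples)
    return q, plane.shape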
| 7,048
| 29.383621
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/filter/fotonfilter.py
|
#!/usr/bin/env python
# Copyright (C) 2015 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import numpy
import sys
from pycbc import frame
# import dependencies that are not standard to pycbc
from foton import Filter, iir2z
def get_swstat_bits(frame_filenames, swstat_channel_name, start_time, end_time):
    ''' This function just checks the first time in the SWSTAT channel
    to see if the filter was on; it doesn't check times beyond that.
    This is just for a first test on a small chunk of data.
To read the SWSTAT bits, reference: https://dcc.ligo.org/DocDB/0107/T1300711/001/LIGO-T1300711-v1.pdf
Bit 0-9 = Filter on/off switches for the 10 filters in an SFM.
Bit 10 = Filter module input switch on/off
Bit 11 = Filter module offset switch on/off
Bit 12 = Filter module output switch on/off
Bit 13 = Filter module limit switch on/off
Bit 14 = Filter module history reset momentary switch
'''
# read frames
swstat = frame.read_frame(frame_filenames, swstat_channel_name,
start_time=start_time, end_time=end_time)
# convert number in channel to binary
bits = bin(int(swstat[0]))
# check if filterbank input or output was off
filterbank_off = False
if len(bits) < 14 or int(bits[-13]) == 0 or int(bits[-11]) == 0:
filterbank_off = True
return bits[-10:], filterbank_off
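# A worked example (not part of the original module): decoding a hypothetical
# SWSTAT value of 5123 = bit0 + bit1 + bit10 + bit12, i.e. filter modules 1
# and 2 engaged with the bank input and output switches on.
def _example_swstat_decode():
    bits = bin(5123) # '0b1010000000011'
    module_bits = bits[-10:] # '0000000011' -> modules 1 and 2 on
    filterbank_off = (len(bits) < 14 or int(bits[-13]) == 0
                      or int(bits[-11]) == 0) # False here
    return module_bits, filterbank_off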
def filter_data(data, filter_name, filter_file, bits, filterbank_off=False,
swstat_channel_name=None):
'''
A naive function to determine if the filter was on at the time
and then filter the data.
'''
# if filterbank is off then return a time series of zeroes
if filterbank_off:
return numpy.zeros(len(data))
# loop over the 10 filters in the filterbank
for i in range(10):
# read the filter
filter = Filter(filter_file[filter_name][i])
# if bit is on then filter the data
bit = int(bits[-(i+1)])
if bit:
logging.info('filtering with filter module %d', i)
# if there are second-order sections then filter with them
if len(filter.sections):
data = filter.apply(data)
# else it is a filter with only gain so apply the gain
else:
coeffs = iir2z(filter_file[filter_name][i])
if len(coeffs) > 1:
                    logging.info('Gain-only filter module returned more than one number')
sys.exit()
gain = coeffs[0]
data = gain * data
return data
def read_gain_from_frames(frame_filenames, gain_channel_name, start_time, end_time):
'''
Returns the gain from the file.
'''
# get timeseries from frame
gain = frame.read_frame(frame_filenames, gain_channel_name,
start_time=start_time, end_time=end_time)
return gain[0]
| 3,609
| 33.711538
| 105
|
py
|
pycbc
|
pycbc-master/pycbc/vetoes/bank_chisq.py
|
# Copyright (C) 2013 Ian Harry, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import logging, numpy
from pycbc.types import Array, zeros, real_same_precision_as, TimeSeries
from pycbc.filter import overlap_cplx, matched_filter_core
from pycbc.waveform import FilterBank
from math import sqrt
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
""" This functions calculates the snr of each bank veto template against
the segment
Parameters
----------
filters: list of FrequencySeries
The list of bank veto templates filters.
stilde: FrequencySeries
The current segment of data.
psd: FrequencySeries
low_frequency_cutoff: float
Returns
-------
snr (list): List of snr time series.
norm (list): List of normalizations factors for the snr time series.
"""
snrs = []
norms = []
for bank_template in filters:
# For every template compute the snr against the stilde segment
snr, _, norm = matched_filter_core(
bank_template, stilde, h_norm=bank_template.sigmasq(psd),
psd=None, low_frequency_cutoff=low_frequency_cutoff)
# SNR time series stored here
snrs.append(snr)
# Template normalization factor stored here
norms.append(norm)
return snrs, norms
def template_overlaps(bank_filters, template, psd, low_frequency_cutoff):
""" This functions calculates the overlaps between the template and the
bank veto templates.
Parameters
----------
bank_filters: List of FrequencySeries
template: FrequencySeries
psd: FrequencySeries
low_frequency_cutoff: float
Returns
-------
overlaps: List of complex overlap values.
"""
overlaps = []
template_ow = template / psd
for bank_template in bank_filters:
overlap = overlap_cplx(template_ow, bank_template,
low_frequency_cutoff=low_frequency_cutoff, normalized=False)
norm = sqrt(1 / template.sigmasq(psd) / bank_template.sigmasq(psd))
overlaps.append(overlap * norm)
if (abs(overlaps[-1]) > 0.99):
errMsg = "Overlap > 0.99 between bank template and filter. "
errMsg += "This bank template will not be used to calculate "
errMsg += "bank chisq for this filter template. The expected "
errMsg += "value will be added to the chisq to account for "
errMsg += "the removal of this template.\n"
errMsg += "Masses of filter template: %e %e\n" \
%(template.params.mass1, template.params.mass2)
errMsg += "Masses of bank filter template: %e %e\n" \
%(bank_template.params.mass1, bank_template.params.mass2)
errMsg += "Overlap: %e" %(abs(overlaps[-1]))
logging.info(errMsg)
return overlaps
def bank_chisq_from_filters(tmplt_snr, tmplt_norm, bank_snrs, bank_norms,
tmplt_bank_matches, indices=None):
""" This function calculates and returns a TimeSeries object containing the
bank veto calculated over a segment.
Parameters
----------
tmplt_snr: TimeSeries
The SNR time series from filtering the segment against the current
search template
tmplt_norm: float
The normalization factor for the search template
bank_snrs: list of TimeSeries
The precomputed list of SNR time series between each of the bank veto
templates and the segment
bank_norms: list of floats
The normalization factors for the list of bank veto templates
(usually this will be the same for all bank veto templates)
tmplt_bank_matches: list of floats
The complex overlap between the search template and each
of the bank templates
indices: {None, Array}, optional
Array of indices into the snr time series. If given, the bank chisq
will only be calculated at these values.
Returns
-------
bank_chisq: TimeSeries of the bank vetos
"""
if indices is not None:
tmplt_snr = Array(tmplt_snr, copy=False)
bank_snrs_tmp = []
for bank_snr in bank_snrs:
bank_snrs_tmp.append(bank_snr.take(indices))
bank_snrs=bank_snrs_tmp
# Initialise bank_chisq as 0s everywhere
bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr))
# Loop over all the bank templates
for i in range(len(bank_snrs)):
bank_match = tmplt_bank_matches[i]
if (abs(bank_match) > 0.99):
# Not much point calculating bank_chisquared if the bank template
# is very close to the filter template. Can also hit numerical
# error due to approximations made in this calculation.
# The value of 2 is the expected addition to the chisq for this
# template
bank_chisq += 2.
continue
bank_norm = sqrt((1 - bank_match*bank_match.conj()).real)
bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm)
tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm)
bank_SNR = Array(bank_SNR, copy=False)
tmplt_SNR = Array(tmplt_SNR, copy=False)
bank_chisq += (bank_SNR - tmplt_SNR).squared_norm()
if indices is not None:
return bank_chisq
else:
return TimeSeries(bank_chisq, delta_t=tmplt_snr.delta_t,
epoch=tmplt_snr.start_time, copy=False)
class SingleDetBankVeto(object):
"""This class reads in a template bank file for a bank veto, handles the
memory management of its filters internally, and calculates the bank
veto TimeSeries.
"""
def __init__(self, bank_file, flen, delta_f, f_low, cdtype, approximant=None, **kwds):
if bank_file is not None:
self.do = True
self.column_name = "bank_chisq"
self.table_dof_name = "bank_chisq_dof"
self.cdtype = cdtype
self.delta_f = delta_f
self.f_low = f_low
self.seg_len_freq = flen
self.seg_len_time = (self.seg_len_freq-1)*2
logging.info("Read in bank veto template bank")
bank_veto_bank = FilterBank(bank_file,
self.seg_len_freq,
self.delta_f, self.cdtype,
low_frequency_cutoff=f_low,
approximant=approximant, **kwds)
self.filters = list(bank_veto_bank)
self.dof = len(bank_veto_bank) * 2
self._overlaps_cache = {}
self._segment_snrs_cache = {}
else:
self.do = False
def cache_segment_snrs(self, stilde, psd):
key = (id(stilde), id(psd))
if key not in self._segment_snrs_cache:
logging.info("Precalculate the bank veto template snrs")
data = segment_snrs(self.filters, stilde, psd, self.f_low)
self._segment_snrs_cache[key] = data
return self._segment_snrs_cache[key]
def cache_overlaps(self, template, psd):
key = (id(template.params), id(psd))
if key not in self._overlaps_cache:
logging.info("...Calculate bank veto overlaps")
o = template_overlaps(self.filters, template, psd, self.f_low)
self._overlaps_cache[key] = o
return self._overlaps_cache[key]
def values(self, template, psd, stilde, snrv, norm, indices):
"""
Returns
-------
bank_chisq_from_filters: TimeSeries of bank veto values - if indices
is None then evaluated at all time samples, if not then only at
requested sample indices
bank_chisq_dof: int, approx number of statistical degrees of freedom
"""
if self.do:
logging.info("...Doing bank veto")
overlaps = self.cache_overlaps(template, psd)
bank_veto_snrs, bank_veto_norms = self.cache_segment_snrs(stilde, psd)
chisq = bank_chisq_from_filters(snrv, norm, bank_veto_snrs,
bank_veto_norms, overlaps, indices)
dof = numpy.repeat(self.dof, len(chisq))
return chisq, dof
else:
return None, None
class SingleDetSkyMaxBankVeto(SingleDetBankVeto):
"""Stub for precessing bank veto if anyone ever wants to code it up.
"""
def __init__(self, *args, **kwds):
super(SingleDetSkyMaxBankVeto, self).__init__(*args, **kwds)
def values(self, *args, **kwargs):
if self.do:
err_msg = "Precessing single detector sky-max bank veto has not "
err_msg += "been written. If you want to use it, why not help "
err_msg += "write it?"
raise NotImplementedError(err_msg)
else:
return None, None
| 9,693
| 37.931727
| 90
|
py
|
pycbc
|
pycbc-master/pycbc/vetoes/chisq_cuda.py
|
# Copyright (C) 2015 Alex Nitz, Josh Willis
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import pycuda.driver, numpy
from pycuda.elementwise import ElementwiseKernel
from pycuda.tools import context_dependent_memoize, dtype_to_ctype
import pycuda.gpuarray
from mako.template import Template
from pycuda.compiler import SourceModule
@context_dependent_memoize
def get_accum_diff_sq_kernel(dtype_x, dtype_z):
return ElementwiseKernel(
"%(tp_a)s *x, %(tp_c)s *z" % {
"tp_a": dtype_to_ctype(dtype_x),
"tp_c": dtype_to_ctype(dtype_z),
},
"x[i] += norm(z[i]) ",
"chisq_accum")
def chisq_accum_bin(chisq, q):
krnl = get_accum_diff_sq_kernel(chisq.dtype, q.dtype)
krnl(chisq.data, q.data)
chisqkernel = Template("""
#include <stdio.h>
__global__ void power_chisq_at_points_${NP}(
%if fuse:
float2* htilde,
float2* stilde,
%else:
float2* corr,
%endif
float2* outc, unsigned int N,
%for p in range(NP):
float phase${p},
%endfor
unsigned int* kmin,
unsigned int* kmax,
unsigned int* bv,
unsigned int nbins){
__shared__ unsigned int s;
__shared__ unsigned int e;
__shared__ float2 chisq[${NT} * ${NP}];
// load integration boundaries (might not be bin boundaries if bin is large)
if (threadIdx.x == 0){
s = kmin[blockIdx.x];
e = kmax[blockIdx.x];
}
% for p in range(NP):
chisq[threadIdx.x + ${NT*p}].x = 0;
chisq[threadIdx.x + ${NT*p}].y = 0;
% endfor
__syncthreads();
// calculate the chisq integral for each thread
// sliding reduction for each thread from s, e
for (int i = threadIdx.x + s; i < e; i += blockDim.x){
float re, im;
%if fuse:
float2 qt, st, ht;
st = stilde[i];
ht = htilde[i];
qt.x = ht.x * st.x + ht.y * st.y;
qt.y = ht.x * st.y - ht.y * st.x;
%else:
float2 qt = corr[i];
%endif
%for p in range(NP):
sincosf(phase${p} * i, &im, &re);
chisq[threadIdx.x + ${NT*p}].x += re * qt.x - im * qt.y;
chisq[threadIdx.x + ${NT*p}].y += im * qt.x + re * qt.y;
%endfor
}
float x, y, x2, y2;
// logarithmic reduction within thread block
for (int j=${NT} / 2; j>=1; j/=2){
if (threadIdx.x <j){
%for p in range(NP):
__syncthreads();
x = chisq[threadIdx.x + ${NT*p}].x;
y = chisq[threadIdx.x + ${NT*p}].y;
x2 = chisq[threadIdx.x + j + ${NT*p}].x;
y2 = chisq[threadIdx.x + j + ${NT*p}].y;
__syncthreads();
chisq[threadIdx.x + ${NT*p}].x = x + x2;
chisq[threadIdx.x + ${NT*p}].y = y + y2;
%endfor
}
}
if (threadIdx.x == 0){
% for p in range(NP):
atomicAdd(&outc[bv[blockIdx.x] + nbins * ${p}].x, chisq[0 + ${NT*p}].x);
atomicAdd(&outc[bv[blockIdx.x] + nbins * ${p}].y, chisq[0 + ${NT*p}].y);
% endfor
}
}
""")
chisqkernel_pow2 = Template("""
#include <stdio.h>
#include <stdint.h> // For uint64_t
__global__ void power_chisq_at_points_${NP}_pow2(
%if fuse:
float2* htilde,
float2* stilde,
%else:
float2* corr,
%endif
float2* outc, unsigned int N,
%for p in range(NP):
unsigned int points${p},
%endfor
unsigned int* kmin,
unsigned int* kmax,
unsigned int* bv,
unsigned int nbins){
__shared__ unsigned int s;
__shared__ unsigned int e;
__shared__ float2 chisq[${NT} * ${NP}];
float twopi = 6.283185307179586f;
uint64_t NN;
NN = (uint64_t) N;
// load integration boundaries (might not be bin boundaries if bin is large)
if (threadIdx.x == 0){
s = kmin[blockIdx.x];
e = kmax[blockIdx.x];
}
% for p in range(NP):
chisq[threadIdx.x + ${NT*p}].x = 0;
chisq[threadIdx.x + ${NT*p}].y = 0;
% endfor
__syncthreads();
// calculate the chisq integral for each thread
// sliding reduction for each thread from s, e
for (int i = threadIdx.x + s; i < e; i += blockDim.x){
float re, im;
%if fuse:
float2 qt, st, ht;
st = stilde[i];
ht = htilde[i];
qt.x = ht.x * st.x + ht.y * st.y;
qt.y = ht.x * st.y - ht.y * st.x;
%else:
float2 qt = corr[i];
%endif
%for p in range(NP):
uint64_t prod${p} = points${p} * i;
unsigned int k${p} = (unsigned int) (prod${p}&(NN-1));
float phase${p} = twopi * k${p}/((float) N);
__sincosf(phase${p}, &im, &re);
chisq[threadIdx.x + ${NT*p}].x += re * qt.x - im * qt.y;
chisq[threadIdx.x + ${NT*p}].y += im * qt.x + re * qt.y;
%endfor
}
float x, y, x2, y2;
// logarithmic reduction within thread block
for (int j=${NT} / 2; j>=1; j/=2){
if (threadIdx.x <j){
%for p in range(NP):
__syncthreads();
x = chisq[threadIdx.x + ${NT*p}].x;
y = chisq[threadIdx.x + ${NT*p}].y;
x2 = chisq[threadIdx.x + j + ${NT*p}].x;
y2 = chisq[threadIdx.x + j + ${NT*p}].y;
__syncthreads();
chisq[threadIdx.x + ${NT*p}].x = x + x2;
chisq[threadIdx.x + ${NT*p}].y = y + y2;
%endfor
}
}
if (threadIdx.x == 0){
% for p in range(NP):
atomicAdd(&outc[bv[blockIdx.x] + nbins * ${p}].x, chisq[0 + ${NT*p}].x);
atomicAdd(&outc[bv[blockIdx.x] + nbins * ${p}].y, chisq[0 + ${NT*p}].y);
% endfor
}
}
""")
_pchisq_cache = {}
def get_pchisq_fn(np, fuse_correlate=False):
if np not in _pchisq_cache:
nt = 256
mod = SourceModule(chisqkernel.render(NT=nt, NP=np, fuse=fuse_correlate))
fn = mod.get_function("power_chisq_at_points_%s" % (np))
if fuse_correlate:
fn.prepare("PPPI" + "f" * np + "PPPI")
else:
fn.prepare("PPI" + "f" * np + "PPPI")
_pchisq_cache[np] = (fn, nt)
return _pchisq_cache[np]
_pchisq_cache_pow2 = {}
def get_pchisq_fn_pow2(np, fuse_correlate=False):
if np not in _pchisq_cache_pow2:
nt = 256
mod = SourceModule(chisqkernel_pow2.render(NT=nt, NP=np,
fuse=fuse_correlate))
fn = mod.get_function("power_chisq_at_points_%s_pow2" % (np))
if fuse_correlate:
fn.prepare("PPPI" + "I" * np + "PPPI")
else:
fn.prepare("PPI" + "I" * np + "PPPI")
_pchisq_cache_pow2[np] = (fn, nt)
return _pchisq_cache_pow2[np]
def get_cached_bin_layout(bins):
bv, kmin, kmax = [], [], []
for i in range(len(bins)-1):
s, e = bins[i], bins[i+1]
BS = 4096
if (e - s) < BS:
bv.append(i)
kmin.append(s)
kmax.append(e)
else:
k = list(numpy.arange(s, e, BS/2))
kmin += k
kmax += k[1:] + [e]
bv += [i]*len(k)
bv = pycuda.gpuarray.to_gpu_async(numpy.array(bv, dtype=numpy.uint32))
kmin = pycuda.gpuarray.to_gpu_async(numpy.array(kmin, dtype=numpy.uint32))
kmax = pycuda.gpuarray.to_gpu_async(numpy.array(kmax, dtype=numpy.uint32))
return kmin, kmax, bv
def shift_sum_points(num, N, arg_tuple):
#fuse = 'fuse' in corr.gpu_callback_method
fuse = False
fn, nt = get_pchisq_fn(num, fuse_correlate = fuse)
corr, outp, phase, np, nb, N, kmin, kmax, bv, nbins = arg_tuple
args = [(nb, 1), (nt, 1, 1)]
if fuse:
args += [corr.htilde.data.gpudata, corr.stilde.data.gpudata]
else:
args += [corr.data.gpudata]
args +=[outp.gpudata, N] + phase[0:num] + [kmin.gpudata, kmax.gpudata, bv.gpudata, nbins]
fn.prepared_call(*args)
outp = outp[num*nbins:]
phase = phase[num:]
np -= num
return outp, phase, np
def shift_sum_points_pow2(num, arg_tuple):
#fuse = 'fuse' in corr.gpu_callback_method
fuse = False
fn, nt = get_pchisq_fn_pow2(num, fuse_correlate = fuse)
corr, outp, points, np, nb, N, kmin, kmax, bv, nbins = arg_tuple
args = [(nb, 1), (nt, 1, 1)]
if fuse:
args += [corr.htilde.data.gpudata, corr.stilde.data.gpudata]
else:
args += [corr.data.gpudata]
args += [outp.gpudata, N] + points[0:num] + [kmin.gpudata,
kmax.gpudata, bv.gpudata, nbins]
fn.prepared_call(*args)
outp = outp[num*nbins:]
points = points[num:]
np -= num
return outp, points, np
_pow2_cache = {}
def get_cached_pow2(N):
if N not in _pow2_cache:
_pow2_cache[N] = not(N & (N-1))
return _pow2_cache[N]
def shift_sum(corr, points, bins):
kmin, kmax, bv = get_cached_bin_layout(bins)
nb = len(kmin)
N = numpy.uint32(len(corr))
is_pow2 = get_cached_pow2(N)
nbins = numpy.uint32(len(bins) - 1)
outc = pycuda.gpuarray.zeros((len(points), nbins), dtype=numpy.complex64)
outp = outc.reshape(nbins * len(points))
np = len(points)
if is_pow2:
lpoints = points.tolist()
while np > 0:
cargs = (corr, outp, lpoints, np, nb, N, kmin, kmax, bv, nbins)
if np >= 4:
outp, lpoints, np = shift_sum_points_pow2(4, cargs)
elif np >= 3:
outp, lpoints, np = shift_sum_points_pow2(3, cargs)
elif np >= 2:
outp, lpoints, np = shift_sum_points_pow2(2, cargs)
elif np == 1:
outp, lpoints, np = shift_sum_points_pow2(1, cargs)
else:
phase = [numpy.float32(p * 2.0 * numpy.pi / N) for p in points]
while np > 0:
cargs = (corr, outp, phase, np, nb, N, kmin, kmax, bv, nbins)
if np >= 4:
outp, phase, np = shift_sum_points(4, cargs) # pylint:disable=no-value-for-parameter
elif np >= 3:
outp, phase, np = shift_sum_points(3, cargs) # pylint:disable=no-value-for-parameter
elif np >= 2:
outp, phase, np = shift_sum_points(2, cargs) # pylint:disable=no-value-for-parameter
elif np == 1:
outp, phase, np = shift_sum_points(1, cargs) # pylint:disable=no-value-for-parameter
o = outc.get()
return (o.conj() * o).sum(axis=1).real
| 12,354
| 34.3
| 100
|
py
|
pycbc
|
pycbc-master/pycbc/vetoes/chisq.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy, logging, math, pycbc.fft
from pycbc.types import zeros, real_same_precision_as, TimeSeries, complex_same_precision_as
from pycbc.filter import sigmasq_series, make_frequency_series, matched_filter_core, get_cutoff_indices
from pycbc.scheme import schemed
import pycbc.pnutils
BACKEND_PREFIX="pycbc.vetoes.chisq_"
def power_chisq_bins_from_sigmasq_series(sigmasq_series, num_bins, kmin, kmax):
"""Returns bins of equal power for use with the chisq functions
Parameters
----------
sigmasq_series: FrequencySeries
A frequency series containing the cumulative power of a filter template
preweighted by a psd.
num_bins: int
The number of chisq bins to calculate.
    kmin: int
        The index of the first frequency sample to include when distributing
        the power among bins.
    kmax: int
        The index one past the last frequency sample to include.
Returns
-------
bins: List of ints
A list of the edges of the chisq bins is returned.
"""
sigmasq = sigmasq_series[kmax - 1]
edge_vec = numpy.arange(0, num_bins) * sigmasq / num_bins
bins = numpy.searchsorted(sigmasq_series[kmin:kmax], edge_vec, side='right')
bins += kmin
return numpy.append(bins, kmax)
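# A minimal illustrative sketch (not part of the original module): with a toy
# cumulative power series that rises linearly, four equal-power bins split the
# band into four equal-length chunks.
def _example_power_chisq_bins_from_sigmasq_series():
    toy = numpy.cumsum(numpy.ones(16)) # 1, 2, ..., 16
    return power_chisq_bins_from_sigmasq_series(toy, 4, 0, 16)
    # -> [0, 4, 8, 12, 16]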
def power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff=None,
high_frequency_cutoff=None):
"""Returns bins of equal power for use with the chisq functions
Parameters
----------
htilde: FrequencySeries
A frequency series containing the template waveform
num_bins: int
The number of chisq bins to calculate.
psd: FrequencySeries
A frequency series containing the psd. Its length must be commensurate
with the template waveform.
low_frequency_cutoff: {None, float}, optional
The low frequency cutoff to apply
high_frequency_cutoff: {None, float}, optional
The high frequency cutoff to apply
Returns
-------
bins: List of ints
A list of the edges of the chisq bins is returned.
"""
sigma_vec = sigmasq_series(htilde, psd, low_frequency_cutoff,
high_frequency_cutoff).numpy()
kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
high_frequency_cutoff,
htilde.delta_f,
(len(htilde)-1)*2)
return power_chisq_bins_from_sigmasq_series(sigma_vec, num_bins, kmin, kmax)
@schemed(BACKEND_PREFIX)
def chisq_accum_bin(chisq, q):
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@schemed(BACKEND_PREFIX)
def shift_sum(v1, shifts, bins):
""" Calculate the time shifted sum of the FrequencySeries
"""
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices):
"""Calculate the chisq timeseries from precomputed values for only select points.
This function calculates the chisq at each point by explicitly time shifting
and summing each bin. No FFT is involved.
Parameters
----------
corr: FrequencySeries
The product of the template and data in the frequency domain.
snr: numpy.ndarray
The unnormalized array of snr values at only the selected points in `indices`.
snr_norm: float
The normalization of the snr (EXPLAINME : refer to Findchirp paper?)
bins: List of integers
The edges of the equal power bins
indices: Array
The indices where we will calculate the chisq. These must be relative
to the given `corr` series.
Returns
-------
chisq: Array
An array containing only the chisq at the selected points.
"""
num_bins = len(bins) - 1
chisq = shift_sum(corr, indices, bins) # pylint:disable=assignment-from-no-return
return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
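# Usage sketch (hypothetical inputs; `corr`, `snrv` and `snr_norm` would
# normally come from matched_filter_core() and `bins` from power_chisq_bins()):
#
#     points = numpy.array([1024, 2048])    # sample indices of interest
#     chisq_pts = power_chisq_at_points_from_precomputed(corr, snrv, snr_norm,
#                                                        bins, points)
#     # chisq_pts[k] is the (non-reduced) chisq at points[k]; dividing by
#     # 2 * (len(bins) - 1) - 2 gives the reduced chisq.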
_q_l = None
_qtilde_l = None
_chisq_l = None
def power_chisq_from_precomputed(corr, snr, snr_norm, bins, indices=None, return_bins=False):
"""Calculate the chisq timeseries from precomputed values.
This function calculates the chisq at all times by performing an
inverse FFT of each bin.
Parameters
----------
corr: FrequencySeries
The product of the template and data in the frequency domain.
snr: TimeSeries
The unnormalized snr time series.
snr_norm:
The snr normalization factor; the normalized SNR is snr * snr_norm.
bins: List of integers
The edges of the chisq bins.
indices: {Array, None}, optional
Index values into snr that indicate where to calculate
chisq values. If none, calculate chisq for all possible indices.
return_bins: {boolean, False}, optional
Return a list of the SNRs for each chisq bin.
Returns
-------
chisq: TimeSeries
"""
# Get workspace memory
global _q_l, _qtilde_l, _chisq_l
bin_snrs = []
if _q_l is None or len(_q_l) != len(snr):
q = zeros(len(snr), dtype=complex_same_precision_as(snr))
qtilde = zeros(len(snr), dtype=complex_same_precision_as(snr))
_q_l = q
_qtilde_l = qtilde
else:
q = _q_l
qtilde = _qtilde_l
if indices is not None:
snr = snr.take(indices)
if _chisq_l is None or len(_chisq_l) < len(snr):
chisq = zeros(len(snr), dtype=real_same_precision_as(snr))
_chisq_l = chisq
else:
chisq = _chisq_l[0:len(snr)]
chisq.clear()
num_bins = len(bins) - 1
for j in range(num_bins):
k_min = int(bins[j])
k_max = int(bins[j+1])
qtilde[k_min:k_max] = corr[k_min:k_max]
pycbc.fft.ifft(qtilde, q)
qtilde[k_min:k_max].clear()
if return_bins:
bin_snrs.append(TimeSeries(q * snr_norm * num_bins ** 0.5,
delta_t=snr.delta_t,
epoch=snr.start_time))
if indices is not None:
chisq_accum_bin(chisq, q.take(indices))
else:
chisq_accum_bin(chisq, q)
chisq = (chisq * num_bins - snr.squared_norm()) * (snr_norm ** 2.0)
if indices is None:
chisq = TimeSeries(chisq, delta_t=snr.delta_t, epoch=snr.start_time, copy=False)
if return_bins:
return chisq, bin_snrs
else:
return chisq
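# Usage sketch (hypothetical inputs, mirroring what power_chisq() does below):
#
#     chisq_ts = power_chisq_from_precomputed(corr, snr, snr_norm, bins)
#     # `chisq_ts` is a TimeSeries covering every sample of `snr`. Pass
#     # `indices=` to restrict the calculation to selected samples, or
#     # `return_bins=True` to also receive the per-bin SNR TimeSeries.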
def fastest_power_chisq_at_points(corr, snr, snrv, snr_norm, bins, indices):
"""Calculate the chisq values for only selected points.
This function looks at the number of points to be evaluated and selects
the fastest method (FFT, or direct time shift and sum). In either case,
only the selected points are returned.
Parameters
----------
corr: FrequencySeries
The product of the template and data in the frequency domain.
snr: Array
The unnormalized snr time series.
snrv: Array
The unnormalized snr values at the selected points in `indices`.
snr_norm: float
The snr normalization factor; the normalized SNR is snr * snr_norm.
bins: List of integers
The edges of the equal power bins
indices: Array
The indices where we will calculate the chisq. These must be relative
to the given `snr` series.
Returns
-------
chisq: Array
An array containing only the chisq at the selected points.
"""
import pycbc.scheme
if isinstance(pycbc.scheme.mgr.state, pycbc.scheme.CPUScheme):
# We don't have that many points so do the direct time shift.
return power_chisq_at_points_from_precomputed(corr, snrv,
snr_norm, bins, indices)
else:
# We have a lot of points so it is faster to use the fourier transform
return power_chisq_from_precomputed(corr, snr, snr_norm, bins,
indices=indices)
def power_chisq(template, data, num_bins, psd,
low_frequency_cutoff=None,
high_frequency_cutoff=None,
return_bins=False):
"""Calculate the chisq timeseries
Parameters
----------
template: FrequencySeries or TimeSeries
A time or frequency series that contains the filter template.
data: FrequencySeries or TimeSeries
A time or frequency series that contains the data to filter. The length
must be commensurate with the template.
(i.e., the same duration and frequency resolution as the template once both
are converted to frequency series).
num_bins: int
The number of frequency bins used for chisq. The number of statistical
degrees of freedom ('dof') is 2*num_bins-2.
psd: FrequencySeries
The psd of the data.
low_frequency_cutoff: {None, float}, optional
The low frequency cutoff for the filter
high_frequency_cutoff: {None, float}, optional
The high frequency cutoff for the filter
return_bins: {boolean, False}, optional
Return a list of the individual chisq bins
Returns
-------
chisq: TimeSeries
TimeSeries containing the chisq values for all times.
"""
htilde = make_frequency_series(template)
stilde = make_frequency_series(data)
bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff,
high_frequency_cutoff)
corra = zeros((len(htilde) - 1) * 2, dtype=htilde.dtype)
total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd,
low_frequency_cutoff, high_frequency_cutoff,
corr_out=corra)
return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins)
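# End-to-end sketch (assumes `template`, `data` and `psd` of commensurate
# length already exist; all names are placeholders):
#
#     nbins = 16
#     chisq = power_chisq(template, data, nbins, psd,
#                         low_frequency_cutoff=20.0)
#     rchisq = chisq / (2 * nbins - 2)   # reduced chisq, dof = 2*num_bins - 2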
class SingleDetPowerChisq(object):
"""Class that handles precomputation and memory management for efficiently
running the power chisq in a single detector inspiral analysis.
"""
def __init__(self, num_bins=0, snr_threshold=None):
if not (num_bins == "0" or num_bins == 0):
self.do = True
self.column_name = "chisq"
self.table_dof_name = "chisq_dof"
self.num_bins = num_bins
else:
self.do = False
self.snr_threshold = snr_threshold
@staticmethod
def parse_option(row, arg):
safe_dict = {'max': max, 'min': min}
safe_dict.update(row.__dict__)
safe_dict.update(math.__dict__)
safe_dict.update(pycbc.pnutils.__dict__)
return eval(arg, {"__builtins__":None}, safe_dict)
def cached_chisq_bins(self, template, psd):
from pycbc.opt import LimitedSizeDict
key = id(psd)
if not hasattr(psd, '_chisq_cached_key'):
psd._chisq_cached_key = {}
if not hasattr(template, '_bin_cache'):
template._bin_cache = LimitedSizeDict(size_limit=2**2)
if key not in template._bin_cache or id(template.params) not in psd._chisq_cached_key:
psd._chisq_cached_key[id(template.params)] = True
num_bins = int(self.parse_option(template, self.num_bins))
if hasattr(psd, 'sigmasq_vec') and \
template.approximant in psd.sigmasq_vec:
kmin = int(template.f_lower / psd.delta_f)
kmax = template.end_idx
bins = power_chisq_bins_from_sigmasq_series(
psd.sigmasq_vec[template.approximant],
num_bins,
kmin,
kmax
)
else:
bins = power_chisq_bins(template, num_bins, psd, template.f_lower)
template._bin_cache[key] = bins
return template._bin_cache[key]
def values(self, corr, snrv, snr_norm, psd, indices, template):
""" Calculate the chisq at points given by indices.
Returns
-------
chisq: Array
Chisq values, one for each sample index, or zero for points below
the specified SNR threshold
chisq_dof: Array
Number of statistical degrees of freedom for the chisq test
in the given template, equal to 2 * num_bins - 2
"""
if self.do:
num_above = len(indices)
if self.snr_threshold:
above = abs(snrv * snr_norm) > self.snr_threshold
num_above = above.sum()
logging.info('%s above chisq activation threshold' % num_above)
above_indices = indices[above]
above_snrv = snrv[above]
chisq_out = numpy.zeros(len(indices), dtype=numpy.float32)
dof = -100
else:
above_indices = indices
above_snrv = snrv
if num_above > 0:
bins = self.cached_chisq_bins(template, psd)
# len(bins) is number of bin edges, num_bins = len(bins) - 1
dof = (len(bins) - 1) * 2 - 2
_chisq = power_chisq_at_points_from_precomputed(corr,
above_snrv, snr_norm, bins, above_indices)
if self.snr_threshold:
if num_above > 0:
chisq_out[above] = _chisq
else:
chisq_out = _chisq
return chisq_out, numpy.repeat(dof, len(indices))# dof * numpy.ones_like(indices)
else:
return None, None
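# Usage sketch for SingleDetPowerChisq (hypothetical objects from a
# single-detector matched-filter loop; `template` must carry `.params` and
# `.f_lower` as in the pycbc_inspiral workflow, and `num_bins` is a string
# expression evaluated per template):
#
#     power_chisq_calc = SingleDetPowerChisq(num_bins='16', snr_threshold=5.5)
#     chisq_vals, chisq_dof = power_chisq_calc.values(corr, snrv, snr_norm,
#                                                     psd, indices, template)
#     # Points below the SNR threshold keep chisq 0; if no point exceeds it,
#     # the dof array holds the sentinel value -100.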
class SingleDetSkyMaxPowerChisq(SingleDetPowerChisq):
"""Class that handles precomputation and memory management for efficiently
running the power chisq in a single detector inspiral analysis when
maximizing analytically over sky location.
"""
def __init__(self, **kwds):
super(SingleDetSkyMaxPowerChisq, self).__init__(**kwds)
self.template_mem = None
self.corr_mem = None
def calculate_chisq_bins(self, template, psd):
""" Obtain the chisq bins for this template and PSD.
"""
num_bins = int(self.parse_option(template, self.num_bins))
if hasattr(psd, 'sigmasq_vec') and \
template.approximant in psd.sigmasq_vec:
kmin = int(template.f_lower / psd.delta_f)
kmax = template.end_idx
bins = power_chisq_bins_from_sigmasq_series(
psd.sigmasq_vec[template.approximant], num_bins, kmin, kmax)
else:
bins = power_chisq_bins(template, num_bins, psd, template.f_lower)
return bins
def values(self, corr_plus, corr_cross, snrv, psd,
indices, template_plus, template_cross, u_vals,
hplus_cross_corr, hpnorm, hcnorm):
""" Calculate the chisq at points given by indices.
Returns
-------
chisq: Array
Chisq values, one for each sample index
chisq_dof: Array
Number of statistical degrees of freedom for the chisq test
in the given template
"""
if self.do:
num_above = len(indices)
if self.snr_threshold:
above = abs(snrv) > self.snr_threshold
num_above = above.sum()
logging.info('%s above chisq activation threshold' % num_above)
above_indices = indices[above]
above_snrv = snrv[above]
u_vals = u_vals[above]
rchisq = numpy.zeros(len(indices), dtype=numpy.float32)
dof = -100
else:
above_indices = indices
above_snrv = snrv
if num_above > 0:
chisq = []
curr_tmplt_mult_fac = 0.
curr_corr_mult_fac = 0.
if self.template_mem is None or \
(not len(self.template_mem) == len(template_plus)):
self.template_mem = zeros(len(template_plus),
dtype=complex_same_precision_as(corr_plus))
if self.corr_mem is None or \
(not len(self.corr_mem) == len(corr_plus)):
self.corr_mem = zeros(len(corr_plus),
dtype=complex_same_precision_as(corr_plus))
tmplt_data = template_cross.data
corr_data = corr_cross.data
numpy.copyto(self.template_mem.data, template_cross.data)
numpy.copyto(self.corr_mem.data, corr_cross.data)
template_cross._data = self.template_mem.data
corr_cross._data = self.corr_mem.data
for lidx, index in enumerate(above_indices):
above_local_indices = numpy.array([index])
above_local_snr = numpy.array([above_snrv[lidx]])
local_u_val = u_vals[lidx]
# Construct template from _plus and _cross
# Note that this modifies in place, so we store that and
# revert on the next pass.
template = template_cross.multiply_and_add(template_plus,
local_u_val-curr_tmplt_mult_fac)
curr_tmplt_mult_fac = local_u_val
template.f_lower = template_plus.f_lower
template.params = template_plus.params
# Construct the corr vector
norm_fac = local_u_val*local_u_val + 1
norm_fac += 2 * local_u_val * hplus_cross_corr
norm_fac = hcnorm / (norm_fac**0.5)
hp_fac = local_u_val * hpnorm / hcnorm
corr = corr_cross.multiply_and_add(corr_plus,
hp_fac - curr_corr_mult_fac)
curr_corr_mult_fac = hp_fac
bins = self.calculate_chisq_bins(template, psd)
dof = (len(bins) - 1) * 2 - 2
curr_chisq = power_chisq_at_points_from_precomputed(corr,
above_local_snr/ norm_fac, norm_fac,
bins, above_local_indices)
chisq.append(curr_chisq[0])
chisq = numpy.array(chisq)
# Must reset corr and template to original values!
template_cross._data = tmplt_data
corr_cross._data = corr_data
if self.snr_threshold:
if num_above > 0:
rchisq[above] = chisq
else:
rchisq = chisq
return rchisq, numpy.repeat(dof, len(indices))# dof * numpy.ones_like(indices)
else:
return None, None
| 19,668
| 36.971042
| 103
|
py
|
pycbc
|
pycbc-master/pycbc/vetoes/__init__.py
|
from .chisq import *
from .bank_chisq import *
from .autochisq import *
| 72
| 17.25
| 25
|
py
|
pycbc
|
pycbc-master/pycbc/vetoes/autochisq.py
|
# Copyright (C) 2013 Stas Babak
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from pycbc.filter import make_frequency_series
from pycbc.filter import matched_filter_core
from pycbc.types import Array
import numpy as np
import logging
BACKEND_PREFIX="pycbc.vetoes.autochisq_"
def autochisq_from_precomputed(sn, corr_sn, hautocorr, indices,
stride=1, num_points=None, oneside=None,
twophase=True, maxvalued=False):
"""
Compute correlation (two sided) between template and data
and compares with autocorrelation of the template: C(t) = IFFT(A*A/S(f))
Parameters
----------
sn: Array[complex]
normalized (!) array of complex snr for the template that produced the
trigger(s) being tested
corr_sn : Array[complex]
normalized (!) array of complex snr for the template that you want to
produce a correlation chisq test for. In the [common] case that sn and
corr_sn are the same, you are computing auto-correlation chisq.
hautocorr: Array[complex]
time domain autocorrelation for the template
indices: Array[int]
compute the correlation chi-square at the points specified in this array.
num_points: [int, optional; default=None]
Number of points used for autochisq on each side, if None all points
are used.
stride: [int, optional; default = 1]
stride for points selection for autochisq
total length <= 2*num_points*stride
oneside: [str, optional; default=None]
whether to use one or two sided autochisquare. If None (or not
provided) twosided chi-squared will be used. If given, options are
'left' or 'right', to do one-sided chi-squared on the left or right.
twophase: Boolean, optional; default=True
If True calculate the auto-chisq using both phases of the filter.
If False only use the phase of the obtained trigger(s).
maxvalued: Boolean, optional; default=False
Return the largest auto-chisq at any of the points tested if True.
If False, return the sum of auto-chisq at all points tested.
Returns
-------
autochisq: [tuple]
a tuple (dof, achisq, indices) giving the number of statistical degrees
of freedom of the test, the auto-chisq values, and the sample indices at
which they were computed.
"""
Nsnr = len(sn)
achisq = np.zeros(len(indices))
num_points_all = int(Nsnr/stride)
if num_points is None:
num_points = num_points_all
if (num_points > num_points_all):
num_points = num_points_all
snrabs = np.abs(sn[indices])
cphi_array = (sn[indices]).real / snrabs
sphi_array = (sn[indices]).imag / snrabs
start_point = - stride*num_points
end_point = stride*num_points+1
if oneside == 'left':
achisq_idx_list = np.arange(start_point, 0, stride)
elif oneside == 'right':
achisq_idx_list = np.arange(stride, end_point, stride)
else:
achisq_idx_list_pt1 = np.arange(start_point, 0, stride)
achisq_idx_list_pt2 = np.arange(stride, end_point, stride)
achisq_idx_list = np.append(achisq_idx_list_pt1,
achisq_idx_list_pt2)
hauto_corr_vec = hautocorr[achisq_idx_list]
hauto_norm = hauto_corr_vec.real*hauto_corr_vec.real
# REMOVE THIS LINE TO REPRODUCE OLD RESULTS
hauto_norm += hauto_corr_vec.imag*hauto_corr_vec.imag
chisq_norm = 1.0 - hauto_norm
for ip,ind in enumerate(indices):
curr_achisq_idx_list = achisq_idx_list + ind
cphi = cphi_array[ip]
sphi = sphi_array[ip]
# By construction, the other "phase" of the SNR is 0
snr_ind = sn[ind].real*cphi + sn[ind].imag*sphi
# Wrap index if needed (maybe should fail in this case?)
if curr_achisq_idx_list[0] < 0:
curr_achisq_idx_list[curr_achisq_idx_list < 0] += Nsnr
if curr_achisq_idx_list[-1] > (Nsnr - 1):
curr_achisq_idx_list[curr_achisq_idx_list > (Nsnr-1)] -= Nsnr
z = corr_sn[curr_achisq_idx_list].real*cphi + \
corr_sn[curr_achisq_idx_list].imag*sphi
dz = z - hauto_corr_vec.real*snr_ind
curr_achisq_list = dz*dz/chisq_norm
if twophase:
chisq_norm = 1.0 - hauto_norm
z = -corr_sn[curr_achisq_idx_list].real*sphi + \
corr_sn[curr_achisq_idx_list].imag*cphi
dz = z - hauto_corr_vec.imag*snr_ind
curr_achisq_list += dz*dz/chisq_norm
if maxvalued:
achisq[ip] = curr_achisq_list.max()
else:
achisq[ip] = curr_achisq_list.sum()
dof = num_points
if oneside is None:
dof = dof * 2
if twophase:
dof = dof * 2
return dof, achisq, indices
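# Usage sketch (hypothetical inputs: `snr_normed` is a normalised complex SNR
# Array and `hauto` the normalised template autocorrelation, as prepared by
# SingleDetAutoChisq below):
#
#     idx = np.array([4096])
#     dof, achisq, _ = autochisq_from_precomputed(snr_normed, snr_normed,
#                                                 hauto, idx,
#                                                 stride=3, num_points=100)
#     # Two-sided and two-phase by default, so dof = 100 * 2 * 2 = 400 here.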
class SingleDetAutoChisq(object):
"""Class that handles precomputation and memory management for efficiently
running the auto chisq in a single detector inspiral analysis.
"""
def __init__(self, stride, num_points, onesided=None, twophase=False,
reverse_template=False, take_maximum_value=False,
maximal_value_dof=None):
"""
Initialize autochisq calculation instance
Parameters
-----------
stride : int
Number of sample points between points at which auto-chisq is
calculated.
num_points : int
Number of sample points at which to calculate auto-chisq in each
direction from the trigger
onesided : optional, default=None, choices=['left','right']
If None (default), calculate auto-chisq in both directions from the
trigger. If left (backwards in time) or right (forwards in time)
calculate auto-chisq only in that direction.
twophase : optional, default=False
If False calculate auto-chisq using only the phase of the trigger.
If True, compare also against the orthogonal phase.
reverse_template : optional, default=False
If true, time-reverse the template before calculating auto-chisq.
In this case this is more of a cross-correlation chisq than auto.
take_maximum_value : optional, default=False
If provided, instead of adding the auto-chisq value at each sample
point tested, return only the maximum value.
maximal_value_dof : int, required if using take_maximum_value
If using take_maximum_value the expected value is not known. This
value specifies what to store in the cont_chisq_dof output.
"""
if stride > 0:
self.do = True
self.column_name = "cont_chisq"
self.table_dof_name = "cont_chisq_dof"
self.dof = num_points
self.num_points = num_points
self.stride = stride
self.one_sided = onesided
if (onesided is not None):
self.dof = self.dof * 2
self.two_phase = twophase
if self.two_phase:
self.dof = self.dof * 2
self.reverse_template = reverse_template
self.take_maximum_value=take_maximum_value
if self.take_maximum_value:
if maximal_value_dof is None:
err_msg = "Must provide the maximal_value_dof keyword "
err_msg += "argument if using the take_maximum_value "
err_msg += "option."
raise ValueError(err_msg)
self.dof = maximal_value_dof
self._autocor = None
self._autocor_id = None
else:
self.do = False
def values(self, sn, indices, template, psd, norm, stilde=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
"""
Calculate the auto-chisq at the specified indices.
Parameters
-----------
sn : Array[complex]
SNR time series of the template for which auto-chisq is being
computed. Provided unnormalized.
indices : Array[int]
List of points at which to calculate auto-chisq
template : Pycbc template object
The template for which we are calculating auto-chisq
psd : Pycbc psd object
The PSD of the data being analysed
norm : float
The normalization factor to apply to sn
stilde : Pycbc data object, needed if using reverse-template
The data being analysed. Only needed if using reverse-template,
otherwise ignored
low_frequency_cutoff : float
The lower frequency to consider in matched-filters
high_frequency_cutoff : float
The upper frequency to consider in matched-filters
"""
if self.do and (len(indices) > 0):
htilde = make_frequency_series(template)
# Check if we need to recompute the autocorrelation
key = (id(template), id(psd))
if key != self._autocor_id:
logging.info("Calculating autocorrelation")
if not self.reverse_template:
Pt, _, P_norm = matched_filter_core(htilde,
htilde, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
Pt = Pt * (1./ Pt[0])
self._autocor = Array(Pt, copy=True)
else:
Pt, _, P_norm = matched_filter_core(htilde.conj(),
htilde, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
# T-reversed template has same norm as forward template
# so we can normalize using that
# FIXME: Here sigmasq has to be cast to a float or the
# code is really slow ... why??
norm_fac = P_norm / float(((template.sigmasq(psd))**0.5))
Pt *= norm_fac
self._autocor = Array(Pt, copy=True)
self._autocor_id = key
logging.info("...Calculating autochisquare")
sn = sn*norm
if self.reverse_template:
assert(stilde is not None)
asn, _, ahnrm = matched_filter_core(htilde.conj(), stilde,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff,
h_norm=template.sigmasq(psd))
correlation_snr = asn * ahnrm
else:
correlation_snr = sn
achi_list = np.array([])
index_list = np.array(indices)
dof, achi_list, _ = autochisq_from_precomputed(sn, correlation_snr,
self._autocor, index_list, stride=self.stride,
num_points=self.num_points,
oneside=self.one_sided, twophase=self.two_phase,
maxvalued=self.take_maximum_value)
self.dof = dof
return achi_list
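# Usage sketch (placeholders for objects produced by the matched-filter loop;
# `sn` is the unnormalised complex SNR and `norm` its normalisation factor):
#
#     auto_chisq_calc = SingleDetAutoChisq(stride=3, num_points=100,
#                                          twophase=True)
#     achisq_vals = auto_chisq_calc.values(sn, indices, template, psd, norm,
#                                          low_frequency_cutoff=20.0)
#     # auto_chisq_calc.dof afterwards holds the degrees of freedom used.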
class SingleDetSkyMaxAutoChisq(SingleDetAutoChisq):
"""Stub for precessing auto chisq if anyone ever wants to code it up.
"""
def __init__(self, *args, **kwds):
super(SingleDetSkyMaxAutoChisq, self).__init__(*args, **kwds)
def values(self, *args, **kwargs):
if self.do:
err_msg = "Precessing single detector sky-max auto chisq has not "
err_msg += "been written. If you want to use it, why not help "
err_msg += "write it?"
raise NotImplementedError(err_msg)
else:
return None
| 12,524
| 41.74744
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/vetoes/sgchisq.py
|
"""Chisq based on sine-gaussian tiles.
See https://arxiv.org/abs/1709.08974 for a discussion.
"""
import numpy
from pycbc.waveform.utils import apply_fseries_time_shift
from pycbc.filter import sigma
from pycbc.waveform import sinegauss
from pycbc.vetoes.chisq import SingleDetPowerChisq
from pycbc.events import ranking
class SingleDetSGChisq(SingleDetPowerChisq):
"""Class that handles precomputation and memory management for efficiently
running the sine-Gaussian chisq
"""
returns = {'sg_chisq': numpy.float32}
def __init__(self, bank, num_bins=0,
snr_threshold=None,
chisq_locations=None):
""" Create sine-Gaussian Chisq Calculator
Parameters
----------
bank: pycbc.waveform.TemplateBank
The template bank that will be processed.
num_bins: str
The string determining the number of power chisq bins
snr_threshold: float
The threshold to calculate the sine-Gaussian chisq
chisq_locations: list of strs
List of strings which detail where to place a sine-Gaussian.
The format is 'region-boolean:q1-offset1,q2-offset2'.
The offset is relative to the end frequency of the approximant.
The region is a boolean expression such as 'mtotal>40' indicating
which templates to apply this set of sine-Gaussians to.
"""
if snr_threshold is not None:
self.do = True
self.num_bins = num_bins
self.snr_threshold = snr_threshold
self.params = {}
for descr in chisq_locations:
region, values = descr.split(":")
mask = bank.table.parse_boolargs([(1, region), (0, 'else')])[0]
hashes = bank.table['template_hash'][mask.astype(bool)]
for h in hashes:
self.params[h] = values
else:
self.do = False
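# Example of the `chisq_locations` format accepted above (illustrative values,
# not defaults): each comma-separated 'q-offset' entry places a sine-Gaussian
# of quality factor q at the given frequency offset above the template's end
# frequency, for templates matching the boolean region.
#
#     chisq_locations = ['mtotal>40:20-15,20-30,20-45']
#     # q=20 tiles at +15, +30 and +45 Hz for templates with mtotal > 40.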
@staticmethod
def insert_option_group(parser):
group = parser.add_argument_group("Sine-Gaussian Chisq")
group.add_argument("--sgchisq-snr-threshold", type=float,
help="Minimum SNR threshold to use SG chisq")
group.add_argument("--sgchisq-locations", type=str, nargs='+',
help="Frequency offsets and quality factors of the sine-Gaussians"
" to use, format 'region-boolean:q1-offset1,q2-offset2'. "
"Offset is relative to the end frequency of the approximant."
" Region is a boolean expression selecting templates to "
"apply the sine-Gaussians to, ex. 'mtotal>40'")
@classmethod
def from_cli(cls, args, bank, chisq_bins):
return cls(bank, chisq_bins,
args.sgchisq_snr_threshold,
args.sgchisq_locations)
def values(self, stilde, template, psd, snrv, snr_norm,
bchisq, bchisq_dof, indices):
""" Calculate sine-Gaussian chisq
Parameters
----------
stilde: pycbc.types.Frequencyseries
The overwhitened strain
template: pycbc.types.Frequencyseries
The waveform template being analyzed
psd: pycbc.types.Frequencyseries
The power spectral density of the data
snrv: numpy.ndarray
The peak unnormalized complex SNR values
snr_norm: float
The normalization factor for the snr
bchisq: numpy.ndarray
The Bruce Allen power chisq values for these triggers
bchisq_dof: numpy.ndarray
The degrees of freedom of the Bruce chisq
indices: numpy.ndarray
The indices of the snr peaks.
Returns
-------
chisq: Array
Chisq values, one for each sample index
"""
if not self.do:
return None
if template.params.template_hash not in self.params:
return numpy.ones(len(snrv))
values = self.params[template.params.template_hash].split(',')
# Get the chisq bins to use as the frequency reference point
bins = self.cached_chisq_bins(template, psd)
# This is implemented slowly, so let's not call it often, OK?
chisq = numpy.ones(len(snrv))
for i, snrvi in enumerate(snrv):
#Skip if newsnr too low
snr = abs(snrvi * snr_norm)
nsnr = ranking.newsnr(snr, bchisq[i] / bchisq_dof[i])
if nsnr < self.snr_threshold:
continue
N = (len(template) - 1) * 2
dt = 1.0 / (N * template.delta_f)
kmin = int(template.f_lower / psd.delta_f)
time = float(template.epoch) + dt * indices[i]
# Shift the time of interest to be centered on 0
stilde_shift = apply_fseries_time_shift(stilde, -time)
# Only apply the sine-Gaussian in a +-50 Hz range around the
# central frequency
qwindow = 50
chisq[i] = 0
# Estimate the maximum frequency up to which the waveform has
# power by approximating power per frequency
# as constant over the last 2 chisq bins. We cannot use the final
# chisq bin edge as it does not have to be where the waveform
# terminates.
fstep = (bins[-2] - bins[-3])
fpeak = (bins[-2] + fstep) * template.delta_f
# This is 90% of the Nyquist frequency of the data
# This allows us to avoid issues near Nyquist due to resample
# Filtering
fstop = len(stilde) * stilde.delta_f * 0.9
dof = 0
# Calculate the sum of SNR^2 for the sine-Gaussians specified
for descr in values:
# Get the q and frequency offset from the descriptor
q, offset = descr.split('-')
q, offset = float(q), float(offset)
fcen = fpeak + offset
flow = max(kmin * template.delta_f, fcen - qwindow)
fhigh = fcen + qwindow
# If any sine-gaussian tile has an upper frequency near
# nyquist return 1 instead.
if fhigh > fstop:
return numpy.ones(len(snrv))
kmin = int(flow / template.delta_f)
kmax = int(fhigh / template.delta_f)
#Calculate sine-gaussian tile
gtem = sinegauss.fd_sine_gaussian(1.0, q, fcen, flow,
len(template) * template.delta_f,
template.delta_f).astype(numpy.complex64)
gsigma = sigma(gtem, psd=psd,
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
#Calculate the SNR of the tile
gsnr = (gtem[kmin:kmax] * stilde_shift[kmin:kmax]).sum()
gsnr *= 4.0 * gtem.delta_f / gsigma
chisq[i] += abs(gsnr)**2.0
dof += 2
if dof == 0:
chisq[i] = 1
else:
chisq[i] /= dof
return chisq
| 7,224
| 39.363128
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/strain/recalibrate.py
|
""" Classes and functions for adjusting strain data.
"""
# Copyright (C) 2015 Ben Lackey, Christopher M. Biwer,
# Daniel Finstad, Colm Talbot, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from abc import (ABCMeta, abstractmethod)
import numpy as np
from scipy.interpolate import UnivariateSpline
from pycbc.types import FrequencySeries
class Recalibrate(metaclass=ABCMeta):
""" Base class for modifying calibration """
name = None
def __init__(self, ifo_name):
self.ifo_name = ifo_name
self.params = dict()
@abstractmethod
def apply_calibration(self, strain):
"""Apply calibration model
This method should be overwritten by subclasses
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
return
def map_to_adjust(self, strain, prefix='recalib_', **params):
"""Map an input dictionary of sampling parameters to the
adjust_strain function by filtering the dictionary for the
calibration parameters, then calling adjust_strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
prefix: str
Prefix for calibration parameter names
params : dict
Dictionary of sampling parameters which includes
calibration parameters.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
self.params.update({
key[len(prefix):]: params[key]
for key in params if prefix in key and self.ifo_name in key})
strain_adjusted = self.apply_calibration(strain)
return strain_adjusted
@classmethod
def from_config(cls, cp, ifo, section):
"""Read a config file to get calibration options and transfer
functions which will be used to initialize the model.
Parameters
----------
cp : WorkflowConfigParser
An open config file.
ifo : string
The detector (H1, L1) for which the calibration model will
be loaded.
section : string
The section name in the config file from which to retrieve
the calibration options.
Return
------
instance
An instance of the class.
"""
all_params = dict(cp.items(section))
params = {key[len(ifo)+1:]: all_params[key]
for key in all_params if ifo.lower() in key}
params = {key: params[key] for key in params}
params.pop('model')
params['ifo_name'] = ifo.lower()
return cls(**params)
class CubicSpline(Recalibrate):
"""Cubic spline recalibration
see https://dcc.ligo.org/LIGO-T1400682/public
This assumes the spline points follow
np.logspace(np.log(minimum_frequency), np.log(maximum_frequency),
n_points)
Parameters
----------
minimum_frequency: float
minimum frequency of spline points
maximum_frequency: float
maximum frequency of spline points
n_points: int
number of spline points
"""
name = 'cubic_spline'
def __init__(self, minimum_frequency, maximum_frequency, n_points,
ifo_name):
Recalibrate.__init__(self, ifo_name=ifo_name)
minimum_frequency = float(minimum_frequency)
maximum_frequency = float(maximum_frequency)
n_points = int(n_points)
if n_points < 4:
raise ValueError(
'Use at least 4 spline points for calibration model')
self.n_points = n_points
self.spline_points = np.logspace(np.log10(minimum_frequency),
np.log10(maximum_frequency), n_points)
def apply_calibration(self, strain):
"""Apply calibration model
This applies cubic spline calibration to the strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
amplitude_parameters =\
[self.params['amplitude_{}_{}'.format(self.ifo_name, ii)]
for ii in range(self.n_points)]
amplitude_spline = UnivariateSpline(self.spline_points,
amplitude_parameters)
delta_amplitude = amplitude_spline(strain.sample_frequencies.numpy())
phase_parameters =\
[self.params['phase_{}_{}'.format(self.ifo_name, ii)]
for ii in range(self.n_points)]
phase_spline = UnivariateSpline(self.spline_points, phase_parameters)
delta_phase = phase_spline(strain.sample_frequencies.numpy())
strain_adjusted = strain * (1.0 + delta_amplitude)\
* (2.0 + 1j * delta_phase) / (2.0 - 1j * delta_phase)
return strain_adjusted
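# Usage sketch for CubicSpline (hypothetical parameter values; the sampled
# calibration parameters are expected under names such as
# 'recalib_amplitude_h1_0' and 'recalib_phase_h1_0', following the prefix
# convention of map_to_adjust() in the base class):
#
#     model = CubicSpline(minimum_frequency=20, maximum_frequency=1024,
#                         n_points=5, ifo_name='h1')
#     params = {'recalib_amplitude_h1_%d' % ii: 0.0 for ii in range(5)}
#     params.update({'recalib_phase_h1_%d' % ii: 0.0 for ii in range(5)})
#     adjusted = model.map_to_adjust(strain, **params)
#     # `strain` is a FrequencySeries; zero spline values leave it
#     # essentially unchanged.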
class PhysicalModel(object):
""" Class for adjusting time-varying calibration parameters of given
strain data.
Parameters
----------
strain : FrequencySeries
The strain to be adjusted.
freq : array
The frequencies corresponding to the values of c0, d0, a0 in Hertz.
fc0 : float
Coupled-cavity (CC) pole at time t0, when c0=c(t0) and a0=a(t0) are
measured.
c0 : array
Initial sensing function at t0 for the frequencies.
d0 : array
Digital filter for the frequencies.
a_tst0 : array
Initial actuation function for the test mass at t0 for the
frequencies.
a_pu0 : array
Initial actuation function for the penultimate mass at t0 for the
frequencies.
fs0 : float
Initial spring frequency at t0 for the signal recycling cavity.
qinv0 : float
Initial inverse quality factor at t0 for the signal recycling
cavity.
"""
name = 'physical_model'
def __init__(self, freq=None, fc0=None, c0=None, d0=None,
a_tst0=None, a_pu0=None, fs0=None, qinv0=None):
self.freq = np.real(freq)
self.c0 = c0
self.d0 = d0
self.a_tst0 = a_tst0
self.a_pu0 = a_pu0
self.fc0 = float(fc0)
self.fs0 = float(fs0)
self.qinv0 = float(qinv0)
# initial detuning at time t0
init_detuning = self.freq**2 / (self.freq**2 - 1.0j * self.freq * \
self.fs0 * self.qinv0 + self.fs0**2)
# initial open loop gain
self.g0 = self.c0 * self.d0 * (self.a_tst0 + self.a_pu0)
# initial response function
self.r0 = (1.0 + self.g0) / self.c0
# residual of c0 after factoring out the coupled cavity pole fc0
self.c_res = self.c0 * (1 + 1.0j * self.freq / self.fc0)/init_detuning
def update_c(self, fs=None, qinv=None, fc=None, kappa_c=1.0):
""" Calculate the sensing function c(f,t) given the new parameters
kappa_c(t), kappa_a(t), f_c(t), fs, and qinv.
Parameters
----------
fc : float
Coupled-cavity (CC) pole at time t.
kappa_c : float
Scalar correction factor for sensing function at time t.
fs : float
Spring frequency for signal recycling cavity.
qinv : float
Inverse quality factor for signal recycling cavity.
Returns
-------
c : numpy.array
The new sensing function c(f,t).
"""
detuning_term = self.freq**2 / (self.freq**2 - 1.0j *self.freq*fs * \
qinv + fs**2)
return self.c_res * kappa_c / (1 + 1.0j * self.freq/fc)*detuning_term
def update_g(self, fs=None, qinv=None, fc=None, kappa_tst_re=1.0,
kappa_tst_im=0.0, kappa_pu_re=1.0, kappa_pu_im=0.0,
kappa_c=1.0):
""" Calculate the open loop gain g(f,t) given the new parameters
kappa_c(t), kappa_a(t), f_c(t), fs, and qinv.
Parameters
----------
fc : float
Coupled-cavity (CC) pole at time t.
kappa_c : float
Scalar correction factor for sensing function c at time t.
kappa_tst_re : float
Real part of scalar correction factor for actuation function
a_tst0 at time t.
kappa_pu_re : float
Real part of scalar correction factor for actuation function
a_pu0 at time t.
kappa_tst_im : float
Imaginary part of scalar correction factor for actuation function
a_tst0 at time t.
kappa_pu_im : float
Imaginary part of scalar correction factor for actuation function
a_pu0 at time t.
fs : float
Spring frequency for signal recycling cavity.
qinv : float
Inverse quality factor for signal recycling cavity.
Returns
-------
g : numpy.array
The new open loop gain g(f,t).
"""
c = self.update_c(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c)
a_tst = self.a_tst0 * (kappa_tst_re + 1.0j * kappa_tst_im)
a_pu = self.a_pu0 * (kappa_pu_re + 1.0j * kappa_pu_im)
return c * self.d0 * (a_tst + a_pu)
def update_r(self, fs=None, qinv=None, fc=None, kappa_c=1.0,
kappa_tst_re=1.0, kappa_tst_im=0.0, kappa_pu_re=1.0,
kappa_pu_im=0.0):
""" Calculate the response function R(f,t) given the new parameters
kappa_c(t), kappa_a(t), f_c(t), fs, and qinv.
Parameters
----------
fc : float
Coupled-cavity (CC) pole at time t.
kappa_c : float
Scalar correction factor for sensing function c at time t.
kappa_tst_re : float
Real part of scalar correction factor for actuation function
a_tst0 at time t.
kappa_pu_re : float
Real part of scalar correction factor for actuation function
a_pu0 at time t.
kappa_tst_im : float
Imaginary part of scalar correction factor for actuation function
a_tst0 at time t.
kappa_pu_im : float
Imaginary part of scalar correction factor for actuation function
a_pu0 at time t.
fs : float
Spring frequency for signal recycling cavity.
qinv : float
Inverse quality factor for signal recycling cavity.
Returns
-------
r : numpy.array
The new response function r(f,t).
"""
c = self.update_c(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c)
g = self.update_g(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c,
kappa_tst_re=kappa_tst_re,
kappa_tst_im=kappa_tst_im,
kappa_pu_re=kappa_pu_re, kappa_pu_im=kappa_pu_im)
return (1.0 + g) / c
def adjust_strain(self, strain, delta_fs=None, delta_qinv=None,
delta_fc=None, kappa_c=1.0, kappa_tst_re=1.0,
kappa_tst_im=0.0, kappa_pu_re=1.0, kappa_pu_im=0.0):
"""Adjust the FrequencySeries strain by changing the time-dependent
calibration parameters kappa_c(t), kappa_a(t), f_c(t), fs, and qinv.
Parameters
----------
strain : FrequencySeries
The strain data to be adjusted.
delta_fc : float
Change in coupled-cavity (CC) pole at time t.
kappa_c : float
Scalar correction factor for sensing function c0 at time t.
kappa_tst_re : float
Real part of scalar correction factor for actuation function
A_{tst0} at time t.
kappa_tst_im : float
Imaginary part of scalar correction factor for actuation function
A_tst0 at time t.
kappa_pu_re : float
Real part of scalar correction factor for actuation function
A_{pu0} at time t.
kappa_pu_im : float
Imaginary part of scalar correction factor for actuation function
A_{pu0} at time t.
fs : float
Spring frequency for signal recycling cavity.
qinv : float
Inverse quality factor for signal recycling cavity.
Returns
-------
strain_adjusted : FrequencySeries
The adjusted strain.
"""
fc = self.fc0 + delta_fc if delta_fc else self.fc0
fs = self.fs0 + delta_fs if delta_fs else self.fs0
qinv = self.qinv0 + delta_qinv if delta_qinv else self.qinv0
# calculate adjusted response function
r_adjusted = self.update_r(fs=fs, qinv=qinv, fc=fc, kappa_c=kappa_c,
kappa_tst_re=kappa_tst_re,
kappa_tst_im=kappa_tst_im,
kappa_pu_re=kappa_pu_re,
kappa_pu_im=kappa_pu_im)
# calculate error function
k = r_adjusted / self.r0
# decompose into amplitude and unwrapped phase
k_amp = np.abs(k)
k_phase = np.unwrap(np.angle(k))
# convert to FrequencySeries by interpolating then resampling
order = 1
k_amp_off = UnivariateSpline(self.freq, k_amp, k=order, s=0)
k_phase_off = UnivariateSpline(self.freq, k_phase, k=order, s=0)
freq_even = strain.sample_frequencies.numpy()
k_even_sample = k_amp_off(freq_even) * \
np.exp(1.0j * k_phase_off(freq_even))
strain_adjusted = FrequencySeries(strain.numpy() * \
k_even_sample,
delta_f=strain.delta_f)
return strain_adjusted
@classmethod
def tf_from_file(cls, path, delimiter=" "):
"""Convert the contents of a file with the columns
[freq, real(h), imag(h)] to a numpy.array with columns
[freq, real(h)+j*imag(h)].
Parameters
----------
path : string
delimiter : {" ", string}
Return
------
numpy.array
"""
data = np.loadtxt(path, delimiter=delimiter)
freq = data[:, 0]
h = data[:, 1] + 1.0j * data[:, 2]
return np.array([freq, h]).transpose()
@classmethod
def from_config(cls, cp, ifo, section):
"""Read a config file to get calibration options and transfer
functions which will be used to initialize the model.
Parameters
----------
cp : WorkflowConfigParser
An open config file.
ifo : string
The detector (H1, L1) for which the calibration model will
be loaded.
section : string
The section name in the config file from which to retrieve
the calibration options.
Return
------
instance
An instance of the Recalibrate class.
"""
# read transfer functions
tfs = []
tf_names = ["a-tst", "a-pu", "c", "d"]
for tag in ['-'.join([ifo, "transfer-function", name])
for name in tf_names]:
tf_path = cp.get_opt_tag(section, tag, None)
tfs.append(cls.tf_from_file(tf_path))
a_tst0 = tfs[0][:, 1]
a_pu0 = tfs[1][:, 1]
c0 = tfs[2][:, 1]
d0 = tfs[3][:, 1]
freq = tfs[0][:, 0]
# if upper stage actuation is included, read that in and add it
# to a_pu0
uim_tag = '-'.join([ifo, 'transfer-function-a-uim'])
if cp.has_option(section, uim_tag):
tf_path = cp.get_opt_tag(section, uim_tag, None)
a_pu0 += cls.tf_from_file(tf_path)[:, 1]
# read fc0, fs0, and qinv0
fc0 = cp.get_opt_tag(section, '-'.join([ifo, "fc0"]), None)
fs0 = cp.get_opt_tag(section, '-'.join([ifo, "fs0"]), None)
qinv0 = cp.get_opt_tag(section, '-'.join([ifo, "qinv0"]), None)
return cls(freq=freq, fc0=fc0, c0=c0, d0=d0, a_tst0=a_tst0,
a_pu0=a_pu0, fs0=fs0, qinv0=qinv0)
def map_to_adjust(self, strain, **params):
"""Map an input dictionary of sampling parameters to the
adjust_strain function by filtering the dictionary for the
calibration parameters, then calling adjust_strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
params : dict
Dictionary of sampling parameters which includes
calibration parameters.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
# calibration param names
arg_names = ['delta_fs', 'delta_fc', 'delta_qinv', 'kappa_c',
'kappa_tst_re', 'kappa_tst_im', 'kappa_pu_re',
'kappa_pu_im']
# calibration param labels as they exist in config files
arg_labels = [''.join(['calib_', name]) for name in arg_names]
# default values for calibration params
default_values = [0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0]
# make list of calibration param values
calib_args = []
for arg, val in zip(arg_labels, default_values):
if arg in params:
calib_args.append(params[arg])
else:
calib_args.append(val)
# adjust the strain using calibration param values
strain_adjusted = self.adjust_strain(strain, delta_fs=calib_args[0],
delta_fc=calib_args[1], delta_qinv=calib_args[2],
kappa_c=calib_args[3],
kappa_tst_re=calib_args[4],
kappa_tst_im=calib_args[5],
kappa_pu_re=calib_args[6],
kappa_pu_im=calib_args[7])
return strain_adjusted
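# Usage sketch for PhysicalModel (hypothetical correction factors; `model` is
# an instance built via from_config() or the constructor and `strain` a
# FrequencySeries):
#
#     adjusted = model.adjust_strain(strain, delta_fc=5.0, kappa_c=1.01)
#     # or, from a dictionary of sampled parameters named 'calib_*':
#     adjusted = model.map_to_adjust(strain, calib_delta_fc=5.0,
#                                    calib_kappa_c=1.01)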
| 18,783
| 35.473786
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/strain/strain.py
|
#Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions for reading, generating, and segmenting strain data
"""
import copy
import logging, numpy
import functools
import pycbc.types
from pycbc.types import TimeSeries, zeros
from pycbc.types import Array, FrequencySeries
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import MultiDetOptionActionSpecial
from pycbc.types import DictOptionAction, MultiDetDictOptionAction
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
from pycbc.types import copy_opts_for_single_ifo, complex_same_precision_as
from pycbc.inject import InjectionSet, SGBurstInjectionSet
from pycbc.filter import resample_to_delta_t, lowpass, highpass, make_frequency_series
from pycbc.filter.zpk import filter_zpk
from pycbc.waveform.spa_tmplt import spa_distance
import pycbc.psd
from pycbc.fft import FFT, IFFT
import pycbc.events
import pycbc.frame
import pycbc.filter
from scipy.signal import kaiserord
def next_power_of_2(n):
"""Return the smallest integer power of 2 larger than the argument.
Parameters
----------
n : int
A positive integer.
Returns
-------
m : int
Smallest integer power of 2 larger than n.
"""
return 1 << n.bit_length()
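# e.g. next_power_of_2(5) == 8 and next_power_of_2(8) == 16 (strictly larger).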
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
psd_avg_method='median', low_freq_cutoff=30.,
threshold=50., cluster_window=5., corrupt_time=4.,
high_freq_cutoff=None, output_intermediates=False):
"""Automatic identification of loud transients for gating purposes.
This function first estimates the PSD of the input time series using the
FindChirp Welch method. Then it whitens the time series using that
estimate. Finally, it computes the magnitude of the whitened series,
thresholds it and applies the FindChirp clustering over time to the
surviving samples.
Parameters
----------
strain : TimeSeries
Input strain time series to detect glitches over.
psd_duration : {float, 4}
Duration of the segments for PSD estimation in seconds.
psd_stride : {float, 2}
Separation between PSD estimation segments in seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq_cutoff : {float, 30}
Minimum frequency to include in the whitened strain.
threshold : {float, 50}
Minimum magnitude of whitened strain for considering a transient to
be present.
cluster_window : {float, 5}
Length of time window to cluster surviving samples over, in seconds.
corrupt_time : {float, 4}
Amount of time to be discarded at the beginning and end of the input
time series.
high_freq_cutoff : {float, None}
Maximum frequency to include in the whitened strain. If given, the
input series is downsampled accordingly. If omitted, the Nyquist
frequency is used.
output_intermediates : {bool, False}
Save intermediate time series for debugging.
"""
if high_freq_cutoff:
strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff,
method='ldas')
else:
strain = strain.copy()
# taper strain
corrupt_length = int(corrupt_time * strain.sample_rate)
w = numpy.arange(corrupt_length) / float(corrupt_length)
strain[0:corrupt_length] *= pycbc.types.Array(w, dtype=strain.dtype)
strain[(len(strain) - corrupt_length):] *= \
pycbc.types.Array(w[::-1], dtype=strain.dtype)
if output_intermediates:
strain.save_to_wav('strain_conditioned.wav')
# zero-pad strain to a power-of-2 length
strain_pad_length = next_power_of_2(len(strain))
pad_start = int(strain_pad_length / 2 - len(strain) / 2)
pad_end = pad_start + len(strain)
pad_epoch = strain.start_time - pad_start / float(strain.sample_rate)
strain_pad = pycbc.types.TimeSeries(
pycbc.types.zeros(strain_pad_length, dtype=strain.dtype),
delta_t=strain.delta_t, copy=False, epoch=pad_epoch)
strain_pad[pad_start:pad_end] = strain[:]
# estimate the PSD
psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)],
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method,
require_exact_data_fit=False)
psd = pycbc.psd.interpolate(psd, 1. / strain_pad.duration)
psd = pycbc.psd.inverse_spectrum_truncation(
psd, int(psd_duration * strain.sample_rate),
low_frequency_cutoff=low_freq_cutoff,
trunc_method='hann')
kmin = int(low_freq_cutoff / psd.delta_f)
psd[0:kmin] = numpy.inf
if high_freq_cutoff:
kmax = int(high_freq_cutoff / psd.delta_f)
psd[kmax:] = numpy.inf
# whiten
strain_tilde = strain_pad.to_frequencyseries()
if high_freq_cutoff:
norm = high_freq_cutoff - low_freq_cutoff
else:
norm = strain.sample_rate / 2. - low_freq_cutoff
strain_tilde *= (psd * norm) ** (-0.5)
strain_pad = strain_tilde.to_timeseries()
if output_intermediates:
strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav')
mag = abs(strain_pad[pad_start:pad_end])
if output_intermediates:
mag.save('strain_whitened_mag.npy')
mag = mag.numpy()
# remove strain corrupted by filters at the ends
mag[0:corrupt_length] = 0
mag[-1:-corrupt_length-1:-1] = 0
# find peaks and their times
indices = numpy.where(mag > threshold)[0]
cluster_idx = pycbc.events.findchirp_cluster_over_window(
indices, numpy.array(mag[indices]),
int(cluster_window*strain.sample_rate))
times = [idx * strain.delta_t + strain.start_time \
for idx in indices[cluster_idx]]
return times
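# Usage sketch (assumes `strain` is a TimeSeries of detector data with a few
# seconds of padding at each end; the threshold and windows are illustrative):
#
#     glitch_times = detect_loud_glitches(strain, threshold=50.,
#                                         cluster_window=0.5,
#                                         low_freq_cutoff=30.,
#                                         corrupt_time=4.)
#     # Returns the GPS times of whitened-strain peaks above the threshold,
#     # clustered so that nearby samples yield a single time.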
def from_cli(opt, dyn_range_fac=1, precision='single',
inj_filter_rejector=None):
"""Parses the CLI options related to strain data reading and conditioning.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, (frame-cache or frame-files), channel-name,
fake-strain, fake-strain-seed, fake-strain-from-file, gating_file).
dyn_range_fac : {float, 1}, optional
A large constant to reduce the dynamic range of the strain.
precision : string
Precision of the returned strain ('single' or 'double').
inj_filter_rejector : InjFilterRejector instance; optional, default=None
If given send the InjFilterRejector instance to the inject module so
that it can store a reduced representation of injections if
necessary.
Returns
-------
strain : TimeSeries
The time series containing the conditioned strain data.
"""
gating_info = {}
injector = InjectionSet.from_cli(opt)
if opt.frame_cache or opt.frame_files or opt.frame_type or opt.hdf_store:
if opt.frame_cache:
frame_source = opt.frame_cache
if opt.frame_files:
frame_source = opt.frame_files
logging.info("Reading Frames")
if hasattr(opt, 'frame_sieve') and opt.frame_sieve:
sieve = opt.frame_sieve
else:
sieve = None
if opt.frame_type:
strain = pycbc.frame.query_and_read_frame(
opt.frame_type, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
elif opt.frame_files or opt.frame_cache:
strain = pycbc.frame.read_frame(
frame_source, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
elif opt.hdf_store:
strain = pycbc.frame.read_store(opt.hdf_store, opt.channel_name,
opt.gps_start_time - opt.pad_data,
opt.gps_end_time + opt.pad_data)
elif opt.fake_strain or opt.fake_strain_from_file:
logging.info("Generating Fake Strain")
duration = opt.gps_end_time - opt.gps_start_time
duration += 2 * opt.pad_data
pdf = 1.0 / opt.fake_strain_filter_duration
fake_flow = opt.fake_strain_flow
fake_rate = opt.fake_strain_sample_rate
fake_extra_args = opt.fake_strain_extra_args
plen = round(opt.sample_rate / pdf) // 2 + 1
if opt.fake_strain_from_file:
logging.info("Reading ASD from file")
strain_psd = pycbc.psd.from_txt(opt.fake_strain_from_file,
plen, pdf,
fake_flow,
is_asd_file=True)
elif opt.fake_strain != 'zeroNoise':
logging.info("Making PSD for strain")
strain_psd = pycbc.psd.from_string(opt.fake_strain, plen, pdf,
fake_flow, **fake_extra_args)
if opt.fake_strain == 'zeroNoise':
logging.info("Making zero-noise time series")
strain = TimeSeries(pycbc.types.zeros(duration * fake_rate),
delta_t=1.0 / fake_rate,
epoch=opt.gps_start_time - opt.pad_data)
else:
logging.info("Making colored noise")
from pycbc.noise.reproduceable import colored_noise
strain = colored_noise(strain_psd,
opt.gps_start_time - opt.pad_data,
opt.gps_end_time + opt.pad_data,
seed=opt.fake_strain_seed,
sample_rate=fake_rate,
low_frequency_cutoff=fake_flow,
filter_duration=1.0/pdf)
if not strain.sample_rate_close(fake_rate):
err_msg = "Actual sample rate of generated data does not match "
err_msg += "that expected. Possible causes of this:\n"
err_msg += "The desired duration is not a multiple of delta_t. "
err_msg += "e.g. If using LISA with delta_t = 15 the duration "
err_msg += "must be a multiple of 15 seconds."
raise ValueError(err_msg)
if not opt.channel_name and (opt.injection_file \
or opt.sgburst_injection_file):
raise ValueError('Please provide channel names with the format '
'ifo:channel (e.g. H1:CALIB-STRAIN) to inject '
'simulated signals into fake strain')
if opt.zpk_z and opt.zpk_p and opt.zpk_k:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
logging.info("Applying zpk filter")
z = numpy.array(opt.zpk_z)
p = numpy.array(opt.zpk_p)
k = float(opt.zpk_k)
strain = filter_zpk(strain.astype(numpy.float64), z, p, k)
if opt.normalize_strain:
logging.info("Dividing strain by constant")
l = opt.normalize_strain
strain = strain / l
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if opt.sample_rate:
logging.info("Resampling data")
strain = resample_to_delta_t(strain,
1. / opt.sample_rate,
method='ldas')
if injector is not None:
logging.info("Applying injections")
injections = \
injector.apply(strain, opt.channel_name.split(':')[0],
distance_scale=opt.injection_scale_factor,
injection_sample_rate=opt.injection_sample_rate,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name.split(':')[0],
distance_scale=opt.injection_scale_factor)
if precision == 'single':
logging.info("Converting to float32")
strain = (strain * dyn_range_fac).astype(pycbc.types.float32)
elif precision == "double":
logging.info("Converting to float64")
strain = (strain * dyn_range_fac).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.gating_file is not None:
logging.info("Gating times contained in gating file")
gate_params = numpy.loadtxt(opt.gating_file)
if len(gate_params.shape) == 1:
gate_params = [gate_params]
for gate_time, gate_window, gate_taper in gate_params:
strain = strain.gate(gate_time, window=gate_window,
method=opt.gating_method,
copy=False,
taper_width=gate_taper)
gating_info['file'] = \
[gp for gp in gate_params \
if (gp[0] + gp[1] + gp[2] >= strain.start_time) \
and (gp[0] - gp[1] - gp[2] <= strain.end_time)]
if opt.autogating_threshold is not None:
gating_info['auto'] = []
for _ in range(opt.autogating_max_iterations):
glitch_times = detect_loud_glitches(
strain, threshold=opt.autogating_threshold,
cluster_window=opt.autogating_cluster,
low_freq_cutoff=opt.strain_high_pass,
corrupt_time=opt.pad_data + opt.autogating_pad)
gate_params = [[gt, opt.autogating_width, opt.autogating_taper]
for gt in glitch_times]
gating_info['auto'] += gate_params
for gate_time, gate_window, gate_taper in gate_params:
strain = strain.gate(gate_time, window=gate_window,
method=opt.gating_method,
copy=False,
taper_width=gate_taper)
if len(glitch_times) > 0:
logging.info('Autogating at %s',
', '.join(['%.3f' % gt
for gt in glitch_times]))
else:
break
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if opt.strain_low_pass:
logging.info("Lowpass Filtering")
strain = lowpass(strain, frequency=opt.strain_low_pass)
if hasattr(opt, 'witness_frame_type') and opt.witness_frame_type:
stilde = strain.to_frequencyseries()
import h5py
tf_file = h5py.File(opt.witness_tf_file)
for key in tf_file:
witness = pycbc.frame.query_and_read_frame(opt.witness_frame_type,
str(key),
start_time=strain.start_time,
end_time=strain.end_time)
witness = (witness * dyn_range_fac).astype(strain.dtype)
tf = pycbc.types.load_frequencyseries(opt.witness_tf_file,
group=key)
tf = tf.astype(stilde.dtype)
flen = int(opt.witness_filter_length * strain.sample_rate)
tf = pycbc.psd.interpolate(tf, stilde.delta_f)
tf_time = tf.to_timeseries()
window = Array(numpy.hanning(flen * 2), dtype=strain.dtype)
tf_time[0:flen] *= window[flen:]
tf_time[len(tf_time)-flen:] *= window[0:flen]
tf = tf_time.to_frequencyseries()
kmax = min(len(tf), len(stilde) - 1)
stilde[:kmax] -= tf[:kmax] * witness.to_frequencyseries()[:kmax]
strain = stilde.to_timeseries()
if opt.pad_data:
logging.info("Remove Padding")
start = int(opt.pad_data * strain.sample_rate)
end = int(len(strain) - strain.sample_rate * opt.pad_data)
strain = strain[start:end]
if opt.taper_data:
logging.info("Tapering data")
# Use auto-gating, a one-sided gate is a taper
pd_taper_window = opt.taper_data
gate_params = [(strain.start_time, 0., pd_taper_window)]
gate_params.append((strain.end_time, 0., pd_taper_window))
gate_data(strain, gate_params)
if injector is not None:
strain.injections = injections
strain.gating_info = gating_info
return strain
def from_cli_single_ifo(opt, ifo, inj_filter_rejector=None, **kwargs):
"""
Get the strain for a single ifo when using the multi-detector CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
return from_cli(single_det_opt,
inj_filter_rejector=inj_filter_rejector, **kwargs)
def from_cli_multi_ifos(opt, ifos, inj_filter_rejector_dict=None, **kwargs):
"""
Get the strain for all ifos when using the multi-detector CLI
"""
strain = {}
if inj_filter_rejector_dict is None:
inj_filter_rejector_dict = {ifo: None for ifo in ifos}
for ifo in ifos:
strain[ifo] = from_cli_single_ifo(opt, ifo,
inj_filter_rejector_dict[ifo], **kwargs)
return strain
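# Usage sketch for the CLI helpers (option values are illustrative; with
# --fake-strain no frame files are needed, and insert_strain_option_group()
# below supplies the attributes from_cli() expects):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     insert_strain_option_group(parser)
#     opt = parser.parse_args(['--gps-start-time', '1126259446',
#                              '--gps-end-time', '1126259478',
#                              '--sample-rate', '2048',
#                              '--strain-high-pass', '15',
#                              '--pad-data', '8',
#                              '--fake-strain', 'aLIGOZeroDetHighPower',
#                              '--fake-strain-flow', '10',
#                              '--fake-strain-seed', '1234',
#                              '--channel-name', 'H1:SIMULATED'])
#     ts = from_cli(opt)   # TimeSeries of simulated coloured Gaussian noise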
def insert_strain_option_group(parser, gps_times=True):
""" Add strain-related options to the optparser object.
Adds the options used to call the pycbc.strain.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
gps_times : bool, optional
Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
is True.
"""
data_reading_group = parser.add_argument_group("Options for obtaining h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
" if the --psd-estimation option is given.")
# Required options
if gps_times:
data_reading_group.add_argument("--gps-start-time",
help="The gps start time of the data "
"(integer seconds)", type=int)
data_reading_group.add_argument("--gps-end-time",
help="The gps end time of the data "
"(integer seconds)", type=int)
data_reading_group.add_argument("--strain-high-pass", type=float,
help="High pass frequency")
data_reading_group.add_argument("--strain-low-pass", type=float,
help="Low pass frequency")
data_reading_group.add_argument("--pad-data", default=8,
help="Extra padding to remove highpass corruption "
"(integer seconds, default 8)", type=int)
data_reading_group.add_argument("--taper-data",
help="Taper ends of data to zero using the supplied length as a "
"window (integer seconds)", type=int, default=0)
data_reading_group.add_argument("--sample-rate", type=float,
help="The sample rate to use for h(t) generation (integer Hz)")
data_reading_group.add_argument("--channel-name", type=str,
help="The channel containing the gravitational strain data")
# Read from cache file
data_reading_group.add_argument("--frame-cache", type=str, nargs="+",
help="Cache file containing the frame locations.")
# Read from frame files
data_reading_group.add_argument("--frame-files",
type=str, nargs="+",
help="list of frame files")
# Read from hdf store file
data_reading_group.add_argument("--hdf-store",
type=str,
help="Store of time series data in hdf format")
# Use datafind to get frame files
data_reading_group.add_argument("--frame-type",
type=str,
help="(optional), replaces frame-files. Use datafind "
"to get the needed frame file(s) of this type.")
# Filter frame files by URL
data_reading_group.add_argument("--frame-sieve",
type=str,
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
# Generate gaussian noise with given psd
data_reading_group.add_argument("--fake-strain",
help="Name of model PSD for generating fake gaussian noise.",
choices=pycbc.psd.get_psd_model_list() + ['zeroNoise'])
data_reading_group.add_argument("--fake-strain-extra-args",
nargs='+', action=DictOptionAction,
metavar='PARAM:VALUE', default={}, type=float,
help="(optional) Extra arguments passed to "
"the PSD models.")
data_reading_group.add_argument("--fake-strain-seed", type=int, default=0,
help="Seed value for the generation of fake colored"
" gaussian noise")
data_reading_group.add_argument("--fake-strain-from-file",
help="File containing ASD for generating fake noise from it.")
data_reading_group.add_argument("--fake-strain-flow",
default=1.0, type=float,
help="Low frequency cutoff of the fake strain")
data_reading_group.add_argument("--fake-strain-filter-duration",
default=128.0, type=float,
help="Duration in seconds of the fake data coloring filter")
data_reading_group.add_argument("--fake-strain-sample-rate",
default=16384, type=float,
help="Sample rate of the fake data generation")
# Injection options
data_reading_group.add_argument("--injection-file", type=str,
help="(optional) Injection file containing parameters"
" of CBC signals to be added to the strain")
data_reading_group.add_argument("--sgburst-injection-file", type=str,
help="(optional) Injection file containing parameters"
"of sine-Gaussian burst signals to add to the strain")
data_reading_group.add_argument("--injection-scale-factor", type=float,
default=1,
help="Divide injections by this factor "
"before adding to the strain data")
data_reading_group.add_argument("--injection-sample-rate", type=float,
help="Sample rate to use for injections (integer Hz). "
"Typically similar to the strain data sample rate."
"If not provided, the strain sample rate will be "
"used")
data_reading_group.add_argument("--injection-f-ref", type=float,
help="Reference frequency in Hz for creating CBC "
"injections from an XML file")
data_reading_group.add_argument("--injection-f-final", type=float,
help="Override the f_final field of a CBC XML "
"injection file (frequency in Hz)")
# Gating options
data_reading_group.add_argument("--gating-file", type=str,
help="(optional) Text file of gating segments to apply."
" Format of each line is (all values in seconds):"
" gps_time zeros_half_width pad_half_width")
data_reading_group.add_argument('--autogating-threshold', type=float,
metavar='SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
    data_reading_group.add_argument('--autogating-max-iterations', type=int,
                                    metavar='COUNT', default=1,
                                    help='Maximum number of iterations of '
                                         'autogating to apply (default 1).')
data_reading_group.add_argument('--autogating-cluster', type=float,
metavar='SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group.add_argument('--autogating-width', type=float,
metavar='SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group.add_argument('--autogating-taper', type=float,
metavar='SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group.add_argument('--autogating-pad', type=float,
metavar='SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
data_reading_group.add_argument('--gating-method', type=str,
default='taper',
help='Choose the method for gating. '
'Default: `taper`',
choices=['hard', 'taper', 'paint'])
# Optional
data_reading_group.add_argument("--normalize-strain", type=float,
help="(optional) Divide frame data by constant.")
data_reading_group.add_argument("--zpk-z", type=float, nargs="+",
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group.add_argument("--zpk-p", type=float, nargs="+",
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group.add_argument("--zpk-k", type=float,
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
# Options to apply to subtract noise from a witness channel and known
# transfer function.
data_reading_group.add_argument("--witness-frame-type", type=str,
help="(optional), frame type which will be use to query the"
" witness channel data.")
data_reading_group.add_argument("--witness-tf-file", type=str,
help="an hdf file containing the transfer"
" functions and the associated channel names")
data_reading_group.add_argument("--witness-filter-length", type=float,
help="filter length in seconds for the transfer function")
return data_reading_group
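# Example usage sketch for the option group above: build an argparse parser,
# add the group, and hand the parsed namespace to from_cli(). All option
# values below are placeholders; fake Gaussian noise is used so that no frame
# files are required.
def _example_strain_from_cli():
    import argparse
    parser = argparse.ArgumentParser()
    insert_strain_option_group(parser)
    opt = parser.parse_args([
        '--gps-start-time', '1126259446',
        '--gps-end-time', '1126259478',
        '--sample-rate', '2048',
        '--strain-high-pass', '15',
        '--pad-data', '8',
        '--channel-name', 'H1:FAKE-STRAIN',
        '--fake-strain', 'aLIGOZeroDetHighPower',
        '--fake-strain-seed', '42',
    ])
    return from_cli(opt)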
# FIXME: This repeats almost all of the options above. Any nice way of reducing
# this?
def insert_strain_option_group_multi_ifo(parser, gps_times=True):
"""
    Adds the options used to call the pycbc.strain.from_cli function to a
    parser as an argument group. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
        ArgumentParser instance.
gps_times : bool, optional
Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
is True.
"""
data_reading_group_multi = parser.add_argument_group("Options for obtaining"
" h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
"if the --psd-estimation option is given. This group "
"supports reading from multiple ifos simultaneously.")
# Required options
if gps_times:
data_reading_group_multi.add_argument(
"--gps-start-time", nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME', type=int,
help="The gps start time of the data (integer seconds)")
data_reading_group_multi.add_argument(
"--gps-end-time", nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME', type=int,
help="The gps end time of the data (integer seconds)")
data_reading_group_multi.add_argument("--strain-high-pass", nargs='+',
action=MultiDetOptionAction,
type=float, metavar='IFO:FREQUENCY',
help="High pass frequency")
data_reading_group_multi.add_argument("--strain-low-pass", nargs='+',
action=MultiDetOptionAction,
type=float, metavar='IFO:FREQUENCY',
help="Low pass frequency")
data_reading_group_multi.add_argument("--pad-data", nargs='+', default=8,
action=MultiDetOptionAction,
type=int, metavar='IFO:LENGTH',
help="Extra padding to remove highpass corruption "
"(integer seconds, default 8)")
data_reading_group_multi.add_argument("--taper-data", nargs='+',
action=MultiDetOptionAction,
type=int, default=0, metavar='IFO:LENGTH',
help="Taper ends of data to zero using the "
"supplied length as a window (integer seconds)")
data_reading_group_multi.add_argument("--sample-rate", type=float,
nargs='+',
action=MultiDetOptionAction, metavar='IFO:RATE',
help="The sample rate to use for h(t) generation "
" (integer Hz).")
data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+',
action=MultiDetOptionActionSpecial,
metavar='IFO:CHANNEL',
help="The channel containing the gravitational "
"strain data")
# Read from cache file
data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_CACHE',
help="Cache file containing the frame locations.")
# Read from frame files
data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_FILES',
help="list of frame files")
# Read from hdf store file
data_reading_group_multi.add_argument("--hdf-store", type=str, nargs='+',
action=MultiDetOptionAction,
metavar='IFO:HDF_STORE_FILE',
help="Store of time series data in hdf format")
# Use datafind to get frame files
data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_TYPE',
help="(optional) Replaces frame-files. "
"Use datafind to get the needed frame "
"file(s) of this type.")
# Filter frame files by URL
data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_SIEVE',
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
# Generate gaussian noise with given psd
data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+",
action=MultiDetOptionAction, metavar='IFO:CHOICE',
help="Name of model PSD for generating fake "
"gaussian noise. Choose from %s or zeroNoise" \
%((', ').join(pycbc.psd.get_lalsim_psd_list()),) )
data_reading_group_multi.add_argument("--fake-strain-extra-args",
nargs='+', action=MultiDetDictOptionAction,
metavar='DETECTOR:PARAM:VALUE', default={},
type=float, help="(optional) Extra arguments "
"passed to the PSD models.")
data_reading_group_multi.add_argument("--fake-strain-seed", type=int,
default=0, nargs="+", action=MultiDetOptionAction,
metavar='IFO:SEED',
help="Seed value for the generation of fake "
"colored gaussian noise")
data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="File containing ASD for generating fake "
"noise from it.")
data_reading_group_multi.add_argument("--fake-strain-flow",
default=1.0, type=float,
nargs="+", action=MultiDetOptionAction,
help="Low frequency cutoff of the fake strain")
data_reading_group_multi.add_argument("--fake-strain-filter-duration",
default=128.0, type=float,
nargs="+", action=MultiDetOptionAction,
help="Duration in seconds of the fake data coloring filter")
data_reading_group_multi.add_argument("--fake-strain-sample-rate",
default=16384, type=float,
nargs="+", action=MultiDetOptionAction,
help="Sample rate of the fake data generation")
# Injection options
data_reading_group_multi.add_argument("--injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file containing parameters"
"of CBC signals to be added to the strain")
data_reading_group_multi.add_argument("--sgburst-injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file containing parameters"
"of sine-Gaussian burst signals to add to the strain")
data_reading_group_multi.add_argument("--injection-scale-factor",
type=float, nargs="+", action=MultiDetOptionAction,
metavar="IFO:VAL", default=1.,
help="Divide injections by this factor "
"before adding to the strain data")
data_reading_group_multi.add_argument("--injection-sample-rate",
type=float, nargs="+", action=MultiDetOptionAction,
metavar="IFO:VAL",
help="Sample rate to use for injections (integer Hz). "
"Typically similar to the strain data sample rate."
"If not provided, the strain sample rate will be "
"used")
data_reading_group_multi.add_argument("--injection-f-ref", type=float,
action=MultiDetOptionAction, metavar='IFO:VALUE',
help="Reference frequency in Hz for creating CBC "
"injections from an XML file")
data_reading_group_multi.add_argument('--injection-f-final', type=float,
action=MultiDetOptionAction, metavar='IFO:VALUE',
help="Override the f_final field of a CBC XML "
"injection file (frequency in Hz)")
# Gating options
data_reading_group_multi.add_argument("--gating-file", nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FILE',
help='(optional) Text file of gating segments to apply.'
                      ' Format of each line is (all values in seconds):'
' gps_time zeros_half_width pad_half_width')
data_reading_group_multi.add_argument('--autogating-threshold', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SIGMA',
help='If given, find and gate glitches producing a '
'deviation larger than SIGMA in the whitened strain'
' time series')
    data_reading_group_multi.add_argument('--autogating-max-iterations', type=int,
                                          metavar='COUNT', default=1,
                                          help='Maximum number of iterations of '
                                               'autogating to apply (default 1).')
data_reading_group_multi.add_argument('--autogating-cluster', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group_multi.add_argument('--autogating-width', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group_multi.add_argument('--autogating-taper', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group_multi.add_argument('--autogating-pad', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
data_reading_group_multi.add_argument('--gating-method', type=str,
nargs='+', action=MultiDetOptionAction,
default='taper',
help='Choose the method for gating. '
'Default: `taper`',
choices=['hard', 'taper', 'paint'])
# Optional
data_reading_group_multi.add_argument("--normalize-strain", type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:VALUE',
help="(optional) Divide frame data by constant.")
data_reading_group_multi.add_argument("--zpk-z", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group_multi.add_argument("--zpk-p", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group_multi.add_argument("--zpk-k", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
return data_reading_group_multi
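# Example usage sketch for the multi-detector option group: the same options
# are supplied as IFO:VALUE pairs and the parsed namespace is passed to
# from_cli_multi_ifos(). All values below are placeholders.
def _example_strain_from_cli_multi_ifo():
    import argparse
    parser = argparse.ArgumentParser()
    insert_strain_option_group_multi_ifo(parser)
    opt = parser.parse_args([
        '--gps-start-time', 'H1:1126259446', 'L1:1126259446',
        '--gps-end-time', 'H1:1126259478', 'L1:1126259478',
        '--sample-rate', 'H1:2048', 'L1:2048',
        '--strain-high-pass', 'H1:15', 'L1:15',
        '--pad-data', 'H1:8', 'L1:8',
        '--channel-name', 'H1:FAKE-STRAIN', 'L1:FAKE-STRAIN',
        '--fake-strain', 'H1:aLIGOZeroDetHighPower', 'L1:aLIGOZeroDetHighPower',
        '--fake-strain-seed', 'H1:42', 'L1:43',
    ])
    return from_cli_multi_ifos(opt, ['H1', 'L1'])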
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--frame-cache','--fake-strain',
'--fake-strain-from-file',
'--frame-files', '--frame-type',
'--hdf-store'])
required_opts_list = ['--gps-start-time', '--gps-end-time',
'--pad-data', '--sample-rate',
'--channel-name']
def verify_strain_options(opts, parser):
"""Sanity check provided strain arguments.
Parses the strain data CLI options and verifies that they are consistent
and reasonable.
Parameters
----------
    opts : object
        Result of parsing the CLI with an ArgumentParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, frame-cache, channel-name, fake-strain,
fake-strain-seed).
parser : object
        ArgumentParser instance.
"""
for opt_group in ensure_one_opt_groups:
ensure_one_opt(opts, parser, opt_group)
required_opts(opts, parser, required_opts_list)
def verify_strain_options_multi_ifo(opts, parser, ifos):
"""Sanity check provided strain arguments.
Parses the strain data CLI options and verifies that they are consistent
and reasonable.
Parameters
----------
    opts : object
        Result of parsing the CLI with an ArgumentParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, frame-cache, channel-name, fake-strain,
fake-strain-seed).
parser : object
        ArgumentParser instance.
ifos : list of strings
List of ifos for which to verify options for
"""
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opts, parser, ifo, opt_group)
required_opts_multi_ifo(opts, parser, ifo, required_opts_list)
def gate_data(data, gate_params):
"""Apply a set of gating windows to a time series.
Each gating window is
defined by a central time, a given duration (centered on the given
time) to zero out, and a given duration of smooth tapering on each side of
the window. The window function used for tapering is a Tukey window.
Parameters
----------
data : TimeSeries
The time series to be gated.
gate_params : list
List of parameters for the gating windows. Each element should be a
list or tuple with 3 elements: the central time of the gating window,
the half-duration of the portion to zero out, and the duration of the
Tukey tapering on each side. All times in seconds. The total duration
of the data affected by one gating window is thus twice the second
parameter plus twice the third parameter.
Returns
-------
data: TimeSeries
The gated time series.
"""
def inverted_tukey(M, n_pad):
midlen = M - 2*n_pad
if midlen < 0:
raise ValueError("No zeros left after applying padding.")
padarr = 0.5*(1.+numpy.cos(numpy.pi*numpy.arange(n_pad)/n_pad))
return numpy.concatenate((padarr,numpy.zeros(midlen),padarr[::-1]))
sample_rate = 1./data.delta_t
temp = data.data
for glitch_time, glitch_width, pad_width in gate_params:
t_start = glitch_time - glitch_width - pad_width - data.start_time
t_end = glitch_time + glitch_width + pad_width - data.start_time
if t_start > data.duration or t_end < 0.:
continue # Skip gate segments that don't overlap
win_samples = int(2*sample_rate*(glitch_width+pad_width))
pad_samples = int(sample_rate*pad_width)
window = inverted_tukey(win_samples, pad_samples)
offset = int(t_start * sample_rate)
idx1 = max(0, -offset)
idx2 = min(len(window), len(data)-offset)
temp[idx1+offset:idx2+offset] *= window[idx1:idx2]
return data
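# Minimal usage sketch for gate_data(): zero out half a second of data centred
# on t = 8 s, with 0.25 s inverse-Tukey tapers on each side. The data and gate
# times below are arbitrary placeholders.
def _example_gate_data():
    import numpy
    from pycbc.types import TimeSeries
    sample_rate = 4096
    data = TimeSeries(numpy.random.normal(size=16 * sample_rate),
                      delta_t=1.0 / sample_rate, epoch=0)
    # (centre time, half-width of zeroed region, taper duration), in seconds
    return gate_data(data, [(8.0, 0.25, 0.25)])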
class StrainSegments(object):
""" Class for managing manipulation of strain data for the purpose of
matched filtering. This includes methods for segmenting and
conditioning.
"""
def __init__(self, strain, segment_length=None, segment_start_pad=0,
segment_end_pad=0, trigger_start=None, trigger_end=None,
filter_inj_only=False, injection_window=None,
allow_zero_padding=False):
""" Determine how to chop up the strain data into smaller segments
for analysis.
"""
self._fourier_segments = None
self.strain = strain
self.delta_t = strain.delta_t
self.sample_rate = strain.sample_rate
if segment_length:
seg_len = segment_length
else:
seg_len = strain.duration
self.delta_f = 1.0 / seg_len
self.time_len = int(seg_len * self.sample_rate)
self.freq_len = self.time_len // 2 + 1
seg_end_pad = segment_end_pad
seg_start_pad = segment_start_pad
if not trigger_start:
trigger_start = int(strain.start_time) + segment_start_pad
else:
if not allow_zero_padding:
min_start_time = int(strain.start_time) + segment_start_pad
else:
min_start_time = int(strain.start_time)
if trigger_start < min_start_time:
err_msg = "Trigger start time must be within analysable "
err_msg += "window. Asked to start from %d " %(trigger_start)
err_msg += "but can only analyse from %d." %(min_start_time)
raise ValueError(err_msg)
if not trigger_end:
trigger_end = int(strain.end_time) - segment_end_pad
else:
if not allow_zero_padding:
max_end_time = int(strain.end_time) - segment_end_pad
else:
max_end_time = int(strain.end_time)
if trigger_end > max_end_time:
err_msg = "Trigger end time must be within analysable "
err_msg += "window. Asked to end at %d " %(trigger_end)
err_msg += "but can only analyse to %d." %(max_end_time)
raise ValueError(err_msg)
throwaway_size = seg_start_pad + seg_end_pad
seg_width = seg_len - throwaway_size
# The amount of time we can actually analyze given the
# amount of padding that is needed
analyzable = trigger_end - trigger_start
data_start = (trigger_start - segment_start_pad) - \
int(strain.start_time)
data_end = trigger_end + segment_end_pad - int(strain.start_time)
data_dur = data_end - data_start
data_start = data_start * strain.sample_rate
data_end = data_end * strain.sample_rate
#number of segments we need to analyze this data
num_segs = int(numpy.ceil(float(analyzable) / float(seg_width)))
# The offset we will use between segments
seg_offset = int(numpy.ceil(analyzable / float(num_segs)))
self.segment_slices = []
self.analyze_slices = []
# Determine how to chop up the strain into smaller segments
for nseg in range(num_segs-1):
# boundaries for time slices into the strain
seg_start = int(data_start + (nseg*seg_offset) * strain.sample_rate)
seg_end = int(seg_start + seg_len * strain.sample_rate)
seg_slice = slice(seg_start, seg_end)
self.segment_slices.append(seg_slice)
# boundaries for the analyzable portion of the segment
ana_start = int(seg_start_pad * strain.sample_rate)
ana_end = int(ana_start + seg_offset * strain.sample_rate)
ana_slice = slice(ana_start, ana_end)
self.analyze_slices.append(ana_slice)
# The last segment takes up any integer boundary slop
seg_end = int(data_end)
seg_start = int(seg_end - seg_len * strain.sample_rate)
seg_slice = slice(seg_start, seg_end)
self.segment_slices.append(seg_slice)
remaining = (data_dur - ((num_segs - 1) * seg_offset + seg_start_pad))
ana_start = int((seg_len - remaining) * strain.sample_rate)
ana_end = int((seg_len - seg_end_pad) * strain.sample_rate)
ana_slice = slice(ana_start, ana_end)
self.analyze_slices.append(ana_slice)
self.full_segment_slices = copy.deepcopy(self.segment_slices)
#Remove segments that are outside trig start and end
segment_slices_red = []
analyze_slices_red = []
trig_start_idx = (trigger_start - int(strain.start_time)) * strain.sample_rate
trig_end_idx = (trigger_end - int(strain.start_time)) * strain.sample_rate
if filter_inj_only and hasattr(strain, 'injections'):
end_times = strain.injections.end_times()
end_times = [time for time in end_times if float(time) < trigger_end and float(time) > trigger_start]
inj_idx = [(float(time) - float(strain.start_time)) * strain.sample_rate for time in end_times]
for seg, ana in zip(self.segment_slices, self.analyze_slices):
start = ana.start
stop = ana.stop
cum_start = start + seg.start
cum_end = stop + seg.start
# adjust first segment
if trig_start_idx > cum_start:
start += (trig_start_idx - cum_start)
# adjust last segment
if trig_end_idx < cum_end:
stop -= (cum_end - trig_end_idx)
if filter_inj_only and hasattr(strain, 'injections'):
analyze_this = False
inj_window = strain.sample_rate * 8
for inj_id in inj_idx:
if inj_id < (cum_end + inj_window) and \
inj_id > (cum_start - inj_window):
analyze_this = True
if not analyze_this:
continue
if start < stop:
segment_slices_red.append(seg)
analyze_slices_red.append(slice(start, stop))
self.segment_slices = segment_slices_red
self.analyze_slices = analyze_slices_red
def fourier_segments(self):
""" Return a list of the FFT'd segments.
Return the list of FrequencySeries. Additional properties are
added that describe the strain segment. The property 'analyze'
is a slice corresponding to the portion of the time domain equivalent
of the segment to analyze for triggers. The value 'cumulative_index'
indexes from the beginning of the original strain series.
"""
if not self._fourier_segments:
self._fourier_segments = []
for seg_slice, ana in zip(self.segment_slices, self.analyze_slices):
if seg_slice.start >= 0 and seg_slice.stop <= len(self.strain):
freq_seg = make_frequency_series(self.strain[seg_slice])
                # Assume that we cannot have a case where we zero-pad on
                # both sides
elif seg_slice.start < 0:
strain_chunk = self.strain[:seg_slice.stop]
strain_chunk.prepend_zeros(-seg_slice.start)
freq_seg = make_frequency_series(strain_chunk)
elif seg_slice.stop > len(self.strain):
strain_chunk = self.strain[seg_slice.start:]
strain_chunk.append_zeros(seg_slice.stop - len(self.strain))
freq_seg = make_frequency_series(strain_chunk)
freq_seg.analyze = ana
freq_seg.cumulative_index = seg_slice.start + ana.start
freq_seg.seg_slice = seg_slice
self._fourier_segments.append(freq_seg)
return self._fourier_segments
@classmethod
def from_cli(cls, opt, strain):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
return cls(strain, segment_length=opt.segment_length,
segment_start_pad=opt.segment_start_pad,
segment_end_pad=opt.segment_end_pad,
trigger_start=opt.trig_start_time,
trigger_end=opt.trig_end_time,
filter_inj_only=opt.filter_inj_only,
injection_window=opt.injection_window,
allow_zero_padding=opt.allow_zero_padding)
@classmethod
def insert_segment_option_group(cls, parser):
segment_group = parser.add_argument_group(
"Options for segmenting the strain",
"These options are used to determine how to "
"segment the strain into smaller chunks, "
"and for determining the portion of each to "
"analyze for triggers. ")
segment_group.add_argument("--trig-start-time", type=int, default=0,
help="(optional) The gps time to start recording triggers")
segment_group.add_argument("--trig-end-time", type=int, default=0,
help="(optional) The gps time to stop recording triggers")
segment_group.add_argument("--segment-length", type=int,
help="The length of each strain segment in seconds.")
segment_group.add_argument("--segment-start-pad", type=int,
help="The time in seconds to ignore of the "
"beginning of each segment in seconds. ")
segment_group.add_argument("--segment-end-pad", type=int,
help="The time in seconds to ignore at the "
"end of each segment in seconds.")
segment_group.add_argument("--allow-zero-padding", action='store_true',
help="Allow for zero padding of data to "
"analyze requested times, if needed.")
# Injection optimization options
segment_group.add_argument("--filter-inj-only", action='store_true',
help="Analyze only segments that contain an injection.")
segment_group.add_argument("--injection-window", default=None,
type=float, help="""If using --filter-inj-only then
only search for injections within +/- injection
                          window of the injection's end time. This is useful
to speed up a coherent search or a search where we
initially filter at lower sample rate, and then
filter at full rate where needed. NOTE: Reverts to
full analysis if two injections are in the same
segment.""")
@classmethod
def from_cli_single_ifo(cls, opt, strain, ifo):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
return cls(strain, segment_length=opt.segment_length[ifo],
segment_start_pad=opt.segment_start_pad[ifo],
segment_end_pad=opt.segment_end_pad[ifo],
trigger_start=opt.trig_start_time[ifo],
trigger_end=opt.trig_end_time[ifo],
filter_inj_only=opt.filter_inj_only,
allow_zero_padding=opt.allow_zero_padding)
@classmethod
def from_cli_multi_ifos(cls, opt, strain_dict, ifos):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
strain_segments = {}
for ifo in ifos:
strain_segments[ifo] = cls.from_cli_single_ifo(
opt, strain_dict[ifo], ifo)
return strain_segments
@classmethod
def insert_segment_option_group_multi_ifo(cls, parser):
segment_group = parser.add_argument_group(
"Options for segmenting the strain",
"These options are used to determine how to "
"segment the strain into smaller chunks, "
"and for determining the portion of each to "
"analyze for triggers. ")
segment_group.add_argument("--trig-start-time", type=int, default=0,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="(optional) The gps time to start recording triggers")
segment_group.add_argument("--trig-end-time", type=int, default=0,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="(optional) The gps time to stop recording triggers")
segment_group.add_argument("--segment-length", type=int,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:LENGTH',
help="The length of each strain segment in seconds.")
segment_group.add_argument("--segment-start-pad", type=int,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="The time in seconds to ignore of the "
"beginning of each segment in seconds. ")
segment_group.add_argument("--segment-end-pad", type=int,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="The time in seconds to ignore at the "
"end of each segment in seconds.")
segment_group.add_argument("--allow-zero-padding", action='store_true',
help="Allow for zero padding of data to analyze "
"requested times, if needed.")
segment_group.add_argument("--filter-inj-only", action='store_true',
help="Analyze only segments that contain "
"an injection.")
required_opts_list = ['--segment-length',
'--segment-start-pad',
'--segment-end-pad',
]
@classmethod
def verify_segment_options(cls, opt, parser):
required_opts(opt, parser, cls.required_opts_list)
@classmethod
def verify_segment_options_multi_ifo(cls, opt, parser, ifos):
for ifo in ifos:
required_opts_multi_ifo(opt, parser, ifo, cls.required_opts_list)
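# Example usage sketch: segment a strain TimeSeries directly, without the CLI
# helpers. The durations and epoch below are placeholder values; each returned
# FrequencySeries carries the 'analyze', 'cumulative_index' and 'seg_slice'
# attributes described in fourier_segments().
def _example_strain_segments():
    import numpy
    from pycbc.types import TimeSeries
    sample_rate = 2048
    strain = TimeSeries(numpy.random.normal(size=256 * sample_rate),
                        delta_t=1.0 / sample_rate, epoch=1126259446)
    segs = StrainSegments(strain, segment_length=64,
                          segment_start_pad=8, segment_end_pad=8)
    return segs.fourier_segments()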
@functools.lru_cache(maxsize=500)
def create_memory_and_engine_for_class_based_fft(
npoints_time,
dtype,
delta_t=1,
ifft=False,
uid=0
):
""" Create memory and engine for class-based FFT/IFFT
Currently only supports R2C FFT / C2R IFFTs, but this could be expanded
if use-cases arise.
Parameters
----------
npoints_time : int
Number of time samples of the real input vector (or real output vector
if doing an IFFT).
dtype : np.dtype
The dtype for the real input vector (or real output vector if doing an
        IFFT). In practice this is np.float32 or np.float64.
delta_t : float (default: 1)
delta_t of the real vector. If not given this will be set to 1, and we
will assume it is not needed in the returned TimeSeries/FrequencySeries
ifft : boolean (default: False)
By default will use the FFT class, set to true to use IFFT.
uid : int (default: 0)
Provide a unique identifier. This is used to provide a separate set
of memory in the cache, for instance if calling this from different
codes.
"""
npoints_freq = npoints_time // 2 + 1
delta_f_tmp = 1.0 / (npoints_time * delta_t)
vec = TimeSeries(
zeros(
npoints_time,
dtype=dtype
),
delta_t=delta_t,
copy=False
)
vectilde = FrequencySeries(
zeros(
npoints_freq,
dtype=complex_same_precision_as(vec)
),
delta_f=delta_f_tmp,
copy=False
)
if ifft:
fft_class = IFFT(vectilde, vec)
invec = vectilde
outvec = vec
else:
fft_class = FFT(vec, vectilde)
invec = vec
outvec = vectilde
return invec, outvec, fft_class
def execute_cached_fft(invec_data, normalize_by_rate=True, ifft=False,
copy_output=True, uid=0):
""" Executes a cached FFT
Parameters
-----------
invec_data : Array
Array which will be used as input when fft_class is executed.
    normalize_by_rate : boolean (optional, default:True)
If True, then normalize by delta_t (for an FFT) or delta_f (for an
IFFT).
ifft : boolean (optional, default:False)
If true assume this is an IFFT and multiply by delta_f not delta_t.
Will do nothing if normalize_by_rate is False.
copy_output : boolean (optional, default:True)
If True we will copy the output into a new array. This avoids the issue
that calling this function again might overwrite output. However, if
you know that the output array will not be used before this function
might be called again with the same length, then setting this to False
will provide some increase in efficiency. The uid can also be used to
help ensure that data doesn't get unintentionally overwritten!
uid : int (default: 0)
Provide a unique identifier. This is used to provide a separate set
of memory in the cache, for instance if calling this from different
codes.
"""
from pycbc.types import real_same_precision_as
if ifft:
npoints_time = (len(invec_data) - 1) * 2
else:
npoints_time = len(invec_data)
try:
delta_t = invec_data.delta_t
except AttributeError:
if not normalize_by_rate:
# Don't need this
delta_t = 1
else:
raise
dtype = real_same_precision_as(invec_data)
invec, outvec, fft_class = create_memory_and_engine_for_class_based_fft(
npoints_time,
dtype,
delta_t=delta_t,
ifft=ifft,
uid=uid
)
if invec_data is not None:
invec._data[:] = invec_data._data[:]
fft_class.execute()
if normalize_by_rate:
if ifft:
outvec._data *= invec._delta_f
else:
outvec._data *= invec._delta_t
if copy_output:
outvec = outvec.copy()
return outvec
def execute_cached_ifft(*args, **kwargs):
""" Executes a cached IFFT
Parameters
-----------
invec_data : Array
Array which will be used as input when fft_class is executed.
    normalize_by_rate : boolean (optional, default:True)
If True, then normalize by delta_t (for an FFT) or delta_f (for an
IFFT).
copy_output : boolean (optional, default:True)
If True we will copy the output into a new array. This avoids the issue
that calling this function again might overwrite output. However, if
you know that the output array will not be used before this function
might be called again with the same length, then setting this to False
will provide some increase in efficiency. The uid can also be used to
help ensure that data doesn't get unintentionally overwritten!
uid : int (default: 0)
Provide a unique identifier. This is used to provide a separate set
of memory in the cache, for instance if calling this from different
codes.
"""
return execute_cached_fft(*args, **kwargs, ifft=True)
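# Usage sketch for the cached FFT helpers: forward-transform a TimeSeries and
# invert the result, reusing the cached plan and memory on repeated calls of
# the same length. The length, delta_t and uid values below are arbitrary.
def _example_cached_fft_roundtrip():
    import numpy
    from pycbc.types import TimeSeries
    tseries = TimeSeries(numpy.random.normal(size=4096).astype(numpy.float64),
                         delta_t=1.0 / 4096)
    fseries = execute_cached_fft(tseries, uid=1234567)
    recovered = execute_cached_ifft(fseries, uid=7654321)
    return fseries, recovered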
# If using caching we want output to be unique if called at different places
# (and if called from different modules/functions), these unique IDs achieve
# that. The numbers are not significant, only that they are unique.
STRAINBUFFER_UNIQUE_ID_1 = 236546845
STRAINBUFFER_UNIQUE_ID_2 = 778946541
STRAINBUFFER_UNIQUE_ID_3 = 665849947
class StrainBuffer(pycbc.frame.DataBuffer):
def __init__(self, frame_src, channel_name, start_time,
max_buffer=512,
sample_rate=4096,
low_frequency_cutoff=20,
highpass_frequency=15.0,
highpass_reduction=200.0,
highpass_bandwidth=5.0,
psd_samples=30,
psd_segment_length=4,
psd_inverse_length=3.5,
trim_padding=0.25,
autogating_threshold=None,
autogating_cluster=None,
autogating_pad=None,
autogating_width=None,
autogating_taper=None,
autogating_duration=None,
autogating_psd_segment_length=None,
autogating_psd_stride=None,
state_channel=None,
data_quality_channel=None,
idq_channel=None,
idq_state_channel=None,
idq_threshold=None,
dyn_range_fac=pycbc.DYN_RANGE_FAC,
psd_abort_difference=None,
psd_recalculate_difference=None,
force_update_cache=True,
increment_update_cache=None,
analyze_flags=None,
data_quality_flags=None,
dq_padding=0):
""" Class to produce overwhitened strain incrementally
Parameters
----------
        frame_src: str or list of strings
            Strings that indicate where to read frame files from. This can be a
list of frame files, a glob, etc.
channel_name: str
Name of the channel to read from the frame files
start_time:
Time to start reading from.
max_buffer: {int, 512}, Optional
Length of the buffer in seconds
        sample_rate: {int, 4096}, Optional
Rate in Hz to sample the data.
low_frequency_cutoff: {float, 20}, Optional
The low frequency cutoff to use for inverse spectrum truncation
highpass_frequency: {float, 15}, Optional
The frequency to apply a highpass filter at before downsampling.
highpass_reduction: {float, 200}, Optional
The amount of reduction to apply to the low frequencies.
highpass_bandwidth: {float, 5}, Optional
The width of the transition region for the highpass filter.
psd_samples: {int, 30}, Optional
The number of sample to use for psd estimation
psd_segment_length: {float, 4}, Optional
The number of seconds in each psd sample.
psd_inverse_length: {float, 3.5}, Optional
The length in seconds for fourier transform of the inverse of the
PSD to be truncated to.
trim_padding: {float, 0.25}, Optional
            Amount of padding in seconds to allow for truncating the overwhitened
data stream.
autogating_threshold: float, Optional
Sigma deviation required to cause autogating of data.
If None, no autogating is performed.
autogating_cluster: float, Optional
Seconds to cluster possible gating locations.
autogating_pad: float, Optional
Seconds of corrupted whitened strain to ignore when generating a gate.
autogating_width: float, Optional
Half-duration of the zeroed-out portion of autogates.
autogating_taper: float, Optional
Duration of taper on either side of the gating window in seconds.
autogating_duration: float, Optional
Amount of data in seconds to apply autogating on.
autogating_psd_segment_length: float, Optional
The length in seconds of each segment used to estimate the PSD with Welch's method.
autogating_psd_stride: float, Optional
The overlap in seconds between each segment used to estimate the PSD with Welch's method.
state_channel: {str, None}, Optional
Channel to use for state information about the strain
data_quality_channel: {str, None}, Optional
Channel to use for data quality information about the strain
idq_channel: {str, None}, Optional
Channel to use for idq timeseries
idq_state_channel : {str, None}, Optional
Channel containing information about usability of idq
idq_threshold : float, Optional
Threshold which triggers a veto if iDQ channel falls below this threshold
dyn_range_fac: {float, pycbc.DYN_RANGE_FAC}, Optional
Scale factor to apply to strain
psd_abort_difference: {float, None}, Optional
The relative change in the inspiral range from the previous PSD
estimate to trigger the data to be considered invalid.
psd_recalculate_difference: {float, None}, Optional
            The relative change in the inspiral range from the previous PSD
            estimate to trigger a re-estimation of the PSD.
force_update_cache: {boolean, True}, Optional
Re-check the filesystem for frame files on every attempt to
read more data.
analyze_flags: list of strs
The flags that must be on to mark the current data as valid for
*any* use.
data_quality_flags: list of strs
The flags used to determine if to keep triggers.
dq_padding: {float, 0}, optional
Extra seconds to consider invalid before/after times with bad DQ.
increment_update_cache: {str, None}, Optional
Pattern to look for frame files in a GPS dependent directory. This
            is an alternative to the forced update of the frame cache, and
            attempts to predict the next frame file name without probing the
filesystem.
"""
super(StrainBuffer, self).__init__(frame_src, channel_name, start_time,
max_buffer=32,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
self.low_frequency_cutoff = low_frequency_cutoff
# Set up status buffers
self.analyze_flags = analyze_flags
self.data_quality_flags = data_quality_flags
self.state = None
self.dq = None
self.idq = None
self.dq_padding = dq_padding
# State channel
if state_channel is not None:
valid_mask = pycbc.frame.flag_names_to_bitmask(self.analyze_flags)
logging.info('State channel %s interpreted as bitmask %s = good',
state_channel, bin(valid_mask))
self.state = pycbc.frame.StatusBuffer(
frame_src,
state_channel, start_time,
max_buffer=max_buffer,
valid_mask=valid_mask,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
# low latency dq channel
if data_quality_channel is not None:
sb_kwargs = dict(max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
if len(self.data_quality_flags) == 1 \
and self.data_quality_flags[0] == 'veto_nonzero':
sb_kwargs['valid_on_zero'] = True
logging.info('DQ channel %s interpreted as zero = good',
data_quality_channel)
else:
sb_kwargs['valid_mask'] = pycbc.frame.flag_names_to_bitmask(
self.data_quality_flags)
logging.info(
'DQ channel %s interpreted as bitmask %s = good',
data_quality_channel,
bin(sb_kwargs['valid_mask'])
)
self.dq = pycbc.frame.StatusBuffer(frame_src, data_quality_channel,
start_time, **sb_kwargs)
if idq_channel is not None:
if idq_state_channel is None:
raise ValueError(
'Each detector with an iDQ channel requires an iDQ state channel as well')
if idq_threshold is None:
raise ValueError(
'If an iDQ channel is provided, a veto threshold must also be provided')
self.idq = pycbc.frame.iDQBuffer(frame_src,
idq_channel,
idq_state_channel,
idq_threshold,
start_time,
max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
self.highpass_frequency = highpass_frequency
self.highpass_reduction = highpass_reduction
self.highpass_bandwidth = highpass_bandwidth
self.autogating_threshold = autogating_threshold
self.autogating_cluster = autogating_cluster
self.autogating_pad = autogating_pad
self.autogating_width = autogating_width
self.autogating_taper = autogating_taper
self.autogating_duration = autogating_duration
self.autogating_psd_segment_length = autogating_psd_segment_length
self.autogating_psd_stride = autogating_psd_stride
self.gate_params = []
self.sample_rate = sample_rate
self.dyn_range_fac = dyn_range_fac
self.psd_abort_difference = psd_abort_difference
self.psd_recalculate_difference = psd_recalculate_difference
self.psd_segment_length = psd_segment_length
self.psd_samples = psd_samples
self.psd_inverse_length = psd_inverse_length
self.psd = None
self.psds = {}
strain_len = int(max_buffer * self.sample_rate)
self.strain = TimeSeries(zeros(strain_len, dtype=numpy.float32),
delta_t=1.0/self.sample_rate,
epoch=start_time-max_buffer)
# Determine the total number of corrupted samples for highpass
# and PSD over whitening
highpass_samples, self.beta = kaiserord(self.highpass_reduction,
self.highpass_bandwidth / self.raw_buffer.sample_rate * 2 * numpy.pi)
self.highpass_samples = int(highpass_samples / 2)
resample_corruption = 10 # If using the ldas method
self.factor = round(1.0 / self.raw_buffer.delta_t / self.sample_rate)
self.corruption = self.highpass_samples // self.factor + resample_corruption
self.psd_corruption = self.psd_inverse_length * self.sample_rate
self.total_corruption = self.corruption + self.psd_corruption
# Determine how much padding is needed after removing the parts
# associated with PSD over whitening and highpass filtering
self.trim_padding = int(trim_padding * self.sample_rate)
if self.trim_padding > self.total_corruption:
self.trim_padding = self.total_corruption
self.psd_duration = (psd_samples - 1) // 2 * psd_segment_length
self.reduced_pad = int(self.total_corruption - self.trim_padding)
self.segments = {}
# time to ignore output of frame (for initial buffering)
self.add_hard_count()
self.taper_immediate_strain = True
@property
def start_time(self):
""" Return the start time of the current valid segment of data """
return self.end_time - self.blocksize
@property
def end_time(self):
""" Return the end time of the current valid segment of data """
return float(self.strain.start_time + (len(self.strain) - self.total_corruption) / self.sample_rate)
def add_hard_count(self):
""" Reset the countdown timer, so that we don't analyze data long enough
to generate a new PSD.
"""
self.wait_duration = int(numpy.ceil(self.total_corruption / self.sample_rate + self.psd_duration))
self.invalidate_psd()
def invalidate_psd(self):
""" Make the current PSD invalid. A new one will be generated when
it is next required """
self.psd = None
self.psds = {}
def recalculate_psd(self):
""" Recalculate the psd
"""
seg_len = int(self.sample_rate * self.psd_segment_length)
e = len(self.strain)
s = e - (self.psd_samples + 1) * seg_len // 2
psd = pycbc.psd.welch(self.strain[s:e], seg_len=seg_len, seg_stride=seg_len//2)
psd.dist = spa_distance(psd, 1.4, 1.4, self.low_frequency_cutoff) * pycbc.DYN_RANGE_FAC
# If the new psd is similar to the old one, don't replace it
if self.psd and self.psd_recalculate_difference:
if abs(self.psd.dist - psd.dist) / self.psd.dist < self.psd_recalculate_difference:
logging.info("Skipping recalculation of %s PSD, %s-%s",
self.detector, self.psd.dist, psd.dist)
return True
# If the new psd is *really* different than the old one, return an error
if self.psd and self.psd_abort_difference:
if abs(self.psd.dist - psd.dist) / self.psd.dist > self.psd_abort_difference:
logging.info("%s PSD is CRAZY, aborting!!!!, %s-%s",
self.detector, self.psd.dist, psd.dist)
self.psd = psd
self.psds = {}
return False
        # If the new estimate replaces the current one, invalidate the interpolated PSDs
self.psd = psd
self.psds = {}
logging.info("Recalculating %s PSD, %s", self.detector, psd.dist)
return True
def check_psd_dist(self, min_dist, max_dist):
"""Check that the horizon distance of a detector is within a required
range. If so, return True, otherwise log a warning and return False.
"""
if self.psd is None:
# ignore check
return True
# Note that the distance can in principle be inf or nan, e.g. if h(t)
# is identically zero. The check must fail in those cases. Be careful
# with how the logic works out when comparing inf's or nan's!
good = self.psd.dist >= min_dist and self.psd.dist <= max_dist
if not good:
logging.info(
"%s PSD dist %s outside acceptable range [%s, %s]",
self.detector,
self.psd.dist,
min_dist,
max_dist
)
return good
def overwhitened_data(self, delta_f):
""" Return overwhitened data
Parameters
----------
delta_f: float
            The frequency step at which to generate the overwhitened frequency-domain data
Returns
-------
htilde: FrequencySeries
            Overwhitened strain data
"""
# we haven't already computed htilde for this delta_f
if delta_f not in self.segments:
buffer_length = int(1.0 / delta_f)
e = len(self.strain)
s = int(e - buffer_length * self.sample_rate - self.reduced_pad * 2)
# FFT the contents of self.strain[s:e] into fseries
fseries = execute_cached_fft(self.strain[s:e],
copy_output=False,
uid=STRAINBUFFER_UNIQUE_ID_1)
fseries._epoch = self.strain._epoch + s*self.strain.delta_t
# we haven't calculated a resample psd for this delta_f
if delta_f not in self.psds:
psdt = pycbc.psd.interpolate(self.psd, fseries.delta_f)
psdt = pycbc.psd.inverse_spectrum_truncation(psdt,
int(self.sample_rate * self.psd_inverse_length),
low_frequency_cutoff=self.low_frequency_cutoff)
psdt._delta_f = fseries.delta_f
psd = pycbc.psd.interpolate(self.psd, delta_f)
psd = pycbc.psd.inverse_spectrum_truncation(psd,
int(self.sample_rate * self.psd_inverse_length),
low_frequency_cutoff=self.low_frequency_cutoff)
psd.psdt = psdt
self.psds[delta_f] = psd
psd = self.psds[delta_f]
fseries /= psd.psdt
# trim ends of strain
if self.reduced_pad != 0:
# IFFT the contents of fseries into overwhite
overwhite = execute_cached_ifft(fseries,
copy_output=False,
uid=STRAINBUFFER_UNIQUE_ID_2)
overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad]
taper_window = self.trim_padding / 2.0 / overwhite.sample_rate
gate_params = [(overwhite2.start_time, 0., taper_window),
(overwhite2.end_time, 0., taper_window)]
gate_data(overwhite2, gate_params)
# FFT the contents of overwhite2 into fseries_trimmed
fseries_trimmed = execute_cached_fft(
overwhite2,
copy_output=True,
uid=STRAINBUFFER_UNIQUE_ID_3
)
fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t
else:
fseries_trimmed = fseries
fseries_trimmed.psd = psd
self.segments[delta_f] = fseries_trimmed
stilde = self.segments[delta_f]
return stilde
def near_hwinj(self):
"""Check that the current set of triggers could be influenced by
a hardware injection.
"""
if not self.state:
return False
if not self.state.is_extent_valid(self.start_time, self.blocksize, pycbc.frame.NO_HWINJ):
return True
return False
def null_advance_strain(self, blocksize):
""" Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
self.strain.roll(-sample_step)
# We should roll this off at some point too...
self.strain[len(self.strain) - csize + self.corruption:] = 0
self.strain.start_time += blocksize
# The next time we need strain will need to be tapered
self.taper_immediate_strain = True
def advance(self, blocksize, timeout=10):
"""Advanced buffer blocksize seconds.
Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
status: boolean
Returns True if this block is analyzable.
"""
ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout)
self.blocksize = blocksize
self.gate_params = []
# We have given up so there is no time series
if ts is None:
logging.info("%s frame is late, giving up", self.detector)
self.null_advance_strain(blocksize)
if self.state:
self.state.null_advance(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
if self.idq:
self.idq.null_advance(blocksize)
return False
# We collected some data so we are closer to being able to analyze data
self.wait_duration -= blocksize
# If the data we got was invalid, reset the counter on how much to collect
# This behavior corresponds to how we handle CAT1 vetoes
if self.state and self.state.advance(blocksize) is False:
self.add_hard_count()
self.null_advance_strain(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
if self.idq:
self.idq.null_advance(blocksize)
logging.info("%s time has invalid data, resetting buffer",
self.detector)
return False
# Also advance the dq vector and idq timeseries in lockstep
if self.dq:
self.dq.advance(blocksize)
if self.idq:
self.idq.advance(blocksize)
self.segments = {}
# only condition with the needed raw data so we can continuously add
# to the existing result
# Precondition
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
start = len(self.raw_buffer) - csize * self.factor
strain = self.raw_buffer[start:]
strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency,
self.highpass_samples,
beta=self.beta)
strain = (strain * self.dyn_range_fac).astype(numpy.float32)
strain = pycbc.filter.resample_to_delta_t(strain,
1.0/self.sample_rate, method='ldas')
# remove corruption at beginning
strain = strain[self.corruption:]
# taper beginning if needed
if self.taper_immediate_strain:
logging.info("Tapering start of %s strain block", self.detector)
strain = gate_data(
strain, [(strain.start_time, 0., self.autogating_taper)])
self.taper_immediate_strain = False
# Stitch into continuous stream
self.strain.roll(-sample_step)
self.strain[len(self.strain) - csize + self.corruption:] = strain[:]
self.strain.start_time += blocksize
# apply gating if needed
if self.autogating_threshold is not None:
autogating_duration_length = self.autogating_duration * self.sample_rate
autogating_start_sample = int(len(self.strain) - autogating_duration_length)
glitch_times = detect_loud_glitches(
self.strain[autogating_start_sample:-self.corruption],
psd_duration=self.autogating_psd_segment_length, psd_stride=self.autogating_psd_stride,
threshold=self.autogating_threshold,
cluster_window=self.autogating_cluster,
low_freq_cutoff=self.highpass_frequency,
corrupt_time=self.autogating_pad)
if len(glitch_times) > 0:
logging.info('Autogating %s at %s', self.detector,
', '.join(['%.3f' % gt for gt in glitch_times]))
self.gate_params = \
[(gt, self.autogating_width, self.autogating_taper)
for gt in glitch_times]
self.strain = gate_data(self.strain, self.gate_params)
        if self.psd is None and self.wait_duration <= 0:
self.recalculate_psd()
return self.wait_duration <= 0
@classmethod
def from_cli(cls, ifo, args, maxlen):
"""Initialize a StrainBuffer object (data reader) for a particular
detector.
"""
state_channel = analyze_flags = None
if args.state_channel and ifo in args.state_channel \
and args.analyze_flags and ifo in args.analyze_flags:
state_channel = ':'.join([ifo, args.state_channel[ifo]])
analyze_flags = args.analyze_flags[ifo].split(',')
dq_channel = dq_flags = None
if args.data_quality_channel and ifo in args.data_quality_channel \
and args.data_quality_flags and ifo in args.data_quality_flags:
dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]])
dq_flags = args.data_quality_flags[ifo].split(',')
idq_channel = None
if args.idq_channel and ifo in args.idq_channel:
idq_channel = ':'.join([ifo, args.idq_channel[ifo]])
idq_state_channel = None
if args.idq_state_channel and ifo in args.idq_state_channel:
idq_state_channel = ':'.join([ifo, args.idq_state_channel[ifo]])
if args.frame_type:
frame_src = pycbc.frame.frame_paths(args.frame_type[ifo],
args.start_time,
args.end_time)
else:
frame_src = [args.frame_src[ifo]]
strain_channel = ':'.join([ifo, args.channel_name[ifo]])
return cls(frame_src, strain_channel,
args.start_time, max_buffer=maxlen * 2,
state_channel=state_channel,
data_quality_channel=dq_channel,
idq_channel=idq_channel,
idq_state_channel=idq_state_channel,
idq_threshold=args.idq_threshold,
sample_rate=args.sample_rate,
low_frequency_cutoff=args.low_frequency_cutoff,
highpass_frequency=args.highpass_frequency,
highpass_reduction=args.highpass_reduction,
highpass_bandwidth=args.highpass_bandwidth,
psd_samples=args.psd_samples,
trim_padding=args.trim_padding,
psd_segment_length=args.psd_segment_length,
psd_inverse_length=args.psd_inverse_length,
autogating_threshold=args.autogating_threshold,
autogating_cluster=args.autogating_cluster,
autogating_pad=args.autogating_pad,
autogating_width=args.autogating_width,
autogating_taper=args.autogating_taper,
autogating_duration=args.autogating_duration,
autogating_psd_segment_length=args.autogating_psd_segment_length,
autogating_psd_stride=args.autogating_psd_stride,
psd_abort_difference=args.psd_abort_difference,
psd_recalculate_difference=args.psd_recalculate_difference,
force_update_cache=args.force_update_cache,
increment_update_cache=args.increment_update_cache[ifo],
analyze_flags=analyze_flags,
data_quality_flags=dq_flags,
dq_padding=args.data_quality_padding)
| 93,225 | 45.243056 | 113 | py |
| pycbc | pycbc-master/pycbc/strain/calibration.py |
# Copyright (C) 2018 Colm Talbot
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Functions for adding calibration factors to waveform templates.
"""
import numpy as np
from scipy.interpolate import UnivariateSpline
from abc import (ABCMeta, abstractmethod)
class Recalibrate(metaclass=ABCMeta):
name = None
def __init__(self, ifo_name):
self.ifo_name = ifo_name
self.params = dict()
@abstractmethod
def apply_calibration(self, strain):
"""Apply calibration model
This method should be overwritten by subclasses
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
return
def map_to_adjust(self, strain, prefix='recalib_', **params):
"""Map an input dictionary of sampling parameters to the
adjust_strain function by filtering the dictionary for the
calibration parameters, then calling adjust_strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
prefix: str
Prefix for calibration parameter names
params : dict
Dictionary of sampling parameters which includes
calibration parameters.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
self.params.update({
key[len(prefix):]: params[key]
for key in params if prefix in key and self.ifo_name in key})
strain_adjusted = self.apply_calibration(strain)
return strain_adjusted
@classmethod
def from_config(cls, cp, ifo, section):
"""Read a config file to get calibration options and transfer
        functions which will be used to initialize the model.
Parameters
----------
cp : WorkflowConfigParser
An open config file.
ifo : string
The detector (H1, L1) for which the calibration model will
be loaded.
section : string
The section name in the config file from which to retrieve
the calibration options.
        Returns
        -------
instance
An instance of the class.
"""
all_params = dict(cp.items(section))
params = {key[len(ifo)+1:]: all_params[key]
for key in all_params if ifo.lower() in key}
model = params.pop('model')
params['ifo_name'] = ifo.lower()
return all_models[model](**params)
class CubicSpline(Recalibrate):
name = 'cubic_spline'
def __init__(self, minimum_frequency, maximum_frequency, n_points,
ifo_name):
"""
Cubic spline recalibration
see https://dcc.ligo.org/LIGO-T1400682/public
This assumes the spline points follow
        np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency),
n_points)
Parameters
----------
minimum_frequency: float
minimum frequency of spline points
maximum_frequency: float
maximum frequency of spline points
n_points: int
number of spline points
"""
Recalibrate.__init__(self, ifo_name=ifo_name)
minimum_frequency = float(minimum_frequency)
maximum_frequency = float(maximum_frequency)
n_points = int(n_points)
if n_points < 4:
raise ValueError(
'Use at least 4 spline points for calibration model')
self.n_points = n_points
self.spline_points = np.logspace(np.log10(minimum_frequency),
np.log10(maximum_frequency), n_points)
def apply_calibration(self, strain):
"""Apply calibration model
This applies cubic spline calibration to the strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
        Returns
        -------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
amplitude_parameters =\
[self.params['amplitude_{}_{}'.format(self.ifo_name, ii)]
for ii in range(self.n_points)]
amplitude_spline = UnivariateSpline(self.spline_points,
amplitude_parameters)
delta_amplitude = amplitude_spline(strain.sample_frequencies.numpy())
phase_parameters =\
[self.params['phase_{}_{}'.format(self.ifo_name, ii)]
for ii in range(self.n_points)]
phase_spline = UnivariateSpline(self.spline_points, phase_parameters)
delta_phase = phase_spline(strain.sample_frequencies.numpy())
strain_adjusted = strain * (1.0 + delta_amplitude)\
* (2.0 + 1j * delta_phase) / (2.0 - 1j * delta_phase)
return strain_adjusted
all_models = {
CubicSpline.name: CubicSpline
}
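# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal, hedged example of driving the CubicSpline model directly. The
# frequency range, number of spline points and the all-zero correction values
# are placeholders chosen only for demonstration; with zero corrections the
# strain comes back unchanged, which makes this a quick sanity check.
if __name__ == '__main__':
    from pycbc.types import FrequencySeries
    recal = CubicSpline(minimum_frequency=20, maximum_frequency=1024,
                        n_points=5, ifo_name='h1')
    # Flat, unit-amplitude spectrum up to 512 Hz (delta_f = 0.25 Hz)
    stilde = FrequencySeries(np.ones(2049, dtype=complex), delta_f=0.25)
    # Sampling parameters as a sampler would provide them, following the
    # 'recalib_<quantity>_<ifo>_<index>' naming that map_to_adjust filters on
    params = {'recalib_amplitude_h1_%d' % i: 0.0 for i in range(5)}
    params.update({'recalib_phase_h1_%d' % i: 0.0 for i in range(5)})
    adjusted = recal.map_to_adjust(stilde, **params)
    print(np.abs((adjusted - stilde).numpy()).max())  # ~0 for zero corrections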
| 5,742
| 31.446328
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/strain/lines.py
|
# Copyright (C) 2019 Miriam Cabero
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Functions for removing frequency lines from real data.
"""
import numpy
from pycbc.types import TimeSeries, zeros
def complex_median(complex_list):
""" Get the median value of a list of complex numbers.
Parameters
----------
complex_list: list
List of complex numbers to calculate the median.
Returns
-------
a + 1.j*b: complex number
The median of the real and imaginary parts.
"""
median_real = numpy.median([complex_number.real
for complex_number in complex_list])
median_imag = numpy.median([complex_number.imag
for complex_number in complex_list])
return median_real + 1.j*median_imag
def avg_inner_product(data1, data2, bin_size):
""" Calculate the time-domain inner product averaged over bins.
Parameters
----------
data1: pycbc.types.TimeSeries
First data set.
data2: pycbc.types.TimeSeries
Second data set, with same duration and sample rate as data1.
bin_size: float
Duration of the bins the data will be divided into to calculate
the inner product.
Returns
-------
inner_prod: list
The (complex) inner product of data1 and data2 obtained in each bin.
amp: float
The absolute value of the median of the inner product.
phi: float
The angle of the median of the inner product.
"""
assert data1.duration == data2.duration
assert data1.sample_rate == data2.sample_rate
seglen = int(bin_size * data1.sample_rate)
inner_prod = []
for idx in range(int(data1.duration / bin_size)):
start, end = idx * seglen, (idx+1) * seglen
norm = len(data1[start:end])
bin_prod = 2 * sum(data1.data[start:end].real *
numpy.conjugate(data2.data[start:end])) / norm
inner_prod.append(bin_prod)
# Get the median over all bins to avoid outliers due to the presence
# of a signal in a particular bin.
inner_median = complex_median(inner_prod)
return inner_prod, numpy.abs(inner_median), numpy.angle(inner_median)
def line_model(freq, data, tref, amp=1, phi=0):
""" Simple time-domain model for a frequency line.
Parameters
----------
freq: float
Frequency of the line.
data: pycbc.types.TimeSeries
Reference data, to get delta_t, start_time, duration and sample_times.
tref: float
Reference time for the line model.
amp: {1., float}, optional
Amplitude of the frequency line.
    phi: {0., float}, optional
Phase of the frequency line (radians).
Returns
-------
freq_line: pycbc.types.TimeSeries
A timeseries of the line model with frequency 'freq'. The returned
data are complex to allow measuring the amplitude and phase of the
corresponding frequency line in the strain data. For extraction, use
only the real part of the data.
"""
freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t,
epoch=data.start_time)
times = data.sample_times - float(tref)
alpha = 2 * numpy.pi * freq * times + phi
freq_line.data = amp * numpy.exp(1.j * alpha)
return freq_line
def matching_line(freq, data, tref, bin_size=1):
""" Find the parameter of the line with frequency 'freq' in the data.
Parameters
----------
freq: float
Frequency of the line to find in the data.
data: pycbc.types.TimeSeries
        Data from which the line is to be measured.
tref: float
Reference time for the frequency line.
bin_size: {1, float}, optional
Duration of the bins the data will be divided into for averaging.
Returns
-------
line_model: pycbc.types.TimeSeries
A timeseries containing the frequency line with the amplitude
and phase measured from the data.
"""
template_line = line_model(freq, data, tref=tref)
# Measure amplitude and phase of the line in the data
_, amp, phi = avg_inner_product(data, template_line,
bin_size=bin_size)
return line_model(freq, data, tref=tref, amp=amp, phi=phi)
def calibration_lines(freqs, data, tref=None):
""" Extract the calibration lines from strain data.
Parameters
----------
freqs: list
List containing the frequencies of the calibration lines.
data: pycbc.types.TimeSeries
Strain data to extract the calibration lines from.
tref: {None, float}, optional
Reference time for the line. If None, will use data.start_time.
Returns
-------
data: pycbc.types.TimeSeries
The strain data with the calibration lines removed.
"""
if tref is None:
tref = float(data.start_time)
for freq in freqs:
measured_line = matching_line(freq, data, tref,
bin_size=data.duration)
data -= measured_line.data.real
return data
def clean_data(freqs, data, chunk, avg_bin):
""" Extract time-varying (wandering) lines from strain data.
Parameters
----------
freqs: list
List containing the frequencies of the wandering lines.
data: pycbc.types.TimeSeries
Strain data to extract the wandering lines from.
chunk: float
Duration of the chunks the data will be divided into to account
for the time variation of the wandering lines. Should be smaller
than data.duration, and allow for at least a few chunks.
avg_bin: float
Duration of the bins each chunk will be divided into for averaging
the inner product when measuring the parameters of the line. Should
be smaller than chunk.
Returns
-------
data: pycbc.types.TimeSeries
The strain data with the wandering lines removed.
"""
if avg_bin >= chunk:
raise ValueError('The bin size for averaging the inner product '
'must be less than the chunk size.')
if chunk >= data.duration:
raise ValueError('The chunk size must be less than the '
'data duration.')
steps = numpy.arange(0, int(data.duration/chunk)-0.5, 0.5)
seglen = chunk * data.sample_rate
tref = float(data.start_time)
for freq in freqs:
for step in steps:
start, end = int(step*seglen), int((step+1)*seglen)
chunk_line = matching_line(freq, data[start:end],
tref, bin_size=avg_bin)
# Apply hann window on sides of chunk_line to smooth boundaries
# and avoid discontinuities
hann_window = numpy.hanning(len(chunk_line))
apply_hann = TimeSeries(numpy.ones(len(chunk_line)),
delta_t=chunk_line.delta_t,
epoch=chunk_line.start_time)
if step == 0:
                apply_hann.data[len(hann_window)//2:] *= \
                    hann_window[len(hann_window)//2:]
elif step == steps[-1]:
                apply_hann.data[:len(hann_window)//2] *= \
                    hann_window[:len(hann_window)//2]
else:
apply_hann.data *= hann_window
chunk_line.data *= apply_hann.data
data.data[start:end] -= chunk_line.data.real
return data
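# --- Usage sketch (added for illustration; not part of the original file) ---
# Hedged example: inject a stationary 60 Hz line into white Gaussian noise
# and remove it with calibration_lines(). The sample rate, duration, line
# frequency and amplitude are arbitrary values chosen only for demonstration.
if __name__ == '__main__':
    sample_rate = 4096
    duration = 16
    t = numpy.arange(duration * sample_rate) / sample_rate
    noise = numpy.random.normal(scale=1.0, size=len(t))
    line = 5.0 * numpy.cos(2 * numpy.pi * 60.0 * t + 0.3)
    data = TimeSeries(noise + line, delta_t=1.0 / sample_rate)
    raw = data.copy()
    cleaned = calibration_lines([60.0], data)
    # The overall RMS should drop once the line is subtracted
    print(raw.numpy().std(), cleaned.numpy().std())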
| 8,123
| 35.760181
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/strain/__init__.py
|
from .recalibrate import CubicSpline, PhysicalModel
from .strain import detect_loud_glitches
from .strain import from_cli, from_cli_single_ifo, from_cli_multi_ifos
from .strain import insert_strain_option_group, insert_strain_option_group_multi_ifo
from .strain import verify_strain_options, verify_strain_options_multi_ifo
from .strain import gate_data, StrainSegments, StrainBuffer
from .gate import add_gate_option_group, gates_from_cli
from .gate import apply_gates_to_td, apply_gates_to_fd, psd_gates_from_cli
models = {
CubicSpline.name: CubicSpline,
PhysicalModel.name: PhysicalModel
}
def read_model_from_config(cp, ifo, section="calibration"):
"""Returns an instance of the calibration model specified in the
given configuration file.
Parameters
----------
    cp : WorkflowConfigParser
An open config file to read.
ifo : string
The detector (H1, L1) whose model will be loaded.
section : {"calibration", string}
Section name from which to retrieve the model.
Returns
-------
instance
An instance of the calibration model class.
"""
model = cp.get_opt_tag(section, "{}_model".format(ifo.lower()), None)
recalibrator = models[model].from_config(cp, ifo.lower(), section)
return recalibrator
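# --- Usage sketch (added for illustration; not part of the original file) ---
# Hedged example of read_model_from_config(): write a throwaway [calibration]
# section to disk and load it back. The '<ifo>_<option>' names and the values
# below are assumptions about what the cubic_spline model expects, included
# only to illustrate the expected config layout.
if __name__ == '__main__':
    import tempfile
    from pycbc.workflow import WorkflowConfigParser
    ini_text = """[calibration]
h1_model = cubic_spline
h1_minimum_frequency = 20
h1_maximum_frequency = 1024
h1_n_points = 5
"""
    with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as fp:
        fp.write(ini_text)
        ini_path = fp.name
    cp = WorkflowConfigParser([ini_path])
    recalibrator = read_model_from_config(cp, 'H1')
    print(type(recalibrator).__name__)  # expected: CubicSpline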
| 1,299
| 31.5
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/strain/gate.py
|
# Copyright (C) 2016 Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Functions for applying gates to data.
"""
from scipy import linalg
from . import strain
def _gates_from_cli(opts, gate_opt):
"""Parses the given `gate_opt` into something understandable by
`strain.gate_data`.
"""
gates = {}
if getattr(opts, gate_opt) is None:
return gates
for gate in getattr(opts, gate_opt):
try:
ifo, central_time, half_dur, taper_dur = gate.split(':')
central_time = float(central_time)
half_dur = float(half_dur)
taper_dur = float(taper_dur)
except ValueError:
raise ValueError("--gate {} not formatted correctly; ".format(
gate) + "see help")
try:
gates[ifo].append((central_time, half_dur, taper_dur))
except KeyError:
gates[ifo] = [(central_time, half_dur, taper_dur)]
return gates
def gates_from_cli(opts):
"""Parses the --gate option into something understandable by
`strain.gate_data`.
"""
return _gates_from_cli(opts, 'gate')
def psd_gates_from_cli(opts):
"""Parses the --psd-gate option into something understandable by
`strain.gate_data`.
"""
return _gates_from_cli(opts, 'psd_gate')
def apply_gates_to_td(strain_dict, gates):
"""Applies the given dictionary of gates to the given dictionary of
strain.
Parameters
----------
strain_dict : dict
Dictionary of time-domain strain, keyed by the ifos.
gates : dict
Dictionary of gates. Keys should be the ifo to apply the data to,
values are a tuple giving the central time of the gate, the half
duration, and the taper duration.
Returns
-------
dict
Dictionary of time-domain strain with the gates applied.
"""
# copy data to new dictionary
outdict = dict(strain_dict.items())
for ifo in gates:
outdict[ifo] = strain.gate_data(outdict[ifo], gates[ifo])
return outdict
def apply_gates_to_fd(stilde_dict, gates):
"""Applies the given dictionary of gates to the given dictionary of
strain in the frequency domain.
Gates are applied by IFFT-ing the strain data to the time domain, applying
the gate, then FFT-ing back to the frequency domain.
Parameters
----------
stilde_dict : dict
Dictionary of frequency-domain strain, keyed by the ifos.
gates : dict
Dictionary of gates. Keys should be the ifo to apply the data to,
values are a tuple giving the central time of the gate, the half
duration, and the taper duration.
Returns
-------
dict
Dictionary of frequency-domain strain with the gates applied.
"""
# copy data to new dictionary
outdict = dict(stilde_dict.items())
    # create a time-domain strain dictionary to apply the gates to
strain_dict = dict([[ifo, outdict[ifo].to_timeseries()] for ifo in gates])
# apply gates and fft back to the frequency domain
for ifo,d in apply_gates_to_td(strain_dict, gates).items():
outdict[ifo] = d.to_frequencyseries()
return outdict
def add_gate_option_group(parser):
"""Adds the options needed to apply gates to data.
Parameters
----------
parser : object
ArgumentParser instance.
"""
gate_group = parser.add_argument_group("Options for gating data")
gate_group.add_argument("--gate", nargs="+", type=str,
metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR",
help="Apply one or more gates to the data before "
"filtering.")
gate_group.add_argument("--gate-overwhitened", action="store_true",
help="Overwhiten data first, then apply the "
"gates specified in --gate. Overwhitening "
"allows for sharper tapers to be used, "
"since lines are not blurred.")
gate_group.add_argument("--psd-gate", nargs="+", type=str,
metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR",
help="Apply one or more gates to the data used "
"for computing the PSD. Gates are applied "
"prior to FFT-ing the data for PSD "
"estimation.")
return gate_group
def gate_and_paint(data, lindex, rindex, invpsd, copy=True):
"""Gates and in-paints data.
Parameters
----------
data : TimeSeries
The data to gate.
lindex : int
The start index of the gate.
rindex : int
The end index of the gate.
invpsd : FrequencySeries
The inverse of the PSD.
copy : bool, optional
Copy the data before applying the gate. Otherwise, the gate will
be applied in-place. Default is True.
Returns
-------
TimeSeries :
The gated and in-painted time series.
"""
# Uses the hole-filling method of
# https://arxiv.org/pdf/1908.05644.pdf
# Copy the data and zero inside the hole
if copy:
data = data.copy()
data[lindex:rindex] = 0
# get the over-whitened gated data
tdfilter = invpsd.astype('complex').to_timeseries() * invpsd.delta_t
owhgated_data = (data.to_frequencyseries() * invpsd).to_timeseries()
# remove the projection into the null space
proj = linalg.solve_toeplitz(tdfilter[:(rindex - lindex)],
owhgated_data[lindex:rindex])
data[lindex:rindex] -= proj
data.projslc = (lindex, rindex)
data.proj = proj
return data
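# --- Usage sketch (added for illustration; not part of the original file) ---
# Hedged example: gate half a second of fake H1 data centred on t = 8 s with
# apply_gates_to_td(). The gate tuple follows the (central_time,
# half_duration, taper_duration) convention documented above; all of the
# numbers are placeholders.
if __name__ == '__main__':
    import numpy
    from pycbc.types import TimeSeries
    ts = TimeSeries(numpy.random.normal(size=16 * 4096), delta_t=1.0 / 4096)
    gates = {'H1': [(8.0, 0.25, 0.125)]}
    gated = apply_gates_to_td({'H1': ts}, gates)['H1']
    # Samples inside the gate (8 +/- 0.25 s) should now be zero
    print(gated.numpy()[8 * 4096])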
| 6,383
| 33.695652
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/neutron_stars/eos_utils.py
|
# Copyright (C) 2022 Francesco Pannarale, Andrew Williamson,
# Samuel Higginbotham
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Utility functions for handling NS equations of state
"""
import os.path
import numpy as np
from scipy.interpolate import interp1d
import lalsimulation as lalsim
from . import NS_SEQUENCES, NS_DATA_DIRECTORY
from .pg_isso_solver import PG_ISSO_solver
def load_ns_sequence(eos_name):
"""
Load the data of an NS non-rotating equilibrium sequence generated
using the equation of state (EOS) chosen by the user.
File format is: grav mass (Msun), baryonic mass (Msun), compactness
Parameters
-----------
eos_name : string
NS equation of state label ('2H' is the only supported
choice at the moment)
Returns
----------
ns_sequence : numpy.array
contains the sequence data in the form NS gravitational
mass (in solar masses), NS baryonic mass (in solar
masses), NS compactness (dimensionless)
max_ns_g_mass : float
the maximum NS gravitational mass (in solar masses) in
the sequence (this is the mass of the most massive stable
NS)
"""
ns_sequence_file = os.path.join(
NS_DATA_DIRECTORY, 'equil_{}.dat'.format(eos_name))
if eos_name not in NS_SEQUENCES:
raise NotImplementedError(
f'{eos_name} does not have an implemented NS sequence file! '
f'To implement, the file {ns_sequence_file} must exist and '
'contain: NS gravitational mass (in solar masses), NS baryonic '
'mass (in solar masses), NS compactness (dimensionless)')
ns_sequence = np.loadtxt(ns_sequence_file)
max_ns_g_mass = max(ns_sequence[:, 0])
return (ns_sequence, max_ns_g_mass)
def interp_grav_mass_to_baryon_mass(ns_g_mass, ns_sequence, extrapolate=False):
"""
Determines the baryonic mass of an NS given its gravitational
mass and an NS equilibrium sequence (in solar masses).
Parameters
-----------
ns_g_mass : float
NS gravitational mass (in solar masses)
ns_sequence : numpy.array
Contains the sequence data in the form NS gravitational
mass (in solar masses), NS baryonic mass (in solar
masses), NS compactness (dimensionless)
extrapolate : boolean, optional
Invoke extrapolation in scipy.interpolate.interp1d.
Default is False (so ValueError is raised for ns_g_mass out of bounds)
Returns
----------
float
"""
x = ns_sequence[:, 0]
y = ns_sequence[:, 1]
fill_value = "extrapolate" if extrapolate else np.nan
f = interp1d(x, y, fill_value=fill_value)
return f(ns_g_mass)
def interp_grav_mass_to_compactness(ns_g_mass, ns_sequence, extrapolate=False):
"""
Determines the dimensionless compactness parameter of an NS given
its gravitational mass and an NS equilibrium sequence.
Parameters
-----------
ns_g_mass : float
NS gravitational mass (in solar masses)
ns_sequence : numpy.array
Contains the sequence data in the form NS gravitational
mass (in solar masses), NS baryonic mass (in solar
masses), NS compactness (dimensionless)
extrapolate : boolean, optional
Invoke extrapolation in scipy.interpolate.interp1d.
Default is False (so ValueError is raised for ns_g_mass out of bounds)
Returns
----------
float
"""
x = ns_sequence[:, 0]
y = ns_sequence[:, 2]
fill_value = "extrapolate" if extrapolate else np.nan
f = interp1d(x, y, fill_value=fill_value)
return f(ns_g_mass)
def initialize_eos(ns_mass, eos, extrapolate=False):
"""Load an equation of state and return the compactness and baryonic
mass for a given neutron star mass
Parameters
----------
ns_mass : {float, array}
The gravitational mass of the neutron star, in solar masses.
eos : str
Name of the equation of state.
extrapolate : boolean, optional
Invoke extrapolation in scipy.interpolate.interp1d in the low-mass
regime. In the high-mass regime, the maximum NS mass supported by the
equation of state is not allowed to be exceeded. Default is False
(so ValueError is raised whenever ns_mass is out of bounds).
Returns
-------
ns_compactness : float
Compactness parameter of the neutron star.
ns_b_mass : float
Baryonic mass of the neutron star.
"""
    input_is_array = isinstance(ns_mass, np.ndarray)
if eos in NS_SEQUENCES:
ns_seq, ns_max = load_ns_sequence(eos)
# Never extrapolate beyond the maximum NS mass allowed by the EOS
try:
if any(ns_mass > ns_max) and input_is_array:
raise ValueError(
f'Maximum NS mass for {eos} is {ns_max}, received masses '
f'up to {max(ns_mass[ns_mass > ns_max])}')
except TypeError:
if ns_mass > ns_max and not input_is_array:
raise ValueError(
f'Maximum NS mass for {eos} is {ns_max}, received '
f'{ns_mass}')
# Interpolate NS compactness and rest mass
ns_compactness = interp_grav_mass_to_compactness(
ns_mass, ns_seq, extrapolate=extrapolate)
ns_b_mass = interp_grav_mass_to_baryon_mass(
ns_mass, ns_seq, extrapolate=extrapolate)
elif eos in lalsim.SimNeutronStarEOSNames:
#eos_obj = lalsim.SimNeutronStarEOSByName(eos)
#eos_fam = lalsim.CreateSimNeutronStarFamily(eos_obj)
#r_ns = lalsim.SimNeutronStarRadius(ns_mass * lal.MSUN_SI, eos_obj)
#ns_compactness = lal.G_SI * ns_mass * lal.MSUN_SI / (r_ns * lal.C_SI**2)
raise NotImplementedError(
'LALSimulation EOS interface not yet implemented!')
else:
raise NotImplementedError(
f'{eos} is not implemented! Available are: '
f'{NS_SEQUENCES + list(lalsim.SimNeutronStarEOSNames)}')
return (ns_compactness, ns_b_mass)
def foucart18(
eta, ns_compactness, ns_b_mass, bh_spin_mag, bh_spin_pol):
"""Function that determines the remnant disk mass of an NS-BH system
using the fit to numerical-relativity results discussed in
`Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018)`_.
.. _Foucart, Hinderer & Nissanke, PRD 98, 081501(R) (2018):
https://doi.org/10.1103/PhysRevD.98.081501
Parameters
----------
eta : {float, array}
The symmetric mass ratio of the system
(note: primary is assumed to be the BH).
ns_compactness : {float, array}
NS compactness parameter.
ns_b_mass : {float, array}
Baryonic mass of the NS.
bh_spin_mag: {float, array}
Dimensionless spin magnitude of the BH.
bh_spin_pol : {float, array}
The tilt angle of the BH spin.
"""
isso = PG_ISSO_solver(bh_spin_mag, bh_spin_pol)
# Fit parameters and tidal correction
alpha = 0.406
beta = 0.139
gamma = 0.255
delta = 1.761
fit = (
alpha / eta ** (1/3) * (1 - 2 * ns_compactness)
- beta * ns_compactness / eta * isso
+ gamma
)
return ns_b_mass * np.where(fit > 0.0, fit, 0.0) ** delta
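# --- Usage sketch (added for illustration; not part of the original file) ---
# Hedged example combining initialize_eos() and foucart18(): estimate the
# baryonic mass remaining outside the black hole for a 1.4 Msun neutron star
# (using the '2H' sequence) and a 7 Msun, non-spinning black hole. The masses
# and spin values are placeholders chosen only for demonstration.
if __name__ == '__main__':
    ns_mass, bh_mass = 1.4, 7.0
    ns_compactness, ns_b_mass = initialize_eos(ns_mass, '2H')
    eta = ns_mass * bh_mass / (ns_mass + bh_mass) ** 2
    remnant = foucart18(eta, ns_compactness, ns_b_mass,
                        bh_spin_mag=0.0, bh_spin_pol=0.0)
    print(ns_compactness, ns_b_mass, remnant)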
| 7,931
| 36.415094
| 81
|
py
|