| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q31500 | Detector.time_delay_from_detector | train | def time_delay_from_detector(self, other_detector, right_ascension,
declination, t_gps):
"""Return the time delay from the given to detector for a signal with
the given sky location; i.e. return `t1 - t2` where `t1` is the
arrival time in this detector and `t2` is the arrival time in the
other detector. Note that this would return the same value as
`time_delay_from_earth_center` if `other_detector` was geocentric.
Parameters
----------
other_detector : detector.Detector
A detector instance.
right_ascension : float
The right ascension (in rad) of the signal.
declination : float
The declination (in rad) of the signal.
t_gps : float
The GPS time (in s) of the signal.
Returns
-------
float
The arrival time difference between the detectors.
"""
return self.time_delay_from_location(other_detector.location,
right_ascension,
declination,
t_gps) | python | {
"resource": ""
} |
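A brief usage sketch of the method above; the detector names, sky location, and GPS time are illustrative, and `pycbc.detector.Detector` is assumed to be the hosting class:

from pycbc.detector import Detector

h1 = Detector('H1')
l1 = Detector('L1')
# arrival-time difference t_H1 - t_L1 (seconds) for a signal at the given
# right ascension and declination (radians) at the given GPS time
dt = h1.time_delay_from_detector(l1, right_ascension=1.7,
                                 declination=-0.5, t_gps=1126259462)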
q31501 | Detector.project_wave | train | def project_wave(self, hp, hc, longitude, latitude, polarization):
"""Return the strain of a waveform as measured by the detector.
Apply the time shift for the given detector relative to the assumed
geocentric frame and apply the antenna patterns to the plus and cross
polarizations.
"""
h_lal = lalsimulation.SimDetectorStrainREAL8TimeSeries(
hp.astype(np.float64).lal(), hc.astype(np.float64).lal(),
longitude, latitude, polarization, self.frDetector)
return TimeSeries(
h_lal.data.data, delta_t=h_lal.deltaT, epoch=h_lal.epoch,
dtype=np.float64, copy=False) | python | {
"resource": ""
} |
q31502 | Detector.optimal_orientation | train | def optimal_orientation(self, t_gps):
"""Return the optimal orientation in right ascension and declination
for a given GPS time.
Parameters
----------
t_gps: float
Time in gps seconds
Returns
-------
ra: float
Right ascension that is optimally oriented for the detector
dec: float
Declination that is optimally oriented for the detector
"""
ra = self.longitude + (self.gmst_estimate(t_gps) % (2.0*np.pi))
dec = self.latitude
return ra, dec | python | {
"resource": ""
} |
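A short usage sketch for the method above (the detector and GPS time are illustrative):

from pycbc.detector import Detector

# sky position (radians) directly overhead LIGO Livingston at this GPS time
ra, dec = Detector('L1').optimal_orientation(t_gps=1187008882)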
q31503 | _read_channel | train | def _read_channel(channel, stream, start, duration):
""" Get channel using lalframe """
channel_type = lalframe.FrStreamGetTimeSeriesType(channel, stream)
read_func = _fr_type_map[channel_type][0]
d_type = _fr_type_map[channel_type][1]
data = read_func(stream, channel, start, duration, 0)
return TimeSeries(data.data.data, delta_t=data.deltaT, epoch=start,
dtype=d_type) | python | {
"resource": ""
} |
q31504 | _is_gwf | train | def _is_gwf(file_path):
"""Test if a file is a frame file by checking if its contents begins with
the magic string 'IGWD'."""
try:
with open(file_path, 'rb') as f:
if f.read(4) == b'IGWD':
return True
except IOError:
pass
return False | python | {
"resource": ""
} |
q31505 | locations_to_cache | train | def locations_to_cache(locations, latest=False):
""" Return a cumulative cache file build from the list of locations
Parameters
----------
locations : list
A list of strings containing files, globs, or cache files used to build
a combined lal cache file object.
latest : Optional, {False, Boolean}
Only return a cache with the most recent frame in the locations.
If false, all results are returned.
Returns
-------
cache : lal.Cache
A cumulative lal cache object containing the files derived from the
list of locations
"""
cum_cache = lal.Cache()
for source in locations:
flist = glob.glob(source)
if latest:
def relaxed_getctime(fn):
# when building a cache from a directory of temporary
# low-latency frames, files might disappear between
# the glob() and getctime() calls
try:
return os.path.getctime(fn)
except OSError:
return 0
flist = [max(flist, key=relaxed_getctime)]
for file_path in flist:
dir_name, file_name = os.path.split(file_path)
_, file_extension = os.path.splitext(file_name)
if file_extension in [".lcf", ".cache"]:
cache = lal.CacheImport(file_path)
elif file_extension == ".gwf" or _is_gwf(file_path):
cache = lalframe.FrOpen(str(dir_name), str(file_name)).cache
else:
raise TypeError("Invalid location name")
cum_cache = lal.CacheMerge(cum_cache, cache)
return cum_cache | python | {
"resource": ""
} |
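A minimal usage sketch, assuming `locations_to_cache` is importable from `pycbc.frame` and that the glob and cache-file paths exist (both are hypothetical); the resulting cache can then be opened as a frame stream, as `DataBuffer.update_cache` does below:

import lalframe
from pycbc.frame import locations_to_cache

# combine a directory of frame files with an existing LAL cache file
cache = locations_to_cache(['/data/frames/H1/*.gwf', '/data/caches/H1.lcf'])
stream = lalframe.FrStreamCacheOpen(cache)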
q31506 | datafind_connection | train | def datafind_connection(server=None):
""" Return a connection to the datafind server
Parameters
-----------
server : {SERVER:PORT, string}, optional
A string representation of the server and port.
The port may be omitted.
Returns
--------
connection
The open connection to the datafind server.
"""
if server:
datafind_server = server
else:
# Get the server name from the environment
if 'LIGO_DATAFIND_SERVER' in os.environ:
datafind_server = os.environ["LIGO_DATAFIND_SERVER"]
else:
err = "Trying to obtain the ligo datafind server url from "
err += "the environment, ${LIGO_DATAFIND_SERVER}, but that "
err += "variable is not populated."
raise ValueError(err)
# verify authentication options
if not datafind_server.endswith("80"):
cert_file, key_file = glue.datafind.find_credential()
else:
cert_file, key_file = None, None
# Is a port specified in the server URL
dfs_fields = datafind_server.split(':', 1)
server = dfs_fields[0]
port = int(dfs_fields[1]) if len(dfs_fields) == 2 else None
# Open connection to the datafind server
if cert_file and key_file:
connection = glue.datafind.GWDataFindHTTPSConnection(
host=server, port=port, cert_file=cert_file, key_file=key_file)
else:
connection = glue.datafind.GWDataFindHTTPConnection(
host=server, port=port)
return connection | python | {
"resource": ""
} |
q31507 | frame_paths | train | def frame_paths(frame_type, start_time, end_time, server=None, url_type='file'):
"""Return the paths to a span of frame files
Parameters
----------
frame_type : string
The string representation of the frame type (ex. 'H1_ER_C00_L1')
start_time : int
The start time that we need the frames to span.
end_time : int
The end time that we need the frames to span.
server : {None, SERVER:PORT string}, optional
Optional string to specify the datafind server to use. By default an
attempt is made to use a local datafind server.
url_type : string
Returns only frame URLs with a particular scheme or head such
as "file" or "gsiftp". Default is "file", which queries locally
stored frames. Option can be disabled if set to None.
Returns
-------
paths : list of paths
The list of paths to the frame files.
Examples
--------
>>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048)
"""
site = frame_type[0]
connection = datafind_connection(server)
connection.find_times(site, frame_type,
gpsstart=start_time, gpsend=end_time)
cache = connection.find_frame_urls(site, frame_type, start_time, end_time, urltype=url_type)
paths = [entry.path for entry in cache]
return paths | python | {
"resource": ""
} |
q31508 | write_frame | train | def write_frame(location, channels, timeseries):
"""Write a list of time series to a single frame file.
Parameters
----------
location : string
A frame filename.
channels : string or list of strings
Either a string that contains the channel name or a list of channel
name strings.
timeseries: TimeSeries
A TimeSeries or list of TimeSeries, corresponding to the data to be
written to the frame file for a given channel.
"""
# check if a single channel or a list of channels
if type(channels) is list and type(timeseries) is list:
channels = channels
timeseries = timeseries
else:
channels = [channels]
timeseries = [timeseries]
# check that timeseries have the same start and end time
gps_start_times = {series.start_time for series in timeseries}
gps_end_times = {series.end_time for series in timeseries}
if len(gps_start_times) != 1 or len(gps_end_times) != 1:
raise ValueError("Start and end times of TimeSeries must be identical.")
# check that start, end time, and duration are integers
gps_start_time = gps_start_times.pop()
gps_end_time = gps_end_times.pop()
duration = int(gps_end_time - gps_start_time)
if gps_start_time % 1 or gps_end_time % 1:
raise ValueError("Start and end times of TimeSeries must be integer seconds.")
# create frame
frame = lalframe.FrameNew(epoch=gps_start_time, duration=duration,
project='', run=1, frnum=1,
detectorFlags=lal.LALDETECTORTYPE_ABSENT)
for i,tseries in enumerate(timeseries):
# get data type
for seriestype in _fr_type_map.keys():
if _fr_type_map[seriestype][1] == tseries.dtype:
create_series_func = _fr_type_map[seriestype][2]
create_sequence_func = _fr_type_map[seriestype][4]
add_series_func = _fr_type_map[seriestype][5]
break
# add time series to frame
series = create_series_func(channels[i], tseries.start_time,
0, tseries.delta_t, lal.ADCCountUnit,
len(tseries.numpy()))
series.data = create_sequence_func(len(tseries.numpy()))
series.data.data = tseries.numpy()
add_series_func(frame, series)
# write frame
lalframe.FrameWrite(frame, location) | python | {
"resource": ""
} |
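A minimal sketch of writing a single channel with the function above; the channel name and output path are hypothetical, and the epoch and duration are integer seconds as the function requires:

import numpy
from pycbc.types import TimeSeries
from pycbc.frame import write_frame

# 16 s of zeros sampled at 4096 Hz, starting at an integer GPS time
data = TimeSeries(numpy.zeros(16 * 4096), delta_t=1.0 / 4096,
                  epoch=1126259456)
write_frame('H1-TEST-1126259456-16.gwf', 'H1:TEST-CHANNEL', data)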
q31509 | DataBuffer.update_cache | train | def update_cache(self):
"""Reset the lal cache. This can be used to update the cache if the
result may change due to more files being added to the filesystem,
for example.
"""
cache = locations_to_cache(self.frame_src, latest=True)
stream = lalframe.FrStreamCacheOpen(cache)
self.stream = stream | python | {
"resource": ""
} |
q31510 | DataBuffer._retrieve_metadata | train | def _retrieve_metadata(stream, channel_name):
"""Retrieve basic metadata by reading the first file in the cache
Parameters
----------
stream: lal stream object
Stream containing a channel we want to learn about
channel_name: str
The name of the channel we want to know the dtype and sample rate of
Returns
-------
channel_type: lal type enum
Enum value which indicates the dtype of the channel
sample_rate: int
The sample rate of the data within this channel
"""
lalframe.FrStreamGetVectorLength(channel_name, stream)
channel_type = lalframe.FrStreamGetTimeSeriesType(channel_name, stream)
create_series_func = _fr_type_map[channel_type][2]
get_series_metadata_func = _fr_type_map[channel_type][3]
series = create_series_func(channel_name, stream.epoch, 0, 0,
lal.ADCCountUnit, 0)
get_series_metadata_func(series, stream)
return channel_type, int(1.0/series.deltaT) | python | {
"resource": ""
} |
q31511 | DataBuffer._read_frame | train | def _read_frame(self, blocksize):
"""Try to read the block of data blocksize seconds long
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
data: TimeSeries
TimeSeries containing 'blocksize' seconds of frame data
Raises
------
RuntimeError:
If data cannot be read for any reason
"""
try:
read_func = _fr_type_map[self.channel_type][0]
dtype = _fr_type_map[self.channel_type][1]
data = read_func(self.stream, self.channel_name,
self.read_pos, int(blocksize), 0)
return TimeSeries(data.data.data, delta_t=data.deltaT,
epoch=self.read_pos,
dtype=dtype)
except Exception:
raise RuntimeError('Cannot read {0} frame data'.format(self.channel_name)) | python | {
"resource": ""
} |
q31512 | DataBuffer.update_cache_by_increment | train | def update_cache_by_increment(self, blocksize):
"""Update the internal cache by starting from the first frame
and incrementing.
Guess the next frame file name by incrementing from the first found
one. This allows a pattern to be used for the GPS folder of the file,
which is indicated by `GPSX` where x is the number of digits to use.
Parameters
----------
blocksize: int
Number of seconds to increment the next frame file.
"""
start = float(self.raw_buffer.end_time)
end = float(start + blocksize)
if not hasattr(self, 'dur'):
fname = glob.glob(self.frame_src[0])[0]
fname = os.path.splitext(os.path.basename(fname))[0].split('-')
self.beg = '-'.join([fname[0], fname[1]])
self.ref = int(fname[2])
self.dur = int(fname[3])
fstart = int(self.ref + numpy.floor((start - self.ref) / float(self.dur)) * self.dur)
starts = numpy.arange(fstart, end, self.dur).astype(int)
keys = []
for s in starts:
pattern = self.increment_update_cache
if 'GPS' in pattern:
n = int(pattern[int(pattern.index('GPS') + 3)])
pattern = pattern.replace('GPS%s' % n, str(s)[0:n])
name = '%s/%s-%s-%s.gwf' % (pattern, self.beg, s, self.dur)
# check that file actually exists, else abort now
if not os.path.exists(name):
logging.info("%s does not seem to exist yet" % name)
raise RuntimeError
keys.append(name)
cache = locations_to_cache(keys)
stream = lalframe.FrStreamCacheOpen(cache)
self.stream = stream
self.channel_type, self.raw_sample_rate = \
self._retrieve_metadata(self.stream, self.channel_name) | python | {
"resource": ""
} |
q31513 | DataBuffer.attempt_advance | train | def attempt_advance(self, blocksize, timeout=10):
""" Attempt to advance the frame buffer. Retry upon failure, except
if the frame file is beyond the timeout limit.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
timeout: {int, 10}, Optional
Number of seconds before giving up on reading a frame
Returns
-------
data: TimeSeries
TimeSeries containing 'blocksize' seconds of frame data
"""
if self.force_update_cache:
self.update_cache()
try:
if self.increment_update_cache:
self.update_cache_by_increment(blocksize)
return DataBuffer.advance(self, blocksize)
except RuntimeError:
if lal.GPSTimeNow() > timeout + self.raw_buffer.end_time:
# The frame is not there and it should be by now, so we give up
# and treat it as zeros
DataBuffer.null_advance(self, blocksize)
return None
else:
# I am too early to give up on this frame, so we should try again
time.sleep(1)
return self.attempt_advance(blocksize, timeout=timeout) | python | {
"resource": ""
} |
q31514 | StatusBuffer.check_valid | train | def check_valid(self, values, flag=None):
"""Check if the data contains any non-valid status information
Parameters
----------
values: pycbc.types.Array
Array of status information
flag: str, optional
Override the default valid mask with a user defined mask.
Returns
-------
status: boolean
Returns True if all of the status information is valid,
False if any is not.
"""
if self.valid_on_zero:
valid = values.numpy() == 0
else:
if flag is None:
flag = self.valid_mask
valid = numpy.bitwise_and(values.numpy(), flag) == flag
return bool(numpy.all(valid)) | python | {
"resource": ""
} |
q31515 | StatusBuffer.is_extent_valid | train | def is_extent_valid(self, start_time, duration, flag=None):
"""Check if the duration contains any non-valid frames
Parameters
----------
start_time: int
Beginning of the duration to check in gps seconds
duration: int
Number of seconds after the start_time to check
flag: str, optional
Override the default valid mask with a user defined mask.
Returns
-------
status: boolean
Returns True if all of the status information is valid,
False if any is not.
"""
sr = self.raw_buffer.sample_rate
s = int((start_time - self.raw_buffer.start_time) * sr)
e = s + int(duration * sr) + 1
data = self.raw_buffer[s:e]
return self.check_valid(data, flag=flag) | python | {
"resource": ""
} |
q31516 | StatusBuffer.indices_of_flag | train | def indices_of_flag(self, start_time, duration, times, padding=0):
""" Return the indices of the times lying in the flagged region
Parameters
----------
start_time: int
Beginning time to request for
duration: int
Number of seconds to check.
times: numpy.ndarray
The times (in GPS seconds) of the triggers to test against the flag.
padding: float
Number of seconds to add around flag inactive times to be considered
inactive as well.
Returns
-------
indices: numpy.ndarray
Array of indices marking the location of triggers within valid
time.
"""
from pycbc.events.veto import indices_outside_times
sr = self.raw_buffer.sample_rate
s = int((start_time - self.raw_buffer.start_time - padding) * sr) - 1
e = s + int((duration + padding) * sr) + 1
data = self.raw_buffer[s:e]
stamps = data.sample_times.numpy()
if self.valid_on_zero:
invalid = data.numpy() != 0
else:
invalid = numpy.bitwise_and(data.numpy(), self.valid_mask) \
!= self.valid_mask
starts = stamps[invalid] - padding
ends = starts + 1.0 / sr + padding * 2.0
idx = indices_outside_times(times, starts, ends)
return idx | python | {
"resource": ""
} |
q31517 | snr_series_to_xml | train | def snr_series_to_xml(snr_series, document, sngl_inspiral_id):
"""Save an SNR time series into an XML document, in a format compatible
with BAYESTAR.
"""
snr_lal = snr_series.lal()
snr_lal.name = 'snr'
snr_lal.sampleUnits = ''
snr_xml = _build_series(snr_lal, (u'Time', u'Time,Real,Imaginary'), None,
'deltaT', 's')
snr_node = document.childNodes[-1].appendChild(snr_xml)
eid_param = ligolw_param.Param.build(u'event_id', u'ilwd:char',
sngl_inspiral_id)
snr_node.appendChild(eid_param) | python | {
"resource": ""
} |
q31518 | make_psd_xmldoc | train | def make_psd_xmldoc(psddict, xmldoc=None):
"""Add a set of PSDs to a LIGOLW XML document. If the document is not
given, a new one is created first.
"""
xmldoc = ligolw.Document() if xmldoc is None else xmldoc.childNodes[0]
# the PSDs must be children of a LIGO_LW with name "psd"
root_name = u"psd"
Attributes = ligolw.sax.xmlreader.AttributesImpl
lw = xmldoc.appendChild(
ligolw.LIGO_LW(Attributes({u"Name": root_name})))
for instrument, psd in psddict.items():
xmlseries = _build_series(psd, (u"Frequency,Real", u"Frequency"),
None, 'deltaF', 's^-1')
fs = lw.appendChild(xmlseries)
fs.appendChild(ligolw_param.Param.from_pyvalue(u"instrument",
instrument))
return xmldoc | python | {
"resource": ""
} |
q31519 | SingleCoincForGraceDB.save | train | def save(self, filename):
"""Write this trigger to gracedb compatible xml format
Parameters
----------
filename: str
Name of file to write to disk.
"""
gz = filename.endswith('.gz')
ligolw_utils.write_filename(self.outdoc, filename, gz=gz) | python | {
"resource": ""
} |
q31520 | get_cosmology | train | def get_cosmology(cosmology=None, **kwargs):
r"""Gets an astropy cosmology class.
Parameters
----------
cosmology : str or astropy.cosmology.FlatLambdaCDM, optional
The name of the cosmology to use. For the list of options, see
:py:attr:`astropy.cosmology.parameters.available`. If None, and no
other keyword arguments are provided, will default to
:py:attr:`DEFAULT_COSMOLOGY`. If an instance of
:py:class:`astropy.cosmology.FlatLambdaCDM`, will just return that.
\**kwargs :
If any other keyword arguments are provided they will be passed to
:py:attr:`astropy.cosmology.FlatLambdaCDM` to create a custom
cosmology.
Returns
-------
astropy.cosmology.FlatLambdaCDM
The cosmology to use.
Examples
--------
Use the default:
>>> from pycbc.cosmology import get_cosmology
>>> get_cosmology()
FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307,
Tcmb0=2.725 K, Neff=3.05, m_nu=[0. 0. 0.06] eV,
Ob0=0.0486)
Use properties measured by WMAP instead:
>>> get_cosmology("WMAP9")
FlatLambdaCDM(name="WMAP9", H0=69.3 km / (Mpc s), Om0=0.286, Tcmb0=2.725 K,
Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.0463)
Create your own cosmology (see :py:class:`astropy.cosmology.FlatLambdaCDM`
for details on the default values used):
>>> get_cosmology(H0=70., Om0=0.3)
FlatLambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Tcmb0=0 K, Neff=3.04, m_nu=None,
Ob0=None)
"""
if kwargs and cosmology is not None:
raise ValueError("if providing custom cosmological parameters, do "
"not provide a `cosmology` argument")
if isinstance(cosmology, astropy.cosmology.FlatLambdaCDM):
# just return
return cosmology
if kwargs:
cosmology = astropy.cosmology.FlatLambdaCDM(**kwargs)
else:
if cosmology is None:
cosmology = DEFAULT_COSMOLOGY
if cosmology not in astropy.cosmology.parameters.available:
raise ValueError("unrecognized cosmology {}".format(cosmology))
cosmology = getattr(astropy.cosmology, cosmology)
return cosmology | python | {
"resource": ""
} |
q31521 | z_at_value | train | def z_at_value(func, fval, unit, zmax=1000., **kwargs):
r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.
Getting a z for a cosmological quantity involves numerically inverting
``func``. The ``zmax`` argument sets how large of a z to guess (see
:py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than
``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still
is not large enough, will just return ``numpy.inf``.
Parameters
----------
func : function or method
A function that takes redshift as input.
fval : float
The value of ``func(z)``.
unit : astropy.unit
The unit of ``fval``.
zmax : float, optional
The initial maximum search limit for ``z``. Default is 1000.
\**kwargs :
All other keyword arguments are passed to
:py:func:`astropy.cosmology.z_at_value`.
Returns
-------
float
The redshift at the requested values.
"""
fval, input_is_array = ensurearray(fval)
# make sure fval is atleast 1D
if fval.size == 1 and fval.ndim == 0:
fval = fval.reshape(1)
zs = numpy.zeros(fval.shape, dtype=float) # the output array
for (ii, val) in enumerate(fval):
try:
zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
**kwargs)
except CosmologyError:
# we'll get this if the z was larger than zmax; in that case we'll
# try bumping up zmax later to get a value
zs[ii] = numpy.inf
# check if there were any zs > zmax
replacemask = numpy.isinf(zs)
# try bumping up zmax to get a result
if replacemask.any():
# we'll keep bumping up the maxz until we can get a result
counter = 0 # to prevent running forever
while replacemask.any():
kwargs['zmin'] = zmax
zmax = 10 * zmax
idx = numpy.where(replacemask)[0]
for ii in idx:
val = fval[ii]
try:
zs[ii] = astropy.cosmology.z_at_value(
func, val*unit, zmax=zmax, **kwargs)
replacemask[ii] = False
except CosmologyError:
# didn't work, try on next loop
pass
counter += 1
if counter == 5:
# give up and warn the user
logging.warning("One or more values correspond to a "
"redshift > {0:.1e}. The redshift for these "
"have been set to inf. If you would like "
"better precision, call God.".format(zmax))
break
return formatreturn(zs, input_is_array) | python | {
"resource": ""
} |
q31522 | _redshift | train | def _redshift(distance, **kwargs):
r"""Uses astropy to get redshift from the given luminosity distance.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given luminosity distance.
"""
cosmology = get_cosmology(**kwargs)
return z_at_value(cosmology.luminosity_distance, distance, units.Mpc) | python | {
"resource": ""
} |
q31523 | redshift | train | def redshift(distance, **kwargs):
r"""Returns the redshift associated with the given luminosity distance.
If the requested cosmology is one of the pre-defined ones in
:py:attr:`astropy.cosmology.parameters.available`, :py:class:`DistToZ` is
used to provide a fast interpolation. This takes a few seconds to setup
on the first call.
Parameters
----------
distance : float
The luminosity distance, in Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift corresponding to the given distance.
"""
cosmology = get_cosmology(**kwargs)
try:
z = _d2zs[cosmology.name](distance)
except KeyError:
# not a standard cosmology, call the redshift function
z = _redshift(distance, cosmology=cosmology)
return z | python | {
"resource": ""
} |
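A short usage sketch of the function above, assuming it is importable from `pycbc.cosmology`; distances are in Mpc:

from pycbc.cosmology import redshift

z_default = redshift(1000.)                   # default (Planck15) cosmology
z_wmap9 = redshift(1000., cosmology='WMAP9')  # a named alternative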
q31524 | redshift_from_comoving_volume | train | def redshift_from_comoving_volume(vc, **kwargs):
r"""Returns the redshift from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The redshift at the given comoving volume.
"""
cosmology = get_cosmology(**kwargs)
return z_at_value(cosmology.comoving_volume, vc, units.Mpc**3) | python | {
"resource": ""
} |
q31525 | distance_from_comoving_volume | train | def distance_from_comoving_volume(vc, **kwargs):
r"""Returns the luminosity distance from the given comoving volume.
Parameters
----------
vc : float
The comoving volume, in units of cubed Mpc.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
float :
The luminosity distance at the given comoving volume.
"""
cosmology = get_cosmology(**kwargs)
z = redshift_from_comoving_volume(vc, cosmology=cosmology)
return cosmology.luminosity_distance(z).value | python | {
"resource": ""
} |
q31526 | DistToZ.get_redshift | train | def get_redshift(self, dist):
"""Returns the redshift for the given distance.
"""
dist, input_is_array = ensurearray(dist)
try:
zs = self.nearby_d2z(dist)
except TypeError:
# interpolant hasn't been setup yet
self.setup_interpolant()
zs = self.nearby_d2z(dist)
# if any points had redshifts beyond the nearby, will have nans;
# replace using the faraway interpolation
replacemask = numpy.isnan(zs)
if replacemask.any():
zs[replacemask] = self.faraway_d2z(dist[replacemask])
replacemask = numpy.isnan(zs)
# if we still have nans, means that some distances are beyond our
# furthest default; fall back to using astropy
if replacemask.any():
# well... check that the distance is positive and finite first
if not ((dist > 0.).all() and numpy.isfinite(dist).all()):
raise ValueError("distance must be finite and > 0")
zs[replacemask] = _redshift(dist[replacemask],
cosmology=self.cosmology)
return formatreturn(zs, input_is_array) | python | {
"resource": ""
} |
q31527 | EmceeFile.write_posterior | train | def write_posterior(self, filename, **kwargs):
"""Write posterior only file
Parameters
----------
filename : str
Name of output file to store posterior
"""
f = h5py.File(filename, 'w')
# Preserve top-level metadata
for key in self.attrs:
f.attrs[key] = self.attrs[key]
f.attrs['filetype'] = PosteriorFile.name
s = f.create_group('samples')
fields = self[self.samples_group].keys()
# Copy and squash fields into one dimensional arrays
for field_name in fields:
fvalue = self[self.samples_group][field_name][:]
thin = fvalue[:,self.thin_start:self.thin_end:self.thin_interval]
s[field_name] = thin.flatten() | python | {
"resource": ""
} |
q31528 | geweke | train | def geweke(x, seg_length, seg_stride, end_idx, ref_start,
ref_end=None, seg_start=0):
""" Calculates Geweke conervergence statistic for a chain of data.
This function will advance along the chain and calculate the
statistic for each step.
Parameters
----------
x : numpy.array
A one-dimensional array of data.
seg_length : int
Number of samples to use for each Geweke calculation.
seg_stride : int
Number of samples to advance before next Geweke calculation.
end_idx : int
Index of last start.
ref_start : int
Index of beginning of end reference segment.
ref_end : int
Index of end of end reference segment. Default is None which
will go to the end of the data array.
seg_start : int
What index to start computing the statistic. Default is 0 which
will go to the beginning of the data array.
Returns
-------
starts : numpy.array
The start index of each segment along the chain.
ends : numpy.array
The end index of each segment along the chain.
stats : numpy.array
The Geweke convergence diagnostic statistic for each segment.
"""
# lists to hold statistic and end index
stats = []
ends = []
# get the beginning of all segments
starts = numpy.arange(seg_start, end_idx, seg_stride)
# get second segment of data at the end to compare
x_end = x[ref_start:ref_end]
# loop over all segments
for start in starts:
# find the end of the first segment
x_start_end = int(start + seg_length)
# get first segment
x_start = x[start:x_start_end]
# compute statistic
stats.append((x_start.mean() - x_end.mean()) / numpy.sqrt(
x_start.var() + x_end.var()))
# store end of first segment
ends.append(x_start_end)
return numpy.array(starts), numpy.array(ends), numpy.array(stats) | python | {
"resource": ""
} |
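A minimal sketch on synthetic data, using only the `geweke` function defined above and numpy; the segment lengths and reference window are illustrative choices:

import numpy

chain = numpy.random.normal(size=10000)
starts, ends, stats = geweke(chain, seg_length=500, seg_stride=500,
                             end_idx=5000, ref_start=7500)
# for a well-mixed, stationary chain, |stats| should stay well below ~2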
q31529 | insert_fft_option_group | train | def insert_fft_option_group(parser):
"""
Adds the options used to choose an FFT backend. This should be used
if your program supports the ability to select the FFT backend; otherwise
you may simply call the fft and ifft functions and rely on default
choices. This function will also attempt to add any options exported
by available backends through a function called insert_fft_options.
These submodule functions should take the fft_group object as argument.
Parameters
----------
parser : object
OptionParser instance
"""
fft_group = parser.add_argument_group("Options for selecting the"
" FFT backend and controlling its performance"
" in this program.")
# We have one argument to specify the backends. This becomes the default list used
# if none is specified for a particular call of fft() or ifft(). Note that this
# argument expects a *list* of inputs, as indicated by the nargs='*'.
fft_group.add_argument("--fft-backends",
help="Preference list of the FFT backends. "
"Choices are: \n" + str(get_backend_names()),
nargs='*', default=[])
for backend in get_backend_modules():
try:
backend.insert_fft_options(fft_group)
except AttributeError:
pass | python | {
"resource": ""
} |
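A usage sketch, assuming the function is exposed as `pycbc.fft.insert_fft_option_group`; the backend name passed on the command line is illustrative:

import argparse
from pycbc import fft

parser = argparse.ArgumentParser()
fft.insert_fft_option_group(parser)
opts = parser.parse_args(['--fft-backends', 'fftw'])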
q31530 | set_grb_start_end | train | def set_grb_start_end(cp, start, end):
"""
Function to update analysis boundaries as workflow is generated
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
start : int
The start of the workflow analysis time.
end : int
The end of the workflow analysis time.
Returns
--------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The modified WorkflowConfigParser object.
"""
cp.set("workflow", "start-time", str(start))
cp.set("workflow", "end-time", str(end))
return cp | python | {
"resource": ""
} |
q31531 | get_coh_PTF_files | train | def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):
"""
Retrieve files needed to run coh_PTF jobs within a PyGRB workflow
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
ifos : str
String containing the analysis interferometer IDs.
run_dir : str
The run directory, destination for retrieved files.
bank_veto : Boolean
If true, will retrieve the bank_veto_bank.xml file.
summary_files : Boolean
If true, will retrieve the summary page style files.
Returns
-------
file_list : pycbc.workflow.FileList object
A FileList containing the retrieved files.
"""
if os.getenv("LAL_SRC") is None:
raise ValueError("The environment variable LAL_SRC must be set to a "
"location containing the file lalsuite.git")
else:
lalDir = os.getenv("LAL_SRC")
sci_seg = segments.segment(int(cp.get("workflow", "start-time")),
int(cp.get("workflow", "end-time")))
file_list = FileList([])
# Bank veto
if bank_veto:
shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
"bank_veto_bank.xml" % lalDir, "%s" % run_dir)
bank_veto_url = "file://localhost%s/bank_veto_bank.xml" % run_dir
bank_veto = File(ifos, "bank_veto_bank", sci_seg,
file_url=bank_veto_url)
bank_veto.PFN(bank_veto.cache_entry.path, site="local")
file_list.extend(FileList([bank_veto]))
if summary_files:
# summary.js file
shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
"coh_PTF_html_summary.js" % lalDir, "%s" % run_dir)
summary_js_url = "file://localhost%s/coh_PTF_html_summary.js" \
% run_dir
summary_js = File(ifos, "coh_PTF_html_summary_js", sci_seg,
file_url=summary_js_url)
summary_js.PFN(summary_js.cache_entry.path, site="local")
file_list.extend(FileList([summary_js]))
# summary.css file
shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
"coh_PTF_html_summary.css" % lalDir, "%s" % run_dir)
summary_css_url = "file://localhost%s/coh_PTF_html_summary.css" \
% run_dir
summary_css = File(ifos, "coh_PTF_html_summary_css", sci_seg,
file_url=summary_css_url)
summary_css.PFN(summary_css.cache_entry.path, site="local")
file_list.extend(FileList([summary_css]))
return file_list | python | {
"resource": ""
} |
q31532 | make_exttrig_file | train | def make_exttrig_file(cp, ifos, sci_seg, out_dir):
'''
Make an ExtTrig xml file containing information on the external trigger
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
ifos : str
String containing the analysis interferometer IDs.
sci_seg : ligo.segments.segment
The science segment for the analysis run.
out_dir : str
The output directory, destination for xml file.
Returns
-------
xml_file : pycbc.workflow.File object
The xml file with external trigger information.
'''
# Initialise objects
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
tbl = lsctables.New(lsctables.ExtTriggersTable)
cols = tbl.validcolumns
xmldoc.childNodes[-1].appendChild(tbl)
row = tbl.appendRow()
# Add known attributes for this GRB
setattr(row, "event_ra", float(cp.get("workflow", "ra")))
setattr(row, "event_dec", float(cp.get("workflow", "dec")))
setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))
# Fill in any remaining columns with empty/default values
for entry in cols.keys():
if not hasattr(row, entry):
if cols[entry] in ['real_4','real_8']:
setattr(row,entry,0.)
elif cols[entry] == 'int_4s':
setattr(row,entry,0)
elif cols[entry] == 'lstring':
setattr(row,entry,'')
elif entry == 'process_id':
row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
elif entry == 'event_id':
row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
else:
print("Column %s not recognized" %(entry), file=sys.stderr)
raise ValueError
# Save file
xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
"trigger-name"))
xml_file_path = os.path.join(out_dir, xml_file_name)
utils.write_filename(xmldoc, xml_file_path)
xml_file_url = urlparse.urljoin("file:", urllib.pathname2url(xml_file_path))
xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
xml_file.PFN(xml_file_url, site="local")
return xml_file | python | {
"resource": ""
} |
q31533 | get_ipn_sky_files | train | def get_ipn_sky_files(workflow, file_url, tags=None):
'''
Retrieve the sky point files for searching over the IPN error box and
populating it with injections.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
file_url : string
The URL of the IPN sky points file.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
sky_points_file : pycbc.workflow.core.File
File object representing the IPN sky points file.
'''
tags = tags or []
ipn_sky_points = resolve_url(file_url)
sky_points_url = urlparse.urljoin("file:",
urllib.pathname2url(ipn_sky_points))
sky_points_file = File(workflow.ifos, "IPN_SKY_POINTS",
workflow.analysis_time, file_url=sky_points_url, tags=tags)
sky_points_file.PFN(sky_points_url, site="local")
return sky_points_file | python | {
"resource": ""
} |
q31534 | make_gating_node | train | def make_gating_node(workflow, datafind_files, outdir=None, tags=None):
'''
Generate jobs for autogating the data for PyGRB runs.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
datafind_files : pycbc.workflow.core.FileList
A FileList containing the frame files to be gated.
outdir : string
Path of the output directory
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
condition_strain_nodes : list
List containing the pycbc.workflow.core.Node objects representing the
autogating jobs.
condition_strain_outs : pycbc.workflow.core.FileList
FileList containing the pycbc.workflow.core.File objects representing
the gated frame files.
'''
cp = workflow.cp
if tags is None:
tags = []
condition_strain_class = select_generic_executable(workflow,
"condition_strain")
condition_strain_nodes = []
condition_strain_outs = FileList([])
for ifo in workflow.ifos:
input_files = FileList([datafind_file for datafind_file in \
datafind_files if datafind_file.ifo == ifo])
condition_strain_jobs = condition_strain_class(cp, "condition_strain",
ifo=ifo, out_dir=outdir, tags=tags)
condition_strain_node, condition_strain_out = \
condition_strain_jobs.create_node(input_files, tags=tags)
condition_strain_nodes.append(condition_strain_node)
condition_strain_outs.extend(FileList([condition_strain_out]))
return condition_strain_nodes, condition_strain_outs | python | {
"resource": ""
} |
q31535 | create_waveform_generator | train | def create_waveform_generator(variable_params, data,
recalibration=None, gates=None,
**static_params):
"""Creates a waveform generator for use with a model.
Parameters
----------
variable_params : list of str
The names of the parameters varied.
data : dict
Dictionary mapping detector names to either a
:py:class:`~pycbc.types.TimeSeries` or
:py:class:`~pycbc.types.FrequencySeries`.
recalibration : dict, optional
Dictionary mapping detector names to
:py:class:`~pycbc.calibration.Recalibrate` instances for
recalibrating data.
gates : dict of tuples, optional
Dictionary of detectors -> tuples of specifying gate times. The
sort of thing returned by :py:func:`pycbc.gate.gates_from_cli`.
Returns
-------
pycbc.waveform.FDomainDetFrameGenerator
A waveform generator for frequency domain generation.
"""
# figure out what generator to use based on the approximant
try:
approximant = static_params['approximant']
except KeyError:
raise ValueError("no approximant provided in the static args")
generator_function = generator.select_waveform_generator(approximant)
# get data parameters; we'll just use one of the data to get the
# values, then check that all the others are the same
delta_f = None
for d in data.values():
if delta_f is None:
delta_f = d.delta_f
delta_t = d.delta_t
start_time = d.start_time
else:
if not all([d.delta_f == delta_f, d.delta_t == delta_t,
d.start_time == start_time]):
raise ValueError("data must all have the same delta_t, "
"delta_f, and start_time")
waveform_generator = generator.FDomainDetFrameGenerator(
generator_function, epoch=start_time,
variable_args=variable_params, detectors=list(data.keys()),
delta_f=delta_f, delta_t=delta_t,
recalib=recalibration, gates=gates,
**static_params)
return waveform_generator | python | {
"resource": ""
} |
q31536 | low_frequency_cutoff_from_config | train | def low_frequency_cutoff_from_config(cp):
"""Gets the low frequency cutoff from the given config file.
This looks for ``low-frequency-cutoff`` in the ``[model]`` section and
casts it to float. If none is found, or the casting to float fails, an
error is raised.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
Returns
-------
float :
The low frequency cutoff.
"""
try:
low_frequency_cutoff = float(
cp.get('model', 'low-frequency-cutoff'))
except (NoOptionError, NoSectionError) as e:
logging.warning("Low frequency cutoff for calculation of inner "
"product needs to be specified in config file "
"under section 'model'")
raise e
except Exception as e:
# everything the float() can throw
logging.warning("Low frequency cutoff could not be "
"converted to float ")
raise e
return low_frequency_cutoff | python | {
"resource": ""
} |
q31537 | high_frequency_cutoff_from_config | train | def high_frequency_cutoff_from_config(cp):
"""Gets the high frequency cutoff from the given config file.
This looks for ``high-frequency-cutoff`` in the ``[model]`` section and
casts it to float. If none is found, will just return ``None``.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
Returns
-------
float or None :
The high frequency cutoff.
"""
if cp.has_option('model', 'high-frequency-cutoff'):
high_frequency_cutoff = float(
cp.get('model', 'high-frequency-cutoff'))
else:
high_frequency_cutoff = None
return high_frequency_cutoff | python | {
"resource": ""
} |
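Both helpers above read the [model] section of a configuration file. A minimal sketch of the expected section and how it might be parsed, assuming `pycbc.workflow.WorkflowConfigParser` accepts a list of file paths (the file name is hypothetical):

# contents of a hypothetical model.ini:
#   [model]
#   name = gaussian_noise
#   low-frequency-cutoff = 20.0
#   high-frequency-cutoff = 1024.0
from pycbc.workflow import WorkflowConfigParser

cp = WorkflowConfigParser(['model.ini'])
f_low = low_frequency_cutoff_from_config(cp)    # 20.0
f_high = high_frequency_cutoff_from_config(cp)  # 1024.0, or None if absent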
q31538 | GaussianNoise._lognl | train | def _lognl(self):
"""Computes the log likelihood assuming the data is noise.
Since this is a constant for Gaussian noise, this is only computed once
then stored.
"""
try:
return self.__lognl
except AttributeError:
det_lognls = {}
for (det, d) in self._data.items():
kmin = self._kmin
kmax = self._kmax
det_lognls[det] = -0.5 * d[kmin:kmax].inner(d[kmin:kmax]).real
self.__det_lognls = det_lognls
self.__lognl = sum(det_lognls.values())
return self.__lognl | python | {
"resource": ""
} |
q31539 | GaussianNoise._nowaveform_loglr | train | def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
for det in self._data:
setattr(self._current_stats, 'loglikelihood', -numpy.inf)
setattr(self._current_stats, '{}_cplx_loglr'.format(det),
-numpy.inf)
# snr can't be < 0 by definition, so return 0
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
return -numpy.inf | python | {
"resource": ""
} |
q31540 | GaussianNoise.det_lognl | train | def det_lognl(self, det):
"""Returns the log likelihood of the noise in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The log likelihood of the noise in the requested detector.
"""
try:
return self.__det_lognls[det]
except AttributeError:
# hasn't been calculated yet, call lognl to calculate & store
self._lognl()
# now try returning
return self.__det_lognls[det] | python | {
"resource": ""
} |
q31541 | GaussianNoise.det_cplx_loglr | train | def det_cplx_loglr(self, det):
"""Returns the complex log likelihood ratio in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
complex float :
The complex log likelihood ratio.
"""
# try to get it from current stats
try:
return getattr(self._current_stats, '{}_cplx_loglr'.format(det))
except AttributeError:
# hasn't been calculated yet; call loglr to do so
self._loglr()
# now try returning again
return getattr(self._current_stats, '{}_cplx_loglr'.format(det)) | python | {
"resource": ""
} |
q31542 | GaussianNoise.det_optimal_snrsq | train | def det_optimal_snrsq(self, det):
"""Returns the opitmal SNR squared in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The optimal SNR squared.
"""
# try to get it from current stats
try:
return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
except AttributeError:
# hasn't been calculated yet; call loglr to do so
self._loglr()
# now try returning again
return getattr(self._current_stats, '{}_optimal_snrsq'.format(det)) | python | {
"resource": ""
} |
q31543 | GaussianNoise.write_metadata | train | def write_metadata(self, fp):
"""Adds writing the psds and lognl, since it's a constant.
The lognl is written to the sample group's ``attrs``.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
"""
super(GaussianNoise, self).write_metadata(fp)
fp.attrs['low_frequency_cutoff'] = self.low_frequency_cutoff
if self.high_frequency_cutoff is not None:
fp.attrs['high_frequency_cutoff'] = self.high_frequency_cutoff
if self._psds is not None:
fp.write_psd(self._psds)
try:
attrs = fp[fp.samples_group].attrs
except KeyError:
# group doesn't exist, create it
fp.create_group(fp.samples_group)
attrs = fp[fp.samples_group].attrs
attrs['lognl'] = self.lognl
for det in self.detectors:
attrs['{}_lognl'.format(det)] = self.det_lognl(det) | python | {
"resource": ""
} |
q31544 | GaussianNoise.from_config | train | def from_config(cls, cp, **kwargs):
r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will over ride what is in the config file.
"""
args = cls._init_args_from_config(cp)
args['low_frequency_cutoff'] = low_frequency_cutoff_from_config(cp)
args['high_frequency_cutoff'] = high_frequency_cutoff_from_config(cp)
# get any other keyword arguments provided in the model section
ignore_args = ['name', 'low-frequency-cutoff', 'high-frequency-cutoff']
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
args.update(kwargs)
return cls(**args) | python | {
"resource": ""
} |
q31545 | loadfile | train | def loadfile(path, mode=None, filetype=None, **kwargs):
"""Loads the given file using the appropriate InferenceFile class.
If ``filetype`` is not provided, this will try to retrieve the ``filetype``
from the file's ``attrs``. If the file does not exist yet, an IOError will
be raised if ``filetype`` is not provided.
Parameters
----------
path : str
The filename to load.
mode : str, optional
What mode to load the file with, e.g., 'w' for write, 'r' for read,
'a' for append. Default is h5py.File's default mode, which is 'a'.
filetype : str, optional
Force the file to be loaded with the given class name. This must be
provided if creating a new file.
Returns
-------
filetype instance
An open file handler to the file. The class used for IO with the file
is determined by the ``filetype`` keyword (if provided) or the
``filetype`` stored in the file (if not provided).
"""
if filetype is None:
# try to read the file to get its filetype
try:
fileclass = get_file_type(path)
except IOError:
# file doesn't exist, filetype must be provided
raise IOError("The file appears not to exist. In this case, "
"filetype must be provided.")
else:
fileclass = filetypes[filetype]
return fileclass(path, mode=mode, **kwargs) | python | {
"resource": ""
} |
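A minimal sketch of opening an existing result file read-only with the function above and listing the parameters stored in its samples group (the file name is hypothetical):

with loadfile('inference.hdf', 'r') as fp:
    print(fp.attrs['filetype'])
    print(list(fp[fp.samples_group].keys()))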
q31546 | check_integrity | train | def check_integrity(filename):
"""Checks the integrity of an InferenceFile.
Checks done are:
* can the file open?
* do all of the datasets in the samples group have the same shape?
* can the first and last sample in all of the datasets in the samples
group be read?
If any of these checks fail, an IOError is raised.
Parameters
----------
filename: str
Name of an InferenceFile to check.
Raises
------
ValueError
If the given file does not exist.
KeyError
If the samples group does not exist.
IOError
If any of the checks fail.
"""
# check that the file exists
if not os.path.exists(filename):
raise ValueError("file {} does not exist".format(filename))
# if the file is corrupted such that it cannot be opened, the next line
# will raise an IOError
with loadfile(filename, 'r') as fp:
# check that all datasets in samples have the same shape
parameters = fp[fp.samples_group].keys()
# but only do the check if parameters have been written
if len(parameters) > 0:
group = fp.samples_group + '/{}'
# use the first parameter as a reference shape
ref_shape = fp[group.format(parameters[0])].shape
if not all(fp[group.format(param)].shape == ref_shape
for param in parameters):
raise IOError("not all datasets in the samples group have the "
"same shape")
# check that we can read the first/last sample
firstidx = tuple([0]*len(ref_shape))
lastidx = tuple([-1]*len(ref_shape))
for param in parameters:
_ = fp[group.format(param)][firstidx]
_ = fp[group.format(param)][lastidx] | python | {
"resource": ""
} |
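A usage sketch of the function above: it returns nothing on success and raises on any problem, so a pre-flight check might look like this (file name hypothetical):

try:
    check_integrity('inference.hdf')
except (IOError, KeyError, ValueError) as err:
    print('result file failed integrity check: {}'.format(err))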
q31547 | get_common_parameters | train | def get_common_parameters(input_files, collection=None):
"""Gets a list of variable params that are common across all input files.
If no common parameters are found, a ``ValueError`` is raised.
Parameters
----------
input_files : list of str
List of input files to load.
collection : str, optional
What group of parameters to load. Can be the name of a list of
parameters stored in the files' attrs (e.g., "variable_params"), or
"all". If "all", will load all of the parameters in the files'
samples group. Default is to load all.
Returns
-------
list :
List of the parameter names.
"""
if collection is None:
collection = "all"
parameters = []
for fn in input_files:
fp = loadfile(fn, 'r')
if collection == 'all':
ps = fp[fp.samples_group].keys()
else:
ps = fp.attrs[collection]
parameters.append(set(ps))
fp.close()
parameters = list(set.intersection(*parameters))
if parameters == []:
raise ValueError("no common parameters found for collection {} in "
"files {}".format(collection, ', '.join(input_files)))
return parameters | python | {
"resource": ""
} |
q31548 | results_from_cli | train | def results_from_cli(opts, load_samples=True, **kwargs):
"""Loads an inference result file along with any labels associated with it
from the command line options.
Parameters
----------
opts : ArgumentParser options
The options from the command line.
load_samples : bool, optional
Load the samples from the file.
Returns
-------
fp_all : (list of) BaseInferenceFile type
The result file as an hdf file. If more than one input file,
then it returns a list.
parameters : list of str
List of the parameters to use, parsed from the parameters option.
labels : dict
Dictionary of labels to associate with the parameters.
samples_all : (list of) FieldArray(s) or None
If load_samples, the samples as a FieldArray; otherwise, None.
If more than one input file, then it returns a list.
\**kwargs :
Any other keyword arguments that are passed to read samples using
samples_from_cli
"""
# lists for files and samples from all input files
fp_all = []
samples_all = []
input_files = opts.input_file
if isinstance(input_files, str):
input_files = [input_files]
# loop over all input files
for input_file in input_files:
logging.info("Reading input file %s", input_file)
# read input file
fp = loadfile(input_file, "r")
# load the samples
if load_samples:
logging.info("Loading samples")
# check if need extra parameters for a non-sampling parameter
file_parameters, ts = _transforms.get_common_cbc_transforms(
opts.parameters, fp.variable_params)
# read samples from file
samples = fp.samples_from_cli(opts, parameters=file_parameters, **kwargs)
logging.info("Using {} samples".format(samples.size))
# add parameters not included in file
samples = _transforms.apply_transforms(samples, ts)
# else do not read samples
else:
samples = None
# add results to lists from all input files
if len(input_files) > 1:
fp_all.append(fp)
samples_all.append(samples)
# else only one input file then do not return lists
else:
fp_all = fp
samples_all = samples
return fp_all, opts.parameters, opts.parameters_labels, samples_all | python | {
"resource": ""
} |
q31549 | ResultsArgumentParser._unset_required | train | def _unset_required(self):
"""Convenience function to turn off required arguments for first parse.
"""
self._required_args = [act for act in self._actions if act.required]
for act in self._required_args:
act.required = False | python | {
"resource": ""
} |
q31550 | ResultsArgumentParser.parse_known_args | train | def parse_known_args(self, args=None, namespace=None):
"""Parse args method to handle input-file dependent arguments."""
# run parse args once to make sure the name space is populated with
# the input files. We'll turn off raising NoInputFileErrors on this
# pass
self.no_input_file_err = True
self._unset_required()
opts, extra_opts = super(ResultsArgumentParser, self).parse_known_args(
args, namespace)
# now do it again
self.no_input_file_err = False
self._reset_required()
opts, extra_opts = super(ResultsArgumentParser, self).parse_known_args(
args, opts)
# populate the parameters option if it wasn't specified
if opts.parameters is None:
parameters = get_common_parameters(opts.input_file,
collection='variable_params')
# now call parse parameters action to populate the namespace
self.actions['parameters'](self, opts, parameters)
# parse the sampler-specific options and check for any unknowns
unknown = []
for fn in opts.input_file:
fp = loadfile(fn, 'r')
sampler_parser, _ = fp.extra_args_parser(skip_args=self.skip_args)
if sampler_parser is not None:
opts, still_unknown = sampler_parser.parse_known_args(
extra_opts, namespace=opts)
unknown.append(set(still_unknown))
# the intersection of the unknowns are options not understood by
# any of the files
if len(unknown) > 0:
unknown = set.intersection(*unknown)
return opts, list(unknown) | python | {
"resource": ""
} |
q31551 | ResultsArgumentParser.add_results_option_group | train | def add_results_option_group(self):
"""Adds the options used to call pycbc.inference.io.results_from_cli
function to the parser.
These are options releated to loading the results from a run of
pycbc_inference, for purposes of plotting and/or creating tables.
Any argument strings included in the ``skip_args`` attribute will not
be added.
"""
results_reading_group = self.add_argument_group(
title="Arguments for loading results",
description="Additional, file-specific arguments may also be "
"provided, depending on what input-files are given. See "
"--file-help for details.")
results_reading_group.add_argument(
"--input-file", type=str, required=True, nargs="+",
action=ParseLabelArg, metavar='FILE[:LABEL]',
help="Path to input HDF file(s). A label may be specified for "
"each input file to use for plots when multiple files are "
"specified.")
# advanced help
results_reading_group.add_argument(
"-H", "--file-help",
action=PrintFileParams, skip_args=self.skip_args,
help="Based on the provided input-file(s), print all available "
"parameters that may be retrieved and all possible functions "
"on those parameters. Also print available additional "
"arguments that may be passed. This option is like an "
"advanced --help: if run, the program will just print the "
"information to screen, then exit.")
results_reading_group.add_argument(
"--parameters", type=str, nargs="+", metavar="PARAM[:LABEL]",
action=ParseParametersArg,
help="Name of parameters to load. If none provided will load all "
"of the model params in the input-file. If provided, the "
"parameters can be any of the model params or posterior "
"stats (loglikelihood, logprior, etc.) in the input file(s), "
"derived parameters from them, or any function of them. If "
"multiple files are provided, any parameter common to all "
"files may be used. Syntax for functions is python; any math "
"functions in the numpy libary may be used. Can optionally "
"also specify a LABEL for each parameter. If no LABEL is "
"provided, PARAM will used as the LABEL. If LABEL is the "
"same as a parameter in pycbc.waveform.parameters, the label "
"property of that parameter will be used (e.g., if LABEL "
"were 'mchirp' then {} would be used). To see all possible "
"parameters that may be used with the given input file(s), "
"as well as all avaiable functions, run --file-help, along "
"with one or more input files.".format(
_waveform.parameters.mchirp.label))
return results_reading_group | python | {
"resource": ""
} |
q31552 | setup_tmpltbank_workflow | train | def setup_tmpltbank_workflow(workflow, science_segs, datafind_outs,
output_dir=None, psd_files=None, tags=None,
return_format=None):
'''
Setup template bank section of CBC workflow. This function is responsible
for deciding which of the various template bank workflow generation
utilities should be used.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
science_segs : Keyed dictionary of glue.segmentlist objects
scienceSegs[ifo] holds the science segments to be analysed for each
ifo.
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
output_dir : path string
The directory where data products will be placed.
psd_files : pycbc.workflow.core.FileList
The file list containing predefined PSDs, if provided.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
tmplt_banks : pycbc.workflow.core.FileList
The FileList holding the details of all the template bank jobs.
'''
if tags is None:
tags = []
logging.info("Entering template bank generation module.")
make_analysis_dir(output_dir)
cp = workflow.cp
# Parse for options in ini file
tmpltbankMethod = cp.get_opt_tags("workflow-tmpltbank", "tmpltbank-method",
tags)
# There can be a large number of different options here, for e.g. to set
# up fixed bank, or maybe something else
if tmpltbankMethod == "PREGENERATED_BANK":
logging.info("Setting template bank from pre-generated bank(s).")
tmplt_banks = setup_tmpltbank_pregenerated(workflow, tags=tags)
# Else we assume template banks will be generated in the workflow
elif tmpltbankMethod == "WORKFLOW_INDEPENDENT_IFOS":
logging.info("Adding template bank jobs to workflow.")
if cp.has_option_tags("workflow-tmpltbank",
"tmpltbank-link-to-matchedfilter", tags):
if not cp.has_option_tags("workflow-matchedfilter",
"matchedfilter-link-to-tmpltbank", tags):
errMsg = "If using tmpltbank-link-to-matchedfilter, you should "
errMsg = "also use matchedfilter-link-to-tmpltbank."
logging.warn(errMsg)
linkToMatchedfltr = True
else:
linkToMatchedfltr = False
if cp.has_option_tags("workflow-tmpltbank",
"tmpltbank-compatibility-mode", tags):
if not linkToMatchedfltr:
errMsg = "Compatibility mode requires that the "
errMsg += "tmpltbank-link-to-matchedfilter option is also set."
raise ValueError(errMsg)
if not cp.has_option_tags("workflow-matchedfilter",
"matchedfilter-compatibility-mode", tags):
errMsg = "If using compatibility mode it must be set both in "
errMsg += "the template bank and matched-filtering stages."
raise ValueError(errMsg)
compatibility_mode = True
else:
compatibility_mode = False
tmplt_banks = setup_tmpltbank_dax_generated(workflow, science_segs,
datafind_outs, output_dir, tags=tags,
link_to_matchedfltr=linkToMatchedfltr,
compatibility_mode=compatibility_mode,
psd_files=psd_files)
elif tmpltbankMethod == "WORKFLOW_INDEPENDENT_IFOS_NODATA":
logging.info("Adding template bank jobs to workflow.")
tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir,
tags=tags, independent_ifos=True,
psd_files=psd_files)
elif tmpltbankMethod == "WORKFLOW_NO_IFO_VARIATION_NODATA":
logging.info("Adding template bank jobs to workflow.")
tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir,
tags=tags, independent_ifos=False,
psd_files=psd_files)
else:
errMsg = "Template bank method not recognized. Must be either "
errMsg += "PREGENERATED_BANK, WORKFLOW_INDEPENDENT_IFOS "
errMsg += "or WORKFLOW_INDEPENDENT_IFOS_NODATA."
raise ValueError(errMsg)
# Check the format of the input template bank file and return it in
# the format requested as per return_format, provided a conversion
# between the two specific formats has been implemented. Currently,
# a conversion from xml.gz or xml to hdf is supported, but not vice
# versa. If a return_format is not specified the function returns
# the bank in the format in which it was input.
tmplt_bank_filename = tmplt_banks[0].name
ext = tmplt_bank_filename.split('.', 1)[1]
logging.info("Input bank is a %s file", ext)
if return_format is None:
tmplt_banks_return = tmplt_banks
elif return_format in ('hdf', 'h5', 'hdf5'):
if ext in ('hdf', 'h5', 'hdf5') or ext in ('xml.gz', 'xml'):
tmplt_banks_return = pycbc.workflow.convert_bank_to_hdf(workflow,
tmplt_banks, "bank")
else:
if ext == return_format:
tmplt_banks_return = tmplt_banks
else:
raise NotImplementedError("{0} to {1} conversion is not "
"supported.".format(ext, return_format))
logging.info("Leaving template bank generation module.")
return tmplt_banks_return | python | {
"resource": ""
} |
q31553 | get_version_info | train | def get_version_info():
"""Get VCS info and write version info to version.py
"""
from pycbc import _version_helper
class vdummy(object):
def __getattr__(self, attr):
return ''
# If this is a pycbc git repo always populate version information using GIT
try:
vinfo = _version_helper.generate_git_version_info()
except:
vinfo = vdummy()
vinfo.version = '1.13.dev7'
vinfo.release = 'False'
with open('pycbc/version.py', 'w') as f:
f.write("# coding: utf-8\n")
f.write("# Generated by setup.py for PyCBC on %s.\n\n"
% vinfo.build_date)
# print general info
f.write('version = \'%s\'\n' % vinfo.version)
f.write('date = \'%s\'\n' % vinfo.date)
f.write('release = %s\n' % vinfo.release)
f.write('last_release = \'%s\'\n' % vinfo.last_release)
# print git info
f.write('\ngit_hash = \'%s\'\n' % vinfo.hash)
f.write('git_branch = \'%s\'\n' % vinfo.branch)
f.write('git_tag = \'%s\'\n' % vinfo.tag)
f.write('git_author = \'%s\'\n' % vinfo.author)
f.write('git_committer = \'%s\'\n' % vinfo.committer)
f.write('git_status = \'%s\'\n' % vinfo.status)
f.write('git_builder = \'%s\'\n' % vinfo.builder)
f.write('git_build_date = \'%s\'\n' % vinfo.build_date)
f.write('git_verbose_msg = """Version: %s\n'
'Branch: %s\n'
'Tag: %s\n'
'Id: %s\n'
'Builder: %s\n'
'Build date: %s\n'
'Repository status is %s"""\n' %(
vinfo.version,
vinfo.branch,
vinfo.tag,
vinfo.hash,
vinfo.builder,
vinfo.build_date,
vinfo.status))
f.write('from pycbc._version import *\n')
version = vinfo.version
from pycbc import version
version = version.version
return version | python | {
"resource": ""
} |
q31554 | table | train | def table(columns, names, page_size=None, format_strings=None):
""" Return an html table of this data
Parameters
----------
columns : list of numpy arrays
names : list of strings
The list of columns names
page_size : {int, None}, optional
The number of items to show on each page of the table
format_strings : {lists of strings, None}, optional
The ICU format string for this column, None for no formatting. All
columns must have a format string if provided.
Returns
-------
html_table : str
A str containing the html code to display a table of this data
"""
if page_size is None:
page = 'disable'
else:
page = 'enable'
div_id = uuid.uuid4()
column_descriptions = []
for column, name in zip(columns, names):
if column.dtype.kind == 'S':
ctype = 'string'
else:
ctype = 'number'
column_descriptions.append((ctype, name))
data = []
for item in zip(*columns):
data.append(list(item))
return google_table_template.render(div_id=div_id,
page_enable=page,
column_descriptions = column_descriptions,
page_size=page_size,
data=data,
format_strings=format_strings,
) | python | {
"resource": ""
} |
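A minimal usage sketch for the `table` helper above, assuming the helper's module-level template is importable; the column contents and format strings are invented for illustration.

import numpy

ifo = numpy.array(['H1', 'L1', 'V1'], dtype='S2')
snr = numpy.array([8.3, 9.1, 12.7])
html = table([ifo, snr], ['Detector', 'SNR'],
             page_size=10, format_strings=[None, '0.00'])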
q31555 | load_timeseries | train | def load_timeseries(path, group=None):
"""
Load a TimeSeries from a .hdf, .txt or .npy file. The
default data types will be double precision floating point.
Parameters
----------
path: string
source file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
If path does not end in .hdf, .npy or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
f = h5py.File(path, 'r')
data = f[key][:]
series = TimeSeries(data, delta_t=f[key].attrs['delta_t'],
epoch=f[key].attrs['start_time'])
f.close()
return series
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
if data.ndim == 2:
delta_t = (data[-1][0] - data[0][0]) / (len(data)-1)
epoch = _lal.LIGOTimeGPS(data[0][0])
return TimeSeries(data[:,1], delta_t=delta_t, epoch=epoch)
elif data.ndim == 3:
delta_t = (data[-1][0] - data[0][0]) / (len(data)-1)
epoch = _lal.LIGOTimeGPS(data[0][0])
return TimeSeries(data[:,1] + 1j*data[:,2],
delta_t=delta_t, epoch=epoch)
else:
raise ValueError('File has %s dimensions, cannot convert to Array, \
must be 2 (real) or 3 (complex)' % data.ndim) | python | {
"resource": ""
} |
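A hedged round-trip sketch: write a short TimeSeries to an HDF file with the `save` method (shown later in this section) and read it back with `load_timeseries`. The file name and group are arbitrary, and the import location is assumed.

import numpy
from pycbc.types import TimeSeries, load_timeseries  # assumed import location

ts = TimeSeries(numpy.sin(0.01 * numpy.arange(4096)), delta_t=1.0/4096)
ts.save('example.hdf', group='strain')
ts2 = load_timeseries('example.hdf', group='strain')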
q31556 | TimeSeries.prepend_zeros | train | def prepend_zeros(self, num):
"""Prepend num zeros onto the beginning of this TimeSeries. Update also
epoch to include this prepending.
"""
self.resize(len(self) + num)
self.roll(num)
self._epoch = self._epoch - num * self._delta_t | python | {
"resource": ""
} |
q31557 | TimeSeries.time_slice | train | def time_slice(self, start, end):
"""Return the slice of the time series that contains the time range
in GPS seconds.
"""
if start < self.start_time:
raise ValueError('Time series does not contain a time as early as %s' % start)
if end > self.end_time:
raise ValueError('Time series does not contain a time as late as %s' % end)
start_idx = int((start - self.start_time) * self.sample_rate)
end_idx = int((end - self.start_time) * self.sample_rate)
return self[start_idx:end_idx] | python | {
"resource": ""
} |
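A small sketch of `time_slice`, assuming `ts` is a TimeSeries whose span covers the requested GPS interval:

chunk = ts.time_slice(1126259461.0, 1126259463.0)  # keep 2 s around a time of interest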
q31558 | TimeSeries.get_sample_times | train | def get_sample_times(self):
"""Return an Array containing the sample times.
"""
if self._epoch is None:
return Array(range(len(self))) * self._delta_t
else:
return Array(range(len(self))) * self._delta_t + float(self._epoch) | python | {
"resource": ""
} |
q31559 | TimeSeries.at_time | train | def at_time(self, time, nearest_sample=False):
""" Return the value at the specified gps time
"""
if nearest_sample:
time += self.delta_t / 2.0
return self[int((time-self.start_time)*self.sample_rate)] | python | {
"resource": ""
} |
q31560 | TimeSeries.almost_equal_elem | train | def almost_equal_elem(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the series.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the series.
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if 'relative' is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_elem(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False | python | {
"resource": ""
} |
q31561 | TimeSeries.lal | train | def lal(self):
"""Produces a LAL time series object equivalent to self.
Returns
-------
lal_data : {lal.*TimeSeries}
LAL time series object containing the same data as self.
The actual type depends on the sample's dtype. If the epoch of
self is 'None', the epoch of the returned LAL object will be
LIGOTimeGPS(0,0); otherwise, the same as that of self.
Raises
------
TypeError
If time series is stored in GPU memory.
"""
lal_data = None
if self._epoch is None:
ep = _lal.LIGOTimeGPS(0,0)
else:
ep = self._epoch
if self._data.dtype == _numpy.float32:
lal_data = _lal.CreateREAL4TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.float64:
lal_data = _lal.CreateREAL8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex64:
lal_data = _lal.CreateCOMPLEX8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex128:
lal_data = _lal.CreateCOMPLEX16TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
lal_data.data.data[:] = self.numpy()
return lal_data | python | {
"resource": ""
} |
q31562 | TimeSeries.crop | train | def crop(self, left, right):
""" Remove given seconds from either end of time series
Parameters
----------
left : float
Number of seconds of data to remove from the left of the time series.
right : float
Number of seconds of data to remove from the right of the time series.
Returns
-------
cropped : pycbc.types.TimeSeries
The reduced time series
"""
if left + right > self.duration:
raise ValueError('Cannot crop more data than we have')
s = int(left * self.sample_rate)
e = len(self) - int(right * self.sample_rate)
return self[s:e] | python | {
"resource": ""
} |
q31563 | TimeSeries.save_to_wav | train | def save_to_wav(self, file_name):
""" Save this time series to a wav format audio file.
Parameters
----------
file_name : string
The output file name
"""
scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767)
write_wav(file_name, self.sample_rate, scaled) | python | {
"resource": ""
} |
q31564 | TimeSeries.psd | train | def psd(self, segment_duration, **kwds):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
For more complete options, please see that function.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import welch
seg_len = int(segment_duration * self.sample_rate)
seg_stride = int(seg_len / 2)
return welch(self, seg_len=seg_len,
seg_stride=seg_stride,
**kwds) | python | {
"resource": ""
} |
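A hedged sketch of PSD estimation: build a white-noise TimeSeries (purely illustrative data) and measure its spectrum with 4-second Welch segments; the `avg_method` keyword is passed through to `pycbc.psd.welch`.

import numpy
from pycbc.types import TimeSeries

noise = numpy.random.normal(size=64 * 4096)
strain = TimeSeries(noise, delta_t=1.0/4096)
psd = strain.psd(4, avg_method='median')
asd = psd ** 0.5  # amplitude spectral density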
q31565 | TimeSeries.whiten | train | def whiten(self, segment_duration, max_filter_duration, trunc_method='hann',
remove_corrupted=True, low_frequency_cutoff=None,
return_psd=False, **kwds):
""" Return a whitened time series
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
max_filter_duration : int
Maximum length of the time-domain filter in seconds.
trunc_method : {None, 'hann'}
Function used for truncating the time-domain filter.
None produces a hard truncation at `max_filter_len`.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the whitening
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
low_frequency_cutoff : {None, float}
Low frequency cutoff to pass to the inverse spectrum truncation.
This should be matched to a known low frequency cutoff of the
data if there is one.
return_psd : {False, Boolean}
Return the estimated and conditioned PSD that was used to whiten
the data.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
whitened_data : TimeSeries
The whitened time series
"""
from pycbc.psd import inverse_spectrum_truncation, interpolate
# Estimate the noise spectrum
psd = self.psd(segment_duration, **kwds)
psd = interpolate(psd, self.delta_f)
max_filter_len = int(max_filter_duration * self.sample_rate)
# Interpolate and smooth to the desired corruption length
psd = inverse_spectrum_truncation(psd,
max_filter_len=max_filter_len,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method=trunc_method)
# Whiten the data by the asd
white = (self.to_frequencyseries() / psd**0.5).to_timeseries()
if remove_corrupted:
white = white[int(max_filter_len/2):int(len(self)-max_filter_len/2)]
if return_psd:
return white, psd
return white | python | {
"resource": ""
} |
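Continuing the illustrative `strain` from the PSD sketch above, the same series can be whitened with 4 s PSD segments, a 4 s maximum filter duration, and a 20 Hz cutoff, keeping the conditioned PSD as well.

white, cond_psd = strain.whiten(4, 4, low_frequency_cutoff=20.0,
                                return_psd=True)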
q31566 | TimeSeries.qtransform | train | def qtransform(self, delta_t=None, delta_f=None, logfsteps=None,
frange=None, qrange=(4,64), mismatch=0.2, return_complex=False):
""" Return the interpolated 2d qtransform of this data
Parameters
----------
delta_t : {self.delta_t, float}
The time resolution to interpolate to
delta_f : float, Optional
The frequency resolution to interpolate to
logfsteps : int
Do a log interpolation (incompatible with delta_f option) and set
the number of steps to take.
frange : {(30, nyquist*0.8), tuple of ints}
frequency range
qrange : {(4, 64), tuple}
q range
mismatch : float
Mismatch between frequency tiles
return_complex: {False, bool}
return the raw complex series instead of the normalized power.
Returns
-------
times : numpy.ndarray
The time that the qtransform is sampled.
freqs : numpy.ndarray
The frequencies that the qtransform is sampled.
qplane : numpy.ndarray (2d)
The two dimensional interpolated qtransform of this time series.
"""
from pycbc.filter.qtransform import qtiling, qplane
from scipy.interpolate import interp2d
if frange is None:
frange = (30, int(self.sample_rate / 2 * 0.8))
q_base = qtiling(self, qrange, frange, mismatch)
_, times, freqs, q_plane = qplane(q_base, self.to_frequencyseries(),
return_complex=return_complex)
if logfsteps and delta_f:
raise ValueError("Provide only one (or none) of delta_f and logfsteps")
# Interpolate if requested
if delta_f or delta_t or logfsteps:
if return_complex:
interp_amp = interp2d(times, freqs, abs(q_plane))
interp_phase = interp2d(times, freqs, _numpy.angle(q_plane))
else:
interp = interp2d(times, freqs, q_plane)
if delta_t:
times = _numpy.arange(float(self.start_time),
float(self.end_time), delta_t)
if delta_f:
freqs = _numpy.arange(int(frange[0]), int(frange[1]), delta_f)
if logfsteps:
freqs = _numpy.logspace(_numpy.log10(frange[0]),
_numpy.log10(frange[1]),
logfsteps)
if delta_f or delta_t or logfsteps:
if return_complex:
q_plane = _numpy.exp(1.0j * interp_phase(times, freqs))
q_plane *= interp_amp(times, freqs)
else:
q_plane = interp(times, freqs)
return times, freqs, q_plane | python | {
"resource": ""
} |
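A hedged sketch of the q-transform on the same illustrative `strain`, interpolated onto a 1 ms time grid and 200 log-spaced frequencies; the plotting lines assume matplotlib and are not part of the source.

import pylab

times, freqs, power = strain.qtransform(delta_t=0.001, logfsteps=200,
                                        qrange=(8, 8), frange=(20, 512))
pylab.pcolormesh(times, freqs, power)
pylab.yscale('log')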
q31567 | TimeSeries.save | train | def save(self, path, group = None):
"""
Save time series to a Numpy .npy, hdf, or text file. The first column
contains the sample times, the second contains the values.
In the case of a complex time series saved as text, the imaginary
part is written as a third column. When using hdf format, the data is stored
as a single vector, along with relevant attributes.
Parameters
----------
path: string
Destination file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
If path does not end in .npy or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
output = _numpy.vstack((self.sample_times.numpy(), self.numpy())).T
_numpy.save(path, output)
elif ext == '.txt':
if self.kind == 'real':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy())).T
elif self.kind == 'complex':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext =='.hdf':
key = 'data' if group is None else group
f = h5py.File(path)
ds = f.create_dataset(key, data=self.numpy(), compression='gzip',
compression_opts=9, shuffle=True)
ds.attrs['start_time'] = float(self.start_time)
ds.attrs['delta_t'] = float(self.delta_t)
else:
raise ValueError('Path must end with .npy, .txt or .hdf') | python | {
"resource": ""
} |
q31568 | TimeSeries.to_frequencyseries | train | def to_frequencyseries(self, delta_f=None):
""" Return the Fourier transform of this time series
Parameters
----------
delta_f : {None, float}, optional
The frequency resolution of the returned frequency series. By
default the resolution is determined by the duration of the timeseries.
Returns
-------
FrequencySeries:
The fourier transform of this time series.
"""
from pycbc.fft import fft
if not delta_f:
delta_f = 1.0 / self.duration
# add 0.5 to round integer
tlen = int(1.0 / delta_f / self.delta_t + 0.5)
flen = int(tlen / 2 + 1)
if tlen < len(self):
raise ValueError("The value of delta_f (%s) would be "
"undersampled. Maximum delta_f "
"is %s." % (delta_f, 1.0 / self.duration))
if not delta_f:
tmp = self
else:
tmp = TimeSeries(zeros(tlen, dtype=self.dtype),
delta_t=self.delta_t, epoch=self.start_time)
tmp[:len(self)] = self[:]
f = FrequencySeries(zeros(flen,
dtype=complex_same_precision_as(self)),
delta_f=delta_f)
fft(tmp, f)
return f | python | {
"resource": ""
} |
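A short sketch of a domain round trip for the same illustrative `strain`, using the default `delta_f` of 1/duration; `to_timeseries` is assumed available on the returned FrequencySeries.

stilde = strain.to_frequencyseries()
recovered = stilde.to_timeseries()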
q31569 | TimeSeries.add_into | train | def add_into(self, other):
"""Return the sum of the two time series accounting for the time stamp.
The other vector will be resized and time shifted with sub-sample
precision before adding. This assumes the data is zero outside of the
original vector range.
"""
# only handle equal sample rate for now.
if self.sample_rate != other.sample_rate:
raise ValueError('Sample rate must be the same')
# Other is disjoint
if ((other.start_time > self.end_time) or
(self.start_time > other.end_time)):
return self.copy()
other = other.copy()
dt = float((other.start_time - self.start_time) * self.sample_rate)
if not dt.is_integer():
diff = (dt - _numpy.floor(dt))
other.resize(len(other) + (len(other) + 1) % 2 + 1)
other = other.cyclic_time_shift(diff)
ts = self.copy()
start = max(other.start_time, self.start_time)
end = min(other.end_time, self.end_time)
part = ts.time_slice(start, end)
part += other.time_slice(start, end)
return ts | python | {
"resource": ""
} |
q31570 | TimeSeries.detrend | train | def detrend(self, type='linear'):
""" Remove linear trend from the data
Remove a linear trend from the data to improve the approximation that
the data is circularly convolved, this helps reduce the size of filter
transients from a circular convolution / filter.
Parameters
----------
type: str
The choice of detrending. The default ('linear') removes a linear
least squares fit. 'constant' removes only the mean of the data.
"""
from scipy.signal import detrend
return self._return(detrend(self.numpy(), type=type)) | python | {
"resource": ""
} |
q31571 | fRD | train | def fRD( a, M):
"""Calculate the ring-down frequency for the final Kerr BH. Using Eq. 5.5 of Main paper"""
f = (lal.C_SI**3.0 / (2.0*lal.PI*lal.G_SI*M*lal.MSUN_SI)) * (1.5251 - 1.1568*(1.0-a)**0.1292)
return f | python | {
"resource": ""
} |
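A hedged sketch: the ring-down frequency in Hz for a remnant of 62 solar masses with dimensionless spin a = 0.67, using the `fRD` helper above (the numbers are illustrative only).

f_ringdown = fRD(0.67, 62.0)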
q31572 | sigma_cached | train | def sigma_cached(self, psd):
""" Cache sigma calculate for use in tandem with the FilterBank class
"""
if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
# If possible, we precalculate the sigmasq vector for all possible waveforms
if pycbc.waveform.waveform_norm_exists(self.approximant):
if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm(
self.approximant, psd, len(psd), psd.delta_f, self.f_lower)
if not hasattr(self, 'sigma_scale'):
# Get an amplitude normalization (mass-dependent constant norm)
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
self._sigmasq[key] = self.sigma_scale * \
psd.sigmasq_vec[self.approximant][self.end_idx-1]
else:
if not hasattr(self, 'sigma_view'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd[self.sslice]
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt)
return self._sigmasq[key] | python | {
"resource": ""
} |
q31573 | boolargs_from_apprxstr | train | def boolargs_from_apprxstr(approximant_strs):
"""Parses a list of strings specifying an approximant and where that
approximant should be used into a list that can be understood by
FieldArray.parse_boolargs.
Parameters
----------
approximant_strs : (list of) string(s)
The strings to parse. Each string should be formatted `APPRX:COND`,
where `APPRX` is the approximant and `COND` is a string specifying
where it should be applied (see `FieldArray.parse_boolargs` for examples
of conditional strings). The last string in the list may exclude a
conditional argument, which is the same as specifying ':else'.
Returns
-------
boolargs : list
A list of tuples giving the approximant and where to apply them. This
can be passed directly to `FieldArray.parse_boolargs`.
"""
if not isinstance(approximant_strs, list):
approximant_strs = [approximant_strs]
return [tuple(arg.split(':')) for arg in approximant_strs] | python | {
"resource": ""
} |
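A small sketch of the parsing performed above; the conditionals are illustrative.

boolargs = boolargs_from_apprxstr(['TaylorF2:mtotal < 4', 'IMRPhenomD:else'])
# boolargs == [('TaylorF2', 'mtotal < 4'), ('IMRPhenomD', 'else')]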
q31574 | add_approximant_arg | train | def add_approximant_arg(parser, default=None, help=None):
"""Adds an approximant argument to the given parser.
Parameters
----------
parser : ArgumentParser
The argument parser to add the argument to.
default : {None, str}
Specify a default for the approximant argument. Defaults to None.
help : {None, str}
Provide a custom help message. If None, will use a descriptive message
on how to specify the approximant.
"""
if help is None:
help=str("The approximant(s) to use. Multiple approximants to use "
"in different regions may be provided. If multiple "
"approximants are provided, every one but the last must be "
"be followed by a conditional statement defining where that "
"approximant should be used. Conditionals can be any boolean "
"test understood by numpy. For example, 'Apprx:(mtotal > 4) & "
"(mchirp <= 5)' would use approximant 'Apprx' where total mass "
"is > 4 and chirp mass is <= 5. "
"Conditionals are applied in order, with each successive one "
"only applied to regions not covered by previous arguments. "
"For example, `'TaylorF2:mtotal < 4' 'IMRPhenomD:mchirp < 3'` "
"would result in IMRPhenomD being used where chirp mass is < 3 "
"and total mass is >= 4. The last approximant given may use "
"'else' as the conditional or include no conditional. In either "
"case, this will cause the last approximant to be used in any "
"remaning regions after all the previous conditionals have been "
"applied. For the full list of possible parameters to apply "
"conditionals to, see WaveformArray.default_fields(). Math "
"operations may also be used on parameters; syntax is python, "
"with any operation recognized by numpy.")
parser.add_argument("--approximant", nargs='+', type=str, default=default,
metavar='APPRX[:COND]',
help=help) | python | {
"resource": ""
} |
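A hedged sketch of wiring this option into a standalone parser and parsing a two-region specification; the argument values are illustrative.

import argparse

parser = argparse.ArgumentParser()
add_approximant_arg(parser)
opts = parser.parse_args(['--approximant', 'TaylorF2:mtotal < 4', 'IMRPhenomD'])
# opts.approximant == ['TaylorF2:mtotal < 4', 'IMRPhenomD']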
q31575 | find_variable_start_frequency | train | def find_variable_start_frequency(approximant, parameters, f_start, max_length,
delta_f = 1):
""" Find a frequency value above the starting frequency that results in a
waveform shorter than max_length.
"""
l = max_length + 1
f = f_start - delta_f
while l > max_length:
f += delta_f
l = pycbc.waveform.get_waveform_filter_length_in_time(approximant,
parameters, f_lower=f)
return f | python | {
"resource": ""
} |
q31576 | TemplateBank.ensure_hash | train | def ensure_hash(self):
"""Ensure that there is a correctly populated template_hash.
Check for a correctly populated template_hash and create if it doesn't
already exist.
"""
fields = self.table.fieldnames
if 'template_hash' in fields:
return
# The fields to use in making a template hash
hash_fields = ['mass1', 'mass2', 'inclination',
'spin1x', 'spin1y', 'spin1z',
'spin2x', 'spin2y', 'spin2z',]
fields = [f for f in hash_fields if f in fields]
template_hash = np.array([hash(v) for v in zip(*[self.table[p]
for p in fields])])
self.table = self.table.add_fields(template_hash, 'template_hash') | python | {
"resource": ""
} |
q31577 | TemplateBank.write_to_hdf | train | def write_to_hdf(self, filename, start_index=None, stop_index=None,
force=False, skip_fields=None,
write_compressed_waveforms=True):
"""Writes self to the given hdf file.
Parameters
----------
filename : str
The name of the file to write to. Must end in '.hdf'.
start_index : If a specific slice of the template bank is to be
written to the hdf file, this would specify the index of the
first template in the slice
stop_index : If a specific slice of the template bank is to be
written to the hdf file, this would specify the index of the
last template in the slice
force : {False, bool}
If the file already exists, it will be overwritten if True.
Otherwise, an OSError is raised if the file exists.
skip_fields : {None, (list of) strings}
Do not write the given fields to the hdf file. Default is None,
in which case all fields in self.table.fieldnames are written.
write_compressed_waveforms : {True, bool}
Write compressed waveforms to the output (hdf) file if this is
True, which is the default setting. If False, do not write the
compressed waveforms group, but only the template parameters to
the output file.
Returns
-------
h5py.File
The file handler to the output hdf file (left open).
"""
if not filename.endswith('.hdf'):
raise ValueError("Unrecoginized file extension")
if os.path.exists(filename) and not force:
raise IOError("File %s already exists" %(filename))
f = h5py.File(filename, 'w')
parameters = self.parameters
if skip_fields is not None:
if not isinstance(skip_fields, list):
skip_fields = [skip_fields]
parameters = [p for p in parameters if p not in skip_fields]
# save the parameters
f.attrs['parameters'] = parameters
write_tbl = self.table[start_index:stop_index]
for p in parameters:
f[p] = write_tbl[p]
if write_compressed_waveforms and self.has_compressed_waveforms:
for tmplt_hash in write_tbl.template_hash:
compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
self.filehandler, tmplt_hash,
load_now=True)
compressed_waveform.write_to_hdf(f, tmplt_hash)
return f | python | {
"resource": ""
} |
q31578 | TemplateBank.end_frequency | train | def end_frequency(self, index):
""" Return the end frequency of the waveform at the given index value
"""
from pycbc.waveform.waveform import props
return pycbc.waveform.get_waveform_end_frequency(
self.table[index],
approximant=self.approximant(index),
**self.extra_args) | python | {
"resource": ""
} |
q31579 | TemplateBank.approximant | train | def approximant(self, index):
""" Return the name of the approximant ot use at the given index
"""
if 'approximant' not in self.table.fieldnames:
raise ValueError("approximant not found in input file and no "
"approximant was specified on initialization")
return self.table["approximant"][index] | python | {
"resource": ""
} |
q31580 | TemplateBank.template_thinning | train | def template_thinning(self, inj_filter_rejector):
"""Remove templates from bank that are far from all injections."""
if not inj_filter_rejector.enabled or \
inj_filter_rejector.chirp_time_window is None:
# Do nothing!
return
injection_parameters = inj_filter_rejector.injection_params.table
fref = inj_filter_rejector.f_lower
threshold = inj_filter_rejector.chirp_time_window
m1 = self.table['mass1']
m2 = self.table['mass2']
tau0_temp, _ = pycbc.pnutils.mass1_mass2_to_tau0_tau3(m1, m2, fref)
indices = []
for inj in injection_parameters:
tau0_inj, _ = \
pycbc.pnutils.mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
fref)
inj_indices = np.where(abs(tau0_temp - tau0_inj) <= threshold)[0]
indices.append(inj_indices)
indices_combined = np.concatenate(indices)
indices_unique = np.unique(indices_combined)
self.table = self.table[indices_unique] | python | {
"resource": ""
} |
q31581 | TemplateBank.ensure_standard_filter_columns | train | def ensure_standard_filter_columns(self, low_frequency_cutoff=None):
""" Initialize FilterBank common fields
Parameters
----------
low_frequency_cutoff: {float, None}, Optional
A low frequency cutoff which overrides any given within the
template bank file.
"""
# Make sure we have a template duration field
if not hasattr(self.table, 'template_duration'):
self.table = self.table.add_fields(np.zeros(len(self.table),
dtype=np.float32), 'template_duration')
# Make sure we have a f_lower field
if low_frequency_cutoff is not None:
if not hasattr(self.table, 'f_lower'):
vec = np.zeros(len(self.table), dtype=np.float32)
self.table = self.table.add_fields(vec, 'f_lower')
self.table['f_lower'][:] = low_frequency_cutoff
self.min_f_lower = min(self.table['f_lower'])
if self.f_lower is None and self.min_f_lower == 0.:
raise ValueError('Invalid low-frequency cutoff settings') | python | {
"resource": ""
} |
q31582 | LiveFilterBank.round_up | train | def round_up(self, num):
"""Determine the length to use for this waveform by rounding.
Parameters
----------
num : int
Proposed size of the waveform in samples
Returns
-------
size: int
The rounded size to use for the waveform buffer in seconds. This
is calculaed using an internal `increment` attribute, which determines
the discreteness of the rounding.
"""
inc = self.increment
size = np.ceil(num / self.sample_rate / inc) * self.sample_rate * inc
return size | python | {
"resource": ""
} |
q31583 | FilterBank.get_decompressed_waveform | train | def get_decompressed_waveform(self, tempout, index, f_lower=None,
approximant=None, df=None):
"""Returns a frequency domain decompressed waveform for the template
in the bank corresponding to the index taken in as an argument. The
decompressed waveform is obtained by interpolating in frequency space,
the amplitude and phase points for the compressed template that are
read in from the bank."""
from pycbc.waveform.waveform import props
from pycbc.waveform import get_waveform_filter_length_in_time
# Get the template hash corresponding to the template index taken in as argument
tmplt_hash = self.table.template_hash[index]
# Read the compressed waveform from the bank file
compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
self.filehandler, tmplt_hash,
load_now=True)
# Get the interpolation method to be used to decompress the waveform
if self.waveform_decompression_method is not None:
decompression_method = self.waveform_decompression_method
else:
decompression_method = compressed_waveform.interpolation
logging.info("Decompressing waveform using %s", decompression_method)
if df is not None:
delta_f = df
else:
delta_f = self.delta_f
# Create memory space for writing the decompressed waveform
decomp_scratch = FrequencySeries(tempout[0:self.filter_length], delta_f=delta_f, copy=False)
# Get the decompressed waveform
hdecomp = compressed_waveform.decompress(out=decomp_scratch, f_lower=f_lower, interpolation=decompression_method)
p = props(self.table[index])
p.pop('approximant')
try:
tmpltdur = self.table[index].template_duration
except AttributeError:
tmpltdur = None
if tmpltdur is None or tmpltdur == 0.0:
tmpltdur = get_waveform_filter_length_in_time(approximant, **p)
hdecomp.chirp_length = tmpltdur
hdecomp.length_in_time = hdecomp.chirp_length
return hdecomp | python | {
"resource": ""
} |
q31584 | FilterBank.generate_with_delta_f_and_max_freq | train | def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f,
low_frequency_cutoff=None,
cached_mem=None):
"""Generate the template with index t_num using custom length."""
approximant = self.approximant(t_num)
# Don't want to use INTERP waveforms in here
if approximant.endswith('_INTERP'):
approximant = approximant.replace('_INTERP', '')
# Using SPAtmplt here is bad as the stored cbrt and logv get
# recalculated as we change delta_f values. Fall back to TaylorF2
# in lalsimulation.
if approximant == 'SPAtmplt':
approximant = 'TaylorF2'
if cached_mem is None:
wav_len = int(max_freq / delta_f) + 1
cached_mem = zeros(wav_len, dtype=np.complex64)
if self.has_compressed_waveforms and self.enable_compressed_waveforms:
htilde = self.get_decompressed_waveform(cached_mem, t_num,
f_lower=low_frequency_cutoff,
approximant=approximant,
df=delta_f)
else :
htilde = pycbc.waveform.get_waveform_filter(
cached_mem, self.table[t_num], approximant=approximant,
f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f,
distance=1./DYN_RANGE_FAC, delta_t=1./(2.*max_freq))
return htilde | python | {
"resource": ""
} |
q31585 | select_waveform_generator | train | def select_waveform_generator(approximant):
"""Returns the single-IFO generator for the approximant.
Parameters
----------
approximant : str
Name of waveform approximant. Valid names can be found using
``pycbc.waveform`` methods.
Returns
-------
generator : (PyCBC generator instance)
A waveform generator object.
Examples
--------
Get a list of available approximants:
>>> from pycbc import waveform
>>> waveform.fd_approximants()
>>> waveform.td_approximants()
>>> from pycbc.waveform import ringdown
>>> ringdown.ringdown_fd_approximants.keys()
Get generator object:
>>> from pycbc.waveform.generator import select_waveform_generator
>>> select_waveform_generator(waveform.fd_approximants()[0])
"""
# check if frequency-domain CBC waveform
if approximant in waveform.fd_approximants():
return FDomainCBCGenerator
# check if time-domain CBC waveform
elif approximant in waveform.td_approximants():
return TDomainCBCGenerator
# check if frequency-domain ringdown waveform
elif approximant in ringdown.ringdown_fd_approximants:
if approximant == 'FdQNMfromFinalMassSpin':
return FDomainMassSpinRingdownGenerator
elif approximant == 'FdQNMfromFreqTau':
return FDomainFreqTauRingdownGenerator
elif approximant in ringdown.ringdown_td_approximants:
if approximant == 'TdQNMfromFinalMassSpin':
return TDomainMassSpinRingdownGenerator
elif approximant == 'TdQNMfromFreqTau':
return TDomainFreqTauRingdownGenerator
# otherwise waveform approximant is not supported
else:
raise ValueError("%s is not a valid approximant." % approximant) | python | {
"resource": ""
} |
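A minimal sketch: look up the generator class for an approximant name; instantiating it with variable and frozen arguments happens elsewhere in the code base.

gen_class = select_waveform_generator('IMRPhenomD')  # a frequency-domain CBC generator class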
q31586 | BaseGenerator.generate_from_args | train | def generate_from_args(self, *args):
"""Generates a waveform. The list of arguments must be in the same
order as self's variable_args attribute.
"""
if len(args) != len(self.variable_args):
raise ValueError("variable argument length mismatch")
return self.generate(**dict(zip(self.variable_args, args))) | python | {
"resource": ""
} |
q31587 | TDomainCBCGenerator._postgenerate | train | def _postgenerate(self, res):
"""Applies a taper if it is in current params.
"""
hp, hc = res
try:
hp = taper_timeseries(hp, tapermethod=self.current_params['taper'])
hc = taper_timeseries(hc, tapermethod=self.current_params['taper'])
except KeyError:
pass
return hp, hc | python | {
"resource": ""
} |
q31588 | FDomainDetFrameGenerator.generate_from_args | train | def generate_from_args(self, *args):
"""Generates a waveform, applies a time shift and the detector response
function from the given args.
The args are assumed to be in the same order as the variable args.
"""
return self.generate(**dict(zip(self.variable_args, args))) | python | {
"resource": ""
} |
q31589 | FDomainDetFrameGenerator.generate | train | def generate(self, **kwargs):
"""Generates a waveform, applies a time shift and the detector response
function from the given kwargs.
"""
self.current_params.update(kwargs)
rfparams = {param: self.current_params[param]
for param in kwargs if param not in self.location_args}
hp, hc = self.rframe_generator.generate(**rfparams)
if isinstance(hp, TimeSeries):
df = self.current_params['delta_f']
hp = hp.to_frequencyseries(delta_f=df)
hc = hc.to_frequencyseries(delta_f=df)
# time-domain waveforms will not be shifted so that the peak amp
# happens at the end of the time series (as they are for f-domain),
# so we add an additional shift to account for it
tshift = 1./df - abs(hp._epoch)
else:
tshift = 0.
hp._epoch = hc._epoch = self._epoch
h = {}
if self.detector_names != ['RF']:
for detname, det in self.detectors.items():
# apply detector response function
fp, fc = det.antenna_pattern(self.current_params['ra'],
self.current_params['dec'],
self.current_params['polarization'],
self.current_params['tc'])
thish = fp*hp + fc*hc
# apply the time shift
tc = self.current_params['tc'] + \
det.time_delay_from_earth_center(self.current_params['ra'],
self.current_params['dec'], self.current_params['tc'])
h[detname] = apply_fd_time_shift(thish, tc+tshift, copy=False)
if self.recalib:
# recalibrate with given calibration model
h[detname] = \
self.recalib[detname].map_to_adjust(h[detname],
**self.current_params)
else:
# no detector response, just use the + polarization
if 'tc' in self.current_params:
hp = apply_fd_time_shift(hp, self.current_params['tc']+tshift,
copy=False)
h['RF'] = hp
if self.gates is not None:
# resize all to nearest power of 2
for d in h.values():
d.resize(ceilpow2(len(d)-1) + 1)
h = strain.apply_gates_to_fd(h, self.gates)
return h | python | {
"resource": ""
} |
q31590 | positive_float | train | def positive_float(s):
"""
Ensure argument is a positive real number and return it as float.
To be used as type in argparse arguments.
"""
err_msg = "must be a positive number, not %r" % s
try:
value = float(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value <= 0:
raise argparse.ArgumentTypeError(err_msg)
return value | python | {
"resource": ""
} |
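A hedged sketch of using `positive_float` as an argparse type so that zero or negative values are rejected at parse time; the option name is invented.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--segment-length', type=positive_float, default=4.0)
opts = parser.parse_args(['--segment-length', '8'])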
q31591 | nonnegative_float | train | def nonnegative_float(s):
"""
Ensure argument is a positive real number or zero and return it as float.
To be used as type in argparse arguments.
"""
err_msg = "must be either positive or zero, not %r" % s
try:
value = float(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value < 0:
raise argparse.ArgumentTypeError(err_msg)
return value | python | {
"resource": ""
} |
q31592 | from_cli | train | def from_cli(opt, length, delta_f, low_frequency_cutoff,
strain=None, dyn_range_factor=1, precision=None):
"""Parses the CLI options related to the noise PSD and returns a
FrequencySeries with the corresponding PSD. If necessary, the PSD is
linearly interpolated to achieve the resolution specified in the CLI.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length,
psd_output).
length : int
The length in samples of the output PSD.
delta_f : float
The frequency step of the output PSD.
low_frequency_cutoff: float
The low frequency cutoff to use when calculating the PSD.
strain : {None, TimeSeries}
Time series containing the data from which the PSD should be measured,
when psd_estimation is in use.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
-------
psd : FrequencySeries
The frequency series containing the PSD.
"""
f_low = low_frequency_cutoff
sample_rate = int((length -1) * 2 * delta_f)
try:
psd_estimation = opt.psd_estimation is not None
except AttributeError:
psd_estimation = False
exclusive_opts = [opt.psd_model, opt.psd_file, opt.asd_file,
psd_estimation]
if sum(map(bool, exclusive_opts)) != 1:
err_msg = "You must specify exactly one of '--psd-file', "
err_msg += "'--psd-model', '--asd-file', '--psd-estimation'"
raise ValueError(err_msg)
if (opt.psd_model or opt.psd_file or opt.asd_file):
# PSD from lalsimulation or file
if opt.psd_model:
psd = from_string(opt.psd_model, length, delta_f, f_low)
elif opt.psd_file or opt.asd_file:
if opt.asd_file:
psd_file_name = opt.asd_file
else:
psd_file_name = opt.psd_file
if psd_file_name.endswith(('.dat', '.txt')):
is_asd_file = bool(opt.asd_file)
psd = from_txt(psd_file_name, length,
delta_f, f_low, is_asd_file=is_asd_file)
elif opt.asd_file:
err_msg = "ASD files are only valid as ASCII files (.dat or "
err_msg += ".txt). Supplied {}.".format(psd_file_name)
elif psd_file_name.endswith(('.xml', '.xml.gz')):
psd = from_xml(psd_file_name, length, delta_f, f_low,
ifo_string=opt.psd_file_xml_ifo_string,
root_name=opt.psd_file_xml_root_name)
# Set values < flow to the value at flow
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[0:kmin] = psd[kmin]
psd *= dyn_range_factor ** 2
elif psd_estimation:
# estimate PSD from data
psd = welch(strain, avg_method=opt.psd_estimation,
seg_len=int(opt.psd_segment_length * sample_rate),
seg_stride=int(opt.psd_segment_stride * sample_rate),
num_segments=opt.psd_num_segments,
require_exact_data_fit=False)
if delta_f != psd.delta_f:
psd = interpolate(psd, delta_f)
else:
# Shouldn't be possible to get here
raise ValueError("Shouldn't be possible to raise this!")
if opt.psd_inverse_length:
psd = inverse_spectrum_truncation(psd,
int(opt.psd_inverse_length * sample_rate),
low_frequency_cutoff=f_low)
if hasattr(opt, 'psd_output') and opt.psd_output:
(psd.astype(float64) / (dyn_range_factor ** 2)).save(opt.psd_output)
if precision is None:
return psd
elif precision == 'single':
return psd.astype(float32)
elif precision == 'double':
return psd.astype(float64)
else:
err_msg = "If provided the precision kwarg must be either 'single' "
err_msg += "or 'double'. You provided %s." %(precision)
raise ValueError(err_msg) | python | {
"resource": ""
} |
q31593 | from_cli_single_ifo | train | def from_cli_single_ifo(opt, length, delta_f, low_frequency_cutoff, ifo,
**kwargs):
"""
Get the PSD for a single ifo when using the multi-detector CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
return from_cli(single_det_opt, length, delta_f, low_frequency_cutoff,
**kwargs) | python | {
"resource": ""
} |
q31594 | from_cli_multi_ifos | train | def from_cli_multi_ifos(opt, length_dict, delta_f_dict,
low_frequency_cutoff_dict, ifos, strain_dict=None,
**kwargs):
"""
Get the PSD for all ifos when using the multi-detector CLI
"""
psd = {}
for ifo in ifos:
if strain_dict is not None:
strain = strain_dict[ifo]
else:
strain = None
psd[ifo] = from_cli_single_ifo(opt, length_dict[ifo], delta_f_dict[ifo],
low_frequency_cutoff_dict[ifo], ifo,
strain=strain, **kwargs)
return psd | python | {
"resource": ""
} |
q31595 | generate_overlapping_psds | train | def generate_overlapping_psds(opt, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs to cover a stretch of data. This
allows one to analyse a long stretch of data with PSD measurements that
change with time.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
--------
psd_and_times : list of (start, end, PSD) tuples
This is a list of tuples containing one entry for each PSD. The first
and second entries (start, end) in each tuple represent the index
range of the gwstrain data that was used to estimate that PSD. The
third entry (psd) contains the PSD estimate between that interval.
"""
if not opt.psd_estimation:
psd = from_cli(opt, flen, delta_f, flow, strain=gwstrain,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times = [ (0, len(gwstrain), psd) ]
return psds_and_times
# Figure out the data length used for PSD generation
seg_stride = int(opt.psd_segment_stride * gwstrain.sample_rate)
seg_len = int(opt.psd_segment_length * gwstrain.sample_rate)
input_data_len = len(gwstrain)
if opt.psd_num_segments is None:
# FIXME: Should we make --psd-num-segments mandatory?
# err_msg = "You must supply --num-segments."
# raise ValueError(err_msg)
num_segments = int(input_data_len // seg_stride) - 1
else:
num_segments = int(opt.psd_num_segments)
psd_data_len = (num_segments - 1) * seg_stride + seg_len
# How many unique PSD measurements is this?
psds_and_times = []
if input_data_len < psd_data_len:
err_msg = "Input data length must be longer than data length needed "
err_msg += "to estimate a PSD. You specified that a PSD should be "
err_msg += "estimated with %d seconds. " %(psd_data_len)
err_msg += "Input data length is %d seconds. " %(input_data_len)
raise ValueError(err_msg)
elif input_data_len == psd_data_len:
num_psd_measurements = 1
psd_stride = 0
else:
num_psd_measurements = int(2 * (input_data_len-1) / psd_data_len)
psd_stride = int((input_data_len - psd_data_len) / num_psd_measurements)
for idx in range(num_psd_measurements):
if idx == (num_psd_measurements - 1):
start_idx = input_data_len - psd_data_len
end_idx = input_data_len
else:
start_idx = psd_stride * idx
end_idx = psd_data_len + psd_stride * idx
strain_part = gwstrain[start_idx:end_idx]
psd = from_cli(opt, flen, delta_f, flow, strain=strain_part,
dyn_range_factor=dyn_range_factor, precision=precision)
psds_and_times.append( (start_idx, end_idx, psd) )
return psds_and_times | python | {
"resource": ""
} |
q31596 | associate_psds_to_segments | train | def associate_psds_to_segments(opt, fd_segments, gwstrain, flen, delta_f, flow,
dyn_range_factor=1., precision=None):
"""Generate a set of overlapping PSDs covering the data in GWstrain.
Then associate these PSDs with the appropriate segment in strain_segments.
Parameters
-----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
fd_segments : StrainSegments.fourier_segments() object
The fourier transforms of the various analysis segments. The psd
attribute of each segment is updated to point to the appropriate PSD.
gwstrain : Strain object
The timeseries of raw data on which to estimate PSDs.
flen : int
The length in samples of the output PSDs.
delta_f : float
The frequency step of the output PSDs.
flow: float
The low frequency cutoff to use when calculating the PSD.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
"""
psds_and_times = generate_overlapping_psds(opt, gwstrain, flen, delta_f,
flow, dyn_range_factor=dyn_range_factor,
precision=precision)
for fd_segment in fd_segments:
best_psd = None
psd_overlap = 0
inp_seg = segments.segment(fd_segment.seg_slice.start,
fd_segment.seg_slice.stop)
for start_idx, end_idx, psd in psds_and_times:
psd_seg = segments.segment(start_idx, end_idx)
if psd_seg.intersects(inp_seg):
curr_overlap = abs(inp_seg & psd_seg)
if curr_overlap > psd_overlap:
psd_overlap = curr_overlap
best_psd = psd
if best_psd is None:
err_msg = "No PSDs found intersecting segment!"
raise ValueError(err_msg)
fd_segment.psd = best_psd | python | {
"resource": ""
} |
q31597 | associate_psds_to_single_ifo_segments | train | def associate_psds_to_single_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifo,
dyn_range_factor=1., precision=None):
"""
Associate PSDs to segments for a single ifo when using the multi-detector
CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
associate_psds_to_segments(single_det_opt, fd_segments, gwstrain, flen,
delta_f, flow, dyn_range_factor=dyn_range_factor,
precision=precision) | python | {
"resource": ""
} |
q31598 | associate_psds_to_multi_ifo_segments | train | def associate_psds_to_multi_ifo_segments(opt, fd_segments, gwstrain, flen,
delta_f, flow, ifos,
dyn_range_factor=1., precision=None):
"""
Associate PSDs to segments for all ifos when using the multi-detector CLI
"""
for ifo in ifos:
if gwstrain is not None:
strain = gwstrain[ifo]
else:
strain = None
if fd_segments is not None:
segments = fd_segments[ifo]
else:
segments = None
associate_psds_to_single_ifo_segments(opt, segments, strain, flen,
delta_f, flow, ifo, dyn_range_factor=dyn_range_factor,
precision=precision) | python | {
"resource": ""
} |
q31599 | CubicSpline.apply_calibration | train | def apply_calibration(self, strain):
"""Apply calibration model
This applies cubic spline calibration to the strain.
Parameters
----------
strain : FrequencySeries
The strain to be recalibrated.
Return
------
strain_adjusted : FrequencySeries
The recalibrated strain.
"""
amplitude_parameters =\
[self.params['amplitude_{}_{}'.format(self.ifo_name, ii)]
for ii in range(self.n_points)]
amplitude_spline = UnivariateSpline(self.spline_points,
amplitude_parameters)
delta_amplitude = amplitude_spline(strain.sample_frequencies.numpy())
phase_parameters =\
[self.params['phase_{}_{}'.format(self.ifo_name, ii)]
for ii in range(self.n_points)]
phase_spline = UnivariateSpline(self.spline_points, phase_parameters)
delta_phase = phase_spline(strain.sample_frequencies.numpy())
strain_adjusted = strain * (1.0 + delta_amplitude)\
* (2.0 + 1j * delta_phase) / (2.0 - 1j * delta_phase)
return strain_adjusted | python | {
"resource": ""
} |