_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q31800 | walk | train | def walk(chains, start, end, step):
""" Calculates Gelman-Rubin conervergence statistic along chains of data.
This function will advance along the chains and calculate the
statistic for each step.
Parameters
----------
chains : iterable
An iterable of numpy.array instances that contain the samples
for each chain. Each chain has shape (nparameters, niterations).
start : int
Start index of blocks to calculate all statistics.
end : int
Last index of blocks to calculate statistics.
step : int
Step size to take for next block.
Returns
-------
starts : numpy.array
1-D array of start indexes of calculations.
ends : numpy.array
1-D array of end indexes of calculations.
stats : numpy.array
Array with convergence statistic. It has
shape (nparameters, ncalculations).
"""
# get number of chains, parameters, and iterations
chains = numpy.array(chains)
_, nparameters, _ = chains.shape
# get end index of blocks
ends = numpy.arange(start, end, step)
stats = numpy.zeros((nparameters, len(ends)))
# get start index of blocks
starts = numpy.array(len(ends) * [start])
# loop over end indexes and calculate statistic
for i, e in enumerate(ends):
tmp = chains[:, :, 0:e]
stats[:, i] = gelman_rubin(tmp)
return starts, ends, stats | python | {
"resource": ""
} |
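A minimal usage sketch for walk, assuming the function above is in scope together with a stand-in gelman_rubin (the real one lives elsewhere in this package); the simplified R-hat formula below is illustrative only.

import numpy

def gelman_rubin(chains):
    # hypothetical stand-in: simplified R-hat over (nchains, nparams, niter)
    n = chains.shape[-1]
    means = chains.mean(axis=-1)
    variances = chains.var(axis=-1, ddof=1)
    within = variances.mean(axis=0)
    between = n * means.var(axis=0, ddof=1)
    var_est = (1. - 1. / n) * within + between / n
    return numpy.sqrt(var_est / within)

# three chains, two parameters, 1000 iterations of toy samples
rng = numpy.random.RandomState(0)
chains = [rng.normal(size=(2, 1000)) for _ in range(3)]
starts, ends, stats = walk(chains, start=100, end=1000, step=100)
print(stats.shape)  # (2, 9): one statistic per parameter per block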
q31801 | Arbitrary.rvs | train | def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from the kde.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params.
"""
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
size = int(size)
arr = numpy.zeros(size, dtype=dtype)
draws = self._kde.resample(size)
draws = {param: draws[ii,:] for ii,param in enumerate(self.params)}
for (param,_) in dtype:
try:
# transform back to param space
tparam = self._tparams[param]
tdraws = {tparam: draws[param]}
draws[param] = self._transforms[tparam].inverse_transform(
tdraws)[param]
except KeyError:
pass
arr[param] = draws[param]
return arr | python | {
"resource": ""
} |
q31802 | FromFile.get_arrays_from_file | train | def get_arrays_from_file(params_file, params=None):
"""Reads the values of one or more parameters from an hdf file and
returns as a dictionary.
Parameters
----------
params_file : str
The hdf file that contains the values of the parameters.
params : {None, list}
If provided, will just retrieve the given parameter names.
Returns
-------
dict
A dictionary of the parameters mapping `param_name -> array`.
"""
try:
f = h5py.File(params_file, 'r')
except IOError:
raise ValueError('File not found.')
if params is not None:
if not isinstance(params, list):
params = [params]
for p in params:
if p not in f.keys():
raise ValueError('Parameter {} is not in {}'
.format(p, params_file))
else:
params = [str(k) for k in f.keys()]
params_values = {p:f[p][:] for p in params}
try:
bandwidth = f.attrs["bandwidth"]
except KeyError:
bandwidth = "scott"
f.close()
return params_values, bandwidth | python | {
"resource": ""
} |
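A short round-trip sketch for get_arrays_from_file, calling it as a plain function; the file name and parameter values are illustrative.

import h5py, numpy

# write a small parameters file, then read it back
with h5py.File('params.hdf', 'w') as f:
    f['mass1'] = numpy.random.uniform(1., 3., size=100)
    f['mass2'] = numpy.random.uniform(1., 3., size=100)
    f.attrs['bandwidth'] = 'scott'

values, bw = get_arrays_from_file('params.hdf', params=['mass1', 'mass2'])
print(sorted(values.keys()), bw)  # ['mass1', 'mass2'] scott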
q31803 | block | train | def block(seed):
""" Return block of normal random numbers
Parameters
----------
seed : {None, int}
The seed to generate the noise.
Returns
--------
noise : numpy.ndarray
Array of random numbers
"""
num = SAMPLE_RATE * BLOCK_SIZE
rng = RandomState(seed % 2**32)
variance = SAMPLE_RATE / 2
return rng.normal(size=num, scale=variance**0.5) | python | {
"resource": ""
} |
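block depends on module-level constants and an import that this row does not show; a sketch with assumed, illustrative values:

from numpy.random import RandomState

# assumed module constants; these values are illustrative, not the package's
SAMPLE_RATE = 16384  # samples per second
BLOCK_SIZE = 1       # seconds of noise per block

noise = block(seed=1234)
print(len(noise))    # 16384 samples
print(noise.std())   # close to sqrt(SAMPLE_RATE / 2)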
q31804 | colored_noise | train | def colored_noise(psd, start_time, end_time, seed=0, low_frequency_cutoff=1.0):
""" Create noise from a PSD
Return noise from the chosen PSD. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
psd : pycbc.types.FrequencySeries
PSD to color the noise
start_time : int
Start time in GPS seconds to generate noise
end_time : int
End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
low_frequency_cutoff : {1.0, float}
The low frequency cutoff to pass to the PSD generation.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
psd = psd.copy()
flen = int(SAMPLE_RATE / psd.delta_f) / 2 + 1
oldlen = len(psd)
psd.resize(flen)
# Want to avoid zeroes in PSD.
max_val = psd.max()
for i in xrange(len(psd)):
if i >= (oldlen-1):
psd.data[i] = psd[oldlen - 2]
if psd[i] == 0:
psd.data[i] = max_val
wn_dur = int(end_time - start_time) + 2*FILTER_LENGTH
if psd.delta_f >= 1. / (2.*FILTER_LENGTH):
# If the PSD is short enough, this method is less memory intensive than
# resizing and then calling inverse_spectrum_truncation
psd = pycbc.psd.interpolate(psd, 1.0 / (2.*FILTER_LENGTH))
# inverse_spectrum_truncation truncates the inverted PSD. To truncate
# the non-inverted PSD we give it the inverted PSD to truncate and then
# invert the output.
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
psd = psd.astype(complex_same_precision_as(psd))
# Zero-pad the time-domain PSD to the desired length. Zeroes must be added
# in the middle, so the series is rolled, resized, then rolled back.
psd = psd.to_timeseries()
psd.roll(SAMPLE_RATE * FILTER_LENGTH)
psd.resize(wn_dur * SAMPLE_RATE)
psd.roll(-SAMPLE_RATE * FILTER_LENGTH)
# As the time series is still mirrored, the complex frequency components
# are 0. Convert to real by using abs, as in inverse_spectrum_truncation.
psd = psd.to_frequencyseries()
else:
psd = pycbc.psd.interpolate(psd, 1.0 / wn_dur)
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[:kmin].clear()
asd = (psd.real())**0.5
del psd
white_noise = normal(start_time - FILTER_LENGTH, end_time + FILTER_LENGTH,
seed=seed)
white_noise = white_noise.to_frequencyseries()
# Here we color. Do not want to duplicate memory here though so use '*='
white_noise *= asd
del asd
colored = white_noise.to_timeseries()
del white_noise
return colored.time_slice(start_time, end_time) | python | {
"resource": ""
} |
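A hedged usage sketch for colored_noise, assuming a working pycbc install plus the module constants and the normal() helper the function relies on; the PSD parameters and GPS times below are illustrative.

import pycbc.psd

# analytic design PSD: (length, delta_f, low-frequency cutoff)
flen, delta_f, flow = 16384, 1.0 / 32, 10.0
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

# 64 s of noise; re-using the same seed reproduces the same noise
ts = colored_noise(psd, start_time=1126259400, end_time=1126259464,
                   seed=0, low_frequency_cutoff=10.0)
print(ts.duration, ts.sample_rate)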
q31805 | EmceePTFile.write_sampler_metadata | train | def write_sampler_metadata(self, sampler):
"""Adds writing betas to MultiTemperedMCMCIO.
"""
super(EmceePTFile, self).write_sampler_metadata(sampler)
self[self.sampler_group].attrs["betas"] = sampler.betas | python | {
"resource": ""
} |
q31806 | EmceePTFile.write_acceptance_fraction | train | def write_acceptance_fraction(self, acceptance_fraction):
"""Write acceptance_fraction data to file.
Results are written to ``[sampler_group]/acceptance_fraction``; the
resulting dataset has shape (ntemps, nwalkers).
Parameters
-----------
acceptance_fraction : numpy.ndarray
Array of acceptance fractions to write. Must have shape
ntemps x nwalkers.
"""
# check
assert acceptance_fraction.shape == (self.ntemps, self.nwalkers), (
"acceptance fraction must have shape ntemps x nwalker")
group = self.sampler_group + '/acceptance_fraction'
try:
self[group][:] = acceptance_fraction
except KeyError:
# dataset doesn't exist yet, create it
self[group] = acceptance_fraction | python | {
"resource": ""
} |
q31807 | findchirp_cluster_over_window | train | def findchirp_cluster_over_window(times, values, window_length):
""" Reduce the events by clustering over a window using
the FindChirp clustering algorithm
Parameters
-----------
times: Array
The list of trigger times
values: Array
The list of trigger values (e.g. SNR) to maximize over
window_length: int
The size of the window in integer samples. Must be positive.
Returns
-------
indices: Array
The reduced list of indices of the SNR values
"""
assert window_length > 0, 'Clustering window length is not positive'
from weave import inline
indices = numpy.zeros(len(times), dtype=int)
tlen = len(times) # pylint:disable=unused-variable
k = numpy.zeros(1, dtype=int)
absvalues = abs(values) # pylint:disable=unused-variable
times = times.astype(int)
code = """
int j = 0;
int curr_ind = 0;
for (int i=0; i < tlen; i++){
if ((times[i] - times[curr_ind]) > window_length){
j += 1;
indices[j] = i;
curr_ind = i;
}
else if (absvalues[i] > absvalues[curr_ind]){
indices[j] = i;
curr_ind = i;
}
}
k[0] = j;
"""
inline(code, ['times', 'absvalues', 'window_length', 'indices', 'tlen', 'k'],
extra_compile_args=[WEAVE_FLAGS])
return indices[0:k[0]+1] | python | {
"resource": ""
} |
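Because the row above relies on a compiled weave kernel, here is a pure-Python reference implementation of the same clustering logic, for illustration:

import numpy

def cluster_over_window_py(times, values, window_length):
    # keep the loudest |value| within each sliding window of trigger times
    indices = [0]
    absvalues = numpy.abs(values)
    curr = 0
    for i in range(len(times)):
        if times[i] - times[curr] > window_length:
            indices.append(i)
            curr = i
        elif absvalues[i] > absvalues[curr]:
            indices[-1] = i
            curr = i
    return numpy.array(indices)

times = numpy.array([0, 10, 12, 50, 55])
snr = numpy.array([5., 7., 6., 9., 8.])
print(cluster_over_window_py(times, snr, window_length=20))  # [1 3]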
q31808 | cluster_reduce | train | def cluster_reduce(idx, snr, window_size):
""" Reduce the events by clustering over a window
Parameters
-----------
indices: Array
The list of indices of the SNR values
snr: Array
The list of SNR value
window_size: int
The size of the window in integer samples.
Returns
-------
indices: Array
The list of indices of the SNR values
snr: Array
The list of SNR values
"""
ind = findchirp_cluster_over_window(idx, snr, window_size)
return idx.take(ind), snr.take(ind) | python | {
"resource": ""
} |
q31809 | EventManager.from_multi_ifo_interface | train | def from_multi_ifo_interface(cls, opt, ifo, column, column_types, **kwds):
"""
Using this for a single ifo from the multi-ifo interface requires some
small fixes to the opt structure; this method applies them. Note that as
we edit the opt structure the process_params table will not be correct.
"""
opt = copy.deepcopy(opt)
opt_dict = vars(opt)
for arg, value in opt_dict.items():
if isinstance(value, dict):
setattr(opt, arg, getattr(opt, arg)[ifo])
return cls(opt, column, column_types, **kwds) | python | {
"resource": ""
} |
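The fix this method applies amounts to replacing each dict-valued option with the entry for the chosen ifo; a standalone sketch of that step, using a hypothetical options object:

import argparse, copy

# hypothetical multi-ifo options: dict-valued entries are keyed by ifo
opt = argparse.Namespace(snr_threshold={'H1': 5.5, 'L1': 5.0}, chisq_bins=16)

single = copy.deepcopy(opt)
for arg, value in vars(single).items():
    if isinstance(value, dict):
        setattr(single, arg, value['H1'])
print(single.snr_threshold, single.chisq_bins)  # 5.5 16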
q31810 | EventManager.newsnr_threshold | train | def newsnr_threshold(self, threshold):
""" Remove events with newsnr smaller than given threshold
"""
if not self.opt.chisq_bins:
raise RuntimeError('Chi-square test must be enabled in order to '
'use newsnr threshold')
remove = [i for i, e in enumerate(self.events) if
ranking.newsnr(abs(e['snr']), e['chisq'] / e['chisq_dof'])
< threshold]
self.events = numpy.delete(self.events, remove) | python | {
"resource": ""
} |
q31811 | EventManager.save_performance | train | def save_performance(self, ncores, nfilters, ntemplates, run_time,
setup_time):
"""
Calls variables from pycbc_inspiral to be used in a timing calculation
"""
self.run_time = run_time
self.setup_time = setup_time
self.ncores = ncores
self.nfilters = nfilters
self.ntemplates = ntemplates
self.write_performance = True | python | {
"resource": ""
} |
q31812 | EventManager.write_events | train | def write_events(self, outname):
""" Write the found events to a sngl inspiral table
"""
self.make_output_dir(outname)
if '.hdf' in outname:
self.write_to_hdf(outname)
else:
raise ValueError('Cannot write to this format') | python | {
"resource": ""
} |
q31813 | EmceePTSampler.model_stats | train | def model_stats(self):
"""Returns the log likelihood ratio and log prior as a dict of arrays.
The returned array has shape ntemps x nwalkers x niterations.
Unfortunately, because ``emcee_pt`` does not have blob support, this
will only return the loglikelihood and logprior (with the logjacobian
set to zero) regardless of what stats the model can return.
.. warning::
Since the `logjacobian` is not saved by `emcee_pt`, the `logprior`
returned here is the log of the prior pdf in the sampling
coordinate frame rather than the variable params frame. This
differs from the variable params frame by the log of the Jacobian
of the transform from one frame to the other. If no sampling
transforms were used, then the `logprior` is the same.
"""
# likelihood has shape ntemps x nwalkers x niterations
logl = self._sampler.lnlikelihood
# get prior from posterior
logp = self._sampler.lnprobability - logl
logjacobian = numpy.zeros(logp.shape)
return {'loglikelihood': logl, 'logprior': logp,
'logjacobian': logjacobian} | python | {
"resource": ""
} |
q31814 | EmceePTSampler.clear_samples | train | def clear_samples(self):
"""Clears the chain and blobs from memory.
"""
# store the iteration that the clear is occurring on
self._lastclear = self.niterations
self._itercounter = 0
# now clear the chain
self._sampler.reset() | python | {
"resource": ""
} |
q31815 | EmceePTSampler.run_mcmc | train | def run_mcmc(self, niterations):
"""Advance the ensemble for a number of samples.
Parameters
----------
niterations : int
Number of samples to get from sampler.
"""
pos = self._pos
if pos is None:
pos = self._p0
res = self._sampler.run_mcmc(pos, niterations)
p, _, _ = res
# update the positions
self._pos = p | python | {
"resource": ""
} |
q31816 | EmceePTSampler.calculate_logevidence | train | def calculate_logevidence(cls, filename, thin_start=None, thin_end=None,
thin_interval=None):
"""Calculates the log evidence from the given file using ``emcee_pt``'s
thermodynamic integration.
Parameters
----------
filename : str
Name of the file to read the samples from. Should be an
``EmceePTFile``.
thin_start : int
Index of the sample to begin returning stats. Default is to read
stats after burn in. To start from the beginning set thin_start
to 0.
thin_interval : int
Interval to accept every i-th sample. Default is to use the
`fp.acl`. If `fp.acl` is not set, then use all stats
(set thin_interval to 1).
thin_end : int
Index of the last sample to read. If not given then
`fp.niterations` is used.
Returns
-------
lnZ : float
The estimate of log of the evidence.
dlnZ : float
The error on the estimate.
"""
with cls._io(filename, 'r') as fp:
logls = fp.read_raw_samples(['loglikelihood'],
thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end,
temps='all', flatten=False)
logls = logls['loglikelihood']
# we need the betas that were used
betas = fp.betas
# annoyingly, thermodynamic integration in PTSampler is an instance
# method, so we'll implement a dummy one
ntemps = fp.ntemps
nwalkers = fp.nwalkers
ndim = len(fp.variable_params)
dummy_sampler = emcee.PTSampler(ntemps, nwalkers, ndim, None,
None, betas=betas)
return dummy_sampler.thermodynamic_integration_log_evidence(
logls=logls, fburnin=0.) | python | {
"resource": ""
} |
q31817 | EmceePTSampler.finalize | train | def finalize(self):
"""Calculates the log evidence and writes to the checkpoint file.
The thin start/interval/end for calculating the log evidence are
retrieved from the checkpoint file's thinning attributes.
"""
logging.info("Calculating log evidence")
# get the thinning settings
with self.io(self.checkpoint_file, 'r') as fp:
thin_start = fp.thin_start
thin_interval = fp.thin_interval
thin_end = fp.thin_end
# calculate
logz, dlogz = self.calculate_logevidence(
self.checkpoint_file, thin_start=thin_start, thin_end=thin_end,
thin_interval=thin_interval)
logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz))
# write to both the checkpoint and backup
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
fp.write_logevidence(logz, dlogz) | python | {
"resource": ""
} |
q31818 | losc_frame_json | train | def losc_frame_json(ifo, start_time, end_time):
""" Get the information about the public data files in a duration of time
Parameters
----------
ifo: str
The name of the IFO to find the information about.
start_time: int
The gps time in GPS seconds
end_time: int
The end time in GPS seconds
Returns
-------
info: dict
A dictionary containing information about the files that span the
requested times.
"""
import urllib, json
run = get_run(start_time)
run2 = get_run(end_time)
if run != run2:
raise ValueError('Spanning multiple runs is not currently supported. '
'You have requested data that uses '
'both %s and %s' % (run, run2))
url = _losc_url % (run, ifo, int(start_time), int(end_time))
try:
return json.loads(urllib.urlopen(url).read())
except Exception as e:
print(e)
raise ValueError('Failed to find gwf files for '
'ifo=%s, run=%s, between %s-%s' % (ifo, run, start_time, end_time)) | python | {
"resource": ""
} |
q31819 | losc_frame_urls | train | def losc_frame_urls(ifo, start_time, end_time):
""" Get a list of urls to losc frame files
Parameters
----------
ifo: str
The name of the IFO to find the information about.
start_time: int
The gps time in GPS seconds
end_time: int
The end time in GPS seconds
Returns
-------
frame_files: list
A list of URLs to GWF frame files that span the requested times.
"""
data = losc_frame_json(ifo, start_time, end_time)['strain']
return [d['url'] for d in data if d['format'] == 'gwf'] | python | {
"resource": ""
} |
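A quick sketch of these LOSC helpers together with read_strain_losc defined further down; this needs network access, and the GPS times (around GW150914) are illustrative.

# list the public GWF files covering the requested span
urls = losc_frame_urls('H1', 1126259446, 1126259478)
print(urls)

# fetch the corresponding strain as a TimeSeries
ts = read_strain_losc('H1', 1126259446, 1126259478)
print(ts.duration)  # 32 seconds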
q31820 | read_frame_losc | train | def read_frame_losc(channels, start_time, end_time):
""" Read channels from losc data
Parameters
----------
channels: str or list
The channel name to read or list of channel names.
start_time: int
The gps time in GPS seconds
end_time: int
The end time in GPS seconds
Returns
-------
ts: TimeSeries
Returns a timeseries or list of timeseries with the requested data.
"""
from pycbc.frame import read_frame
if not isinstance(channels, list):
channels = [channels]
ifos = [c[0:2] for c in channels]
urls = {}
for ifo in ifos:
urls[ifo] = losc_frame_urls(ifo, start_time, end_time)
if len(urls[ifo]) == 0:
raise ValueError("No data found for %s so we "
"can't produce a time series" % ifo)
fnames = {ifo:[] for ifo in ifos}
for ifo in ifos:
for url in urls[ifo]:
fname = download_file(url, cache=True)
fnames[ifo].append(fname)
ts = [read_frame(fnames[channel[0:2]], channel,
start_time=start_time, end_time=end_time) for channel in channels]
if len(ts) == 1:
return ts[0]
else:
return ts | python | {
"resource": ""
} |
q31821 | read_strain_losc | train | def read_strain_losc(ifo, start_time, end_time):
""" Get the strain data from the LOSC data
Parameters
----------
ifo: str
The name of the IFO to read data for. Ex. 'H1', 'L1', 'V1'
start_time: int
The gps time in GPS seconds
end_time: int
The end time in GPS seconds
Returns
-------
ts: TimeSeries
Returns a timeseries with the strain data.
"""
channel = _get_channel(start_time)
return read_frame_losc('%s:%s' % (ifo, channel), start_time, end_time) | python | {
"resource": ""
} |
q31822 | background_bin_from_string | train | def background_bin_from_string(background_bins, data):
""" Return template ids for each bin as defined by the format string
Parameters
----------
background_bins: list of strings
List of strings which define how a background bin is taken from the
list of templates.
data: dict of numpy.ndarrays
Dict with parameter key values and numpy.ndarray values which define
the parameters of the template bank to bin up.
Returns
-------
bins: dict
Dictionary of location indices indexed by a bin name
"""
used = numpy.array([], dtype=numpy.uint32)
bins = {}
for mbin in background_bins:
name, bin_type, boundary = tuple(mbin.split(':'))
if boundary[0:2] == 'lt':
member_func = lambda vals, bd=boundary : vals < float(bd[2:])
elif boundary[0:2] == 'gt':
member_func = lambda vals, bd=boundary : vals > float(bd[2:])
else:
raise RuntimeError("Can't parse boundary condition! Must begin "
"with 'lt' or 'gt'")
if bin_type == 'component' and boundary[0:2] == 'lt':
# maximum component mass is less than boundary value
vals = numpy.maximum(data['mass1'], data['mass2'])
elif bin_type == 'component' and boundary[0:2] == 'gt':
# minimum component mass is greater than boundary value
vals = numpy.minimum(data['mass1'], data['mass2'])
elif bin_type == 'total':
vals = data['mass1'] + data['mass2']
elif bin_type == 'chirp':
vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta(
data['mass1'], data['mass2'])[0]
elif bin_type == 'SEOBNRv2Peak':
vals = pycbc.pnutils.get_freq('fSEOBNRv2Peak',
data['mass1'], data['mass2'], data['spin1z'], data['spin2z'])
elif bin_type == 'SEOBNRv4Peak':
vals = pycbc.pnutils.get_freq('fSEOBNRv4Peak', data['mass1'],
data['mass2'], data['spin1z'],
data['spin2z'])
elif bin_type == 'SEOBNRv2duration':
vals = pycbc.pnutils.get_imr_duration(data['mass1'], data['mass2'],
data['spin1z'], data['spin2z'], data['f_lower'],
approximant='SEOBNRv2')
else:
raise ValueError('Invalid bin type %s' % bin_type)
locs = member_func(vals)
del vals
# make sure we don't reuse anything from an earlier bin
locs = numpy.where(locs)[0]
locs = numpy.delete(locs, numpy.where(numpy.in1d(locs, used))[0])
used = numpy.concatenate([used, locs])
bins[name] = locs
return bins | python | {
"resource": ""
} |
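A small sketch of background_bin_from_string using the 'total' mass bin type, which needs only numpy; the masses are illustrative.

import numpy

data = {'mass1': numpy.array([1.4, 10., 30.]),
        'mass2': numpy.array([1.4, 8., 25.])}
bins = background_bin_from_string(['bns:total:lt4', 'rest:total:gt4'], data)
print(bins['bns'], bins['rest'])  # [0] [1 2]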
q31823 | calculate_n_louder | train | def calculate_n_louder(bstat, fstat, dec, skip_background=False):
""" Calculate for each foreground event the number of background events
that are louder than it.
Parameters
----------
bstat: numpy.ndarray
Array of the background statistic values
fstat: numpy.ndarray
Array of the foreground statistic values
dec: numpy.ndarray
Array of the decimation factors for the background statistics
skip_background: optional, {boolean, False}
Skip calculating cumulative numbers for background triggers
Returns
-------
cum_back_num: numpy.ndarray
The cumulative array of background triggers. Does not return this
argument if skip_background == True
fore_n_louder: numpy.ndarray
The number of background triggers above each foreground trigger
"""
sort = bstat.argsort()
bstat = bstat[sort]
dec = dec[sort]
# calculate cumulative number of triggers louder than the trigger in
# a given index. We need to subtract the decimation factor, as the cumsum
# includes itself in the first sum (it is inclusive of the first value)
n_louder = dec[::-1].cumsum()[::-1] - dec
# Determine how many values are louder than the foreground ones
# We need to subtract one from the index, to be consistent with the definition
# of n_louder, as here we do want to include the background value at the
# found index
idx = numpy.searchsorted(bstat, fstat, side='left') - 1
# If the foreground are *quieter* than the background or at the same value
# then the searchsorted algorithm will choose position -1, which does not
# exist. We force it back to zero.
if isinstance(idx, numpy.ndarray): # Handle the case where our input is an array
idx[idx < 0] = 0
else: # Handle the case where we are simply given a scalar value
if idx < 0:
idx = 0
fore_n_louder = n_louder[idx]
if not skip_background:
unsort = sort.argsort()
back_cum_num = n_louder[unsort]
return back_cum_num, fore_n_louder
else:
return fore_n_louder | python | {
"resource": ""
} |
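A worked toy example for calculate_n_louder with unit decimation factors:

import numpy

back = numpy.array([1., 2., 3., 4.])   # background statistic values
fore = numpy.array([2.5, 5.0])         # foreground statistic values
dec = numpy.ones(len(back))            # no decimation

n_back, n_fore = calculate_n_louder(back, fore, dec)
print(n_fore)  # [2. 0.]: two backgrounds exceed 2.5, none exceed 5.0
print(n_back)  # [3. 2. 1. 0.]: cumulative counts in the original ordering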
q31824 | timeslide_durations | train | def timeslide_durations(start1, start2, end1, end2, timeslide_offsets):
""" Find the coincident time for each timeslide.
Find the coincident time for each timeslide, where the first time vector
is slid to the right by the offset in the given timeslide_offsets vector.
Parameters
----------
start1: numpy.ndarray
Array of the start of valid analyzed times for detector 1
start2: numpy.ndarray
Array of the start of valid analyzed times for detector 2
end1: numpy.ndarray
Array of the end of valid analyzed times for detector 1
end2: numpy.ndarray
Array of the end of valid analyzed times for detector 2
timeslide_offsets: numpy.ndarray
Array of offsets (in seconds) for each timeslide
Returns
--------
durations: numpy.ndarray
Array of coincident time for each timeslide in the offset array
"""
from . import veto
durations = []
seg2 = veto.start_end_to_segments(start2, end2)
for offset in timeslide_offsets:
seg1 = veto.start_end_to_segments(start1 + offset, end1 + offset)
durations.append(abs((seg1 & seg2).coalesce()))
return numpy.array(durations) | python | {
"resource": ""
} |
q31825 | time_coincidence | train | def time_coincidence(t1, t2, window, slide_step=0):
""" Find coincidences by time window
Parameters
----------
t1 : numpy.ndarray
Array of trigger times from the first detector
t2 : numpy.ndarray
Array of trigger times from the second detector
window : float
The coincidence window in seconds
slide_step : optional, {0, float}
If calculating background coincidences, the interval between background
slides in seconds.
Returns
-------
idx1 : numpy.ndarray
Array of indices into the t1 array.
idx2 : numpy.ndarray
Array of indices into the t2 array.
slide : numpy.ndarray
Array of slide ids
"""
if slide_step:
fold1 = t1 % slide_step
fold2 = t2 % slide_step
else:
fold1 = t1
fold2 = t2
sort1 = fold1.argsort()
sort2 = fold2.argsort()
fold1 = fold1[sort1]
fold2 = fold2[sort2]
if slide_step:
fold2 = numpy.concatenate([fold2 - slide_step, fold2, fold2 + slide_step])
sort2 = numpy.concatenate([sort2, sort2, sort2])
left = numpy.searchsorted(fold2, fold1 - window)
right = numpy.searchsorted(fold2, fold1 + window)
idx1 = numpy.repeat(sort1, right-left)
idx2 = [sort2[l:r] for l,r in zip(left, right)]
if len(idx2) > 0:
idx2 = numpy.concatenate(idx2)
else:
idx2 = numpy.array([], dtype=numpy.int64)
if slide_step:
diff = ((t1 / slide_step)[idx1] - (t2 / slide_step)[idx2])
slide = numpy.rint(diff)
else:
slide = numpy.zeros(len(idx1))
return idx1.astype(numpy.uint32), idx2.astype(numpy.uint32), slide.astype(numpy.int32) | python | {
"resource": ""
} |
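A zero-lag sketch for time_coincidence (slide_step left at 0, so only true coincidences are returned); the trigger times are illustrative.

import numpy

t1 = numpy.array([100.000, 200.000, 300.000])
t2 = numpy.array([100.002, 250.000, 299.999])
i1, i2, slide = time_coincidence(t1, t2, window=0.01)
print(list(zip(i1, i2)))  # [(0, 0), (2, 2)]
print(slide)              # [0 0]: zero-lag, so all slide ids are 0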
q31826 | time_multi_coincidence | train | def time_multi_coincidence(times, slide_step=0, slop=.003,
pivot='H1', fixed='L1'):
""" Find multi detector concidences.
Parameters
----------
times: dict of numpy.ndarrays
Dictionary keyed by ifo of the times of each single detector trigger.
slide_step: float
The interval between time slides
slop: float
The amount of time to add to the TOF between detectors for coincidence
pivot: str
ifo used to test coincidence against in first stage
fixed: str
the other ifo used in the first stage coincidence which we'll use
as a fixed time reference for coincident triggers. All other detectors
are time slid by being fixed to this detector.
Returns
-------
ids: dict of numpy.ndarrays
Dictionary keyed by ifo of indices into the input times arrays.
slide: numpy.ndarray
Array of slide ids for each coincidence.
"""
# pivots are used to determine standard coincidence triggers, we then
# pair off additional detectors to those.
def win(ifo1, ifo2):
d1 = Detector(ifo1)
d2 = Detector(ifo2)
return d1.light_travel_time_to_detector(d2) + slop
# Find coincs first between the two fully time-slid detectors
pivot_id, fix_id, slide = time_coincidence(times[pivot], times[fixed],
win(pivot, fixed),
slide_step=slide_step)
# additional detectors do not slide independently of the fixed one
# Each trigger in an additional detector must be concident with an
# existing coincident one. All times moved to 'fixed' relative time
fixed_time = times[fixed][fix_id]
pivot_time = times[pivot][pivot_id] - slide_step * slide
ctimes = {fixed: fixed_time, pivot:pivot_time}
ids = {fixed:fix_id, pivot:pivot_id}
dep_ifos = [ifo for ifo in times.keys() if ifo != fixed and ifo != pivot]
for ifo1 in dep_ifos:
otime = times[ifo1]
sort = times[ifo1].argsort()
time = otime[sort]
# Find coincidences between dependent ifo triggers and existing coinc.
for ifo2 in ids.keys():
# Currently assumes that additional detectors do not slide
# independently of the 'fixed one'
#
# To modify that assumption, the code here would be modified
# by adding a function that remaps the coinc time frame and unmaps
# it and the end of this loop.
# This remapping must ensure
# * function of the standard slide number
# * ensure all times remain within coincident segment
# * unbiased distribution of triggers after mapping.
w = win(ifo1, ifo2)
left = numpy.searchsorted(time, ctimes[ifo2] - w)
right = numpy.searchsorted(time, ctimes[ifo2] + w)
# remove elements that will not form a coinc
# There is only at most one trigger for an existing coinc
# (assumes triggers spaced > slide step)
nz = (right - left).nonzero()
dep_ids = left[nz]
# The property that only one trigger can be within the window is ensured
# by the peak finding algorithm we use for each template.
# If that is modified, this function may need to be
# extended.
if len(left) > 0 and (right - left).max() > 1:
raise ValueError('Somehow triggers are closer than time-delay window')
slide = slide[nz]
for ifo in ctimes:
ctimes[ifo] = ctimes[ifo][nz]
ids[ifo] = ids[ifo][nz]
# Add this detector now to the cumulative set and proceed to the next
# ifo coincidence test
ids[ifo1] = sort[dep_ids]
ctimes[ifo1] = otime[ids[ifo1]]
return ids, slide | python | {
"resource": ""
} |
q31827 | mean_if_greater_than_zero | train | def mean_if_greater_than_zero(vals):
""" Calculate mean over numerical values, ignoring values less than zero.
E.g. used for mean time over coincident triggers when timestamps are set
to -1 for ifos not included in the coincidence.
Parameters
----------
vals: iterator of numerical values
values to be mean averaged
Returns
-------
mean: float
The mean of the values in the original vector which are
greater than zero
num_above_zero: int
The number of entries in the vector which are above zero
"""
vals = numpy.array(vals)
above_zero = vals > 0
return vals[above_zero].mean(), above_zero.sum() | python | {
"resource": ""
} |
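For example, with a timestamp of -1 marking an ifo absent from a coincidence:

print(mean_if_greater_than_zero([1126259462.4, 1126259462.6, -1]))
# (1126259462.5, 2): the -1 sentinel is ignored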
q31828 | cluster_over_time | train | def cluster_over_time(stat, time, window, argmax=numpy.argmax):
"""Cluster generalized transient events over time via maximum stat over a
symmetric sliding window
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time: numpy.ndarray
time to use for clustering
window: float
length to cluster over
argmax: function
the function used to calculate the maximum value
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences.
"""
logging.info('Clustering events over %s s window', window)
time_sorting = time.argsort()
stat = stat[time_sorting]
time = time[time_sorting]
left = numpy.searchsorted(time, time - window)
right = numpy.searchsorted(time, time + window)
indices = numpy.zeros(len(left), dtype=numpy.uint32)
# i is the index we are inspecting, j is the next one to save
i = 0
j = 0
while i < len(left):
l = left[i]
r = right[i]
# If there are no other points to compare it is obviously the max
if (r - l) == 1:
indices[j] = i
j += 1
i += 1
continue
# Find the location of the maximum within the time interval around i
max_loc = argmax(stat[l:r]) + l
# If this point is the max, we can skip to the right boundary
if max_loc == i:
indices[j] = i
i = r
j += 1
# If the max is later than i, we can skip to it
elif max_loc > i:
i = max_loc
elif max_loc < i:
i += 1
indices = indices[:j]
logging.info('%d triggers remaining', len(indices))
return time_sorting[indices] | python | {
"resource": ""
} |
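A toy clustering run for cluster_over_time; the two loudest events survive because each dominates its 5 s neighbourhood.

import numpy

stat = numpy.array([5., 9., 6., 8., 7.])
time = numpy.array([0., 1., 2., 10., 11.])
keep = cluster_over_time(stat, time, window=5.)
print(sorted(keep))  # [1, 3]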
q31829 | MultiRingBuffer.discard_last | train | def discard_last(self, indices):
"""Discard the triggers added in the latest update"""
for i in indices:
self.buffer_expire[i] = self.buffer_expire[i][:-1]
self.buffer[i] = self.buffer[i][:-1] | python | {
"resource": ""
} |
q31830 | MultiRingBuffer.add | train | def add(self, indices, values):
"""Add triggers in 'values' to the buffers indicated by the indices
"""
for i, v in zip(indices, values):
self.buffer[i] = numpy.append(self.buffer[i], v)
self.buffer_expire[i] = numpy.append(self.buffer_expire[i], self.time)
self.advance_time() | python | {
"resource": ""
} |
q31831 | MultiRingBuffer.data | train | def data(self, buffer_index):
"""Return the data vector for a given ring buffer"""
# Check for expired elements and discard if they exist
expired = self.time - self.max_time
exp = self.buffer_expire[buffer_index]
j = 0
while j < len(exp):
# Everything before this j must be expired
if exp[j] >= expired:
self.buffer_expire[buffer_index] = exp[j:].copy()
self.buffer[buffer_index] = self.buffer[buffer_index][j:].copy()
break
j += 1
return self.buffer[buffer_index] | python | {
"resource": ""
} |
q31832 | CoincExpireBuffer.add | train | def add(self, values, times, ifos):
"""Add values to the internal buffer
Parameters
----------
values: numpy.ndarray
Array of elements to add to the internal buffer.
times: dict of arrays
The current time to use for each element being added.
ifos: list of strs
The set of timers to be incremented.
"""
for ifo in ifos:
self.time[ifo] += 1
# Resize the internal buffer if we need more space
if self.index + len(values) >= len(self.buffer):
newlen = len(self.buffer) * 2
for ifo in self.ifos:
self.timer[ifo].resize(newlen)
self.buffer.resize(newlen)
self.buffer[self.index:self.index+len(values)] = values
if len(values) > 0:
for ifo in self.ifos:
self.timer[ifo][self.index:self.index+len(values)] = times[ifo]
self.index += len(values)
# Remove the expired old elements
keep = None
for ifo in ifos:
kt = self.timer[ifo][:self.index] >= self.time[ifo] - self.expiration
keep = numpy.logical_and(keep, kt) if keep is not None else kt
self.buffer[:keep.sum()] = self.buffer[:self.index][keep]
for ifo in self.ifos:
self.timer[ifo][:keep.sum()] = self.timer[ifo][:self.index][keep]
self.index = keep.sum() | python | {
"resource": ""
} |
q31833 | LiveCoincTimeslideBackgroundEstimator.pick_best_coinc | train | def pick_best_coinc(cls, coinc_results):
"""Choose the best two-ifo coinc by ifar first, then statistic if needed.
This function picks which of the available double-ifo coincs to use.
It chooses the best (highest) ifar. The ranking statistic is used as
a tie-breaker.
A trials factor is applied if multiple types of coincs are possible
at this time given the active ifos.
Parameters
----------
coinc_results: list of coinc result dicts
List of coinc result dicts, one per detector pair.
Returns
-------
best: coinc results dict
If there is a coinc, this will contain the 'best' one. Otherwise
it will return the provided dict.
"""
mstat = 0
mifar = 0
mresult = None
# record the trials factor from the possible coincs we could
# maximize over
trials = 0
for result in coinc_results:
# Check that a coinc was possible. See the 'add_singles' method
# for where this flag is added into the results dict
if 'coinc_possible' in result:
trials += 1
# Check that a coinc exists
if 'foreground/ifar' in result:
ifar = result['foreground/ifar']
stat = result['foreground/stat']
if ifar > mifar or (ifar == mifar and stat > mstat):
mifar = ifar
mstat = stat
mresult = result
# apply trials factor for the best coinc
if mresult:
mresult['foreground/ifar'] = mifar / float(trials)
logging.info('Found %s coinc with ifar %s',
mresult['foreground/type'],
mresult['foreground/ifar'])
return mresult
# If no coinc, just return one of the results dictionaries. They will
# all contain the same results (i.e. single triggers) in this case.
else:
return coinc_results[0] | python | {
"resource": ""
} |
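A sketch of the trials-factor logic in pick_best_coinc with two hypothetical double-ifo results; note the winning ifar is divided by the number of possible coinc types.

results = [
    {'coinc_possible': True, 'foreground/ifar': 10.,
     'foreground/stat': 9., 'foreground/type': 'H1L1'},
    {'coinc_possible': True, 'foreground/ifar': 4.,
     'foreground/stat': 10., 'foreground/type': 'H1V1'},
]
best = LiveCoincTimeslideBackgroundEstimator.pick_best_coinc(results)
print(best['foreground/type'], best['foreground/ifar'])  # H1L1 5.0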
q31834 | LiveCoincTimeslideBackgroundEstimator.background_time | train | def background_time(self):
"""Return the amount of background time that the buffers contain"""
time = 1.0 / self.timeslide_interval
for ifo in self.singles:
time *= self.singles[ifo].filled_time * self.analysis_block
return time | python | {
"resource": ""
} |
q31835 | LiveCoincTimeslideBackgroundEstimator.ifar | train | def ifar(self, coinc_stat):
"""Return the far that would be associated with the coincident given.
"""
n = self.coincs.num_greater(coinc_stat)
return self.background_time / lal.YRJUL_SI / (n + 1) | python | {
"resource": ""
} |
q31836 | LiveCoincTimeslideBackgroundEstimator.set_singles_buffer | train | def set_singles_buffer(self, results):
"""Create the singles buffer
This creates the singles buffer for each ifo. The dtype is determined
by a representative sample of the single triggers in the results.
Parameters
----------
results: dict of dict
Dict indexed by ifo and then trigger column.
"""
# Determine the dtype from a sample of the data.
self.singles_dtype = []
data = False
for ifo in self.ifos:
if ifo in results and results[ifo] is not False:
data = results[ifo]
break
if data is False:
return
for key in data:
self.singles_dtype.append((key, data[key].dtype))
if 'stat' not in data:
self.singles_dtype.append(('stat', self.stat_calculator.single_dtype))
# Create a ring buffer for each template ifo combination
for ifo in self.ifos:
self.singles[ifo] = MultiRingBuffer(self.num_templates,
self.buffer_size,
self.singles_dtype) | python | {
"resource": ""
} |
q31837 | LiveCoincTimeslideBackgroundEstimator._add_singles_to_buffer | train | def _add_singles_to_buffer(self, results, ifos):
"""Add single detector triggers to the internal buffer
Parameters
----------
results: dict of arrays
Dictionary of dictionaries indexed by ifo and keys such as 'snr',
'chisq', etc. The specific format is determined by the
LiveBatchMatchedFilter class.
Returns
-------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
"""
if len(self.singles.keys()) == 0:
self.set_singles_buffer(results)
# convert to single detector trigger values
# FIXME Currently configured to use pycbc live output
# where chisq is the reduced chisq and chisq_dof is the actual DOF
logging.info("adding singles to the background estimate...")
updated_indices = {}
for ifo in ifos:
trigs = results[ifo]
if len(trigs['snr']) > 0:
trigsc = copy.copy(trigs)
trigsc['chisq'] = trigs['chisq'] * trigs['chisq_dof']
trigsc['chisq_dof'] = (trigs['chisq_dof'] + 2) / 2
single_stat = self.stat_calculator.single(trigsc)
else:
single_stat = numpy.array([], ndmin=1,
dtype=self.stat_calculator.single_dtype)
trigs['stat'] = single_stat
# add each single detector trigger to the buffer and advance it
data = numpy.zeros(len(single_stat), dtype=self.singles_dtype)
for key, value in trigs.items():
data[key] = value
self.singles[ifo].add(trigs['template_id'], data)
updated_indices[ifo] = trigs['template_id']
return updated_indices | python | {
"resource": ""
} |
q31838 | LiveCoincTimeslideBackgroundEstimator.backout_last | train | def backout_last(self, updated_singles, num_coincs):
"""Remove the recently added singles and coincs
Parameters
----------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
num_coincs: int
The number of coincs that were just added to the internal buffer
of coincident triggers
"""
for ifo in updated_singles:
self.singles[ifo].discard_last(updated_singles[ifo])
self.coincs.remove(num_coincs) | python | {
"resource": ""
} |
q31839 | LiveCoincTimeslideBackgroundEstimator.add_singles | train | def add_singles(self, results):
"""Add singles to the bacckground estimate and find candidates
Parameters
----------
results: dict of arrays
Dictionary of dictionaries indexed by ifo and keys such as 'snr',
'chisq', etc. The specific format is determined by the
LiveBatchMatchedFilter class.
Returns
-------
coinc_results: dict of arrays
A dictionary of arrays containing the coincident results.
"""
# Let's see how large everything is
logging.info('BKG Coincs %s stored %s bytes',
len(self.coincs), self.coincs.nbytes)
# If there are no results just return
valid_ifos = [k for k in results.keys() if results[k] and k in self.ifos]
if len(valid_ifos) == 0: return {}
# Add single triggers to the internal buffer
self._add_singles_to_buffer(results, ifos=valid_ifos)
# Calculate zerolag and background coincidences
_, coinc_results = self._find_coincs(results, ifos=valid_ifos)
# record if a coinc is possible in this chunk
if len(valid_ifos) == 2:
coinc_results['coinc_possible'] = True
return coinc_results | python | {
"resource": ""
} |
q31840 | IndependentChiPChiEff._constraints | train | def _constraints(self, values):
"""Applies physical constraints to the given parameter values.
Parameters
----------
values : {arr or dict}
A dictionary or structured array giving the values.
Returns
-------
bool
Whether or not the values satisfy physical
"""
mass1, mass2, phi_a, phi_s, chi_eff, chi_a, xi1, xi2, _ = \
conversions.ensurearray(values['mass1'], values['mass2'],
values['phi_a'], values['phi_s'],
values['chi_eff'], values['chi_a'],
values['xi1'], values['xi2'])
s1x = conversions.spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s)
s2x = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2,
xi2, phi_a, phi_s)
s1y = conversions.spin1y_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s)
s2y = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2,
xi2, phi_a, phi_s)
s1z = conversions.spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2,
chi_eff, chi_a)
s2z = conversions.spin2z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2,
chi_eff, chi_a)
test = ((s1x**2. + s1y**2. + s1z**2.) < 1.) & \
((s2x**2. + s2y**2. + s2z**2.) < 1.)
return test | python | {
"resource": ""
} |
q31841 | IndependentChiPChiEff._draw | train | def _draw(self, size=1, **kwargs):
"""Draws random samples without applying physical constrains.
"""
# draw masses
try:
mass1 = kwargs['mass1']
except KeyError:
mass1 = self.mass1_distr.rvs(size=size)['mass1']
try:
mass2 = kwargs['mass2']
except KeyError:
mass2 = self.mass2_distr.rvs(size=size)['mass2']
# draw angles
try:
phi_a = kwargs['phi_a']
except KeyError:
phi_a = self.phia_distr.rvs(size=size)['phi_a']
try:
phi_s = kwargs['phi_s']
except KeyError:
phi_s = self.phis_distr.rvs(size=size)['phi_s']
# draw chi_eff, chi_a
try:
chi_eff = kwargs['chi_eff']
except KeyError:
chi_eff = self.chieff_distr.rvs(size=size)['chi_eff']
try:
chi_a = kwargs['chi_a']
except KeyError:
chi_a = self.chia_distr.rvs(size=size)['chi_a']
# draw xis
try:
xi1 = kwargs['xi1']
except KeyError:
xi1 = self.xi1_distr.rvs(size=size)['xi1']
try:
xi2 = kwargs['xi2']
except KeyError:
xi2 = self.xi2_distr.rvs(size=size)['xi2']
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
arr['mass1'] = mass1
arr['mass2'] = mass2
arr['phi_a'] = phi_a
arr['phi_s'] = phi_s
arr['chi_eff'] = chi_eff
arr['chi_a'] = chi_a
arr['xi1'] = xi1
arr['xi2'] = xi2
return arr | python | {
"resource": ""
} |
q31842 | IndependentChiPChiEff.rvs | train | def rvs(self, size=1, **kwargs):
"""Returns random values for all of the parameters.
"""
size = int(size)
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
remaining = size
keepidx = 0
while remaining:
draws = self._draw(size=remaining, **kwargs)
mask = self._constraints(draws)
addpts = mask.sum()
arr[keepidx:keepidx+addpts] = draws[mask]
keepidx += addpts
remaining = size - keepidx
return arr | python | {
"resource": ""
} |
q31843 | get_source | train | def get_source(source):
"""Get the source data for a particular GW catalog
"""
if source == 'gwtc-1':
fname = download_file(gwtc1_url, cache=True)
data = json.load(open(fname, 'r'))
else:
raise ValueError('Unknown catalog source {}'.format(source))
return data['data'] | python | {
"resource": ""
} |
q31844 | init_logging | train | def init_logging(verbose=False, format='%(asctime)s %(message)s'):
""" Common utility for setting up logging in PyCBC.
Installs a signal handler such that verbosity can be activated at
run-time by sending a SIGUSR1 to the process.
"""
def sig_handler(signum, frame):
logger = logging.getLogger()
log_level = logger.level
if log_level == logging.DEBUG:
log_level = logging.WARN
else:
log_level = logging.DEBUG
logging.warn('Got signal %d, setting log level to %d',
signum, log_level)
logger.setLevel(log_level)
signal.signal(signal.SIGUSR1, sig_handler)
if verbose:
initial_level = logging.DEBUG
else:
initial_level = logging.WARN
logging.getLogger().setLevel(initial_level)
logging.basicConfig(format=format, level=initial_level) | python | {
"resource": ""
} |
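Typical use at the top of a script; sending SIGUSR1 to the running process then toggles the level between DEBUG and WARN.

import logging

init_logging(verbose=True)
logging.debug('visible because verbose logging is on')
# from a shell:  kill -USR1 <pid>   toggles DEBUG <-> WARN at run time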
q31845 | makedir | train | def makedir(path):
"""
Make the analysis directory path and any parent directories that don't
already exist. Will do nothing if path already exists.
"""
if path is not None and not os.path.exists(path):
os.makedirs(path) | python | {
"resource": ""
} |
q31846 | check_output_error_and_retcode | train | def check_output_error_and_retcode(*popenargs, **kwargs):
"""
This function is used to obtain the stdout of a command. It is only used
internally, recommend using the make_external_call command if you want
to call external executables.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, error = process.communicate()
retcode = process.poll()
return output, error, retcode | python | {
"resource": ""
} |
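A quick sketch, assuming a POSIX echo is available:

out, err, code = check_output_error_and_retcode(['echo', 'hello'])
print(out.strip(), code)  # hello 0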
q31847 | get_full_analysis_chunk | train | def get_full_analysis_chunk(science_segs):
"""
Function to find the first and last time point contained in the science segments
and return a single segment spanning that full time.
Parameters
-----------
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
Returns
--------
fullSegment : ligo.segments.segment
The segment spanning the first and last time point contained in science_segs.
"""
extents = [science_segs[ifo].extent() for ifo in science_segs.keys()]
seg_min, seg_max = extents[0]
for lo, hi in extents:
if seg_min > lo:
seg_min = lo
if seg_max < hi:
seg_max = hi
fullSegment = segments.segment(seg_min, seg_max)
return fullSegment | python | {
"resource": ""
} |
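A small sketch with two overlapping science segment lists:

from ligo import segments

science_segs = {
    'H1': segments.segmentlist([segments.segment(100, 200)]),
    'L1': segments.segmentlist([segments.segment(150, 300)]),
}
print(get_full_analysis_chunk(science_segs))  # segment(100, 300)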
q31848 | get_random_label | train | def get_random_label():
"""
Get a random label string to use when clustering jobs.
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) \
for _ in range(15)) | python | {
"resource": ""
} |
q31849 | Executable.ifo | train | def ifo(self):
"""Return the ifo.
If only one ifo in the ifo list this will be that ifo. Otherwise an
error is raised.
"""
if self.ifo_list and len(self.ifo_list) == 1:
return self.ifo_list[0]
else:
errMsg = "self.ifoList must contain only one ifo to access the "
errMsg += "ifo property. %s." %(str(self.ifo_list),)
raise TypeError(errMsg) | python | {
"resource": ""
} |
q31850 | Executable.add_ini_profile | train | def add_ini_profile(self, cp, sec):
"""Add profile from configuration file.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration settings
sec : string
The section containing options for this job.
"""
for opt in cp.options(sec):
namespace = opt.split('|')[0]
if namespace == 'pycbc' or namespace == 'container':
continue
value = string.strip(cp.get(sec, opt))
key = opt.split('|')[1]
self.add_profile(namespace, key, value, force=True)
# Remove if Pegasus can apply this hint in the TC
if namespace == 'hints' and key == 'execution.site':
self.execution_site = value | python | {
"resource": ""
} |
q31851 | Executable.add_ini_opts | train | def add_ini_opts(self, cp, sec):
"""Add job-specific options from configuration file.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration settings
sec : string
The section containing options for this job.
"""
for opt in cp.options(sec):
value = string.strip(cp.get(sec, opt))
opt = '--%s' %(opt,)
if opt in self.file_input_options:
# This now expects the option to be a file
# Check is we have a list of files
values = [path for path in value.split(' ') if path]
self.common_raw_options.append(opt)
self.common_raw_options.append(' ')
# Get LFN and PFN
for path in values:
# Here I decide if the path is URL or
# IFO:/path/to/file or IFO:url://path/to/file
# That's somewhat tricksy as we used : as delimiter
split_path = path.split(':', 1)
if len(split_path) == 1:
ifo = None
path = path
else:
# Have I split a URL or not?
if split_path[1].startswith('//'):
# URL
ifo = None
path = path
else:
#IFO:path or IFO:URL
ifo = split_path[0]
path = split_path[1]
curr_lfn = os.path.basename(path)
# If the file exists make sure to use the
# full path as a file:// URL
if os.path.isfile(path):
curr_pfn = urlparse.urljoin('file:',
urllib.pathname2url(
os.path.abspath(path)))
else:
curr_pfn = path
if curr_lfn in file_input_from_config_dict.keys():
file_pfn = file_input_from_config_dict[curr_lfn][2]
assert(file_pfn == curr_pfn)
curr_file = file_input_from_config_dict[curr_lfn][1]
else:
local_file_path = resolve_url(curr_pfn)
curr_file = File.from_path(local_file_path)
tuple_val = (local_file_path, curr_file, curr_pfn)
file_input_from_config_dict[curr_lfn] = tuple_val
self.common_input_files.append(curr_file)
if ifo:
self.common_raw_options.append(ifo + ':')
self.common_raw_options.append(curr_file.dax_repr)
else:
self.common_raw_options.append(curr_file.dax_repr)
self.common_raw_options.append(' ')
else:
self.common_options += [opt, value] | python | {
"resource": ""
} |
q31852 | Executable.add_opt | train | def add_opt(self, opt, value=None):
"""Add option to job.
Parameters
-----------
opt : string
Name of option (e.g. --output-file-format)
value : string, (default=None)
The value for the option (no value if set to None).
"""
if value is None:
self.common_options += [opt]
else:
self.common_options += [opt, value] | python | {
"resource": ""
} |
q31853 | Executable.get_opt | train | def get_opt(self, opt):
"""Get value of option from configuration file
Parameters
-----------
opt : string
Name of option (e.g. output-file-format)
Returns
--------
value : string
The value for the option. Returns None if option not present.
"""
for sec in self.sections:
try:
key = self.cp.get(sec, opt)
if key:
return key
except ConfigParser.NoOptionError:
pass
return None | python | {
"resource": ""
} |
q31854 | Executable.has_opt | train | def has_opt(self, opt):
"""Check if option is present in configuration file
Parameters
-----------
opt : string
Name of option (e.g. output-file-format)
"""
for sec in self.sections:
val = self.cp.has_option(sec, opt)
if val:
return val
return False | python | {
"resource": ""
} |
q31855 | Executable.update_current_retention_level | train | def update_current_retention_level(self, value):
"""Set a new value for the current retention level.
This updates the value of self.retain_files for an updated value of the
retention level.
Parameters
-----------
value : int
The new value to use for the retention level.
"""
# Determine the level at which output files should be kept
self.current_retention_level = value
try:
global_retention_level = \
self.cp.get_opt_tags("workflow", "file-retention-level",
self.tags+[self.name])
except ConfigParser.Error:
msg="Cannot find file-retention-level in [workflow] section "
msg+="of the configuration file. Setting a default value of "
msg+="retain all files."
logging.warn(msg)
self.retain_files = True
self.global_retention_threshold = 1
self.cp.set("workflow", "file-retention-level", "all_files")
else:
# FIXME: Are these names suitably descriptive?
retention_choices = {
'all_files' : 1,
'all_triggers' : 2,
'merged_triggers' : 3,
'results' : 4
}
try:
self.global_retention_threshold = \
retention_choices[global_retention_level]
except KeyError:
err_msg = "Cannot recognize the file-retention-level in the "
err_msg += "[workflow] section of the ini file. "
err_msg += "Got : {0}.".format(global_retention_level)
err_msg += "Valid options are: 'all_files', 'all_triggers',"
err_msg += "'merged_triggers' or 'results' "
raise ValueError(err_msg)
if self.current_retention_level == 5:
self.retain_files = True
if type(self).__name__ in Executable._warned_classes_list:
pass
else:
warn_msg = "Attribute current_retention_level has not "
warn_msg += "been set in class {0}. ".format(type(self))
warn_msg += "This value should be set explicitly. "
warn_msg += "All output from this class will be stored."
logging.warn(warn_msg)
Executable._warned_classes_list.append(type(self).__name__)
elif self.global_retention_threshold > self.current_retention_level:
self.retain_files = False
else:
self.retain_files = True | python | {
"resource": ""
} |
q31856 | Executable.update_current_tags | train | def update_current_tags(self, tags):
"""Set a new set of tags for this executable.
Update the set of tags that this job will use. This updated default
file naming and shared options. It will *not* update the pegasus
profile, which belong to the executable and cannot be different for
different nodes.
Parameters
-----------
tags : list
The new list of tags to consider.
"""
if tags is None:
tags = []
tags = [tag.upper() for tag in tags]
self.tags = tags
if len(tags) > 6:
warn_msg = "This job has way too many tags. "
warn_msg += "Current tags are {}. ".format(' '.join(tags))
warn_msg += "Current executable {}.".format(self.name)
logging.info(warn_msg)
if len(tags) != 0:
self.tagged_name = "{0}-{1}".format(self.name, '_'.join(tags))
else:
self.tagged_name = self.name
if self.ifo_string is not None:
self.tagged_name = "{0}-{1}".format(self.tagged_name,
self.ifo_string)
# Determine the sections from the ini file that will configure
# this executable
sections = [self.name]
if self.ifo_list is not None:
if len(self.ifo_list) > 1:
sec_tags = tags + self.ifo_list + [self.ifo_string]
else:
sec_tags = tags + self.ifo_list
else:
sec_tags = tags
for sec_len in range(1, len(sec_tags)+1):
for tag_permutation in permutations(sec_tags, sec_len):
joined_name = '-'.join(tag_permutation)
section = '{0}-{1}'.format(self.name, joined_name.lower())
if self.cp.has_section(section):
sections.append(section)
self.sections = sections
# Do some basic sanity checking on the options
for sec1, sec2 in combinations(sections, 2):
self.cp.check_duplicate_options(sec1, sec2, raise_error=True)
# collect the options and profile information
# from the ini file section(s)
self.common_options = []
self.common_raw_options = []
self.common_input_files = []
for sec in sections:
if self.cp.has_section(sec):
self.add_ini_opts(self.cp, sec)
else:
warn_string = "warning: config file is missing section "
warn_string += "[{0}]".format(sec)
logging.warn(warn_string) | python | {
"resource": ""
} |
q31857 | Executable.update_output_directory | train | def update_output_directory(self, out_dir=None):
"""Update the default output directory for output files.
Parameters
-----------
out_dir : string (optional, default=None)
If provided use this as the output directory. Else choose this
automatically from the tags.
"""
# Determine the output directory
if out_dir is not None:
self.out_dir = out_dir
elif len(self.tags) == 0:
self.out_dir = self.name
else:
self.out_dir = self.tagged_name
if not os.path.isabs(self.out_dir):
self.out_dir = os.path.join(os.getcwd(), self.out_dir) | python | {
"resource": ""
} |
q31858 | Executable._set_pegasus_profile_options | train | def _set_pegasus_profile_options(self):
"""Set the pegasus-profile settings for this Executable.
These are a property of the Executable and not of nodes that it will
spawn. Therefore it *cannot* be updated without also changing values
for nodes that might already have been created. Therefore this is
only called once in __init__. Second calls to this will fail.
"""
# Add executable non-specific profile information
if self.cp.has_section('pegasus_profile'):
self.add_ini_profile(self.cp, 'pegasus_profile')
# Executable- and tag-specific profile information
for sec in self.sections:
if self.cp.has_section('pegasus_profile-{0}'.format(sec)):
self.add_ini_profile(self.cp,
'pegasus_profile-{0}'.format(sec)) | python | {
"resource": ""
} |
q31859 | Workflow.execute_node | train | def execute_node(self, node, verbatim_exe = False):
""" Execute this node immediately on the local machine
"""
node.executed = True
# Check that the PFN is for a file or path
if node.executable.needs_fetching:
try:
# The pfn may have been marked local...
pfn = node.executable.get_pfn()
except:
# or it may have been marked nonlocal. That's
# fine, we'll resolve the URL and make a local
# entry.
pfn = node.executable.get_pfn('nonlocal')
resolved = resolve_url(pfn, permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
node.executable.clear_pfns()
node.executable.add_pfn(urlparse.urljoin('file:',
urllib.pathname2url(
resolved)), site='local')
cmd_list = node.get_command_line()
# Must execute in output directory.
curr_dir = os.getcwd()
out_dir = node.executable.out_dir
os.chdir(out_dir)
# Make call
make_external_call(cmd_list, out_dir=os.path.join(out_dir, 'logs'),
out_basename=node.executable.name)
# Change back
os.chdir(curr_dir)
for fil in node._outputs:
fil.node = None
fil.PFN(urlparse.urljoin('file:',
urllib.pathname2url(fil.storage_path)),
site='local') | python | {
"resource": ""
} |
q31860 | Workflow.save_config | train | def save_config(self, fname, output_dir, cp=None):
""" Writes configuration file to disk and returns a pycbc.workflow.File
instance for the configuration file.
Parameters
-----------
fname : string
The filename of the configuration file written to disk.
output_dir : string
The directory where the file is written to disk.
cp : ConfigParser object
The ConfigParser object to write. If None then uses self.cp.
Returns
-------
FileList
The FileList object with the configuration file.
"""
cp = self.cp if cp is None else cp
ini_file_path = os.path.join(output_dir, fname)
with open(ini_file_path, "wb") as fp:
cp.write(fp)
ini_file = FileList([File(self.ifos, "",
self.analysis_time,
file_url="file://" + ini_file_path)])
return ini_file | python | {
"resource": ""
} |
q31861 | Node.new_output_file_opt | train | def new_output_file_opt(self, valid_seg, extension, option_name, tags=None,
store_file=None, use_tmp_subdirs=False):
"""
This function will create a workflow.File object corresponding to the given
information and then add that file as output of this node.
Parameters
-----------
valid_seg : ligo.segments.segment
The time span over which the job is valid for.
extension : string
The extension to be used at the end of the filename.
E.g. '.xml' or '.sqlite'.
option_name : string
The option that is used when setting this job as output. E.g.
'--output-name' or '--output-file', whatever is appropriate for the
current executable.
tags : list of strings, (optional, default=[])
These tags will be added to the list of tags already associated with
the job. They can be used to uniquely identify this output file.
store_file : Boolean, (optional, default=None)
This file is to be added to the output mapper and will be stored
in the specified output location if True. If False, the file will be
removed when no longer needed in the workflow. If None, the
executable's retain_files setting is used.
"""
if tags is None:
tags = []
# Changing this from set(tags) to enforce order. It might make sense
# for all jobs to have file names with tags in the same order.
all_tags = copy.deepcopy(self.executable.tags)
for tag in tags:
if tag not in all_tags:
all_tags.append(tag)
store_file = store_file if store_file is not None else self.executable.retain_files
fil = File(self.executable.ifo_list, self.executable.name,
valid_seg, extension=extension, store_file=store_file,
directory=self.executable.out_dir, tags=all_tags,
use_tmp_subdirs=use_tmp_subdirs)
self.add_output_opt(option_name, fil)
return fil | python | {
"resource": ""
} |
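A minimal usage sketch for new_output_file_opt, assuming `workflow` is an initialised Workflow and its configuration defines an executable section named 'inspiral' (the section name and the 'FULL_DATA' tag are hypothetical):

    # build a node from a configured executable (the pattern used elsewhere
    # in this module, e.g. in compute_inj_optimal_snr)
    exe = Executable(workflow.cp, 'inspiral', ifos=['H1'],
                     out_dir='inspiral_output')
    node = exe.create_node()
    # create the output File and register it as this node's --output-file
    out_file = node.new_output_file_opt(workflow.analysis_time, '.xml.gz',
                                        '--output-file', tags=['FULL_DATA'])
    workflow += node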
q31862 | Node.output_file | train | def output_file(self):
"""
If only one output file return it. Otherwise raise an exception.
"""
out_files = self.output_files
if len(out_files) != 1:
err_msg = "output_file property is only valid if there is a single"
err_msg += " output file. Here there are "
err_msg += "%d output files." %(len(out_files))
raise ValueError(err_msg)
return out_files[0] | python | {
"resource": ""
} |
q31863 | File.ifo | train | def ifo(self):
"""
If only one ifo in the ifo_list this will be that ifo. Otherwise an
error is raised.
"""
if len(self.ifo_list) == 1:
return self.ifo_list[0]
else:
err = "self.ifo_list must contain only one ifo to access the "
err += "ifo property. %s." %(str(self.ifo_list),)
raise TypeError(err) | python | {
"resource": ""
} |
q31864 | File.segment | train | def segment(self):
"""
If only one segment in the segmentlist this will be that segment.
Otherwise an error is raised.
"""
if len(self.segment_list) == 1:
return self.segment_list[0]
else:
err = "self.segment_list must only contain one segment to access"
err += " the segment property. %s." %(str(self.segment_list),)
raise TypeError(err) | python | {
"resource": ""
} |
q31865 | File.cache_entry | train | def cache_entry(self):
"""
Returns a CacheEntry instance for File.
"""
if self.storage_path is None:
raise ValueError('This file is temporary and so a lal '
'cache entry cannot be made')
file_url = urlparse.urlunparse(['file', 'localhost', self.storage_path, None,
None, None])
cache_entry = lal.utils.CacheEntry(self.ifo_string,
self.tagged_description, self.segment_list.extent(), file_url)
cache_entry.workflow_file = self
return cache_entry | python | {
"resource": ""
} |
q31866 | File._filename | train | def _filename(self, ifo, description, extension, segment):
"""
Construct the standard output filename. Should only be used
internally by the File class.
"""
if extension.startswith('.'):
extension = extension[1:]
# Follow the frame convention of using integer filenames,
# but stretching to cover partially covered seconds.
start = int(segment[0])
end = int(math.ceil(segment[1]))
duration = str(end-start)
start = str(start)
return "%s-%s-%s-%s.%s" % (ifo, description.upper(), start,
duration, extension) | python | {
"resource": ""
} |
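A worked example of the naming convention above (values are illustrative):

    # segment (1000000000, 1000000100.5): the start is truncated and the
    # end rounded up, so duration = 1000000101 - 1000000000 = 101
    # self._filename('H1', 'inspiral', '.xml.gz',
    #                segments.segment(1000000000, 1000000100.5))
    # -> 'H1-INSPIRAL-1000000000-101.xml.gz'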
q31867 | FileList.find_output_at_time | train | def find_output_at_time(self, ifo, time):
'''
Return File that covers the given time.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
time : int/float/LIGOGPStime
Return the Files that covers the supplied time. If no
File covers the time this will return None.
Returns
--------
list of File classes
The Files that corresponds to the time.
'''
# Get list of Files that overlap time, for given ifo
outFiles = [i for i in self if ifo in i.ifo_list and time in i.segment_list]
if len(outFiles) == 0:
# No OutFile at this time
return None
elif len(outFiles) == 1:
# 1 OutFile at this time (good!)
return outFiles
else:
# Multiple output files. Currently this is valid, but we may want
# to demand exclusivity later, or in certain cases. Hence the
# separation.
return outFiles | python | {
"resource": ""
} |
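A short sketch of querying a FileList by time; the File constructor arguments follow the pattern used elsewhere in this module, and the paths and GPS times are made up:

    from ligo import segments
    f1 = File('H1', 'INSPIRAL', segments.segment(1000000000, 1000002048),
              extension='.xml.gz', directory='/tmp/out')
    f2 = File('H1', 'INSPIRAL', segments.segment(1000002048, 1000004096),
              extension='.xml.gz', directory='/tmp/out')
    flist = FileList([f1, f2])
    flist.find_output_at_time('H1', 1000000100)  # -> [f1] (a list, even for one match)
    flist.find_output_at_time('H1', 999999999)   # -> None (no file covers this time)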
q31868 | FileList.find_outputs_in_range | train | def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False):
"""
Return the list of Files that is most appropriate for the supplied
time range. That is, the Files whose coverage time has the
largest overlap with the supplied time range.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
current_segment : ligo.segments.segment
The segment of time that files must intersect.
Returns
--------
FileList class
The list of Files that are most appropriate for the time range
"""
currsegment_list = segments.segmentlist([current_segment])
# Get all files overlapping the window
overlap_files = self.find_all_output_in_range(ifo, current_segment,
useSplitLists=useSplitLists)
# By how much do they overlap?
overlap_windows = [abs(i.segment_list & currsegment_list) for i in overlap_files]
if not overlap_windows:
return []
# Return the File with the biggest overlap
# Note if two File have identical overlap, the first is used
# to define the valid segment
overlap_windows = numpy.array(overlap_windows, dtype = int)
segmentLst = overlap_files[overlap_windows.argmax()].segment_list
# Get all output files with the exact same segment definition
output_files = [f for f in overlap_files if f.segment_list==segmentLst]
return output_files | python | {
"resource": ""
} |
q31869 | FileList.find_output_in_range | train | def find_output_in_range(self, ifo, start, end):
'''
Return the File that is most appropriate for the supplied
time range. That is, the File whose coverage time has the
largest overlap with the supplied time range. If no Files
overlap the supplied time window, will return None.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
start : int/float/LIGOGPStime
The start of the time range of interest.
end : int/float/LIGOGPStime
The end of the time range of interest
Returns
--------
File class
The File that is most appropriate for the time range
'''
currsegment_list = segments.segmentlist([segments.segment(start, end)])
# First filter Files corresponding to ifo
outFiles = [i for i in self if ifo in i.ifo_list]
if len(outFiles) == 0:
# No OutFiles correspond to that ifo
return None
# Filter OutFiles to those overlapping the given window
currSeg = segments.segment([start,end])
outFiles = [i for i in outFiles \
if i.segment_list.intersects_segment(currSeg)]
if len(outFiles) == 0:
# No OutFile overlap that time period
return None
elif len(outFiles) == 1:
# One OutFile overlaps that period
return outFiles[0]
else:
overlap_windows = [abs(i.segment_list & currsegment_list) \
for i in outFiles]
# Return the File with the biggest overlap
# Note if two File have identical overlap, this will return
# the first File in the list
overlap_windows = numpy.array(overlap_windows, dtype = int)
return outFiles[overlap_windows.argmax()] | python | {
"resource": ""
} |
q31870 | FileList.find_all_output_in_range | train | def find_all_output_in_range(self, ifo, currSeg, useSplitLists=False):
"""
Return all files that overlap the specified segment.
"""
if not useSplitLists:
# Slower, but simpler method
outFiles = [i for i in self if ifo in i.ifo_list]
outFiles = [i for i in outFiles \
if i.segment_list.intersects_segment(currSeg)]
else:
# Faster, but more complicated
# Basically only check if a subset of files intersects_segment by
# using a presorted list. Sorting only happens once.
if not self._check_split_list_validity():
# FIXME: DO NOT hard code this.
self._temporal_split_list(100)
startIdx = int( (currSeg[0] - self._splitListsStart) / \
self._splitListsStep )
# Add some small rounding here
endIdx = (currSeg[1] - self._splitListsStart) / self._splitListsStep
endIdx = int(endIdx - 0.000001)
outFiles = []
for idx in range(startIdx, endIdx + 1):
if idx < 0 or idx >= self._splitListsNum:
continue
outFilesTemp = [i for i in self._splitLists[idx] \
if ifo in i.ifo_list]
outFiles.extend([i for i in outFilesTemp \
if i.segment_list.intersects_segment(currSeg)])
# Remove duplicates
outFiles = list(set(outFiles))
return self.__class__(outFiles) | python | {
"resource": ""
} |
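The fast branch above maps the query segment onto pre-sorted time bins; a numerical sketch of the index arithmetic, assuming 100 s bins starting at GPS 1000000000:

    start_gps, step = 1000000000, 100  # _splitListsStart, _splitListsStep
    curr_seg = (1000000150, 1000000450)
    start_idx = int((curr_seg[0] - start_gps) / step)       # -> 1
    end_idx = int((curr_seg[1] - start_gps) / step - 1e-6)  # -> 4
    # only bins 1..4 are scanned rather than the whole FileList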
q31871 | FileList.find_output_with_tag | train | def find_output_with_tag(self, tag):
"""
Find all files that have tag in self.tags
"""
# Enforce upper case
tag = tag.upper()
return FileList([i for i in self if tag in i.tags]) | python | {
"resource": ""
} |
q31872 | FileList.find_output_without_tag | train | def find_output_without_tag(self, tag):
"""
Find all files that do not have tag in self.tags
"""
# Enforce upper case
tag = tag.upper()
return FileList([i for i in self if tag not in i.tags]) | python | {
"resource": ""
} |
q31873 | FileList.find_output_with_ifo | train | def find_output_with_ifo(self, ifo):
"""
Find all files that contain the given ifo in their ifo_list
"""
# Enforce upper case
ifo = ifo.upper()
return FileList([i for i in self if ifo in i.ifo_list]) | python | {
"resource": ""
} |
q31874 | FileList.get_times_covered_by_files | train | def get_times_covered_by_files(self):
"""
Find the coalesced union of the segments of all files in the
list.
"""
times = segments.segmentlist([])
for entry in self:
times.extend(entry.segment_list)
times.coalesce()
return times | python | {
"resource": ""
} |
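Note that extend-then-coalesce computes a union of the files' segments, for example:

    from ligo import segments
    times = segments.segmentlist([segments.segment(0, 100),
                                  segments.segment(50, 150),
                                  segments.segment(200, 300)])
    times.coalesce()
    # -> [segment(0, 150), segment(200, 300)]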
q31875 | FileList.convert_to_lal_cache | train | def convert_to_lal_cache(self):
"""
Return all files in this object as a glue.lal.Cache object
"""
lal_cache = gluelal.Cache([])
for entry in self:
try:
lal_cache.append(entry.cache_entry)
except ValueError:
pass
return lal_cache | python | {
"resource": ""
} |
q31876 | FileList._check_split_list_validity | train | def _check_split_list_validity(self):
"""
See _temporal_split_list above. This function checks if the current
split lists are still valid.
"""
# FIXME: Currently very primitive, but needs to be fast
if not (hasattr(self,"_splitListsSet") and (self._splitListsSet)):
return False
elif len(self) != self._splitListsLength:
return False
else:
return True | python | {
"resource": ""
} |
q31877 | FileList.dump | train | def dump(self, filename):
"""
Output this FileList to a pickle file
"""
with open(filename, 'wb') as fp:
cPickle.dump(self, fp) | python | {
"resource": ""
} |
q31878 | FileList.to_file_object | train | def to_file_object(self, name, out_dir):
"""Dump to a pickle file and return an File object reference of this list
Parameters
----------
name : str
An identifier of this file. Needs to be unique.
out_dir : path
path to place this file
Returns
-------
file : File
"""
make_analysis_dir(out_dir)
file_ref = File('ALL', name, self.get_times_covered_by_files(),
extension='.pkl', directory=out_dir)
self.dump(file_ref.storage_path)
return file_ref | python | {
"resource": ""
} |
q31879 | SegFile.from_segment_list | train | def from_segment_list(cls, description, segmentlist, name, ifo,
seg_summ_list=None, **kwargs):
""" Initialize a SegFile object from a segmentlist.
Parameters
------------
description : string (required)
See File.__init__
segmentlist : ligo.segments.segmentslist
The segment list that will be stored in this file.
name : str
The name of the segment lists to be stored in the file.
ifo : str
The ifo of the segment lists to be stored in this file.
seg_summ_list : ligo.segments.segmentslist (OPTIONAL)
Specify the segment_summary segmentlist that goes along with the
segmentlist. Default=None, in this case segment_summary is taken
from the valid_segment of the SegFile class.
"""
seglistdict = segments.segmentlistdict()
seglistdict[ifo + ':' + name] = segmentlist
if seg_summ_list is not None:
seg_summ_dict = segments.segmentlistdict()
seg_summ_dict[ifo + ':' + name] = seg_summ_list
else:
seg_summ_dict = None
return cls.from_segment_list_dict(description, seglistdict,
seg_summ_dict=seg_summ_dict, **kwargs) | python | {
"resource": ""
} |
q31880 | SegFile.from_multi_segment_list | train | def from_multi_segment_list(cls, description, segmentlists, names, ifos,
seg_summ_lists=None, **kwargs):
""" Initialize a SegFile object from a list of segmentlists.
Parameters
------------
description : string (required)
See File.__init__
segmentlists : List of ligo.segments.segmentslist
List of segment lists that will be stored in this file.
names : List of str
List of names of the segment lists to be stored in the file.
ifos : str
List of ifos of the segment lists to be stored in this file.
seg_summ_lists : ligo.segments.segmentslist (OPTIONAL)
Specify the segment_summary segmentlists that go along with the
segmentlists. Default=None, in this case segment_summary is taken
from the valid_segment of the SegFile class.
"""
seglistdict = segments.segmentlistdict()
for name, ifo, segmentlist in zip(names, ifos, segmentlists):
seglistdict[ifo + ':' + name] = segmentlist
if seg_summ_lists is not None:
seg_summ_dict = segments.segmentlistdict()
for name, ifo, seg_summ_list in zip(names, ifos, seg_summ_lists):
seg_summ_dict[ifo + ':' + name] = seg_summ_list
else:
seg_summ_dict = None
return cls.from_segment_list_dict(description, seglistdict,
seg_summ_dict=seg_summ_dict, **kwargs) | python | {
"resource": ""
} |
q31881 | SegFile.from_segment_list_dict | train | def from_segment_list_dict(cls, description, segmentlistdict,
ifo_list=None, valid_segment=None,
file_exists=False, seg_summ_dict=None,
**kwargs):
""" Initialize a SegFile object from a segmentlistdict.
Parameters
------------
description : string (required)
See File.__init__
segmentlistdict : ligo.segments.segmentslistdict
See SegFile.__init__
ifo_list : string or list (optional)
See File.__init__, if not given a list of all ifos in the
segmentlistdict object will be used
valid_segment : ligo.segments.segment or ligo.segments.segmentlist
See File.__init__, if not given the extent of all segments in the
segmentlistdict is used.
file_exists : boolean (default = False)
If provided and set to True it is assumed that this file already
exists on disk and so there is no need to write again.
seg_summ_dict : ligo.segments.segmentslistdict
Optional. See SegFile.__init__.
"""
if ifo_list is None:
ifo_set = set([i.split(':')[0] for i in segmentlistdict.keys()])
ifo_list = list(ifo_set)
ifo_list.sort()
if valid_segment is None:
if seg_summ_dict and \
numpy.any([len(v) for _, v in seg_summ_dict.items()]):
# Only come here if seg_summ_dict is supplied and it is
# not empty.
valid_segment = seg_summ_dict.extent_all()
else:
try:
valid_segment = segmentlistdict.extent_all()
except AttributeError:
# The caller probably supplied a plain dict rather than a
# glue/ligo segmentlistdict, so coerce it and try again.
segmentlistdict = segments.segmentlistdict(segmentlistdict)
try:
valid_segment = segmentlistdict.extent_all()
except ValueError:
# No segment_summary and segment list is empty
# Setting valid segment now is hard!
warn_msg = "No information with which to set valid "
warn_msg += "segment."
logging.warn(warn_msg)
valid_segment = segments.segment([0,1])
instnc = cls(ifo_list, description, valid_segment,
segment_dict=segmentlistdict, seg_summ_dict=seg_summ_dict,
**kwargs)
if not file_exists:
instnc.to_segment_xml()
else:
instnc.PFN(urlparse.urljoin('file:',
urllib.pathname2url(
instnc.storage_path)), site='local')
return instnc | python | {
"resource": ""
} |
q31882 | SegFile.from_segment_xml | train | def from_segment_xml(cls, xml_file, **kwargs):
"""
Read a ligo.segments.segmentlist from the file object file containing an
xml segment table.
Parameters
-----------
xml_file : file object
file object for segment xml file
"""
# load xmldocument and SegmentDefTable and SegmentTables
fp = open(xml_file, 'r')
xmldoc, _ = ligolw_utils.load_fileobj(fp,
gz=xml_file.endswith(".gz"),
contenthandler=ContentHandler)
seg_def_table = table.get_table(xmldoc,
lsctables.SegmentDefTable.tableName)
seg_table = table.get_table(xmldoc, lsctables.SegmentTable.tableName)
seg_sum_table = table.get_table(xmldoc,
lsctables.SegmentSumTable.tableName)
segs = segments.segmentlistdict()
seg_summ = segments.segmentlistdict()
seg_id = {}
for seg_def in seg_def_table:
# Here we want to encode ifo and segment name
full_channel_name = ':'.join([str(seg_def.ifos),
str(seg_def.name)])
seg_id[int(seg_def.segment_def_id)] = full_channel_name
segs[full_channel_name] = segments.segmentlist()
seg_summ[full_channel_name] = segments.segmentlist()
for seg in seg_table:
seg_obj = segments.segment(
lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
segs[seg_id[int(seg.segment_def_id)]].append(seg_obj)
for seg in seg_sum_table:
seg_obj = segments.segment(
lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
seg_summ[seg_id[int(seg.segment_def_id)]].append(seg_obj)
for seg_name in seg_id.values():
segs[seg_name] = segs[seg_name].coalesce()
xmldoc.unlink()
fp.close()
curr_url = urlparse.urlunparse(['file', 'localhost', xml_file, None,
None, None])
return cls.from_segment_list_dict('SEGMENTS', segs, file_url=curr_url,
file_exists=True,
seg_summ_dict=seg_summ, **kwargs) | python | {
"resource": ""
} |
q31883 | SegFile.remove_short_sci_segs | train | def remove_short_sci_segs(self, minSegLength):
"""
Function to remove all science segments
shorter than a specific length. Also updates the file on disk to remove
these segments.
Parameters
-----------
minSegLength : int
Minimum length of science segments. Segments shorter than this will
be removed.
"""
for key, seglist in self.segment_dict.items():
newsegment_list = segments.segmentlist()
for seg in seglist:
if abs(seg) > minSegLength:
newsegment_list.append(seg)
newsegment_list.coalesce()
self.segment_dict[key] = newsegment_list
self.to_segment_xml(override_file_if_exists=True) | python | {
"resource": ""
} |
q31884 | SegFile.parse_segdict_key | train | def parse_segdict_key(self, key):
"""
Return ifo and name from the segdict key.
"""
splt = key.split(':')
if len(splt) == 2:
return splt[0], splt[1]
else:
err_msg = "Key should be of the format 'ifo:name', got %s." %(key,)
raise ValueError(err_msg) | python | {
"resource": ""
} |
q31885 | SegFile.to_segment_xml | train | def to_segment_xml(self, override_file_if_exists=False):
"""
Write the segment list in self.segmentList to self.storage_path.
"""
# create XML doc and add process table
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
process = ligolw_process.register_to_xmldoc(outdoc, sys.argv[0], {})
for key, seglist in self.segment_dict.items():
ifo, name = self.parse_segdict_key(key)
# Ensure we have LIGOTimeGPS
fsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) for seg in seglist]
if self.seg_summ_dict is None:
vsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) \
for seg in self.valid_segments]
else:
vsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) \
for seg in self.seg_summ_dict[key]]
# Add using glue library to set all segment tables
with ligolw_segments.LigolwSegments(outdoc, process) as x:
x.add(ligolw_segments.LigolwSegmentList(active=fsegs,
instruments=set([ifo]), name=name,
version=1, valid=vsegs))
# write file
url = urlparse.urljoin('file:', urllib.pathname2url(self.storage_path))
if not override_file_if_exists or not self.has_pfn(url, site='local'):
self.PFN(url, site='local')
ligolw_utils.write_filename(outdoc, self.storage_path) | python | {
"resource": ""
} |
q31886 | complex_median | train | def complex_median(complex_list):
""" Get the median value of a list of complex numbers.
Parameters
----------
complex_list: list
List of complex numbers to calculate the median.
Returns
-------
a + 1.j*b: complex number
The median of the real and imaginary parts.
"""
median_real = numpy.median([complex_number.real
for complex_number in complex_list])
median_imag = numpy.median([complex_number.imag
for complex_number in complex_list])
return median_real + 1.j*median_imag | python | {
"resource": ""
} |
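Since the real and imaginary parts are treated independently, the result need not be an element of the input, e.g.:

    complex_median([1 + 2j, 3 + 0j, 2 + 5j])
    # median of real parts = 2.0, of imaginary parts = 2.0 -> (2+2j)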
q31887 | avg_inner_product | train | def avg_inner_product(data1, data2, bin_size):
""" Calculate the time-domain inner product averaged over bins.
Parameters
----------
data1: pycbc.types.TimeSeries
First data set.
data2: pycbc.types.TimeSeries
Second data set, with same duration and sample rate as data1.
bin_size: float
Duration of the bins the data will be divided into to calculate
the inner product.
Returns
-------
inner_prod: list
The (complex) inner product of data1 and data2 obtained in each bin.
amp: float
The absolute value of the median of the inner product.
phi: float
The angle of the median of the inner product.
"""
assert data1.duration == data2.duration
assert data1.sample_rate == data2.sample_rate
seglen = int(bin_size * data1.sample_rate)
inner_prod = []
for idx in range(int(data1.duration / bin_size)):
start, end = idx * seglen, (idx+1) * seglen
norm = len(data1[start:end])
bin_prod = 2 * sum(data1.data[start:end].real *
numpy.conjugate(data2.data[start:end])) / norm
inner_prod.append(bin_prod)
# Get the median over all bins to avoid outliers due to the presence
# of a signal in a particular bin.
inner_median = complex_median(inner_prod)
return inner_prod, numpy.abs(inner_median), numpy.angle(inner_median) | python | {
"resource": ""
} |
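A self-contained sketch that recovers the amplitude and phase of a known line; the sample rate, frequency, amplitude and phase are arbitrary choices:

    import numpy
    from pycbc.types import TimeSeries
    dt = 1.0 / 1024
    t = numpy.arange(int(16 / dt)) * dt
    # real data containing a 60 Hz line with amplitude 1e-2 and phase 0.3
    data = TimeSeries(1e-2 * numpy.cos(2 * numpy.pi * 60 * t + 0.3),
                      delta_t=dt, epoch=0)
    # complex template at the same frequency (cf. line_model below)
    template = TimeSeries(numpy.exp(2j * numpy.pi * 60 * t),
                          delta_t=dt, epoch=0)
    _, amp, phi = avg_inner_product(data, template, bin_size=4)
    # amp ~ 1e-2 and phi ~ 0.3, up to discretisation effects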
q31888 | line_model | train | def line_model(freq, data, tref, amp=1, phi=0):
""" Simple time-domain model for a frequency line.
Parameters
----------
freq: float
Frequency of the line.
data: pycbc.types.TimeSeries
Reference data, to get delta_t, start_time, duration and sample_times.
tref: float
Reference time for the line model.
amp: {1., float}, optional
Amplitude of the frequency line.
phi: {0. float}, optional
Phase of the frequency line (radians).
Returns
-------
freq_line: pycbc.types.TimeSeries
A timeseries of the line model with frequency 'freq'. The returned
data are complex to allow measuring the amplitude and phase of the
corresponding frequency line in the strain data. For extraction, use
only the real part of the data.
"""
freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t,
epoch=data.start_time)
times = data.sample_times - float(tref)
alpha = 2 * numpy.pi * freq * times + phi
freq_line.data = amp * numpy.exp(1.j * alpha)
return freq_line | python | {
"resource": ""
} |
q31889 | matching_line | train | def matching_line(freq, data, tref, bin_size=1):
""" Find the parameter of the line with frequency 'freq' in the data.
Parameters
----------
freq: float
Frequency of the line to find in the data.
data: pycbc.types.TimeSeries
Data from which the line wants to be measured.
tref: float
Reference time for the frequency line.
bin_size: {1, float}, optional
Duration of the bins the data will be divided into for averaging.
Returns
-------
line_model: pycbc.types.TimeSeries
A timeseries containing the frequency line with the amplitude
and phase measured from the data.
"""
template_line = line_model(freq, data, tref=tref)
# Measure amplitude and phase of the line in the data
_, amp, phi = avg_inner_product(data, template_line,
bin_size=bin_size)
return line_model(freq, data, tref=tref, amp=amp, phi=phi) | python | {
"resource": ""
} |
q31890 | calibration_lines | train | def calibration_lines(freqs, data, tref=None):
""" Extract the calibration lines from strain data.
Parameters
----------
freqs: list
List containing the frequencies of the calibration lines.
data: pycbc.types.TimeSeries
Strain data to extract the calibration lines from.
tref: {None, float}, optional
Reference time for the line. If None, will use data.start_time.
Returns
-------
data: pycbc.types.TimeSeries
The strain data with the calibration lines removed.
"""
if tref is None:
tref = float(data.start_time)
for freq in freqs:
measured_line = matching_line(freq, data, tref,
bin_size=data.duration)
data -= measured_line.data.real
return data | python | {
"resource": ""
} |
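An end-to-end sketch using a synthetic stand-in for real strain (the noise level, line amplitude and durations are arbitrary):

    import numpy
    from pycbc.types import TimeSeries
    dt = 1.0 / 1024
    t = numpy.arange(int(64 / dt)) * dt
    strain = TimeSeries(numpy.random.normal(scale=1e-21, size=len(t)),
                        delta_t=dt, epoch=0)
    strain += 1e-20 * numpy.cos(2 * numpy.pi * 60.0 * t)
    cleaned = calibration_lines([60.0], strain)
    # the 60 Hz line in `cleaned` is strongly suppressed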
q31891 | compute_inj_optimal_snr | train | def compute_inj_optimal_snr(workflow, inj_file, precalc_psd_files, out_dir,
tags=None):
"Set up a job for computing optimal SNRs of a sim_inspiral file."
if tags is None:
tags = []
node = Executable(workflow.cp, 'optimal_snr', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--input-file', inj_file)
node.add_input_list_opt('--time-varying-psds', precalc_psd_files)
node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
workflow += node
return node.output_files[0] | python | {
"resource": ""
} |
q31892 | cut_distant_injections | train | def cut_distant_injections(workflow, inj_file, out_dir, tags=None):
"Set up a job for removing injections that are too distant to be seen"
if tags is None:
tags = []
node = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--input', inj_file)
node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
workflow += node
return node.output_files[0] | python | {
"resource": ""
} |
q31893 | read_sampling_params_from_config | train | def read_sampling_params_from_config(cp, section_group=None,
section='sampling_params'):
"""Reads sampling parameters from the given config file.
Parameters are read from the `[({section_group}_){section}]` section.
The options should list the variable args to transform; the parameters they
point to should list the parameters they are to be transformed to for
sampling. If a multiple parameters are transformed together, they should
be comma separated. Example:
.. code-block:: ini
[sampling_params]
mass1, mass2 = mchirp, logitq
spin1_a = logitspin1_a
Note that only the final sampling parameters should be listed, even if
multiple intermediate transforms are needed. (In the above example, a
transform is needed to go from mass1, mass2 to mchirp, q, then another one
needed to go from q to logitq.) These transforms should be specified
in separate sections; see ``transforms.read_transforms_from_config`` for
details.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
section_group : str, optional
Append `{section_group}_` to the section name. Default is None.
section : str, optional
The name of the section. Default is 'sampling_params'.
Returns
-------
sampling_params : list
The list of sampling parameters to use instead.
replaced_params : list
The list of variable args to replace in the sampler.
"""
if section_group is not None:
section_prefix = '{}_'.format(section_group)
else:
section_prefix = ''
section = section_prefix + section
replaced_params = set()
sampling_params = set()
for args in cp.options(section):
map_args = cp.get(section, args)
sampling_params.update(set(map(str.strip, map_args.split(','))))
replaced_params.update(set(map(str.strip, args.split(','))))
return list(sampling_params), list(replaced_params) | python | {
"resource": ""
} |
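A sketch of reading the docstring's example, assuming `cp` is an open WorkflowConfigParser over an ini file containing:

    [sampling_params]
    mass1, mass2 = mchirp, logitq
    spin1_a = logitspin1_a

    sampling_params, replaced = read_sampling_params_from_config(cp)
    # sampling_params -> ['mchirp', 'logitq', 'logitspin1_a']
    # replaced -> ['mass1', 'mass2', 'spin1_a']
    # (sets are used internally, so ordering is not guaranteed)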
q31894 | ModelStats.getstats | train | def getstats(self, names, default=numpy.nan):
"""Get the requested stats as a tuple.
If a requested stat is not an attribute (implying it hasn't been
stored), then the default value is returned for that stat.
Parameters
----------
names : list of str
The names of the stats to get.
default : float, optional
What to return if a requested stat is not an attribute of self.
Default is ``numpy.nan``.
Returns
-------
tuple
A tuple of the requested stats.
"""
return tuple(getattr(self, n, default) for n in names) | python | {
"resource": ""
} |
q31895 | ModelStats.getstatsdict | train | def getstatsdict(self, names, default=numpy.nan):
"""Get the requested stats as a dictionary.
If a requested stat is not an attribute (implying it hasn't been
stored), then the default value is returned for that stat.
Parameters
----------
names : list of str
The names of the stats to get.
default : float, optional
What to return if a requested stat is not an attribute of self.
Default is ``numpy.nan``.
Returns
-------
dict
A dictionary of the requested stats.
"""
return dict(zip(names, self.getstats(names, default=default))) | python | {
"resource": ""
} |
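A small sketch of the default-filling behaviour, assuming ModelStats instances accept plain attribute assignment:

    stats = ModelStats()
    stats.loglikelihood = -2.5
    stats.getstats(['loglikelihood', 'logprior'])
    # -> (-2.5, nan)
    stats.getstatsdict(['loglikelihood', 'logprior'], default=0.)
    # -> {'loglikelihood': -2.5, 'logprior': 0.0}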
q31896 | SamplingTransforms.logjacobian | train | def logjacobian(self, **params):
r"""Returns the log of the jacobian needed to transform pdfs in the
``variable_params`` parameter space to the ``sampling_params``
parameter space.
Let :math:`\mathbf{x}` be the set of variable parameters,
:math:`\mathbf{y} = f(\mathbf{x})` the set of sampling parameters, and
:math:`p_x(\mathbf{x})` a probability density function defined over
:math:`\mathbf{x}`.
The corresponding pdf in :math:`\mathbf{y}` is then:
.. math::
p_y(\mathbf{y}) =
p_x(\mathbf{x})\left|\mathrm{det}\,\mathbf{J}_{ij}\right|,
where :math:`\mathbf{J}_{ij}` is the Jacobian of the inverse transform
:math:`\mathbf{x} = g(\mathbf{y})`. This has elements:
.. math::
\mathbf{J}_{ij} = \frac{\partial g_i}{\partial{y_j}}
This function returns
:math:`\log \left|\mathrm{det}\,\mathbf{J}_{ij}\right|`.
Parameters
----------
\**params :
The keyword arguments should specify values for all of the variable
args and all of the sampling args.
Returns
-------
float :
The value of the jacobian.
"""
return numpy.log(abs(transforms.compute_jacobian(
params, self.sampling_transforms, inverse=True))) | python | {
"resource": ""
} |
q31897 | SamplingTransforms.apply | train | def apply(self, samples, inverse=False):
"""Applies the sampling transforms to the given samples.
Parameters
----------
samples : dict or FieldArray
The samples to apply the transforms to.
inverse : bool, optional
Whether to apply the inverse transforms (i.e., go from the sampling
args to the ``variable_params``). Default is False.
Returns
-------
dict or FieldArray
The transformed samples, along with the original samples.
"""
return transforms.apply_transforms(samples, self.sampling_transforms,
inverse=inverse) | python | {
"resource": ""
} |
q31898 | SamplingTransforms.from_config | train | def from_config(cls, cp, variable_params):
"""Gets sampling transforms specified in a config file.
Sampling parameters and the parameters they replace are read from the
``sampling_params`` section, if it exists. Sampling transforms are
read from the ``sampling_transforms`` section(s), using
``transforms.read_transforms_from_config``.
A ``ValueError`` is raised if no ``sampling_params`` section
exists in the config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of parameter names of the original variable params.
Returns
-------
SamplingTransforms
A sampling transforms class.
"""
if not cp.has_section('sampling_params'):
raise ValueError("no sampling_params section found in config file")
# get sampling transformations
sampling_params, replace_parameters = \
read_sampling_params_from_config(cp)
sampling_transforms = transforms.read_transforms_from_config(
cp, 'sampling_transforms')
logging.info("Sampling in {} in place of {}".format(
', '.join(sampling_params), ', '.join(replace_parameters)))
return cls(variable_params, sampling_params,
replace_parameters, sampling_transforms) | python | {
"resource": ""
} |
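A configuration sketch; the transform section naming and the transform name shown are assumptions about the conventions expected by transforms.read_transforms_from_config:

    [sampling_params]
    mass1, mass2 = mchirp, q

    [sampling_transforms-mchirp+q]
    ; assumed registered transform name
    name = mass1_mass2_to_mchirp_q

    stransforms = SamplingTransforms.from_config(cp, ['mass1', 'mass2', 'spin1z'])
    samples = stransforms.apply({'mass1': 30., 'mass2': 20., 'spin1z': 0.1})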
q31899 | BaseModel.sampling_params | train | def sampling_params(self):
"""Returns the sampling parameters.
If ``sampling_transforms`` is None, this is the same as the
``variable_params``.
"""
if self.sampling_transforms is None:
sampling_params = self.variable_params
else:
sampling_params = self.sampling_transforms.sampling_params
return sampling_params | python | {
"resource": ""
} |