_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def save_html_with_metadata(fig, filename, fig_kwds, kwds):
    """ Save a html output to file with metadata

    Parameters
    ----------
    fig : str or matplotlib figure
        Either pre-rendered html text, or a figure converted via mpld3.
    filename : str
        Name of the output html file.
    fig_kwds : dict
        Keyword arguments passed to ``mpld3.fig_to_html`` (ignored when
        ``fig`` is already a string).
    kwds : dict
        Metadata key/value pairs, embedded as hidden ``<div>`` elements.
    """
    if isinstance(fig, str):
        text = fig
    else:
        from mpld3 import fig_to_html
        text = fig_to_html(fig, **fig_kwds)

    # Use a context manager so the handle is closed even if a write fails
    # (the original code never closed the file).
    with open(filename, 'w') as f:
        for key, value in kwds.items():
            value = escape(value, escape_table)
            line = "<div class=pycbc-meta key=\"%s\" value=\"%s\"></div>" % (str(key), value)
            f.write(line)
        f.write(text)
"resource": ""
} |
def load_html_metadata(filename):
    """ Get metadata from html file

    Parameters
    ----------
    filename : str
        Name of the html file to read metadata divs from.

    Returns
    -------
    cp : ConfigParser
        Parser populated with the metadata found in the file, with a
        section named after the file's basename.
    """
    parser = MetaParser()
    # Close the file handle promptly (the original leaked it).
    with open(filename, 'r') as fobj:
        data = fobj.read()
    if 'pycbc-meta' in data:
        print("LOADING HTML FILE %s" % filename)
        parser.feed(data)
    cp = ConfigParser.ConfigParser(parser.metadata)
    cp.add_section(os.path.basename(filename))
    return cp
"resource": ""
} |
def save_png_with_metadata(fig, filename, fig_kwds, kwds):
    """ Save a matplotlib figure to a png with metadata

    The figure is first written to disk, then reopened with PIL so the
    metadata key/value pairs can be embedded as png text chunks.
    """
    from PIL import Image, PngImagePlugin
    fig.savefig(filename, **fig_kwds)

    image = Image.open(filename)
    info = PngImagePlugin.PngInfo()
    for key, value in kwds.items():
        info.add_text(str(key), str(value))
    image.save(filename, "png", pnginfo=info)
"resource": ""
} |
def save_fig_with_metadata(fig, filename, fig_kwds=None, **kwds):
    """ Save plot to file with metadata included. Kewords translate to metadata
    that is stored directly in the plot file. Limited format types available.

    Parameters
    ----------
    fig: matplotlib figure
        The matplotlib figure to save to the file
    filename: str
        Name of file to store the plot.
    fig_kwds: dict, optional
        Keyword arguments forwarded to the format-specific saver.

    Raises
    ------
    TypeError
        If the file extension is not in the supported saver table.
    """
    if fig_kwds is None:
        fig_kwds = {}
    extension = os.path.splitext(filename)[1]
    kwds['version'] = pycbc.version.git_verbose_msg
    # Narrow the try to the dispatch lookup only: the original wrapped the
    # saver call as well, so a KeyError raised *inside* a saver was
    # misreported as an unsupported extension.
    try:
        saver = _metadata_saver[extension]
    except KeyError:
        raise TypeError('Cannot save file %s with metadata, extension %s not '
                        'supported at this time' % (filename, extension))
    saver(fig, filename, fig_kwds, kwds)
"resource": ""
} |
def load_metadata_from_file(filename):
    """ Load the plot related metadata saved in a file

    Parameters
    ----------
    filename: str
        Name of file load metadata from.

    Returns
    -------
    cp: ConfigParser
        A configparser object containing the metadata

    Raises
    ------
    TypeError
        If the file extension is not in the supported loader table.
    """
    extension = os.path.splitext(filename)[1]
    # Narrow the try to the dispatch lookup only: the original also wrapped
    # the loader call, so an internal KeyError was misreported as an
    # unsupported extension.
    try:
        loader = _metadata_loader[extension]
    except KeyError:
        raise TypeError('Cannot read metadata from file %s, extension %s not '
                        'supported at this time' % (filename, extension))
    return loader(filename)
"resource": ""
} |
def get_code_version_numbers(cp):
    """Will extract the version information from the executables listed in
    the executable section of the supplied ConfigParser object.

    Returns
    --------
    dict
        A dictionary keyed by the executable name with values giving the
        version string for each executable.
    """
    versions = {}
    for _, path in cp.items('executables'):
        _, exe_name = os.path.split(path)
        # Remote bundles cannot be executed locally, so just record the URL
        if path.startswith('gsiftp://') or path.startswith('http://'):
            versions[exe_name] = "Using bundle downloaded from %s" % path
            continue
        if path.startswith('file://'):
            path = path[7:]
        try:
            version_string = subprocess.check_output(
                [path, '--version'], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            version_string = "Executable fails on %s --version" % (path)
        except OSError:
            version_string = "Executable doesn't seem to exist(!)"
        versions[exe_name] = version_string
    return versions
"resource": ""
} |
def initialize_page(title, style, script, header=None):
    """
    A function that returns a markup.py page object with the required html
    header.

    Parameters
    ----------
    title : str
        Page title, forwarded to ``page.init``.
    style : css argument for ``page.init``
        Stylesheet file(s) for the page header.
    script : script argument for ``page.init``
        Javascript file(s) for the page header.
    header : optional
        Extra raw header content forwarded to ``page.init``.

    Returns
    -------
    page : markup.page
        The initialised page object.
    """
    page = markup.page(mode="strict_html")
    # Disable markup's automatic escaping so raw html snippets can be added
    page._escape = False
    page.init(title=title, css=style, script=script, header=header)
    return page
"resource": ""
} |
def write_table(page, headers, data, cl=''):
    """
    Write table in html

    With ``cl='list'`` a two-column header/value table is produced, one
    pair per row; otherwise a header row is written followed by one row
    per entry of ``data``.
    """
    def _cell(tag, content):
        # Emit a single <th> or <td> element containing `content`.
        getattr(page, tag)()
        page.add('%s' % content)
        getattr(page, tag).close()

    page.table(class_=cl)
    if cl == 'list':
        for i, header in enumerate(headers):
            page.tr()
            _cell('th', header)
            _cell('td', data[i])
            page.tr.close()
    else:
        page.tr()
        for header in headers:
            _cell('th', header)
        page.tr.close()
        # Promote a single flat row to a list of rows
        if data and not re.search('list', str(type(data[0]))):
            data = [data]
        for row in data:
            page.tr()
            for item in row:
                _cell('td', item)
            page.tr.close()
    page.table.close()
    return page
"resource": ""
} |
def write_offsource(page, args, grbtag, onsource=False):
    """
    Write offsource SNR versus time plots to markup.page object page
    """
    # Choose the results directory matching the run configuration
    if args.time_slides:
        out_dir = 'ZEROLAG_ALL' if onsource else 'ZEROLAG_OFF'
    else:
        out_dir = 'ALL_TIMES' if onsource else 'OFFSOURCE'

    def _plot_cell(path, title):
        # Build one linked-image table cell for the plot at `path`.
        cell = markup.page()
        cell.a(href=path, title=title)
        cell.img(src=path)
        cell.a.close()
        return cell()

    th = ['Re-weighted SNR', 'Coherent SNR']
    td = [
        _plot_cell("%s/plots_clustered/GRB%s_bestnr_vs_time_noinj.png"
                   % (out_dir, grbtag),
                   "Detection statistic versus time"),
        _plot_cell("%s/plots_clustered/GRB%s_triggers_vs_time_noinj.png"
                   % (out_dir, grbtag),
                   "Coherent SNR versus time"),
    ]

    # One extra column per two-character ifo code in the ifo tag
    ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]
    for ifo in ifos:
        th.append('%s SNR' % ifo)
        td.append(_plot_cell(
            "%s/plots_clustered/GRB%s_%s_triggers_vs_time_noinj.png"
            % (out_dir, grbtag, ifo),
            "%s SNR versus time" % ifo))

    page = write_table(page, th, td)
    return page
"resource": ""
} |
def write_recovery(page, injList):
    """
    Write injection recovery plots to markup.page object page

    Parameters
    ----------
    page : markup.page
        Page object the table is appended to.
    injList : list of str
        Injection run tags; one column of plots is produced per tag.

    Returns
    -------
    page : markup.page
        The updated page object.
    """
    th = [''] + injList
    td = []
    plots = ['sky_error_time', 'sky_error_mchirp', 'sky_error_distance']
    text = {'sky_error_time': 'Sky error vs time',
            'sky_error_mchirp': 'Sky error vs mchirp',
            'sky_error_distance': 'Sky error vs distance'}
    for row in plots:
        pTag = text[row]
        d = [pTag]
        for inj in injList:
            # The original instantiated markup.page() twice in a row;
            # a single instance is sufficient.
            plot = markup.page()
            p = "%s/efficiency_OFFTRIAL_1/found_%s.png" % (inj, row)
            plot.a(href=p, title=pTag)
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)
    page = write_table(page, th, td)
    return page
"resource": ""
} |
def generate_hexagonal_lattice(maxv1, minv1, maxv2, minv2, mindist):
    """
    This function generates a 2-dimensional lattice of points using a hexagonal
    lattice.

    Parameters
    -----------
    maxv1 : float
        Largest value in the 1st dimension to cover
    minv1 : float
        Smallest value in the 1st dimension to cover
    maxv2 : float
        Largest value in the 2nd dimension to cover
    minv2 : float
        Smallest value in the 2nd dimension to cover
    mindist : float
        Maximum allowed mismatch between a point in the parameter space and the
        generated bank of points.

    Returns
    --------
    v1s : numpy.array
        Array of positions in the first dimension
    v2s : numpy.array
        Array of positions in the second dimension
    """
    if minv1 > maxv1:
        raise ValueError("Invalid input to function.")
    if minv2 > maxv2:
        raise ValueError("Invalid input to function.")
    # Seed point at the lower corner of the space
    v1s = [minv1]
    v2s = [minv2]
    initPoint = [minv1, minv2]
    # First row of the lattice, spaced by sqrt(3*mindist) in dimension 1
    initLine = [initPoint]
    tmpv1 = minv1
    while tmpv1 < maxv1:
        tmpv1 = tmpv1 + (3 * mindist)**(0.5)
        initLine.append([tmpv1, minv2])
        v1s.append(tmpv1)
        v2s.append(minv2)
    initLine = numpy.array(initLine)
    # Second row, offset by half a spacing horizontally and 1.5*sqrt(mindist)
    # vertically, giving the hexagonal packing
    initLine2 = copy.deepcopy(initLine)
    initLine2[:, 0] += 0.5 * (3*mindist)**0.5
    initLine2[:, 1] += 1.5 * (mindist)**0.5
    # NOTE: the original used xrange loops here, which are Python-2-only;
    # extend() over the array columns appends the same values.
    v1s.extend(initLine2[:, 0])
    v2s.extend(initLine2[:, 1])
    # Stack translated copies of the two rows upwards until the second
    # dimension is covered
    tmpv2_1 = initLine[0, 1]
    tmpv2_2 = initLine2[0, 1]
    while tmpv2_1 < maxv2 and tmpv2_2 < maxv2:
        tmpv2_1 = tmpv2_1 + 3.0 * (mindist)**0.5
        tmpv2_2 = tmpv2_2 + 3.0 * (mindist)**0.5
        initLine[:, 1] = tmpv2_1
        initLine2[:, 1] = tmpv2_2
        v1s.extend(initLine[:, 0])
        v2s.extend(initLine[:, 1])
        v1s.extend(initLine2[:, 0])
        v2s.extend(initLine2[:, 1])
    v1s = numpy.array(v1s)
    v2s = numpy.array(v2s)
    return v1s, v2s
"resource": ""
} |
def newsnr_sgveto(snr, bchisq, sgchisq):
    """ Combined SNR derived from NewSNR and Sine-Gaussian Chisq"""
    nsnr = numpy.array(newsnr(snr, bchisq), ndmin=1)
    sgchisq = numpy.array(sgchisq, ndmin=1)
    # Downweight triggers whose sine-Gaussian chisq exceeds 4
    mask = numpy.array(sgchisq > 4, ndmin=1)
    if len(mask):
        nsnr[mask] = nsnr[mask] / (sgchisq[mask] / 4.0) ** 0.5
    # Scalar in, scalar out; array in, array out.
    return nsnr if hasattr(snr, '__len__') else nsnr[0]
"resource": ""
} |
def newsnr_sgveto_psdvar(snr, bchisq, sgchisq, psd_var_val):
    """ Combined SNR derived from NewSNR, Sine-Gaussian Chisq and PSD
    variation statistic """
    nsnr = numpy.array(newsnr_sgveto(snr, bchisq, sgchisq), ndmin=1)
    psd_var_val = numpy.array(psd_var_val, ndmin=1)
    # Only triggers with elevated PSD variation are downweighted
    high_var = psd_var_val >= 1.8
    nsnr[high_var] = nsnr[high_var] / numpy.sqrt(psd_var_val[high_var])
    # Scalar in, scalar out; array in, array out.
    return nsnr if hasattr(snr, '__len__') else nsnr[0]
"resource": ""
} |
def get_newsnr_sgveto(trigs):
    """
    Calculate newsnr re-weigthed by the sine-gaussian veto

    Parameters
    ----------
    trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
        Dictionary-like object holding single detector trigger information.
        'chisq_dof', 'snr', 'sg_chisq' and 'chisq' are required keys

    Returns
    -------
    numpy.ndarray
        Array of newsnr values
    """
    # Reduce the chisq by its number of degrees of freedom before reweighting
    dof = 2. * trigs['chisq_dof'][:] - 2.
    reduced_chisq = trigs['chisq'][:] / dof
    nsnr_sg = newsnr_sgveto(trigs['snr'][:], reduced_chisq,
                            trigs['sg_chisq'][:])
    return numpy.array(nsnr_sg, ndmin=1, dtype=numpy.float32)
"resource": ""
} |
def get_newsnr_sgveto_psdvar(trigs):
    """
    Calculate newsnr re-weighted by the sine-gaussian veto and psd variation
    statistic

    Parameters
    ----------
    trigs: dict of numpy.ndarrays
        Dictionary holding single detector trigger information.
        'chisq_dof', 'snr', 'chisq' and 'psd_var_val' are required keys

    Returns
    -------
    numpy.ndarray
        Array of newsnr values
    """
    # Reduce the chisq by its number of degrees of freedom before reweighting
    dof = 2. * trigs['chisq_dof'][:] - 2.
    reduced_chisq = trigs['chisq'][:] / dof
    nsnr_sg_psd = newsnr_sgveto_psdvar(trigs['snr'][:], reduced_chisq,
                                       trigs['sg_chisq'][:],
                                       trigs['psd_var_val'][:])
    return numpy.array(nsnr_sg_psd, ndmin=1, dtype=numpy.float32)
"resource": ""
} |
def drop_trailing_zeros(num):
    """
    Drops the trailing zeros in a float that is printed.
    """
    # '%f' always yields exactly one decimal point, so after stripping
    # trailing zeros at most a single dangling '.' remains.
    return ('%f' % (num,)).rstrip('0').rstrip('.')
"resource": ""
} |
def get_signum(val, err, max_sig=numpy.inf):
    """
    Given an error, returns a string for val formated to the appropriate
    number of significant figures.
    """
    coeff, pwr = ('%e' % err).split('e')
    if pwr.startswith('-'):
        # error < 1: keep pwr+1 digits after the decimal point
        ndigits = int(pwr[1:])
        if round(float(coeff)) == 10.:
            ndigits -= 1
        ndigits = min(ndigits, max_sig)
        return ('%.' + str(ndigits + 1) + 'f') % val
    # error >= 1: round away the insignificant integer digits
    ndigits = int(pwr[1:])
    if round(float(coeff)) == 10.:
        ndigits += 1
    # if the error is large, we can sometimes get 0;
    # adjust the round until we don't get 0 (assuming the actual
    # value isn't 0)
    return_val = round(val, -ndigits + 1)
    if val != 0.:
        loop_count = 0
        max_recursion = 100
        while return_val == 0.:
            ndigits -= 1
            return_val = round(val, -ndigits + 1)
            loop_count += 1
            if loop_count > max_recursion:
                raise ValueError("Maximum recursion depth hit! Input " +
                                 "values are: val = %f, err = %f" % (val, err))
    return drop_trailing_zeros(return_val)
"resource": ""
} |
def from_cli_single_ifo(opt, ifo, **kwargs):
    """
    Get the strain for a single ifo when using the multi-detector CLI
    """
    # Reduce the multi-ifo options to this ifo, then defer to from_cli
    return from_cli(copy_opts_for_single_ifo(opt, ifo), **kwargs)
"resource": ""
} |
def from_cli_multi_ifos(opt, ifos, **kwargs):
    """
    Get the strain for all ifos when using the multi-detector CLI
    """
    # One strain object per detector, keyed by ifo name
    return {ifo: from_cli_single_ifo(opt, ifo, **kwargs) for ifo in ifos}
"resource": ""
} |
def gate_data(data, gate_params):
    """Apply a set of gating windows to a time series.

    Each gating window is
    defined by a central time, a given duration (centered on the given
    time) to zero out, and a given duration of smooth tapering on each side of
    the window. The window function used for tapering is a Tukey window.

    Parameters
    ----------
    data : TimeSeries
        The time series to be gated.
    gate_params : list
        List of parameters for the gating windows. Each element should be a
        list or tuple with 3 elements: the central time of the gating window,
        the half-duration of the portion to zero out, and the duration of the
        Tukey tapering on each side. All times in seconds. The total duration
        of the data affected by one gating window is thus twice the second
        parameter plus twice the third parameter.

    Returns
    -------
    data: TimeSeries
        The gated time series.
    """
    def inverted_tukey(M, n_pad):
        # Complement of a Tukey window: cosine taper 1->0 over n_pad
        # samples, zeros in the middle, then taper 0->1.
        midlen = M - 2*n_pad
        if midlen < 0:
            raise ValueError("No zeros left after applying padding.")
        padarr = 0.5*(1.+numpy.cos(numpy.pi*numpy.arange(n_pad)/n_pad))
        return numpy.concatenate((padarr,numpy.zeros(midlen),padarr[::-1]))

    sample_rate = 1./data.delta_t
    # NOTE(review): `temp` aliases the underlying array, so the input
    # TimeSeries is modified in place as well as returned.
    temp = data.data
    for glitch_time, glitch_width, pad_width in gate_params:
        # Window start/end times relative to the start of the data
        t_start = glitch_time - glitch_width - pad_width - data.start_time
        t_end = glitch_time + glitch_width + pad_width - data.start_time
        if t_start > data.duration or t_end < 0.:
            continue # Skip gate segments that don't overlap
        win_samples = int(2*sample_rate*(glitch_width+pad_width))
        pad_samples = int(sample_rate*pad_width)
        window = inverted_tukey(win_samples, pad_samples)
        offset = int(t_start * sample_rate)
        # Clip the window to the portion that lies inside the data
        idx1 = max(0, -offset)
        idx2 = min(len(window), len(data)-offset)
        temp[idx1+offset:idx2+offset] *= window[idx1:idx2]
    return data
"resource": ""
} |
def fourier_segments(self):
    """ Return a list of the FFT'd segments.

    Return the list of FrequencySeries. Additional properties are
    added that describe the strain segment. The property 'analyze'
    is a slice corresponding to the portion of the time domain equivelant
    of the segment to analyze for triggers. The value 'cumulative_index'
    indexes from the beginning of the original strain series.
    """
    # Result is memoised: only computed on first call
    if not self._fourier_segments:
        self._fourier_segments = []
        for seg_slice, ana in zip(self.segment_slices, self.analyze_slices):
            if seg_slice.start >= 0 and seg_slice.stop <= len(self.strain):
                freq_seg = make_frequency_series(self.strain[seg_slice])
            # Assume that we cannot have a case where we both zero-pad on
            # both sides
            elif seg_slice.start < 0:
                # Segment extends before the data: zero-pad at the front
                strain_chunk = self.strain[:seg_slice.stop]
                strain_chunk.prepend_zeros(-seg_slice.start)
                freq_seg = make_frequency_series(strain_chunk)
            elif seg_slice.stop > len(self.strain):
                # Segment extends past the data: zero-pad at the back
                strain_chunk = self.strain[seg_slice.start:]
                strain_chunk.append_zeros(seg_slice.stop - len(self.strain))
                freq_seg = make_frequency_series(strain_chunk)
            # Annotate the segment with bookkeeping attributes
            freq_seg.analyze = ana
            freq_seg.cumulative_index = seg_slice.start + ana.start
            freq_seg.seg_slice = seg_slice
            self._fourier_segments.append(freq_seg)
    return self._fourier_segments
"resource": ""
} |
def end_time(self):
    """ Return the end time of the current valid segment of data """
    # Exclude the corrupted tail of the buffer from the valid duration
    valid_samples = len(self.strain) - self.total_corruption
    return float(self.strain.start_time + valid_samples / self.sample_rate)
"resource": ""
} |
def add_hard_count(self):
    """ Reset the countdown timer, so that we don't analyze data long enough
    to generate a new PSD.
    """
    # Seconds needed to flush the corrupted region plus a full PSD estimate
    required = self.total_corruption / self.sample_rate + self.psd_duration
    self.wait_duration = int(numpy.ceil(required))
    self.invalidate_psd()
"resource": ""
} |
def recalculate_psd(self):
    """ Recalculate the psd

    Returns
    -------
    bool
        False if the new estimate differs from the previous one by more
        than the configured abort threshold, True otherwise.
    """
    seg_len = self.sample_rate * self.psd_segment_length
    e = len(self.strain)
    s = e - ((self.psd_samples + 1) * self.psd_segment_length / 2) * self.sample_rate
    psd = pycbc.psd.welch(self.strain[s:e], seg_len=seg_len, seg_stride=seg_len / 2)

    # Cache an effective horizon distance on the psd for quick comparison
    psd.dist = spa_distance(psd, 1.4, 1.4, self.low_frequency_cutoff) * pycbc.DYN_RANGE_FAC

    # If the new psd is similar to the old one, don't replace it
    if self.psd and self.psd_recalculate_difference:
        if abs(self.psd.dist - psd.dist) / self.psd.dist < self.psd_recalculate_difference:
            logging.info("Skipping recalculation of %s PSD, %s-%s",
                         self.detector, self.psd.dist, psd.dist)
            return True

    # If the new psd is *really* different than the old one, return an error
    if self.psd and self.psd_abort_difference:
        if abs(self.psd.dist - psd.dist) / self.psd.dist > self.psd_abort_difference:
            logging.info("%s PSD is CRAZY, aborting!!!!, %s-%s",
                         self.detector, self.psd.dist, psd.dist)
            self.psd = psd
            self.psds = {}
            return False

    # The new estimate replaces the current one; invalidate the cached
    # interpolated PSDs
    self.psd = psd
    self.psds = {}
    logging.info("Recalculating %s PSD, %s", self.detector, psd.dist)
    return True
"resource": ""
} |
def overwhitened_data(self, delta_f):
    """ Return overwhitened data

    Parameters
    ----------
    delta_f: float
        The sample step to generate overwhitened frequency domain data for

    Returns
    -------
    htilde: FrequencySeries
        Overwhited strain data
    """
    # we haven't already computed htilde for this delta_f
    if delta_f not in self.segments:
        buffer_length = int(1.0 / delta_f)
        e = len(self.strain)
        s = int(e - buffer_length * self.sample_rate - self.reduced_pad * 2)
        fseries = make_frequency_series(self.strain[s:e])

        # we haven't calculated a resample psd for this delta_f
        if delta_f not in self.psds:
            # psdt: inverse-spectrum-truncated PSD at the *data* resolution,
            # used for the overwhitening division below
            psdt = pycbc.psd.interpolate(self.psd, fseries.delta_f)
            psdt = pycbc.psd.inverse_spectrum_truncation(psdt,
                   int(self.sample_rate * self.psd_inverse_length),
                   low_frequency_cutoff=self.low_frequency_cutoff)
            psdt._delta_f = fseries.delta_f

            # psd: the same estimate at the requested delta_f, attached to
            # the returned segment
            psd = pycbc.psd.interpolate(self.psd, delta_f)
            psd = pycbc.psd.inverse_spectrum_truncation(psd,
                   int(self.sample_rate * self.psd_inverse_length),
                   low_frequency_cutoff=self.low_frequency_cutoff)

            psd.psdt = psdt
            self.psds[delta_f] = psd

        psd = self.psds[delta_f]
        fseries /= psd.psdt

        # trim ends of strain
        if self.reduced_pad != 0:
            overwhite = TimeSeries(zeros(e-s, dtype=self.strain.dtype),
                                   delta_t=self.strain.delta_t)
            pycbc.fft.ifft(fseries, overwhite)
            overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad]
            taper_window = self.trim_padding / 2.0 / overwhite.sample_rate
            gate_params = [(overwhite2.start_time, 0., taper_window),
                           (overwhite2.end_time, 0., taper_window)]
            gate_data(overwhite2, gate_params)
            # NOTE(review): len(overwhite2) / 2 + 1 relies on integer
            # division (Python 2 semantics) — confirm before porting.
            fseries_trimmed = FrequencySeries(zeros(len(overwhite2) / 2 + 1,
                                              dtype=fseries.dtype), delta_f=delta_f)
            pycbc.fft.fft(overwhite2, fseries_trimmed)
            fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t
        else:
            fseries_trimmed = fseries

        fseries_trimmed.psd = psd
        self.segments[delta_f] = fseries_trimmed

    stilde = self.segments[delta_f]
    return stilde
"resource": ""
} |
def near_hwinj(self):
    """Check that the current set of triggers could be influenced by
    a hardware injection.
    """
    if not self.state:
        return False
    # An extent not flagged NO_HWINJ means an injection may be present
    return not self.state.is_extent_valid(self.start_time, self.blocksize,
                                          pycbc.frame.NO_HWINJ)
"resource": ""
} |
def advance(self, blocksize, timeout=10):
    """Advanced buffer blocksize seconds.

    Add blocksize seconds more to the buffer, push blocksize seconds
    from the beginning.

    Parameters
    ----------
    blocksize: int
        The number of seconds to attempt to read from the channel
    timeout: int, optional
        Seconds to wait for the frame data before giving up.

    Returns
    -------
    status: boolean
        Returns True if this block is analyzable.
    """
    ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout)
    self.blocksize = blocksize

    # We have given up so there is no time series
    if ts is None:
        logging.info("%s frame is late, giving up", self.detector)
        self.null_advance_strain(blocksize)
        if self.state:
            self.state.null_advance(blocksize)
        if self.dq:
            self.dq.null_advance(blocksize)
        return False

    # We collected some data so we are closer to being able to analyze data
    self.wait_duration -= blocksize

    # If the data we got was invalid, reset the counter on how much to collect
    # This behavior corresponds to how we handle CAT1 vetoes
    if self.state and self.state.advance(blocksize) is False:
        self.add_hard_count()
        self.null_advance_strain(blocksize)
        if self.dq:
            self.dq.null_advance(blocksize)
        logging.info("%s time has invalid data, resetting buffer",
                     self.detector)
        return False

    # Also advance the dq vector in lockstep
    if self.dq:
        self.dq.advance(blocksize)

    # New raw data invalidates any cached overwhitened segments
    self.segments = {}

    # only condition with the needed raw data so we can continuously add
    # to the existing result

    # Precondition
    sample_step = int(blocksize * self.sample_rate)
    csize = sample_step + self.corruption * 2
    start = len(self.raw_buffer) - csize * self.factor
    strain = self.raw_buffer[start:]

    strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency,
                                       self.highpass_samples,
                                       beta=self.beta)
    strain = (strain * self.dyn_range_fac).astype(numpy.float32)

    strain = pycbc.filter.resample_to_delta_t(strain,
                                              1.0/self.sample_rate, method='ldas')

    # remove corruption at beginning
    strain = strain[self.corruption:]

    # taper beginning if needed
    if self.taper_immediate_strain:
        logging.info("Tapering start of %s strain block", self.detector)
        strain = gate_data(strain, [(strain.start_time, 0., self.autogating_pad)])
        self.taper_immediate_strain = False

    # Stitch into continuous stream
    self.strain.roll(-sample_step)
    self.strain[len(self.strain) - csize + self.corruption:] = strain[:]
    self.strain.start_time += blocksize

    # apply gating if need be: NOT YET IMPLEMENTED

    # Estimate an initial PSD once enough clean data has accumulated
    if self.psd is None and self.wait_duration <=0:
        self.recalculate_psd()

    if self.wait_duration > 0:
        return False
    else:
        return True
"resource": ""
} |
def from_string(psd_name, length, delta_f, low_freq_cutoff):
    """Generate a frequency series containing a LALSimulation PSD specified
    by name.

    Parameters
    ----------
    psd_name : string
        PSD name as found in LALSimulation, minus the SimNoisePSD prefix.
    length : int
        Length of the frequency series in samples.
    delta_f : float
        Frequency resolution of the frequency series.
    low_freq_cutoff : float
        Frequencies below this value are set to zero.

    Returns
    -------
    psd : FrequencySeries
        The generated frequency series.
    """
    # check if valid PSD model
    if psd_name not in get_psd_model_list():
        raise ValueError(psd_name + ' not found among analytical '
                         'PSD functions.')

    # if PSD model is in LALSimulation
    if psd_name in get_lalsim_psd_list():
        lalseries = lal.CreateREAL8FrequencySeries(
            '', lal.LIGOTimeGPS(0), 0, delta_f, lal.DimensionlessUnit, length)
        try:
            # Prefer the suffixed variant, which is a PSD-function generator
            func = lalsimulation.__dict__[
                _name_prefix + psd_name + _name_suffix]
        except KeyError:
            # No suffixed variant: the plain function fills the series itself
            func = lalsimulation.__dict__[_name_prefix + psd_name]
            func(lalseries, low_freq_cutoff)
        else:
            # try succeeded: evaluate the generator via SimNoisePSD
            lalsimulation.SimNoisePSD(lalseries, 0, func)
        psd = FrequencySeries(lalseries.data.data, delta_f=delta_f)

    # if PSD model is coded in PyCBC
    else:
        func = pycbc_analytical_psds[psd_name]
        psd = func(length, delta_f, low_freq_cutoff)

    # zero-out content below low-frequency cutoff
    kmin = int(low_freq_cutoff / delta_f)
    psd.data[:kmin] = 0

    return psd
"resource": ""
} |
def flat_unity(length, delta_f, low_freq_cutoff):
    """ Returns a FrequencySeries of ones above the low_frequency_cutoff.

    Parameters
    ----------
    length : int
        Length of output Frequencyseries.
    delta_f : float
        Frequency step for output FrequencySeries.
    low_freq_cutoff : int
        Low-frequency cutoff for output FrequencySeries.

    Returns
    -------
    FrequencySeries
        Returns a FrequencySeries containing the unity PSD model.
    """
    unity = numpy.ones(length)
    fseries = FrequencySeries(unity, delta_f=delta_f)
    # Zero out every bin below the cutoff frequency
    kmin = int(low_freq_cutoff / fseries.delta_f)
    fseries.data[:kmin] = 0
    return fseries
"resource": ""
} |
def rough_time_estimate(m1, m2, flow, fudge_length=1.1, fudge_min=0.02):
    """ A very rough estimate of the duration of the waveform.

    An estimate of the waveform duration starting from flow. This is intended
    to be fast but not necessarily accurate. It should be an overestimate of
    the length. It is derived from a simplification of the 0PN post-newtonian
    terms and includes a fudge factor for possible ringdown, etc.

    Parameters
    ----------
    m1: float
        mass of first component object in solar masses
    m2: float
        mass of second component object in solar masses
    flow: float
        starting frequency of the waveform
    fudge_length: optional, {1.1, float}
        Factor to multiply length estimate by to ensure it is a convservative
        value
    fudge_min: optional, {0.02, float}
        Minimum signal duration that can be returned. This should be long
        enough to encompass the ringdown and errors in the precise end time.

    Returns
    -------
    time: float
        Time from flow untill the end of the waveform
    """
    m = m1 + m2
    msun = m * lal.MTSUN_SI
    # 0PN chirp-time expression
    t = 5.0 / 256.0 * m * m * msun / (m1 * m2) / \
        (numpy.pi * msun * flow) ** (8.0 / 3.0)

    # fudge factoriness: fall back to a fixed small duration if the
    # estimate came out negative, otherwise pad and scale it
    return .022 if t < 0 else (t + fudge_min) * fudge_length
"resource": ""
} |
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None):
    """Return the frequencies needed to compress a waveform with the given
    chirp mass. This is based on the estimate in rough_time_estimate.

    Parameters
    ----------
    m1: float
        mass of first component object in solar masses
    m2: float
        mass of second component object in solar masses
    fmin : float
        The starting frequency of the compressed waveform.
    fmax : float
        The ending frequency of the compressed waveform.
    min_seglen : float
        The inverse of this gives the maximum frequency step that is used.
    df_multiple : {None, float}
        Make the compressed sampling frequencies a multiple of the given value.
        If None provided, the returned sample points can have any floating
        point value.

    Returns
    -------
    array
        The frequencies at which to evaluate the compressed waveform.
    """
    sample_points = []
    f = fmin
    while f < fmax:
        if df_multiple is not None:
            f = int(f/df_multiple)*df_multiple
        sample_points.append(f)
        f += 1.0 / rough_time_estimate(m1, m2, f, fudge_min=min_seglen)
    # always include the end point; the emptiness check also guards the
    # degenerate case fmin >= fmax, where the loop never runs and the
    # original code raised IndexError on sample_points[-1]
    if not sample_points or sample_points[-1] < fmax:
        sample_points.append(fmax)
    return numpy.array(sample_points)
"resource": ""
} |
def vecdiff(htilde, hinterp, sample_points, psd=None):
    """Computes a statistic indicating between which sample points a waveform
    and the interpolated waveform differ the most.
    """
    # One difference statistic per interval between adjacent sample points
    nseg = sample_points.size - 1
    vecdiffs = numpy.zeros(nseg, dtype=float)
    for kk in range(nseg):
        vecdiffs[kk] = abs(_vecdiff(htilde, hinterp, sample_points[kk],
                                    sample_points[kk+1], psd=psd))
    return vecdiffs
"resource": ""
} |
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None,
                  f_lower=None, interpolation='inline_linear'):
    """Decompresses an FD waveform using the given amplitude, phase, and the
    frequencies at which they are sampled at.

    Parameters
    ----------
    amp : array
        The amplitude of the waveform at the sample frequencies.
    phase : array
        The phase of the waveform at the sample frequencies.
    sample_frequencies : array
        The frequency (in Hz) of the waveform at the sample frequencies.
    out : {None, FrequencySeries}
        The output array to save the decompressed waveform to. If this contains
        slots for frequencies > the maximum frequency in sample_frequencies,
        the rest of the values are zeroed. If not provided, must provide a df.
    df : {None, float}
        The frequency step to use for the decompressed waveform. Must be
        provided if out is None.
    f_lower : {None, float}
        The frequency to start the decompression at. If None, will use whatever
        the lowest frequency is in sample_frequencies. All values at
        frequencies less than this will be 0 in the decompressed waveform.
    interpolation : {'inline_linear', str}
        The interpolation to use for the amplitude and phase. Default is
        'inline_linear'. If 'inline_linear' a custom interpolater is used.
        Otherwise, ``scipy.interpolate.interp1d`` is used; for other options,
        see possible values for that function's ``kind`` argument.

    Returns
    -------
    out : FrequencySeries
        If out was provided, writes to that array. Otherwise, a new
        FrequencySeries with the decompressed waveform.
    """
    # amp, phase, and sample_frequencies must share a single precision
    precision = _precision_map[sample_frequencies.dtype.name]
    if _precision_map[amp.dtype.name] != precision or \
            _precision_map[phase.dtype.name] != precision:
        raise ValueError("amp, phase, and sample_points must all have the "
                         "same precision")

    if out is None:
        if df is None:
            raise ValueError("Either provide output memory or a df")
        hlen = int(numpy.ceil(sample_frequencies.max()/df+1))
        out = FrequencySeries(numpy.zeros(hlen,
                              dtype=_complex_dtypes[precision]), copy=False,
                              delta_f=df)
    else:
        # check for precision compatibility
        if out.precision == 'double' and precision == 'single':
            raise ValueError("cannot cast single precision to double")
        df = out.delta_f
        hlen = len(out)

    if f_lower is None:
        imin = 0  # pylint:disable=unused-variable
        f_lower = sample_frequencies[0]
        start_index = 0
    else:
        if f_lower >= sample_frequencies.max():
            raise ValueError("f_lower is > than the maximum sample frequency")
        if f_lower < sample_frequencies.min():
            raise ValueError("f_lower is < than the minimum sample frequency")
        # index of the last sample point at or below f_lower
        imin = int(numpy.searchsorted(sample_frequencies, f_lower,
                   side='right')) - 1  # pylint:disable=unused-variable
        start_index = int(numpy.ceil(f_lower/df))
    if start_index >= hlen:
        raise ValueError('requested f_lower >= largest frequency in out')

    # interpolate the amplitude and the phase
    if interpolation == "inline_linear":
        # Call the scheme-dependent function
        inline_linear_interp(amp, phase, sample_frequencies, out,
                             df, f_lower, imin, start_index)
    else:
        # use scipy for fancier interpolation
        sample_frequencies = numpy.array(sample_frequencies)
        amp = numpy.array(amp)
        phase = numpy.array(phase)
        outfreq = out.sample_frequencies.numpy()
        amp_interp = interpolate.interp1d(sample_frequencies, amp,
                                          kind=interpolation,
                                          bounds_error=False,
                                          fill_value=0.,
                                          assume_sorted=True)
        phase_interp = interpolate.interp1d(sample_frequencies, phase,
                                            kind=interpolation,
                                            bounds_error=False,
                                            fill_value=0.,
                                            assume_sorted=True)
        A = amp_interp(outfreq)
        phi = phase_interp(outfreq)
        # recombine amplitude and phase into the complex series
        out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi)
    return out
"resource": ""
} |
def decompress(self, out=None, df=None, f_lower=None, interpolation=None):
    """Decompress self.

    Parameters
    ----------
    out : {None, FrequencySeries}
        Write the decompressed waveform to the given frequency series. The
        decompressed waveform will have the same `delta_f` as `out`.
        Either this or `df` must be provided.
    df : {None, float}
        Decompress the waveform such that its `delta_f` has the given
        value. Either this or `out` must be provided.
    f_lower : {None, float}
        The starting frequency at which to decompress the waveform. Cannot
        be less than the minimum frequency in `sample_points`. If `None`
        provided, will default to the minimum frequency in `sample_points`.
    interpolation : {None, str}
        The interpolation to use for decompressing the waveform. If `None`
        provided, will default to `self.interpolation`.

    Returns
    -------
    FrequencySeries
        The decompressed waveform.
    """
    # Fill in the defaults from the stored compression parameters
    if f_lower is None:
        f_lower = self.sample_points.min()
    interp = self.interpolation if interpolation is None else interpolation
    return fd_decompress(self.amplitude, self.phase, self.sample_points,
                         out=out, df=df, f_lower=f_lower,
                         interpolation=interp)
"resource": ""
} |
def write_to_hdf(self, fp, template_hash, root=None, precision=None):
    """Write the compressed waveform to the given hdf file handler.

    The waveform is written to:
    `fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`,
    where `param` is the `sample_points`, `amplitude`, and `phase`. The
    `interpolation`, `tolerance`, `mismatch` and `precision` are saved
    to the group's attributes.

    Parameters
    ----------
    fp : h5py.File
        An open hdf file to write the compressed waveform to.
    template_hash : {hash, int, str}
        A hash, int, or string to map the template to the waveform.
    root : {None, str}
        Put the `compressed_waveforms` group in the given directory in the
        hdf file. If `None`, `compressed_waveforms` will be the root
        directory.
    precision : {None, str}
        Cast the saved parameters to the given precision before saving. If
        None provided, will use whatever their current precision is. This
        will raise an error if the parameters have single precision but the
        requested precision is double.
    """
    root = '' if root is None else '%s/'%(root)
    if precision is None:
        precision = self.precision
    elif precision == 'double' and self.precision == 'single':
        raise ValueError("cannot cast single precision to double")
    outdtype = _real_dtypes[precision]
    group = '%scompressed_waveforms/%s' %(root, str(template_hash))
    # Store the three compressed arrays as datasets under the group
    for param in ['amplitude', 'phase', 'sample_points']:
        fp['%s/%s' %(group, param)] = self._get(param).astype(outdtype)
    # Record the compression settings as group attributes
    attrs = fp[group].attrs
    attrs['mismatch'] = self.mismatch
    attrs['interpolation'] = self.interpolation
    attrs['tolerance'] = self.tolerance
    attrs['precision'] = precision
"resource": ""
} |
def from_hdf(cls, fp, template_hash, root=None, load_to_memory=True,
             load_now=False):
    """Load a compressed waveform from an open hdf file.

    The waveform is read from
    ``fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`` for
    each ``param`` in (``sample_points``, ``amplitude``, ``phase``).

    Parameters
    ----------
    fp : h5py.File
        An open hdf file to read the compressed waveform from.
    template_hash : hash, int, or str
        The id of the waveform.
    root : str, optional
        Directory inside the hdf file containing the
        ``compressed_waveforms`` group; assumed top level if None.
    load_to_memory : bool, optional
        Value for the ``load_to_memory`` attribute of the returned
        instance. Default True.
    load_now : bool, optional
        If True, immediately read the datasets into memory rather than
        keeping lazy hdf references. Default False.

    Returns
    -------
    CompressedWaveform
        An instance with parameters loaded from the hdf file.
    """
    prefix = '' if root is None else '%s/' % (root)
    fp_group = fp['%scompressed_waveforms/%s' % (prefix, str(template_hash))]
    sample_points = fp_group['sample_points']
    amp = fp_group['amplitude']
    phase = fp_group['phase']
    if load_now:
        # Slice copies the hdf datasets into in-memory arrays.
        sample_points = sample_points[:]
        amp = amp[:]
        phase = phase[:]
    return cls(sample_points, amp, phase,
               interpolation=fp_group.attrs['interpolation'],
               tolerance=fp_group.attrs['tolerance'],
               mismatch=fp_group.attrs['mismatch'],
               precision=fp_group.attrs['precision'],
               load_to_memory=load_to_memory)
q31636 | SingleDetAutoChisq.values | train | def values(self, sn, indices, template, psd, norm, stilde=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
"""
Calculate the auto-chisq at the specified indices.
Parameters
-----------
sn : Array[complex]
SNR time series of the template for which auto-chisq is being
computed. Provided unnormalized.
indices : Array[int]
List of points at which to calculate auto-chisq
template : Pycbc template object
The template for which we are calculating auto-chisq
psd : Pycbc psd object
The PSD of the data being analysed
norm : float
The normalization factor to apply to sn
stilde : Pycbc data object, needed if using reverse-template
The data being analysed. Only needed if using reverse-template,
otherwise ignored
low_frequency_cutoff : float
The lower frequency to consider in matched-filters
high_frequency_cutoff : float
The upper frequency to consider in matched-filters
"""
# If the test is disabled or there are no trigger indices, nothing is
# computed and the function implicitly returns None.
if self.do and (len(indices) > 0):
htilde = make_frequency_series(template)
# Check if we need to recompute the autocorrelation
# The autocorrelation is cached per (template, psd) object-id pair.
key = (id(template), id(psd))
if key != self._autocor_id:
logging.info("Calculating autocorrelation")
if not self.reverse_template:
Pt, _, P_norm = matched_filter_core(htilde,
htilde, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
Pt = Pt * (1./ Pt[0])
self._autocor = Array(Pt, copy=True)
else:
Pt, _, P_norm = matched_filter_core(htilde.conj(),
htilde, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
# T-reversed template has same norm as forward template
# so we can normalize using that
# FIXME: Here sigmasq has to be cast to a float or the
# code is really slow ... why??
norm_fac = P_norm / float(((template.sigmasq(psd))**0.5))
Pt *= norm_fac
self._autocor = Array(Pt, copy=True)
self._autocor_id = key
logging.info("...Calculating autochisquare")
# Apply the matched-filter normalization before computing chi-square.
sn = sn*norm
# For time-reversed templates the chi-square is computed against the
# SNR of the reversed template, which requires refiltering the data.
if self.reverse_template:
assert(stilde is not None)
asn, _, ahnrm = matched_filter_core(htilde.conj(), stilde,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff,
h_norm=template.sigmasq(psd))
correlation_snr = asn * ahnrm
else:
correlation_snr = sn
achi_list = np.array([])
index_list = np.array(indices)
# NOTE: self.dof is updated as a side effect for later readers.
dof, achi_list, _ = autochisq_from_precomputed(sn, correlation_snr,
self._autocor, index_list, stride=self.stride,
num_points=self.num_points,
oneside=self.one_sided, twophase=self.two_phase,
maxvalued=self.take_maximum_value)
self.dof = dof
return achi_list | python | {
"resource": ""
} |
q31637 | get_param_bounds_from_config | train | def get_param_bounds_from_config(cp, section, tag, param):
"""Gets bounds for the given parameter from a section in a config file.
Minimum and maximum values for bounds are specified by adding
`min-{param}` and `max-{param}` options, where `{param}` is the name of
the parameter. The types of boundary (open, closed, or reflected) to create
may also be specified by adding options `btype-min-{param}` and
`btype-max-{param}`. Cyclic conditions can be adding option
`cyclic-{param}`. If no `btype` arguments are provided, the
left bound will be closed and the right open.
For example, the following will create right-open bounds for parameter
`foo`:
.. code-block:: ini
[{section}-{tag}]
min-foo = -1
max-foo = 1
This would make the boundaries cyclic:
.. code-block:: ini
[{section}-{tag}]
min-foo = -1
max-foo = 1
cyclic-foo =
For more details on boundary types and their meaning, see
`boundaries.Bounds`.
If the parameter is not found in the section will just return None (in
this case, all `btype` and `cyclic` arguments are ignored for that
parameter). If bounds are specified, both a minimum and maximum must be
provided, else a Value or Type Error will be raised.
Parameters
----------
cp : ConfigParser instance
The config file.
section : str
The name of the section.
tag : str
Any tag in the section name. The full section name searched for in
the config file is `{section}(-{tag})`.
param : str
The name of the parameter to retrieve bounds for.
Returns
-------
bounds : {Bounds instance | None}
If bounds were provided, a `boundaries.Bounds` instance
representing the bounds. Otherwise, `None`.
"""
# Missing min/max options are not an error at this stage; both being
# None simply means "no bounds specified" for this parameter.
try:
minbnd = float(cp.get_opt_tag(section, 'min-'+param, tag))
except Error:
minbnd = None
try:
maxbnd = float(cp.get_opt_tag(section, 'max-'+param, tag))
except Error:
maxbnd = None
if minbnd is None and maxbnd is None:
bnds = None
elif minbnd is None or maxbnd is None:
raise ValueError("if specifying bounds for %s, " %(param) +
"you must provide both a minimum and a maximum")
else:
bndargs = {'min_bound': minbnd, 'max_bound': maxbnd}
# try to get any other conditions, if provided
try:
minbtype = cp.get_opt_tag(section, 'btype-min-{}'.format(param),
tag)
except Error:
minbtype = 'closed'
try:
maxbtype = cp.get_opt_tag(section, 'btype-max-{}'.format(param),
tag)
except Error:
maxbtype = 'open'
bndargs.update({'btype_min': minbtype, 'btype_max': maxbtype})
# cyclic-{param} is a flag option; its mere presence enables cyclicity.
cyclic = cp.has_option_tag(section, 'cyclic-{}'.format(param), tag)
bndargs.update({'cyclic': cyclic})
bnds = boundaries.Bounds(**bndargs)
return bnds | python | {
"resource": ""
} |
def check_status(status):
    """Raise a RuntimeError if an MKL DFTI call returned a nonzero status.

    The numeric status code is translated into a human-readable message
    via the library's ``DftiErrorMessage`` before being raised. A zero
    (success) status is a no-op.
    """
    if not status:
        return
    # Translate the numeric code into the library's error string.
    raw_msg = lib.DftiErrorMessage(status)
    raise RuntimeError(ctypes.c_char_p(raw_msg).value)
def add_arg(self, arg):
    """Append a positional command-line argument to this node.

    File instances are kept as-is so data dependencies can be tracked;
    every other value is stringified.
    """
    value = arg if isinstance(arg, File) else str(arg)
    self._args += [value]
def add_opt(self, opt, value=None):
    """Append an option (and optional value) to this node's option list.

    File values are preserved for dependency tracking; anything else is
    stringified. When no value is given only the flag itself is added.
    """
    if value is None:
        self._options += [opt]
        return
    if not isinstance(value, File):
        value = str(value)
    self._options += [opt, value]
q31641 | Node._add_output | train | def _add_output(self, out):
""" Add as destination of output data
"""
self._outputs += [out]
out.node = self
out._set_as_output_of(self) | python | {
"resource": ""
} |
def add_input_opt(self, opt, inp):
    """Add a command-line option whose value is an input file.

    The file is also registered as an input so that parent/child job
    relations can be inferred from it.
    """
    dax_value = inp._dax_repr()
    self.add_opt(opt, dax_value)
    self._add_input(inp)
def add_output_opt(self, opt, out):
    """Add a command-line option whose value is an output file.

    The file is also registered as an output of this node.
    """
    dax_value = out._dax_repr()
    self.add_opt(opt, dax_value)
    self._add_output(out)
def add_output_list_opt(self, opt, outputs):
    """Add an option followed by a list of output files.

    Each file is appended as its own option token and registered as an
    output of this node.
    """
    self.add_opt(opt)
    for output_file in outputs:
        self.add_opt(output_file)
        self._add_output(output_file)
def add_input_list_opt(self, opt, inputs):
    """Add an option followed by a list of input files.

    Each file is appended as its own option token and registered as an
    input of this node.
    """
    self.add_opt(opt)
    for input_file in inputs:
        self.add_opt(input_file)
        self._add_input(input_file)
def add_list_opt(self, opt, values):
    """Add an option followed by a list of plain (non-file) values."""
    self.add_opt(opt)
    for entry in values:
        self.add_opt(entry)
def add_input_arg(self, inp):
    """Add an input file as a positional argument, tracking the dependency."""
    dax_value = inp._dax_repr()
    self.add_arg(dax_value)
    self._add_input(inp)
def add_output_arg(self, out):
    """Add an output file as a positional argument, tracking the product."""
    dax_value = out._dax_repr()
    self.add_arg(dax_value)
    self._add_output(out)
def new_output_file_opt(self, opt, name):
    """Create a File with the given name, add it under ``opt``, and
    return the new file handle."""
    output_file = File(name)
    self.add_output_opt(opt, output_file)
    return output_file
def add_profile(self, namespace, key, value, force=False):
    """Attach DAX-level profile information to this node.

    If a profile with the same (namespace, key) already exists it is
    left untouched unless ``force`` is True, in which case the stale
    entry is removed and the new value installed.
    """
    try:
        profile_entry = dax.Profile(namespace, key, value)
        self._dax_node.addProfile(profile_entry)
    except dax.DuplicateError:
        if force:
            # Replace the existing entry with the new value.
            self._dax_node.removeProfile(profile_entry)
            self._dax_node.addProfile(profile_entry)
def add_workflow(self, workflow):
    """Attach a sub-workflow to this workflow.

    The sub-workflow is represented as a single sub-dax job in the
    parent DAX, and every external input already known to the parent
    becomes a root dependency of the child.

    Parameters
    ----------
    workflow : Workflow instance
        The sub-workflow to add to this one.
    """
    workflow.in_workflow = self
    self.sub_workflows += [workflow]
    subdax_job = workflow.as_job
    self._adag.addJob(subdax_job)
    # The sub-dax file lives in the current working directory.
    subdax_job.file.PFN(os.path.join(os.getcwd(), subdax_job.file.name),
                        site='local')
    self._adag.addFile(subdax_job.file)
    for external_input in self._external_workflow_inputs:
        workflow._make_root_dependency(external_input.node)
    return self
q31652 | Workflow.add_node | train | def add_node(self, node):
""" Add a node to this workflow
This function adds nodes to the workflow. It also determines
parent/child relations from the DataStorage inputs to this job.
Parameters
----------
node : pycbc.workflow.pegasus_workflow.Node
A node that should be executed as part of this workflow.
"""
node._finalize()
node.in_workflow = self
self._adag.addJob(node._dax_node)
# Determine the parent child relationships based on the inputs that
# this node requires.
# Track parents already linked so each dependency is added only once.
added_nodes = []
for inp in node._inputs:
if inp.node is not None and inp.node.in_workflow == self:
if inp.node not in added_nodes:
parent = inp.node._dax_node
child = node._dax_node
dep = dax.Dependency(parent=parent, child=child)
self._adag.addDependency(dep)
added_nodes.append(inp.node)
elif inp.node is not None and not inp.node.in_workflow:
raise ValueError('Parents of this node must be added to the '
'workflow first.')
# Input not produced by any node: it is an external workflow input.
elif inp.node is None and not inp.workflow_input:
self._inputs += [inp]
inp.workflow_input = True
# Input produced by a node in a *different* workflow: record it as an
# external input of this workflow as well.
elif inp.node is not None and inp.node.in_workflow != self and inp not in self._inputs:
self._inputs += [inp]
self._external_workflow_inputs += [inp]
# Record the outputs that this node generates
self._outputs += node._outputs
# Record the executable that this node uses
if not node.executable.in_workflow:
node.executable.in_workflow = True
self._executables += [node.executable]
return self | python | {
"resource": ""
} |
def save(self, filename=None, tc=None):
    """Write this workflow (and recursively its sub-workflows) to a DAX
    file and an accompanying transformation catalog.

    Parameters
    ----------
    filename : str, optional
        Path of the DAX file to write. Defaults to ``self.filename``.
    tc : str, optional
        Path of the transformation catalog to write. Defaults to
        ``'{filename}.tc.txt'``; it is written in the same directory as
        the DAX, where pycbc_submit_dax expects to find it when the DAX
        is planned.
    """
    if filename is None:
        filename = self.filename
    for sub in self.sub_workflows:
        sub.save()
    # FIXME this is ugly as pegasus 4.9.0 does not support the full
    # transformation catalog in the DAX. I have asked Karan to fix this so
    # that executables and containers can be specified in the DAX itself.
    # Karan says that XML is going away in Pegasus 5.x and so this code
    # will need to be re-written anyway.
    if tc is None:
        tc = '{}.tc.txt'.format(filename)
    p = os.path.dirname(tc)
    f = os.path.basename(tc)
    if not p:
        p = '.'
    tc = TransformationCatalog(p, f)
    for e in self._adag.executables.copy():
        tc.add(e)
        try:
            tc.add_container(e.container)
        except Exception:
            # Not every executable has an associated container; adding
            # one is best-effort. (Was a bare ``except:`` which would
            # also have swallowed KeyboardInterrupt/SystemExit.)
            pass
        self._adag.removeExecutable(e)
    # Use a context manager so the DAX file is flushed and closed
    # (the original left the handle open).
    with open(filename, "w") as dax_fh:
        self._adag.writeXML(dax_fh)
    tc.write()
def has_pfn(self, url, site=None):
    """Return whether this file already carries the given PFN.

    Thin wrapper around Pegasus' ``hasPFN`` so the check can be made
    outside of Pegasus-specific code paths.
    """
    return self.hasPFN(dax.PFN(url, site))
def from_path(cls, path):
    """Build a File whose PFN is the given path.

    Paths with no scheme (or a ``file:`` scheme) that point at an
    existing local file are converted to absolute ``file:`` URLs and
    registered on the 'local' site; anything else is registered as
    'nonlocal'.
    """
    urlparts = urlparse.urlsplit(path)
    site = 'nonlocal'
    if urlparts.scheme in ('', 'file') and os.path.isfile(urlparts.path):
        abspath = os.path.abspath(urlparts.path)
        path = urlparse.urljoin('file:', urllib.pathname2url(abspath))
        site = 'local'
    fil = File(os.path.basename(path))
    fil.PFN(path, site)
    return fil
def read_from_config(cp, **kwargs):
    """Initialize a model from the given config file.

    The ``[model]`` section must provide a ``name`` option naming the
    model class to initialize; all other keyword arguments are forwarded
    to that class's ``from_config`` method.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config file parser to read.
    \**kwargs :
        Passed through to the selected class's ``from_config``.

    Returns
    -------
    cls
        The initialized model.
    """
    model_name = cp.get("model", "name")
    model_cls = models[model_name]
    return model_cls.from_config(cp, **kwargs)
def read_distributions_from_config(cp, section="prior"):
    """Parse every ``[{section}-...]`` subsection into a distribution.

    Parameters
    ----------
    cp : WorflowConfigParser
        An open config file to read.
    section : str, optional
        Prefix on section names from which to retrieve the
        distributions. Default "prior".

    Returns
    -------
    list
        A list of the parsed distributions.

    Raises
    ------
    ValueError
        If two distributions claim the same parameter.
    """
    dists = []
    claimed_params = []
    for subsection in cp.get_subsections(section):
        name = cp.get_opt_tag(section, "name", subsection)
        dist = distribs[name].from_config(cp, section, subsection)
        if not set(dist.params).isdisjoint(claimed_params):
            raise ValueError("Same parameter in more than one distribution.")
        dists.append(dist)
        claimed_params += dist.params
    return dists
q31658 | read_params_from_config | train | def read_params_from_config(cp, prior_section='prior',
vargs_section='variable_args',
sargs_section='static_args'):
"""Loads static and variable parameters from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
prior_section : str, optional
Check that priors exist in the given section. Default is 'prior.'
vargs_section : str, optional
The section to get the parameters that will be varied/need priors
defined for them. Default is 'variable_args'.
sargs_section : str, optional
The section to get the parameters that will remain fixed. Default is
'static_args'.
Returns
-------
variable_args : list
The names of the parameters to vary in the PE run.
static_args : dict
Dictionary of names -> values giving the parameters to keep fixed.
"""
# sanity check that each parameter in [variable_args] has a priors section
variable_args = cp.options(vargs_section)
subsections = cp.get_subsections(prior_section)
# A single prior subsection may cover several parameters joined with
# '+', e.g. [prior-a+b]; split those out before comparing.
tags = set([p for tag in subsections for p in tag.split('+')])
missing_prior = set(variable_args) - tags
if any(missing_prior):
raise KeyError("You are missing a priors section in the config file "
"for parameter(s): {}".format(', '.join(missing_prior)))
# get static args
try:
static_args = dict([(key, cp.get_opt_tags(sargs_section, key, []))
for key in cp.options(sargs_section)])
except _ConfigParser.NoSectionError:
static_args = {}
# try converting values to float
for key in static_args:
val = static_args[key]
try:
# the following will raise a ValueError if it cannot be cast to
# float (as we would expect for string arguments)
static_args[key] = float(val)
except ValueError:
# try converting to a list of strings; this function will just
# return val if it does not begin (end) with [ (])
static_args[key] = _convert_liststring_to_list(val)
return variable_args, static_args | python | {
"resource": ""
} |
q31659 | read_constraints_from_config | train | def read_constraints_from_config(cp, transforms=None,
constraint_section='constraint'):
"""Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
"""
cons = []
for subsection in cp.get_subsections(constraint_section):
name = cp.get_opt_tag(constraint_section, "name", subsection)
constraint_arg = cp.get_opt_tag(
constraint_section, "constraint_arg", subsection)
# get any other keyword arguments
# Every option besides 'name' and 'constraint_arg' is forwarded to the
# constraint's constructor, cast to float where possible.
kwargs = {}
section = constraint_section + "-" + subsection
extra_opts = [key for key in cp.options(section)
if key not in ["name", "constraint_arg"]]
for key in extra_opts:
val = cp.get(section, key)
if key == "required_parameters":
val = val.split(_VARARGS_DELIM)
else:
try:
val = float(val)
except ValueError:
pass
kwargs[key] = val
cons.append(constraints.constraints[name](constraint_arg,
transforms=transforms,
**kwargs))
return cons | python | {
"resource": ""
} |
def insert_injfilterrejector_option_group(parser):
    """Add the injection-filter-rejector options to an argument parser."""
    group = parser.add_argument_group(_injfilterrejector_group_help)
    group.add_argument("--injection-filter-rejector-chirp-time-window",
                       type=float, default=None,
                       help=_injfilterer_cthresh_help)
    group.add_argument("--injection-filter-rejector-match-threshold",
                       type=float, default=None,
                       help=_injfilterer_mthresh_help)
    group.add_argument("--injection-filter-rejector-coarsematch-deltaf",
                       type=float, default=1.,
                       help=_injfilterer_deltaf_help)
    group.add_argument("--injection-filter-rejector-coarsematch-fmax",
                       type=float, default=256.,
                       help=_injfilterer_fmax_help)
    group.add_argument("--injection-filter-rejector-seg-buffer",
                       type=int, default=10,
                       help=_injfilterer_buffer_help)
    group.add_argument("--injection-filter-rejector-f-lower",
                       type=int, default=None,
                       help=_injfilterer_flower_help)
def from_cli(cls, opt):
    """Build an InjFilterRejector from parsed command-line options."""
    if opt.injection_filter_rejector_f_lower is not None:
        f_lower = opt.injection_filter_rejector_f_lower
    else:
        # NOTE: Uses main low-frequency cutoff as default option. This may
        #       need some editing if using this in multi_inspiral, which I
        #       leave for future work, or if this is being used in another
        #       code which doesn't have --low-frequency-cutoff
        f_lower = opt.low_frequency_cutoff
    return cls(opt.injection_file,
               opt.injection_filter_rejector_chirp_time_window,
               opt.injection_filter_rejector_match_threshold,
               f_lower,
               coarsematch_deltaf=opt.injection_filter_rejector_coarsematch_deltaf,
               coarsematch_fmax=opt.injection_filter_rejector_coarsematch_fmax,
               seg_buffer=opt.injection_filter_rejector_seg_buffer)
q31662 | InjFilterRejector.generate_short_inj_from_inj | train | def generate_short_inj_from_inj(self, inj_waveform, simulation_id):
"""Generate and a store a truncated representation of inj_waveform."""
if not self.enabled:
# Do nothing!
return
if simulation_id in self.short_injections:
err_msg = "An injection with simulation id "
err_msg += str(simulation_id)
err_msg += " has already been added. This suggests "
err_msg += "that your injection file contains injections with "
err_msg += "duplicate simulation_ids. This is not allowed."
raise ValueError(err_msg)
# Resize to the next power of two, long enough to support the coarse
# frequency resolution used for matching.
curr_length = len(inj_waveform)
new_length = int(nearest_larger_binary_number(curr_length))
# Don't want length less than 1/delta_f
while new_length * inj_waveform.delta_t < 1./self.coarsematch_deltaf:
new_length = new_length * 2
inj_waveform.resize(new_length)
inj_tilde = inj_waveform.to_frequencyseries()
# Dynamic range is important here!
inj_tilde_np = inj_tilde.numpy() * DYN_RANGE_FAC
delta_f = inj_tilde.get_delta_f()
new_freq_len = int(self.coarsematch_fmax / delta_f + 1)
# This shouldn't be a problem if injections are generated at
# 16384 Hz ... It is only a problem of injection sample rate
# gives a lower Nyquist than the trunc_f_max. If this error is
# ever raised one could consider zero-padding the injection.
assert(new_freq_len <= len(inj_tilde))
# Decimate in frequency down to the coarse-match resolution.
df_ratio = int(self.coarsematch_deltaf/delta_f)
inj_tilde_np = inj_tilde_np[:new_freq_len:df_ratio]
new_inj = FrequencySeries(inj_tilde_np, dtype=np.complex64,
delta_f=self.coarsematch_deltaf)
self.short_injections[simulation_id] = new_inj | python | {
"resource": ""
} |
q31663 | InjFilterRejector.template_segment_checker | train | def template_segment_checker(self, bank, t_num, segment, start_time):
"""Test if injections in segment are worth filtering with template.
Using the current template, current segment, and injections within that
segment. Test if the injections and sufficiently "similar" to any of
the injections to justify actually performing a matched-filter call.
Ther are two parts to this test: First we check if the chirp time of
the template is within a provided window of any of the injections. If
not then stop here, it is not worth filtering this template, segment
combination for this injection set. If this check passes we compute a
match between a coarse representation of the template and a coarse
representation of each of the injections. If that match is above a
user-provided value for any of the injections then filtering can
proceed. This is currently only available if using frequency-domain
templates.
Parameters
-----------
FIXME
Returns
--------
FIXME
"""
if not self.enabled:
# If disabled, always filter (ie. return True)
return True
# Get times covered by segment analyze
sample_rate = 2. * (len(segment) - 1) * segment.delta_f
cum_ind = segment.cumulative_index
diff = segment.analyze.stop - segment.analyze.start
seg_start_time = cum_ind / sample_rate + start_time
seg_end_time = (cum_ind + diff) / sample_rate + start_time
# And add buffer
seg_start_time = seg_start_time - self.seg_buffer
seg_end_time = seg_end_time + self.seg_buffer
# Chirp time test
if self.chirp_time_window is not None:
m1 = bank.table[t_num]['mass1']
m2 = bank.table[t_num]['mass2']
tau0_temp, _ = mass1_mass2_to_tau0_tau3(m1, m2, self.f_lower)
for inj in self.injection_params.table:
end_time = inj.geocent_end_time + \
1E-9 * inj.geocent_end_time_ns
if not(seg_start_time <= end_time <= seg_end_time):
continue
tau0_inj, _ = \
mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
self.f_lower)
tau_diff = abs(tau0_temp - tau0_inj)
if tau_diff <= self.chirp_time_window:
break
else:
# Get's here if all injections are outside chirp-time window
return False
# Coarse match test
if self.match_threshold:
if self._short_template_mem is None:
# Set the memory for the short templates
wav_len = 1 + int(self.coarsematch_fmax /
self.coarsematch_deltaf)
self._short_template_mem = zeros(wav_len, dtype=np.complex64)
# Set the current short PSD to red_psd
# The coarse (reduced) PSD is cached per input PSD object id.
try:
red_psd = self._short_psd_storage[id(segment.psd)]
except KeyError:
# PSD doesn't exist yet, so make it!
curr_psd = segment.psd.numpy()
step_size = int(self.coarsematch_deltaf / segment.psd.delta_f)
max_idx = int(self.coarsematch_fmax / segment.psd.delta_f) + 1
red_psd_data = curr_psd[:max_idx:step_size]
red_psd = FrequencySeries(red_psd_data, #copy=False,
delta_f=self.coarsematch_deltaf)
self._short_psd_storage[id(curr_psd)] = red_psd
# Set htilde to be the current short template
# The coarse template is likewise cached, keyed on t_num.
if not t_num == self._short_template_id:
# Set the memory for the short templates if unset
if self._short_template_mem is None:
wav_len = 1 + int(self.coarsematch_fmax /
self.coarsematch_deltaf)
self._short_template_mem = zeros(wav_len,
dtype=np.complex64)
# Generate short waveform
htilde = bank.generate_with_delta_f_and_max_freq(
t_num, self.coarsematch_fmax, self.coarsematch_deltaf,
low_frequency_cutoff=bank.table[t_num].f_lower,
cached_mem=self._short_template_mem)
self._short_template_id = t_num
self._short_template_wav = htilde
else:
htilde = self._short_template_wav
for inj in self.injection_params.table:
end_time = inj.geocent_end_time + \
1E-9 * inj.geocent_end_time_ns
if not(seg_start_time < end_time < seg_end_time):
continue
curr_inj = self.short_injections[inj.simulation_id]
o, _ = match(htilde, curr_inj, psd=red_psd,
low_frequency_cutoff=self.f_lower)
if o > self.match_threshold:
break
else:
# Get's here if all injections are outside match threshold
return False
return True | python | {
"resource": ""
} |
def fit_above_thresh(distr, vals, thresh=None):
    """Maximum-likelihood fit of the coefficient alpha for values above
    a threshold.

    Supported distributions (normalized above the threshold x_t):
        exponential  p(x) = alpha exp(-alpha (x-x_t))
        rayleigh     p(x) = alpha x exp(-alpha (x**2-x_t**2)/2)
        power        p(x) = ((alpha-1)/x_t) (x/x_t)**-alpha
    Values below the threshold are discarded. If no threshold is
    specified the minimum sample value is used.

    Parameters
    ----------
    distr : {'exponential', 'rayleigh', 'power'}
        Name of distribution
    vals : sequence of floats
        Values to fit
    thresh : float
        Threshold to apply before fitting; if None, use min(vals)

    Returns
    -------
    alpha : float
        Fitted value
    sigma_alpha : float
        Standard error in fitted value
    """
    vals = numpy.array(vals)
    if thresh is None:
        # With no threshold given, everything is kept and the minimum
        # sample acts as the threshold.
        thresh = vals.min()
    else:
        vals = vals[vals >= thresh]
    alpha = fitalpha_dict[distr](vals, thresh)
    return alpha, fitstd_dict[distr](vals, alpha)
def fit_fn(distr, xvals, alpha, thresh):
    """Evaluate the fitted distribution, normalized to 1 above threshold.

    Multiply by a total count to normalize to that count. Values below
    the threshold evaluate to 0.

    Parameters
    ----------
    xvals : sequence of floats
        Values where the function is to be evaluated
    alpha : float
        The fitted parameter
    thresh : float
        Threshold value applied to fitted values

    Returns
    -------
    fit : array of floats
        Fitted function at the requested xvals
    """
    xvals = numpy.array(xvals)
    fit = fitfn_dict[distr](xvals, alpha, thresh)
    # Zero out the fit below threshold, where the model is undefined.
    fit[xvals < thresh] = 0.
    return fit
def tail_threshold(vals, N=1000):
    """Determine a threshold above which there are N louder values.

    Parameters
    ----------
    vals : sequence of floats
        Sample values; must contain at least N entries.
    N : int, optional
        Number of values required to lie at or above the threshold.
        Default 1000.

    Returns
    -------
    float
        The N-th largest value in ``vals``.

    Raises
    ------
    RuntimeError
        If fewer than N values are supplied.
    """
    vals = numpy.array(vals)
    if len(vals) < N:
        raise RuntimeError('Not enough input values to determine threshold')
    vals.sort()
    # After an ascending sort the N-th largest value sits at index -N;
    # this is equivalent to min(vals[-N:]) without the extra slice copy.
    return vals[-N]
q31667 | MultiTemperedAutocorrSupport.compute_acl | train | def compute_acl(cls, filename, start_index=None, end_index=None,
min_nsamples=10):
"""Computes the autocorrleation length for all model params and
temperatures in the given file.
Parameter values are averaged over all walkers at each iteration and
temperature. The ACL is then calculated over the averaged chain.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
start_index : {None, int}
The start index to compute the acl from. If None, will try to use
the number of burn-in iterations in the file; otherwise, will start
at the first sample.
end_index : {None, int}
The end index to compute the acl to. If None, will go to the end
of the current iteration.
min_nsamples : int, optional
Require a minimum number of samples to compute an ACL. If the
number of samples per walker is less than this, will just set to
``inf``. Default is 10.
Returns
-------
dict
A dictionary of ntemps-long arrays of the ACLs of each parameter.
"""
acls = {}
with cls._io(filename, 'r') as fp:
if end_index is None:
end_index = fp.niterations
tidx = numpy.arange(fp.ntemps)
for param in fp.variable_params:
these_acls = numpy.zeros(fp.ntemps)
for tk in tidx:
samples = fp.read_raw_samples(
param, thin_start=start_index, thin_interval=1,
thin_end=end_index, temps=tk, flatten=False)[param]
# contract the walker dimension using the mean, and flatten
# the (length 1) temp dimension
samples = samples.mean(axis=1)[0, :]
if samples.size < min_nsamples:
acl = numpy.inf
else:
acl = autocorrelation.calculate_acl(samples)
# Non-positive ACL estimates are unusable; mark as non-converged.
if acl <= 0:
acl = numpy.inf
these_acls[tk] = acl
acls[param] = these_acls
return acls | python | {
"resource": ""
} |
q31668 | pycbc_compile_function | train | def pycbc_compile_function(code,arg_names,local_dict,global_dict,
module_dir,
compiler='',
verbose=1,
support_code=None,
headers=None,
customize=None,
type_converters=None,
auto_downcast=1,
**kw):
""" Dummy wrapper around scipy weave compile to implement file locking
"""
headers = [] if headers is None else headers
lockfile_dir = os.environ['PYTHONCOMPILED']
lockfile_name = os.path.join(lockfile_dir, 'code_lockfile')
logging.info("attempting to aquire lock '%s' for "
"compiling code" % lockfile_name)
if not os.path.exists(lockfile_dir):
os.makedirs(lockfile_dir)
# An exclusive advisory lock serializes weave compilation across
# processes that share the PYTHONCOMPILED cache directory.
lockfile = open(lockfile_name, 'w')
fcntl.lockf(lockfile, fcntl.LOCK_EX)
logging.info("we have aquired the lock")
# Delegate the actual compilation to the wrapped weave function while
# the lock is held, then release it.
func = _compile_function(code,arg_names, local_dict, global_dict,
module_dir, compiler, verbose,
support_code, headers, customize,
type_converters,
auto_downcast, **kw)
fcntl.lockf(lockfile, fcntl.LOCK_UN)
logging.info("the lock has been released")
return func | python | {
"resource": ""
} |
def convert_bank_to_hdf(workflow, xmlbank, out_dir, tags=None):
    """Create a workflow node converting an XML template bank to HDF.

    Parameters
    ----------
    workflow : Workflow
        The workflow the conversion node is added to.
    xmlbank : FileList
        Must contain exactly one XML template bank file.
    out_dir : str
        Directory the HDF bank is written to.
    tags : list of str, optional
        Tags passed through to the executable.

    Returns
    -------
    FileList
        The output files of the bank2hdf node.
    """
    tags = [] if tags is None else tags
    #FIXME, make me not needed
    if len(xmlbank) > 1:
        raise ValueError('Can only convert a single template bank')
    logging.info('convert template bank to HDF')
    make_analysis_dir(out_dir)
    converter = PyCBCBank2HDFExecutable(workflow.cp, 'bank2hdf',
                                        ifos=workflow.ifos,
                                        out_dir=out_dir, tags=tags)
    node = converter.create_node(xmlbank[0])
    workflow.add_node(node)
    return node.output_files
"resource": ""
} |
def convert_trig_to_hdf(workflow, hdfbank, xml_trigger_files, out_dir, tags=None):
    """Create workflow nodes converting XML single-detector trigger files
    to HDF5, one node per detector and analysis segment.

    Parameters
    ----------
    workflow : Workflow
        The workflow the nodes are added to.
    hdfbank : FileList
        HDF template bank; the first entry is handed to each node.
    xml_trigger_files : FileList
        XML single-inspiral trigger files, grouped here by ifo and segment.
    out_dir : str
        Output directory for the converted files.
    tags : list of str, optional
        Tags passed through to the executable.

    Returns
    -------
    FileList
        The hdf5 trigger files produced by all nodes.
    """
    tags = [] if tags is None else tags
    #FIXME, make me not needed
    logging.info('convert single inspiral trigger files to hdf5')
    make_analysis_dir(out_dir)
    hdf_triggers = FileList()
    ifo_names, ifo_groups = xml_trigger_files.categorize_by_attr('ifo')
    for ifo, insp_group in zip(ifo_names, ifo_groups):
        converter = PyCBCTrig2HDFExecutable(workflow.cp, 'trig2hdf',
                                            ifos=ifo, out_dir=out_dir,
                                            tags=tags)
        _, insp_bundles = insp_group.categorize_by_attr('segment')
        for insps in insp_bundles:
            node = converter.create_node(insps, hdfbank[0])
            workflow.add_node(node)
            hdf_triggers += node.output_files
    return hdf_triggers
"resource": ""
} |
def setup_multiifo_interval_coinc_inj(workflow, hdfbank, full_data_trig_files, inj_trig_files,
                                      stat_files, background_file, veto_file, veto_name,
                                      out_dir, pivot_ifo, fixed_ifo, tags=None):
    """
    This function sets up exact match multiifo coincidence for injections

    Three coincidence passes are configured: injection-vs-injection
    ("injinj"), injection-vs-full-data ("injfull") and
    full-data-vs-injection ("fullinj"). Each pass is split into
    ``parallelization-factor`` jobs.

    Parameters
    ----------
    workflow : Workflow
        The workflow the coincidence nodes are added to.
    hdfbank : FileList
        Must contain exactly one HDF template bank file.
    full_data_trig_files : FileList
        Single-detector triggers from the full (non-injection) analysis,
        one file per ifo.
    inj_trig_files : FileList
        Single-detector triggers from the injection analysis, one file
        per ifo.
    stat_files : FileList
        Statistic files passed through to the coincidence executable.
    background_file : File
        Background file handed on to the injection statmap step.
    veto_file : File
        Veto segments applied by the coincidence jobs.
    veto_name : str
        Name tag identifying the veto definition.
    out_dir : str
        Output directory.
    pivot_ifo, fixed_ifo : str
        Detectors defining the injfull/fullinj split (see comment below).
    tags : list of str, optional
        Tags appended to job/file names.

    Returns
    -------
    The output of ``setup_multiifo_statmap_inj`` for the three passes.
    """
    if tags is None:
        tags = []
    make_analysis_dir(out_dir)
    logging.info('Setting up coincidence for injections')
    if len(hdfbank) != 1:
        raise ValueError('Must use exactly 1 bank file for this coincidence '
                         'method, I got %i !' % len(hdfbank))
    hdfbank = hdfbank[0]
    # Wall time knob and memory knob
    factor = int(workflow.cp.get_opt_tags('workflow-coincidence', 'parallelization-factor', tags))
    # Map each ifo to its single trigger file for the full-data (ffiles)
    # and injection (ifiles) analyses.
    ffiles = {}
    ifiles = {}
    for ifo, ffi in zip(*full_data_trig_files.categorize_by_attr('ifo')):
        ffiles[ifo] = ffi[0]
    for ifo, ifi in zip(*inj_trig_files.categorize_by_attr('ifo')):
        ifiles[ifo] = ifi[0]
    injinj_files = FileList()
    injfull_files = FileList()
    fullinj_files = FileList()
    # For the injfull and fullinj separation we take the pivot_ifo on one side,
    # and the rest that are attached to the fixed_ifo on the other side
    for ifo in ifiles: # ifiles is keyed on ifo
        if ifo == pivot_ifo:
            injinj_files.append(ifiles[ifo])
            injfull_files.append(ifiles[ifo])
            fullinj_files.append(ffiles[ifo])
        else:
            injinj_files.append(ifiles[ifo])
            injfull_files.append(ffiles[ifo])
            fullinj_files.append(ifiles[ifo])
    # One coincidence pass per (file list, tag) combination
    combo = [(injinj_files, "injinj"),
             (injfull_files, "injfull"),
             (fullinj_files, "fullinj"),
            ]
    bg_files = {'injinj':[], 'injfull':[], 'fullinj':[]}
    for trig_files, ctag in combo:
        findcoinc_exe = PyCBCFindMultiifoCoincExecutable(workflow.cp,
                                                         'multiifo_coinc',
                                                         ifos=ifiles.keys(),
                                                         tags=tags + [ctag],
                                                         out_dir=out_dir)
        # Split each pass into `factor` jobs, each handling bin i of factor
        for i in range(factor):
            group_str = '%s/%s' % (i, factor)
            coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,
                                                   stat_files,
                                                   veto_file, veto_name,
                                                   group_str,
                                                   pivot_ifo,
                                                   fixed_ifo,
                                                   tags=[veto_name, str(i)])
            bg_files[ctag] += coinc_node.output_files
            workflow.add_node(coinc_node)
    logging.info('...leaving coincidence for injections')
    return setup_multiifo_statmap_inj(workflow, ifiles.keys(), bg_files, background_file, out_dir, tags=tags + [veto_name])
"resource": ""
} |
def setup_multiifo_interval_coinc(workflow, hdfbank, trig_files, stat_files,
                                  veto_files, veto_names, out_dir, pivot_ifo, fixed_ifo, tags=None):
    """
    This function sets up exact match multiifo coincidence

    For every veto file/name pair the coincidence is split into
    ``parallelization-factor`` jobs and the resulting background files
    are fed into a statmap job.

    Parameters
    ----------
    workflow : Workflow
        The workflow the coincidence nodes are added to.
    hdfbank : FileList
        Must contain exactly one HDF template bank file.
    trig_files : FileList
        Single-detector trigger files for all ifos.
    stat_files : FileList
        Statistic files passed through to the coincidence executable.
    veto_files, veto_names : lists
        Parallel lists of veto segment files and their identifying names.
    out_dir : str
        Output directory.
    pivot_ifo, fixed_ifo : str
        Detectors passed through to the coincidence executable.
    tags : list of str, optional
        Tags appended to job/file names.

    Returns
    -------
    list
        One statmap result per veto file.
    """
    if tags is None:
        tags = []
    make_analysis_dir(out_dir)
    logging.info('Setting up coincidence')
    if len(hdfbank) != 1:
        raise ValueError('Must use exactly 1 bank file for this coincidence '
                         'method, I got %i !' % len(hdfbank))
    hdfbank = hdfbank[0]
    ifos, _ = trig_files.categorize_by_attr('ifo')
    findcoinc_exe = PyCBCFindMultiifoCoincExecutable(workflow.cp, 'multiifo_coinc',
                                                     ifos=ifos,
                                                     tags=tags, out_dir=out_dir)
    # Wall time knob and memory knob
    factor = int(workflow.cp.get_opt_tags('workflow-coincidence', 'parallelization-factor', tags))
    statmap_files = []
    for veto_file, veto_name in zip(veto_files, veto_names):
        bg_files = FileList()
        # Split the coincidence into `factor` jobs; each handles bin i
        for i in range(factor):
            group_str = '%s/%s' % (i, factor)
            coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,
                                                   stat_files,
                                                   veto_file, veto_name,
                                                   group_str,
                                                   pivot_ifo,
                                                   fixed_ifo,
                                                   tags=[veto_name, str(i)])
            bg_files += coinc_node.output_files
            workflow.add_node(coinc_node)
        # Combine this veto's background files into a statmap result
        statmap_files += [setup_multiifo_statmap(workflow, ifos, bg_files, out_dir, tags=tags + [veto_name])]
    logging.info('...leaving coincidence ')
    return statmap_files
"resource": ""
} |
def setup_multiifo_combine_statmap(workflow, final_bg_file_list, out_dir, tags):
    """Add a node combining multi-ifo statmap files into one background file.

    Parameters
    ----------
    workflow : Workflow
        The workflow the node is added to.
    final_bg_file_list : FileList
        The statmap files to be combined.
    out_dir : str
        Output directory.
    tags : list of str or None
        Tags appended to job/file names; treated as empty if None.

    Returns
    -------
    File
        The combined background file produced by the node.
    """
    tags = [] if tags is None else tags
    make_analysis_dir(out_dir)
    logging.info('Setting up multiifo combine statmap')
    combiner = PyCBCMultiifoCombineStatmap(workflow.cp,
                                           'combine_statmap',
                                           ifos=workflow.ifos,
                                           tags=tags,
                                           out_dir=out_dir)
    ifolist = ' '.join(workflow.ifos)
    cluster_window = float(workflow.cp.get_opt_tags('combine_statmap',
                                                    'cluster-window',
                                                    tags))
    node = combiner.create_node(final_bg_file_list, ifolist,
                                cluster_window, tags)
    workflow.add_node(node)
    return node.output_file
"resource": ""
} |
def first_phase(invec, outvec, N1, N2):
    """
    This implements the first phase of the FFT decomposition, using
    the standard FFT many plans.

    Parameters
    -----------
    invec : array
        The input array.
    outvec : array
        The output array.
    N1 : int
        Number of rows.
    N2 : int
        Number of columns.
    """
    global _theplan
    # The plan is created once and cached at module level.
    # NOTE(review): the cache ignores N1/N2 -- if this were later called
    # with a different decomposition the stale plan would be reused;
    # presumably callers always use a single fixed (N1, N2). Confirm
    # before reusing with varying sizes.
    if _theplan is None:
        _theplan = plan_first_phase(N1, N2)
    # Execute the cached plan on the raw data pointers
    fexecute(_theplan, invec.ptr, outvec.ptr)
"resource": ""
} |
def second_phase(invec, indices, N1, N2):
    """
    This is the second phase of the FFT decomposition that actually performs
    the pruning. It is an explicit calculation for the subset of points. Note
    that there seem to be some numerical accumulation issues at various values
    of N1 and N2.

    Parameters
    ----------
    invec :
        The result of the first phase FFT
    indices : array of ints
        The index locations to calculate the FFT
    N1 : int
        The length of the second phase "FFT"
    N2 : int
        The length of the first phase FFT

    Returns
    -------
    out : array of floats
    """
    # View the first-phase output as a plain numpy array (no copy)
    invec = numpy.array(invec.data, copy=False)
    # NI is read by the inlined C code below via weave's name capture
    NI = len(indices) # pylint:disable=unused-variable
    N1=int(N1)
    N2=int(N2)
    out = numpy.zeros(len(indices), dtype=numpy.complex64)
    # Inlined C: for each requested output index k, accumulate the N1-term
    # twiddle-factor sum over the first-phase results.
    code = """
        float pi = 3.14159265359;
        for(int i=0; i<NI; i++){
            std::complex<double> val= (0, 0);
            unsigned int k = indices[i];
            int N = N1*N2;
            float k2 = k % N2;
            float phase_inc = 2 * pi * float(k) / float(N);
            float sp, cp;
            for (float n1=0; n1<N1; n1+=1){
                sincosf(phase_inc * n1, &sp, &cp);
                val += std::complex<float>(cp, sp) * invec[int(k2 + N2*n1)];
            }
            out[i] = val;
        }
    """
    weave.inline(code, ['N1', 'N2', 'NI', 'indices', 'out', 'invec'],
                )
    return out
"resource": ""
} |
def splay(vec):
    """Determine the two lengths to split-stride the input vector by.

    Parameters
    ----------
    vec : sequence
        The vector whose length is decomposed.

    Returns
    -------
    N1, N2 : int
        Row and column counts with ``N1 * N2 == len(vec)``; N2 is the
        power of two closest to sqrt(len(vec)) from below.
    """
    N2 = 2 ** int(numpy.log2(len(vec)) / 2)
    # use floor division so the row count stays an int under python 3
    # (true division would yield a float and break downstream indexing)
    N1 = len(vec) // N2
    return N1, N2
"resource": ""
} |
def pruned_c2cifft(invec, outvec, indices, pretransposed=False):
    """
    Perform a pruned iFFT, only valid for power of 2 iffts as the
    decomposition is easier to choose. This is not a strict requirement of the
    functions, but it is unlikely to the optimal to use anything but power
    of 2. (Alex to provide more details in write up.

    Parameters
    -----------
    invec : array
        The input vector. This should be the correlation between the data and
        the template at full sample rate. Ideally this is pre-transposed, but
        if not this will be transposed in this function.
    outvec : array
        The output of the first phase of the pruned FFT.
    indices : array of ints
        The indexes at which to calculate the full sample-rate SNR.
    pretransposed : boolean, default=False
        Used to indicate whether or not invec is pretransposed.

    Returns
    --------
    SNRs : array
        The complex SNRs at the indexes given by indices.
    """
    rows, cols = splay(invec)
    if not pretransposed:
        invec = fft_transpose(invec)
    first_phase(invec, outvec, N1=rows, N2=cols)
    return fast_second_phase(outvec, indices, N1=rows, N2=cols)
"resource": ""
} |
def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f):
    """ Generate a Fourier domain sine-Gaussian

    Parameters
    ----------
    amp: float
        Amplitude of the sine-Gaussian
    quality: float
        The quality factor
    central_frequency: float
        The central frequency of the sine-Gaussian
    fmin: float
        The minimum frequency to generate the sine-Gaussian. This determines
        the length of the output vector.
    fmax: float
        The maximum frequency to generate the sine-Gaussian
    delta_f: float
        The size of the frequency step

    Returns
    -------
    sg: pycbc.types.Frequencyseries
        A Fourier domain sine-Gaussian
    """
    kmin = int(round(fmin / delta_f))
    kmax = int(round(fmax / delta_f))
    freqs = numpy.arange(kmin, kmax) * delta_f
    # decay time and peak amplitude derived from Q and f0
    tau = quality / 2 / numpy.pi / central_frequency
    peak = amp * numpy.pi ** 0.5 / 2 * tau
    # Gaussian envelope centred on f0, with the Q-dependent correction term
    shape = peak * numpy.exp(-(numpy.pi * tau * (freqs - central_frequency))**2.0)
    shape *= (1 + numpy.exp(-quality ** 2.0 * freqs / central_frequency))
    # embed into a vector starting at f=0 so indexing matches delta_f bins
    values = numpy.zeros(kmax, dtype=numpy.complex128)
    values[kmin:kmax] = shape[:]
    return pycbc.types.FrequencySeries(values, delta_f=delta_f)
"resource": ""
} |
def columns_from_file_list(file_list, columns, ifo, start, end):
    """ Return columns of information stored in single detector trigger
    files.

    Parameters
    ----------
    file_list : FileList
        The list of single detector trigger files to read from.
    columns : list of strings
        The list of columns to read from the trigger files.
    ifo : string
        The ifo to return triggers for.
    start : int
        The start time to get triggers from
    end : int
        The end time to get triggers from

    Returns
    -------
    trigger_dict : dict
        A dictionary of column vectors with column names as keys. Empty if
        no files match the requested ifo and time range.
    """
    file_list = file_list.find_output_with_ifo(ifo)
    file_list = file_list.find_all_output_in_range(ifo, segment(start, end))
    # Accumulate per-file arrays and concatenate once at the end. This
    # avoids the quadratic cost of concatenating inside the loop, and
    # avoids the silent upcast to float64 caused by seeding the first
    # concatenate with an empty python list.
    parts = {}
    for trig_file in file_list:
        # context manager ensures each file is closed even on error
        with h5py.File(trig_file.storage_path, 'r') as f:
            time = f['end_time'][:]
            # keep only triggers strictly inside (start, end)
            pick_loc = numpy.where(numpy.logical_and(time < end,
                                                     time > start))[0]
            for col in columns:
                parts.setdefault(col, []).append(f[col][:][pick_loc])
    return {col: numpy.concatenate(vals) for col, vals in parts.items()}
"resource": ""
} |
def make_padded_frequency_series(vec, filter_N=None):
    """Pad a TimeSeries with a length of zeros greater than its length, such
    that the total length is the closest power of 2. This prevents the effects
    of wraparound.

    Parameters
    ----------
    vec : TimeSeries or FrequencySeries
        The series to pad (and Fourier transform, if time domain).
    filter_N : int, optional
        If given, use this total length instead of the next power of two
        above ``len(vec)``.

    Returns
    -------
    FrequencySeries
        The frequency-domain series scaled by ``DYN_RANGE_FAC``.

    Raises
    ------
    TypeError
        If ``vec`` is neither a TimeSeries nor a FrequencySeries.
    """
    if filter_N is None:
        # next power of two strictly greater than len(vec); keep power an
        # int so N is an int under python 3
        power = int(ceil(log(len(vec), 2))) + 1
        N = 2 ** power
    else:
        N = filter_N
    # length of the one-sided frequency series; integer division so n
    # stays an int under python 3
    n = N // 2 + 1

    if isinstance(vec, FrequencySeries):
        vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
                                   delta_f=1.0, copy=False)
        # copy as much of the input as fits in the output
        cplen = min(len(vectilde), len(vec))
        vectilde[0:cplen] = vec[0:cplen]
        delta_f = vec.delta_f
    elif isinstance(vec, TimeSeries):
        # zero-pad in the time domain, then FFT
        vec_pad = TimeSeries(zeros(N), delta_t=vec.delta_t,
                             dtype=real_same_precision_as(vec))
        vec_pad[0:len(vec)] = vec
        delta_f = 1.0 / (vec.delta_t * N)
        vectilde = FrequencySeries(zeros(n), delta_f=1.0,
                                   dtype=complex_same_precision_as(vec))
        fft(vec_pad, vectilde)
    else:
        # previously an unsupported type fell through to a NameError on
        # vectilde; fail explicitly instead
        raise TypeError("vec must be a TimeSeries or FrequencySeries")

    return FrequencySeries(vectilde * DYN_RANGE_FAC, delta_f=delta_f,
                           dtype=complex64)
"resource": ""
} |
def insert_processing_option_group(parser):
    """
    Adds the options used to choose a processing scheme. This should be used
    if your program supports the ability to select the processing scheme.

    Parameters
    ----------
    parser : object
        OptionParser instance
    """
    group = parser.add_argument_group("Options for selecting the"
                                      " processing scheme in this program.")
    scheme_help = ("The choice of processing scheme. "
                   "Choices are " + str(list(set(scheme_prefix.values()))) +
                   ". (optional for CPU scheme) The number of "
                   "execution threads "
                   "can be indicated by cpu:NUM_THREADS, "
                   "where NUM_THREADS "
                   "is an integer. The default is a single thread. "
                   "If the scheme is provided as cpu:env, the number "
                   "of threads can be provided by the PYCBC_NUM_THREADS "
                   "environment variable. If the environment variable "
                   "is not set, the number of threads matches the number "
                   "of logical cores. ")
    group.add_argument("--processing-scheme", help=scheme_help, default="cpu")
    group.add_argument("--processing-device-id",
                       help="(optional) ID of GPU to use for accelerated "
                            "processing",
                       default=0, type=int)
"resource": ""
} |
def from_cli(opt):
    """Parses the command line options and returns a processing scheme.

    Parameters
    ----------
    opt: object
        Result of parsing the CLI with OptionParser, or any object with
        the required attributes.

    Returns
    -------
    ctx: Scheme
        Returns the requested processing scheme.
    """
    parts = opt.processing_scheme.split(':')
    name = parts[0]
    if name == "cuda":
        logging.info("Running with CUDA support")
        return CUDAScheme(opt.processing_device_id)
    # cpu/mkl schemes may carry a thread count, e.g. "cpu:4" or "cpu:env"
    numt = None
    if len(parts) > 1:
        numt = parts[1]
        if numt.isdigit():
            numt = int(numt)
    if name == "mkl":
        ctx = MKLScheme(num_threads=numt) if numt is not None else MKLScheme()
        logging.info("Running with MKL support: %s threads" % ctx.num_threads)
    else:
        ctx = CPUScheme(num_threads=numt) if numt is not None else CPUScheme()
        logging.info("Running with CPU support: %s threads" % ctx.num_threads)
    return ctx
"resource": ""
} |
def verify_processing_options(opt, parser):
    """Parses the processing scheme options and verifies that they are
    reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes.
    parser : object
        OptionParser instance; used to report an invalid scheme.
    """
    scheme_types = scheme_prefix.values()
    scheme_name = opt.processing_scheme.split(':')[0]
    if scheme_name not in scheme_types:
        # the original message contained a %s placeholder that was never
        # interpolated; include the offending scheme name in the error
        parser.error("(%s) is not a valid scheme type." % scheme_name)
"resource": ""
} |
def convert_to_sngl_inspiral_table(params, proc_id):
    '''
    Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral
    table with mass and spin parameters populated and event IDs assigned

    Parameters
    -----------
    params : iterable
        Each entry in the params iterable should be a sequence of
        [mass1, mass2, spin1z, spin2z] in that order
    proc_id : ilwd char
        Process ID to add to each row of the sngl_inspiral table

    Returns
    ----------
    SnglInspiralTable
        Bank of templates in SnglInspiralTable format
    '''
    table = lsctables.New(lsctables.SnglInspiralTable)
    param_names = ('mass1', 'mass2', 'spin1z', 'spin2z')
    for values in params:
        row = return_empty_sngl()
        row.process_id = proc_id
        for attr, value in zip(param_names, values):
            setattr(row, attr, value)
        # derived mass parameters
        row.mtotal, row.eta = pnutils.mass1_mass2_to_mtotal_eta(
            row.mass1, row.mass2)
        row.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
            row.mass1, row.mass2)
        row.template_duration = 0 # FIXME
        row.event_id = table.get_next_id()
        table.append(row)
    return table
"resource": ""
} |
def output_sngl_inspiral_table(outputFile, tempBank, metricParams,
                               ethincaParams, programName="", optDict = None,
                               outdoc=None, **kwargs):
    """
    Function that converts the information produced by the various pyCBC bank
    generation codes into a valid LIGOLW xml file containing a sngl_inspiral
    table and outputs to file.

    Parameters
    -----------
    outputFile : string
        Name of the file that the bank will be written to
    tempBank : iterable
        Each entry in the tempBank iterable should be a sequence of
        [mass1,mass2,spin1z,spin2z] in that order.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    ethincaParams: {ethincaParameters instance, None}
        Structure holding options relevant to the ethinca metric computation
        including the upper frequency cutoff to be used for filtering.
        NOTE: The computation is currently only valid for non-spinning systems
        and uses the TaylorF2 approximant.
    programName (key-word-argument) : string
        Name of the executable that has been run
    optDict (key-word argument) : dictionary
        Dictionary of the command line arguments passed to the program
    outdoc (key-word argument) : ligolw xml document
        If given add template bank to this representation of a xml document and
        write to disk. If not given create a new document.
    kwargs : key-word arguments
        All other key word arguments will be passed directly to
        ligolw_process.register_to_xmldoc
    """
    if optDict is None:
        optDict = {}
    if outdoc is None:
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

    # get IFO to put in search summary table
    ifos = []
    if 'channel_name' in optDict.keys():
        if optDict['channel_name'] is not None:
            # first two characters of the channel name, e.g. "H1"
            ifos = [optDict['channel_name'][0:2]]

    proc_id = ligolw_process.register_to_xmldoc(outdoc, programName, optDict,
                                                ifos=ifos, **kwargs).process_id
    sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id)

    # Calculate Gamma components if needed
    if ethincaParams is not None:
        if ethincaParams.doEthinca:
            for sngl in sngl_inspiral_table:
                # Set tau_0 and tau_3 values needed for the calculation of
                # ethinca metric distances
                (sngl.tau0,sngl.tau3) = pnutils.mass1_mass2_to_tau0_tau3(
                    sngl.mass1, sngl.mass2, metricParams.f0)
                fMax_theor, GammaVals = calculate_ethinca_metric_comps(
                    metricParams, ethincaParams,
                    sngl.mass1, sngl.mass2, spin1z=sngl.spin1z,
                    spin2z=sngl.spin2z, full_ethinca=ethincaParams.full_ethinca)
                # assign the upper frequency cutoff and Gamma0-5 values
                sngl.f_final = fMax_theor
                for i in xrange(len(GammaVals)):
                    setattr(sngl, "Gamma"+str(i), GammaVals[i])
        # If Gamma metric components are not wanted, assign f_final from an
        # upper frequency cutoff specified in ethincaParams
        elif ethincaParams.cutoff is not None:
            for sngl in sngl_inspiral_table:
                sngl.f_final = pnutils.frequency_cutoff_from_name(
                    ethincaParams.cutoff,
                    sngl.mass1, sngl.mass2, sngl.spin1z, sngl.spin2z)

    # set per-template low-frequency cutoff
    if 'f_low_column' in optDict and 'f_low' in optDict and \
            optDict['f_low_column'] is not None:
        for sngl in sngl_inspiral_table:
            setattr(sngl, optDict['f_low_column'], optDict['f_low'])

    outdoc.childNodes[0].appendChild(sngl_inspiral_table)

    # get times to put in search summary table
    start_time = 0
    end_time = 0
    if 'gps_start_time' in optDict.keys() and 'gps_end_time' in optDict.keys():
        start_time = optDict['gps_start_time']
        end_time = optDict['gps_end_time']

    # make search summary table
    search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
    search_summary = return_search_summary(start_time, end_time,
                               len(sngl_inspiral_table), ifos, **kwargs)
    search_summary_table.append(search_summary)
    outdoc.childNodes[0].appendChild(search_summary_table)

    # write the xml doc to disk
    ligolw_utils.write_filename(outdoc, outputFile,
                                gz=outputFile.endswith('.gz'))
"resource": ""
} |
def spa_length_in_time(**kwds):
    """
    Returns the length in time of the template,
    based on the masses, PN order, and low-frequency
    cut-off.
    """
    # For now, we call the swig-wrapped function below in
    # lalinspiral. Eventually would be nice to replace this
    # with a function using PN coeffs from lalsimulation.
    return findchirp_chirptime(kwds['mass1'], kwds['mass2'],
                               kwds['f_lower'], int(kwds['phase_order']))
"resource": ""
} |
def spa_tmplt_precondition(length, delta_f, kmin=0):
    """Return the amplitude portion of the TaylorF2 approximant, used to precondition
    the strain data. The result is cached, and so should not be modified only read.

    Parameters
    ----------
    length : int
        Number of samples of the returned slice.
    delta_f : float
        Frequency spacing of the series.
    kmin : int, optional
        Index offset into the cached series at which the slice starts.

    Returns
    -------
    FrequencySeries
        The slice ``[kmin:kmin+length]`` of the cached f^(-7/6) series.
    """
    global _prec
    # Rebuild the cache when the spacing changes or the cached series is
    # too short for the requested length.
    # NOTE(review): the length check ignores kmin, so a later call with a
    # larger kmin could slice past the cached data and get a short series
    # -- presumably callers use a consistent kmin; confirm before reuse.
    if _prec is None or _prec.delta_f != delta_f or len(_prec) < length:
        v = numpy.arange(0, (kmin+length*2), 1.0) * delta_f
        # f^(-7/6) amplitude; drop the k=0 sample to avoid a zero division
        v = numpy.power(v[1:len(v)], -7.0/6.0)
        _prec = FrequencySeries(v, delta_f=delta_f, dtype=float32)
    return _prec[kmin:kmin + length]
"resource": ""
} |
q31688 | combine_and_copy | train | def combine_and_copy(f, files, group):
""" Combine the same column from multiple files and save to a third"""
f[group] = np.concatenate([fi[group][:] if group in fi else \
np.array([], dtype=np.uint32) for fi in files]) | python | {
"resource": ""
} |
def select(self, fcn, *args, **kwds):
    """ Return arrays from an hdf5 file that satisfy the given function

    Parameters
    ----------
    fcn : a function
        A function that accepts the same number of argument as keys given
        and returns a boolean array of the same length.
    args : strings
        A variable number of strings that are keys into the hdf5. These must
        refer to arrays of equal length. At least one key is required.
    chunksize : {1e6, int}, optional
        Number of elements to read and process at a time.
    return_indices : bool, optional
        If True, also return the indices of elements passing the function.

    Returns
    -------
    values : np.ndarrays
        A variable number of arrays depending on the number of keys into
        the hdf5 file that are given. If return_indices is True, the first
        element is an array of indices of elements passing the function.

    >>> f = HFile(filename)
    >>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
    """
    # get references to each array
    refs = {arg: self[arg] for arg in args}
    data = {arg: [] for arg in args}
    return_indices = kwds.get('return_indices', False)
    indices = np.array([], dtype=np.uint64)
    # To conserve memory read the arrays in chunks. All keys must refer to
    # arrays of equal length, so size the loop on the first key explicitly
    # (previously this relied on a leftover loop variable).
    chunksize = kwds.get('chunksize', int(1e6))
    size = len(refs[args[0]])
    i = 0
    while i < size:
        r = min(i + chunksize, size)
        # Read each chunk's worth of data and find where it passes the function
        partial = [refs[arg][i:r] for arg in args]
        keep = fcn(*partial)
        if return_indices:
            indices = np.concatenate([indices, np.flatnonzero(keep) + i])
        # store only the results that pass the function
        for arg, part in zip(args, partial):
            data[arg].append(part[keep])
        i += chunksize
    # Combine the partial results into full arrays
    if len(args) == 1:
        res = np.concatenate(data[args[0]])
        if return_indices:
            return indices, res
        return res
    res = tuple(np.concatenate(data[arg]) for arg in args)
    if return_indices:
        return (indices,) + res
    return res
"resource": ""
} |
def select(self, idx):
    """ Return a new DictArray containing only the indexed values
    """
    picked = {key: arr[idx] for key, arr in self.data.items()}
    return self._return(data=picked)
"resource": ""
} |
def remove(self, idx):
    """ Return a new DictArray that does not contain the indexed values
    """
    kept = {key: np.delete(arr, idx) for key, arr in self.data.items()}
    return self._return(data=kept)
"resource": ""
} |
def mask(self):
    """
    Create a mask implementing the requested filter on the datasets

    Returns
    -------
    array of Boolean
        True for dataset indices to be returned by the get_column method

    Raises
    ------
    RuntimeError
        If no filter function was supplied when this instance was made.
    """
    if self.filter_func is None:
        raise RuntimeError("Can't get a mask without a filter function!")
    else:
        # only evaluate if no previous calculation was done
        if self._mask is None:
            # get required columns into the namespace as numpy arrays
            # NOTE(review): the columns are attached to ``self`` here, so
            # for the eval below to see them the filter_func string
            # presumably references them as ``self.<column>`` -- confirm
            # against how filter_func strings are written by callers.
            for column in self.columns:
                if column in self.filter_func:
                    setattr(self, column, self.group[column][:])
            # cache the boolean mask for subsequent accesses
            self._mask = eval(self.filter_func)
        return self._mask
"resource": ""
} |
def get_column(self, col):
    """
    Loop over files getting the requested dataset values from each

    Parameters
    ----------
    col : string
        Name of the dataset to be returned

    Returns
    -------
    numpy array
        Values from the dataset, filtered if requested and
        concatenated in order of file list
    """
    logging.info('getting %s' % col)
    pieces = []
    for fname in self.files:
        fdata = FileData(fname, group=self.group, columnlist=self.columns,
                         filter_func=self.filter_func)
        pieces.append(fdata.get_column(col))
        # Close each file since h5py has an upper limit on the number of
        # open file objects (approx. 1000)
        fdata.close()
    logging.info('- got %i values' % sum(len(p) for p in pieces))
    return np.concatenate(pieces)
"resource": ""
} |
def get_param_names(cls):
    """Returns a list of plottable CBC parameter variables.

    A parameter is considered plottable when it is exposed as a
    ``property`` on the class.
    """
    # use isinstance rather than a direct type comparison so property
    # subclasses are also recognized
    return [name for name, member in inspect.getmembers(cls)
            if isinstance(member, property)]
"resource": ""
} |
def create_new_output_file(sampler, filename, force=False, injection_file=None,
                           **kwargs):
    """Creates a new output file.

    If the output file already exists, an ``OSError`` will be raised. This can
    be overridden by setting ``force`` to ``True``.

    Parameters
    ----------
    sampler : sampler instance
        Sampler
    filename : str
        Name of the file to create.
    force : bool, optional
        Create the file even if it already exists. Default is False.
    injection_file : str, optional
        If an injection was added to the data, write its information.
    \**kwargs :
        All other keyword arguments are passed through to the file's
        ``write_metadata`` function.
    """
    if os.path.exists(filename):
        if not force:
            raise OSError("output-file already exists; use force if you "
                          "wish to overwrite it.")
        os.remove(filename)
    logging.info("Creating file {}".format(filename))
    with sampler.io(filename, "w") as fp:
        # create the samples group and sampler info group
        fp.create_group(fp.samples_group)
        fp.create_group(fp.sampler_group)
        # save the sampler's metadata
        fp.write_sampler_metadata(sampler)
        # save injection parameters
        if injection_file is not None:
            logging.info("Writing injection file to output")
            # just use the first one
            fp.write_injections(injection_file)
"resource": ""
} |
def initial_dist_from_config(cp, variable_params):
    r"""Loads a distribution for the sampler start from the given config file.

    A distribution will only be loaded if the config file has a [initial-\*]
    section(s).

    Parameters
    ----------
    cp : Config parser
        The config parser to try to load from.
    variable_params : list of str
        The variable parameters for the distribution.

    Returns
    -------
    JointDistribution or None :
        The initial distribution. If no [initial-\*] section found in the
        config file, will just return None.
    """
    if not len(cp.get_subsections("initial")):
        return None
    logging.info("Using a different distribution for the starting points "
                 "than the prior.")
    initial_dists = distributions.read_distributions_from_config(
        cp, section="initial")
    constraints = distributions.read_constraints_from_config(
        cp, constraint_section="initial_constraint")
    return distributions.JointDistribution(
        variable_params, *initial_dists,
        **{"constraints": constraints})
"resource": ""
} |
def setup_output(self, output_file, force=False, injection_file=None):
    """Sets up the sampler's checkpoint and output files.

    The checkpoint file has the same name as the output file, but with
    ``.checkpoint`` appended to the name. A backup file will also be
    created.

    If the output file already exists, an ``OSError`` will be raised.
    This can be overridden by setting ``force`` to ``True``.

    Sets the ``checkpoint_file``, ``backup_file``, ``checkpoint_valid``
    and ``new_checkpoint`` attributes on this instance.

    Parameters
    ----------
    output_file : str
        Name of the output file.
    force : bool, optional
        If the output file already exists, overwrite it.
    injection_file : str, optional
        If an injection was added to the data, write its information.
    """
    # check for backup file(s)
    checkpoint_file = output_file + '.checkpoint'
    backup_file = output_file + '.bkup'
    # check if we have a good checkpoint and/or backup file
    logging.info("Looking for checkpoint file")
    checkpoint_valid = validate_checkpoint_files(checkpoint_file,
                                                 backup_file)
    # Create a new file if the checkpoint doesn't exist, or if it is
    # corrupted
    self.new_checkpoint = False  # keeps track if this is a new file or not
    if not checkpoint_valid:
        logging.info("Checkpoint not found or not valid")
        create_new_output_file(self, checkpoint_file, force=force,
                               injection_file=injection_file)
        # now the checkpoint is valid
        self.new_checkpoint = True
        # copy to backup
        shutil.copy(checkpoint_file, backup_file)
    # write the command line, startup
    for fn in [checkpoint_file, backup_file]:
        with self.io(fn, "a") as fp:
            fp.write_command_line()
            fp.write_resume_point()
    # store the file names for later checkpointing/finalization
    self.checkpoint_file = checkpoint_file
    self.backup_file = backup_file
    self.checkpoint_valid = checkpoint_valid
"resource": ""
} |
def load_frequencyseries(path, group=None):
    """
    Load a FrequencySeries from a .hdf, .txt or .npy file. The
    default data types will be double precision floating point.

    Parameters
    ----------
    path: string
        source file path. Must end with .npy, .txt or .hdf.
    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in .npy, .txt or .hdf, or if the loaded
        data does not have 2 (real) or 3 (complex) dimensions.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        data = _numpy.load(path)
    elif ext == '.txt':
        data = _numpy.loadtxt(path)
    elif ext == '.hdf':
        key = 'data' if group is None else group
        # use a context manager so the file handle is released even if
        # reading the dataset or its attributes raises
        with h5py.File(path, 'r') as f:
            return FrequencySeries(f[key][:], delta_f=f[key].attrs['delta_f'],
                                   epoch=f[key].attrs['epoch'])
    else:
        raise ValueError('Path must end with .npy, .hdf, or .txt')

    # txt/npy data: column 0 is frequency, column 1 (and 2) the values
    # NOTE(review): these branches test ndim, so the "complex" case only
    # triggers for 3-d .npy arrays, never for 3-column text -- confirm
    # this matches how the files are written.
    if data.ndim == 2:
        delta_f = (data[-1][0] - data[0][0]) / (len(data)-1)
        epoch = _lal.LIGOTimeGPS(data[0][0])
        return FrequencySeries(data[:,1], delta_f=delta_f, epoch=epoch)
    elif data.ndim == 3:
        delta_f = (data[-1][0] - data[0][0]) / (len(data)-1)
        epoch = _lal.LIGOTimeGPS(data[0][0])
        return FrequencySeries(data[:,1] + 1j*data[:,2], delta_f=delta_f,
                               epoch=epoch)
    else:
        # original message embedded a raw line continuation inside the
        # string, producing a run of spaces; use clean concatenation
        raise ValueError('File has %s dimensions, cannot convert to Array, '
                         'must be 2 (real) or 3 (complex)' % data.ndim)
"resource": ""
} |
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0):
    """
    Compare whether two frequency series are almost equal, element
    by element.

    If the 'relative' parameter is 'True' (the default) then the
    'tol' parameter (which must be positive) is interpreted as a
    relative tolerance, and the comparison returns 'True' only if
    abs(self[i]-other[i]) <= tol*abs(self[i])
    for all elements of the series.

    If 'relative' is 'False', then 'tol' is an absolute tolerance,
    and the comparison is true only if
    abs(self[i]-other[i]) <= tol
    for all elements of the series.

    The method also checks that self.delta_f is within 'dtol' of
    other.delta_f; if 'dtol' has its default value of 0 then exact
    equality between the two is required.

    Other meta-data (type, dtype, length, and epoch) must be exactly
    equal.  If either object's memory lives on the GPU it will be
    copied to the CPU for the comparison, which may be slow. But the
    original object itself will not have its memory relocated nor
    scheme changed.

    Parameters
    ----------
    other: another Python object, that should be tested for
        almost-equality with 'self', element-by-element.
    tol: a non-negative number, the tolerance, which is interpreted
        as either a relative tolerance (the default) or an absolute
        tolerance.
    relative: A boolean, indicating whether 'tol' should be interpreted
        as a relative tolerance (if True, the default if this argument
        is omitted) or as an absolute tolerance (if tol is False).
    dtol: a non-negative number, the tolerance for delta_f. Like 'tol',
        it is interpreted as relative or absolute based on the value of
        'relative'. This parameter defaults to zero, enforcing exact
        equality between the delta_f values of the two FrequencySeries.

    Returns
    -------
    boolean: 'True' if the data and delta_fs agree within the tolerance,
        as interpreted by the 'relative' keyword, and if the types,
        lengths, dtypes, and epochs are exactly the same.
    """
    # Check that the delta_f tolerance is non-negative; raise an exception
    # if needed.
    if (dtol < 0.0):
        raise ValueError("Tolerance in delta_f cannot be negative")
    # First compare the sample data via the parent-class comparison, then
    # check the frequency-series-specific metadata (epoch and delta_f).
    if super(FrequencySeries,self).almost_equal_elem(other,tol=tol,relative=relative):
        if relative:
            # delta_f compared relative to this series' own delta_f
            return (self._epoch == other._epoch and
                    abs(self._delta_f-other._delta_f) <= dtol*self._delta_f)
        else:
            return (self._epoch == other._epoch and
                    abs(self._delta_f-other._delta_f) <= dtol)
    else:
        return False
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.