Dataset schema:
_id: string (length 2 to 7)
title: string (length 1 to 88)
partition: string (3 classes)
text: string (length 75 to 19.8k)
language: string (1 class)
meta_information: dict
q32000
BaseInferenceFile.copy_info
train
def copy_info(self, other, ignore=None): """Copies "info" from this file to the other. "Info" is defined as all groups that are not the samples group. Parameters ---------- other : output file The output file. Must be an hdf file. ignore : (list of) str Don't copy the given groups. """ logging.info("Copying info") # copy non-samples/stats data if ignore is None: ignore = [] if isinstance(ignore, str): ignore = [ignore] ignore = set(ignore + [self.samples_group]) copy_groups = set(self.keys()) - ignore for key in copy_groups: super(BaseInferenceFile, self).copy(key, other)
python
{ "resource": "" }
q32001
BaseInferenceFile.copy_samples
train
def copy_samples(self, other, parameters=None, parameter_names=None, read_args=None, write_args=None): """Copies samples to the other file. Parameters ---------- other : InferenceFile An open inference file to write to. parameters : list of str, optional List of parameters to copy. If None, will copy all parameters. parameter_names : dict, optional Rename one or more parameters to the given name. The dictionary should map parameter -> parameter name. If None, will just use the original parameter names. read_args : dict, optional Arguments to pass to ``read_samples``. write_args : dict, optional Arguments to pass to ``write_samples``. """ # select the samples to copy logging.info("Reading samples to copy") if parameters is None: parameters = self.variable_params if read_args is None: read_args = {} if write_args is None: write_args = {} # if list of desired parameters is different, rename if set(parameters) != set(self.variable_params): other.attrs['variable_params'] = parameters samples = self.read_samples(parameters, **read_args) logging.info("Copying {} samples".format(samples.size)) # if different parameter names are desired, get them from the samples if parameter_names: arrs = {pname: samples[p] for p, pname in parameter_names.items()} arrs.update({p: samples[p] for p in parameters if p not in parameter_names}) samples = FieldArray.from_kwargs(**arrs) other.attrs['variable_params'] = samples.fieldnames logging.info("Writing samples") other.write_samples(samples, **write_args)
python
{ "resource": "" }
q32002
BaseInferenceFile.copy
train
def copy(self, other, ignore=None, parameters=None, parameter_names=None, read_args=None, write_args=None): """Copies metadata, info, and samples in this file to another file. Parameters ---------- other : str or InferenceFile The file to write to. May be either a string giving a filename, or an open hdf file. If the former, the file will be opened with the write attribute (note that if a file already exists with that name, it will be deleted). ignore : (list of) strings Don't copy the given groups. If the samples group is included, no samples will be copied. parameters : list of str, optional List of parameters in the samples group to copy. If None, will copy all parameters. parameter_names : dict, optional Rename one or more parameters to the given name. The dictionary should map parameter -> parameter name. If None, will just use the original parameter names. read_args : dict, optional Arguments to pass to ``read_samples``. write_args : dict, optional Arguments to pass to ``write_samples``. Returns ------- InferenceFile The open file handler to other. """ if not isinstance(other, h5py.File): # check that we're not trying to overwrite this file if other == self.name: raise IOError("destination is the same as this file") other = self.__class__(other, 'w') # metadata self.copy_metadata(other) # info if ignore is None: ignore = [] if isinstance(ignore, str): ignore = [ignore] self.copy_info(other, ignore=ignore) # samples if self.samples_group not in ignore: self.copy_samples(other, parameters=parameters, parameter_names=parameter_names, read_args=read_args, write_args=write_args) # if any down selection was done, re-set the default # thin-start/interval/end p = list(self[self.samples_group].keys())[0] my_shape = self[self.samples_group][p].shape p = list(other[other.samples_group].keys())[0] other_shape = other[other.samples_group][p].shape if my_shape != other_shape: other.attrs['thin_start'] = 0 other.attrs['thin_interval'] = 1 other.attrs['thin_end'] = None return other
python
{ "resource": "" }
q32003
BaseInferenceFile.write_kwargs_to_attrs
train
def write_kwargs_to_attrs(cls, attrs, **kwargs): """Writes the given keywords to the given ``attrs``. If any keyword argument points to a dict, the keyword will point to a list of the dict's keys. Each key is then written to the attrs with its corresponding value. Parameters ---------- attrs : an HDF attrs The ``attrs`` of an hdf file or a group in an hdf file. \**kwargs : The keywords to write. """ for arg, val in kwargs.items(): if val is None: val = str(None) if isinstance(val, dict): # h5py cannot store a dict-keys view, so convert to a list attrs[arg] = list(val.keys()) # just call self again with the dict as kwargs cls.write_kwargs_to_attrs(attrs, **val) else: attrs[arg] = val
python
{ "resource": "" }
q32004
ceilpow2
train
def ceilpow2(n): """convenience function to determine a power-of-2 upper frequency limit""" signif, exponent = frexp(n) if signif < 0: return 1 if signif == 0.5: exponent -= 1 return 1 << exponent
python
{ "resource": "" }
q32005
coalign_waveforms
train
def coalign_waveforms(h1, h2, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None, resize=True): """ Return two time series which are aligned in time and phase. The alignment is only to the nearest sample point and all changes to the phase are made to the first input waveform. Waveforms should not be split across the vector boundary; if they are, use roll or a cyclic time shift to ensure that the entire signal is contiguous in the time series. Parameters ---------- h1: pycbc.types.TimeSeries The first waveform to align. h2: pycbc.types.TimeSeries The second waveform to align. psd: {None, pycbc.types.FrequencySeries} A psd to weight the alignment. low_frequency_cutoff: {None, float} The low frequency cutoff to weight the matching in Hz. high_frequency_cutoff: {None, float} The high frequency cutoff to weight the matching in Hz. resize: Optional, {True, boolean} If true, the vectors will be resized to match each other. If false, they must be the same length and even in length. Returns ------- h1: pycbc.types.TimeSeries The shifted waveform to align with h2 h2: pycbc.types.TimeSeries The resized (if necessary) waveform to align with h1. """ from pycbc.filter import matched_filter mlen = ceilpow2(max(len(h1), len(h2))) h1 = h1.copy() h2 = h2.copy() if resize: h1.resize(mlen) h2.resize(mlen) elif len(h1) != len(h2) or len(h2) % 2 != 0: raise ValueError("Time series must be the same size and even if you do " "not allow resizing") snr = matched_filter(h1, h2, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff) _, l = snr.abs_max_loc() rotation = snr[l] / abs(snr[l]) h1 = (h1.to_frequencyseries() * rotation).to_timeseries() h1.roll(l) h1 = TimeSeries(h1, delta_t=h2.delta_t, epoch=h2.start_time) return h1, h2
python
{ "resource": "" }
q32006
phase_from_frequencyseries
train
def phase_from_frequencyseries(htilde, remove_start_phase=True): """Returns the phase from the given frequency-domain waveform. This assumes that the waveform has been sampled finely enough that the phase cannot change by more than pi radians between each step. Parameters ---------- htilde : FrequencySeries The waveform to get the phase for; must be a complex frequency series. remove_start_phase : {True, bool} Subtract the initial phase before returning. Returns ------- FrequencySeries The phase of the waveform as a function of frequency. """ p = numpy.unwrap(numpy.angle(htilde.data)).astype( real_same_precision_as(htilde)) if remove_start_phase: p += -p[0] return FrequencySeries(p, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False)
python
{ "resource": "" }
q32007
amplitude_from_frequencyseries
train
def amplitude_from_frequencyseries(htilde): """Returns the amplitude of the given frequency-domain waveform as a FrequencySeries. Parameters ---------- htilde : FrequencySeries The waveform to get the amplitude of. Returns ------- FrequencySeries The amplitude of the waveform as a function of frequency. """ amp = abs(htilde.data).astype(real_same_precision_as(htilde)) return FrequencySeries(amp, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False)
python
{ "resource": "" }
q32008
time_from_frequencyseries
train
def time_from_frequencyseries(htilde, sample_frequencies=None, discont_threshold=0.99*numpy.pi): """Computes time as a function of frequency from the given frequency-domain waveform. This assumes the stationary phase approximation. Any frequencies lower than the first non-zero value in htilde are assigned the time at the first non-zero value. Times for any frequencies above the next-to-last non-zero value in htilde will be assigned the time of the next-to-last non-zero value. .. note:: Some waveform models (e.g., `SEOBNRv2_ROM_DoubleSpin`) can have discontinuities in the phase towards the end of the waveform due to numerical error. We therefore exclude any points that occur after a discontinuity in the phase, as the time estimate becomes untrustworthy beyond that point. What determines a discontinuity in the phase is set by the `discont_threshold`. To turn this feature off, just set `discont_threshold` to a value larger than pi (due to the unwrapping of the phase, no two points can have a difference > pi). Parameters ---------- htilde : FrequencySeries The waveform to get the time evolution of; must be complex. sample_frequencies : {None, array} The frequencies at which the waveform is sampled. If None, will retrieve from ``htilde.sample_frequencies``. discont_threshold : {0.99*pi, float} If the difference in the phase changes by more than this threshold, it is considered to be a discontinuity. Default is 0.99*pi. Returns ------- FrequencySeries The time evolution of the waveform as a function of frequency. """ if sample_frequencies is None: sample_frequencies = htilde.sample_frequencies.numpy() phase = phase_from_frequencyseries(htilde).data dphi = numpy.diff(phase) time = -dphi / (2.*numpy.pi*numpy.diff(sample_frequencies)) nzidx = numpy.nonzero(abs(htilde.data))[0] kmin, kmax = nzidx[0], nzidx[-2] # exclude everything after a discontinuity discont_idx = numpy.where(abs(dphi[kmin:]) >= discont_threshold)[0] if discont_idx.size != 0: kmax = min(kmax, kmin + discont_idx[0]-1) time[:kmin] = time[kmin] time[kmax:] = time[kmax] return FrequencySeries(time.astype(real_same_precision_as(htilde)), delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False)
python
{ "resource": "" }
q32009
phase_from_polarizations
train
def phase_from_polarizations(h_plus, h_cross, remove_start_phase=True): """Return gravitational wave phase Return the gravitational-wave phase from the h_plus and h_cross polarizations of the waveform. The returned phase is always positive and increasing with an initial phase of 0. Parameters ---------- h_plus : TimeSeries A PyCBC TimeSeries vector that contains the plus polarization of the gravitational waveform. h_cross : TimeSeries A PyCBC TimeSeries vector that contains the cross polarization of the gravitational waveform. remove_start_phase : {True, bool} Subtract the initial phase before returning. Returns ------- GWPhase : TimeSeries A TimeSeries containing the gravitational wave phase. Examples -------- >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10, f_lower=30, delta_t=1.0/4096) >>> phase = phase_from_polarizations(hp, hc) """ p = numpy.unwrap(numpy.arctan2(h_cross.data, h_plus.data)).astype( real_same_precision_as(h_plus)) if remove_start_phase: p += -p[0] return TimeSeries(p, delta_t=h_plus.delta_t, epoch=h_plus.start_time, copy=False)
python
{ "resource": "" }
q32010
amplitude_from_polarizations
train
def amplitude_from_polarizations(h_plus, h_cross): """Return gravitational wave amplitude Return the gravitational-wave amplitude from the h_plus and h_cross polarizations of the waveform. Parameters ---------- h_plus : TimeSeries A PyCBC TimeSeries vector that contains the plus polarization of the gravitational waveform. h_cross : TimeSeries A PyCBC TimeSeries vector that contains the cross polarization of the gravitational waveform. Returns ------- GWAmplitude : TimeSeries A TimeSeries containing the gravitational wave amplitude. Examples -------- >>> from pycbc.waveform import get_td_waveform, amplitude_from_polarizations >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10, f_lower=30, delta_t=1.0/4096) >>> amp = amplitude_from_polarizations(hp, hc) """ amp = (h_plus.squared_norm() + h_cross.squared_norm()) ** (0.5) return TimeSeries(amp, delta_t=h_plus.delta_t, epoch=h_plus.start_time)
python
{ "resource": "" }
q32011
frequency_from_polarizations
train
def frequency_from_polarizations(h_plus, h_cross): """Return gravitational wave frequency Return the gravitational-wave frequency as a function of time from the h_plus and h_cross polarizations of the waveform. It is 1 bin shorter than the input vectors and the sample times are advanced half a bin. Parameters ---------- h_plus : TimeSeries A PyCBC TimeSeries vector that contains the plus polarization of the gravitational waveform. h_cross : TimeSeries A PyCBC TimeSeries vector that contains the cross polarization of the gravitational waveform. Returns ------- GWFrequency : TimeSeries A TimeSeries containing the gravitational wave frequency as a function of time. Examples -------- >>> from pycbc.waveform import get_td_waveform, frequency_from_polarizations >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10, f_lower=30, delta_t=1.0/4096) >>> freq = frequency_from_polarizations(hp, hc) """ phase = phase_from_polarizations(h_plus, h_cross) freq = numpy.diff(phase) / ( 2 * lal.PI * phase.delta_t ) start_time = phase.start_time + phase.delta_t / 2 return TimeSeries(freq.astype(real_same_precision_as(h_plus)), delta_t=phase.delta_t, epoch=start_time)
python
{ "resource": "" }
q32012
taper_timeseries
train
def taper_timeseries(tsdata, tapermethod=None, return_lal=False): """ Taper either or both ends of a time series using wrapped LALSimulation functions Parameters ---------- tsdata : TimeSeries Series to be tapered, dtype must be either float32 or float64 tapermethod : string Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END', 'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will not change the series! return_lal : Boolean If True, return a wrapped LAL time series object, else return a PyCBC time series. """ if tapermethod is None: raise ValueError("Must specify a tapering method (function was called " "with tapermethod=None)") if tapermethod not in taper_map.keys(): raise ValueError("Unknown tapering method %s, valid methods are %s" % \ (tapermethod, ", ".join(taper_map.keys()))) if tsdata.dtype not in (float32, float64): raise TypeError("Strain dtype must be float32 or float64, not " + str(tsdata.dtype)) taper_func = taper_func_map[tsdata.dtype] # make a LAL TimeSeries to pass to the LALSim function ts_lal = tsdata.astype(tsdata.dtype).lal() if taper_map[tapermethod] is not None: taper_func(ts_lal.data, taper_map[tapermethod]) if return_lal: return ts_lal else: return TimeSeries(ts_lal.data.data[:], delta_t=ts_lal.deltaT, epoch=ts_lal.epoch)
python
{ "resource": "" }
q32013
apply_fd_time_shift
train
def apply_fd_time_shift(htilde, shifttime, kmin=0, fseries=None, copy=True): """Shifts a frequency domain waveform in time. The shift applied is shifttime - htilde.epoch. Parameters ---------- htilde : FrequencySeries The waveform frequency series. shifttime : float The time to shift the frequency series to. kmin : {0, int} The starting index of htilde to apply the time shift. Default is 0. fseries : {None, numpy array} The frequencies of each element in htilde. This is only needed if htilde is not sampled at equal frequency steps. copy : {True, bool} Make a copy of htilde before applying the time shift. If False, the time shift will be applied to htilde's data. Returns ------- FrequencySeries A frequency series with the waveform shifted to the new time. If copy is True, will be a new frequency series; if copy is False, will be the same as htilde. """ dt = float(shifttime - htilde.epoch) if dt == 0.: # no shift to apply, just copy if desired if copy: htilde = 1. * htilde elif isinstance(htilde, FrequencySeries): # FrequencySeries means equally sampled in frequency, use faster shifting htilde = apply_fseries_time_shift(htilde, dt, kmin=kmin, copy=copy) else: if fseries is None: fseries = htilde.sample_frequencies.numpy() shift = Array(numpy.exp(-2j*numpy.pi*dt*fseries), dtype=complex_same_precision_as(htilde)) if copy: htilde = 1. * htilde htilde *= shift return htilde
python
{ "resource": "" }
q32014
td_taper
train
def td_taper(out, start, end, beta=8, side='left'): """Applies a taper to the given TimeSeries. A half-kaiser window is used for the roll-off. Parameters ---------- out : TimeSeries The ``TimeSeries`` to taper. start : float The time (in s) to start the taper window. end : float The time (in s) to end the taper window. beta : int, optional The beta parameter to use for the Kaiser window. See ``scipy.signal.kaiser`` for details. Default is 8. side : {'left', 'right'} The side to apply the taper to. If ``'left'`` (``'right'``), the taper will roll up (down) between ``start`` and ``end``, with all values before ``start`` (after ``end``) set to zero. Default is ``'left'``. Returns ------- TimeSeries The tapered time series. """ out = out.copy() width = end - start winlen = 2 * int(width / out.delta_t) window = Array(signal.get_window(('kaiser', beta), winlen)) xmin = int((start - out.start_time) / out.delta_t) xmax = xmin + winlen//2 if side == 'left': out[xmin:xmax] *= window[:winlen//2] if xmin > 0: out[:xmin].clear() elif side == 'right': out[xmin:xmax] *= window[winlen//2:] if xmax < len(out): out[xmax:].clear() else: raise ValueError("unrecognized side argument {}".format(side)) return out
python
{ "resource": "" }
q32015
fd_taper
train
def fd_taper(out, start, end, beta=8, side='left'): """Applies a taper to the given FrequencySeries. A half-kaiser window is used for the roll-off. Parameters ---------- out : FrequencySeries The ``FrequencySeries`` to taper. start : float The frequency (in Hz) to start the taper window. end : float The frequency (in Hz) to end the taper window. beta : int, optional The beta parameter to use for the Kaiser window. See ``scipy.signal.kaiser`` for details. Default is 8. side : {'left', 'right'} The side to apply the taper to. If ``'left'`` (``'right'``), the taper will roll up (down) between ``start`` and ``end``, with all values before ``start`` (after ``end``) set to zero. Default is ``'left'``. Returns ------- FrequencySeries The tapered frequency series. """ out = out.copy() width = end - start winlen = 2 * int(width / out.delta_f) window = Array(signal.get_window(('kaiser', beta), winlen)) kmin = int(start / out.delta_f) kmax = kmin + winlen//2 if side == 'left': out[kmin:kmax] *= window[:winlen//2] out[:kmin] *= 0. elif side == 'right': out[kmin:kmax] *= window[winlen//2:] out[kmax:] *= 0. else: raise ValueError("unrecognized side argument {}".format(side)) return out
python
{ "resource": "" }
q32016
fd_to_td
train
def fd_to_td(htilde, delta_t=None, left_window=None, right_window=None, left_beta=8, right_beta=8): """Converts a FD waveform to TD. A window can optionally be applied using ``fd_taper`` to the left or right side of the waveform before being converted to the time domain. Parameters ---------- htilde : FrequencySeries The waveform to convert. delta_t : float, optional Make the returned time series have the given ``delta_t``. left_window : tuple of float, optional A tuple giving the start and end frequency of the FD taper to apply on the left side. If None, no taper will be applied on the left. right_window : tuple of float, optional A tuple giving the start and end frequency of the FD taper to apply on the right side. If None, no taper will be applied on the right. left_beta : int, optional The beta parameter to use for the left taper. See ``fd_taper`` for details. Default is 8. right_beta : int, optional The beta parameter to use for the right taper. Default is 8. Returns ------- TimeSeries The time-series representation of ``htilde``. """ if left_window is not None: start, end = left_window htilde = fd_taper(htilde, start, end, side='left', beta=left_beta) if right_window is not None: start, end = right_window htilde = fd_taper(htilde, start, end, side='right', beta=right_beta) return htilde.to_timeseries(delta_t=delta_t)
python
{ "resource": "" }
q32017
ParameterList.docstr
train
def docstr(self, prefix='', include_label=True): """Returns the ``docstr`` of each parameter joined together.""" return '\n'.join([x.docstr(prefix, include_label) for x in self])
python
{ "resource": "" }
q32018
hist_overflow
train
def hist_overflow(val, val_max, **kwds): """ Make a histogram with an overflow bar above val_max """ import pylab, numpy overflow = len(val[val>=val_max]) pylab.hist(val[val<val_max], **kwds) if 'color' in kwds: color = kwds['color'] else: color = None if overflow > 0: rect = pylab.bar(val_max+0.05, overflow, .5, color=color)[0] pylab.text(rect.get_x(), 1.10*rect.get_height(), '%s+' % val_max)
python
{ "resource": "" }
q32019
setup_psd_workflow
train
def setup_psd_workflow(workflow, science_segs, datafind_outs, output_dir=None, tags=None): ''' Setup static psd section of CBC workflow. At present this only supports pregenerated psd files, in the future these could be created within the workflow. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. science_segs : Keyed dictionary of glue.segmentlist objects scienceSegs[ifo] holds the science segments to be analysed for each ifo. datafind_outs : pycbc.workflow.core.FileList The file list containing the datafind files. output_dir : path string The directory where data products will be placed. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- psd_files : pycbc.workflow.core.FileList The FileList holding the psd files, 0 or 1 per ifo ''' if tags is None: tags = [] logging.info("Entering static psd module.") make_analysis_dir(output_dir) cp = workflow.cp # Parse for options in ini file. try: psdMethod = cp.get_opt_tags("workflow-psd", "psd-method", tags) except Exception: # Pre-generated PSD files are optional, just return an empty list # if none are provided. return FileList([]) if psdMethod == "PREGENERATED_FILE": logging.info("Setting psd from pre-generated file(s).") psd_files = setup_psd_pregenerated(workflow, tags=tags) else: errMsg = "PSD method not recognized. Only " errMsg += "PREGENERATED_FILE is currently supported." raise ValueError(errMsg) logging.info("Leaving psd module.") return psd_files
python
{ "resource": "" }
q32020
generate_inverse_mapping
train
def generate_inverse_mapping(order): """Generate a lambda entry -> PN order map. This function will generate the opposite of generate_mapping. So where generate_mapping gives dict[key] = item this will give dict[item] = key. Valid PN orders are: {} Parameters ---------- order : string A string containing a PN order. Valid values are given above. Returns -------- mapping : dictionary An inverse mapping between the active Lambda terms and index in the metric """ mapping = generate_mapping(order) inv_mapping = {} for key, value in mapping.items(): inv_mapping[value] = key return inv_mapping
python
{ "resource": "" }
q32021
ethinca_order_from_string
train
def ethinca_order_from_string(order): """ Returns the integer giving twice the post-Newtonian order used by the ethinca calculation. Currently valid only for TaylorF2 metric Parameters ---------- order : string Returns ------- int """ if order in get_ethinca_orders().keys(): return get_ethinca_orders()[order] else: raise ValueError("Order "+str(order)+" is not valid for ethinca " "calculation! Valid orders: "+ str(get_ethinca_orders().keys()))
python
{ "resource": "" }
q32022
compile
train
def compile(source, name): """ Compile the string source code into a shared object linked against the static version of cufft for callback support. """ cache = os.path.join(pycbc._cache_dir_path, name) hash_file = cache + ".hash" lib_file = cache + ".so" obj_file = cache + ".o" try: if int(open(hash_file, "r").read()) == hash(source): return lib_file raise ValueError except: pass src_file = cache + ".cu" fsrc = open(src_file, "w") fsrc.write(source) fsrc.close() cmd = ["nvcc", "-ccbin", "g++", "-dc", "-m64", "--compiler-options", "'-fPIC'", "-o", obj_file, "-c", src_file] print(" ".join(cmd)) subprocess.check_call(cmd) cmd = ["nvcc", "-shared", "-ccbin", "g++", "-m64", "-o", lib_file, obj_file, "-lcufft_static", "-lculibos"] print(" ".join(cmd)) subprocess.check_call(cmd) hash_file = cache + ".hash" fhash = open(hash_file, "w") fhash.write(str(hash(source))) return lib_file
python
{ "resource": "" }
q32023
get_fn_plan
train
def get_fn_plan(callback=None, out_callback=None, name='pycbc_cufft', parameters=None): """ Get the IFFT execute and plan functions """ if parameters is None: parameters = [] source = fftsrc.render(input_callback=callback, output_callback=out_callback, parameters=parameters) path = compile(source, name) lib = ctypes.cdll.LoadLibrary(path) fn = lib.execute fn.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] plan = lib.create_plan plan.restype = ctypes.c_void_p plan.argtypes = [ctypes.c_uint] return fn, plan
python
{ "resource": "" }
q32024
determine_eigen_directions
train
def determine_eigen_directions(metricParams, preserveMoments=False, vary_fmax=False, vary_density=None): """ This function will calculate the coordinate transformations that are needed to rotate from a coordinate system described by the various Lambda components in the frequency expansion, to a coordinate system where the metric is Cartesian. Parameters ----------- metricParams : metricParameters instance Structure holding all the options for construction of the metric. preserveMoments : boolean, optional (default False) Currently only used for debugging. If this is given then if the moments structure is already set within metricParams then they will not be recalculated. vary_fmax : boolean, optional (default False) If set to False the metric and rotations are calculated once, for the full range of frequency [f_low,f_upper). If set to True the metric and rotations are calculated multiple times, for frequency ranges [f_low,f_low + i*vary_density), where i starts at 1 and runs up until f_low + (i+1)*vary_density > f_upper. Thus values greater than f_upper are *not* computed. The calculation for the full range [f_low,f_upper) is also done. vary_density : float, optional If vary_fmax is True, this will be used in computing the frequency ranges as described for vary_fmax. Returns -------- metricParams : metricParameters instance Structure holding all the options for construction of the metric. **THIS FUNCTION ONLY RETURNS THE CLASS** The following will be **added** to this structure metricParams.evals : Dictionary of numpy.array Each entry in the dictionary corresponds to the different frequency ranges described in vary_fmax. If vary_fmax = False, the only entry will be f_upper, this corresponds to integrals in [f_low,f_upper). This entry is always present. Each other entry will use floats as keys to the dictionary. These floats give the upper frequency cutoff when it is varying. Each numpy.array contains the eigenvalues which, with the eigenvectors in evecs, are needed to rotate the coordinate system to one in which the metric is the identity matrix. metricParams.evecs : Dictionary of numpy.matrix Each entry in the dictionary is as described under evals. Each numpy.matrix contains the eigenvectors which, with the eigenvalues in evals, are needed to rotate the coordinate system to one in which the metric is the identity matrix. metricParams.metric : Dictionary of numpy.matrix Each entry in the dictionary is as described under evals. Each numpy.matrix contains the metric of the parameter space in the Lambda_i coordinate system. metricParams.moments : Moments structure See the structure documentation for a description of this. This contains the result of all the integrals used in computing the metrics above. It can be used for the ethinca components calculation, or other similar calculations. """ evals = {} evecs = {} metric = {} unmax_metric = {} # First step is to get the moments needed to calculate the metric if not (metricParams.moments and preserveMoments): get_moments(metricParams, vary_fmax=vary_fmax, vary_density=vary_density) # What values are going to be in the moments # J7 is the normalization factor so it *MUST* be present freq_cutoffs = list(metricParams.moments['J7'].keys()) # We start looping over every item in the list of metrics for item in freq_cutoffs: # Here we convert the moments into a form easier to use here Js = {} for i in range(-7,18): Js[i] = metricParams.moments['J%d'%(i)][item] logJs = {} for i in range(-1,18): logJs[i] = metricParams.moments['log%d'%(i)][item] loglogJs = {} for i in range(-1,18): loglogJs[i] = metricParams.moments['loglog%d'%(i)][item] logloglogJs = {} for i in range(-1,18): logloglogJs[i] = metricParams.moments['logloglog%d'%(i)][item] loglogloglogJs = {} for i in range(-1,18): loglogloglogJs[i] = metricParams.moments['loglogloglog%d'%(i)][item] mapping = generate_mapping(metricParams.pnOrder) # Calculate the metric gs, unmax_metric_curr = calculate_metric(Js, logJs, loglogJs, logloglogJs, loglogloglogJs, mapping) metric[item] = numpy.matrix(gs) unmax_metric[item] = unmax_metric_curr # And the eigenvalues evals[item],evecs[item] = numpy.linalg.eig(gs) # Numerical error can lead to small negative eigenvalues. for i in range(len(evals[item])): if evals[item][i] < 0: # Due to numerical imprecision the very small eigenvalues can # be negative. Make these positive. evals[item][i] = -evals[item][i] if evecs[item][i,i] < 0: # We demand a convention that all diagonal terms in the matrix # of eigenvalues are positive. # This is done to help visualization of the spaces (increasing # mchirp always goes the same way) evecs[item][:,i] = - evecs[item][:,i] metricParams.evals = evals metricParams.evecs = evecs metricParams.metric = metric metricParams.time_unprojected_metric = unmax_metric return metricParams
python
{ "resource": "" }
q32025
interpolate_psd
train
def interpolate_psd(psd_f, psd_amp, deltaF): """ Function to interpolate a PSD to a different value of deltaF. Uses linear interpolation. Parameters ---------- psd_f : numpy.array or list or similar List of the frequencies contained within the PSD. psd_amp : numpy.array or list or similar List of the PSD values at the frequencies in psd_f. deltaF : float Value of deltaF to interpolate the PSD to. Returns -------- new_psd_f : numpy.array Array of the frequencies contained within the interpolated PSD new_psd_amp : numpy.array Array of the interpolated PSD values at the frequencies in new_psd_f. """ # In some cases this will be a no-op. I thought about removing this, but # this function can take unequally sampled PSDs and it is difficult to # check for this. As this function runs quickly anyway (compared to the # moment calculation) I decided to always interpolate. new_psd_f = [] new_psd_amp = [] fcurr = psd_f[0] for i in range(len(psd_f) - 1): f_low = psd_f[i] f_high = psd_f[i+1] amp_low = psd_amp[i] amp_high = psd_amp[i+1] while(1): if fcurr > f_high: break new_psd_f.append(fcurr) gradient = (amp_high - amp_low) / (f_high - f_low) fDiff = fcurr - f_low new_psd_amp.append(amp_low + fDiff * gradient) fcurr = fcurr + deltaF return numpy.asarray(new_psd_f), numpy.asarray(new_psd_amp)
python
{ "resource": "" }
q32026
calculate_moment
train
def calculate_moment(psd_f, psd_amp, fmin, fmax, f0, funct, norm=None, vary_fmax=False, vary_density=None): """ Function for calculating one of the integrals used to construct a template bank placement metric. The integral calculated will be \int funct(x) * (psd_x)**(-7./3.) * delta_x / PSD(x) where x = f / f0. The lower frequency cutoff is given by fmin, see the parameters below for details on how the upper frequency cutoff is chosen Parameters ----------- psd_f : numpy.array numpy array holding the set of evenly spaced frequencies used in the PSD psd_amp : numpy.array numpy array holding the PSD values corresponding to the psd_f frequencies fmin : float The lower frequency cutoff used in the calculation of the integrals used to obtain the metric. fmax : float The upper frequency cutoff used in the calculation of the integrals used to obtain the metric. This can be varied (see the vary_fmax option below). f0 : float This is an arbitrary scaling factor introduced to avoid the potential for numerical overflow when calculating this. Generally the default value (70) is safe here. **IMPORTANT, if you want to calculate the ethinca metric components later this MUST be set equal to f_low.** funct : Lambda function The function to use when computing the integral as described above. norm : Dictionary of floats If given then moment[f_cutoff] will be divided by norm[f_cutoff] vary_fmax : boolean, optional (default False) If set to False the metric and rotations are calculated once, for the full range of frequency [f_low,f_upper). If set to True the metric and rotations are calculated multiple times, for frequency ranges [f_low,f_low + i*vary_density), where i starts at 1 and runs up until f_low + (i+1)*vary_density > f_upper. Thus values greater than f_upper are *not* computed. The calculation for the full range [f_low,f_upper) is also done. vary_density : float, optional If vary_fmax is True, this will be used in computing the frequency ranges as described for vary_fmax. Returns -------- moment : Dictionary of floats moment[f_cutoff] will store the value of the moment at the frequency cutoff given by f_cutoff. """ # Must ensure deltaF in psd_f is constant psd_x = psd_f / f0 deltax = psd_x[1] - psd_x[0] mask = numpy.logical_and(psd_f > fmin, psd_f < fmax) psdf_red = psd_f[mask] comps_red = psd_x[mask] ** (-7./3.) * funct(psd_x[mask], f0) * deltax / \ psd_amp[mask] moment = {} moment[fmax] = comps_red.sum() if norm: moment[fmax] = moment[fmax] / norm[fmax] if vary_fmax: for t_fmax in numpy.arange(fmin + vary_density, fmax, vary_density): moment[t_fmax] = comps_red[psdf_red < t_fmax].sum() if norm: moment[t_fmax] = moment[t_fmax] / norm[t_fmax] return moment
python
{ "resource": "" }
q32027
calculate_metric
train
def calculate_metric(Js, logJs, loglogJs, logloglogJs, loglogloglogJs, \ mapping): """ This function will take the various integrals calculated by get_moments and convert this into a metric for the appropriate parameter space. Parameters ----------- Js : Dictionary The list of (log^0 x) * x**(-i/3) integrals computed by get_moments() The index is Js[i] logJs : Dictionary The list of (log^1 x) * x**(-i/3) integrals computed by get_moments() The index is logJs[i] loglogJs : Dictionary The list of (log^2 x) * x**(-i/3) integrals computed by get_moments() The index is loglogJs[i] logloglogJs : Dictionary The list of (log^3 x) * x**(-i/3) integrals computed by get_moments() The index is logloglogJs[i] loglogloglogJs : Dictionary The list of (log^4 x) * x**(-i/3) integrals computed by get_moments() The index is loglogloglogJs[i] mapping : dictionary Used to identify which Lambda components are active in this parameter space and map these to entries in the metric matrix. Returns -------- metric : numpy.matrix The resulting metric. """ # How many dimensions in the parameter space? maxLen = len(mapping.keys()) metric = numpy.matrix(numpy.zeros(shape=(maxLen,maxLen),dtype=float)) unmax_metric = numpy.matrix(numpy.zeros(shape=(maxLen+1,maxLen+1), dtype=float)) for i in range(16): for j in range(16): calculate_metric_comp(metric, unmax_metric, i, j, Js, logJs, loglogJs, logloglogJs, loglogloglogJs, mapping) return metric, unmax_metric
python
{ "resource": "" }
q32028
bank_bins_from_cli
train
def bank_bins_from_cli(opts): """ Parses the CLI options related to binning templates in the bank. Parameters ---------- opts : object Result of parsing the CLI with OptionParser. Returns ------- bins_idx : dict A dict with bin names as key and an array of their indices as value. bank : dict A dict of the datasets from the bank file. """ bank = {} fp = h5py.File(opts.bank_file, "r") for key in fp.keys(): bank[key] = fp[key][:] bank["f_lower"] = float(opts.f_lower) if opts.f_lower else None if opts.bank_bins: bins_idx = coinc.background_bin_from_string(opts.bank_bins, bank) else: bins_idx = {"all" : numpy.arange(0, len(bank[list(fp.keys())[0]]))} fp.close() return bins_idx, bank
python
{ "resource": "" }
q32029
get_found_param
train
def get_found_param(injfile, bankfile, trigfile, param, ifo, args=None): """ Translates some popular trigger parameters into functions that calculate them from an hdf found injection file Parameters ---------- injfile: hdf5 File object Injection file of format known to ANitz (DOCUMENTME) bankfile: hdf5 File object or None Template bank file trigfile: hdf5 File object or None Single-detector trigger file param: string Parameter to be calculated for the recovered triggers ifo: string or None Standard ifo name, ex. 'L1' args : Namespace object returned from ArgumentParser instance Calling code command line options, used for f_lower value Returns ------- [return value]: NumPy array of floats The calculated parameter values """ foundtmp = injfile["found_after_vetoes/template_id"][:] if trigfile is not None: # get the name of the ifo in the injection file, eg "detector_1" # and the integer from that name ifolabel = [name for name, val in injfile.attrs.items() if \ "detector" in name and val == ifo][0] foundtrg = injfile["found_after_vetoes/trigger_id" + ifolabel[-1]] if bankfile is not None and param in bankfile.keys(): return bankfile[param][:][foundtmp] elif trigfile is not None and param in trigfile[ifo].keys(): return trigfile[ifo][param][:][foundtrg] else: b = bankfile return get_param(param, args, b['mass1'][:], b['mass2'][:], b['spin1z'][:], b['spin2z'][:])[foundtmp]
python
{ "resource": "" }
q32030
get_inj_param
train
def get_inj_param(injfile, param, ifo, args=None): """ Translates some popular injection parameters into functions that calculate them from an hdf found injection file Parameters ---------- injfile: hdf5 File object Injection file of format known to ANitz (DOCUMENTME) param: string Parameter to be calculated for the injected signals ifo: string Standard detector name, ex. 'L1' args: Namespace object returned from ArgumentParser instance Calling code command line options, used for f_lower value Returns ------- [return value]: NumPy array of floats The calculated parameter values """ det = pycbc.detector.Detector(ifo) inj = injfile["injections"] if param in inj.keys(): # inj is already the injections group, so index it directly return inj[param][:] if param == "end_time_"+ifo[0].lower(): return inj['end_time'][:] + det.time_delay_from_earth_center( inj['longitude'][:], inj['latitude'][:], inj['end_time'][:]) else: return get_param(param, args, inj['mass1'][:], inj['mass2'][:], inj['spin1z'][:], inj['spin2z'][:])
python
{ "resource": "" }
q32031
ns_g_mass_to_ns_b_mass
train
def ns_g_mass_to_ns_b_mass(ns_g_mass, ns_sequence): """ Determines the baryonic mass of an NS given its gravitational mass and an NS equilibrium sequence. Parameters ----------- ns_g_mass: float NS gravitational mass (in solar masses) ns_sequence: 3D-array contains the sequence data in the form NS gravitational mass (in solar masses), NS baryonic mass (in solar masses), NS compactness (dimensionless) Returns ---------- float The NS baryonic mass (in solar masses) """ x = ns_sequence[:,0] y = ns_sequence[:,1] f = scipy.interpolate.interp1d(x, y) return f(ns_g_mass)
python
{ "resource": "" }
q32032
low_frequency_cutoff_from_cli
train
def low_frequency_cutoff_from_cli(opts): """Parses the low frequency cutoff from the given options. Returns ------- dict Dictionary of instruments -> low frequency cutoff. """ # FIXME: this just uses the same frequency cutoff for every instrument for # now. We should allow for different frequency cutoffs to be used; that # will require (minor) changes to the Likelihood class instruments = opts.instruments if opts.instruments is not None else [] return {ifo: opts.low_frequency_cutoff for ifo in instruments}
python
{ "resource": "" }
q32033
data_from_cli
train
def data_from_cli(opts): """Loads the data needed for a model from the given command-line options. Gates specified on the command line are also applied. Parameters ---------- opts : ArgumentParser parsed args Argument options parsed from a command line string (the sort of thing returned by `parser.parse_args`). Returns ------- strain_dict : dict Dictionary of instruments -> `TimeSeries` strain. stilde_dict : dict Dictionary of instruments -> `FrequencySeries` strain. psd_dict : dict Dictionary of instruments -> `FrequencySeries` psds. """ # get gates to apply gates = gates_from_cli(opts) psd_gates = psd_gates_from_cli(opts) # get strain time series instruments = opts.instruments if opts.instruments is not None else [] strain_dict = strain_from_cli_multi_ifos(opts, instruments, precision="double") # apply gates if not waiting to overwhiten if not opts.gate_overwhitened: strain_dict = apply_gates_to_td(strain_dict, gates) # get strain time series to use for PSD estimation # if user has not given the PSD time options then use same data as analysis if opts.psd_start_time and opts.psd_end_time: logging.info("Will generate a different time series for PSD " "estimation") psd_opts = opts psd_opts.gps_start_time = psd_opts.psd_start_time psd_opts.gps_end_time = psd_opts.psd_end_time psd_strain_dict = strain_from_cli_multi_ifos(psd_opts, instruments, precision="double") # apply any gates logging.info("Applying gates to PSD data") psd_strain_dict = apply_gates_to_td(psd_strain_dict, psd_gates) elif opts.psd_start_time or opts.psd_end_time: raise ValueError("Must give both --psd-start-time and --psd-end-time") else: psd_strain_dict = strain_dict # FFT strain and save each of the length of the FFT, delta_f, and # low frequency cutoff to a dict stilde_dict = {} length_dict = {} delta_f_dict = {} low_frequency_cutoff_dict = low_frequency_cutoff_from_cli(opts) for ifo in instruments: stilde_dict[ifo] = strain_dict[ifo].to_frequencyseries() length_dict[ifo] = len(stilde_dict[ifo]) delta_f_dict[ifo] = stilde_dict[ifo].delta_f # get PSD as frequency series psd_dict = psd_from_cli_multi_ifos( opts, length_dict, delta_f_dict, low_frequency_cutoff_dict, instruments, strain_dict=psd_strain_dict, precision="double") # apply any gates to overwhitened data, if desired if opts.gate_overwhitened and opts.gate is not None: logging.info("Applying gates to overwhitened data") # overwhiten the data for ifo in gates: stilde_dict[ifo] /= psd_dict[ifo] stilde_dict = apply_gates_to_fd(stilde_dict, gates) # unwhiten the data for the model for ifo in gates: stilde_dict[ifo] *= psd_dict[ifo] return strain_dict, stilde_dict, psd_dict
python
{ "resource": "" }
q32034
add_plot_posterior_option_group
train
def add_plot_posterior_option_group(parser): """Adds the options needed to configure plots of posterior results. Parameters ---------- parser : object ArgumentParser instance. """ pgroup = parser.add_argument_group("Options for what plots to create and " "their formats.") pgroup.add_argument('--plot-marginal', action='store_true', default=False, help="Plot 1D marginalized distributions on the " "diagonal axes.") pgroup.add_argument('--marginal-percentiles', nargs='+', default=None, type=float, help="Percentiles to draw lines at on the 1D " "histograms.") pgroup.add_argument("--plot-scatter", action='store_true', default=False, help="Plot each sample point as a scatter plot.") pgroup.add_argument("--plot-density", action="store_true", default=False, help="Plot the posterior density as a color map.") pgroup.add_argument("--plot-contours", action="store_true", default=False, help="Draw contours showing the 50th and 90th " "percentile confidence regions.") pgroup.add_argument('--contour-percentiles', nargs='+', default=None, type=float, help="Percentiles to draw contours if different " "than 50th and 90th.") # add mins, maxs options pgroup.add_argument('--mins', nargs='+', metavar='PARAM:VAL', default=[], help="Specify minimum parameter values to plot. This " "should be done by specifying the parameter name " "followed by the value. Parameter names must be " "the same as the PARAM argument in --parameters " "(or, if no parameters are provided, the same as " "the parameter name specified in the variable " "args in the input file). If none provided, " "the smallest parameter value in the posterior " "will be used.") pgroup.add_argument('--maxs', nargs='+', metavar='PARAM:VAL', default=[], help="Same as mins, but for the maximum values to " "plot.") # add expected parameters options pgroup.add_argument('--expected-parameters', nargs='+', metavar='PARAM:VAL', default=[], help="Specify expected parameter values to plot. If " "provided, a cross will be plotted in each axis " "that an expected parameter is provided. " "Parameter names must be " "the same as the PARAM argument in --parameters " "(or, if no parameters are provided, the same as " "the parameter name specified in the variable " "args in the input file).") pgroup.add_argument('--expected-parameters-color', default='r', help="What to color the expected-parameters cross. " "Default is red.") pgroup.add_argument('--plot-injection-parameters', action='store_true', default=False, help="Get the expected parameters from the injection " "in the input file. There must be only a single " "injection in the file to work. Any values " "specified by expected-parameters will override " "the values obtained for the injection.") return pgroup
python
{ "resource": "" }
q32035
plot_ranges_from_cli
train
def plot_ranges_from_cli(opts): """Parses the mins and maxs arguments from the `plot_posterior` option group. Parameters ---------- opts : ArgumentParser The parsed arguments from the command line. Returns ------- mins : dict Dictionary of parameter name -> specified mins. Only parameters that were specified in the --mins option will be included; if no parameters were provided, will return an empty dictionary. maxs : dict Dictionary of parameter name -> specified maxs. Only parameters that were specified in the --maxs option will be included; if no parameters were provided, will return an empty dictionary. """ mins = {} for x in opts.mins: x = x.split(':') if len(x) != 2: raise ValueError("option --mins not specified correctly; see help") mins[x[0]] = float(x[1]) maxs = {} for x in opts.maxs: x = x.split(':') if len(x) != 2: raise ValueError("option --maxs not specified correctly; see help") maxs[x[0]] = float(x[1]) return mins, maxs
python
{ "resource": "" }
q32036
add_scatter_option_group
train
def add_scatter_option_group(parser): """Adds the options needed to configure scatter plots. Parameters ---------- parser : object ArgumentParser instance. """ scatter_group = parser.add_argument_group("Options for configuring the " "scatter plot.") scatter_group.add_argument( '--z-arg', type=str, default=None, action=ParseParametersArg, help='What to color the scatter points by. Syntax is the same as the ' 'parameters option.') scatter_group.add_argument( "--vmin", type=float, help="Minimum value for the colorbar.") scatter_group.add_argument( "--vmax", type=float, help="Maximum value for the colorbar.") scatter_group.add_argument( "--scatter-cmap", type=str, default='plasma', help="Specify the colormap to use for points. Default is plasma.") return scatter_group
python
{ "resource": "" }
q32037
add_density_option_group
train
def add_density_option_group(parser): """Adds the options needed to configure contours and density colour map. Parameters ---------- parser : object ArgumentParser instance. """ density_group = parser.add_argument_group("Options for configuring the " "contours and density color map") density_group.add_argument( "--density-cmap", type=str, default='viridis', help="Specify the colormap to use for the density. " "Default is viridis.") density_group.add_argument( "--contour-color", type=str, default=None, help="Specify the color to use for the contour lines. Default is " "white for density plots and black for scatter plots.") density_group.add_argument( '--use-kombine-kde', default=False, action="store_true", help="Use kombine's KDE for determining contours. " "Default is to use scipy's gaussian_kde.") return density_group
python
{ "resource": "" }
q32038
prior_from_config
train
def prior_from_config(cp, prior_section='prior'): """Loads a prior distribution from the given config file. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser The config file to read. prior_section : str, optional The section to retrieve the prior from. Default is 'prior'. Returns ------- distributions.JointDistribution The prior distribution. """ # Read variable and static parameters from the config file variable_params, _ = distributions.read_params_from_config( cp, prior_section=prior_section, vargs_section='variable_params', sargs_section='static_params') # Read constraints to apply to priors from the config file constraints = distributions.read_constraints_from_config(cp) # Get PyCBC distribution instances for each variable parameter in the # config file dists = distributions.read_distributions_from_config(cp, prior_section) # construct class that will return draws from the prior return distributions.JointDistribution(variable_params, *dists, **{"constraints": constraints})
python
{ "resource": "" }
q32039
compute_search_efficiency_in_bins
train
def compute_search_efficiency_in_bins( found, total, ndbins, sim_to_bins_function=lambda sim: (sim.distance,)): """ Calculate search efficiency in the given ndbins. The first dimension of ndbins must be bins over injected distance. sim_to_bins_function must map an object to a tuple indexing the ndbins. """ bins = bin_utils.BinnedRatios(ndbins) # increment the numerator and denominator with found / found+missed injs for sim in found: bins.incnumerator(sim_to_bins_function(sim)) for sim in total: bins.incdenominator(sim_to_bins_function(sim)) # regularize by setting denoms to 1 to avoid nans bins.regularize() # efficiency array is the ratio eff = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=bins.ratio()) # compute binomial uncertainties in each bin err_arr = numpy.sqrt(eff.array * (1-eff.array)/bins.denominator.array) err = bin_utils.BinnedArray(bin_utils.NDBins(ndbins), array=err_arr) return eff, err
python
{ "resource": "" }
q32040
compute_search_volume_in_bins
train
def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function): """ Calculate search sensitive volume by integrating efficiency in distance bins No cosmological corrections are applied: flat space is assumed. The first dimension of ndbins must be bins over injected distance. sim_to_bins_function must map an object to a tuple indexing the ndbins. """ eff, err = compute_search_efficiency_in_bins( found, total, ndbins, sim_to_bins_function) dx = ndbins[0].upper() - ndbins[0].lower() r = ndbins[0].centres() # volume and errors have one fewer dimension than the input NDBins vol = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:])) errors = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:])) # integrate efficiency to obtain volume vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx) # propagate errors in eff to errors in V errors.array = numpy.sqrt( ((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(axis=-1) ) return vol, errors
python
{ "resource": "" }
q32041
volume_to_distance_with_errors
train
def volume_to_distance_with_errors(vol, vol_err): """ Return the distance and standard deviation upper and lower bounds Parameters ---------- vol: float vol_err: float Returns ------- dist: float ehigh: float elow: float """ dist = (vol * 3.0/4.0/numpy.pi) ** (1.0/3.0) ehigh = ((vol + vol_err) * 3.0/4.0/numpy.pi) ** (1.0/3.0) - dist delta = numpy.where(vol >= vol_err, vol - vol_err, 0) elow = dist - (delta * 3.0/4.0/numpy.pi) ** (1.0/3.0) return dist, ehigh, elow
python
{ "resource": "" }
q32042
volume_montecarlo
train
def volume_montecarlo(found_d, missed_d, found_mchirp, missed_mchirp, distribution_param, distribution, limits_param, min_param=None, max_param=None): """ Compute sensitive volume and standard error via direct Monte Carlo integral Injections should be made over a range of distances such that sensitive volume due to signals closer than D_min is negligible, and efficiency at distances above D_max is negligible TODO : Replace this function by Collin's formula given in Usman et al .. ? OR get that coded as a new function? Parameters ----------- found_d: numpy.ndarray The distances of found injections missed_d: numpy.ndarray The distances of missed injections found_mchirp: numpy.ndarray Chirp mass of found injections missed_mchirp: numpy.ndarray Chirp mass of missed injections distribution_param: string Parameter D of the injections used to generate a distribution over distance, may be 'distance', 'chirp_distance'. distribution: string form of the distribution over the parameter, may be 'log' (uniform in log D) 'uniform' (uniform in D) 'distancesquared' (uniform in D**2) 'volume' (uniform in D**3) limits_param: string Parameter Dlim specifying limits inside which injections were made may be 'distance', 'chirp_distance' min_param: float minimum value of Dlim at which injections were made; only used for log distribution, then if None the minimum actually injected value will be used max_param: float maximum value of Dlim out to which injections were made; if None the maximum actually injected value will be used Returns -------- volume: float Volume estimate volume_error: float The standard error in the volume """ d_power = { 'log' : 3., 'uniform' : 2., 'distancesquared' : 1., 'volume' : 0. }[distribution] mchirp_power = { 'log' : 0., 'uniform' : 5. / 6., 'distancesquared' : 5. / 3., 'volume' : 15. / 6. }[distribution] # establish maximum physical distance: first for chirp distance distribution if limits_param == 'chirp_distance': mchirp_standard_bns = 1.4 * 2.**(-1. / 5.) all_mchirp = numpy.concatenate((found_mchirp, missed_mchirp)) max_mchirp = all_mchirp.max() if max_param is not None: # use largest actually injected mchirp for conversion max_distance = max_param * (max_mchirp / mchirp_standard_bns)**(5. / 6.) else: max_distance = max(found_d.max(), missed_d.max()) elif limits_param == 'distance': if max_param is not None: max_distance = max_param else: # if no max distance given, use max distance actually injected max_distance = max(found_d.max(), missed_d.max()) else: raise NotImplementedError("%s is not a recognized parameter" % limits_param) # volume of sphere montecarlo_vtot = (4. / 3.) * numpy.pi * max_distance**3. # arrays of weights for the MC integral if distribution_param == 'distance': found_weights = found_d ** d_power missed_weights = missed_d ** d_power elif distribution_param == 'chirp_distance': # weight by a power of mchirp to rescale injection density to the # target mass distribution found_weights = found_d ** d_power * found_mchirp ** mchirp_power missed_weights = missed_d ** d_power * missed_mchirp ** mchirp_power else: raise NotImplementedError("%s is not a recognized distance parameter" % distribution_param) all_weights = numpy.concatenate((found_weights, missed_weights)) # measured weighted efficiency is w_i for a found inj and 0 for missed # MC integral is volume of sphere * (sum of found weights)/(sum of all weights) # over injections covering the sphere mc_weight_samples = numpy.concatenate((found_weights, 0 * missed_weights)) mc_sum = sum(mc_weight_samples) if limits_param == 'distance': mc_norm = sum(all_weights) elif limits_param == 'chirp_distance': # if injections are made up to a maximum chirp distance, account for # extra missed injections that would occur when injecting up to # maximum physical distance : this works out to a 'chirp volume' factor mc_norm = sum(all_weights * (max_mchirp / all_mchirp) ** (5. / 2.)) # take out a constant factor mc_prefactor = montecarlo_vtot / mc_norm # count the samples if limits_param == 'distance': Ninj = len(mc_weight_samples) elif limits_param == 'chirp_distance': # find the total expected number after extending from maximum chirp # dist up to maximum physical distance if distribution == 'log': # only need minimum distance in this one case if min_param is not None: min_distance = min_param * (numpy.min(all_mchirp) / mchirp_standard_bns) ** (5. / 6.) else: min_distance = min(numpy.min(found_d), numpy.min(missed_d)) logrange = numpy.log(max_distance / min_distance) Ninj = len(mc_weight_samples) + (5. / 6.) * sum(numpy.log(max_mchirp / all_mchirp) / logrange) else: Ninj = sum((max_mchirp / all_mchirp) ** mchirp_power) # sample variance of efficiency: mean of the square - square of the mean mc_sample_variance = sum(mc_weight_samples ** 2.) / Ninj - (mc_sum / Ninj) ** 2. # return MC integral and its standard deviation; variance of mc_sum scales # relative to sample variance by Ninj (Bienayme' rule) vol = mc_prefactor * mc_sum vol_err = mc_prefactor * (Ninj * mc_sample_variance) ** 0.5 return vol, vol_err
python
{ "resource": "" }
q32043
volume_binned_pylal
train
def volume_binned_pylal(f_dist, m_dist, bins=15): """ Compute the sensitive volume using a distance binned efficiency estimate Parameters ----------- f_dist: numpy.ndarray The distances of found injections m_dist: numpy.ndarray The distances of missed injections bins: int, optional Number of linear distance bins to use for the efficiency estimate (default 15) Returns -------- volume: float Volume estimate volume_error: float The standard error in the volume """ def sims_to_bin(sim): return (sim, 0) total = numpy.concatenate([f_dist, m_dist]) ndbins = bin_utils.NDBins([bin_utils.LinearBins(min(total), max(total), bins), bin_utils.LinearBins(0., 1, 1)]) vol, verr = compute_search_volume_in_bins(f_dist, total, ndbins, sims_to_bin) return vol.array[0], verr.array[0]
python
{ "resource": "" }
q32044
volume_shell
train
def volume_shell(f_dist, m_dist): """ Compute the sensitive volume using sum over spherical shells. Parameters ----------- f_dist: numpy.ndarray The distances of found injections m_dist: numpy.ndarray The distances of missed injections Returns -------- volume: float Volume estimate volume_error: float The standard error in the volume """ f_dist.sort() m_dist.sort() distances = numpy.concatenate([f_dist, m_dist]) dist_sorting = distances.argsort() distances = distances[dist_sorting] low = 0 vol = 0 vol_err = 0 for i in range(len(distances)): if i == len(distances) - 1: break high = (distances[i+1] + distances[i]) / 2 bin_width = high - low if dist_sorting[i] < len(f_dist): vol += 4 * numpy.pi * distances[i]**2.0 * bin_width vol_err += (4 * numpy.pi * distances[i]**2.0 * bin_width)**2.0 low = high vol_err = vol_err ** 0.5 return vol, vol_err
python
{ "resource": "" }
q32045
raw_samples_to_dict
train
def raw_samples_to_dict(sampler, raw_samples): """Convenience function for converting ND array to a dict of samples. The samples are assumed to have dimension ``[sampler.base_shape x] niterations x len(sampler.sampling_params)``. Parameters ---------- sampler : sampler instance An instance of an MCMC sampler. raw_samples : array The array of samples to convert. Returns ------- dict : A dictionary mapping the raw samples to the variable params. If the sampling params are not the same as the variable params, they will also be included. Each array will have shape ``[sampler.base_shape x] niterations``. """ sampling_params = sampler.sampling_params # convert to dictionary samples = {param: raw_samples[..., ii] for ii, param in enumerate(sampling_params)} # apply boundary conditions samples = sampler.model.prior_distribution.apply_boundary_conditions( **samples) # apply transforms to go to model's variable params space if sampler.model.sampling_transforms is not None: samples = sampler.model.sampling_transforms.apply( samples, inverse=True) return samples
python
{ "resource": "" }
q32046
blob_data_to_dict
train
def blob_data_to_dict(stat_names, blobs): """Converts list of "blobs" to a dictionary of model stats. Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to a list called blobs. This is a list of lists of tuples with shape niterations x nwalkers x nstats, where nstats is the number of stats returned by the model's ``default_stats``. This converts that list to a dictionary of arrays keyed by the stat names. Parameters ---------- stat_names : list of str The list of the stat names. blobs : list of list of tuples The data to convert. Returns ------- dict : A dictionary mapping the model's ``default_stats`` to arrays of values. Each array will have shape ``nwalkers x niterations``. """ # get the dtypes of each of the stats; we'll just take this from the # first iteration and walker dtypes = [type(val) for val in blobs[0][0]] assert len(stat_names) == len(dtypes), ( "number of stat names must match length of tuples in the blobs") # convert to an array; to ensure that we get the dtypes correct, we'll # cast to a structured array; wrap zip in list so this also works on # Python 3, where zip returns an iterator raw_stats = numpy.array(blobs, dtype=list(zip(stat_names, dtypes))) # transpose so that it has shape nwalkers x niterations raw_stats = raw_stats.transpose() # now return as a dictionary return {stat: raw_stats[stat] for stat in stat_names}
python
{ "resource": "" }
q32047
get_optional_arg_from_config
train
def get_optional_arg_from_config(cp, section, arg, dtype=str): """Convenience function to retrieve an optional argument from a config file. Parameters ---------- cp : ConfigParser Open config parser to retrieve the argument from. section : str Name of the section to retrieve from. arg : str Name of the argument to retrieve. dtype : datatype, optional Cast the retrieved value (if it exists) to the given datatype. Default is ``str``. Returns ------- val : None or str If the argument is present, the value. Otherwise, None. """ if cp.has_option(section, arg): val = dtype(cp.get(section, arg)) else: val = None return val
python
{ "resource": "" }
q32048
BaseMCMC.niterations
train
def niterations(self): """The current number of iterations.""" itercounter = self._itercounter if itercounter is None: itercounter = 0 lastclear = self._lastclear if lastclear is None: lastclear = 0 return itercounter + lastclear
python
{ "resource": "" }
q32049
BaseMCMC.get_thin_interval
train
def get_thin_interval(self): """Gets the thin interval to use. If ``max_samples_per_chain`` is set, this will figure out what thin interval is needed to satisfy that criterion. In that case, the thin interval used must be a multiple of the currently used thin interval. """ if self.max_samples_per_chain is not None: # the extra factor of 2 is to account for the fact that the thin # interval will need to be at least twice as large as a previously # used interval thinfactor = 2 * self.niterations // self.max_samples_per_chain # make sure the new interval is a multiple of the previous, to ensure # that any samples currently on disk can be thinned accordingly thin_interval = (thinfactor // self.thin_interval) * \ self.thin_interval # make sure it's at least 1 thin_interval = max(thin_interval, 1) else: thin_interval = self.thin_interval return thin_interval
python
{ "resource": "" }
q32050
BaseMCMC.pos
train
def pos(self): """A dictionary of the current walker positions. If the sampler hasn't been run yet, returns p0. """ pos = self._pos if pos is None: return self.p0 # convert to dict pos = {param: self._pos[..., k] for (k, param) in enumerate(self.sampling_params)} return pos
python
{ "resource": "" }
q32051
BaseMCMC.p0
train
def p0(self): """A dictionary of the initial position of the walkers. This is set by using ``set_p0``. If not set yet, a ``ValueError`` is raised when the attribute is accessed. """ if self._p0 is None: raise ValueError("initial positions not set; run set_p0") # convert to dict p0 = {param: self._p0[..., k] for (k, param) in enumerate(self.sampling_params)} return p0
python
{ "resource": "" }
q32052
BaseMCMC.set_p0
train
def set_p0(self, samples_file=None, prior=None): """Sets the initial position of the walkers. Parameters ---------- samples_file : InferenceFile, optional If provided, use the last iteration in the given file for the starting positions. prior : JointDistribution, optional Use the given prior to set the initial positions rather than ``model``'s prior. Returns ------- p0 : dict A dictionary mapping sampling params to the starting positions. """ # if samples are given then use those as initial positions if samples_file is not None: with self.io(samples_file, 'r') as fp: samples = fp.read_samples(self.variable_params, iteration=-1, flatten=False) # remove the (length 1) niterations dimension samples = samples[..., 0] # make sure we have the same shape assert samples.shape == self.base_shape, ( "samples in file {} have shape {}, but I have shape {}". format(samples_file, samples.shape, self.base_shape)) # transform to sampling parameter space if self.model.sampling_transforms is not None: samples = self.model.sampling_transforms.apply(samples) # draw random samples if samples are not provided else: nsamples = numpy.prod(self.base_shape) samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape( self.base_shape) # store as ND array with shape [base_shape] x nparams ndim = len(self.variable_params) p0 = numpy.ones(list(self.base_shape)+[ndim]) for i, param in enumerate(self.sampling_params): p0[..., i] = samples[param] self._p0 = p0 return self.p0
python
{ "resource": "" }
q32053
BaseMCMC.set_initial_conditions
train
def set_initial_conditions(self, initial_distribution=None, samples_file=None): """Sets the initial starting point for the MCMC. If a starting samples file is provided, will also load the random state from it. """ self.set_p0(samples_file=samples_file, prior=initial_distribution) # if a samples file was provided, use it to set the state of the # sampler if samples_file is not None: self.set_state_from_file(samples_file)
python
{ "resource": "" }
q32054
BaseMCMC.run
train
def run(self): """Runs the sampler.""" if self.target_eff_nsamples and self.checkpoint_interval is None: raise ValueError("A checkpoint interval must be set if " "targeting an effective number of samples") # get the starting number of samples: # "nsamples" keeps track of the number of samples we've obtained (if # target_eff_nsamples is not None, this is the effective number of # samples; otherwise, this is the total number of samples). # _lastclear is the number of iterations that the file already # contains (either due to sampler burn-in, or a previous checkpoint) if self.new_checkpoint: self._lastclear = 0 else: with self.io(self.checkpoint_file, "r") as fp: self._lastclear = fp.niterations if self.target_eff_nsamples is not None: target_nsamples = self.target_eff_nsamples with self.io(self.checkpoint_file, "r") as fp: nsamples = fp.effective_nsamples elif self.target_niterations is not None: # the number of samples is the number of iterations times the # number of walkers target_nsamples = self.nwalkers * self.target_niterations nsamples = self._lastclear * self.nwalkers else: raise ValueError("must set either target_eff_nsamples or " "target_niterations; see set_target") self._itercounter = 0 # figure out the interval to use iterinterval = self.checkpoint_interval if iterinterval is None: iterinterval = self.target_niterations # run sampler until we have the desired number of samples while nsamples < target_nsamples: # adjust the interval if we would go past the number of iterations if self.target_niterations is not None and ( self.niterations + iterinterval > self.target_niterations): iterinterval = self.target_niterations - self.niterations # run sampler and set initial values to None so that sampler # picks up from where it left off next call logging.info("Running sampler for {} to {} iterations".format( self.niterations, self.niterations + iterinterval)) # run the underlying sampler for the desired interval self.run_mcmc(iterinterval) # update the itercounter self._itercounter = self._itercounter + iterinterval # dump the current results self.checkpoint() # update nsamples for next loop if self.target_eff_nsamples is not None: nsamples = self.effective_nsamples logging.info("Have {} effective samples post burn in".format( nsamples)) else: nsamples += iterinterval * self.nwalkers
python
{ "resource": "" }
q32055
BaseMCMC.effective_nsamples
train
def effective_nsamples(self): """The effective number of samples post burn-in that the sampler has acquired so far.""" try: act = numpy.array(list(self.acts.values())).max() except (AttributeError, TypeError): act = numpy.inf if self.burn_in is None: nperwalker = max(int(self.niterations // act), 1) elif self.burn_in.is_burned_in: nperwalker = int( (self.niterations - self.burn_in.burn_in_iteration) // act) # after burn in, we always have at least 1 sample per walker nperwalker = max(nperwalker, 1) else: nperwalker = 0 return self.nwalkers * nperwalker
python
{ "resource": "" }
q32056
BaseMCMC.checkpoint
train
def checkpoint(self): """Dumps current samples to the checkpoint file.""" # thin and write new samples for fn in [self.checkpoint_file, self.backup_file]: with self.io(fn, "a") as fp: # write the current number of iterations fp.write_niterations(self.niterations) thin_interval = self.get_thin_interval() # thin samples on disk if it changed if thin_interval > 1: # if this is the first time writing, set the file's # thinned_by if fp.last_iteration() == 0: fp.thinned_by = thin_interval else: # check if we need to thin the current samples on disk thin_by = thin_interval // fp.thinned_by if thin_by > 1: logging.info("Thinning samples in %s by a factor " "of %i", fn, int(thin_by)) fp.thin(thin_by) fp_lastiter = fp.last_iteration() logging.info("Writing samples to %s with thin interval %i", fn, thin_interval) self.write_results(fn) # see if we had anything to write after thinning; if not, don't try # to compute anything with self.io(self.checkpoint_file, "r") as fp: nsamples_written = fp.last_iteration() - fp_lastiter if nsamples_written == 0: logging.info("No samples written due to thinning") else: # check for burn in, compute the acls self.acls = None if self.burn_in is not None: logging.info("Updating burn in") self.burn_in.evaluate(self.checkpoint_file) burn_in_index = self.burn_in.burn_in_index logging.info("Is burned in: %r", self.burn_in.is_burned_in) if self.burn_in.is_burned_in: logging.info("Burn-in iteration: %i", int(self.burn_in.burn_in_iteration)) else: burn_in_index = 0 # Compute acls; the burn_in test may have calculated an acl and # saved it, in which case we don't need to do it again. if self.acls is None: logging.info("Computing acls") self.acls = self.compute_acl(self.checkpoint_file, start_index=burn_in_index) logging.info("ACT: %s", str(numpy.array(self.acts.values()).max())) # write for fn in [self.checkpoint_file, self.backup_file]: with self.io(fn, "a") as fp: if self.burn_in is not None: fp.write_burn_in(self.burn_in) if self.acls is not None: fp.write_acls(self.acls) # write effective number of samples fp.write_effective_nsamples(self.effective_nsamples) # check validity logging.info("Validating checkpoint and backup files") checkpoint_valid = validate_checkpoint_files( self.checkpoint_file, self.backup_file) if not checkpoint_valid: raise IOError("error writing to checkpoint file") elif self.checkpoint_signal: # kill myself with the specified signal logging.info("Exiting with SIG{}".format(self.checkpoint_signal)) kill_cmd="os.kill(os.getpid(), signal.SIG{})".format( self.checkpoint_signal) exec(kill_cmd) # clear the in-memory chain to save memory logging.info("Clearing samples from memory") self.clear_samples()
python
{ "resource": "" }
q32057
BaseMCMC.set_target_from_config
train
def set_target_from_config(self, cp, section): """Sets the target using the given config file. This looks for ``niterations`` to set the ``target_niterations``, and ``effective-nsamples`` to set the ``target_eff_nsamples``. Parameters ---------- cp : ConfigParser Open config parser to retrieve the argument from. section : str Name of the section to retrieve from. """ if cp.has_option(section, "niterations"): niterations = int(cp.get(section, "niterations")) else: niterations = None if cp.has_option(section, "effective-nsamples"): nsamples = int(cp.get(section, "effective-nsamples")) else: nsamples = None self.set_target(niterations=niterations, eff_nsamples=nsamples)
python
{ "resource": "" }
q32058
BaseMCMC.set_burn_in_from_config
train
def set_burn_in_from_config(self, cp): """Sets the burn in class from the given config file. If no burn-in section exists in the file, then this just sets the burn-in class to None. """ try: bit = self.burn_in_class.from_config(cp, self) except ConfigParser.Error: bit = None self.set_burn_in(bit)
python
{ "resource": "" }
q32059
BaseMCMC.set_thin_interval_from_config
train
def set_thin_interval_from_config(self, cp, section): """Sets thinning options from the given config file. """ if cp.has_option(section, "thin-interval"): thin_interval = int(cp.get(section, "thin-interval")) logging.info("Will thin samples using interval %i", thin_interval) else: thin_interval = None if cp.has_option(section, "max-samples-per-chain"): max_samps_per_chain = int(cp.get(section, "max-samples-per-chain")) logging.info("Setting max samples per chain to %i", max_samps_per_chain) else: max_samps_per_chain = None # check for consistency if thin_interval is not None and max_samps_per_chain is not None: raise ValueError("provide either thin-interval or " "max-samples-per-chain, not both") # check that the thin interval is less than the checkpoint interval if thin_interval is not None and self.checkpoint_interval is not None \ and thin_interval >= self.checkpoint_interval: raise ValueError("thin interval must be less than the checkpoint " "interval") self.thin_interval = thin_interval self.max_samples_per_chain = max_samps_per_chain
python
{ "resource": "" }
q32060
BaseMCMC.acts
train
def acts(self): """The autocorrelation times of each parameter. The autocorrelation time is defined as the ACL times the ``thin_interval``. It gives the number of iterations between independent samples. """ if self.acls is None: return None return {p: acl * self.get_thin_interval() for (p, acl) in self.acls.items()}
python
{ "resource": "" }
q32061
MCMCAutocorrSupport.compute_acl
train
def compute_acl(cls, filename, start_index=None, end_index=None, min_nsamples=10): """Computes the autocorrelation length for all model params in the given file. Parameter values are averaged over all walkers at each iteration. The ACL is then calculated over the averaged chain. If an ACL cannot be calculated because there are not enough samples, it will be set to ``inf``. Parameters ----------- filename : str Name of a samples file to compute ACLs for. start_index : int, optional The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : int, optional The end index to compute the acl to. If None, will go to the end of the current iteration. min_nsamples : int, optional Require a minimum number of samples to compute an ACL. If the number of samples per walker is less than this, will just set to ``inf``. Default is 10. Returns ------- dict A dictionary giving the ACL for each parameter. """ acls = {} with cls._io(filename, 'r') as fp: for param in fp.variable_params: samples = fp.read_raw_samples( param, thin_start=start_index, thin_interval=1, thin_end=end_index, flatten=False)[param] samples = samples.mean(axis=0) # if < min number of samples, just set to inf if samples.size < min_nsamples: acl = numpy.inf else: acl = autocorrelation.calculate_acl(samples) if acl <= 0: acl = numpy.inf acls[param] = acl return acls
python
{ "resource": "" }
q32062
LiveSingleFarThreshold.check
train
def check(self, triggers, data_reader): """ Look for a single detector trigger that passes the thresholds in the current data. """ if len(triggers['snr']) == 0: return None i = triggers['snr'].argmax() # This uses the pycbc live convention of chisq always meaning the # reduced chisq. rchisq = triggers['chisq'][i] nsnr = ranking.newsnr(triggers['snr'][i], rchisq) dur = triggers['template_duration'][i] if nsnr > self.newsnr_threshold and \ rchisq < self.reduced_chisq_threshold and \ dur > self.duration_threshold: fake_coinc = {'foreground/%s/%s' % (self.ifo, k): triggers[k][i] for k in triggers} fake_coinc['foreground/stat'] = nsnr fake_coinc['foreground/ifar'] = self.fixed_ifar fake_coinc['HWINJ'] = data_reader.near_hwinj() return fake_coinc return None
python
{ "resource": "" }
q32063
InferenceTXTFile.write
train
def write(cls, output_file, samples, labels, delimiter=None): """ Writes a text file with samples. Parameters ----------- output_file : str The path of the file to write. samples : FieldArray Samples to write to file. labels : list A list of strings to include as header in TXT file. delimiter : str Delimiter to use in TXT file. """ delimiter = delimiter if delimiter is not None else cls.delimiter header = delimiter.join(labels) numpy.savetxt(output_file, samples, comments=cls.comments, header=header, delimiter=delimiter)
python
{ "resource": "" }
q32064
get_topclasses
train
def get_topclasses(cls): """Gets the base classes that are in pycbc.""" bases = [c for c in inspect.getmro(cls) if c.__module__.startswith('pycbc') and c != cls] return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases])
python
{ "resource": "" }
q32065
int_gps_time_to_str
train
def int_gps_time_to_str(t): """Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError.""" if isinstance(t, int): return str(t) elif isinstance(t, float): # Wouldn't this just work generically? int_t = int(t) if abs(t - int_t) > 0.: raise ValueError('Need an integer GPS time, got %s' % str(t)) return str(int_t) elif isinstance(t, lal.LIGOTimeGPS): if t.gpsNanoSeconds == 0: return str(t.gpsSeconds) else: raise ValueError('Need an integer GPS time, got %s' % str(t)) else: err_msg = "Didn't understand input type {}".format(type(t)) raise ValueError(err_msg)
python
{ "resource": "" }
q32066
select_tmpltbank_class
train
def select_tmpltbank_class(curr_exe): """ This function returns a class that is appropriate for setting up template bank jobs within workflow. Parameters ---------- curr_exe : string The name of the executable to be used for generating template banks. Returns -------- exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility functions appropriate for the given executable. Instances of the class ('jobs') **must** have methods * job.create_node() and * job.get_valid_times(ifo, ) """ exe_to_class_map = { 'pycbc_geom_nonspinbank' : PyCBCTmpltbankExecutable, 'pycbc_aligned_stoch_bank': PyCBCTmpltbankExecutable } try: return exe_to_class_map[curr_exe] except KeyError: raise NotImplementedError( "No job class exists for executable %s, exiting" % curr_exe)
python
{ "resource": "" }
q32067
select_matchedfilter_class
train
def select_matchedfilter_class(curr_exe): """ This function returns a class that is appropriate for setting up matched-filtering jobs within workflow. Parameters ---------- curr_exe : string The name of the matched filter executable to be used. Returns -------- exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility functions appropriate for the given executable. Instances of the class ('jobs') **must** have methods * job.create_node() and * job.get_valid_times(ifo, ) """ exe_to_class_map = { 'pycbc_inspiral' : PyCBCInspiralExecutable, 'pycbc_inspiral_skymax' : PyCBCInspiralExecutable, 'pycbc_multi_inspiral' : PyCBCMultiInspiralExecutable, } try: return exe_to_class_map[curr_exe] except KeyError: # also conceivable to introduce a default class?? raise NotImplementedError( "No job class exists for executable %s, exiting" % curr_exe)
python
{ "resource": "" }
q32068
select_generic_executable
train
def select_generic_executable(workflow, exe_tag): """ Returns a class that is appropriate for setting up jobs to run executables having specific tags in the workflow config. Executables should not be "specialized" jobs fitting into one of the select_XXX_class functions above, i.e. not a matched filter or template bank job, which require extra setup. Parameters ---------- workflow : pycbc.workflow.core.Workflow The Workflow instance. exe_tag : string The name of the config section storing options for this executable and the option giving the executable path in the [executables] section. Returns -------- exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility functions appropriate for the given executable. Instances of the class ('jobs') **must** have a method job.create_node() """ exe_path = workflow.cp.get("executables", exe_tag) exe_name = os.path.basename(exe_path) exe_to_class_map = { 'ligolw_add' : LigolwAddExecutable, 'ligolw_cbc_sstinca' : LigolwSSthincaExecutable, 'pycbc_sqlite_simplify' : PycbcSqliteSimplifyExecutable, 'ligolw_cbc_cluster_coincs': SQLInOutExecutable, 'ligolw_cbc_repop_coinc' : SQLInOutExecutable, 'repop_coinc_expfit' : SQLInOutExecutable, 'ligolw_cbc_dbinjfind' : SQLInOutExecutable, 'lalapps_inspinj' : LalappsInspinjExecutable, 'pycbc_dark_vs_bright_injections' : PycbcDarkVsBrightInjectionsExecutable, 'pycbc_timeslides' : PycbcTimeslidesExecutable, 'pycbc_compute_durations' : ComputeDurationsExecutable, 'pycbc_calculate_far' : PycbcCalculateFarExecutable, "pycbc_run_sqlite" : SQLInOutExecutable, # FIXME: We may end up with more than one class for using ligolw_sqlite # How to deal with this? "ligolw_sqlite" : ExtractToXMLExecutable, "pycbc_inspinjfind" : InspinjfindExecutable, "pycbc_pickle_horizon_distances" : PycbcPickleHorizonDistsExecutable, "pycbc_combine_likelihood" : PycbcCombineLikelihoodExecutable, "pycbc_gen_ranking_data" : PycbcGenerateRankingDataExecutable, "pycbc_calculate_likelihood" : PycbcCalculateLikelihoodExecutable, "gstlal_inspiral_marginalize_likelihood" : GstlalMarginalizeLikelihoodExecutable, "pycbc_compute_far_from_snr_chisq_histograms" : GstlalFarfromsnrchisqhistExecutable, "gstlal_inspiral_plot_sensitivity" : GstlalPlotSensitivity, "gstlal_inspiral_plot_background" : GstlalPlotBackground, "gstlal_inspiral_plotsummary" : GstlalPlotSummary, "gstlal_inspiral_summary_page" : GstlalSummaryPage, "pycbc_condition_strain" : PycbcConditionStrainExecutable } try: return exe_to_class_map[exe_name] except KeyError: # Should we try some sort of default class?? raise NotImplementedError( "No job class exists for executable %s, exiting" % exe_name)
python
{ "resource": "" }
q32069
multi_ifo_coherent_job_setup
train
def multi_ifo_coherent_job_setup(workflow, out_files, curr_exe_job, science_segs, datafind_outs, output_dir, parents=None, slide_dict=None, tags=None): """ Method for setting up coherent inspiral jobs. """ if tags is None: tags = [] data_seg, job_valid_seg = curr_exe_job.get_valid_times() curr_out_files = FileList([]) if 'IPN' in datafind_outs[-1].description \ and 'bank_veto_bank' in datafind_outs[-2].description: ipn_sky_points = datafind_outs[-1] bank_veto = datafind_outs[-2] frame_files = datafind_outs[:-2] else: ipn_sky_points = None bank_veto = datafind_outs[-1] frame_files = datafind_outs[:-1] split_bank_counter = 0 if curr_exe_job.injection_file is None: for split_bank in parents: tag = list(tags) tag.append(split_bank.tag_str) node = curr_exe_job.create_node(data_seg, job_valid_seg, parent=split_bank, dfParents=frame_files, bankVetoBank=bank_veto, ipn_file=ipn_sky_points, slide=slide_dict, tags=tag) workflow.add_node(node) split_bank_counter += 1 curr_out_files.extend(node.output_files) else: for inj_file in curr_exe_job.injection_file: for split_bank in parents: tag = list(tags) tag.append(inj_file.tag_str) tag.append(split_bank.tag_str) node = curr_exe_job.create_node(data_seg, job_valid_seg, parent=split_bank, inj_file=inj_file, tags=tag, dfParents=frame_files, bankVetoBank=bank_veto, ipn_file=ipn_sky_points) workflow.add_node(node) split_bank_counter += 1 curr_out_files.extend(node.output_files) # FIXME: Here we remove PSD files if they are coming # through. This should be done in a better way. On # to-do list. curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\ not in i.tags] out_files += curr_out_files return out_files
python
{ "resource": "" }
q32070
JobSegmenter.pick_tile_size
train
def pick_tile_size(self, seg_size, data_lengths, valid_chunks, valid_lengths): """ Choose the job tile size based on the science segment length """ if len(valid_lengths) == 1: return data_lengths[0], valid_chunks[0], valid_lengths[0] else: # Pick the tile size that is closest to 1/3 of the science segment target_size = seg_size / 3 pick, pick_diff = 0, abs(valid_lengths[0] - target_size) for i, size in enumerate(valid_lengths): if abs(size - target_size) < pick_diff: pick, pick_diff = i, abs(size - target_size) return data_lengths[pick], valid_chunks[pick], valid_lengths[pick]
python
{ "resource": "" }
q32071
JobSegmenter.get_valid_times_for_job
train
def get_valid_times_for_job(self, num_job, allow_overlap=True): """ Get the times for which this job is valid. """ if self.compatibility_mode: return self.get_valid_times_for_job_legacy(num_job) else: return self.get_valid_times_for_job_workflow(num_job, allow_overlap=allow_overlap)
python
{ "resource": "" }
q32072
JobSegmenter.get_valid_times_for_job_workflow
train
def get_valid_times_for_job_workflow(self, num_job, allow_overlap=True): """ Get the times for which the job num_job will be valid, using workflow's method. """ # small factor of 0.0001 to avoid float round offs causing us to # miss a second at end of segments. shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job\ + 0.0001) job_valid_seg = self.valid_chunk.shift(shift_dur) # If we need to recalculate the valid times to avoid overlap if not allow_overlap: data_per_job = (self.curr_seg_length - self.data_loss) / \ float(self.num_jobs) lower_boundary = num_job*data_per_job + \ self.valid_chunk[0] + self.curr_seg[0] upper_boundary = data_per_job + lower_boundary # NOTE: Convert to int after calculating both boundaries # small factor of 0.0001 to avoid float round offs causing us to # miss a second at end of segments. lower_boundary = int(lower_boundary) upper_boundary = int(upper_boundary + 0.0001) if lower_boundary < job_valid_seg[0] or \ upper_boundary > job_valid_seg[1]: err_msg = ("Workflow is attempting to generate output " "from a job at times where it is not valid.") raise ValueError(err_msg) job_valid_seg = segments.segment([lower_boundary, upper_boundary]) return job_valid_seg
python
{ "resource": "" }
q32073
JobSegmenter.get_valid_times_for_job_legacy
train
def get_valid_times_for_job_legacy(self, num_job): """ Get the times for which the job num_job will be valid, using the method used in inspiral hipe. """ # All of this should be integers, so no rounding factors needed. shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job) job_valid_seg = self.valid_chunk.shift(shift_dur) # If this is the last job, push the end back if num_job == (self.num_jobs - 1): dataPushBack = self.data_length - self.valid_chunk[1] job_valid_seg = segments.segment(job_valid_seg[0], self.curr_seg[1] - dataPushBack) return job_valid_seg
python
{ "resource": "" }
q32074
JobSegmenter.get_data_times_for_job
train
def get_data_times_for_job(self, num_job): """ Get the data that this job will read in. """ if self.compatibility_mode: job_data_seg = self.get_data_times_for_job_legacy(num_job) else: job_data_seg = self.get_data_times_for_job_workflow(num_job) # Sanity check that all data is used if num_job == 0: if job_data_seg[0] != self.curr_seg[0]: err= "Job is not using data from the start of the " err += "science segment. It should be using all data." raise ValueError(err) if num_job == (self.num_jobs - 1): if job_data_seg[1] != self.curr_seg[1]: err = "Job is not using data from the end of the " err += "science segment. It should be using all data." raise ValueError(err) if hasattr(self.exe_class, 'zero_pad_data_extend'): job_data_seg = self.exe_class.zero_pad_data_extend(job_data_seg, self.curr_seg) return job_data_seg
python
{ "resource": "" }
q32075
PyCBCInspiralExecutable.get_valid_times
train
def get_valid_times(self): """ Determine possible dimensions of needed input and valid output """ if self.cp.has_option('workflow-matchedfilter', 'min-analysis-segments'): min_analysis_segs = int(self.cp.get('workflow-matchedfilter', 'min-analysis-segments')) else: min_analysis_segs = 0 if self.cp.has_option('workflow-matchedfilter', 'max-analysis-segments'): max_analysis_segs = int(self.cp.get('workflow-matchedfilter', 'max-analysis-segments')) else: # Choose ridiculously large default value max_analysis_segs = 1000 if self.cp.has_option('workflow-matchedfilter', 'min-analysis-length'): min_analysis_length = int(self.cp.get('workflow-matchedfilter', 'min-analysis-length')) else: min_analysis_length = 0 if self.cp.has_option('workflow-matchedfilter', 'max-analysis-length'): max_analysis_length = int(self.cp.get('workflow-matchedfilter', 'max-analysis-length')) else: # Choose a ridiculously large default value max_analysis_length = 100000 segment_length = int(self.get_opt('segment-length')) pad_data = 0 if self.has_opt('pad-data'): pad_data += int(self.get_opt('pad-data')) # NOTE: Currently the tapered data is ignored as it is short and # will lie within the segment start/end pad. This means that # the tapered data *will* be used for PSD estimation (but this # effect should be small). It will also be in the data segments # used for SNR generation (when in the middle of a data segment # where zero-padding is not being used) but the templates should # not be long enough to use this data assuming segment start/end # pad take normal values. When using zero-padding this data will # be used for SNR generation. #if self.has_opt('taper-data'): # pad_data += int(self.get_opt( 'taper-data' )) if self.has_opt('allow-zero-padding'): self.zero_padding=True else: self.zero_padding=False start_pad = int(self.get_opt( 'segment-start-pad')) end_pad = int(self.get_opt('segment-end-pad')) seg_ranges = range(min_analysis_segs, max_analysis_segs + 1) data_lengths = [] valid_regions = [] for nsegs in seg_ranges: analysis_length = (segment_length - start_pad - end_pad) * nsegs if not self.zero_padding: data_length = analysis_length + pad_data * 2 \ + start_pad + end_pad start = pad_data + start_pad end = data_length - pad_data - end_pad else: data_length = analysis_length + pad_data * 2 start = pad_data end = data_length - pad_data if data_length > max_analysis_length: continue if data_length < min_analysis_length: continue data_lengths += [data_length] valid_regions += [segments.segment(start, end)] # If min_analysis_length is given, ensure that it is added as an option # for job analysis length. if min_analysis_length: data_length = min_analysis_length if not self.zero_padding: start = pad_data + start_pad end = data_length - pad_data - end_pad else: start = pad_data end = data_length - pad_data if end > start: data_lengths += [data_length] valid_regions += [segments.segment(start, end)] return data_lengths, valid_regions
python
{ "resource": "" }
q32076
PyCBCTmpltbankExecutable.create_nodata_node
train
def create_nodata_node(self, valid_seg, tags=None): """ A simplified version of create_node that creates a node that does not need to read in data. Parameters ----------- valid_seg : glue.segment The segment over which to declare the node valid. Usually this would be the duration of the analysis. Returns -------- node : pycbc.workflow.core.Node The instance corresponding to the created node. """ if tags is None: tags = [] node = Node(self) # Set the output file # Add the PSD file if needed if self.write_psd: node.new_output_file_opt(valid_seg, '.txt', '--psd-output', tags=tags+['PSD_FILE'], store_file=self.retain_files) node.new_output_file_opt(valid_seg, '.xml.gz', '--output-file', store_file=self.retain_files) if self.psd_files is not None: should_add = False # If any of the ifos for this job are in the set # of ifos for which a static psd was provided. for ifo in self.ifo_list: for psd_file in self.psd_files: if ifo in psd_file.ifo_list: should_add = True if should_add: node.add_input_opt('--psd-file', psd_file) return node
python
{ "resource": "" }
q32077
PycbcSplitBankExecutable.create_node
train
def create_node(self, bank, tags=None): """ Set up a CondorDagmanNode class to run splitbank code Parameters ---------- bank : pycbc.workflow.core.File The File containing the template bank to be split Returns -------- node : pycbc.workflow.core.Node The node to run the job """ if tags is None: tags = [] node = Node(self) node.add_input_opt('--bank-file', bank) # Get the output (taken from inspiral.py) out_files = FileList([]) for i in range( 0, self.num_banks): curr_tag = 'bank%d' %(i) # FIXME: What should the tags actually be? The job.tags values are # currently ignored. curr_tags = bank.tags + [curr_tag] + tags job_tag = bank.description + "_" + self.name.upper() out_file = File(bank.ifo_list, job_tag, bank.segment, extension=self.extension, directory=self.out_dir, tags=curr_tags, store_file=self.retain_files) out_files.append(out_file) node.add_output_list_opt('--output-filenames', out_files) return node
python
{ "resource": "" }
q32078
PycbcCreateInjectionsExecutable.create_node
train
def create_node(self, config_file=None, seed=None, tags=None): """ Set up a CondorDagmanNode class to run ``pycbc_create_injections``. Parameters ---------- config_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for inference configuration file to be used with ``--config-files`` option. seed : int Seed to use for generating injections. tags : list A list of tags to include in filenames. Returns -------- node : pycbc.workflow.core.Node The node to run the job. """ # default for tags is empty list tags = [] if tags is None else tags # get analysis start and end time start_time = self.cp.get("workflow", "start-time") end_time = self.cp.get("workflow", "end-time") analysis_time = segments.segment(int(start_time), int(end_time)) # make node for running executable node = Node(self) node.add_input_opt("--config-file", config_file) if seed: node.add_opt("--seed", seed) injection_file = node.new_output_file_opt(analysis_time, ".hdf", "--output-file", tags=tags) return node, injection_file
python
{ "resource": "" }
q32079
PycbcInferenceExecutable.create_node
train
def create_node(self, channel_names, config_file, injection_file=None, seed=None, fake_strain_seed=None, tags=None): """ Set up a CondorDagmanNode class to run ``pycbc_inference``. Parameters ---------- channel_names : dict A ``dict`` of ``str`` to use for ``--channel-name`` option. config_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for inference configuration file to be used with ``--config-files`` option. injection_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for injection file to be used with ``--injection-file`` option. seed : int An ``int`` to be used with ``--seed`` option. fake_strain_seed : dict An ``int`` to be used with ``--fake-strain-seed`` option. tags : list A list of tags to include in filenames. Returns -------- node : pycbc.workflow.core.Node The node to run the job. """ # default for tags is empty list tags = [] if tags is None else tags # get analysis start and end time start_time = self.cp.get("workflow", "start-time") end_time = self.cp.get("workflow", "end-time") analysis_time = segments.segment(int(start_time), int(end_time)) # get multi-IFO opts channel_names_opt = " ".join(["{}:{}".format(k, v) for k, v in channel_names.iteritems()]) if fake_strain_seed is not None: fake_strain_seed_opt = " ".join([ "{}:{}".format(k, v) for k, v in fake_strain_seed.iteritems()]) # make node for running executable node = Node(self) node.add_opt("--instruments", " ".join(self.ifo_list)) node.add_opt("--gps-start-time", start_time) node.add_opt("--gps-end-time", end_time) node.add_opt("--channel-name", channel_names_opt) node.add_input_opt("--config-file", config_file) if fake_strain_seed is not None: node.add_opt("--fake-strain-seed", fake_strain_seed_opt) if injection_file: node.add_input_opt("--injection-file", injection_file) if seed: node.add_opt("--seed", seed) inference_file = node.new_output_file_opt(analysis_time, ".hdf", "--output-file", tags=tags) if self.cp.has_option("pegasus_profile-inference", "condor|+CheckpointSig"): ckpt_file_name = "{}.checkpoint".format(inference_file.name) ckpt_file = dax.File(ckpt_file_name) node._dax_node.uses(ckpt_file, link=dax.Link.OUTPUT, register=False, transfer=False) return node, inference_file
python
{ "resource": "" }
q32080
ensurearray
train
def ensurearray(*args): """Apply numpy's broadcast rules to the given arguments. This will ensure that all of the arguments are numpy arrays and that they all have the same shape. See ``numpy.broadcast_arrays`` for more details. It also returns a boolean indicating whether any of the inputs were originally arrays. Parameters ---------- *args : The arguments to check. Returns ------- list : A list with length ``N+1`` where ``N`` is the number of given arguments. The first N values are the input arguments as ``ndarray``s. The last value is a boolean indicating whether any of the inputs was an array. """ input_is_array = any(isinstance(arg, numpy.ndarray) for arg in args) args = numpy.broadcast_arrays(*args) args.append(input_is_array) return args
python
{ "resource": "" }
q32081
_mass2_from_mchirp_mass1
train
def _mass2_from_mchirp_mass1(mchirp, mass1): r"""Returns the secondary mass from the chirp mass and primary mass. As this is a cubic equation this requires finding the roots and returning the one that is real. Basically it can be shown that: .. math:: m_2^3 - a(m_2 + m_1) = 0, where .. math:: a = \frac{\mathcal{M}^5}{m_1^3}. This has 3 solutions but only one will be real. """ a = mchirp**5 / mass1**3 roots = numpy.roots([1,0,-a,-a*mass1]) # Find the real one real_root = roots[(abs(roots - roots.real)).argmin()] return real_root.real
python
{ "resource": "" }
q32082
_mass_from_knownmass_eta
train
def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False, force_real=True): r"""Returns the other component mass given one of the component masses and the symmetric mass ratio. This requires finding the roots of the quadratic equation: .. math:: \eta m_2^2 + (2\eta - 1)m_1 m_2 + \eta m_1^2 = 0. This has two solutions which correspond to :math:`m_1` being the heavier mass or it being the lighter mass. By default, `known_mass` is assumed to be the heavier (primary) mass, and the smaller solution is returned. Use `known_is_secondary` to invert. Parameters ---------- known_mass : float The known component mass. eta : float The symmetric mass ratio. known_is_secondary : {False, bool} Whether the known component mass is the primary or the secondary. If True, `known_mass` is assumed to be the secondary (lighter) mass and the larger solution is returned. Otherwise, the smaller solution is returned. Default is False. force_real : {True, bool} Force the returned mass to be real. Returns ------- float The other component mass. """ roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.]) if force_real: roots = numpy.real(roots) if known_is_secondary: return roots[roots.argmax()] else: return roots[roots.argmin()]
python
{ "resource": "" }
q32083
mass2_from_mass1_eta
train
def mass2_from_mass1_eta(mass1, eta, force_real=True): """Returns the secondary mass from the primary mass and symmetric mass ratio. """ return mass_from_knownmass_eta(mass1, eta, known_is_secondary=False, force_real=force_real)
python
{ "resource": "" }
q32084
mass1_from_mass2_eta
train
def mass1_from_mass2_eta(mass2, eta, force_real=True): """Returns the primary mass from the secondary mass and symmetric mass ratio. """ return mass_from_knownmass_eta(mass2, eta, known_is_secondary=True, force_real=force_real)
python
{ "resource": "" }
q32085
lambda_tilde
train
def lambda_tilde(mass1, mass2, lambda1, lambda2): """ The effective lambda parameter The mass-weighted dominant effective lambda parameter defined in https://journals.aps.org/prd/pdf/10.1103/PhysRevD.91.043002 """ m1, m2, lambda1, lambda2, input_is_array = ensurearray( mass1, mass2, lambda1, lambda2) lsum = lambda1 + lambda2 ldiff, _ = ensurearray(lambda1 - lambda2) mask = m1 < m2 ldiff[mask] = -ldiff[mask] eta = eta_from_mass1_mass2(m1, m2) p1 = (lsum) * (1 + 7. * eta - 31 * eta ** 2.0) p2 = (1 - 4 * eta)**0.5 * (1 + 9 * eta - 11 * eta ** 2.0) * (ldiff) return formatreturn(8.0 / 13.0 * (p1 + p2), input_is_array)
python
{ "resource": "" }
q32086
chi_eff
train
def chi_eff(mass1, mass2, spin1z, spin2z): """Returns the effective spin from mass1, mass2, spin1z, and spin2z.""" return (spin1z * mass1 + spin2z * mass2) / (mass1 + mass2)
python
{ "resource": "" }
q32087
chi_a
train
def chi_a(mass1, mass2, spin1z, spin2z): """ Returns the aligned mass-weighted spin difference from mass1, mass2, spin1z, and spin2z. """ return (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1)
python
{ "resource": "" }
q32088
chi_p
train
def chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y): """Returns the effective precession spin from mass1, mass2, spin1x, spin1y, spin2x, and spin2y. """ xi1 = secondary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y) xi2 = primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y) return chi_p_from_xi1_xi2(xi1, xi2)
python
{ "resource": "" }
q32089
phi_a
train
def phi_a(mass1, mass2, spin1x, spin1y, spin2x, spin2y): """ Returns the angle between the in-plane perpendicular spins.""" phi1 = phi_from_spinx_spiny(primary_spin(mass1, mass2, spin1x, spin2x), primary_spin(mass1, mass2, spin1y, spin2y)) phi2 = phi_from_spinx_spiny(secondary_spin(mass1, mass2, spin1x, spin2x), secondary_spin(mass1, mass2, spin1y, spin2y)) return (phi1 - phi2) % (2 * numpy.pi)
python
{ "resource": "" }
q32090
phi_s
train
def phi_s(spin1x, spin1y, spin2x, spin2y): """ Returns the sum of the in-plane perpendicular spins.""" phi1 = phi_from_spinx_spiny(spin1x, spin1y) phi2 = phi_from_spinx_spiny(spin2x, spin2y) return (phi1 + phi2) % (2 * numpy.pi)
python
{ "resource": "" }
q32091
chi_eff_from_spherical
train
def chi_eff_from_spherical(mass1, mass2, spin1_a, spin1_polar, spin2_a, spin2_polar): """Returns the effective spin using spins in spherical coordinates.""" spin1z = spin1_a * numpy.cos(spin1_polar) spin2z = spin2_a * numpy.cos(spin2_polar) return chi_eff(mass1, mass2, spin1z, spin2z)
python
{ "resource": "" }
q32092
chi_p_from_spherical
train
def chi_p_from_spherical(mass1, mass2, spin1_a, spin1_azimuthal, spin1_polar, spin2_a, spin2_azimuthal, spin2_polar): """Returns the effective precession spin using spins in spherical coordinates. """ spin1x, spin1y, _ = _spherical_to_cartesian( spin1_a, spin1_azimuthal, spin1_polar) spin2x, spin2y, _ = _spherical_to_cartesian( spin2_a, spin2_azimuthal, spin2_polar) return chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y)
python
{ "resource": "" }
q32093
primary_spin
train
def primary_spin(mass1, mass2, spin1, spin2): """Returns the dimensionless spin of the primary mass.""" mass1, mass2, spin1, spin2, input_is_array = ensurearray( mass1, mass2, spin1, spin2) sp = copy.copy(spin1) mask = mass1 < mass2 sp[mask] = spin2[mask] return formatreturn(sp, input_is_array)
python
{ "resource": "" }
q32094
secondary_spin
train
def secondary_spin(mass1, mass2, spin1, spin2): """Returns the dimensionless spin of the secondary mass.""" mass1, mass2, spin1, spin2, input_is_array = ensurearray( mass1, mass2, spin1, spin2) ss = copy.copy(spin2) mask = mass1 < mass2 ss[mask] = spin1[mask] return formatreturn(ss, input_is_array)
python
{ "resource": "" }
q32095
primary_xi
train
def primary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y): """Returns the effective precession spin argument for the larger mass. """ spinx = primary_spin(mass1, mass2, spin1x, spin2x) spiny = primary_spin(mass1, mass2, spin1y, spin2y) return chi_perp_from_spinx_spiny(spinx, spiny)
python
{ "resource": "" }
q32096
secondary_xi
train
def secondary_xi(mass1, mass2, spin1x, spin1y, spin2x, spin2y): """Returns the effective precession spin argument for the smaller mass. """ spinx = secondary_spin(mass1, mass2, spin1x, spin2x) spiny = secondary_spin(mass1, mass2, spin1y, spin2y) return xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spinx, spiny)
python
{ "resource": "" }
q32097
xi2_from_mass1_mass2_spin2x_spin2y
train
def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y): """Returns the effective precession spin argument for the smaller mass. This function assumes it's given spins of the secondary mass. """ q = q_from_mass1_mass2(mass1, mass2) a1 = 2 + 3 * q / 2 a2 = 2 + 3 / (2 * q) return a1 / (q**2 * a2) * chi_perp_from_spinx_spiny(spin2x, spin2y)
python
{ "resource": "" }
q32098
chi_perp_from_mass1_mass2_xi2
train
def chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2): """Returns the in-plane spin from mass1, mass2, and xi2 for the secondary mass. """ q = q_from_mass1_mass2(mass1, mass2) a1 = 2 + 3 * q / 2 a2 = 2 + 3 / (2 * q) return q**2 * a2 / a1 * xi2
python
{ "resource": "" }
q32099
chi_p_from_xi1_xi2
train
def chi_p_from_xi1_xi2(xi1, xi2): """Returns effective precession spin from xi1 and xi2. """ xi1, xi2, input_is_array = ensurearray(xi1, xi2) chi_p = copy.copy(xi1) mask = xi1 < xi2 chi_p[mask] = xi2[mask] return formatreturn(chi_p, input_is_array)
python
{ "resource": "" }