Record fields: _id · title · partition · text · language · meta_information
q32100
phi_from_spinx_spiny
train
def phi_from_spinx_spiny(spinx, spiny):
    """Returns the angle between the x-component axis and the in-plane
    spin.
    """
    phi = numpy.arctan2(spiny, spinx)
    return phi % (2 * numpy.pi)
python
{ "resource": "" }
q32101
spin1z_from_mass1_mass2_chi_eff_chi_a
train
def spin1z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a):
    """Returns spin1z.
    """
    return (mass1 + mass2) / (2.0 * mass1) * (chi_eff - chi_a)
python
{ "resource": "" }
q32102
spin2z_from_mass1_mass2_chi_eff_chi_a
train
def spin2z_from_mass1_mass2_chi_eff_chi_a(mass1, mass2, chi_eff, chi_a):
    """Returns spin2z.
    """
    return (mass1 + mass2) / (2.0 * mass2) * (chi_eff + chi_a)
python
{ "resource": "" }
q32103
spin1x_from_xi1_phi_a_phi_s
train
def spin1x_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s):
    """Returns x-component spin for primary mass.
    """
    phi1 = phi1_from_phi_a_phi_s(phi_a, phi_s)
    return xi1 * numpy.cos(phi1)
python
{ "resource": "" }
q32104
spin1y_from_xi1_phi_a_phi_s
train
def spin1y_from_xi1_phi_a_phi_s(xi1, phi_a, phi_s):
    """Returns y-component spin for primary mass.
    """
    # use the same argument order as spin1x_from_xi1_phi_a_phi_s; the
    # original passed (phi_s, phi_a), which would compute a different phi1
    # for the y-component than for the x-component
    phi1 = phi1_from_phi_a_phi_s(phi_a, phi_s)
    return xi1 * numpy.sin(phi1)
python
{ "resource": "" }
q32105
spin2x_from_mass1_mass2_xi2_phi_a_phi_s
train
def spin2x_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s):
    """Returns x-component spin for secondary mass.
    """
    chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2)
    phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s)
    return chi_perp * numpy.cos(phi2)
python
{ "resource": "" }
q32106
spin2y_from_mass1_mass2_xi2_phi_a_phi_s
train
def spin2y_from_mass1_mass2_xi2_phi_a_phi_s(mass1, mass2, xi2, phi_a, phi_s):
    """Returns y-component spin for secondary mass.
    """
    chi_perp = chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2)
    phi2 = phi2_from_phi_a_phi_s(phi_a, phi_s)
    return chi_perp * numpy.sin(phi2)
python
{ "resource": "" }
q32107
dquadmon_from_lambda
train
def dquadmon_from_lambda(lambdav):
    r"""Return the quadrupole moment of a neutron star given its lambda.

    We use the relations defined in https://arxiv.org/pdf/1302.4499.pdf.
    Note that the convention we use is that:

    .. math::

        \mathrm{dquadmon} = \bar{Q} - 1.

    where :math:`\bar{Q}` (dimensionless) is the reduced quadrupole moment.
    """
    ll = numpy.log(lambdav)
    ai = .194
    bi = .0936
    ci = 0.0474
    di = -4.21 * 10**-3.0
    ei = 1.23 * 10**-4.0
    ln_quad_moment = ai + bi*ll + ci*ll**2.0 + di*ll**3.0 + ei*ll**4.0
    return numpy.exp(ln_quad_moment) - 1
python
{ "resource": "" }
q32108
_det_tc
train
def _det_tc(detector_name, ra, dec, tc, ref_frame='geocentric'):
    """Returns the coalescence time of a signal in the given detector.

    Parameters
    ----------
    detector_name : string
        The name of the detector, e.g., 'H1'.
    ra : float
        The right ascension of the signal, in radians.
    dec : float
        The declination of the signal, in radians.
    tc : float
        The GPS time of the coalescence of the signal in the `ref_frame`.
    ref_frame : {'geocentric', string}
        The reference frame that the given coalescence time is defined in.
        May specify 'geocentric', or a detector name; default is
        'geocentric'.

    Returns
    -------
    float :
        The GPS time of the coalescence in detector `detector_name`.
    """
    if ref_frame == detector_name:
        return tc
    detector = Detector(detector_name)
    if ref_frame == 'geocentric':
        return tc + detector.time_delay_from_earth_center(ra, dec, tc)
    else:
        other = Detector(ref_frame)
        return tc + detector.time_delay_from_detector(other, ra, dec, tc)
python
{ "resource": "" }
q32109
_optimal_orientation_from_detector
train
def _optimal_orientation_from_detector(detector_name, tc):
    """Low-level function to be called from _optimal_dec_from_detector
    and _optimal_ra_from_detector"""
    d = Detector(detector_name)
    ra, dec = d.optimal_orientation(tc)
    return ra, dec
python
{ "resource": "" }
q32110
_genqnmfreq
train
def _genqnmfreq(mass, spin, l, m, nmodes, qnmfreq=None):
    """Convenience function to generate QNM frequencies from lalsimulation.

    Parameters
    ----------
    mass : float
        The mass of the black hole (in solar masses).
    spin : float
        The dimensionless spin of the black hole.
    l : int
        l-index of the harmonic.
    m : int
        m-index of the harmonic.
    nmodes : int
        The number of overtones to generate.
    qnmfreq : lal.COMPLEX16Vector, optional
        LAL vector to write the results into. Must be the same length as
        ``nmodes``. If None, will create one.

    Returns
    -------
    lal.COMPLEX16Vector
        LAL vector containing the complex QNM frequencies.
    """
    if qnmfreq is None:
        qnmfreq = lal.CreateCOMPLEX16Vector(int(nmodes))
    lalsim.SimIMREOBGenerateQNMFreqV2fromFinal(
        qnmfreq, float(mass), float(spin), int(l), int(m), int(nmodes))
    return qnmfreq
python
{ "resource": "" }
q32111
get_lm_f0tau
train
def get_lm_f0tau(mass, spin, l, m, nmodes):
    """Return the f0 and the tau of each overtone for a given l, m mode.

    Parameters
    ----------
    mass : float or array
        Mass of the black hole (in solar masses).
    spin : float or array
        Dimensionless spin of the final black hole.
    l : int or array
        l-index of the harmonic.
    m : int or array
        m-index of the harmonic.
    nmodes : int
        The number of overtones to generate.

    Returns
    -------
    f0 : float or array
        The frequency of the QNM(s), in Hz. If only a single mode is
        requested (and mass, spin, l, and m are not arrays), this will be
        a float. If multiple modes requested, will be an array with shape
        ``[input shape x] nmodes``, where ``input shape`` is the
        broadcasted shape of the inputs.
    tau : float or array
        The damping time of the QNM(s), in seconds. Return type is same
        as f0.
    """
    # convert to arrays
    mass, spin, l, m, input_is_array = ensurearray(mass, spin, l, m)
    # we'll ravel the arrays so we can evaluate each parameter combination
    # one at a time
    origshape = mass.shape
    if nmodes < 1:
        raise ValueError("nmodes must be >= 1")
    if nmodes > 1:
        newshape = tuple(list(origshape)+[nmodes])
    else:
        newshape = origshape
    f0s = numpy.zeros((mass.size, nmodes))
    taus = numpy.zeros((mass.size, nmodes))
    mass = mass.ravel()
    spin = spin.ravel()
    l = l.ravel()
    m = m.ravel()
    qnmfreq = None
    modes = range(nmodes)
    for ii in range(mass.size):
        qnmfreq = _genqnmfreq(mass[ii], spin[ii], l[ii], m[ii], nmodes,
                              qnmfreq=qnmfreq)
        f0s[ii, :] = [qnmfreq.data[n].real/(2 * numpy.pi) for n in modes]
        taus[ii, :] = [1./qnmfreq.data[n].imag for n in modes]
    f0s = f0s.reshape(newshape)
    taus = taus.reshape(newshape)
    return (formatreturn(f0s, input_is_array),
            formatreturn(taus, input_is_array))
python
{ "resource": "" }
q32112
get_lm_f0tau_allmodes
train
def get_lm_f0tau_allmodes(mass, spin, modes):
    """Returns a dictionary of all of the frequencies and damping times
    for the requested modes.

    Parameters
    ----------
    mass : float or array
        Mass of the black hole (in solar masses).
    spin : float or array
        Dimensionless spin of the final black hole.
    modes : list of str
        The modes to get. Each string in the list should be formatted
        'lmN', where l (m) is the l (m) index of the harmonic and N is
        the number of overtones to generate (note, N is not the index of
        the overtone). For example, '221' will generate the 0th overtone
        of the l = m = 2 mode.

    Returns
    -------
    f0 : dict
        Dictionary mapping the modes to the frequencies. The dictionary
        keys are 'lmn' string, where l (m) is the l (m) index of the
        harmonic and n is the index of the overtone. For example, '220'
        is the l = m = 2 mode and the 0th overtone.
    tau : dict
        Dictionary mapping the modes to the damping times. The keys are
        the same as ``f0``.
    """
    f0, tau = {}, {}
    key = '{}{}{}'
    for lmn in modes:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        tmp_f0, tmp_tau = get_lm_f0tau(mass, spin, l, m, nmodes)
        if nmodes == 1:
            # in this case, tmp_f0 and tmp_tau will just be floats
            f0[key.format(l, m, '0')] = tmp_f0
            tau[key.format(l, m, '0')] = tmp_tau
        else:
            for n in range(nmodes):
                # we need to wrap tmp_f0 with formatreturn to ensure that
                # if only a mass, spin pair was requested, the value
                # stored to the dict is a float
                f0[key.format(l, m, n)] = formatreturn(tmp_f0[..., n])
                tau[key.format(l, m, n)] = formatreturn(tmp_tau[..., n])
    return f0, tau
python
{ "resource": "" }
q32113
freq_from_final_mass_spin
train
def freq_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):
    """Returns QNM frequency for the given mass and spin and mode.

    Parameters
    ----------
    final_mass : float or array
        Mass of the black hole (in solar masses).
    final_spin : float or array
        Dimensionless spin of the final black hole.
    l : int or array, optional
        l-index of the harmonic. Default is 2.
    m : int or array, optional
        m-index of the harmonic. Default is 2.
    nmodes : int, optional
        The number of overtones to generate. Default is 1.

    Returns
    -------
    float or array
        The frequency of the QNM(s), in Hz. If only a single mode is
        requested (and mass, spin, l, and m are not arrays), this will be
        a float. If multiple modes requested, will be an array with shape
        ``[input shape x] nmodes``, where ``input shape`` is the
        broadcasted shape of the inputs.
    """
    return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[0]
python
{ "resource": "" }
q32114
tau_from_final_mass_spin
train
def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):
    """Returns QNM damping time for the given mass and spin and mode.

    Parameters
    ----------
    final_mass : float or array
        Mass of the black hole (in solar masses).
    final_spin : float or array
        Dimensionless spin of the final black hole.
    l : int or array, optional
        l-index of the harmonic. Default is 2.
    m : int or array, optional
        m-index of the harmonic. Default is 2.
    nmodes : int, optional
        The number of overtones to generate. Default is 1.

    Returns
    -------
    float or array
        The damping time of the QNM(s), in seconds. If only a single mode
        is requested (and mass, spin, l, and m are not arrays), this will
        be a float. If multiple modes requested, will be an array with
        shape ``[input shape x] nmodes``, where ``input shape`` is the
        broadcasted shape of the inputs.
    """
    return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[1]
python
{ "resource": "" }
q32115
final_spin_from_f0_tau
train
def final_spin_from_f0_tau(f0, tau, l=2, m=2):
    """Returns the final spin based on the given frequency and damping
    time.

    .. note::
        Currently, only l = m = 2 is supported. Any other indices will
        raise a ``KeyError``.

    Parameters
    ----------
    f0 : float or array
        Frequency of the QNM (in Hz).
    tau : float or array
        Damping time of the QNM (in seconds).
    l : int, optional
        l-index of the harmonic. Default is 2.
    m : int, optional
        m-index of the harmonic. Default is 2.

    Returns
    -------
    float or array
        The spin of the final black hole. If the combination of frequency
        and damping times give an unphysical result, ``numpy.nan`` will
        be returned.
    """
    f0, tau, input_is_array = ensurearray(f0, tau)
    # from Berti et al. 2006
    a, b, c = _berti_spin_constants[l, m]
    origshape = f0.shape
    # flatten inputs for storing results
    f0 = f0.ravel()
    tau = tau.ravel()
    spins = numpy.zeros(f0.size)
    for ii in range(spins.size):
        Q = f0[ii] * tau[ii] * numpy.pi
        try:
            s = 1. - ((Q-a)/b)**(1./c)
        except ValueError:
            s = numpy.nan
        spins[ii] = s
    spins = spins.reshape(origshape)
    return formatreturn(spins, input_is_array)
python
{ "resource": "" }
q32116
get_final_from_initial
train
def get_final_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0.,
                           spin2x=0., spin2y=0., spin2z=0.,
                           approximant='SEOBNRv4'):
    """Estimates the final mass and spin from the given initial
    parameters.

    This uses the fits used by the EOBNR models for converting from
    initial parameters to final. Which version is used can be controlled
    by the ``approximant`` argument.

    Parameters
    ----------
    mass1 : float
        The mass of one of the components, in solar masses.
    mass2 : float
        The mass of the other component, in solar masses.
    spin1x : float, optional
        The dimensionless x-component of the spin of mass1. Default is 0.
    spin1y : float, optional
        The dimensionless y-component of the spin of mass1. Default is 0.
    spin1z : float, optional
        The dimensionless z-component of the spin of mass1. Default is 0.
    spin2x : float, optional
        The dimensionless x-component of the spin of mass2. Default is 0.
    spin2y : float, optional
        The dimensionless y-component of the spin of mass2. Default is 0.
    spin2z : float, optional
        The dimensionless z-component of the spin of mass2. Default is 0.
    approximant : str, optional
        The waveform approximant to use for the fit function. Default is
        "SEOBNRv4".

    Returns
    -------
    final_mass : float
        The final mass, in solar masses.
    final_spin : float
        The dimensionless final spin.
    """
    args = (mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z)
    args = ensurearray(*args)
    input_is_array = args[-1]
    origshape = args[0].shape
    # flatten inputs for storing results
    args = [a.ravel() for a in args[:-1]]
    mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z = args
    final_mass = numpy.zeros(mass1.shape)
    final_spin = numpy.zeros(mass1.shape)
    for ii in range(final_mass.size):
        m1 = mass1[ii]
        m2 = mass2[ii]
        spin1 = [spin1x[ii], spin1y[ii], spin1z[ii]]
        spin2 = [spin2x[ii], spin2y[ii], spin2z[ii]]
        _, fm, fs = lalsim.SimIMREOBFinalMassSpin(
            m1, m2, spin1, spin2, getattr(lalsim, approximant))
        final_mass[ii] = fm * (m1 + m2)
        final_spin[ii] = fs
    final_mass = final_mass.reshape(origshape)
    final_spin = final_spin.reshape(origshape)
    return (formatreturn(final_mass, input_is_array),
            formatreturn(final_spin, input_is_array))
python
{ "resource": "" }
q32117
final_mass_from_initial
train
def final_mass_from_initial(mass1, mass2, spin1x=0., spin1y=0., spin1z=0.,
                            spin2x=0., spin2y=0., spin2z=0.,
                            approximant='SEOBNRv4'):
    """Estimates the final mass from the given initial parameters.

    This uses the fits used by the EOBNR models for converting from
    initial parameters to final. Which version is used can be controlled
    by the ``approximant`` argument.

    Parameters
    ----------
    mass1 : float
        The mass of one of the components, in solar masses.
    mass2 : float
        The mass of the other component, in solar masses.
    spin1x : float, optional
        The dimensionless x-component of the spin of mass1. Default is 0.
    spin1y : float, optional
        The dimensionless y-component of the spin of mass1. Default is 0.
    spin1z : float, optional
        The dimensionless z-component of the spin of mass1. Default is 0.
    spin2x : float, optional
        The dimensionless x-component of the spin of mass2. Default is 0.
    spin2y : float, optional
        The dimensionless y-component of the spin of mass2. Default is 0.
    spin2z : float, optional
        The dimensionless z-component of the spin of mass2. Default is 0.
    approximant : str, optional
        The waveform approximant to use for the fit function. Default is
        "SEOBNRv4".

    Returns
    -------
    float
        The final mass, in solar masses.
    """
    return get_final_from_initial(mass1, mass2, spin1x, spin1y, spin1z,
                                  spin2x, spin2y, spin2z, approximant)[0]
python
{ "resource": "" }
q32118
nltides_gw_phase_diff_isco
train
def nltides_gw_phase_diff_isco(f_low, f0, amplitude, n, m1, m2):
    """Calculate the gravitational-wave phase shift between f_low and
    f_isco due to non-linear tides.

    Parameters
    ----------
    f_low : float
        Frequency from which to compute phase. If the other arguments are
        passed as numpy arrays then the value of f_low is duplicated for
        all elements in the array.
    f0 : float or numpy.array
        Frequency that NL effects switch on.
    amplitude : float or numpy.array
        Amplitude of effect.
    n : float or numpy.array
        Growth dependence of effect.
    m1 : float or numpy.array
        Mass of component 1.
    m2 : float or numpy.array
        Mass of component 2.

    Returns
    -------
    delta_phi : float or numpy.array
        Phase in radians.
    """
    f0, amplitude, n, m1, m2, input_is_array = ensurearray(
        f0, amplitude, n, m1, m2)
    f_low = numpy.zeros(m1.shape) + f_low
    phi_l = nltides_gw_phase_difference(
        f_low, f0, amplitude, n, m1, m2)
    f_isco = f_schwarzchild_isco(m1+m2)
    phi_i = nltides_gw_phase_difference(
        f_isco, f0, amplitude, n, m1, m2)
    return formatreturn(phi_i - phi_l, input_is_array)
python
{ "resource": "" }
q32119
EmceeEnsembleSampler.model_stats
train
def model_stats(self):
    """A dict mapping the model's ``default_stats`` to arrays of values.

    The returned array has shape ``nwalkers x niterations``.
    """
    stats = self.model.default_stats
    return blob_data_to_dict(stats, self._sampler.blobs)
python
{ "resource": "" }
q32120
EmceeEnsembleSampler.set_state_from_file
train
def set_state_from_file(self, filename):
    """Sets the state of the sampler back to the instance saved in a
    file.
    """
    with self.io(filename, 'r') as fp:
        rstate = fp.read_random_state()
    # set the numpy random state
    numpy.random.set_state(rstate)
    # set emcee's generator to the same state
    self._sampler.random_state = rstate
python
{ "resource": "" }
q32121
EmceeEnsembleSampler.write_results
train
def write_results(self, filename):
    """Writes samples, model stats, acceptance fraction, and random
    state to the given file.

    Parameters
    ----------
    filename : str
        The file to write to. The file is opened using the ``io`` class
        in an append state.
    """
    with self.io(filename, 'a') as fp:
        # write samples
        fp.write_samples(self.samples, self.model.variable_params,
                         last_iteration=self.niterations)
        # write stats
        fp.write_samples(self.model_stats,
                         last_iteration=self.niterations)
        # write acceptance
        fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
        # write random state
        fp.write_random_state(state=self._sampler.random_state)
python
{ "resource": "" }
q32122
default_empty
train
def default_empty(shape, dtype):
    """Numpy's empty array can have random values in it. To prevent that,
    we define here a default empty array. This default empty is a
    numpy.zeros array, except that objects are set to None, and all ints
    to ID_NOT_SET.
    """
    default = numpy.zeros(shape, dtype=dtype)
    set_default_empty(default)
    return default
python
{ "resource": "" }
q32123
lstring_as_obj
train
def lstring_as_obj(true_or_false=None):
    """Toggles whether lstrings should be treated as strings or as
    objects. When FieldArrays is first loaded, the default is True.

    Parameters
    ----------
    true_or_false : {None|bool}
        Pass True to map lstrings to objects; False otherwise. If None
        provided, just returns the current state.

    Returns
    -------
    current_state : bool
        The current state of lstring_as_obj.

    Examples
    --------
    >>> from pycbc.io import FieldArray
    >>> FieldArray.lstring_as_obj()
    True
    >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
    FieldArray([(0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,), (0.0,),
                (0.0,), (0.0,), (0.0,)], dtype=[('foo', 'O')])
    >>> FieldArray.lstring_as_obj(False)
    False
    >>> FieldArray.FieldArray.from_arrays([numpy.zeros(10)], dtype=[('foo', 'lstring')])
    FieldArray([('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',), ('0.0',),
                ('0.0',), ('0.0',), ('0.0',), ('0.0',)],
               dtype=[('foo', 'S50')])
    """
    if true_or_false is not None:
        _default_types_status['lstring_as_obj'] = true_or_false
        # update the typeDict
        numpy.typeDict[u'lstring'] = numpy.object_ \
            if _default_types_status['lstring_as_obj'] \
            else 'S%i' % _default_types_status['default_strlen']
    return _default_types_status['lstring_as_obj']
python
{ "resource": "" }
q32124
get_needed_fieldnames
train
def get_needed_fieldnames(arr, names):
    """Given a FieldArray-like array and a list of names, determines what
    fields are needed from the array so that using the names does not
    result in an error.

    Parameters
    ----------
    arr : instance of a FieldArray or similar
        The array from which to determine what fields to get.
    names : (list of) strings
        A list of the names that are desired. The names may be either a
        field, a virtualfield, a property, a method of ``arr``, or any
        function of these. If a virtualfield/property or a method, the
        source code of that property/method will be analyzed to pull out
        what fields are used in it.

    Returns
    -------
    set
        The set of the fields needed to evaluate the names.
    """
    fieldnames = set([])
    # we'll need the class that the array is an instance of to evaluate
    # some things
    cls = arr.__class__
    if isinstance(names, string_types):
        names = [names]
    # parse names for variables, in case some of them are functions of
    # fields
    parsed_names = set([])
    for name in names:
        parsed_names.update(get_fields_from_arg(name))
    # only include things that are in the array's namespace
    names = list(parsed_names & (set(dir(arr)) | set(arr.fieldnames)))
    for name in names:
        if name in arr.fieldnames:
            # is a field, just add the name
            fieldnames.update([name])
        else:
            # the name is either a virtualfield, a method, or some other
            # property; we need to evaluate the source code to figure out
            # what fields we need
            try:
                # the underlying functions of properties need to be
                # retrieved using their fget attribute
                func = getattr(cls, name).fget
            except AttributeError:
                # no fget attribute, assume is an instance method
                func = getattr(arr, name)
            # evaluate the source code of the function
            try:
                sourcecode = inspect.getsource(func)
            except TypeError:
                # not a function, just pass
                continue
            # evaluate the source code for the fields
            possible_fields = get_instance_fields_from_arg(sourcecode)
            # some of the variables returned by possible fields may
            # themselves be methods/properties that depend on other
            # fields. For instance, mchirp relies on eta and mtotal,
            # which each use mass1 and mass2; we therefore need to
            # analyze each of the possible fields
            fieldnames.update(get_needed_fieldnames(arr, possible_fields))
    return fieldnames
python
{ "resource": "" }
q32125
combine_fields
train
def combine_fields(dtypes):
    """Combines the fields in the list of given dtypes into a single
    dtype.

    Parameters
    ----------
    dtypes : (list of) numpy.dtype(s)
        Either a numpy.dtype, or a list of numpy.dtypes.

    Returns
    -------
    numpy.dtype
        A new dtype combining the fields in the list of dtypes.
    """
    if not isinstance(dtypes, list):
        dtypes = [dtypes]
    # Note: in case any of the dtypes have offsets, we won't include any
    # fields that have no names and are void
    new_dt = numpy.dtype([dt for dtype in dtypes
                          for dt in get_dtype_descr(dtype)])
    return new_dt
python
{ "resource": "" }
q32126
_ensure_array_list
train
def _ensure_array_list(arrays):
    """Ensures that every element in a list is an instance of a numpy
    array."""
    # Note: the isinstance test is needed below so that instances of
    # FieldArray are not converted to numpy arrays
    return [numpy.array(arr, ndmin=1)
            if not isinstance(arr, numpy.ndarray) else arr
            for arr in arrays]
python
{ "resource": "" }
q32127
merge_arrays
train
def merge_arrays(merge_list, names=None, flatten=True, outtype=None):
    """Merges the given arrays into a single array. The arrays must all
    have the same shape. If one or more of the given arrays has multiple
    fields, all of the fields will be included as separate fields in the
    new array.

    Parameters
    ----------
    merge_list : list of arrays
        The list of arrays to merge.
    names : {None | sequence of strings}
        Optional, the names of the fields in the output array. If flatten
        is True, must be the same length as the total number of fields in
        merge_list. Otherwise, must be the same length as the number of
        arrays in merge_list. If None provided, and flatten is True,
        names used will be the same as the name of the fields in the
        given arrays. If the datatype has no name, or flatten is False,
        the new field will be `fi` where i is the index of the array in
        arrays.
    flatten : bool
        Make all of the fields in the given arrays separate fields in the
        new array. Otherwise, each array will be added as a field. If an
        array has fields, they will be subfields in the output array.
        Default is True.
    outtype : {None | class}
        Cast the new array to the given type. Default is to return a
        numpy structured array.

    Returns
    -------
    new array : {numpy.ndarray | outtype}
        A new array with all of the fields in all of the arrays merged
        into a single array.
    """
    # make sure everything in merge_list is an array
    merge_list = _ensure_array_list(merge_list)
    if not all(merge_list[0].shape == arr.shape for arr in merge_list):
        raise ValueError("all of the arrays in merge_list must have the "
                         "same shape")
    if flatten:
        new_dt = combine_fields([arr.dtype for arr in merge_list])
    else:
        new_dt = numpy.dtype([('f%i' % ii, arr.dtype.descr)
                              for ii, arr in enumerate(merge_list)])
    new_arr = merge_list[0].__class__(merge_list[0].shape, dtype=new_dt)
    # ii is a counter to keep track of which fields from the new array
    # go with which arrays in merge list
    ii = 0
    for arr in merge_list:
        if arr.dtype.names is None:
            new_arr[new_dt.names[ii]] = arr
            ii += 1
        else:
            for field in arr.dtype.names:
                new_arr[field] = arr[field]
                ii += 1
    # set the names if desired
    if names is not None:
        new_arr.dtype.names = names
    # ditto the outtype
    if outtype is not None:
        new_arr = new_arr.view(type=outtype)
    return new_arr
python
{ "resource": "" }
q32128
_isstring
train
def _isstring(dtype):
    """Given a numpy dtype, determines whether it is a string. Returns
    True if the dtype is string or unicode.
    """
    return dtype.type == numpy.unicode_ or dtype.type == numpy.string_
python
{ "resource": "" }
q32129
fields_from_names
train
def fields_from_names(fields, names=None):
    """Given a dictionary of fields and a list of names, will return a
    dictionary consisting of the fields specified by names. Names can be
    either the names of fields, or their aliases.
    """
    if names is None:
        return fields
    if isinstance(names, string_types):
        names = [names]
    aliases_to_names = aliases_from_fields(fields)
    names_to_aliases = dict(zip(aliases_to_names.values(),
                                aliases_to_names.keys()))
    outfields = {}
    for name in names:
        try:
            outfields[name] = fields[name]
        except KeyError:
            if name in aliases_to_names:
                key = (name, aliases_to_names[name])
            elif name in names_to_aliases:
                key = (names_to_aliases[name], name)
            else:
                raise KeyError('default fields has no field %s' % name)
            outfields[key] = fields[key]
    return outfields
python
{ "resource": "" }
q32130
FieldArray.sort
train
def sort(self, axis=-1, kind='quicksort', order=None):
    """Sort an array, in-place.

    This function extends the standard numpy record array in-place sort
    to allow the basic use of FieldArray virtual fields. Only a single
    field is currently supported when referencing a virtual field.

    Parameters
    ----------
    axis : int, optional
        Axis along which to sort. Default is -1, which means sort along
        the last axis.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm. Default is 'quicksort'.
    order : list, optional
        When this array has fields defined, this argument specifies which
        fields to compare first, second, etc. Not all fields need be
        specified.
    """
    try:
        numpy.recarray.sort(self, axis=axis, kind=kind, order=order)
    except ValueError:
        if isinstance(order, list):
            raise ValueError("Cannot process more than one order field")
        self[:] = self[numpy.argsort(self[order])]
python
{ "resource": "" }
q32131
FieldArray.addattr
train
def addattr(self, attrname, value=None, persistent=True):
    """Adds an attribute to self. If persistent is True, the attribute
    will be made a persistent attribute. Persistent attributes are
    copied whenever a view or copy of this array is created. Otherwise,
    new views or copies of this will not have the attribute.
    """
    setattr(self, attrname, value)
    # add as persistent
    if persistent and attrname not in self.__persistent_attributes__:
        self.__persistent_attributes__.append(attrname)
python
{ "resource": "" }
q32132
FieldArray.add_properties
train
def add_properties(self, names, methods):
    """Returns a view of self with the given methods added as properties.

    From: <http://stackoverflow.com/a/2954373/1366472>.
    """
    cls = type(self)
    cls = type(cls.__name__, (cls,), dict(cls.__dict__))
    if isinstance(names, string_types):
        names = [names]
        methods = [methods]
    for name, method in zip(names, methods):
        setattr(cls, name, property(method))
    return self.view(type=cls)
python
{ "resource": "" }
q32133
FieldArray.add_virtualfields
train
def add_virtualfields(self, names, methods):
    """Returns a view of this array with the given methods added as
    virtual fields. Specifically, the given methods are added using
    add_properties and their names are added to the list of virtual
    fields. Virtual fields are properties that are assumed to operate on
    one or more of self's fields, thus returning an array of values.
    """
    if isinstance(names, string_types):
        names = [names]
        methods = [methods]
    out = self.add_properties(names, methods)
    if out._virtualfields is None:
        out._virtualfields = []
    out._virtualfields.extend(names)
    return out
python
{ "resource": "" }
q32134
FieldArray.add_functions
train
def add_functions(self, names, functions):
    """Adds the given functions to the function library.

    Functions are added to this instance of the array; all copies of and
    slices of this array will also have the new functions included.

    Parameters
    ----------
    names : (list of) string(s)
        Name or list of names of the functions.
    functions : (list of) function(s)
        The function(s) to call.
    """
    if isinstance(names, string_types):
        names = [names]
        functions = [functions]
    if len(functions) != len(names):
        raise ValueError("number of provided names must be same as "
                         "number of functions")
    self._functionlib.update(dict(zip(names, functions)))
python
{ "resource": "" }
q32135
FieldArray.del_functions
train
def del_functions(self, names):
    """Removes the specified function names from the function library.

    Functions are removed from this instance of the array; all copies
    and slices of this array will also have the functions removed.

    Parameters
    ----------
    names : (list of) string(s)
        Name or list of names of the functions to remove.
    """
    if isinstance(names, string_types):
        names = [names]
    for name in names:
        self._functionlib.pop(name)
python
{ "resource": "" }
q32136
FieldArray.from_ligolw_table
train
def from_ligolw_table(cls, table, columns=None, cast_to_dtypes=None):
    """Converts the given ligolw table into a FieldArray. The `tableName`
    attribute is copied to the array's `name`.

    Parameters
    ----------
    table : LIGOLw table instance
        The table to convert.
    columns : {None|list}
        Optionally specify a list of columns to retrieve. All of the
        columns must be in the table's validcolumns attribute. If None
        provided, all the columns in the table will be converted.
    cast_to_dtypes : {None | dict}
        Override the columns' dtypes using the given dictionary. The
        dictionary should be keyed by the column names, with the values a
        tuple that can be understood by numpy.dtype. For example, to cast
        a ligolw column called "foo" to a field called "bar" with type
        float, cast_to_dtypes would be: ``{"foo": ("bar", float)}``.

    Returns
    -------
    array : FieldArray
        The input table as a FieldArray.
    """
    name = table.tableName.split(':')[0]
    if columns is None:
        # get all the columns
        columns = table.validcolumns
    else:
        # note: this will raise a KeyError if one or more columns is
        # not in the table's validcolumns
        new_columns = {}
        for col in columns:
            new_columns[col] = table.validcolumns[col]
        columns = new_columns
    if cast_to_dtypes is not None:
        dtype = [cast_to_dtypes[col] for col in columns]
    else:
        dtype = columns.items()
    # get the values
    if _default_types_status['ilwd_as_int']:
        input_array = [tuple(
                getattr(row, col) if dt != 'ilwd:char'
                else int(getattr(row, col))
                for col, dt in columns.items())
            for row in table]
    else:
        input_array = [tuple(getattr(row, col) for col in columns)
                       for row in table]
    # return the values as an instance of cls
    return cls.from_records(input_array, dtype=dtype, name=name)
python
{ "resource": "" }
q32137
FieldArray.to_array
train
def to_array(self, fields=None, axis=0):
    """Returns a `numpy.ndarray` of self in which the fields are
    included as an extra dimension.

    Parameters
    ----------
    fields : {None, (list of) strings}
        The fields to get. All of the fields must have the same datatype.
        If None, will try to return all of the fields.
    axis : {0, int}
        Which dimension to put the fields in the returned array. For
        example, if `self` has shape `(l,m,n)` and `k` fields, the
        returned array will have shape `(k,l,m,n)` if `axis=0`,
        `(l,k,m,n)` if `axis=1`, etc. Setting `axis=-1` will put the
        fields in the last dimension. Default is 0.

    Returns
    -------
    numpy.ndarray
        The desired fields as a numpy array.
    """
    if fields is None:
        fields = self.fieldnames
    if isinstance(fields, string_types):
        fields = [fields]
    return numpy.stack([self[f] for f in fields], axis=axis)
python
{ "resource": "" }
q32138
FieldArray.virtualfields
train
def virtualfields(self):
    """Returns a tuple listing the names of virtual fields in self.
    """
    if self._virtualfields is None:
        vfs = tuple()
    else:
        vfs = tuple(self._virtualfields)
    return vfs
python
{ "resource": "" }
q32139
FieldArray.parse_boolargs
train
def parse_boolargs(self, args):
    """Returns an array populated by given values, with the indices of
    those values dependent on given boolean tests on self.

    The given `args` should be a list of tuples, with the first element
    the return value and the second element a string that evaluates to
    either True or False for each element in self.

    Each boolean argument is evaluated on elements for which every prior
    boolean argument was False. For example, if array `foo` has a field
    `bar`, and `args = [(1, 'bar < 10'), (2, 'bar < 20'),
    (3, 'bar < 30')]`, then the returned array will have `1`s at the
    indices for which `foo.bar < 10`, `2`s where
    `foo.bar < 20 and not foo.bar < 10`, and `3`s where
    `foo.bar < 30 and not (foo.bar < 10 or foo.bar < 20)`.

    The last argument in the list may have "else", an empty string, None,
    or simply list a return value. In any of these cases, any element not
    yet populated will be assigned the last return value.

    Parameters
    ----------
    args : {(list of) tuples, value}
        One or more return values and boolean argument determining where
        they should go.

    Returns
    -------
    return_values : array
        An array with length equal to self, with values populated with
        the return values.
    leftover_indices : array
        An array of indices that evaluated to False for all arguments.
        These indices will not have been populated with any value,
        defaulting to whatever numpy uses for a zero for the return
        values' dtype. If there are no leftovers, an empty array is
        returned.

    Examples
    --------
    Given the following array:

    >>> arr = FieldArray(5, dtype=[('mtotal', float)])
    >>> arr['mtotal'] = numpy.array([3., 5., 2., 1., 4.])

    Return `"TaylorF2"` for all elements with `mtotal < 4` (note that the
    elements 1 and 4 are leftover):

    >>> arr.parse_boolargs(('TaylorF2', 'mtotal<4'))
    (array(['TaylorF2', '', 'TaylorF2', 'TaylorF2', ''], dtype='|S8'),
     array([1, 4]))

    Return `"TaylorF2"` for all elements with `mtotal < 4`,
    `"SEOBNRv2_ROM_DoubleSpin"` otherwise:

    >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', 'else')])
    (array(['TaylorF2', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
            'SEOBNRv2_ROM_DoubleSpin'], dtype='|S23'),
     array([], dtype=int64))

    The following will also return the same:

    >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin',)])
    >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', '')])
    >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])

    Return `"TaylorF2"` for all elements with `mtotal < 3`,
    `"IMRPhenomD"` for all elements with `3 <= mtotal < 4`,
    `"SEOBNRv2_ROM_DoubleSpin"` otherwise:

    >>> arr.parse_boolargs([('TaylorF2', 'mtotal<3'), ('IMRPhenomD', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin'])
    (array(['IMRPhenomD', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2',
            'SEOBNRv2_ROM_DoubleSpin'], dtype='|S23'),
     array([], dtype=int64))

    Just return `"TaylorF2"` for all elements:

    >>> arr.parse_boolargs('TaylorF2')
    (array(['TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2'],
           dtype='|S8'),
     array([], dtype=int64))
    """
    if not isinstance(args, list):
        args = [args]
    # format the arguments
    return_vals = []
    bool_args = []
    for arg in args:
        if not isinstance(arg, tuple):
            return_val = arg
            bool_arg = None
        elif len(arg) == 1:
            return_val = arg[0]
            bool_arg = None
        elif len(arg) == 2:
            return_val, bool_arg = arg
        else:
            raise ValueError("argument not formatted correctly")
        return_vals.append(return_val)
        bool_args.append(bool_arg)
    # get the output dtype
    outdtype = numpy.array(return_vals).dtype
    out = numpy.zeros(self.size, dtype=outdtype)
    mask = numpy.zeros(self.size, dtype=bool)
    leftovers = numpy.ones(self.size, dtype=bool)
    for ii, (boolarg, val) in enumerate(zip(bool_args, return_vals)):
        if boolarg is None or boolarg == '' or boolarg.lower() == 'else':
            if ii+1 != len(bool_args):
                raise ValueError("only the last item may not provide "
                                 "any boolean arguments")
            mask = leftovers
        else:
            mask = leftovers & self[boolarg]
        out[mask] = val
        leftovers &= ~mask
    return out, numpy.where(leftovers)[0]
python
{ "resource": "" }
q32140
FieldArray.append
train
def append(self, other):
    """Appends another array to this array.

    The returned array will have all of the class methods and virtual
    fields of this array, including any that were added using
    `add_method` or `add_virtualfield`. If this array and the other
    array have one or more string fields, the dtype for those fields are
    updated to a string length that can encompass the longest string in
    both arrays.

    .. note::
        Increasing the length of strings only works for fields, not
        sub-fields.

    Parameters
    ----------
    other : array
        The array to append values from. It must have the same fields
        and dtype as this array, modulo the length of strings. If the
        other array does not have the same dtype, a TypeError is raised.

    Returns
    -------
    array
        An array with the other's values appended to this array's
        values. The returned array is an instance of the same class as
        this array, including all methods and virtual fields.
    """
    try:
        return numpy.append(self, other).view(type=self.__class__)
    except TypeError:
        # see if the dtype error was due to string fields having
        # different lengths; if so, we'll make the joint field the
        # larger of the two
        str_fields = [name for name in self.fieldnames
                      if _isstring(self.dtype[name])]
        # get the larger of the two
        new_strlens = dict(
            [[name, max(self.dtype[name].itemsize,
                        other.dtype[name].itemsize)]
             for name in str_fields]
        )
        # cast both to the new string lengths
        new_dt = []
        for dt in self.dtype.descr:
            name = dt[0]
            if name in new_strlens:
                dt = (name, self.dtype[name].type, new_strlens[name])
            new_dt.append(dt)
        new_dt = numpy.dtype(new_dt)
        return numpy.append(
            self.astype(new_dt),
            other.astype(new_dt)
        ).view(type=self.__class__)
python
{ "resource": "" }
q32141
_FieldArrayWithDefaults.add_default_fields
train
def add_default_fields(self, names, **kwargs):
    """Adds one or more empty default fields to self.

    Parameters
    ----------
    names : (list of) string(s)
        The names of the fields to add. Must be a field in self's
        default fields.

    Other keyword args are any arguments passed to self's default fields.

    Returns
    -------
    new array : instance of this array
        A copy of this array with the field added.
    """
    if isinstance(names, string_types):
        names = [names]
    default_fields = self.default_fields(include_virtual=False, **kwargs)
    # parse out any virtual fields
    arr = self.__class__(1, field_kwargs=kwargs)
    # try to preserve order
    sortdict = dict([[nm, ii] for ii, nm in enumerate(names)])
    names = list(get_needed_fieldnames(arr, names))
    names.sort(key=lambda x: sortdict[x] if x in sortdict
               else len(names))
    fields = [(name, default_fields[name]) for name in names]
    arrays = []
    names = []
    for name, dt in fields:
        arrays.append(default_empty(self.size, dtype=[(name, dt)]))
        names.append(name)
    return self.add_fields(arrays, names)
python
{ "resource": "" }
q32142
WaveformArray.chi_eff
train
def chi_eff(self):
    """Returns the effective spin."""
    return conversions.chi_eff(self.mass1, self.mass2, self.spin1z,
                               self.spin2z)
python
{ "resource": "" }
q32143
WaveformArray.spin_px
train
def spin_px(self):
    """Returns the x-component of the spin of the primary mass."""
    return conversions.primary_spin(self.mass1, self.mass2, self.spin1x,
                                    self.spin2x)
python
{ "resource": "" }
q32144
WaveformArray.spin_py
train
def spin_py(self):
    """Returns the y-component of the spin of the primary mass."""
    return conversions.primary_spin(self.mass1, self.mass2, self.spin1y,
                                    self.spin2y)
python
{ "resource": "" }
q32145
WaveformArray.spin_pz
train
def spin_pz(self):
    """Returns the z-component of the spin of the primary mass."""
    return conversions.primary_spin(self.mass1, self.mass2, self.spin1z,
                                    self.spin2z)
python
{ "resource": "" }
q32146
WaveformArray.spin_sx
train
def spin_sx(self):
    """Returns the x-component of the spin of the secondary mass."""
    return conversions.secondary_spin(self.mass1, self.mass2,
                                      self.spin1x, self.spin2x)
python
{ "resource": "" }
q32147
WaveformArray.spin_sy
train
def spin_sy(self):
    """Returns the y-component of the spin of the secondary mass."""
    return conversions.secondary_spin(self.mass1, self.mass2,
                                      self.spin1y, self.spin2y)
python
{ "resource": "" }
q32148
WaveformArray.spin_sz
train
def spin_sz(self):
    """Returns the z-component of the spin of the secondary mass."""
    return conversions.secondary_spin(self.mass1, self.mass2,
                                      self.spin1z, self.spin2z)
python
{ "resource": "" }
q32149
WaveformArray.spin1_a
train
def spin1_a(self):
    """Returns the dimensionless spin magnitude of mass 1."""
    return coordinates.cartesian_to_spherical_rho(
        self.spin1x, self.spin1y, self.spin1z)
python
{ "resource": "" }
q32150
WaveformArray.spin1_polar
train
def spin1_polar(self):
    """Returns the polar spin angle of mass 1."""
    return coordinates.cartesian_to_spherical_polar(
        self.spin1x, self.spin1y, self.spin1z)
python
{ "resource": "" }
q32151
WaveformArray.spin2_a
train
def spin2_a(self):
    """Returns the dimensionless spin magnitude of mass 2."""
    # note: the original passed the spin1 components here, which
    # contradicted the docstring; use the spin2 components instead
    return coordinates.cartesian_to_spherical_rho(
        self.spin2x, self.spin2y, self.spin2z)
python
{ "resource": "" }
q32152
WaveformArray.spin2_polar
train
def spin2_polar(self):
    """Returns the polar spin angle of mass 2."""
    return coordinates.cartesian_to_spherical_polar(
        self.spin2x, self.spin2y, self.spin2z)
python
{ "resource": "" }
q32153
setup_splittable_workflow
train
def setup_splittable_workflow(workflow, input_tables, out_dir=None,
                              tags=None):
    '''This function is the gateway for code responsible for taking an
    input file containing some table and splitting it into multiple
    files, each containing a different part of that table. For now the
    only supported operation is using lalapps_splitbank to split a
    template bank xml file into multiple template bank xml files.

    Parameters
    ----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the jobs will be added to.
    input_tables : pycbc.workflow.core.FileList
        The input files to be split up.
    out_dir : path
        The directory in which output will be written.

    Returns
    -------
    split_table_outs : pycbc.workflow.core.FileList
        The list of split up files as output from this job.
    '''
    if tags is None:
        tags = []
    logging.info("Entering split output files module.")
    make_analysis_dir(out_dir)
    # Parse for options in .ini file
    splitMethod = workflow.cp.get_opt_tags("workflow-splittable",
                                           "splittable-method", tags)
    if splitMethod == "IN_WORKFLOW":
        # Scope here for choosing different options
        logging.info("Adding split output file jobs to workflow.")
        split_table_outs = setup_splittable_dax_generated(
            workflow, input_tables, out_dir, tags)
    elif splitMethod == "NOOP":
        # Probably better not to call the module at all, but this option
        # will return the input file list.
        split_table_outs = input_tables
    else:
        errMsg = "Splittable method not recognized. Must be one of "
        errMsg += "IN_WORKFLOW or NOOP."
        raise ValueError(errMsg)
    logging.info("Leaving split output files module.")
    return split_table_outs
python
{ "resource": "" }
q32154
setup_splittable_dax_generated
train
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
    '''Function for setting up the splitting jobs as part of the
    workflow.

    Parameters
    ----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the jobs will be added to.
    input_tables : pycbc.workflow.core.FileList
        The input files to be split up.
    out_dir : path
        The directory in which output will be written.

    Returns
    -------
    split_table_outs : pycbc.workflow.core.FileList
        The list of split up files as output from this job.
    '''
    cp = workflow.cp
    # Get values from ini file
    try:
        num_splits = cp.get_opt_tags("workflow-splittable",
                                     "splittable-num-banks", tags)
    except BaseException:
        inj_interval = int(cp.get_opt_tags("workflow-splittable",
                                           "splitinjtable-interval",
                                           tags))
        if cp.has_option_tags("em_bright_filter", "max-keep", tags) and \
                cp.has_option("workflow-injections", "em-bright-only"):
            num_injs = int(cp.get_opt_tags("em_bright_filter",
                                           "max-keep", tags))
        else:
            num_injs = int(cp.get_opt_tags("workflow-injections",
                                           "num-injs", tags))
        inj_tspace = float(abs(workflow.analysis_time)) / num_injs
        num_splits = int(inj_interval // inj_tspace) + 1
    split_exe_tag = cp.get_opt_tags("workflow-splittable",
                                    "splittable-exe-tag", tags)
    split_exe = os.path.basename(cp.get("executables", split_exe_tag))
    # Select the appropriate class
    exe_class = select_splitfilejob_instance(split_exe)
    # Set up output structure
    out_file_groups = FileList([])
    # Set up the condorJob class for the current executable
    curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits,
                             out_dir=out_dir)
    # renamed from ``input`` to avoid shadowing the builtin
    for input_file in input_tables:
        node = curr_exe_job.create_node(input_file, tags=tags)
        workflow.add_node(node)
        out_file_groups += node.output_files
    return out_file_groups
python
{ "resource": "" }
q32155
get_summary_page_link
train
def get_summary_page_link(ifo, utc_time):
    """Return a string that links to the summary page and aLOG for this
    ifo.

    Parameters
    ----------
    ifo : string
        The detector name.
    utc_time : sequence
        First three elements must give year, month, and day respectively
        (as integers, since they are formatted with %d below).

    Returns
    -------
    return_string : string
        String containing HTML for links to summary page and aLOG
        search.
    """
    search_form = search_form_string
    data = {'H1': data_h1_string, 'L1': data_l1_string}
    if ifo not in data:
        return ifo
    else:
        # alog format is day-month-year
        alog_utc = '%02d-%02d-%4d' % (utc_time[2], utc_time[1],
                                      utc_time[0])
        # summary page is exactly the reverse
        ext = '%4d%02d%02d' % (utc_time[0], utc_time[1], utc_time[2])
        return_string = search_form % (ifo.lower(), ifo.lower(),
                                       alog_utc, alog_utc)
        return return_string + data[ifo] % ext
python
{ "resource": "" }
q32156
pkg_config
train
def pkg_config(pkg_libraries):
    """Use pkg-config to query for the location of libraries, library
    directories, and header directories.

    Arguments:
        pkg_libraries(list): A list of packages as strings

    Returns:
        libraries(list), library_dirs(list), include_dirs(list)
    """
    libraries = []
    library_dirs = []
    include_dirs = []

    # Check that we have the packages
    for pkg in pkg_libraries:
        if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0:
            pass
        else:
            print("Could not find library {0}".format(pkg))
            sys.exit(1)

    # Get the pkg-config flags
    if len(pkg_libraries) > 0:
        # PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths.
        # On system-wide LAL installs, this is needed for swig to find
        # lalswig.i
        for token in getoutput(
                "PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config "
                "--libs --cflags %s" % ' '.join(pkg_libraries)).split():
            if token.startswith("-l"):
                libraries.append(token[2:])
            elif token.startswith("-L"):
                library_dirs.append(token[2:])
            elif token.startswith("-I"):
                include_dirs.append(token[2:])

    return libraries, library_dirs, include_dirs
python
{ "resource": "" }
q32157
pkg_config_header_strings
train
def pkg_config_header_strings(pkg_libraries):
    """Returns a list of header strings that could be passed to a
    compiler.
    """
    _, _, header_dirs = pkg_config(pkg_libraries)

    header_strings = []
    for header_dir in header_dirs:
        header_strings.append("-I" + header_dir)

    return header_strings
python
{ "resource": "" }
q32158
pkg_config_libdirs
train
def pkg_config_libdirs(packages):
    """Returns a list of all library paths that pkg-config says should
    be included when linking against the list of packages given as
    'packages'. An empty return list means that the package may be found
    in the standard system locations, irrespective of pkg-config.
    """
    # don't try calling pkg-config if NO_PKGCONFIG is set in environment
    if os.environ.get("NO_PKGCONFIG", None):
        return []

    # if calling pkg-config fails, don't continue and don't try again.
    try:
        FNULL = open(os.devnull, 'w')
        subprocess.check_call(["pkg-config", "--version"],
                              stdout=FNULL, close_fds=True)
    except Exception:
        # narrowed from a bare ``except:`` so that e.g. KeyboardInterrupt
        # is not swallowed
        print("PyCBC.libutils: pkg-config call failed, "
              "setting NO_PKGCONFIG=1", file=sys.stderr)
        os.environ['NO_PKGCONFIG'] = "1"
        return []

    # First, check that we can call pkg-config on each package in the
    # list
    for pkg in packages:
        if not pkg_config_check_exists(pkg):
            raise ValueError("Package {0} cannot be found on the "
                             "pkg-config search path".format(pkg))

    libdirs = []
    for token in getoutput(
            "PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs-only-L {0}"
            .format(' '.join(packages))).split():
        if token.startswith("-L"):
            libdirs.append(token[2:])
    return libdirs
python
{ "resource": "" }
q32159
get_libpath_from_dirlist
train
def get_libpath_from_dirlist(libname, dirs):
    """This function tries to find the architecture-independent library
    given by libname in the first available directory in the list dirs.
    'Architecture-independent' means omitting any prefix such as 'lib'
    or suffix such as 'so' or 'dylib' or version number. Within the
    first directory in which a matching pattern can be found, the
    lexicographically first such file is returned, as a string giving
    the full path name. The only supported OSes at the moment are posix
    and mac, and this function does not attempt to determine which is
    being run. So if for some reason your directory has both '.so' and
    '.dylib' libraries, who knows what will happen. If the library
    cannot be found, None is returned.
    """
    dirqueue = deque(dirs)
    while (len(dirqueue) > 0):
        nextdir = dirqueue.popleft()
        possible = []
        # Our directory might be no good, so try/except
        try:
            for libfile in os.listdir(nextdir):
                if fnmatch.fnmatch(libfile, 'lib'+libname+'.so*') or \
                        fnmatch.fnmatch(libfile, 'lib'+libname+'.dylib*') or \
                        fnmatch.fnmatch(libfile, libname+'.dll') or \
                        fnmatch.fnmatch(libfile, 'cyg'+libname+'-*.dll'):
                    possible.append(libfile)
        except OSError:
            pass
        # There might be more than one library found; we want the
        # highest-numbered
        if (len(possible) > 0):
            possible.sort()
            return os.path.join(nextdir, possible[-1])
    # If we get here, we didn't find it...
    return None
python
{ "resource": "" }
q32160
thin_samples_for_writing
train
def thin_samples_for_writing(fp, samples, parameters, last_iteration):
    """Thins samples for writing to disk.

    The thinning interval to use is determined by the given file
    handler's ``thinned_by`` attribute. If that attribute is 1, just
    returns the samples.

    Parameters
    ----------
    fp : MCMCMetadataIO instance
        The file the samples will be written to. Needed to determine the
        thin interval used on disk.
    samples : dict
        Dictionary mapping parameter names to arrays of (unthinned)
        samples. The arrays are thinned along their last dimension.
    parameters : list of str
        The parameters to thin in ``samples`` before writing. All listed
        parameters must be in ``samples``.
    last_iteration : int
        The iteration that the last sample in ``samples`` occurred at.
        This is needed to figure out where to start the thinning in
        ``samples``, such that the interval between the last sample on
        disk and the first new sample is the same as all of the other
        samples.

    Returns
    -------
    dict :
        Dictionary of the thinned samples to write.
    """
    if fp.thinned_by > 1:
        if last_iteration is None:
            raise ValueError("File's thinned_by attribute is > 1 ({}), "
                             "but last_iteration not provided."
                             .format(fp.thinned_by))
        thinned_samples = {}
        for param in parameters:
            data = samples[param]
            nsamples = data.shape[-1]
            # To figure out where to start:
            # the last iteration in the file + the file's thinning
            # interval gives the iteration of the next sample that
            # should be written; last_iteration - nsamples gives the
            # iteration of the first sample in samples. Subtracting the
            # latter from the former - 1 (-1 to convert from iteration
            # to index) therefore gives the index in the samples data to
            # start using samples.
            thin_start = fp.last_iteration(param) + fp.thinned_by \
                         - (last_iteration - nsamples) - 1
            thinned_samples[param] = data[..., thin_start::fp.thinned_by]
    else:
        thinned_samples = samples
    return thinned_samples
python
{ "resource": "" }
q32161
MCMCMetadataIO.write_resume_point
train
def write_resume_point(self):
    """Keeps a list of the number of iterations that were in a file when
    a run was resumed from a checkpoint."""
    try:
        resume_pts = self.attrs["resume_points"].tolist()
    except KeyError:
        resume_pts = []
    try:
        niterations = self.niterations
    except KeyError:
        niterations = 0
    resume_pts.append(niterations)
    self.attrs["resume_points"] = resume_pts
python
{ "resource": "" }
q32162
MCMCMetadataIO.thin
train
def thin(self, thin_interval):
    """Thins the samples on disk using the given thinning interval.

    Parameters
    ----------
    thin_interval : int
        The interval to thin by.
    """
    # read thinned samples into memory
    params = self[self.samples_group].keys()
    samples = self.read_raw_samples(params, thin_start=0,
                                    thin_interval=thin_interval,
                                    thin_end=None, flatten=False)
    # now resize and write the data back to disk
    group = self[self.samples_group]
    for param in params:
        data = samples[param]
        # resize the arrays on disk
        group[param].resize(data.shape)
        # and write
        group[param][:] = data
    # store the interval that samples were thinned by
    self.thinned_by *= thin_interval
    # If a default thin interval and thin start exist, reduce them by
    # the thinned interval. If the thin interval is not an integer
    # multiple of the original, we'll round up, to avoid getting samples
    # from before the burn in / at an interval less than the ACL.
    self.thin_start = int(numpy.ceil(self.thin_start/thin_interval))
    self.thin_interval = int(numpy.ceil(self.thin_interval/thin_interval))
python
{ "resource": "" }
q32163
MCMCMetadataIO.last_iteration
train
def last_iteration(self, parameter=None):
    """Returns the iteration of the last sample of the given parameter.

    If parameter is ``None``, will just use the first parameter in the
    samples group.
    """
    if parameter is None:
        try:
            parameter = list(self[self.samples_group].keys())[0]
        except (IndexError, KeyError):
            # nothing has been written yet, just return 0
            return 0
    try:
        lastiter = self[self.samples_group][parameter].shape[-1]
    except KeyError:
        # no samples have been written, just return 0
        lastiter = 0
    # account for thinning
    return lastiter * self.thinned_by
python
{ "resource": "" }
q32164
MCMCMetadataIO.iterations
train
def iterations(self, parameter):
    """Returns the iteration each sample occurred at."""
    return numpy.arange(0, self.last_iteration(parameter),
                        self.thinned_by)
python
{ "resource": "" }
q32165
MCMCMetadataIO.write_sampler_metadata
train
def write_sampler_metadata(self, sampler):
    """Writes the sampler's metadata."""
    self.attrs['sampler'] = sampler.name
    self[self.sampler_group].attrs['nwalkers'] = sampler.nwalkers
    # write the model's metadata
    sampler.model.write_metadata(self)
python
{ "resource": "" }
q32166
MCMCMetadataIO.write_acls
train
def write_acls(self, acls):
    """Writes the given autocorrelation lengths.

    The ACL of each parameter is saved to
    ``[sampler_group]/acls/{param}``. The maximum over all the
    parameters is saved to the file's 'acl' attribute.

    Parameters
    ----------
    acls : dict
        A dictionary of ACLs keyed by the parameter.

    Returns
    -------
    acl
        The maximum of the ACLs that was written to the file.
    """
    group = self.sampler_group + '/acls/{}'
    # write the individual acls
    for param in acls:
        try:
            # we need to use the write_direct function because it's
            # apparently the only way to update scalars in h5py
            self[group.format(param)].write_direct(
                numpy.array(acls[param]))
        except KeyError:
            # dataset doesn't exist yet
            self[group.format(param)] = acls[param]
    # write the maximum over all params; wrap the values in list() so
    # this also works with Python 3 dict views
    acl = numpy.array(list(acls.values())).max()
    self[self.sampler_group].attrs['acl'] = acl
    # set the default thin interval to be the acl (if it is finite)
    if numpy.isfinite(acl):
        self.thin_interval = int(numpy.ceil(acl))
    # return the maximum, as promised by the docstring (the original had
    # no return statement)
    return acl
python
{ "resource": "" }
q32167
MCMCMetadataIO.read_acls
train
def read_acls(self):
    """Reads the acls of all the parameters.

    Returns
    -------
    dict
        A dictionary of the ACLs, keyed by the parameter name.
    """
    group = self[self.sampler_group]['acls']
    # note: ``.value`` was removed in h5py 3; ``[()]`` reads a dataset's
    # contents in both old and new versions
    return {param: group[param][()] for param in group.keys()}
python
{ "resource": "" }
q32168
MCMCMetadataIO.write_burn_in
train
def write_burn_in(self, burn_in):
    """Write the given burn-in data to the given filename."""
    group = self[self.sampler_group]
    group.attrs['burn_in_test'] = burn_in.burn_in_test
    group.attrs['is_burned_in'] = burn_in.is_burned_in
    group.attrs['burn_in_iteration'] = burn_in.burn_in_iteration
    # set the default thin_start to be the burn_in_index
    self.thin_start = burn_in.burn_in_index
    # write individual test data
    for tst in burn_in.burn_in_data:
        key = 'burn_in_tests/{}'.format(tst)
        try:
            attrs = group[key].attrs
        except KeyError:
            group.create_group(key)
            attrs = group[key].attrs
        self.write_kwargs_to_attrs(attrs, **burn_in.burn_in_data[tst])
python
{ "resource": "" }
q32169
MCMCMetadataIO.extra_args_parser
train
def extra_args_parser(parser=None, skip_args=None, **kwargs):
        """Create a parser to parse sampler-specific arguments for loading
        samples.

        Parameters
        ----------
        parser : argparse.ArgumentParser, optional
            Instead of creating a parser, add arguments to the given one. If
            none provided, will create one.
        skip_args : list, optional
            Don't parse the given options. Options should be given as the
            option string, minus the '--'. For example,
            ``skip_args=['iteration']`` would cause the ``--iteration``
            argument not to be included.
        \**kwargs :
            All other keyword arguments are passed to the parser that is
            created.

        Returns
        -------
        parser : argparse.ArgumentParser
            An argument parser with the extra arguments added.
        actions : list of argparse.Action
            A list of the actions that were added.
        """
        if parser is None:
            parser = argparse.ArgumentParser(**kwargs)
        elif kwargs:
            raise ValueError("No other keyword arguments should be provided "
                             "if a parser is provided.")
        if skip_args is None:
            skip_args = []
        actions = []
        if 'thin-start' not in skip_args:
            act = parser.add_argument(
                "--thin-start", type=int, default=None,
                help="Sample number to start collecting samples to plot. If "
                     "none provided, will use the input file's `thin_start` "
                     "attribute.")
            actions.append(act)
        if 'thin-interval' not in skip_args:
            act = parser.add_argument(
                "--thin-interval", type=int, default=None,
                help="Interval to use for thinning samples. If none "
                     "provided, will use the input file's `thin_interval` "
                     "attribute.")
            actions.append(act)
        if 'thin-end' not in skip_args:
            act = parser.add_argument(
                "--thin-end", type=int, default=None,
                help="Sample number to stop collecting samples to plot. If "
                     "none provided, will use the input file's `thin_end` "
                     "attribute.")
            actions.append(act)
        if 'iteration' not in skip_args:
            act = parser.add_argument(
                "--iteration", type=int, default=None,
                help="Only retrieve the given iteration. To load "
                     "the last n-th sample use -n, e.g., -1 will "
                     "load the last iteration. This overrides "
                     "the thin-start/interval/end options.")
            actions.append(act)
        if 'walkers' not in skip_args:
            act = parser.add_argument(
                "--walkers", type=int, nargs="+", default=None,
                help="Only retrieve samples from the listed "
                     "walkers. Default is to retrieve from all "
                     "walkers.")
            actions.append(act)
        return parser, actions
python
{ "resource": "" }
q32170
SingleTempMCMCIO.write_samples
train
def write_samples(self, samples, parameters=None, last_iteration=None):
        """Writes samples to the given file.

        Results are written to ``samples_group/{vararg}``, where ``{vararg}``
        is the name of a model param. The samples are written as an
        ``nwalkers x niterations`` array. If samples already exist, the new
        samples are appended to the current ones.

        If the current samples on disk have been thinned (determined by the
        ``thinned_by`` attribute in the samples group), then the samples will
        be thinned by the same amount before being written. The thinning is
        started at the sample in ``samples`` that occurred at the iteration
        equal to the last iteration on disk plus the ``thinned_by`` interval.
        If this iteration is larger than the iteration of the last given
        sample, then none of the samples will be written.

        Parameters
        -----------
        samples : dict
            The samples to write. Each array in the dictionary should have
            shape nwalkers x niterations.
        parameters : list, optional
            Only write the specified parameters to the file. If None, will
            write all of the keys in the ``samples`` dict.
        last_iteration : int, optional
            The iteration of the last sample. If the file's ``thinned_by``
            attribute is > 1, this is needed to determine where to start
            thinning the samples such that the interval between the last
            sample currently on disk and the first new sample is the same
            as all of the other samples.
        """
        nwalkers, nsamples = list(samples.values())[0].shape
        assert all(p.shape == (nwalkers, nsamples)
                   for p in samples.values()), (
               "all samples must have the same shape")
        group = self.samples_group + '/{name}'
        if parameters is None:
            parameters = samples.keys()
        # thin the samples
        samples = thin_samples_for_writing(self, samples, parameters,
                                           last_iteration)
        # loop over number of dimensions
        for param in parameters:
            dataset_name = group.format(name=param)
            data = samples[param]
            # check that there's something to write after thinning
            if data.shape[1] == 0:
                # nothing to write, move along
                continue
            try:
                fp_nsamples = self[dataset_name].shape[-1]
                istart = fp_nsamples
                istop = istart + data.shape[1]
                if istop > fp_nsamples:
                    # resize the dataset
                    self[dataset_name].resize(istop, axis=1)
            except KeyError:
                # dataset doesn't exist yet
                istart = 0
                istop = istart + data.shape[1]
                self.create_dataset(dataset_name, (nwalkers, istop),
                                    maxshape=(nwalkers, None),
                                    dtype=data.dtype, fletcher32=True)
            self[dataset_name][:, istart:istop] = data
python
{ "resource": "" }
q32171
SingleTempMCMCIO.read_raw_samples
train
def read_raw_samples(self, fields, thin_start=None, thin_interval=None, thin_end=None, iteration=None, walkers=None, flatten=True): """Base function for reading samples. Parameters ----------- fields : list The list of field names to retrieve. Must be names of datasets in the ``samples_group``. thin_start : int, optional Start reading from the given iteration. Default is to start from the first iteration. thin_interval : int, optional Only read every ``thin_interval`` -th sample. Default is 1. thin_end : int, optional Stop reading at the given iteration. Default is to end at the last iteration. iteration : int, optional Only read the given iteration. If this provided, it overrides the ``thin_(start|interval|end)`` options. walkers : int, optional Only read from the given walkers. Default is to read all. flatten : bool, optional Flatten the samples to 1D arrays before returning. Otherwise, the returned arrays will have shape (requested walkers x requested iteration(s)). Default is True. Returns ------- dict A dictionary of field name -> numpy array pairs. """ if isinstance(fields, (str, unicode)): fields = [fields] # walkers to load if walkers is not None: widx = numpy.zeros(self.nwalkers, dtype=bool) widx[walkers] = True nwalkers = widx.sum() else: widx = slice(0, None) nwalkers = self.nwalkers # get the slice to use if iteration is not None: get_index = int(iteration) niterations = 1 else: get_index = self.get_slice(thin_start=thin_start, thin_end=thin_end, thin_interval=thin_interval) # we'll just get the number of iterations from the returned shape niterations = None # load group = self.samples_group + '/{name}' arrays = {} for name in fields: arr = self[group.format(name=name)][widx, get_index] if niterations is None: niterations = arr.shape[-1] if flatten: arr = arr.flatten() else: # ensure that the returned array is 2D arr = arr.reshape((nwalkers, niterations)) arrays[name] = arr return arrays
python
{ "resource": "" }
q32172
setup_datafind_runtime_frames_single_call_perifo
train
def setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs,
                                                     outputDir, tags=None):
    """
    This function uses the glue.datafind library to obtain the location of
    all the frame files that will be needed to cover the analysis of the data
    given in scienceSegs. This function will not check if the returned frames
    cover the whole time requested; such sanity checks are done in the
    pycbc.workflow.setup_datafind_workflow entry function. As opposed to
    setup_datafind_runtime_generated this call will only run one call to
    datafind per ifo, spanning the whole time. This function will return a
    list of files corresponding to the individual frames returned by the
    datafind query. This will allow pegasus to more easily identify all the
    files used as input, but may cause problems for codes that need to take
    frame cache files as input.

    Parameters
    -----------
    cp : ConfigParser.ConfigParser instance
        This contains a representation of the information stored within the
        workflow configuration files
    scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
        This contains the times that the workflow is expected to analyse.
    outputDir : path
        All output files written by datafind processes will be written to
        this directory.
    tags : list of strings, optional (default=None)
        Use this to specify tags. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than
        [workflow-datafind]). This is also used to tag the Files returned by
        the class to uniqueify the Files and uniqueify the actual filename.
        FIXME: Filenames may not be unique with current codes!

    Returns
    --------
    datafindcaches : list of glue.lal.Cache instances
       The glue.lal.Cache representations of the various calls to the
       datafind server and the returned frame files.
    datafindOuts : pycbc.workflow.core.FileList
        List of all the datafind output files for use later in the pipeline.
    """
    datafindcaches, _ = \
        setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs,
                                                        outputDir, tags=tags)

    datafindouts = convert_cachelist_to_filelist(datafindcaches)

    return datafindcaches, datafindouts
python
{ "resource": "" }
q32173
setup_datafind_runtime_frames_multi_calls_perifo
train
def setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs,
                                                     outputDir, tags=None):
    """
    This function uses the glue.datafind library to obtain the location of
    all the frame files that will be needed to cover the analysis of the data
    given in scienceSegs. This function will not check if the returned frames
    cover the whole time requested; such sanity checks are done in the
    pycbc.workflow.setup_datafind_workflow entry function. As opposed to
    setup_datafind_runtime_single_call_perifo this call will make one call to
    the datafind server for every science segment. This function will return
    a list of files corresponding to the individual frames returned by the
    datafind query. This will allow pegasus to more easily identify all the
    files used as input, but may cause problems for codes that need to take
    frame cache files as input.

    Parameters
    -----------
    cp : ConfigParser.ConfigParser instance
        This contains a representation of the information stored within the
        workflow configuration files
    scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
        This contains the times that the workflow is expected to analyse.
    outputDir : path
        All output files written by datafind processes will be written to
        this directory.
    tags : list of strings, optional (default=None)
        Use this to specify tags. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than
        [workflow-datafind]). This is also used to tag the Files returned by
        the class to uniqueify the Files and uniqueify the actual filename.
        FIXME: Filenames may not be unique with current codes!

    Returns
    --------
    datafindcaches : list of glue.lal.Cache instances
       The glue.lal.Cache representations of the various calls to the
       datafind server and the returned frame files.
    datafindOuts : pycbc.workflow.core.FileList
        List of all the datafind output files for use later in the pipeline.
    """
    datafindcaches, _ = \
        setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs,
                                                        outputDir, tags=tags)

    datafindouts = convert_cachelist_to_filelist(datafindcaches)

    return datafindcaches, datafindouts
python
{ "resource": "" }
q32174
setup_datafind_from_pregenerated_lcf_files
train
def setup_datafind_from_pregenerated_lcf_files(cp, ifos, outputDir, tags=None): """ This function is used if you want to run with pregenerated lcf frame cache files. Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files ifos : list of ifo strings List of ifos to get pregenerated files for. outputDir : path All output files written by datafind processes will be written to this directory. Currently this sub-module writes no output. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline. """ if tags is None: tags = [] datafindcaches = [] for ifo in ifos: search_string = "datafind-pregenerated-cache-file-%s" %(ifo.lower(),) frame_cache_file_name = cp.get_opt_tags("workflow-datafind", search_string, tags=tags) curr_cache = lal.Cache.fromfilenames([frame_cache_file_name], coltype=lal.LIGOTimeGPS) curr_cache.ifo = ifo datafindcaches.append(curr_cache) datafindouts = convert_cachelist_to_filelist(datafindcaches) return datafindcaches, datafindouts
python
{ "resource": "" }
q32175
convert_cachelist_to_filelist
train
def convert_cachelist_to_filelist(datafindcache_list): """ Take as input a list of glue.lal.Cache objects and return a pycbc FileList containing all frames within those caches. Parameters ----------- datafindcache_list : list of glue.lal.Cache objects The list of cache files to convert. Returns -------- datafind_filelist : FileList of frame File objects The list of frame files. """ prev_file = None prev_name = None this_name = None datafind_filelist = FileList([]) for cache in datafindcache_list: # sort the cache into time sequential order cache.sort() curr_ifo = cache.ifo for frame in cache: # Pegasus doesn't like "localhost" in URLs. frame.url = frame.url.replace('file://localhost','file://') # Create one File() object for each unique frame file that we # get back in the cache. if prev_file: prev_name = os.path.basename(prev_file.cache_entry.url) this_name = os.path.basename(frame.url) if (prev_file is None) or (prev_name != this_name): currFile = File(curr_ifo, frame.description, frame.segment, file_url=frame.url, use_tmp_subdirs=True) datafind_filelist.append(currFile) prev_file = currFile # Populate the PFNs for the File() we just created if frame.url.startswith('file://'): currFile.PFN(frame.url, site='local') if frame.url.startswith( 'file:///cvmfs/oasis.opensciencegrid.org/ligo/frames'): # Datafind returned a URL valid on the osg as well # so add the additional PFNs to allow OSG access. currFile.PFN(frame.url, site='osg') currFile.PFN(frame.url.replace( 'file:///cvmfs/oasis.opensciencegrid.org/', 'root://xrootd-local.unl.edu/user/'), site='osg') currFile.PFN(frame.url.replace( 'file:///cvmfs/oasis.opensciencegrid.org/', 'gsiftp://red-gridftp.unl.edu/user/'), site='osg') currFile.PFN(frame.url.replace( 'file:///cvmfs/oasis.opensciencegrid.org/', 'gsiftp://ldas-grid.ligo.caltech.edu/hdfs/'), site='osg') elif frame.url.startswith( 'file:///cvmfs/gwosc.osgstorage.org/'): # Datafind returned a URL valid on the osg as well # so add the additional PFNs to allow OSG access. for s in ['osg', 'orangegrid', 'osgconnect']: currFile.PFN(frame.url, site=s) currFile.PFN(frame.url, site="{}-scratch".format(s)) else: currFile.PFN(frame.url, site='notlocal') return datafind_filelist
python
{ "resource": "" }
q32176
get_science_segs_from_datafind_outs
train
def get_science_segs_from_datafind_outs(datafindcaches): """ This function will calculate the science segments that are covered in the OutGroupList containing the frame files returned by various calls to the datafind server. This can then be used to check whether this list covers what it is expected to cover. Parameters ---------- datafindcaches : OutGroupList List of all the datafind output files. Returns -------- newScienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances The times covered by the frames found in datafindOuts. """ newScienceSegs = {} for cache in datafindcaches: if len(cache) > 0: groupSegs = segments.segmentlist(e.segment for e in cache).coalesce() ifo = cache.ifo if ifo not in newScienceSegs: newScienceSegs[ifo] = groupSegs else: newScienceSegs[ifo].extend(groupSegs) newScienceSegs[ifo].coalesce() return newScienceSegs
python
{ "resource": "" }
q32177
get_missing_segs_from_frame_file_cache
train
def get_missing_segs_from_frame_file_cache(datafindcaches): """ This function will use os.path.isfile to determine if all the frame files returned by the local datafind server actually exist on the disk. This can then be used to update the science times if needed. Parameters ----------- datafindcaches : OutGroupList List of all the datafind output files. Returns -------- missingFrameSegs : Dict. of ifo keyed glue.segment.segmentlist instances The times corresponding to missing frames found in datafindOuts. missingFrames: Dict. of ifo keyed lal.Cache instances The list of missing frames """ missingFrameSegs = {} missingFrames = {} for cache in datafindcaches: if len(cache) > 0: # Don't bother if these are not file:// urls, assume all urls in # one cache file must be the same type if not cache[0].scheme == 'file': warn_msg = "We have %s entries in the " %(cache[0].scheme,) warn_msg += "cache file. I do not check if these exist." logging.info(warn_msg) continue _, currMissingFrames = cache.checkfilesexist(on_missing="warn") missingSegs = segments.segmentlist(e.segment \ for e in currMissingFrames).coalesce() ifo = cache.ifo if ifo not in missingFrameSegs: missingFrameSegs[ifo] = missingSegs missingFrames[ifo] = lal.Cache(currMissingFrames) else: missingFrameSegs[ifo].extend(missingSegs) # NOTE: This .coalesce probably isn't needed as the segments # should be disjoint. If speed becomes an issue maybe remove it? missingFrameSegs[ifo].coalesce() missingFrames[ifo].extend(currMissingFrames) return missingFrameSegs, missingFrames
python
{ "resource": "" }
q32178
setup_datafind_server_connection
train
def setup_datafind_server_connection(cp, tags=None):
    """
    This function is responsible for setting up the connection with the
    datafind server.

    Parameters
    -----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser
        The memory representation of the ConfigParser
    tags : list of strings, optional (default=None)
        Used to retrieve tagged subsections of the ini file for
        configuration options.

    Returns
    --------
    connection
        The open connection to the datafind server.
    """
    if tags is None:
        tags = []

    if cp.has_option_tags("workflow-datafind",
                          "datafind-ligo-datafind-server", tags):
        datafind_server = cp.get_opt_tags("workflow-datafind",
                                        "datafind-ligo-datafind-server", tags)
    else:
        datafind_server = None

    return datafind_connection(datafind_server)
python
{ "resource": "" }
q32179
get_segment_summary_times
train
def get_segment_summary_times(scienceFile, segmentName): """ This function will find the times for which the segment_summary is set for the flag given by segmentName. Parameters ----------- scienceFile : SegFile The segment file that we want to use to determine this. segmentName : string The DQ flag to search for times in the segment_summary table. Returns --------- summSegList : ligo.segments.segmentlist The times that are covered in the segment summary table. """ # Parse the segmentName segmentName = segmentName.split(':') if not len(segmentName) in [2,3]: raise ValueError("Invalid channel name %s." %(segmentName)) ifo = segmentName[0] channel = segmentName[1] version = '' if len(segmentName) == 3: version = int(segmentName[2]) # Load the filename xmldoc = utils.load_filename(scienceFile.cache_entry.path, gz=scienceFile.cache_entry.path.endswith("gz"), contenthandler=ContentHandler) # Get the segment_def_id for the segmentName segmentDefTable = table.get_table(xmldoc, "segment_definer") for entry in segmentDefTable: if (entry.ifos == ifo) and (entry.name == channel): if len(segmentName) == 2 or (entry.version==version): segDefID = entry.segment_def_id break else: raise ValueError("Cannot find channel %s in segment_definer table."\ %(segmentName)) # Get the segmentlist corresponding to this segmentName in segment_summary segmentSummTable = table.get_table(xmldoc, "segment_summary") summSegList = segments.segmentlist([]) for entry in segmentSummTable: if entry.segment_def_id == segDefID: segment = segments.segment(entry.start_time, entry.end_time) summSegList.append(segment) summSegList.coalesce() return summSegList
python
{ "resource": "" }
q32180
run_datafind_instance
train
def run_datafind_instance(cp, outputDir, connection, observatory, frameType, startTime, endTime, ifo, tags=None): """ This function will query the datafind server once to find frames between the specified times for the specified frame type and observatory. Parameters ---------- cp : ConfigParser instance Source for any kwargs that should be sent to the datafind module outputDir : Output cache files will be written here. We also write the commands for reproducing what is done in this function to this directory. connection : datafind connection object Initialized through the glue.datafind module, this is the open connection to the datafind server. observatory : string The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not 'H1', 'L1', 'V1' which denote interferometers. frameType : string The frame type to query for. startTime : int Integer start time to query the datafind server for frames. endTime : int Integer end time to query the datafind server for frames. ifo : string The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'. Maybe this could be merged with the observatory string, but this could cause issues if running on old 'H2' and 'H1' data. tags : list of string, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniquify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- dfCache : glue.lal.Cache instance The glue.lal.Cache representation of the call to the datafind server and the returned frame files. cacheFile : pycbc.workflow.core.File Cache file listing all of the datafind output files for use later in the pipeline. """ if tags is None: tags = [] seg = segments.segment([startTime, endTime]) # Take the datafind kwargs from config (usually urltype=file is # given). dfKwargs = {} # By default ignore missing frames, this case is dealt with outside of here dfKwargs['on_gaps'] = 'ignore' if cp.has_section("datafind"): for item, value in cp.items("datafind"): dfKwargs[item] = value for tag in tags: if cp.has_section('datafind-%s' %(tag)): for item, value in cp.items("datafind-%s" %(tag)): dfKwargs[item] = value # It is useful to print the corresponding command to the logs # directory to check if this was expected. log_datafind_command(observatory, frameType, startTime, endTime, os.path.join(outputDir,'logs'), **dfKwargs) logging.debug("Asking datafind server for frames.") dfCache = connection.find_frame_urls(observatory, frameType, startTime, endTime, **dfKwargs) logging.debug("Frames returned") # workflow format output file cache_file = File(ifo, 'DATAFIND', seg, extension='lcf', directory=outputDir, tags=tags) cache_file.PFN(cache_file.cache_entry.path, site='local') dfCache.ifo = ifo # Dump output to file fP = open(cache_file.storage_path, "w") # FIXME: CANNOT use dfCache.tofile because it will print 815901601.00000 # as a gps time which is incompatible with the lal cache format # (and the C codes) which demand an integer. #dfCache.tofile(fP) for entry in dfCache: start = str(int(entry.segment[0])) duration = str(int(abs(entry.segment))) print("%s %s %s %s %s" \ % (entry.observatory, entry.description, start, duration, entry.url), file=fP) entry.segment = segments.segment(int(entry.segment[0]), int(entry.segment[1])) fP.close() return dfCache, cache_file
python
{ "resource": "" }
q32181
log_datafind_command
train
def log_datafind_command(observatory, frameType, startTime, endTime, outputDir, **dfKwargs): """ This command will print an equivalent gw_data_find command to disk that can be used to debug why the internal datafind module is not working. """ # FIXME: This does not accurately reproduce the call as assuming the # kwargs will be the same is wrong, so some things need to be converted # "properly" to the command line equivalent. gw_command = ['gw_data_find', '--observatory', observatory, '--type', frameType, '--gps-start-time', str(startTime), '--gps-end-time', str(endTime)] for name, value in dfKwargs.items(): if name == 'match': gw_command.append("--match") gw_command.append(str(value)) elif name == 'urltype': gw_command.append("--url-type") gw_command.append(str(value)) elif name == 'on_gaps': pass else: errMsg = "Unknown datafind kwarg given: %s. " %(name) errMsg+= "This argument is stripped in the logged .sh command." logging.warn(errMsg) fileName = "%s-%s-%d-%d.sh" \ %(observatory, frameType, startTime, endTime-startTime) filePath = os.path.join(outputDir, fileName) fP = open(filePath, 'w') fP.write(' '.join(gw_command)) fP.close()
python
{ "resource": "" }
q32182
segment_snrs
train
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
    """ This function calculates the SNR of each bank veto template against
    the segment

    Parameters
    ----------
    filters: list of FrequencySeries
        The list of bank veto template filters.
    stilde: FrequencySeries
        The current segment of data.
    psd: FrequencySeries
    low_frequency_cutoff: float

    Returns
    -------
    snr (list): List of snr time series.
    norm (list): List of normalization factors for the snr time series.
    """
    snrs = []
    norms = []

    for bank_template in filters:
        # For every template compute the snr against the stilde segment
        snr, _, norm = matched_filter_core(
                bank_template, stilde, h_norm=bank_template.sigmasq(psd),
                psd=None, low_frequency_cutoff=low_frequency_cutoff)
        # SNR time series stored here
        snrs.append(snr)
        # Template normalization factor stored here
        norms.append(norm)

    return snrs, norms
python
{ "resource": "" }
q32183
template_overlaps
train
def template_overlaps(bank_filters, template, psd, low_frequency_cutoff):
    """ This function calculates the overlaps between the template and the
    bank veto templates.

    Parameters
    ----------
    bank_filters: List of FrequencySeries
    template: FrequencySeries
    psd: FrequencySeries
    low_frequency_cutoff: float

    Returns
    -------
    overlaps: List of complex overlap values.
    """
    overlaps = []
    template_ow = template / psd
    for bank_template in bank_filters:
        overlap = overlap_cplx(template_ow, bank_template,
                low_frequency_cutoff=low_frequency_cutoff, normalized=False)
        norm = sqrt(1 / template.sigmasq(psd) / bank_template.sigmasq(psd))
        overlaps.append(overlap * norm)
        if (abs(overlaps[-1]) > 0.99):
            errMsg = "Overlap > 0.99 between bank template and filter. "
            errMsg += "This bank template will not be used to calculate "
            errMsg += "bank chisq for this filter template. The expected "
            errMsg += "value will be added to the chisq to account for "
            errMsg += "the removal of this template.\n"
            errMsg += "Masses of filter template: %e %e\n" \
                      %(template.params.mass1, template.params.mass2)
            errMsg += "Masses of bank filter template: %e %e\n" \
                      %(bank_template.params.mass1, bank_template.params.mass2)
            errMsg += "Overlap: %e" %(abs(overlaps[-1]))
            logging.debug(errMsg)
    return overlaps
python
{ "resource": "" }
q32184
bank_chisq_from_filters
train
def bank_chisq_from_filters(tmplt_snr, tmplt_norm, bank_snrs, bank_norms, tmplt_bank_matches, indices=None): """ This function calculates and returns a TimeSeries object containing the bank veto calculated over a segment. Parameters ---------- tmplt_snr: TimeSeries The SNR time series from filtering the segment against the current search template tmplt_norm: float The normalization factor for the search template bank_snrs: list of TimeSeries The precomputed list of SNR time series between each of the bank veto templates and the segment bank_norms: list of floats The normalization factors for the list of bank veto templates (usually this will be the same for all bank veto templates) tmplt_bank_matches: list of floats The complex overlap between the search template and each of the bank templates indices: {None, Array}, optional Array of indices into the snr time series. If given, the bank chisq will only be calculated at these values. Returns ------- bank_chisq: TimeSeries of the bank vetos """ if indices is not None: tmplt_snr = Array(tmplt_snr, copy=False) bank_snrs_tmp = [] for bank_snr in bank_snrs: bank_snrs_tmp.append(bank_snr.take(indices)) bank_snrs=bank_snrs_tmp # Initialise bank_chisq as 0s everywhere bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr)) # Loop over all the bank templates for i in range(len(bank_snrs)): bank_match = tmplt_bank_matches[i] if (abs(bank_match) > 0.99): # Not much point calculating bank_chisquared if the bank template # is very close to the filter template. Can also hit numerical # error due to approximations made in this calculation. # The value of 2 is the expected addition to the chisq for this # template bank_chisq += 2. continue bank_norm = sqrt((1 - bank_match*bank_match.conj()).real) bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm) tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm) bank_SNR = Array(bank_SNR, copy=False) tmplt_SNR = Array(tmplt_SNR, copy=False) bank_chisq += (bank_SNR - tmplt_SNR).squared_norm() if indices is not None: return bank_chisq else: return TimeSeries(bank_chisq, delta_t=tmplt_snr.delta_t, epoch=tmplt_snr.start_time, copy=False)
python
{ "resource": "" }
q32185
start_end_from_segments
train
def start_end_from_segments(segment_file): """ Return the start and end time arrays from a segment file. Parameters ---------- segment_file: xml segment file Returns ------- start: numpy.ndarray end: numpy.ndarray """ from glue.ligolw.ligolw import LIGOLWContentHandler as h; lsctables.use_in(h) indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h) segment_table = table.get_table(indoc, lsctables.SegmentTable.tableName) start = numpy.array(segment_table.getColumnByName('start_time')) start_ns = numpy.array(segment_table.getColumnByName('start_time_ns')) end = numpy.array(segment_table.getColumnByName('end_time')) end_ns = numpy.array(segment_table.getColumnByName('end_time_ns')) return start + start_ns * 1e-9, end + end_ns * 1e-9
python
{ "resource": "" }
q32186
indices_within_times
train
def indices_within_times(times, start, end): """ Return an index array into times that lie within the durations defined by start end arrays Parameters ---------- times: numpy.ndarray Array of times start: numpy.ndarray Array of duration start times end: numpy.ndarray Array of duration end times Returns ------- indices: numpy.ndarray Array of indices into times """ # coalesce the start/end segments start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce()) tsort = times.argsort() times_sorted = times[tsort] left = numpy.searchsorted(times_sorted, start) right = numpy.searchsorted(times_sorted, end) if len(left) == 0: return numpy.array([], dtype=numpy.uint32) return tsort[numpy.hstack(numpy.r_[s:e] for s, e in zip(left, right))]
python
{ "resource": "" }
q32187
indices_outside_times
train
def indices_outside_times(times, start, end):
    """ Return an index array into times that lie outside the durations
    defined by start end arrays

    Parameters
    ----------
    times: numpy.ndarray
        Array of times
    start: numpy.ndarray
        Array of duration start times
    end: numpy.ndarray
        Array of duration end times

    Returns
    -------
    indices: numpy.ndarray
        Array of indices into times
    """
    exclude = indices_within_times(times, start, end)
    indices = numpy.arange(0, len(times))
    return numpy.delete(indices, exclude)
python
{ "resource": "" }
q32188
select_segments_by_definer
train
def select_segments_by_definer(segment_file, segment_name=None, ifo=None): """ Return the list of segments that match the segment name Parameters ---------- segment_file: str path to segment xml file segment_name: str Name of segment ifo: str, optional Returns ------- seg: list of segments """ from glue.ligolw.ligolw import LIGOLWContentHandler as h; lsctables.use_in(h) indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h) segment_table = table.get_table(indoc, 'segment') seg_def_table = table.get_table(indoc, 'segment_definer') def_ifos = seg_def_table.getColumnByName('ifos') def_names = seg_def_table.getColumnByName('name') def_ids = seg_def_table.getColumnByName('segment_def_id') valid_id = [] for def_ifo, def_name, def_id in zip(def_ifos, def_names, def_ids): if ifo and ifo != def_ifo: continue if segment_name and segment_name != def_name: continue valid_id += [def_id] start = numpy.array(segment_table.getColumnByName('start_time')) start_ns = numpy.array(segment_table.getColumnByName('start_time_ns')) end = numpy.array(segment_table.getColumnByName('end_time')) end_ns = numpy.array(segment_table.getColumnByName('end_time_ns')) start, end = start + 1e-9 * start_ns, end + 1e-9 * end_ns did = segment_table.getColumnByName('segment_def_id') keep = numpy.array([d in valid_id for d in did]) if sum(keep) > 0: return start_end_to_segments(start[keep], end[keep]) else: return segmentlist([])
python
{ "resource": "" }
q32189
indices_within_segments
train
def indices_within_segments(times, segment_files, ifo=None, segment_name=None):
    """ Return the list of indices that should be vetoed by the segments in
    the list of segment files.

    Parameters
    ----------
    times: numpy.ndarray of integer type
        Array of gps start times
    segment_files: string or list of strings
        A string or list of strings that contain the path to xml files that
        contain a segment table
    ifo: string, optional
        The ifo to retrieve segments for from the segment files
    segment_name: str, optional
        name of segment

    Returns
    -------
    indices: numpy.ndarray
        The array of index values within the segments
    segmentlist:
        The segment list corresponding to the selected time.
    """
    veto_segs = segmentlist([])
    indices = numpy.array([], dtype=numpy.uint32)
    for veto_file in segment_files:
        veto_segs += select_segments_by_definer(veto_file, segment_name, ifo)
    veto_segs.coalesce()

    start, end = segments_to_start_end(veto_segs)
    if len(start) > 0:
        idx = indices_within_times(times, start, end)
        indices = numpy.union1d(indices, idx)

    return indices, veto_segs.coalesce()
python
{ "resource": "" }
q32190
indices_outside_segments
train
def indices_outside_segments(times, segment_files, ifo=None, segment_name=None): """ Return the list of indices that are outside the segments in the list of segment files. Parameters ---------- times: numpy.ndarray of integer type Array of gps start times segment_files: string or list of strings A string or list of strings that contain the path to xml files that contain a segment table ifo: string, optional The ifo to retrieve segments for from the segment files segment_name: str, optional name of segment Returns -------- indices: numpy.ndarray The array of index values outside the segments segmentlist: The segment list corresponding to the selected time. """ exclude, segs = indices_within_segments(times, segment_files, ifo=ifo, segment_name=segment_name) indices = numpy.arange(0, len(times)) return numpy.delete(indices, exclude), segs
python
{ "resource": "" }
q32191
get_segment_definer_comments
train
def get_segment_definer_comments(xml_file, include_version=True): """Returns a dict with the comment column as the value for each segment""" from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) # read segment definer table xmldoc, _ = ligolw_utils.load_fileobj(xml_file, gz=xml_file.name.endswith(".gz"), contenthandler=h) seg_def_table = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName) # put comment column into a dict comment_dict = {} for seg_def in seg_def_table: if include_version: full_channel_name = ':'.join([str(seg_def.ifos), str(seg_def.name), str(seg_def.version)]) else: full_channel_name = ':'.join([str(seg_def.ifos), str(seg_def.name)]) comment_dict[full_channel_name] = seg_def.comment return comment_dict
python
{ "resource": "" }
q32192
Merger.strain
train
def strain(self, ifo, duration=32, sample_rate=4096):
        """ Return strain around the event

        Currently this will return the strain around the event in the
        smallest format available. Selection of other data is not yet
        available.

        Parameters
        ----------
        ifo: str
            The name of the observatory you want strain for. Ex. H1, L1, V1
        duration: int, optional
            The duration in seconds of the strain segment to download.
            Default is 32.
        sample_rate: int, optional
            The sample rate in Hz of the strain to download; must be either
            4096 or 16384. Default is 4096.

        Returns
        -------
        strain: pycbc.types.TimeSeries
            Strain around the event.
        """
        from astropy.utils.data import download_file
        from pycbc.frame import read_frame

        # Information is currently wrong on GWOSC!
        # channels = self.data['files']['FrameChannels']
        # for channel in channels:
        #     if ifo in channel:
        #         break

        length = "{}sec".format(duration)
        if sample_rate == 4096:
            sampling = "4KHz"
        elif sample_rate == 16384:
            sampling = "16KHz"

        channel = "{}:GWOSC-{}_R1_STRAIN".format(ifo, sampling.upper())
        url = self.data['files'][ifo][length][sampling]['GWF']
        filename = download_file(url, cache=True)
        return read_frame(str(filename), str(channel))
python
{ "resource": "" }
q32193
make_inference_prior_plot
train
def make_inference_prior_plot(workflow, config_file, output_dir,
                              sections=None, name="inference_prior",
                              analysis_seg=None, tags=None):
    """ Sets up the corner plot of the priors in the workflow.

    Parameters
    ----------
    workflow: pycbc.workflow.Workflow
        The core workflow instance we are populating
    config_file: pycbc.workflow.File
        The WorkflowConfigParser parsable inference configuration file.
    output_dir: str
        The directory to store result plots and files.
    sections : list
        A list of subsections to use.
    name: str
        The name in the [executables] section of the configuration file
        to use.
    analysis_seg: {None, ligo.segments.Segment}
        The segment this job encompasses. If None then use the total
        analysis time from the workflow.
    tags: {None, optional}
        Tags to add to the inference executables.

    Returns
    -------
    pycbc.workflow.FileList
        A list of result and output files.
    """

    # default values
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
                       if analysis_seg is None else analysis_seg

    # make the directory that will contain the output files
    makedir(output_dir)

    # make a node for plotting the posterior as a corner plot
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                          out_dir=output_dir, universe="local",
                          tags=tags).create_node()

    # add command line options
    node.add_input_opt("--config-file", config_file)
    node.new_output_file_opt(analysis_seg, ".png", "--output-file")
    if sections is not None:
        node.add_opt("--sections", " ".join(sections))

    # add node to workflow
    workflow += node

    return node.output_files
python
{ "resource": "" }
q32194
make_inference_summary_table
train
def make_inference_summary_table(workflow, inference_file, output_dir,
                    variable_args=None, name="inference_table",
                    analysis_seg=None, tags=None):
    """ Sets up the summary table of the posteriors in the workflow.

    Parameters
    ----------
    workflow: pycbc.workflow.Workflow
        The core workflow instance we are populating
    inference_file: pycbc.workflow.File
        The file with posterior samples.
    output_dir: str
        The directory to store result plots and files.
    variable_args : list
        A list of parameters to use instead of [variable_args].
    name: str
        The name in the [executables] section of the configuration file
        to use.
    analysis_seg: {None, ligo.segments.Segment}
        The segment this job encompasses. If None then use the total
        analysis time from the workflow.
    tags: {None, optional}
        Tags to add to the inference executables.

    Returns
    -------
    pycbc.workflow.FileList
        A list of result and output files.
    """

    # default values
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
                       if analysis_seg is None else analysis_seg

    # make the directory that will contain the output files
    makedir(output_dir)

    # make a node for making the summary table
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                      out_dir=output_dir, tags=tags).create_node()

    # add command line options
    node.add_input_opt("--input-file", inference_file)
    node.new_output_file_opt(analysis_seg, ".html", "--output-file")
    node.add_opt("--parameters", " ".join(variable_args))

    # add node to workflow
    workflow += node

    return node.output_files
python
{ "resource": "" }
q32195
make_inference_acceptance_rate_plot
train
def make_inference_acceptance_rate_plot(workflow, inference_file, output_dir,
                    name="inference_rate", analysis_seg=None, tags=None):
    """ Sets up the acceptance rate plot in the workflow.

    Parameters
    ----------
    workflow: pycbc.workflow.Workflow
        The core workflow instance we are populating
    inference_file: pycbc.workflow.File
        The file with posterior samples.
    output_dir: str
        The directory to store result plots and files.
    name: str
        The name in the [executables] section of the configuration file
        to use.
    analysis_seg: {None, ligo.segments.Segment}
        The segment this job encompasses. If None then use the total
        analysis time from the workflow.
    tags: {None, optional}
        Tags to add to the inference executables.

    Returns
    -------
    pycbc.workflow.FileList
        A list of result and output files.
    """

    # default values
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
                       if analysis_seg is None else analysis_seg

    # make the directory that will contain the output files
    makedir(output_dir)

    # make a node for plotting the acceptance rate
    node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                          out_dir=output_dir, tags=tags).create_node()

    # add command line options
    node.add_input_opt("--input-file", inference_file)
    node.new_output_file_opt(analysis_seg, ".png", "--output-file")

    # add node to workflow
    workflow += node

    return node.output_files
python
{ "resource": "" }
q32196
make_inference_inj_plots
train
def make_inference_inj_plots(workflow, inference_files, output_dir,
                             parameters, name="inference_recovery",
                             analysis_seg=None, tags=None):
    """ Sets up the recovered versus injected parameter plot in the workflow.

    Parameters
    ----------
    workflow: pycbc.workflow.Workflow
        The core workflow instance we are populating
    inference_files: pycbc.workflow.FileList
        The files with posterior samples.
    output_dir: str
        The directory to store result plots and files.
    parameters : list
        A ``list`` of parameters. Each parameter gets its own plot.
    name: str
        The name in the [executables] section of the configuration file
        to use.
    analysis_seg: {None, ligo.segments.Segment}
        The segment this job encompasses. If None then use the total
        analysis time from the workflow.
    tags: {None, optional}
        Tags to add to the inference executables.

    Returns
    -------
    pycbc.workflow.FileList
        A list of result and output files.
    """

    # default values
    tags = [] if tags is None else tags
    analysis_seg = workflow.analysis_time \
                       if analysis_seg is None else analysis_seg
    output_files = FileList([])

    # make the directory that will contain the output files
    makedir(output_dir)

    # add command line options
    for (ii, param) in enumerate(parameters):
        plot_exe = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
                                  out_dir=output_dir,
                                  tags=tags+['param{}'.format(ii)])
        node = plot_exe.create_node()
        node.add_input_list_opt("--input-file", inference_files)
        node.new_output_file_opt(analysis_seg, ".png", "--output-file")
        node.add_opt("--parameters", param)
        workflow += node
        output_files += node.output_files

    return output_files
python
{ "resource": "" }
q32197
get_science_segments
train
def get_science_segments(workflow, out_dir, tags=None): """ Get the analyzable segments after applying ini specified vetoes. Parameters ----------- workflow : Workflow object Instance of the workflow object out_dir : path Location to store output files tags : list of strings Used to retrieve subsections of the ini file for configuration options. Returns -------- sci_seg_file : workflow.core.SegFile instance The segment file combined from all ifos containing the science segments. sci_segs : Ifo keyed dict of ligo.segments.segmentlist instances The science segs for each ifo, keyed by ifo sci_seg_name : str The name with which science segs are stored in the output XML file. """ if tags is None: tags = [] logging.info('Starting generation of science segments') make_analysis_dir(out_dir) start_time = workflow.analysis_time[0] end_time = workflow.analysis_time[1] # NOTE: Should this be overrideable in the config file? sci_seg_name = "SCIENCE" sci_segs = {} sci_seg_dict = segments.segmentlistdict() sci_seg_summ_dict = segments.segmentlistdict() for ifo in workflow.ifos: curr_sci_segs, curr_sci_xml, curr_seg_name = get_sci_segs_for_ifo(ifo, workflow.cp, start_time, end_time, out_dir, tags) sci_seg_dict[ifo + ':' + sci_seg_name] = curr_sci_segs sci_segs[ifo] = curr_sci_segs sci_seg_summ_dict[ifo + ':' + sci_seg_name] = \ curr_sci_xml.seg_summ_dict[ifo + ':' + curr_seg_name] sci_seg_file = SegFile.from_segment_list_dict(sci_seg_name, sci_seg_dict, extension='xml', valid_segment=workflow.analysis_time, seg_summ_dict=sci_seg_summ_dict, directory=out_dir, tags=tags) logging.info('Done generating science segments') return sci_seg_file, sci_segs, sci_seg_name
python
{ "resource": "" }
q32198
get_files_for_vetoes
train
def get_files_for_vetoes(workflow, out_dir, runtime_names=None, in_workflow_names=None, tags=None): """ Get the various sets of veto segments that will be used in this analysis. Parameters ----------- workflow : Workflow object Instance of the workflow object out_dir : path Location to store output files runtime_names : list Veto category groups with these names in the [workflow-segment] section of the ini file will be generated now. in_workflow_names : list Veto category groups with these names in the [workflow-segment] section of the ini file will be generated in the workflow. If a veto category appears here and in runtime_names, it will be generated now. tags : list of strings Used to retrieve subsections of the ini file for configuration options. Returns -------- veto_seg_files : FileList List of veto segment files generated """ if tags is None: tags = [] if runtime_names is None: runtime_names = [] if in_workflow_names is None: in_workflow_names = [] logging.info('Starting generating veto files for analysis') make_analysis_dir(out_dir) start_time = workflow.analysis_time[0] end_time = workflow.analysis_time[1] save_veto_definer(workflow.cp, out_dir, tags) now_cat_sets = [] for name in runtime_names: cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags( 'workflow-segments', name, tags)) now_cat_sets.extend(cat_sets) now_cats = set() for cset in now_cat_sets: now_cats = now_cats.union(cset) later_cat_sets = [] for name in in_workflow_names: cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags( 'workflow-segments', name, tags)) later_cat_sets.extend(cat_sets) later_cats = set() for cset in later_cat_sets: later_cats = later_cats.union(cset) # Avoid duplication later_cats = later_cats - now_cats veto_gen_job = create_segs_from_cats_job(workflow.cp, out_dir, workflow.ifo_string, tags=tags) cat_files = FileList() for ifo in workflow.ifos: for category in now_cats: cat_files.append(get_veto_segs(workflow, ifo, cat_to_veto_def_cat(category), start_time, end_time, out_dir, veto_gen_job, execute_now=True, tags=tags)) for category in later_cats: cat_files.append(get_veto_segs(workflow, ifo, cat_to_veto_def_cat(category), start_time, end_time, out_dir, veto_gen_job, tags=tags, execute_now=False)) logging.info('Done generating veto segments') return cat_files
python
{ "resource": "" }
q32199
get_cumulative_veto_group_files
train
def get_cumulative_veto_group_files(workflow, option, cat_files, out_dir, execute_now=True, tags=None): """ Get the cumulative veto files that define the different backgrounds we want to analyze, defined by groups of vetos. Parameters ----------- workflow : Workflow object Instance of the workflow object option : str ini file option to use to get the veto groups cat_files : FileList of SegFiles The category veto files generated by get_veto_segs out_dir : path Location to store output files execute_now : Boolean If true outputs are generated at runtime. Else jobs go into the workflow and are generated then. tags : list of strings Used to retrieve subsections of the ini file for configuration options. Returns -------- seg_files : workflow.core.FileList instance The cumulative segment files for each veto group. names : list of strings The segment names for the corresponding seg_file cat_files : workflow.core.FileList instance The list of individual category veto files """ if tags is None: tags = [] logging.info("Starting generating vetoes for groups in %s" %(option)) make_analysis_dir(out_dir) cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('workflow-segments', option, tags)) cum_seg_files = FileList() names = [] for cat_set in cat_sets: segment_name = "CUMULATIVE_CAT_%s" % (''.join(sorted(cat_set))) logging.info('getting information for %s' % segment_name) categories = [cat_to_veto_def_cat(c) for c in cat_set] cum_seg_files += [get_cumulative_segs(workflow, categories, cat_files, out_dir, execute_now=execute_now, segment_name=segment_name, tags=tags)] names.append(segment_name) logging.info("Done generating vetoes for groups in %s" %(option)) return cum_seg_files, names, cat_files
python
{ "resource": "" }