signature
stringlengths
8
3.44k
body
stringlengths
0
1.41M
docstring
stringlengths
1
122k
id
stringlengths
5
17
def inverse_transform(self, maps):
out = {}<EOL>m1 = maps[parameters.mass1]<EOL>m2 = maps[parameters.mass2]<EOL>out[parameters.mchirp] = conversions.mchirp_from_mass1_mass2(m1, m2)<EOL>out[parameters.eta] = conversions.eta_from_mass1_mass2(m1, m2)<EOL>return self.format_output(maps, out)<EOL>
This function transforms from component masses to chirp mass and symmetric mass ratio. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy >>> from pycbc import transforms >>> t = transforms.MchirpQToMass1Mass2() >>> t.inverse_transform({'mass1': numpy.array([8.2]), 'mass2': numpy.array([8.2])}) {'mass1': array([ 8.2]), 'mass2': array([ 8.2]), 'mchirp': array([ 9.97717521]), 'eta': 0.25} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c3:m1
def jacobian(self, maps):
mchirp = maps[parameters.mchirp]<EOL>eta = maps[parameters.eta]<EOL>m1 = conversions.mass1_from_mchirp_eta(mchirp, eta)<EOL>m2 = conversions.mass2_from_mchirp_eta(mchirp, eta)<EOL>return mchirp * (m1 - m2) / (m1 + m2)**<NUM_LIT:3><EOL>
Returns the Jacobian for transforming mchirp and eta to mass1 and mass2.
f15991:c3:m2
def inverse_jacobian(self, maps):
m1 = maps[parameters.mass1]<EOL>m2 = maps[parameters.mass2]<EOL>mchirp = conversions.mchirp_from_mass1_mass2(m1, m2)<EOL>eta = conversions.eta_from_mass1_mass2(m1, m2)<EOL>return -<NUM_LIT:1.> * mchirp / eta**(<NUM_LIT>/<NUM_LIT:5>)<EOL>
Returns the Jacobian for transforming mass1 and mass2 to mchirp and eta.
f15991:c3:m3
def transform(self, maps):
out = {}<EOL>out[parameters.distance] =conversions.distance_from_chirp_distance_mchirp(<EOL>maps[parameters.chirp_distance],<EOL>maps[parameters.mchirp],<EOL>ref_mass=self.ref_mass)<EOL>return self.format_output(maps, out)<EOL>
This function transforms from chirp distance to luminosity distance, given the chirp mass. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy as np >>> from pycbc import transforms >>> t = transforms.ChirpDistanceToDistance() >>> t.transform({'chirp_distance': np.array([40.]), 'mchirp': np.array([1.2])}) {'mchirp': array([ 1.2]), 'chirp_distance': array([ 40.]), 'distance': array([ 39.48595679])} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c4:m1
def inverse_transform(self, maps):
out = {}<EOL>out[parameters.chirp_distance] =conversions.chirp_distance(maps[parameters.distance],<EOL>maps[parameters.mchirp], ref_mass=self.ref_mass)<EOL>return self.format_output(maps, out)<EOL>
This function transforms from luminosity distance to chirp distance, given the chirp mass. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy as np >>> from pycbc import transforms >>> t = transforms.ChirpDistanceToDistance() >>> t.inverse_transform({'distance': np.array([40.]), 'mchirp': np.array([1.2])}) {'distance': array([ 40.]), 'chirp_distance': array([ 40.52073522]), 'mchirp': array([ 1.2])} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c4:m2
def jacobian(self, maps):
ref_mass=<NUM_LIT><EOL>mchirp = maps['<STR_LIT>']<EOL>return (<NUM_LIT>**(-<NUM_LIT:1.>/<NUM_LIT:5>) * self.ref_mass / mchirp)**(-<NUM_LIT>/<NUM_LIT:6>)<EOL>
Returns the Jacobian for transforming chirp distance to luminosity distance, given the chirp mass.
f15991:c4:m3
def inverse_jacobian(self, maps):
ref_mass=<NUM_LIT><EOL>mchirp = maps['<STR_LIT>']<EOL>return (<NUM_LIT>**(-<NUM_LIT:1.>/<NUM_LIT:5>) * self.ref_mass / mchirp)**(<NUM_LIT>/<NUM_LIT:6>)<EOL>
Returns the Jacobian for transforming luminosity distance to chirp distance, given the chirp mass.
f15991:c4:m4
def transform(self, maps):
a, az, po = self._inputs<EOL>data = coordinates.spherical_to_cartesian(maps[a], maps[az], maps[po])<EOL>out = {param : val for param, val in zip(self._outputs, data)}<EOL>return self.format_output(maps, out)<EOL>
This function transforms from spherical to cartesian spins. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy >>> from pycbc import transforms >>> t = transforms.SphericalSpin1ToCartesianSpin1() >>> t.transform({'spin1_a': numpy.array([0.1]), 'spin1_azimuthal': numpy.array([0.1]), 'spin1_polar': numpy.array([0.1])}) {'spin1_a': array([ 0.1]), 'spin1_azimuthal': array([ 0.1]), 'spin1_polar': array([ 0.1]), 'spin2x': array([ 0.00993347]), 'spin2y': array([ 0.00099667]), 'spin2z': array([ 0.09950042])} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c5:m0
def inverse_transform(self, maps):
sx, sy, sz = self._outputs<EOL>data = coordinates.cartesian_to_spherical(maps[sx], maps[sy], maps[sz])<EOL>out = {param : val for param, val in zip(self._outputs, data)}<EOL>return self.format_output(maps, out)<EOL>
This function transforms from cartesian to spherical spins. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c5:m1
def transform(self, maps):
out = {parameters.redshift : cosmology.redshift(<EOL>maps[parameters.distance])}<EOL>return self.format_output(maps, out)<EOL>
This function transforms from distance to redshift. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy >>> from pycbc import transforms >>> t = transforms.DistanceToRedshift() >>> t.transform({'distance': numpy.array([1000])}) {'distance': array([1000]), 'redshift': 0.19650987609144363} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c7:m0
def transform(self, maps):
mass1 = maps[parameters.mass1]<EOL>mass2 = maps[parameters.mass2]<EOL>out = {}<EOL>out[parameters.spin1z] =conversions.spin1z_from_mass1_mass2_chi_eff_chi_a(<EOL>mass1, mass2,<EOL>maps[parameters.chi_eff], maps["<STR_LIT>"])<EOL>out[parameters.spin2z] =conversions.spin2z_from_mass1_mass2_chi_eff_chi_a(<EOL>mass1, mass2,<EOL>maps[parameters.chi_eff], maps["<STR_LIT>"])<EOL>return self.format_output(maps, out)<EOL>
This function transforms from aligned mass-weighted spins to cartesian spins aligned along the z-axis. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c8:m0
def inverse_transform(self, maps):
mass1 = maps[parameters.mass1]<EOL>spin1z = maps[parameters.spin1z]<EOL>mass2 = maps[parameters.mass2]<EOL>spin2z = maps[parameters.spin2z]<EOL>out = {<EOL>parameters.chi_eff : conversions.chi_eff(mass1, mass2,<EOL>spin1z, spin2z),<EOL>"<STR_LIT>" : conversions.chi_a(mass1, mass2, spin1z, spin2z),<EOL>}<EOL>return self.format_output(maps, out)<EOL>
This function transforms from component masses and cartesian spins to mass-weighted spin parameters aligned with the angular momentum. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c8:m1
def transform(self, maps):
<EOL>m_p = conversions.primary_mass(maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>m_s = conversions.secondary_mass(maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>xi_p = conversions.primary_spin(maps["<STR_LIT>"], maps["<STR_LIT>"],<EOL>maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>xi_s = conversions.secondary_spin(maps["<STR_LIT>"], maps["<STR_LIT>"],<EOL>maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>spinx_p = conversions.spin1x_from_xi1_phi_a_phi_s(<EOL>xi_p, maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>spiny_p = conversions.spin1y_from_xi1_phi_a_phi_s(<EOL>xi_p, maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>spinx_s = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s(<EOL>m_p, m_s, xi_s, maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>spiny_s = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s(<EOL>m_p, m_s, xi_s, maps["<STR_LIT>"], maps["<STR_LIT>"])<EOL>out = {}<EOL>if isinstance(m_p, numpy.ndarray):<EOL><INDENT>mass1, mass2 = map(numpy.array, [maps["<STR_LIT>"], maps["<STR_LIT>"]])<EOL>mask_mass1_gte_mass2 = mass1 >= mass2<EOL>mask_mass1_lt_mass2 = mass1 < mass2<EOL>out[parameters.spin1x] = numpy.concatenate((<EOL>spinx_p[mask_mass1_gte_mass2],<EOL>spinx_s[mask_mass1_lt_mass2]))<EOL>out[parameters.spin1y] = numpy.concatenate((<EOL>spiny_p[mask_mass1_gte_mass2],<EOL>spiny_s[mask_mass1_lt_mass2]))<EOL>out[parameters.spin2x] = numpy.concatenate((<EOL>spinx_p[mask_mass1_lt_mass2],<EOL>spinx_s[mask_mass1_gte_mass2]))<EOL>out[parameters.spin2y] = numpy.concatenate((<EOL>spinx_p[mask_mass1_lt_mass2],<EOL>spinx_s[mask_mass1_gte_mass2]))<EOL><DEDENT>elif maps["<STR_LIT>"] > maps["<STR_LIT>"]:<EOL><INDENT>out[parameters.spin1x] = spinx_p<EOL>out[parameters.spin1y] = spiny_p<EOL>out[parameters.spin2x] = spinx_s<EOL>out[parameters.spin2y] = spiny_s<EOL><DEDENT>else:<EOL><INDENT>out[parameters.spin1x] = spinx_s<EOL>out[parameters.spin1y] = spiny_s<EOL>out[parameters.spin2x] = spinx_p<EOL>out[parameters.spin2y] = spiny_p<EOL><DEDENT>return self.format_output(maps, out)<EOL>
This function transforms from mass-weighted spins to caretsian spins in the x-y plane. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c9:m0
def inverse_transform(self, maps):
<EOL>out = {}<EOL>xi1 = conversions.primary_xi(<EOL>maps[parameters.mass1], maps[parameters.mass2],<EOL>maps[parameters.spin1x], maps[parameters.spin1y],<EOL>maps[parameters.spin2x], maps[parameters.spin2y])<EOL>xi2 = conversions.secondary_xi(<EOL>maps[parameters.mass1], maps[parameters.mass2],<EOL>maps[parameters.spin1x], maps[parameters.spin1y],<EOL>maps[parameters.spin2x], maps[parameters.spin2y])<EOL>out["<STR_LIT>"] = conversions.phi_a(<EOL>maps[parameters.mass1], maps[parameters.mass2],<EOL>maps[parameters.spin1x], maps[parameters.spin1y],<EOL>maps[parameters.spin2x], maps[parameters.spin2y])<EOL>out["<STR_LIT>"] = conversions.phi_s(<EOL>maps[parameters.spin1x], maps[parameters.spin1y],<EOL>maps[parameters.spin2x], maps[parameters.spin2y])<EOL>if isinstance(xi1, numpy.ndarray):<EOL><INDENT>mass1, mass2 = map(numpy.array, [maps[parameters.mass1],<EOL>maps[parameters.mass2]])<EOL>mask_mass1_gte_mass2 = mass1 >= mass2<EOL>mask_mass1_lt_mass2 = mass1 < mass2<EOL>out["<STR_LIT>"] = numpy.concatenate((<EOL>xi1[mask_mass1_gte_mass2],<EOL>xi2[mask_mass1_lt_mass2]))<EOL>out["<STR_LIT>"] = numpy.concatenate((<EOL>xi1[mask_mass1_gte_mass2],<EOL>xi2[mask_mass1_lt_mass2]))<EOL><DEDENT>elif maps["<STR_LIT>"] > maps["<STR_LIT>"]:<EOL><INDENT>out["<STR_LIT>"] = xi1<EOL>out["<STR_LIT>"] = xi2<EOL><DEDENT>else:<EOL><INDENT>out["<STR_LIT>"] = xi2<EOL>out["<STR_LIT>"] = xi1<EOL><DEDENT>return self.format_output(maps, out)<EOL>
This function transforms from component masses and cartesian spins to mass-weighted spin parameters perpendicular with the angular momentum. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c9:m1
def transform(self, maps):
out = {}<EOL>out["<STR_LIT>"] = conversions.chi_p(<EOL>maps[parameters.mass1], maps[parameters.mass2],<EOL>maps[parameters.spin1x], maps[parameters.spin1y],<EOL>maps[parameters.spin2x], maps[parameters.spin2y])<EOL>return self.format_output(maps, out)<EOL>
This function transforms from component masses and caretsian spins to chi_p. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c10:m0
@property<EOL><INDENT>def inputvar(self):<DEDENT>
return self._inputvar<EOL>
Returns the input parameter.
f15991:c11:m1
@property<EOL><INDENT>def outputvar(self):<DEDENT>
return self._outputvar<EOL>
Returns the output parameter.
f15991:c11:m2
@property<EOL><INDENT>def bounds(self):<DEDENT>
return self._bounds<EOL>
Returns the domain of the input parameter.
f15991:c11:m3
@staticmethod<EOL><INDENT>def logit(x, a=<NUM_LIT:0.>, b=<NUM_LIT:1.>):<DEDENT>
return numpy.log(x-a) - numpy.log(b-x)<EOL>
r"""Computes the logit function with domain :math:`x \in (a, b)`. This is given by: .. math:: \mathrm{logit}(x; a, b) = \log\left(\frac{x-a}{b-x}\right). Note that this is also the inverse of the logistic function with range :math:`(a, b)`. Parameters ---------- x : float The value to evaluate. a : float, optional The minimum bound of the domain of x. Default is 0. b : float, optional The maximum bound of the domain of x. Default is 1. Returns ------- float The logit of x.
f15991:c11:m4
@staticmethod<EOL><INDENT>def logistic(x, a=<NUM_LIT:0.>, b=<NUM_LIT:1.>):<DEDENT>
expx = numpy.exp(x)<EOL>return (a + b*expx)/(<NUM_LIT:1.> + expx)<EOL>
r"""Computes the logistic function with range :math:`\in (a, b)`. This is given by: .. math:: \mathrm{logistic}(x; a, b) = \frac{a + b e^x}{1 + e^x}. Note that this is also the inverse of the logit function with domain :math:`(a, b)`. Parameters ---------- x : float The value to evaluate. a : float, optional The minimum bound of the range of the logistic function. Default is 0. b : float, optional The maximum bound of the range of the logistic function. Default is 1. Returns ------- float The logistic of x.
f15991:c11:m5
def transform(self, maps):
x = maps[self._inputvar]<EOL>isin = self._bounds.__contains__(x)<EOL>if isinstance(isin, numpy.ndarray):<EOL><INDENT>isin = isin.all()<EOL><DEDENT>if not isin:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>out = {self._outputvar : self.logit(x, self._a, self._b)}<EOL>return self.format_output(maps, out)<EOL>
r"""Computes :math:`\mathrm{logit}(x; a, b)`. The domain :math:`a, b` of :math:`x` are given by the class's bounds. Parameters ---------- maps : dict or FieldArray A dictionary or FieldArray which provides a map between the parameter name of the variable to transform and its value(s). Returns ------- out : dict or FieldArray A map between the transformed variable name and value(s), along with the original variable name and value(s).
f15991:c11:m6
def inverse_transform(self, maps):
y = maps[self._outputvar]<EOL>out = {self._inputvar : self.logistic(y, self._a, self._b)}<EOL>return self.format_output(maps, out)<EOL>
r"""Computes :math:`y = \mathrm{logistic}(x; a,b)`. The codomain :math:`a, b` of :math:`y` are given by the class's bounds. Parameters ---------- maps : dict or FieldArray A dictionary or FieldArray which provides a map between the parameter name of the variable to transform and its value(s). Returns ------- out : dict or FieldArray A map between the transformed variable name and value(s), along with the original variable name and value(s).
f15991:c11:m7
def jacobian(self, maps):
x = maps[self._inputvar]<EOL>isin = self._bounds.__contains__(x)<EOL>if isinstance(isin, numpy.ndarray) and not isin.all():<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>elif not isin:<EOL><INDENT>raise ValueError("<STR_LIT>".format(x))<EOL><DEDENT>return (self._b - self._a)/((x - self._a)*(self._b - x))<EOL>
r"""Computes the Jacobian of :math:`y = \mathrm{logit}(x; a,b)`. This is: .. math:: \frac{\mathrm{d}y}{\mathrm{d}x} = \frac{b -a}{(x-a)(b-x)}, where :math:`x \in (a, b)`. Parameters ---------- maps : dict or FieldArray A dictionary or FieldArray which provides a map between the parameter name of the variable to transform and its value(s). Returns ------- float The value of the jacobian at the given point(s).
f15991:c11:m8
def inverse_jacobian(self, maps):
x = maps[self._outputvar]<EOL>expx = numpy.exp(x)<EOL>return expx * (self._b - self._a) / (<NUM_LIT:1.> + expx)**<NUM_LIT><EOL>
r"""Computes the Jacobian of :math:`y = \mathrm{logistic}(x; a,b)`. This is: .. math:: \frac{\mathrm{d}y}{\mathrm{d}x} = \frac{e^x (b-a)}{(1+e^y)^2}, where :math:`y \in (a, b)`. Parameters ---------- maps : dict or FieldArray A dictionary or FieldArray which provides a map between the parameter name of the variable to transform and its value(s). Returns ------- float The value of the jacobian at the given point(s).
f15991:c11:m9
@classmethod<EOL><INDENT>def from_config(cls, cp, section, outputs, skip_opts=None,<EOL>additional_opts=None):<DEDENT>
<EOL>inputvar = cp.get_opt_tag(section, '<STR_LIT>', outputs)<EOL>s = '<STR_LIT:->'.join([section, outputs])<EOL>opt = '<STR_LIT>'.format(inputvar)<EOL>if skip_opts is None:<EOL><INDENT>skip_opts = []<EOL><DEDENT>if additional_opts is None:<EOL><INDENT>additional_opts = {}<EOL><DEDENT>else:<EOL><INDENT>additional_opts = additional_opts.copy()<EOL><DEDENT>if cp.has_option(s, opt):<EOL><INDENT>a = cp.get_opt_tag(section, opt, outputs)<EOL>skip_opts.append(opt)<EOL><DEDENT>else:<EOL><INDENT>a = None<EOL><DEDENT>opt = '<STR_LIT>'.format(inputvar)<EOL>if cp.has_option(s, opt):<EOL><INDENT>b = cp.get_opt_tag(section, opt, outputs)<EOL>skip_opts.append(opt)<EOL><DEDENT>else:<EOL><INDENT>b = None<EOL><DEDENT>if a is None and b is not None or b is None and a is not None:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>".format(inputvar, inputvar))<EOL><DEDENT>elif a is not None:<EOL><INDENT>additional_opts.update({'<STR_LIT>': (float(a), float(b))})<EOL><DEDENT>return super(Logit, cls).from_config(cp, section, outputs, skip_opts,<EOL>additional_opts)<EOL>
Initializes a Logit transform from the given section. The section must specify an input and output variable name. The domain of the input may be specified using `min-{input}`, `max-{input}`. Example: .. code-block:: ini [{section}-logitq] name = logit inputvar = q outputvar = logitq min-q = 1 max-q = 8 Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the transform options. section : str Name of the section in the configuration file. outputs : str The names of the parameters that are output by this transformation, separated by `VARARGS_DELIM`. These must appear in the "tag" part of the section header. skip_opts : list, optional Do not read options in the given list. additional_opts : dict, optional Any additional arguments to pass to the class. If an option is provided that also exists in the config file, the value provided will be used instead of being read from the file. Returns ------- cls An instance of the class.
f15991:c11:m10
def transform(self, maps):
sx, sy, sz = self._inputs<EOL>data = coordinates.cartesian_to_spherical(maps[sx], maps[sy], maps[sz])<EOL>out = {param : val for param, val in zip(self._outputs, data)}<EOL>return self.format_output(maps, out)<EOL>
This function transforms from cartesian to spherical spins. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c15:m0
def inverse_transform(self, maps):
a, az, po = self._outputs<EOL>data = coordinates.spherical_to_cartesian(maps[a], maps[az], maps[po])<EOL>out = {param : val for param, val in zip(self._outputs, data)}<EOL>return self.format_output(maps, out)<EOL>
This function transforms from spherical to cartesian spins. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
f15991:c15:m1
@property<EOL><INDENT>def bounds(self):<DEDENT>
return self._bounds<EOL>
Returns the range of the output parameter.
f15991:c20:m1
@classmethod<EOL><INDENT>def from_config(cls, cp, section, outputs, skip_opts=None,<EOL>additional_opts=None):<DEDENT>
<EOL>outputvar = cp.get_opt_tag(section, '<STR_LIT>', outputs)<EOL>if skip_opts is None:<EOL><INDENT>skip_opts = []<EOL><DEDENT>if additional_opts is None:<EOL><INDENT>additional_opts = {}<EOL><DEDENT>else:<EOL><INDENT>additional_opts = additional_opts.copy()<EOL><DEDENT>s = '<STR_LIT:->'.join([section, outputs])<EOL>opt = '<STR_LIT>'.format(outputvar)<EOL>if cp.has_option(s, opt):<EOL><INDENT>a = cp.get_opt_tag(section, opt, outputs)<EOL>skip_opts.append(opt)<EOL><DEDENT>else:<EOL><INDENT>a = None<EOL><DEDENT>opt = '<STR_LIT>'.format(outputvar)<EOL>if cp.has_option(s, opt):<EOL><INDENT>b = cp.get_opt_tag(section, opt, outputs)<EOL>skip_opts.append(opt)<EOL><DEDENT>else:<EOL><INDENT>b = None<EOL><DEDENT>if a is None and b is not None or b is None and a is not None:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>".format(outputvar, outputvar))<EOL><DEDENT>elif a is not None:<EOL><INDENT>additional_opts.update({'<STR_LIT>': (float(a), float(b))})<EOL><DEDENT>return super(Logistic, cls).from_config(cp, section, outputs,<EOL>skip_opts, additional_opts)<EOL>
Initializes a Logistic transform from the given section. The section must specify an input and output variable name. The codomain of the output may be specified using `min-{output}`, `max-{output}`. Example: .. code-block:: ini [{section}-q] name = logistic inputvar = logitq outputvar = q min-q = 1 max-q = 8 Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the transform options. section : str Name of the section in the configuration file. outputs : str The names of the parameters that are output by this transformation, separated by `VARARGS_DELIM`. These must appear in the "tag" part of the section header. skip_opts : list, optional Do not read options in the given list. additional_opts : dict, optional Any additional arguments to pass to the class. If an option is provided that also exists in the config file, the value provided will be used instead of being read from the file. Returns ------- cls An instance of the class.
f15991:c20:m2
def select_splitfilejob_instance(curr_exe):
if curr_exe == '<STR_LIT>':<EOL><INDENT>exe_class = PycbcSplitBankExecutable<EOL><DEDENT>elif curr_exe == '<STR_LIT>':<EOL><INDENT>exe_class = PycbcSplitBankXmlExecutable<EOL><DEDENT>elif curr_exe == '<STR_LIT>':<EOL><INDENT>exe_class = PycbcSplitInspinjExecutable<EOL><DEDENT>else:<EOL><INDENT>err_string = "<STR_LIT>" %(curr_exe,)<EOL>raise NotImplementedError(err_string)<EOL><DEDENT>return exe_class<EOL>
This function returns an instance of the class that is appropriate for splitting an output file up within workflow (for e.g. splitbank). Parameters ---------- curr_exe : string The name of the Executable that is being used. curr_section : string The name of the section storing options for this executble Returns -------- exe class : sub-class of pycbc.workflow.core.Executable The class that holds the utility functions appropriate for the given Executable. This class **must** contain * exe_class.create_job() and the job returned by this **must** contain * job.create_node()
f15992:m0
def setup_splittable_workflow(workflow, input_tables, out_dir=None, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>make_analysis_dir(out_dir)<EOL>splitMethod = workflow.cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL>if splitMethod == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>split_table_outs = setup_splittable_dax_generated(workflow,<EOL>input_tables, out_dir, tags)<EOL><DEDENT>elif splitMethod == "<STR_LIT>":<EOL><INDENT>split_table_outs = input_tables<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>return split_table_outs<EOL>
This function aims to be the gateway for code that is responsible for taking some input file containing some table, and splitting into multiple files containing different parts of that table. For now the only supported operation is using lalapps_splitbank to split a template bank xml file into multiple template bank xml files. Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the jobs will be added to. input_tables : pycbc.workflow.core.FileList The input files to be split up. out_dir : path The directory in which output will be written. Returns -------- split_table_outs : pycbc.workflow.core.FileList The list of split up files as output from this job.
f15992:m1
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
cp = workflow.cp<EOL>try:<EOL><INDENT>num_splits = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL><DEDENT>except BaseException:<EOL><INDENT>inj_interval = int(cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags))<EOL>if cp.has_option_tags("<STR_LIT>", "<STR_LIT>", tags) andcp.has_option("<STR_LIT>", "<STR_LIT>"):<EOL><INDENT>num_injs = int(cp.get_opt_tags("<STR_LIT>", "<STR_LIT>",<EOL>tags))<EOL><DEDENT>else:<EOL><INDENT>num_injs = int(cp.get_opt_tags("<STR_LIT>", "<STR_LIT>",<EOL>tags))<EOL><DEDENT>inj_tspace = float(abs(workflow.analysis_time)) / num_injs<EOL>num_splits = int(inj_interval // inj_tspace) + <NUM_LIT:1><EOL><DEDENT>split_exe_tag = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL>split_exe = os.path.basename(cp.get("<STR_LIT>", split_exe_tag))<EOL>exe_class = select_splitfilejob_instance(split_exe)<EOL>out_file_groups = FileList([])<EOL>curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits,<EOL>out_dir=out_dir)<EOL>for input in input_tables:<EOL><INDENT>node = curr_exe_job.create_node(input, tags=tags)<EOL>workflow.add_node(node)<EOL>out_file_groups += node.output_files<EOL><DEDENT>return out_file_groups<EOL>
Function for setting up the splitting jobs as part of the workflow. Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the jobs will be added to. input_tables : pycbc.workflow.core.FileList The input files to be split up. out_dir : path The directory in which output will be written. Returns -------- split_table_outs : pycbc.workflow.core.FileList The list of split up files as output from this job.
f15992:m2
def setup_psd_workflow(workflow, science_segs, datafind_outs,<EOL>output_dir=None, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>make_analysis_dir(output_dir)<EOL>cp = workflow.cp<EOL>try:<EOL><INDENT>psdMethod = cp.get_opt_tags("<STR_LIT>", "<STR_LIT>",<EOL>tags)<EOL><DEDENT>except:<EOL><INDENT>return FileList([])<EOL><DEDENT>if psdMethod == "<STR_LIT>":<EOL><INDENT>logging.info("<STR_LIT>")<EOL>psd_files = setup_psd_pregenerated(workflow, tags=tags)<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>return psd_files<EOL>
Setup static psd section of CBC workflow. At present this only supports pregenerated psd files, in the future these could be created within the workflow. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. science_segs : Keyed dictionary of glue.segmentlist objects scienceSegs[ifo] holds the science segments to be analysed for each ifo. datafind_outs : pycbc.workflow.core.FileList The file list containing the datafind files. output_dir : path string The directory where data products will be placed. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- psd_files : pycbc.workflow.core.FileList The FileList holding the psd files, 0 or 1 per ifo
f15993:m0
def setup_psd_pregenerated(workflow, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>psd_files = FileList([])<EOL>cp = workflow.cp<EOL>global_seg = workflow.analysis_time<EOL>user_tag = "<STR_LIT>"<EOL>try:<EOL><INDENT>pre_gen_file = cp.get_opt_tags('<STR_LIT>',<EOL>'<STR_LIT>', tags)<EOL>pre_gen_file = resolve_url(pre_gen_file)<EOL>file_url = urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(pre_gen_file))<EOL>curr_file = File(workflow.ifos, user_tag, global_seg, file_url,<EOL>tags=tags)<EOL>curr_file.PFN(file_url, site='<STR_LIT>')<EOL>psd_files.append(curr_file)<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>for ifo in workflow.ifos:<EOL><INDENT>try:<EOL><INDENT>pre_gen_file = cp.get_opt_tags('<STR_LIT>',<EOL>'<STR_LIT>' % ifo.lower(),<EOL>tags)<EOL>pre_gen_file = resolve_url(pre_gen_file)<EOL>file_url = urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(pre_gen_file))<EOL>curr_file = File(ifo, user_tag, global_seg, file_url,<EOL>tags=tags)<EOL>curr_file.PFN(file_url, site='<STR_LIT>')<EOL>psd_files.append(curr_file)<EOL><DEDENT>except ConfigParser.Error:<EOL><INDENT>logging.warn("<STR_LIT>" % (ifo,))<EOL>pass<EOL><DEDENT><DEDENT><DEDENT>return psd_files<EOL>
Setup CBC workflow to use pregenerated psd files. The file given in cp.get('workflow','pregenerated-psd-file-(ifo)') will be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank and pycbc_plot_psd_file. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- psd_files : pycbc.workflow.core.FileList The FileList holding the gating files
f15993:m1
def int_gps_time_to_str(t):
if isinstance(t, int):<EOL><INDENT>return str(t)<EOL><DEDENT>elif isinstance(t, float):<EOL><INDENT>int_t = int(t)<EOL>if abs(t - int_t) > <NUM_LIT:0.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % str(t))<EOL><DEDENT>return str(int_t)<EOL><DEDENT>elif isinstance(t, lal.LIGOTimeGPS):<EOL><INDENT>if t.gpsNanoSeconds == <NUM_LIT:0>:<EOL><INDENT>return str(t.gpsSeconds)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % str(t))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>err_msg = "<STR_LIT>".format(type(t))<EOL>raise ValueError(err_msg)<EOL><DEDENT>
Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError.
f15994:m0
def select_tmpltbank_class(curr_exe):
exe_to_class_map = {<EOL>'<STR_LIT>' : PyCBCTmpltbankExecutable,<EOL>'<STR_LIT>': PyCBCTmpltbankExecutable<EOL>}<EOL>try:<EOL><INDENT>return exe_to_class_map[curr_exe]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>" % curr_exe)<EOL><DEDENT>
This function returns a class that is appropriate for setting up template bank jobs within workflow. Parameters ---------- curr_exe : string The name of the executable to be used for generating template banks. Returns -------- exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility functions appropriate for the given executable. Instances of the class ('jobs') **must** have methods * job.create_node() and * job.get_valid_times(ifo, )
f15994:m1
def select_matchedfilter_class(curr_exe):
exe_to_class_map = {<EOL>'<STR_LIT>' : PyCBCInspiralExecutable,<EOL>'<STR_LIT>' : PyCBCInspiralExecutable,<EOL>'<STR_LIT>' : PyCBCMultiInspiralExecutable,<EOL>}<EOL>try:<EOL><INDENT>return exe_to_class_map[curr_exe]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>" % curr_exe)<EOL><DEDENT>
This function returns a class that is appropriate for setting up matched-filtering jobs within workflow. Parameters ---------- curr_exe : string The name of the matched filter executable to be used. Returns -------- exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility functions appropriate for the given executable. Instances of the class ('jobs') **must** have methods * job.create_node() and * job.get_valid_times(ifo, )
f15994:m2
def select_generic_executable(workflow, exe_tag):
exe_path = workflow.cp.get("<STR_LIT>", exe_tag)<EOL>exe_name = os.path.basename(exe_path)<EOL>exe_to_class_map = {<EOL>'<STR_LIT>' : LigolwAddExecutable,<EOL>'<STR_LIT>' : LigolwSSthincaExecutable,<EOL>'<STR_LIT>' : PycbcSqliteSimplifyExecutable,<EOL>'<STR_LIT>': SQLInOutExecutable,<EOL>'<STR_LIT>' : SQLInOutExecutable,<EOL>'<STR_LIT>' : SQLInOutExecutable,<EOL>'<STR_LIT>' : SQLInOutExecutable,<EOL>'<STR_LIT>' : LalappsInspinjExecutable,<EOL>'<STR_LIT>' : PycbcDarkVsBrightInjectionsExecutable,<EOL>'<STR_LIT>' : PycbcTimeslidesExecutable,<EOL>'<STR_LIT>' : ComputeDurationsExecutable,<EOL>'<STR_LIT>' : PycbcCalculateFarExecutable,<EOL>"<STR_LIT>" : SQLInOutExecutable,<EOL>"<STR_LIT>" : ExtractToXMLExecutable,<EOL>"<STR_LIT>" : InspinjfindExecutable,<EOL>"<STR_LIT>" : PycbcPickleHorizonDistsExecutable,<EOL>"<STR_LIT>" : PycbcCombineLikelihoodExecutable,<EOL>"<STR_LIT>" : PycbcGenerateRankingDataExecutable,<EOL>"<STR_LIT>" : PycbcCalculateLikelihoodExecutable,<EOL>"<STR_LIT>" : GstlalMarginalizeLikelihoodExecutable,<EOL>"<STR_LIT>" : GstlalFarfromsnrchisqhistExecutable,<EOL>"<STR_LIT>" : GstlalPlotSensitivity,<EOL>"<STR_LIT>" : GstlalPlotBackground,<EOL>"<STR_LIT>" : GstlalPlotSummary,<EOL>"<STR_LIT>" : GstlalSummaryPage,<EOL>"<STR_LIT>" : PycbcConditionStrainExecutable<EOL>}<EOL>try:<EOL><INDENT>return exe_to_class_map[exe_name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>" % exe_name)<EOL><DEDENT>
Returns a class that is appropriate for setting up jobs to run executables having specific tags in the workflow config. Executables should not be "specialized" jobs fitting into one of the select_XXX_class functions above, i.e. not a matched filter or template bank job, which require extra setup. Parameters ---------- workflow : pycbc.workflow.core.Workflow The Workflow instance. exe_tag : string The name of the config section storing options for this executable and the option giving the executable path in the [executables] section. Returns -------- exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility functions appropriate for the given executable. Instances of the class ('jobs') **must** have a method job.create_node()
f15994:m3
def sngl_ifo_job_setup(workflow, ifo, out_files, curr_exe_job, science_segs,<EOL>datafind_outs, parents=None,<EOL>link_job_instance=None, allow_overlap=True,<EOL>compatibility_mode=True):
if compatibility_mode and not link_job_instance:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>data_length, valid_chunk, valid_length = identify_needed_data(curr_exe_job,<EOL>link_job_instance=link_job_instance)<EOL>for curr_seg in science_segs:<EOL><INDENT>segmenter = JobSegmenter(data_length, valid_chunk, valid_length,<EOL>curr_seg, curr_exe_job,<EOL>compatibility_mode=compatibility_mode)<EOL>for job_num in range(segmenter.num_jobs):<EOL><INDENT>job_valid_seg = segmenter.get_valid_times_for_job(job_num,<EOL>allow_overlap=allow_overlap)<EOL>job_data_seg = segmenter.get_data_times_for_job(job_num)<EOL>if parents:<EOL><INDENT>curr_parent = parents.find_outputs_in_range(ifo, job_valid_seg,<EOL>useSplitLists=True)<EOL>if not curr_parent:<EOL><INDENT>err_string = ("<STR_LIT>"<EOL>%(job_valid_seg[<NUM_LIT:0>], job_valid_seg[<NUM_LIT:1>]))<EOL>err_string += "<STR_LIT>"<EOL>raise ValueError(err_string)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>curr_parent = [None]<EOL><DEDENT>curr_dfouts = None<EOL>if datafind_outs:<EOL><INDENT>curr_dfouts = datafind_outs.find_all_output_in_range(ifo,<EOL>job_data_seg, useSplitLists=True)<EOL>if not curr_dfouts:<EOL><INDENT>err_str = ("<STR_LIT>"<EOL>%(job_data_seg[<NUM_LIT:0>],job_data_seg[<NUM_LIT:1>]))<EOL>err_str += "<STR_LIT>"<EOL>raise ValueError(err_str)<EOL><DEDENT><DEDENT>sorted_parents = sorted(curr_parent,<EOL>key=lambda fobj: fobj.tagged_description)<EOL>for pnum, parent in enumerate(sorted_parents):<EOL><INDENT>if len(curr_parent) != <NUM_LIT:1>:<EOL><INDENT>tag = ["<STR_LIT>" %(pnum,)]<EOL><DEDENT>else:<EOL><INDENT>tag = []<EOL><DEDENT>node = curr_exe_job.create_node(job_data_seg, job_valid_seg,<EOL>parent=parent,<EOL>dfParents=curr_dfouts,<EOL>tags=tag)<EOL>workflow.add_node(node)<EOL>curr_out_files = node.output_files<EOL>curr_out_files = [i for i in curr_out_files if '<STR_LIT>'not in i.tags]<EOL>out_files += curr_out_files<EOL><DEDENT><DEDENT><DEDENT>return out_files<EOL>
This function sets up a set of single ifo jobs. A basic overview of how this works is as follows: * (1) Identify the length of data that each job needs to read in, and what part of that data the job is valid for. * START LOOPING OVER SCIENCE SEGMENTS * (2) Identify how many jobs are needed (if any) to cover the given science segment and the time shift between jobs. If no jobs continue. * START LOOPING OVER JOBS * (3) Identify the time that the given job should produce valid output (ie. inspiral triggers) over. * (4) Identify the data range that the job will need to read in to produce the aforementioned valid output. * (5) Identify all parents/inputs of the job. * (6) Add the job to the workflow * END LOOPING OVER JOBS * END LOOPING OVER SCIENCE SEGMENTS Parameters ----------- workflow: pycbc.workflow.core.Workflow An instance of the Workflow class that manages the constructed workflow. ifo : string The name of the ifo to set up the jobs for out_files : pycbc.workflow.core.FileList The FileList containing the list of jobs. Jobs will be appended to this list, and it does not need to be empty when supplied. curr_exe_job : Job An instanced of the Job class that has a get_valid times method. science_segs : ligo.segments.segmentlist The list of times that the jobs should cover datafind_outs : pycbc.workflow.core.FileList The file list containing the datafind files. parents : pycbc.workflow.core.FileList (optional, kwarg, default=None) The FileList containing the list of jobs that are parents to the one being set up. link_job_instance : Job instance (optional), Coordinate the valid times with another Executable. allow_overlap : boolean (optional, kwarg, default = True) If this is set the times that jobs are valid for will be allowed to overlap. This may be desired for template banks which may have some overlap in the times they cover. This may not be desired for inspiral jobs, where you probably want triggers recorded by jobs to not overlap at all. 
compatibility_mode : boolean (optional, kwarg, default = False) If given the jobs will be tiled in the same method as used in inspiral hipe. This requires that link_job_instance is also given. If not given workflow's methods are used. Returns -------- out_files : pycbc.workflow.core.FileList A list of the files that will be generated by this step in the workflow.
f15994:m4
def multi_ifo_coherent_job_setup(workflow, out_files, curr_exe_job,<EOL>science_segs, datafind_outs, output_dir,<EOL>parents=None, slide_dict=None, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>data_seg, job_valid_seg = curr_exe_job.get_valid_times()<EOL>curr_out_files = FileList([])<EOL>if '<STR_LIT>' in datafind_outs[-<NUM_LIT:1>].descriptionand '<STR_LIT>' in datafind_outs[-<NUM_LIT:2>].description:<EOL><INDENT>ipn_sky_points = datafind_outs[-<NUM_LIT:1>]<EOL>bank_veto = datafind_outs[-<NUM_LIT:2>]<EOL>frame_files = datafind_outs[:-<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>ipn_sky_points = None<EOL>bank_veto = datafind_outs[-<NUM_LIT:1>]<EOL>frame_files = datafind_outs[:-<NUM_LIT:1>]<EOL><DEDENT>split_bank_counter = <NUM_LIT:0><EOL>if curr_exe_job.injection_file is None:<EOL><INDENT>for split_bank in parents:<EOL><INDENT>tag = list(tags)<EOL>tag.append(split_bank.tag_str)<EOL>node = curr_exe_job.create_node(data_seg, job_valid_seg,<EOL>parent=split_bank, dfParents=frame_files,<EOL>bankVetoBank=bank_veto, ipn_file=ipn_sky_points,<EOL>slide=slide_dict, tags=tag)<EOL>workflow.add_node(node)<EOL>split_bank_counter += <NUM_LIT:1><EOL>curr_out_files.extend(node.output_files)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for inj_file in curr_exe_job.injection_file:<EOL><INDENT>for split_bank in parents:<EOL><INDENT>tag = list(tags)<EOL>tag.append(inj_file.tag_str)<EOL>tag.append(split_bank.tag_str)<EOL>node = curr_exe_job.create_node(data_seg, job_valid_seg,<EOL>parent=split_bank, inj_file=inj_file, tags=tag,<EOL>dfParents=frame_files, bankVetoBank=bank_veto,<EOL>ipn_file=ipn_sky_points)<EOL>workflow.add_node(node)<EOL>split_bank_counter += <NUM_LIT:1><EOL>curr_out_files.extend(node.output_files)<EOL><DEDENT><DEDENT><DEDENT>curr_out_files = [i for i in curr_out_files if '<STR_LIT>'not in i.tags]<EOL>out_files += curr_out_files<EOL>return out_files<EOL>
Method for setting up coherent inspiral jobs.
f15994:m5
def identify_needed_data(curr_exe_job, link_job_instance=None):
<EOL>data_lengths, valid_chunks = curr_exe_job.get_valid_times()<EOL>valid_lengths = [abs(valid_chunk) for valid_chunk in valid_chunks]<EOL>if link_job_instance:<EOL><INDENT>link_data_length, link_valid_chunk = link_job_instance.get_valid_times()<EOL>if len(link_data_length) > <NUM_LIT:1> or len(valid_lengths) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>start_data_loss = max(valid_chunks[<NUM_LIT:0>][<NUM_LIT:0>], link_valid_chunk[<NUM_LIT:0>][<NUM_LIT:0>])<EOL>end_data_loss = max(data_lengths[<NUM_LIT:0>] - valid_chunks[<NUM_LIT:0>][<NUM_LIT:1>],link_data_length[<NUM_LIT:0>] - link_valid_chunk[<NUM_LIT:0>][<NUM_LIT:1>])<EOL>valid_chunks[<NUM_LIT:0>] = segments.segment(start_data_loss,data_lengths[<NUM_LIT:0>] - end_data_loss)<EOL>link_valid_chunk = segments.segment(start_data_loss,link_data_length[<NUM_LIT:0>] - end_data_loss)<EOL>link_valid_length = abs(link_valid_chunk)<EOL>if link_valid_length < valid_lengths[<NUM_LIT:0>]:<EOL><INDENT>valid_lengths[<NUM_LIT:0>] = link_valid_length<EOL><DEDENT><DEDENT>return data_lengths, valid_chunks, valid_lengths<EOL>
This function will identify the length of data that a specific executable needs to analyse and what part of that data is valid (ie. inspiral doesn't analyse the first or last 64+8s of data it reads in). In addition you can supply a second job instance to "link" to, which will ensure that the two jobs will have a one-to-one correspondence (ie. one template bank per one matched-filter job) and the corresponding jobs will be "valid" at the same times. Parameters ----------- curr_exe_job : Job An instance of the Job class that has a get_valid times method. link_job_instance : Job instance (optional), Coordinate the valid times with another executable. Returns -------- dataLength : float The amount of data (in seconds) that each instance of the job must read in. valid_chunk : glue.segment.segment The times within dataLength for which that jobs output **can** be valid (ie. for inspiral this is (72, dataLength-72) as, for a standard setup the inspiral job cannot look for triggers in the first 72 or last 72 seconds of data read in.) valid_length : float The maximum length of data each job can be valid for. If not using link_job_instance this is abs(valid_segment), but can be smaller than that if the linked job only analyses a small amount of data (for e.g.).
f15994:m6
def __init__(self, data_lengths, valid_chunks, valid_lengths, curr_seg,<EOL>curr_exe_class, compatibility_mode = False):
self.exe_class = curr_exe_class<EOL>self.curr_seg = curr_seg<EOL>self.curr_seg_length = float(abs(curr_seg))<EOL>self.data_length, self.valid_chunk, self.valid_length =self.pick_tile_size(self.curr_seg_length, data_lengths,<EOL>valid_chunks, valid_lengths)<EOL>self.data_chunk = segments.segment([<NUM_LIT:0>, self.data_length])<EOL>self.data_loss = self.data_length - abs(self.valid_chunk)<EOL>if self.data_loss < <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if self.curr_seg_length < self.data_length:<EOL><INDENT>self.num_jobs = <NUM_LIT:0><EOL>return<EOL><DEDENT>self.num_jobs = int( math.ceil( (self.curr_seg_length- self.data_loss) / float(self.valid_length) ))<EOL>self.compatibility_mode = compatibility_mode<EOL>if compatibility_mode and (self.valid_length != abs(self.valid_chunk)):<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>print(self.valid_length, self.valid_chunk)<EOL>raise ValueError(errMsg)<EOL><DEDENT>elif compatibility_mode and len(data_lengths) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>elif compatibility_mode:<EOL><INDENT>self.job_time_shift = self.valid_length<EOL><DEDENT>elif self.curr_seg_length == self.data_length:<EOL><INDENT>self.job_time_shift = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>self.job_time_shift = (self.curr_seg_length - self.data_length) /float(self.num_jobs - <NUM_LIT:1>)<EOL><DEDENT>
Initialize class.
f15994:c0:m0
def pick_tile_size(self, seg_size, data_lengths, valid_chunks, valid_lengths):
if len(valid_lengths) == <NUM_LIT:1>:<EOL><INDENT>return data_lengths[<NUM_LIT:0>], valid_chunks[<NUM_LIT:0>], valid_lengths[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>target_size = seg_size / <NUM_LIT:3><EOL>pick, pick_diff = <NUM_LIT:0>, abs(valid_lengths[<NUM_LIT:0>] - target_size)<EOL>for i, size in enumerate(valid_lengths):<EOL><INDENT>if abs(size - target_size) < pick_diff:<EOL><INDENT>pick, pick_diff = i, abs(size - target_size)<EOL><DEDENT><DEDENT>return data_lengths[pick], valid_chunks[pick], valid_lengths[pick]<EOL><DEDENT>
Choose job tiles size based on science segment length
f15994:c0:m1
def get_valid_times_for_job(self, num_job, allow_overlap=True):
if self.compatibility_mode:<EOL><INDENT>return self.get_valid_times_for_job_legacy(num_job)<EOL><DEDENT>else:<EOL><INDENT>return self.get_valid_times_for_job_workflow(num_job,<EOL>allow_overlap=allow_overlap)<EOL><DEDENT>
Get the times for which this job is valid.
f15994:c0:m2
def get_valid_times_for_job_workflow(self, num_job, allow_overlap=True):
<EOL>shift_dur = self.curr_seg[<NUM_LIT:0>] + int(self.job_time_shift * num_job+ <NUM_LIT>)<EOL>job_valid_seg = self.valid_chunk.shift(shift_dur)<EOL>if not allow_overlap:<EOL><INDENT>data_per_job = (self.curr_seg_length - self.data_loss) /float(self.num_jobs)<EOL>lower_boundary = num_job*data_per_job +self.valid_chunk[<NUM_LIT:0>] + self.curr_seg[<NUM_LIT:0>]<EOL>upper_boundary = data_per_job + lower_boundary<EOL>lower_boundary = int(lower_boundary)<EOL>upper_boundary = int(upper_boundary + <NUM_LIT>)<EOL>if lower_boundary < job_valid_seg[<NUM_LIT:0>] orupper_boundary > job_valid_seg[<NUM_LIT:1>]:<EOL><INDENT>err_msg = ("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>raise ValueError(err_msg)<EOL><DEDENT>job_valid_seg = segments.segment([lower_boundary,<EOL>upper_boundary])<EOL><DEDENT>return job_valid_seg<EOL>
Get the times for which the job num_job will be valid, using workflow's method.
f15994:c0:m3
def get_valid_times_for_job_legacy(self, num_job):
<EOL>shift_dur = self.curr_seg[<NUM_LIT:0>] + int(self.job_time_shift * num_job)<EOL>job_valid_seg = self.valid_chunk.shift(shift_dur)<EOL>if num_job == (self.num_jobs - <NUM_LIT:1>):<EOL><INDENT>dataPushBack = self.data_length - self.valid_chunk[<NUM_LIT:1>]<EOL>job_valid_seg = segments.segment(job_valid_seg[<NUM_LIT:0>],<EOL>self.curr_seg[<NUM_LIT:1>] - dataPushBack)<EOL><DEDENT>return job_valid_seg<EOL>
Get the times for which the job num_job will be valid, using the method use in inspiral hipe.
f15994:c0:m4
def get_data_times_for_job(self, num_job):
if self.compatibility_mode:<EOL><INDENT>job_data_seg = self.get_data_times_for_job_legacy(num_job)<EOL><DEDENT>else:<EOL><INDENT>job_data_seg = self.get_data_times_for_job_workflow(num_job)<EOL><DEDENT>if num_job == <NUM_LIT:0>:<EOL><INDENT>if job_data_seg[<NUM_LIT:0>] != self.curr_seg[<NUM_LIT:0>]:<EOL><INDENT>err= "<STR_LIT>"<EOL>err += "<STR_LIT>"<EOL>raise ValueError(err)<EOL><DEDENT><DEDENT>if num_job == (self.num_jobs - <NUM_LIT:1>):<EOL><INDENT>if job_data_seg[<NUM_LIT:1>] != self.curr_seg[<NUM_LIT:1>]:<EOL><INDENT>err = "<STR_LIT>"<EOL>err += "<STR_LIT>"<EOL>raise ValueError(err)<EOL><DEDENT><DEDENT>if hasattr(self.exe_class, '<STR_LIT>'):<EOL><INDENT>job_data_seg = self.exe_class.zero_pad_data_extend(job_data_seg,<EOL>self.curr_seg)<EOL><DEDENT>return job_data_seg<EOL>
Get the data that this job will read in.
f15994:c0:m5
def get_data_times_for_job_workflow(self, num_job):
<EOL>shift_dur = self.curr_seg[<NUM_LIT:0>] + int(self.job_time_shift * num_job+ <NUM_LIT>)<EOL>job_data_seg = self.data_chunk.shift(shift_dur)<EOL>return job_data_seg<EOL>
Get the data that this job will need to read in.
f15994:c0:m6
def get_data_times_for_job_legacy(self, num_job):
<EOL>shift_dur = self.curr_seg[<NUM_LIT:0>] + int(self.job_time_shift * num_job)<EOL>job_data_seg = self.data_chunk.shift(shift_dur)<EOL>if num_job == (self.num_jobs - <NUM_LIT:1>):<EOL><INDENT>dataPushBack = job_data_seg[<NUM_LIT:1>] - self.curr_seg[<NUM_LIT:1>]<EOL>assert dataPushBack >= <NUM_LIT:0><EOL>job_data_seg = segments.segment(job_data_seg[<NUM_LIT:0>] - dataPushBack,<EOL>self.curr_seg[<NUM_LIT:1>])<EOL>assert (abs(job_data_seg) == self.data_length)<EOL><DEDENT>return job_data_seg<EOL>
Get the data that this job will need to read in.
f15994:c0:m7
def get_valid_times(self):
if self.cp.has_option('<STR_LIT>',<EOL>'<STR_LIT>'):<EOL><INDENT>min_analysis_segs = int(self.cp.get('<STR_LIT>',<EOL>'<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>min_analysis_segs = <NUM_LIT:0><EOL><DEDENT>if self.cp.has_option('<STR_LIT>',<EOL>'<STR_LIT>'):<EOL><INDENT>max_analysis_segs = int(self.cp.get('<STR_LIT>',<EOL>'<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>max_analysis_segs = <NUM_LIT:1000><EOL><DEDENT>if self.cp.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>min_analysis_length = int(self.cp.get('<STR_LIT>',<EOL>'<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>min_analysis_length = <NUM_LIT:0><EOL><DEDENT>if self.cp.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>max_analysis_length = int(self.cp.get('<STR_LIT>',<EOL>'<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>max_analysis_length = <NUM_LIT><EOL><DEDENT>segment_length = int(self.get_opt('<STR_LIT>'))<EOL>pad_data = <NUM_LIT:0><EOL>if self.has_opt('<STR_LIT>'):<EOL><INDENT>pad_data += int(self.get_opt('<STR_LIT>'))<EOL><DEDENT>if self.has_opt('<STR_LIT>'):<EOL><INDENT>self.zero_padding=True<EOL><DEDENT>else:<EOL><INDENT>self.zero_padding=False<EOL><DEDENT>start_pad = int(self.get_opt( '<STR_LIT>'))<EOL>end_pad = int(self.get_opt('<STR_LIT>'))<EOL>seg_ranges = range(min_analysis_segs, max_analysis_segs + <NUM_LIT:1>)<EOL>data_lengths = []<EOL>valid_regions = []<EOL>for nsegs in seg_ranges:<EOL><INDENT>analysis_length = (segment_length - start_pad - end_pad) * nsegs<EOL>if not self.zero_padding:<EOL><INDENT>data_length = analysis_length + pad_data * <NUM_LIT:2>+ start_pad + end_pad<EOL>start = pad_data + start_pad<EOL>end = data_length - pad_data - end_pad<EOL><DEDENT>else:<EOL><INDENT>data_length = analysis_length + pad_data * <NUM_LIT:2><EOL>start = pad_data<EOL>end = data_length - pad_data<EOL><DEDENT>if data_length > max_analysis_length: continue<EOL>if data_length < min_analysis_length: continue<EOL>data_lengths += [data_length]<EOL>valid_regions += [segments.segment(start, end)]<EOL><DEDENT>if 
min_analysis_length:<EOL><INDENT>data_length = min_analysis_length<EOL>if not self.zero_padding:<EOL><INDENT>start = pad_data + start_pad<EOL>end = data_length - pad_data - end_pad<EOL><DEDENT>else:<EOL><INDENT>start = pad_data<EOL>end = data_length - pad_data<EOL><DEDENT>if end > start:<EOL><INDENT>data_lengths += [data_length]<EOL>valid_regions += [segments.segment(start, end)]<EOL><DEDENT><DEDENT>return data_lengths, valid_regions<EOL>
Determine possible dimensions of needed input and valid output
f15994:c1:m2
def zero_pad_data_extend(self, job_data_seg, curr_seg):
if self.zero_padding is False:<EOL><INDENT>return job_data_seg<EOL><DEDENT>else:<EOL><INDENT>start_pad = int(self.get_opt( '<STR_LIT>'))<EOL>end_pad = int(self.get_opt('<STR_LIT>'))<EOL>new_data_start = max(curr_seg[<NUM_LIT:0>], job_data_seg[<NUM_LIT:0>] - start_pad)<EOL>new_data_end = min(curr_seg[<NUM_LIT:1>], job_data_seg[<NUM_LIT:1>] + end_pad)<EOL>new_data_seg = segments.segment([new_data_start, new_data_end])<EOL>return new_data_seg<EOL><DEDENT>
When using zero padding, *all* data is analysable, but the setup functions must include the padding data where it is available so that we are not zero-padding in the middle of science segments. This function takes a job_data_seg, that is chosen for a particular node and extends it with segment-start-pad and segment-end-pad if that data is available.
f15994:c1:m3
def create_nodata_node(self, valid_seg, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>node = Node(self)<EOL>if self.write_psd:<EOL><INDENT>node.new_output_file_opt(valid_seg, '<STR_LIT>', '<STR_LIT>',<EOL>tags=tags+['<STR_LIT>'], store_file=self.retain_files)<EOL><DEDENT>node.new_output_file_opt(valid_seg, '<STR_LIT>', '<STR_LIT>',<EOL>store_file=self.retain_files)<EOL>if self.psd_files is not None:<EOL><INDENT>should_add = False<EOL>for ifo in self.ifo_list:<EOL><INDENT>for psd_file in self.psd_files:<EOL><INDENT>if ifo in psd_file.ifo_list:<EOL><INDENT>should_add = True<EOL><DEDENT><DEDENT><DEDENT>if should_add:<EOL><INDENT>node.add_input_opt('<STR_LIT>', psd_file)<EOL><DEDENT><DEDENT>return node<EOL>
A simplified version of create_node that creates a node that does not need to read in data. Parameters ----------- valid_seg : glue.segment The segment over which to declare the node valid. Usually this would be the duration of the analysis. Returns -------- node : pycbc.workflow.core.Node The instance corresponding to the created node.
f15994:c3:m2
def create_node(self, bank, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>node = Node(self)<EOL>node.add_input_opt('<STR_LIT>', bank)<EOL>out_files = FileList([])<EOL>for i in range( <NUM_LIT:0>, self.num_banks):<EOL><INDENT>curr_tag = '<STR_LIT>' %(i)<EOL>curr_tags = bank.tags + [curr_tag] + tags<EOL>job_tag = bank.description + "<STR_LIT:_>" + self.name.upper()<EOL>out_file = File(bank.ifo_list, job_tag, bank.segment,<EOL>extension=self.extension, directory=self.out_dir,<EOL>tags=curr_tags, store_file=self.retain_files)<EOL>out_files.append(out_file)<EOL><DEDENT>node.add_output_list_opt('<STR_LIT>', out_files)<EOL>return node<EOL>
Set up a CondorDagmanNode class to run splitbank code Parameters ---------- bank : pycbc.workflow.core.File The File containing the template bank to be split Returns -------- node : pycbc.workflow.core.Node The node to run the job
f15994:c29:m1
def create_node(self, config_file=None, seed=None, tags=None):
<EOL>tags = [] if tags is None else tags<EOL>start_time = self.cp.get("<STR_LIT>", "<STR_LIT>")<EOL>end_time = self.cp.get("<STR_LIT>", "<STR_LIT>")<EOL>analysis_time = segments.segment(int(start_time), int(end_time))<EOL>node = Node(self)<EOL>node.add_input_opt("<STR_LIT>", config_file)<EOL>if seed:<EOL><INDENT>node.add_opt("<STR_LIT>", seed)<EOL><DEDENT>injection_file = node.new_output_file_opt(analysis_time,<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>tags=tags)<EOL>return node, injection_file<EOL>
Set up a CondorDagmanNode class to run ``pycbc_create_injections``. Parameters ---------- config_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for inference configuration file to be used with ``--config-files`` option. seed : int Seed to use for generating injections. tags : list A list of tags to include in filenames. Returns -------- node : pycbc.workflow.core.Node The node to run the job.
f15994:c32:m1
def create_node(self, channel_names, config_file, injection_file=None,<EOL>seed=None, fake_strain_seed=None, tags=None):
<EOL>tags = [] if tags is None else tags<EOL>start_time = self.cp.get("<STR_LIT>", "<STR_LIT>")<EOL>end_time = self.cp.get("<STR_LIT>", "<STR_LIT>")<EOL>analysis_time = segments.segment(int(start_time), int(end_time))<EOL>channel_names_opt = "<STR_LIT:U+0020>".join(["<STR_LIT>".format(k, v)<EOL>for k, v in channel_names.iteritems()])<EOL>if fake_strain_seed is not None:<EOL><INDENT>fake_strain_seed_opt = "<STR_LIT:U+0020>".join([<EOL>"<STR_LIT>".format(k, v)<EOL>for k, v in fake_strain_seed.iteritems()])<EOL><DEDENT>node = Node(self)<EOL>node.add_opt("<STR_LIT>", "<STR_LIT:U+0020>".join(self.ifo_list))<EOL>node.add_opt("<STR_LIT>", start_time)<EOL>node.add_opt("<STR_LIT>", end_time)<EOL>node.add_opt("<STR_LIT>", channel_names_opt)<EOL>node.add_input_opt("<STR_LIT>", config_file)<EOL>if fake_strain_seed is not None:<EOL><INDENT>node.add_opt("<STR_LIT>", fake_strain_seed_opt)<EOL><DEDENT>if injection_file:<EOL><INDENT>node.add_input_opt("<STR_LIT>", injection_file)<EOL><DEDENT>if seed:<EOL><INDENT>node.add_opt("<STR_LIT>", seed)<EOL><DEDENT>inference_file = node.new_output_file_opt(analysis_time,<EOL>"<STR_LIT>", "<STR_LIT>",<EOL>tags=tags)<EOL>if self.cp.has_option("<STR_LIT>",<EOL>"<STR_LIT>"):<EOL><INDENT>ckpt_file_name = "<STR_LIT>".format(inference_file.name)<EOL>ckpt_file = dax.File(ckpt_file_name)<EOL>node._dax_node.uses(ckpt_file, link=dax.Link.OUTPUT,<EOL>register=False, transfer=False)<EOL><DEDENT>return node, inference_file<EOL>
Set up a CondorDagmanNode class to run ``pycbc_inference``. Parameters ---------- channel_names : dict A ``dict`` of ``str`` to use for ``--channel-name`` option. config_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for inference configuration file to be used with ``--config-files`` option. injection_file : pycbc.workflow.core.File A ``pycbc.workflow.core.File`` for injection file to be used with ``--injection-file`` option. seed : int An ``int`` to be used with ``--seed`` option. fake_strain_seed : dict An ``int`` to be used with ``--fake-strain-seed`` option. tags : list A list of tags to include in filenames. Returns -------- node : pycbc.workflow.core.Node The node to run the job.
f15994:c33:m1
def setup_datafind_workflow(workflow, scienceSegs, outputDir, seg_file=None,<EOL>tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>make_analysis_dir(outputDir)<EOL>cp = workflow.cp<EOL>datafind_method = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL>if cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>checkSegmentGaps = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL><DEDENT>else:<EOL><INDENT>checkSegmentGaps = "<STR_LIT>"<EOL><DEDENT>if cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>checkFramesExist = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL><DEDENT>else:<EOL><INDENT>checkFramesExist = "<STR_LIT>"<EOL><DEDENT>if cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>checkSegmentSummary = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL><DEDENT>else:<EOL><INDENT>checkSegmentSummary = "<STR_LIT>"<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>if datafind_method == "<STR_LIT>":<EOL><INDENT>datafindcaches, datafindouts =setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs,<EOL>outputDir, tags=tags)<EOL><DEDENT>elif datafind_method == "<STR_LIT>":<EOL><INDENT>datafindcaches, datafindouts =setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs,<EOL>outputDir, tags=tags)<EOL><DEDENT>elif datafind_method == "<STR_LIT>":<EOL><INDENT>datafindcaches, datafindouts =setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs,<EOL>outputDir, tags=tags)<EOL><DEDENT>elif datafind_method == "<STR_LIT>":<EOL><INDENT>datafindcaches, datafindouts =setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs,<EOL>outputDir, tags=tags)<EOL><DEDENT>elif datafind_method == "<STR_LIT>":<EOL><INDENT>pass<EOL><DEDENT>elif datafind_method == "<STR_LIT>":<EOL><INDENT>ifos = scienceSegs.keys()<EOL>datafindcaches, datafindouts =setup_datafind_from_pregenerated_lcf_files(cp, ifos,<EOL>outputDir, tags=tags)<EOL><DEDENT>else:<EOL><INDENT>msg = """<STR_LIT>"""<EOL>raise ValueError(msg)<EOL><DEDENT>using_backup_server = False<EOL>if 
datafind_method == "<STR_LIT>" ordatafind_method == "<STR_LIT>":<EOL><INDENT>if cp.has_option_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags):<EOL><INDENT>using_backup_server = True<EOL>backup_server = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", tags)<EOL>cp_new = copy.deepcopy(cp)<EOL>cp_new.set("<STR_LIT>",<EOL>"<STR_LIT>", backup_server)<EOL>cp_new.set('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>backup_datafindcaches, backup_datafindouts =setup_datafind_runtime_frames_single_call_perifo(cp_new,<EOL>scienceSegs, outputDir, tags=tags)<EOL>backup_datafindouts = datafind_keep_unique_backups(backup_datafindouts, datafindouts)<EOL>datafindcaches.extend(backup_datafindcaches)<EOL>datafindouts.extend(backup_datafindouts)<EOL><DEDENT><DEDENT>logging.info("<STR_LIT>")<EOL>if checkSegmentGaps in ['<STR_LIT>','<STR_LIT>','<STR_LIT>']:<EOL><INDENT>logging.info("<STR_LIT>")<EOL>newScienceSegs = get_science_segs_from_datafind_outs(datafindcaches)<EOL>logging.info("<STR_LIT>")<EOL>missingData = False<EOL>for ifo in scienceSegs.keys():<EOL><INDENT>if not scienceSegs[ifo]:<EOL><INDENT>msg = "<STR_LIT>" %(ifo)<EOL>msg += "<STR_LIT>"<EOL>msg += "<STR_LIT>"<EOL>msg += "<STR_LIT>"<EOL>logging.warning(msg)<EOL>continue<EOL><DEDENT>if ifo not in newScienceSegs:<EOL><INDENT>msg = "<STR_LIT>"<EOL>msg += "<STR_LIT>" %(ifo)<EOL>logging.error(msg)<EOL>missingData = True<EOL>if checkSegmentGaps == '<STR_LIT>':<EOL><INDENT>scienceSegs[ifo] = segments.segmentlist()<EOL><DEDENT>continue<EOL><DEDENT>missing = scienceSegs[ifo] - newScienceSegs[ifo]<EOL>if abs(missing):<EOL><INDENT>msg = "<STR_LIT>" %(ifo)<EOL>msg += "<STR_LIT>" % "<STR_LIT:\n>".join(map(str, missing))<EOL>missingData = True<EOL>logging.error(msg)<EOL>if checkSegmentGaps == '<STR_LIT>':<EOL><INDENT>logging.info("<STR_LIT>"<EOL>%(ifo))<EOL>scienceSegs[ifo] = scienceSegs[ifo] - missing<EOL><DEDENT><DEDENT><DEDENT>if checkSegmentGaps == '<STR_LIT>' and missingData:<EOL><INDENT>raise 
ValueError("<STR_LIT>")<EOL><DEDENT>logging.info("<STR_LIT>")<EOL><DEDENT>elif checkSegmentGaps == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>if checkFramesExist in ['<STR_LIT>','<STR_LIT>','<STR_LIT>']:<EOL><INDENT>logging.info("<STR_LIT>")<EOL>missingFrSegs, missingFrames =get_missing_segs_from_frame_file_cache(datafindcaches)<EOL>missingFlag = False<EOL>for ifo in missingFrames.keys():<EOL><INDENT>if not scienceSegs[ifo]:<EOL><INDENT>continue<EOL><DEDENT>if using_backup_server:<EOL><INDENT>new_list = []<EOL>for frame in missingFrames[ifo]:<EOL><INDENT>for dfout in datafindouts:<EOL><INDENT>dfout_pfns = list(dfout.pfns)<EOL>dfout_urls = [a.url for a in dfout_pfns]<EOL>if frame.url in dfout_urls:<EOL><INDENT>pfn = dfout_pfns[dfout_urls.index(frame.url)]<EOL>dfout.removePFN(pfn)<EOL>if len(dfout.pfns) == <NUM_LIT:0>:<EOL><INDENT>new_list.append(frame)<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>"%(frame.url,)<EOL>msg += "<STR_LIT>"%(str([a.url for a in dfout.pfns]),)<EOL>logging.info(msg)<EOL><DEDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>new_list.append(frame)<EOL><DEDENT><DEDENT>missingFrames[ifo] = new_list<EOL><DEDENT>if missingFrames[ifo]:<EOL><INDENT>msg = "<STR_LIT>" %(ifo)<EOL>msg +='<STR_LIT:\n>'.join([a.url for a in missingFrames[ifo]])<EOL>missingFlag = True<EOL>logging.error(msg)<EOL><DEDENT>if checkFramesExist == '<STR_LIT>':<EOL><INDENT>logging.info("<STR_LIT>" %(ifo))<EOL>scienceSegs[ifo] = scienceSegs[ifo] - missingFrSegs[ifo]<EOL><DEDENT><DEDENT>if checkFramesExist == '<STR_LIT>' and missingFlag:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>logging.info("<STR_LIT>")<EOL><DEDENT>elif checkFramesExist == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>if checkSegmentSummary in ['<STR_LIT>', 
'<STR_LIT>']:<EOL><INDENT>logging.info("<STR_LIT>")<EOL>dfScienceSegs = get_science_segs_from_datafind_outs(datafindcaches)<EOL>missingFlag = False<EOL>sci_seg_name = "<STR_LIT>"<EOL>if seg_file is None:<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>raise ValueError(err_msg)<EOL><DEDENT>if seg_file.seg_summ_dict is None:<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>raise ValueError(err_msg)<EOL><DEDENT>seg_summary_times = seg_file.seg_summ_dict<EOL>for ifo in dfScienceSegs.keys():<EOL><INDENT>curr_seg_summ_times = seg_summary_times[ifo + "<STR_LIT::>" + sci_seg_name]<EOL>missing = (dfScienceSegs[ifo] & seg_file.valid_segments)<EOL>missing.coalesce()<EOL>missing = missing - curr_seg_summ_times<EOL>missing.coalesce()<EOL>scienceButNotFrame = scienceSegs[ifo] - dfScienceSegs[ifo]<EOL>scienceButNotFrame.coalesce()<EOL>missing2 = scienceSegs[ifo] - scienceButNotFrame<EOL>missing2.coalesce()<EOL>missing2 = missing2 - curr_seg_summ_times<EOL>missing2.coalesce()<EOL>if abs(missing):<EOL><INDENT>msg = "<STR_LIT>" %(ifo)<EOL>msg += "<STR_LIT>"<EOL>msg += "<STR_LIT>" % "<STR_LIT:\n>".join(map(str, missing))<EOL>logging.error(msg)<EOL>missingFlag = True<EOL><DEDENT>if abs(missing2):<EOL><INDENT>msg = "<STR_LIT>" %(ifo)<EOL>msg += "<STR_LIT>"<EOL>msg += "<STR_LIT>"<EOL>msg += "<STR_LIT>" % "<STR_LIT:\n>".join(map(str, missing2))<EOL>logging.error(msg)<EOL>missingFlag = True<EOL><DEDENT><DEDENT>if checkSegmentSummary == '<STR_LIT>' and missingFlag:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT><DEDENT>elif checkSegmentSummary == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>errMsg = "<STR_LIT>"<EOL>errMsg += "<STR_LIT>"<EOL>raise ValueError(errMsg)<EOL><DEDENT>sci_avlble_dict = segments.segmentlistdict()<EOL>sci_avlble_name = "<STR_LIT>"<EOL>for ifo in scienceSegs.keys():<EOL><INDENT>sci_avlble_dict[ifo + '<STR_LIT::>' + sci_avlble_name] = 
scienceSegs[ifo]<EOL><DEDENT>sci_avlble_file = SegFile.from_segment_list_dict('<STR_LIT>',<EOL>sci_avlble_dict, ifo_list = scienceSegs.keys(),<EOL>valid_segment=workflow.analysis_time,<EOL>extension='<STR_LIT>', tags=tags, directory=outputDir)<EOL>logging.info("<STR_LIT>")<EOL>if datafind_method == "<STR_LIT>":<EOL><INDENT>datafindouts = None<EOL><DEDENT>else:<EOL><INDENT>datafindouts = FileList(datafindouts) <EOL><DEDENT>return datafindouts, sci_avlble_file, scienceSegs, sci_avlble_name<EOL>
Setup datafind section of the workflow. This section is responsible for generating, or setting up the workflow to generate, a list of files that record the location of the frame files needed to perform the analysis. There could be multiple options here, the datafind jobs could be done at run time or could be put into a dag. The subsequent jobs will know what was done here from the OutFileList containing the datafind jobs (and the Dagman nodes if appropriate. For now the only implemented option is to generate the datafind files at runtime. This module can also check if the frameFiles actually exist, check whether the obtained segments line up with the original ones and update the science segments to reflect missing data files. Parameters ---------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. seg_file : SegFile, optional (default=None) The file returned by get_science_segments containing the science segments and the associated segment_summary. This will be used for the segment_summary test and is required if, and only if, performing that test. tags : list of string, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindOuts : OutGroupList List of all the datafind output files for use later in the pipeline. 
sci_avlble_file : SegFile SegFile containing the analysable time after checks in the datafind module are applied to the input segment list. For production runs this is expected to be equal to the input segment list. scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. If the updateSegmentTimes kwarg is given this will be updated to reflect any instances of missing data. sci_avlble_name : string The name with which the analysable time is stored in the sci_avlble_file.
f15995:m0
def setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs,<EOL>outputDir, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>connection = setup_datafind_server_connection(cp, tags=tags)<EOL>datafindouts = []<EOL>datafindcaches = []<EOL>logging.info("<STR_LIT>")<EOL>for ifo, scienceSegsIfo in scienceSegs.items():<EOL><INDENT>observatory = ifo[<NUM_LIT:0>].upper()<EOL>frameType = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>" % (ifo.lower()), tags)<EOL>for seg in scienceSegsIfo:<EOL><INDENT>msg = "<STR_LIT>" %(seg[<NUM_LIT:0>],seg[<NUM_LIT:1>])<EOL>msg += "<STR_LIT>" %(ifo)<EOL>logging.debug(msg)<EOL>startTime = int(seg[<NUM_LIT:0>])<EOL>endTime = int(seg[<NUM_LIT:1>])<EOL>try:<EOL><INDENT>cache, cache_file = run_datafind_instance(cp, outputDir,<EOL>connection, observatory, frameType,<EOL>startTime, endTime, ifo, tags=tags)<EOL><DEDENT>except:<EOL><INDENT>connection = setup_datafind_server_connection(cp, tags=tags)<EOL>cache, cache_file = run_datafind_instance(cp, outputDir,<EOL>connection, observatory, frameType,<EOL>startTime, endTime, ifo, tags=tags)<EOL><DEDENT>datafindouts.append(cache_file)<EOL>datafindcaches.append(cache)<EOL><DEDENT><DEDENT>return datafindcaches, datafindouts<EOL>
This function uses the glue.datafind library to obtain the location of all the frame files that will be needed to cover the analysis of the data given in scienceSegs. This function will not check if the returned frames cover the whole time requested, such sanity checks are done in the pycbc.workflow.setup_datafind_workflow entry function. As opposed to setup_datafind_runtime_single_call_perifo this call will one call to the datafind server for every science segment. This function will return a list of output files that correspond to the cache .lcf files that are produced, which list the locations of all frame files. This will cause problems with pegasus, which expects to know about all input files (ie. the frame files themselves.) Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline.
f15995:m1
def setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs, outputDir,<EOL>tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>connection = setup_datafind_server_connection(cp, tags=tags)<EOL>cp.set("<STR_LIT>","<STR_LIT>","<STR_LIT:ignore>")<EOL>datafindouts = []<EOL>datafindcaches = []<EOL>logging.info("<STR_LIT>")<EOL>for ifo, scienceSegsIfo in scienceSegs.items():<EOL><INDENT>observatory = ifo[<NUM_LIT:0>].upper()<EOL>frameType = cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>" % (ifo.lower()), tags)<EOL>startTime = int(scienceSegsIfo[<NUM_LIT:0>][<NUM_LIT:0>])<EOL>endTime = int(scienceSegsIfo[-<NUM_LIT:1>][<NUM_LIT:1>])<EOL>try:<EOL><INDENT>cache, cache_file = run_datafind_instance(cp, outputDir, connection,<EOL>observatory, frameType, startTime,<EOL>endTime, ifo, tags=tags)<EOL><DEDENT>except:<EOL><INDENT>connection = setup_datafind_server_connection(cp, tags=tags)<EOL>cache, cache_file = run_datafind_instance(cp, outputDir, connection,<EOL>observatory, frameType, startTime,<EOL>endTime, ifo, tags=tags)<EOL><DEDENT>datafindouts.append(cache_file)<EOL>datafindcaches.append(cache)<EOL><DEDENT>return datafindcaches, datafindouts<EOL>
This function uses the glue.datafind library to obtain the location of all the frame files that will be needed to cover the analysis of the data given in scienceSegs. This function will not check if the returned frames cover the whole time requested, such sanity checks are done in the pycbc.workflow.setup_datafind_workflow entry function. As opposed to setup_datafind_runtime_generated this call will only run one call to datafind per ifo, spanning the whole time. This function will return a list of output files that correspond to the cache .lcf files that are produced, which list the locations of all frame files. This will cause problems with pegasus, which expects to know about all input files (ie. the frame files themselves.) Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline.
f15995:m2
def setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs,
                                                     outputDir, tags=None):
    """
    Obtain the location of the frame files needed to cover the analysis of
    the data given in scienceSegs, using the glue.datafind library. Only one
    call to the datafind server is made per ifo, spanning the whole time.

    A list of Files corresponding to the *individual frames* returned by the
    datafind query is produced. This allows pegasus to more easily identify
    all the files used as input, but may cause problems for codes that need
    to take frame cache files as input. No check is made here that the
    returned frames cover the whole time requested; that sanity checking is
    done in pycbc.workflow.setup_datafind_workflow.

    Parameters
    -----------
    cp : ConfigParser.ConfigParser instance
        Representation of the information stored within the workflow
        configuration files.
    scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
        The times that the workflow is expected to analyse.
    outputDir : path
        All output files written by datafind processes will be written here.
    tags : list of strings, optional (default=None)
        Tags used to select call-specific configuration sections and to
        uniqueify the returned Files and their filenames.

    Returns
    --------
    datafindcaches : list of glue.lal.Cache instances
        The glue.lal.Cache representations of the various calls to the
        datafind server and the returned frame files.
    datafindouts : pycbc.workflow.core.FileList
        List of all the datafind output files for use later in the pipeline.
    """
    # Query the server once per ifo to build the frame caches, then expand
    # the caches into one File object per frame file.
    caches, _ = setup_datafind_runtime_cache_single_call_perifo(
        cp, scienceSegs, outputDir, tags=tags)
    return caches, convert_cachelist_to_filelist(caches)
f15995:m3
def setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs,
                                                     outputDir, tags=None):
    """
    Obtain the location of the frame files needed to cover the analysis of
    the data given in scienceSegs, using the glue.datafind library. One call
    is made to the datafind server for every science segment.

    A list of Files corresponding to the *individual frames* returned by the
    datafind query is produced. This allows pegasus to more easily identify
    all the files used as input, but may cause problems for codes that need
    to take frame cache files as input. No check is made here that the
    returned frames cover the whole time requested; that sanity checking is
    done in pycbc.workflow.setup_datafind_workflow.

    Parameters
    -----------
    cp : ConfigParser.ConfigParser instance
        Representation of the information stored within the workflow
        configuration files.
    scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
        The times that the workflow is expected to analyse.
    outputDir : path
        All output files written by datafind processes will be written here.
    tags : list of strings, optional (default=None)
        Tags used to select call-specific configuration sections and to
        uniqueify the returned Files and their filenames.

    Returns
    --------
    datafindcaches : list of glue.lal.Cache instances
        The glue.lal.Cache representations of the various calls to the
        datafind server and the returned frame files.
    datafindouts : pycbc.workflow.core.FileList
        List of all the datafind output files for use later in the pipeline.
    """
    # Query the server per science segment to build the frame caches, then
    # expand the caches into one File object per frame file.
    caches, _ = setup_datafind_runtime_cache_multi_calls_perifo(
        cp, scienceSegs, outputDir, tags=tags)
    return caches, convert_cachelist_to_filelist(caches)
f15995:m4
def setup_datafind_from_pregenerated_lcf_files(cp, ifos, outputDir, tags=None):
    """
    Set up datafind from pregenerated lcf frame cache files, rather than by
    querying a datafind server.

    Parameters
    -----------
    cp : ConfigParser.ConfigParser instance
        Representation of the information stored within the workflow
        configuration files.
    ifos : list of ifo strings
        List of ifos to get pregenerated files for.
    outputDir : path
        Output directory for datafind products. Currently this sub-module
        writes no output.
    tags : list of strings, optional (default=None)
        Tags used to select call-specific configuration sections and to
        uniqueify the returned Files.

    Returns
    --------
    datafindcaches : list of glue.lal.Cache instances
        The glue.lal.Cache representations of the pregenerated cache files.
    datafindouts : pycbc.workflow.core.FileList
        List of all the datafind output files for use later in the pipeline.
    """
    if tags is None:
        tags = []
    datafindcaches = []
    for interferometer in ifos:
        # Locate the pregenerated cache file for this ifo in the config.
        option_name = "<STR_LIT>" % (interferometer.lower(),)
        cache_path = cp.get_opt_tags("<STR_LIT>", option_name, tags=tags)
        ifo_cache = lal.Cache.fromfilenames([cache_path],
                                            coltype=lal.LIGOTimeGPS)
        ifo_cache.ifo = interferometer
        datafindcaches.append(ifo_cache)
    return datafindcaches, convert_cachelist_to_filelist(datafindcaches)
f15995:m5
def convert_cachelist_to_filelist(datafindcache_list):
    """
    Flatten a list of glue.lal.Cache objects into a pycbc FileList
    containing one File per distinct frame within those caches.

    Consecutive cache entries whose basenames match are collapsed onto a
    single File, with each URL registered as an additional PFN on that
    File (with a site chosen from the URL's prefix).

    Parameters
    -----------
    datafindcache_list : list of glue.lal.Cache objects
        The list of cache files to convert.

    Returns
    --------
    datafind_filelist : FileList of frame File objects
        The list of frame files.
    """
    datafind_filelist = FileList([])
    last_file = None
    last_name = None
    curr_name = None
    for frame_cache in datafindcache_list:
        frame_cache.sort()
        ifo = frame_cache.ifo
        for entry in frame_cache:
            entry.url = entry.url.replace('<STR_LIT>', '<STR_LIT>')
            if last_file:
                last_name = os.path.basename(last_file.cache_entry.url)
                curr_name = os.path.basename(entry.url)
            # Open a new File unless this entry repeats the previous
            # basename (then it is just another PFN of the same frame).
            if last_file is None or last_name != curr_name:
                new_file = File(ifo, entry.description, entry.segment,
                                file_url=entry.url, use_tmp_subdirs=True)
                datafind_filelist.append(new_file)
                last_file = new_file
            if entry.url.startswith('<STR_LIT>'):
                last_file.PFN(entry.url, site='<STR_LIT>')
                if entry.url.startswith(
                        '<STR_LIT>'):
                    last_file.PFN(entry.url, site='<STR_LIT>')
                    last_file.PFN(entry.url.replace(
                        '<STR_LIT>',
                        '<STR_LIT>'), site='<STR_LIT>')
                    last_file.PFN(entry.url.replace(
                        '<STR_LIT>',
                        '<STR_LIT>'), site='<STR_LIT>')
                    last_file.PFN(entry.url.replace(
                        '<STR_LIT>',
                        '<STR_LIT>'), site='<STR_LIT>')
                elif entry.url.startswith(
                        '<STR_LIT>'):
                    for site_name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:
                        last_file.PFN(entry.url, site=site_name)
                        last_file.PFN(entry.url,
                                      site="<STR_LIT>".format(site_name))
            else:
                last_file.PFN(entry.url, site='<STR_LIT>')
    return datafind_filelist
f15995:m6
def get_science_segs_from_datafind_outs(datafindcaches):
newScienceSegs = {}<EOL>for cache in datafindcaches:<EOL><INDENT>if len(cache) > <NUM_LIT:0>:<EOL><INDENT>groupSegs = segments.segmentlist(e.segment for e in cache).coalesce()<EOL>ifo = cache.ifo<EOL>if ifo not in newScienceSegs:<EOL><INDENT>newScienceSegs[ifo] = groupSegs<EOL><DEDENT>else:<EOL><INDENT>newScienceSegs[ifo].extend(groupSegs)<EOL>newScienceSegs[ifo].coalesce()<EOL><DEDENT><DEDENT><DEDENT>return newScienceSegs<EOL>
This function will calculate the science segments that are covered in the OutGroupList containing the frame files returned by various calls to the datafind server. This can then be used to check whether this list covers what it is expected to cover. Parameters ---------- datafindcaches : OutGroupList List of all the datafind output files. Returns -------- newScienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances The times covered by the frames found in datafindOuts.
f15995:m7
def get_missing_segs_from_frame_file_cache(datafindcaches):
missingFrameSegs = {}<EOL>missingFrames = {}<EOL>for cache in datafindcaches:<EOL><INDENT>if len(cache) > <NUM_LIT:0>:<EOL><INDENT>if not cache[<NUM_LIT:0>].scheme == '<STR_LIT:file>':<EOL><INDENT>warn_msg = "<STR_LIT>" %(cache[<NUM_LIT:0>].scheme,)<EOL>warn_msg += "<STR_LIT>"<EOL>logging.info(warn_msg)<EOL>continue<EOL><DEDENT>_, currMissingFrames = cache.checkfilesexist(on_missing="<STR_LIT>")<EOL>missingSegs = segments.segmentlist(e.segmentfor e in currMissingFrames).coalesce()<EOL>ifo = cache.ifo<EOL>if ifo not in missingFrameSegs:<EOL><INDENT>missingFrameSegs[ifo] = missingSegs<EOL>missingFrames[ifo] = lal.Cache(currMissingFrames)<EOL><DEDENT>else:<EOL><INDENT>missingFrameSegs[ifo].extend(missingSegs)<EOL>missingFrameSegs[ifo].coalesce()<EOL>missingFrames[ifo].extend(currMissingFrames)<EOL><DEDENT><DEDENT><DEDENT>return missingFrameSegs, missingFrames<EOL>
This function will use os.path.isfile to determine if all the frame files returned by the local datafind server actually exist on the disk. This can then be used to update the science times if needed. Parameters ----------- datafindcaches : OutGroupList List of all the datafind output files. Returns -------- missingFrameSegs : Dict. of ifo keyed glue.segment.segmentlist instances The times corresponding to missing frames found in datafindOuts. missingFrames: Dict. of ifo keyed lal.Cache instances The list of missing frames
f15995:m8
def setup_datafind_server_connection(cp, tags=None):
    """
    This function is responsible for setting up the connection with the
    datafind server. If a server is given in the configuration it is used,
    otherwise the datafind_connection default is used.

    Parameters
    -----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser
        The memory representation of the ConfigParser
    tags : list of strings, optional (default=None)
        Tags used to select call-specific configuration sections.

    Returns
    --------
    connection
        The open connection to the datafind server.
    """
    if tags is None:
        tags = []
    # None means "use the default server" downstream.
    datafind_server = None
    if cp.has_option_tags("<STR_LIT>", "<STR_LIT>", tags):
        datafind_server = cp.get_opt_tags("<STR_LIT>",
                                          "<STR_LIT>", tags)
    return datafind_connection(datafind_server)
f15995:m9
def get_segment_summary_times(scienceFile, segmentName):
<EOL>segmentName = segmentName.split('<STR_LIT::>')<EOL>if not len(segmentName) in [<NUM_LIT:2>,<NUM_LIT:3>]:<EOL><INDENT>raise ValueError("<STR_LIT>" %(segmentName))<EOL><DEDENT>ifo = segmentName[<NUM_LIT:0>]<EOL>channel = segmentName[<NUM_LIT:1>]<EOL>version = '<STR_LIT>'<EOL>if len(segmentName) == <NUM_LIT:3>:<EOL><INDENT>version = int(segmentName[<NUM_LIT:2>])<EOL><DEDENT>xmldoc = utils.load_filename(scienceFile.cache_entry.path,<EOL>gz=scienceFile.cache_entry.path.endswith("<STR_LIT>"),<EOL>contenthandler=ContentHandler)<EOL>segmentDefTable = table.get_table(xmldoc, "<STR_LIT>")<EOL>for entry in segmentDefTable:<EOL><INDENT>if (entry.ifos == ifo) and (entry.name == channel):<EOL><INDENT>if len(segmentName) == <NUM_LIT:2> or (entry.version==version):<EOL><INDENT>segDefID = entry.segment_def_id<EOL>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>"%(segmentName))<EOL><DEDENT>segmentSummTable = table.get_table(xmldoc, "<STR_LIT>")<EOL>summSegList = segments.segmentlist([])<EOL>for entry in segmentSummTable:<EOL><INDENT>if entry.segment_def_id == segDefID:<EOL><INDENT>segment = segments.segment(entry.start_time, entry.end_time)<EOL>summSegList.append(segment)<EOL><DEDENT><DEDENT>summSegList.coalesce()<EOL>return summSegList<EOL>
This function will find the times for which the segment_summary is set for the flag given by segmentName, by reading the segment_definer and segment_summary tables from the XML file behind scienceFile. Parameters ----------- scienceFile : SegFile The segment file that we want to use to determine this. segmentName : string The DQ flag to search for times in the segment_summary table, given as "IFO:NAME" or "IFO:NAME:VERSION". If no version is supplied the first matching segment definer is used. Raises ------- ValueError If segmentName does not split into 2 or 3 colon-separated parts, or if no matching entry is found in the segment_definer table. Returns --------- summSegList : ligo.segments.segmentlist The (coalesced) times that are covered in the segment summary table.
f15995:m10
def run_datafind_instance(cp, outputDir, connection, observatory, frameType,<EOL>startTime, endTime, ifo, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>seg = segments.segment([startTime, endTime])<EOL>dfKwargs = {}<EOL>dfKwargs['<STR_LIT>'] = '<STR_LIT:ignore>'<EOL>if cp.has_section("<STR_LIT>"):<EOL><INDENT>for item, value in cp.items("<STR_LIT>"):<EOL><INDENT>dfKwargs[item] = value<EOL><DEDENT><DEDENT>for tag in tags:<EOL><INDENT>if cp.has_section('<STR_LIT>' %(tag)):<EOL><INDENT>for item, value in cp.items("<STR_LIT>" %(tag)):<EOL><INDENT>dfKwargs[item] = value<EOL><DEDENT><DEDENT><DEDENT>log_datafind_command(observatory, frameType, startTime, endTime,<EOL>os.path.join(outputDir,'<STR_LIT>'), **dfKwargs)<EOL>logging.debug("<STR_LIT>")<EOL>dfCache = connection.find_frame_urls(observatory, frameType,<EOL>startTime, endTime, **dfKwargs)<EOL>logging.debug("<STR_LIT>")<EOL>cache_file = File(ifo, '<STR_LIT>', seg, extension='<STR_LIT>',<EOL>directory=outputDir, tags=tags)<EOL>cache_file.PFN(cache_file.cache_entry.path, site='<STR_LIT>')<EOL>dfCache.ifo = ifo<EOL>fP = open(cache_file.storage_path, "<STR_LIT:w>")<EOL>for entry in dfCache:<EOL><INDENT>start = str(int(entry.segment[<NUM_LIT:0>]))<EOL>duration = str(int(abs(entry.segment)))<EOL>print("<STR_LIT>"% (entry.observatory, entry.description, start, duration, entry.url), file=fP)<EOL>entry.segment = segments.segment(int(entry.segment[<NUM_LIT:0>]), int(entry.segment[<NUM_LIT:1>]))<EOL><DEDENT>fP.close()<EOL>return dfCache, cache_file<EOL>
This function will query the datafind server once to find frames between the specified times for the specified frame type and observatory. Parameters ---------- cp : ConfigParser instance Source for any kwargs that should be sent to the datafind module outputDir : Output cache files will be written here. We also write the commands for reproducing what is done in this function to this directory. connection : datafind connection object Initialized through the glue.datafind module, this is the open connection to the datafind server. observatory : string The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not 'H1', 'L1', 'V1' which denote interferometers. frameType : string The frame type to query for. startTime : int Integer start time to query the datafind server for frames. endTime : int Integer end time to query the datafind server for frames. ifo : string The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'. Maybe this could be merged with the observatory string, but this could cause issues if running on old 'H2' and 'H1' data. tags : list of string, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniquify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- dfCache : glue.lal.Cache instance The glue.lal.Cache representation of the call to the datafind server and the returned frame files. cacheFile : pycbc.workflow.core.File Cache file listing all of the datafind output files for use later in the pipeline.
f15995:m11
def log_datafind_command(observatory, frameType, startTime, endTime,
                         outputDir, **dfKwargs):
    """
    Print to disk an equivalent gw_data_find command that can be used
    to debug why the internal datafind module is not working.

    Parameters
    ----------
    observatory : string
        The observatory queried for. Ex. 'H', 'L' or 'V'.
    frameType : string
        The frame type queried for.
    startTime : int
        Integer start time of the query.
    endTime : int
        Integer end time of the query.
    outputDir : path
        Directory in which the command file is written.
    dfKwargs :
        The keyword arguments that were sent to the datafind query; only
        those with a known command-line equivalent are recorded, the rest
        produce a warning.
    """
    gw_command = ['<STR_LIT>', '<STR_LIT>', observatory,
                  '<STR_LIT>', frameType,
                  '<STR_LIT>', str(startTime),
                  '<STR_LIT>', str(endTime)]
    for name, value in dfKwargs.items():
        if name == '<STR_LIT>':
            gw_command.append("<STR_LIT>")
            gw_command.append(str(value))
        elif name == '<STR_LIT>':
            gw_command.append("<STR_LIT>")
            gw_command.append(str(value))
        elif name == '<STR_LIT>':
            # No command-line equivalent; deliberately skipped.
            pass
        else:
            errMsg = "<STR_LIT>" % (name)
            errMsg += "<STR_LIT>"
            # FIX: logging.warn is a deprecated alias of logging.warning.
            logging.warning(errMsg)
    fileName = "<STR_LIT>" % (observatory, frameType, startTime,
                              endTime - startTime)
    filePath = os.path.join(outputDir, fileName)
    # FIX: context manager guarantees the file handle is closed.
    with open(filePath, '<STR_LIT:w>') as fP:
        fP.write('<STR_LIT:U+0020>'.join(gw_command))
f15995:m12
def datafind_keep_unique_backups(backup_outs, orig_outs):
<EOL>return_list = FileList([])<EOL>orig_names = [f.name for f in orig_outs]<EOL>for file in backup_outs:<EOL><INDENT>if file.name not in orig_names:<EOL><INDENT>return_list.append(file)<EOL><DEDENT>else:<EOL><INDENT>index_num = orig_names.index(file.name)<EOL>orig_out = orig_outs[index_num]<EOL>pfns = list(file.pfns)<EOL>assert(len(pfns) == <NUM_LIT:1>)<EOL>orig_out.PFN(pfns[<NUM_LIT:0>].url, site='<STR_LIT>')<EOL><DEDENT><DEDENT>return return_list<EOL>
This function will take a list of backup datafind files, presumably obtained by querying a remote datafind server, e.g. CIT, and compares these against a list of original datafind files, presumably obtained by querying the local datafind server. Only the datafind files in the backup list that do not appear in the original list are returned. This allows us to use only files that are missing from the local cluster. Parameters ----------- backup_outs : FileList List of datafind files from the remote datafind server. orig_outs : FileList List of datafind files from the local datafind server. Returns -------- FileList List of datafind files in backup_outs and not in orig_outs.
f15995:m13
def grouper(iterable, n, fillvalue=None):
    """
    Collect the items of ``iterable`` into fixed-length tuples of ``n``
    items, padding the final tuple with ``fillvalue`` if needed.
    """
    # n references to the SAME iterator: zipping them consumes the
    # underlying iterable n items at a time.
    replicated = [iter(iterable)] * n
    return izip_longest(*replicated, fillvalue=fillvalue)
f15996:m0
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,<EOL>tmpltbank_file, insp_segs, insp_data_name,<EOL>insp_anal_name, dax_output, out_dir, tags=None):
logging.info('<STR_LIT>')<EOL>if not workflow.cp.has_section('<STR_LIT>'):<EOL><INDENT>logging.info('<STR_LIT>')<EOL>logging.info('<STR_LIT>')<EOL>return<EOL><DEDENT>tags = [] if tags is None else tags<EOL>makedir(dax_output)<EOL>config_path = os.path.abspath(dax_output + '<STR_LIT:/>' + '<STR_LIT:_>'.join(tags) + '<STR_LIT>')<EOL>workflow.cp.write(open(config_path, '<STR_LIT:w>'))<EOL>config_file = wdax.File(os.path.basename(config_path))<EOL>config_file.PFN(urlparse.urljoin('<STR_LIT>', urllib.pathname2url(config_path)),<EOL>site='<STR_LIT>')<EOL>exe = Executable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos, out_dir=dax_output)<EOL>node = exe.create_node()<EOL>node.add_input_opt('<STR_LIT>', config_file)<EOL>node.add_input_opt('<STR_LIT>', tmpltbank_file)<EOL>node.add_input_opt('<STR_LIT>', coinc_file)<EOL>node.add_multiifo_input_list_opt('<STR_LIT>', single_triggers)<EOL>node.add_input_opt('<STR_LIT>', insp_segs)<EOL>node.add_opt('<STR_LIT>', insp_data_name)<EOL>node.add_opt('<STR_LIT>', insp_anal_name)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>name = node.output_files[<NUM_LIT:0>].name<EOL>map_file = node.output_files[<NUM_LIT:1>]<EOL>tc_file = node.output_files[<NUM_LIT:2>]<EOL>node.add_opt('<STR_LIT>', name)<EOL>node.add_opt('<STR_LIT>', out_dir)<EOL>workflow += node<EOL>fil = node.output_files[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>staging_site = workflow.cp.get('<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>staging_site = None<EOL><DEDENT>job = dax.DAX(fil)<EOL>job.addArguments('<STR_LIT>' % os.path.splitext(os.path.basename(name))[<NUM_LIT:0>])<EOL>Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)<EOL>workflow._adag.addJob(job)<EOL>dep = dax.Dependency(parent=node._dax_node, 
child=job)<EOL>workflow._adag.addDependency(dep)<EOL>logging.info('<STR_LIT>')<EOL>
Set up a sub-workflow (a dax job plus the generator node that creates it) producing followup plots for the loudest coincident events from a statmap produced HDF file. If the relevant section is absent from the workflow configuration this is a no-op and the function simply returns. NOTE(review): the original summary described this as following up the "Nth loudest coincident injection"; the inputs here are the coinc/statmap file and single-detector triggers, so it appears to target foreground candidates -- confirm against the executable's documentation. Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating coinc_file: The statmap produced HDF coincidence file to follow up. single_triggers: list of pycbc.workflow.File A list containing the file objects associated with the merged single detector trigger files for each ifo. tmpltbank_file: pycbc.workflow.File The file object pointing to the HDF format template bank insp_segs: SegFile The segment file containing the data read and analyzed by each inspiral job. insp_data_name: str The name of the segmentlist storing data read. insp_anal_name: str The name of the segmentlist storing data analyzed. dax_output: path Directory in which the sub-workflow dax, its config snapshot, and its map/transformation-catalog files are written. out_dir: path The directory to store minifollowups result plots and files tags: {None, optional} Tags to add to the minifollowups executables Returns ------- None. The sub-workflow job and its dependency on the generator node are added to ``workflow`` as a side effect.
f15996:m1
def setup_single_det_minifollowups(workflow, single_trig_file, tmpltbank_file,<EOL>insp_segs, insp_data_name, insp_anal_name,<EOL>dax_output, out_dir, veto_file=None,<EOL>veto_segment_name=None, tags=None):
logging.info('<STR_LIT>')<EOL>if not workflow.cp.has_section('<STR_LIT>'):<EOL><INDENT>msg = '<STR_LIT>'<EOL>msg += '<STR_LIT>'<EOL>logging.info(msg)<EOL>logging.info('<STR_LIT>')<EOL>return<EOL><DEDENT>tags = [] if tags is None else tags<EOL>makedir(dax_output)<EOL>curr_ifo = single_trig_file.ifo<EOL>config_path = os.path.abspath(dax_output + '<STR_LIT:/>' + curr_ifo +'<STR_LIT:_>'.join(tags) + '<STR_LIT>')<EOL>workflow.cp.write(open(config_path, '<STR_LIT:w>'))<EOL>config_file = wdax.File(os.path.basename(config_path))<EOL>config_file.PFN(urlparse.urljoin('<STR_LIT>', urllib.pathname2url(config_path)),<EOL>site='<STR_LIT>')<EOL>exe = Executable(workflow.cp, '<STR_LIT>',<EOL>ifos=curr_ifo, out_dir=dax_output, tags=tags)<EOL>wikifile = curr_ifo + '<STR_LIT:_>'.join(tags) + '<STR_LIT>'<EOL>node = exe.create_node()<EOL>node.add_input_opt('<STR_LIT>', config_file)<EOL>node.add_input_opt('<STR_LIT>', tmpltbank_file)<EOL>node.add_input_opt('<STR_LIT>', single_trig_file)<EOL>node.add_input_opt('<STR_LIT>', insp_segs)<EOL>node.add_opt('<STR_LIT>', insp_data_name)<EOL>node.add_opt('<STR_LIT>', insp_anal_name)<EOL>node.add_opt('<STR_LIT>', curr_ifo)<EOL>node.add_opt('<STR_LIT>', wikifile)<EOL>if veto_file is not None:<EOL><INDENT>assert(veto_segment_name is not None)<EOL>node.add_input_opt('<STR_LIT>', veto_file)<EOL>node.add_opt('<STR_LIT>', veto_segment_name)<EOL><DEDENT>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>name = node.output_files[<NUM_LIT:0>].name<EOL>map_file = node.output_files[<NUM_LIT:1>]<EOL>tc_file = node.output_files[<NUM_LIT:2>]<EOL>node.add_opt('<STR_LIT>', name)<EOL>node.add_opt('<STR_LIT>', out_dir)<EOL>workflow += node<EOL>fil = node.output_files[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>staging_site = 
workflow.cp.get('<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>staging_site = None<EOL><DEDENT>job = dax.DAX(fil)<EOL>job.addArguments('<STR_LIT>'% os.path.splitext(os.path.basename(name))[<NUM_LIT:0>])<EOL>Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)<EOL>workflow._adag.addJob(job)<EOL>dep = dax.Dependency(parent=node._dax_node, child=job)<EOL>workflow._adag.addDependency(dep)<EOL>logging.info('<STR_LIT>')<EOL>
Create plots that followup the Nth loudest clustered single detector triggers from a merged single detector trigger HDF file. Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating single_trig_file: pycbc.workflow.File The File class holding the single detector triggers. tmpltbank_file: pycbc.workflow.File The file object pointing to the HDF format template bank insp_segs: SegFile The segment file containing the data read by each inspiral job. insp_data_name: str The name of the segmentlist storing data read. insp_anal_name: str The name of the segmentlist storing data analyzed. out_dir: path The directory to store minifollowups result plots and files tags: {None, optional} Tags to add to the minifollowups executables Returns ------- layout: list A list of tuples which specify the displayed file layout for the minifollowups plots.
f15996:m2
def setup_injection_minifollowups(workflow, injection_file, inj_xml_file,<EOL>single_triggers, tmpltbank_file,<EOL>insp_segs, insp_data_name, insp_anal_name,<EOL>dax_output, out_dir, tags=None):
logging.info('<STR_LIT>')<EOL>if not workflow.cp.has_section('<STR_LIT>'):<EOL><INDENT>logging.info('<STR_LIT>')<EOL>logging.info('<STR_LIT>')<EOL>return<EOL><DEDENT>tags = [] if tags is None else tags<EOL>makedir(dax_output)<EOL>config_path = os.path.abspath(dax_output + '<STR_LIT:/>' + '<STR_LIT:_>'.join(tags) + '<STR_LIT>')<EOL>workflow.cp.write(open(config_path, '<STR_LIT:w>'))<EOL>config_file = wdax.File(os.path.basename(config_path))<EOL>config_file.PFN(urlparse.urljoin('<STR_LIT>', urllib.pathname2url(config_path)),<EOL>site='<STR_LIT>')<EOL>exe = Executable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos, out_dir=dax_output)<EOL>node = exe.create_node()<EOL>node.add_input_opt('<STR_LIT>', config_file)<EOL>node.add_input_opt('<STR_LIT>', tmpltbank_file)<EOL>node.add_input_opt('<STR_LIT>', injection_file)<EOL>node.add_input_opt('<STR_LIT>', inj_xml_file)<EOL>node.add_multiifo_input_list_opt('<STR_LIT>', single_triggers)<EOL>node.add_input_opt('<STR_LIT>', insp_segs)<EOL>node.add_opt('<STR_LIT>', insp_data_name)<EOL>node.add_opt('<STR_LIT>', insp_anal_name)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>', tags=tags)<EOL>name = node.output_files[<NUM_LIT:0>].name<EOL>map_file = node.output_files[<NUM_LIT:1>]<EOL>tc_file = node.output_files[<NUM_LIT:2>]<EOL>node.add_opt('<STR_LIT>', name)<EOL>node.add_opt('<STR_LIT>', out_dir)<EOL>workflow += node<EOL>fil = node.output_files[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>staging_site = workflow.cp.get('<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT>except:<EOL><INDENT>staging_site = None<EOL><DEDENT>job = dax.DAX(fil)<EOL>job.addArguments('<STR_LIT>' % os.path.splitext(os.path.basename(name))[<NUM_LIT:0>])<EOL>Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)<EOL>workflow._adag.addJob(job)<EOL>dep = 
dax.Dependency(parent=node._dax_node, child=job)<EOL>workflow._adag.addDependency(dep)<EOL>logging.info('<STR_LIT>')<EOL>
Create plots that followup the closest missed injections Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating coinc_file: single_triggers: list of pycbc.workflow.File A list containing the file objects associated with the merged single detector trigger files for each ifo. tmpltbank_file: pycbc.workflow.File The file object pointing to the HDF format template bank insp_segs: SegFile The segment file containing the data read by each inspiral job. insp_data_name: str The name of the segmentlist storing data read. insp_anal_name: str The name of the segmentlist storing data analyzed. out_dir: path The directory to store minifollowups result plots and files tags: {None, optional} Tags to add to the minifollowups executables Returns ------- layout: list A list of tuples which specify the displayed file layout for the minifollowups plots.
f15996:m3
def make_single_template_plots(workflow, segs, data_read_name, analyzed_name,<EOL>params, out_dir, inj_file=None, exclude=None,<EOL>require=None, tags=None, params_str=None,<EOL>use_exact_inj_params=False):
tags = [] if tags is None else tags<EOL>makedir(out_dir)<EOL>name = '<STR_LIT>'<EOL>secs = requirestr(workflow.cp.get_subsections(name), require)<EOL>secs = excludestr(secs, exclude)<EOL>files = FileList([])<EOL>for tag in secs:<EOL><INDENT>for ifo in workflow.ifos:<EOL><INDENT>if params['<STR_LIT>' % ifo] == -<NUM_LIT:1.0>:<EOL><INDENT>continue<EOL><DEDENT>node = SingleTemplateExecutable(workflow.cp, '<STR_LIT>',<EOL>ifos=[ifo], out_dir=out_dir,<EOL>tags=[tag] + tags).create_node()<EOL>if use_exact_inj_params:<EOL><INDENT>node.add_opt('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>node.add_opt('<STR_LIT>', "<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>', "<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',<EOL>"<STR_LIT>" % params['<STR_LIT>'])<EOL>if '<STR_LIT>' in params or '<STR_LIT>' % ifo in params:<EOL><INDENT>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>try:<EOL><INDENT>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL><DEDENT>except:<EOL><INDENT>node.add_opt('<STR_LIT>',<EOL>"<STR_LIT>" % params['<STR_LIT>' % ifo])<EOL><DEDENT><DEDENT><DEDENT>str_trig_time = '<STR_LIT>' %(params[ifo + '<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>', str_trig_time)<EOL>node.add_input_opt('<STR_LIT>', segs)<EOL>if inj_file is not None:<EOL><INDENT>node.add_input_opt('<STR_LIT>', inj_file)<EOL><DEDENT>node.add_opt('<STR_LIT>', data_read_name)<EOL>node.add_opt('<STR_LIT>', analyzed_name)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>',<EOL>'<STR_LIT>', store_file=False)<EOL>data = node.output_files[<NUM_LIT:0>]<EOL>workflow += 
node<EOL>node = PlotExecutable(workflow.cp, name, ifos=[ifo],<EOL>out_dir=out_dir, tags=[tag] + tags).create_node()<EOL>node.add_input_opt('<STR_LIT>', data)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>',<EOL>'<STR_LIT>')<EOL>title="<STR_LIT>" %(ifo)<EOL>if params_str is not None:<EOL><INDENT>title+= "<STR_LIT>" %(params_str)<EOL><DEDENT>title+="<STR_LIT:'>"<EOL>node.add_opt('<STR_LIT>', title)<EOL>caption = "<STR_LIT>"<EOL>if params_str is not None:<EOL><INDENT>caption += "<STR_LIT>" %(params_str)<EOL><DEDENT>if use_exact_inj_params:<EOL><INDENT>caption += "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>caption += "<STR_LIT>"<EOL>caption += "<STR_LIT>"%(params['<STR_LIT>'], params['<STR_LIT>'], params['<STR_LIT>'],<EOL>params['<STR_LIT>'])<EOL><DEDENT>node.add_opt('<STR_LIT>', caption)<EOL>workflow += node<EOL>files += node.output_files<EOL><DEDENT><DEDENT>return files<EOL>
Function for creating jobs to run the pycbc_single_template code and to run the associated plotting code pycbc_single_template_plots and add these jobs to the workflow. Parameters ----------- workflow : workflow.Workflow instance The pycbc.workflow.Workflow instance to add these jobs to. segs : workflow.File instance The pycbc.workflow.File instance that points to the XML file containing the segment lists of data read in and data analyzed. data_read_name : str The name of the segmentlist containing the data read in by each inspiral job in the segs file. analyzed_name : str The name of the segmentlist containing the data analyzed by each inspiral job in the segs file. params : dictionary A dictionary containing the parameters of the template to be used. params[ifo+'end_time'] is required for all ifos in workflow.ifos. If use_exact_inj_params is False then also need to supply values for [mass1, mass2, spin1z, spin2x]. For precessing templates one also needs to supply [spin1y, spin1x, spin2x, spin2y, inclination] additionally for precession one must supply u_vals or u_vals_+ifo for all ifos. u_vals is the ratio between h_+ and h_x to use when constructing h(t). h(t) = (h_+ * u_vals) + h_x. out_dir : str Directory in which to store the output files. inj_file : workflow.File (optional, default=None) If given send this injection file to the job so that injections are made into the data. exclude : list (optional, default=None) If given, then when considering which subsections in the ini file to parse for options to add to single_template_plot, only use subsections that *do not* match strings in this list. require : list (optional, default=None) If given, then when considering which subsections in the ini file to parse for options to add to single_template_plot, only use subsections matching strings in this list. tags : list (optional, default=None) Add this list of tags to all jobs. 
params_str : str (optional, default=None) If given add this string to plot title and caption to describe the template that was used. use_exact_inj_params : boolean (optional, default=False) If True do not use masses and spins listed in the params dictionary but instead use the injection closest to the filter time as a template. Returns -------- output_files : workflow.FileList The list of workflow.Files created in this function.
f15996:m4
def make_plot_waveform_plot(workflow, params, out_dir, ifos, exclude=None,<EOL>require=None, tags=None):
tags = [] if tags is None else tags<EOL>makedir(out_dir)<EOL>name = '<STR_LIT>'<EOL>secs = requirestr(workflow.cp.get_subsections(name), require)<EOL>secs = excludestr(secs, exclude)<EOL>files = FileList([])<EOL>for tag in secs:<EOL><INDENT>node = PlotExecutable(workflow.cp, '<STR_LIT>', ifos=ifos,<EOL>out_dir=out_dir, tags=[tag] + tags).create_node()<EOL>node.add_opt('<STR_LIT>', "<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>', "<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>if '<STR_LIT>' in params:<EOL><INDENT>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>',"<STR_LIT>" % params['<STR_LIT>'])<EOL>node.add_opt('<STR_LIT>', "<STR_LIT>" % params['<STR_LIT>'])<EOL><DEDENT>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>',<EOL>'<STR_LIT>')<EOL>workflow += node<EOL>files += node.output_files<EOL><DEDENT>return files<EOL>
Add plot_waveform jobs to the workflow.
f15996:m5
def make_sngl_ifo(workflow, sngl_file, bank_file, trigger_id, out_dir, ifo,<EOL>tags=None, rank=None):
tags = [] if tags is None else tags<EOL>makedir(out_dir)<EOL>name = '<STR_LIT>'<EOL>files = FileList([])<EOL>node = PlotExecutable(workflow.cp, name, ifos=[ifo],<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_opt('<STR_LIT>', sngl_file)<EOL>node.add_input_opt('<STR_LIT>', bank_file)<EOL>node.add_opt('<STR_LIT>', str(trigger_id))<EOL>if rank is not None:<EOL><INDENT>node.add_opt('<STR_LIT>', str(rank))<EOL><DEDENT>node.add_opt('<STR_LIT>', ifo)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>files += node.output_files<EOL>return files<EOL>
Setup a job to create sngl detector sngl ifo html summary snippet.
f15996:m8
def make_qscan_plot(workflow, ifo, trig_time, out_dir, injection_file=None,<EOL>data_segments=None, time_window=<NUM_LIT:100>, tags=None):
tags = [] if tags is None else tags<EOL>makedir(out_dir)<EOL>name = '<STR_LIT>'<EOL>curr_exe = PlotQScanExecutable(workflow.cp, name, ifos=[ifo],<EOL>out_dir=out_dir, tags=tags)<EOL>node = curr_exe.create_node()<EOL>start = trig_time - time_window<EOL>end = trig_time + time_window<EOL>if data_segments is not None:<EOL><INDENT>for seg in data_segments:<EOL><INDENT>if trig_time in seg:<EOL><INDENT>data_seg = seg<EOL>break<EOL><DEDENT>elif trig_time == -<NUM_LIT:1.0>:<EOL><INDENT>node.add_opt('<STR_LIT>', int(trig_time))<EOL>node.add_opt('<STR_LIT>', int(trig_time))<EOL>node.add_opt('<STR_LIT>', trig_time)<EOL>caption_string = "<STR_LIT>" % ifo<EOL>node.add_opt('<STR_LIT>', caption_string)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files<EOL><DEDENT><DEDENT>else:<EOL><INDENT>err_msg = "<STR_LIT>".format(trig_time)<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>raise ValueError(err_msg)<EOL><DEDENT>if curr_exe.has_opt('<STR_LIT>'):<EOL><INDENT>pad_data = int(curr_exe.get_opt('<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>pad_data = <NUM_LIT:0><EOL><DEDENT>if end > (data_seg[<NUM_LIT:1>] - pad_data):<EOL><INDENT>end = data_seg[<NUM_LIT:1>] - pad_data<EOL><DEDENT>if start < (data_seg[<NUM_LIT:0>] + pad_data):<EOL><INDENT>start = data_seg[<NUM_LIT:0>] + pad_data<EOL><DEDENT><DEDENT>node.add_opt('<STR_LIT>', int(start))<EOL>node.add_opt('<STR_LIT>', int(end))<EOL>node.add_opt('<STR_LIT>', trig_time)<EOL>if injection_file is not None:<EOL><INDENT>node.add_input_opt('<STR_LIT>', injection_file)<EOL><DEDENT>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files<EOL>
Generate a make_qscan node and add it to workflow. This function generates a single node of the singles_timefreq executable and adds it to the current workflow. Parent/child relationships are set by the input/output files automatically. Parameters ----------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. ifo: str Which interferometer are we using? trig_time: int The time of the trigger being followed up. out_dir: str Location of directory to output to injection_file: pycbc.workflow.File (optional, default=None) If given, add the injections in the file to strain before making the plot. data_segments: ligo.segments.segmentlist (optional, default=None) The list of segments for which data exists and can be read in. If given the start/end times given to singles_timefreq will be adjusted if [trig_time - time_window, trig_time + time_window] does not completely lie within a valid data segment. A ValueError will be raised if the trig_time is not within a valid segment, or if it is not possible to find 2*time_window (plus the padding) of continuous data around the trigger. This **must** be coalesced. time_window: int (optional, default=None) The amount of data (not including padding) that will be read in by the singles_timefreq job. The default value of 100s should be fine for most cases. tags: list (optional, default=None) List of tags to add to the created nodes, which determine file naming.
f15996:m10
def make_singles_timefreq(workflow, single, bank_file, trig_time, out_dir,<EOL>veto_file=None, time_window=<NUM_LIT:10>, data_segments=None,<EOL>tags=None):
tags = [] if tags is None else tags<EOL>makedir(out_dir)<EOL>name = '<STR_LIT>'<EOL>curr_exe = SingleTimeFreqExecutable(workflow.cp, name, ifos=[single.ifo],<EOL>out_dir=out_dir, tags=tags)<EOL>node = curr_exe.create_node()<EOL>node.add_input_opt('<STR_LIT>', single)<EOL>node.add_input_opt('<STR_LIT>', bank_file)<EOL>start = trig_time - time_window<EOL>end = trig_time + time_window<EOL>if data_segments is not None:<EOL><INDENT>for seg in data_segments:<EOL><INDENT>if trig_time in seg:<EOL><INDENT>data_seg = seg<EOL>break<EOL><DEDENT>elif trig_time == -<NUM_LIT:1.0>:<EOL><INDENT>node.add_opt('<STR_LIT>', int(trig_time))<EOL>node.add_opt('<STR_LIT>', int(trig_time))<EOL>node.add_opt('<STR_LIT>', trig_time)<EOL>if veto_file:<EOL><INDENT>node.add_input_opt('<STR_LIT>', veto_file)<EOL><DEDENT>node.add_opt('<STR_LIT>', single.ifo)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files<EOL><DEDENT><DEDENT>else:<EOL><INDENT>err_msg = "<STR_LIT>".format(trig_time)<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>raise ValueError(err_msg)<EOL><DEDENT>if curr_exe.has_opt('<STR_LIT>'):<EOL><INDENT>pad_data = int(curr_exe.get_opt('<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>pad_data = <NUM_LIT:0><EOL><DEDENT>if abs(data_seg) < (<NUM_LIT:2> * time_window + <NUM_LIT:2> * pad_data):<EOL><INDENT>tl = <NUM_LIT:2> * time_window + <NUM_LIT:2> * pad_data<EOL>err_msg = "<STR_LIT>".format(tl)<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>".format(abs(data_seg))<EOL>raise ValueError(err_msg)<EOL><DEDENT>if data_seg[<NUM_LIT:0>] > (start - pad_data):<EOL><INDENT>start = data_seg[<NUM_LIT:0>] + pad_data<EOL>end = start + <NUM_LIT:2> * time_window<EOL><DEDENT>if data_seg[<NUM_LIT:1>] < (end + pad_data):<EOL><INDENT>end = data_seg[<NUM_LIT:1>] - pad_data<EOL>start = end - <NUM_LIT:2> * time_window<EOL><DEDENT>if data_seg[<NUM_LIT:0>] > (start - pad_data):<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>raise 
ValueError(err_msg)<EOL><DEDENT><DEDENT>node.add_opt('<STR_LIT>', int(start))<EOL>node.add_opt('<STR_LIT>', int(end))<EOL>node.add_opt('<STR_LIT>', trig_time)<EOL>if veto_file:<EOL><INDENT>node.add_input_opt('<STR_LIT>', veto_file)<EOL><DEDENT>node.add_opt('<STR_LIT>', single.ifo)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files<EOL>
Generate a singles_timefreq node and add it to workflow. This function generates a single node of the singles_timefreq executable and adds it to the current workflow. Parent/child relationships are set by the input/output files automatically. Parameters ----------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. single: pycbc.workflow.core.File instance The File object storing the single-detector triggers to followup. bank_file: pycbc.workflow.core.File instance The File object storing the template bank. trig_time: int The time of the trigger being followed up. out_dir: str Location of directory to output to veto_file: pycbc.workflow.core.File (optional, default=None) If given use this file to veto triggers to determine the loudest event. FIXME: Veto files *should* be provided a definer argument and not just assume that all segments should be read. time_window: int (optional, default=None) The amount of data (not including padding) that will be read in by the singles_timefreq job. The default value of 10s should be fine for most cases. data_segments: ligo.segments.segmentlist (optional, default=None) The list of segments for which data exists and can be read in. If given the start/end times given to singles_timefreq will be adjusted if [trig_time - time_window, trig_time + time_window] does not completely lie within a valid data segment. A ValueError will be raised if the trig_time is not within a valid segment, or if it is not possible to find 2*time_window (plus the padding) of continuous data around the trigger. This **must** be coalesced. tags: list (optional, default=None) List of tags to add to the created nodes, which determine file naming.
f15996:m11
def create_noop_node():
exe = wdax.Executable('<STR_LIT>')<EOL>pfn = distutils.spawn.find_executable('<STR_LIT:true>')<EOL>exe.add_pfn(pfn)<EOL>node = wdax.Node(exe)<EOL>return node<EOL>
Creates a noop node that can be added to a DAX doing nothing. The reason for using this is if a minifollowups dax contains no triggers currently the dax will contain no jobs and be invalid. By adding a noop node we ensure that such daxes will actually run if one adds one such noop node. Adding such a noop node into a workflow *more than once* will cause a failure.
f15996:m12
def chunks(l, n):
newn = int(len(l) / n)<EOL>for i in xrange(<NUM_LIT:0>, n-<NUM_LIT:1>):<EOL><INDENT>yield l[i*newn:i*newn+newn]<EOL><DEDENT>yield l[n*newn-newn:]<EOL>
Yield n successive chunks from l.
f15997:m0
def compute_inj_optimal_snr(workflow, inj_file, precalc_psd_files, out_dir,<EOL>tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>node = Executable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_opt('<STR_LIT>', inj_file)<EOL>node.add_input_list_opt('<STR_LIT>', precalc_psd_files)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files[<NUM_LIT:0>]<EOL>
Set up a job for computing optimal SNRs of a sim_inspiral file.
f15998:m1
def cut_distant_injections(workflow, inj_file, out_dir, tags=None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>node = Executable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_opt('<STR_LIT>', inj_file)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files[<NUM_LIT:0>]<EOL>
Set up a job for removing injections that are too distant to be seen
f15998:m2
def setup_injection_workflow(workflow, output_dir=None,<EOL>inj_section_name='<STR_LIT>', exttrig_file=None,<EOL>tags =None):
if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>make_analysis_dir(output_dir)<EOL>full_segment = workflow.analysis_time<EOL>ifos = workflow.ifos<EOL>inj_tags = []<EOL>inj_files = FileList([])<EOL>for section in workflow.cp.get_subsections(inj_section_name):<EOL><INDENT>inj_tag = section.upper()<EOL>curr_tags = tags + [inj_tag]<EOL>if not inj_tag.endswith("<STR_LIT>"):<EOL><INDENT>err_msg = "<STR_LIT>"<EOL>err_msg += "<STR_LIT>"<EOL>err_msg += "<STR_LIT>" %(inj_tag.lower())<EOL>raise ValueError(err_msg)<EOL><DEDENT>injection_method = workflow.cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>curr_tags)<EOL>if injection_method in ["<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>inj_job = LalappsInspinjExecutable(workflow.cp, inj_section_name,<EOL>out_dir=output_dir, ifos='<STR_LIT>',<EOL>tags=curr_tags)<EOL>node = inj_job.create_node(full_segment)<EOL>if injection_method == "<STR_LIT>":<EOL><INDENT>workflow.execute_node(node)<EOL><DEDENT>else:<EOL><INDENT>workflow.add_node(node)<EOL><DEDENT>inj_file = node.output_files[<NUM_LIT:0>]<EOL>inj_files.append(inj_file)<EOL><DEDENT>elif injection_method == "<STR_LIT>":<EOL><INDENT>injectionFilePath = workflow.cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", curr_tags)<EOL>injectionFilePath = resolve_url(injectionFilePath)<EOL>file_url = urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(injectionFilePath))<EOL>inj_file = File('<STR_LIT>', '<STR_LIT>', full_segment, file_url,<EOL>tags=curr_tags)<EOL>inj_file.PFN(injectionFilePath, site='<STR_LIT>')<EOL>inj_files.append(inj_file)<EOL><DEDENT>elif injection_method in ["<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>inj_job = LalappsInspinjExecutable(workflow.cp, inj_section_name,<EOL>out_dir=output_dir, ifos=ifos,<EOL>tags=curr_tags)<EOL>node = inj_job.create_node(full_segment, exttrig_file)<EOL>if injection_method == "<STR_LIT>":<EOL><INDENT>workflow.execute_node(node)<EOL><DEDENT>else:<EOL><INDENT>workflow.add_node(node)<EOL><DEDENT>inj_file = 
node.output_files[<NUM_LIT:0>]<EOL>if workflow.cp.has_option("<STR_LIT>",<EOL>"<STR_LIT>"):<EOL><INDENT>em_filter_job = PycbcDarkVsBrightInjectionsExecutable(<EOL>workflow.cp,<EOL>'<STR_LIT>',<EOL>tags=curr_tags,<EOL>out_dir=output_dir,<EOL>ifos=ifos)<EOL>node = em_filter_job.create_node(inj_file, full_segment,<EOL>curr_tags)<EOL>if injection_method == "<STR_LIT>":<EOL><INDENT>workflow.execute_node(node)<EOL><DEDENT>else:<EOL><INDENT>workflow.add_node(node)<EOL><DEDENT>inj_file = node.output_files[<NUM_LIT:0>]<EOL><DEDENT>if workflow.cp.has_option("<STR_LIT>",<EOL>"<STR_LIT>"):<EOL><INDENT>jitter_job = LigolwCBCJitterSkylocExecutable(workflow.cp,<EOL>'<STR_LIT>',<EOL>tags=curr_tags,<EOL>out_dir=output_dir,<EOL>ifos=ifos)<EOL>node = jitter_job.create_node(inj_file, full_segment, curr_tags)<EOL>if injection_method == "<STR_LIT>":<EOL><INDENT>workflow.execute_node(node)<EOL><DEDENT>else:<EOL><INDENT>workflow.add_node(node)<EOL><DEDENT>inj_file = node.output_files[<NUM_LIT:0>]<EOL><DEDENT>if workflow.cp.has_option("<STR_LIT>",<EOL>"<STR_LIT>"):<EOL><INDENT>align_job = LigolwCBCAlignTotalSpinExecutable(workflow.cp,<EOL>'<STR_LIT>', tags=curr_tags, out_dir=output_dir,<EOL>ifos=ifos)<EOL>node = align_job.create_node(inj_file, full_segment, curr_tags)<EOL>if injection_method == "<STR_LIT>":<EOL><INDENT>workflow.execute_node(node)<EOL><DEDENT>else:<EOL><INDENT>workflow.add_node(node)<EOL><DEDENT>inj_file = node.output_files[<NUM_LIT:0>]<EOL><DEDENT>inj_files.append(inj_file)<EOL><DEDENT>else:<EOL><INDENT>err = "<STR_LIT>"<EOL>err += "<STR_LIT>" % (injection_method)<EOL>raise ValueError(err)<EOL><DEDENT>inj_tags.append(inj_tag)<EOL><DEDENT>logging.info("<STR_LIT>")<EOL>return inj_files, inj_tags<EOL>
This function is the gateway for setting up injection-generation jobs in a workflow. It should be possible for this function to support a number of different ways/codes that could be used for doing this, however as this will presumably stay as a single call to a single code (which need not be inspinj) there are currently no subfunctions in this module. Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the coincidence jobs will be added to. output_dir : path The directory in which injection files will be stored. inj_section_name : string (optional, default='injections') The string that corresponds to the option describing the exe location in the [executables] section of the .ini file and that corresponds to the section (and sub-sections) giving the options that will be given to the code at run time. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. This will be used in output names. Returns -------- inj_files : pycbc.workflow.core.FileList The list of injection files created by this call. inj_tags : list of strings The tag corresponding to each injection file and used to uniquely identify them. The FileList class contains functions to search based on tags.
f15998:m3
def set_grb_start_end(cp, start, end):
cp.set("<STR_LIT>", "<STR_LIT>", str(start))<EOL>cp.set("<STR_LIT>", "<STR_LIT>", str(end))<EOL>return cp<EOL>
Function to update analysis boundaries as workflow is generated Parameters ---------- cp : pycbc.workflow.configuration.WorkflowConfigParser object The parsed configuration options of a pycbc.workflow.core.Workflow. start : int The start of the workflow analysis time. end : int The end of the workflow analysis time. Returns -------- cp : pycbc.workflow.configuration.WorkflowConfigParser object The modified WorkflowConfigParser object.
f15999:m0
def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):
if os.getenv("<STR_LIT>") is None:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>lalDir = os.getenv("<STR_LIT>")<EOL>sci_seg = segments.segment(int(cp.get("<STR_LIT>", "<STR_LIT>")),<EOL>int(cp.get("<STR_LIT>", "<STR_LIT>")))<EOL>file_list = FileList([])<EOL>if bank_veto:<EOL><INDENT>shutil.copy("<STR_LIT>""<STR_LIT>" % lalDir, "<STR_LIT:%s>" % run_dir)<EOL>bank_veto_url = "<STR_LIT>" % run_dir<EOL>bank_veto = File(ifos, "<STR_LIT>", sci_seg,<EOL>file_url=bank_veto_url)<EOL>bank_veto.PFN(bank_veto.cache_entry.path, site="<STR_LIT>")<EOL>file_list.extend(FileList([bank_veto]))<EOL><DEDENT>if summary_files:<EOL><INDENT>shutil.copy("<STR_LIT>""<STR_LIT>" % lalDir, "<STR_LIT:%s>" % run_dir)<EOL>summary_js_url = "<STR_LIT>"% run_dir<EOL>summary_js = File(ifos, "<STR_LIT>", sci_seg,<EOL>file_url=summary_js_url)<EOL>summary_js.PFN(summary_js.cache_entry.path, site="<STR_LIT>")<EOL>file_list.extend(FileList([summary_js]))<EOL>shutil.copy("<STR_LIT>""<STR_LIT>" % lalDir, "<STR_LIT:%s>" % run_dir)<EOL>summary_css_url = "<STR_LIT>"% run_dir<EOL>summary_css = File(ifos, "<STR_LIT>", sci_seg,<EOL>file_url=summary_css_url)<EOL>summary_css.PFN(summary_css.cache_entry.path, site="<STR_LIT>")<EOL>file_list.extend(FileList([summary_css]))<EOL><DEDENT>return file_list<EOL><DEDENT>
Retrieve files needed to run coh_PTF jobs within a PyGRB workflow Parameters ---------- cp : pycbc.workflow.configuration.WorkflowConfigParser object The parsed configuration options of a pycbc.workflow.core.Workflow. ifos : str String containing the analysis interferometer IDs. run_dir : str The run directory, destination for retrieved files. bank_veto : Boolean If true, will retrieve the bank_veto_bank.xml file. summary_files : Boolean If true, will retrieve the summary page style files. Returns ------- file_list : pycbc.workflow.FileList object A FileList containing the retrieved files.
f15999:m1
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
<EOL>xmldoc = ligolw.Document()<EOL>xmldoc.appendChild(ligolw.LIGO_LW())<EOL>tbl = lsctables.New(lsctables.ExtTriggersTable)<EOL>cols = tbl.validcolumns<EOL>xmldoc.childNodes[-<NUM_LIT:1>].appendChild(tbl)<EOL>row = tbl.appendRow()<EOL>setattr(row, "<STR_LIT>", float(cp.get("<STR_LIT>", "<STR_LIT>")))<EOL>setattr(row, "<STR_LIT>", float(cp.get("<STR_LIT>", "<STR_LIT>")))<EOL>setattr(row, "<STR_LIT>", int(cp.get("<STR_LIT>", "<STR_LIT>")))<EOL>setattr(row, "<STR_LIT>", str(cp.get("<STR_LIT>", "<STR_LIT>")))<EOL>for entry in cols.keys():<EOL><INDENT>if not hasattr(row, entry):<EOL><INDENT>if cols[entry] in ['<STR_LIT>','<STR_LIT>']:<EOL><INDENT>setattr(row,entry,<NUM_LIT:0.>)<EOL><DEDENT>elif cols[entry] == '<STR_LIT>':<EOL><INDENT>setattr(row,entry,<NUM_LIT:0>)<EOL><DEDENT>elif cols[entry] == '<STR_LIT>':<EOL><INDENT>setattr(row,entry,'<STR_LIT>')<EOL><DEDENT>elif entry == '<STR_LIT>':<EOL><INDENT>row.process_id = ilwd.ilwdchar("<STR_LIT>")<EOL><DEDENT>elif entry == '<STR_LIT>':<EOL><INDENT>row.event_id = ilwd.ilwdchar("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>" %(entry), file=sys.stderr)<EOL>raise ValueError<EOL><DEDENT><DEDENT><DEDENT>xml_file_name = "<STR_LIT>" % str(cp.get("<STR_LIT>",<EOL>"<STR_LIT>"))<EOL>xml_file_path = os.path.join(out_dir, xml_file_name)<EOL>utils.write_filename(xmldoc, xml_file_path)<EOL>xml_file_url = urlparse.urljoin("<STR_LIT>", urllib.pathname2url(xml_file_path))<EOL>xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)<EOL>xml_file.PFN(xml_file_url, site="<STR_LIT>")<EOL>return xml_file<EOL>
Make an ExtTrig xml file containing information on the external trigger Parameters ---------- cp : pycbc.workflow.configuration.WorkflowConfigParser object The parsed configuration options of a pycbc.workflow.core.Workflow. ifos : str String containing the analysis interferometer IDs. sci_seg : ligo.segments.segment The science segment for the analysis run. out_dir : str The output directory, destination for xml file. Returns ------- xml_file : pycbc.workflow.File object The xml file with external trigger information.
f15999:m2
def get_ipn_sky_files(workflow, file_url, tags=None):
tags = tags or []<EOL>ipn_sky_points = resolve_url(file_url)<EOL>sky_points_url = urlparse.urljoin("<STR_LIT>",<EOL>urllib.pathname2url(ipn_sky_points))<EOL>sky_points_file = File(workflow.ifos, "<STR_LIT>",<EOL>workflow.analysis_time, file_url=sky_points_url, tags=tags)<EOL>sky_points_file.PFN(sky_points_url, site="<STR_LIT>")<EOL>return sky_points_file<EOL>
Retreive the sky point files for searching over the IPN error box and populating it with injections. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. file_url : string The URL of the IPN sky points file. tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- sky_points_file : pycbc.workflow.core.File File object representing the IPN sky points file.
f15999:m3
def make_gating_node(workflow, datafind_files, outdir=None, tags=None):
cp = workflow.cp<EOL>if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>condition_strain_class = select_generic_executable(workflow,<EOL>"<STR_LIT>")<EOL>condition_strain_nodes = []<EOL>condition_strain_outs = FileList([])<EOL>for ifo in workflow.ifos:<EOL><INDENT>input_files = FileList([datafind_file for datafind_file indatafind_files if datafind_file.ifo == ifo])<EOL>condition_strain_jobs = condition_strain_class(cp, "<STR_LIT>",<EOL>ifo=ifo, out_dir=outdir, tags=tags)<EOL>condition_strain_node, condition_strain_out =condition_strain_jobs.create_node(input_files, tags=tags)<EOL>condition_strain_nodes.append(condition_strain_node)<EOL>condition_strain_outs.extend(FileList([condition_strain_out]))<EOL><DEDENT>return condition_strain_nodes, condition_strain_outs<EOL>
Generate jobs for autogating the data for PyGRB runs. Parameters ---------- workflow: pycbc.workflow.core.Workflow An instanced class that manages the constructed workflow. datafind_files : pycbc.workflow.core.FileList A FileList containing the frame files to be gated. outdir : string Path of the output directory tags : list of strings If given these tags are used to uniquely name and identify output files that would be produced in multiple calls to this function. Returns -------- condition_strain_nodes : list List containing the pycbc.workflow.core.Node objects representing the autogating jobs. condition_strain_outs : pycbc.workflow.core.FileList FileList containing the pycbc.workflow.core.File objects representing the gated frame files.
f15999:m4
def get_sky_grid_scale(sky_error, sigma_sys=<NUM_LIT>):
return <NUM_LIT> * (sky_error**<NUM_LIT:2> + sigma_sys**<NUM_LIT:2>)**<NUM_LIT:0.5><EOL>
Calculate suitable 3-sigma radius of the search patch, incorporating Fermi GBM systematic if necessary.
f15999:m5
def make_seg_table(workflow, seg_files, seg_names, out_dir, tags=None,<EOL>title_text=None, description=None):
seg_files = list(seg_files)<EOL>seg_names = list(seg_names)<EOL>if tags is None: tags = []<EOL>makedir(out_dir)<EOL>node = PlotExecutable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_list_opt('<STR_LIT>', seg_files)<EOL>quoted_seg_names = []<EOL>for s in seg_names:<EOL><INDENT>quoted_seg_names.append("<STR_LIT:'>" + s + "<STR_LIT:'>")<EOL><DEDENT>node.add_opt('<STR_LIT>', '<STR_LIT:U+0020>'.join(quoted_seg_names))<EOL>if description:<EOL><INDENT>node.add_opt('<STR_LIT>', "<STR_LIT:'>" + description + "<STR_LIT:'>")<EOL><DEDENT>if title_text:<EOL><INDENT>node.add_opt('<STR_LIT>', "<STR_LIT:'>" + title_text + "<STR_LIT:'>")<EOL><DEDENT>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files[<NUM_LIT:0>]<EOL>
Creates a node in the workflow for writing the segment summary table. Returns a File instances for the output file.
f16000:m12
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None):
if vetodef_file is None:<EOL><INDENT>vetodef_file = workflow.cp.get_opt_tags("<STR_LIT>",<EOL>"<STR_LIT>", [])<EOL>file_url = urlparse.urljoin('<STR_LIT>',<EOL>urllib.pathname2url(vetodef_file))<EOL>vdf_file = File(workflow.ifos, '<STR_LIT>',<EOL>workflow.analysis_time, file_url=file_url)<EOL>vdf_file.PFN(file_url, site='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>vdf_file = vetodef_file<EOL><DEDENT>if tags is None: tags = []<EOL>makedir(out_dir)<EOL>node = PlotExecutable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_opt('<STR_LIT>', vdf_file)<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files[<NUM_LIT:0>]<EOL>
Creates a node in the workflow for writing the veto_definer table. Returns a File instances for the output file.
f16000:m13
def make_seg_plot(workflow, seg_files, out_dir, seg_names=None, tags=None):
seg_files = list(seg_files)<EOL>if tags is None: tags = []<EOL>makedir(out_dir)<EOL>node = PlotExecutable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_list_opt('<STR_LIT>', seg_files)<EOL>quoted_seg_names = []<EOL>for s in seg_names:<EOL><INDENT>quoted_seg_names.append("<STR_LIT:'>" + s + "<STR_LIT:'>")<EOL><DEDENT>node.add_opt('<STR_LIT>', '<STR_LIT:U+0020>'.join(quoted_seg_names))<EOL>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files[<NUM_LIT:0>]<EOL>
Creates a node in the workflow for plotting science, and veto segments.
f16000:m14
def make_ifar_plot(workflow, trigger_file, out_dir, tags=None,<EOL>hierarchical_level=None):
if hierarchical_level is not None and tags:<EOL><INDENT>tags = [("<STR_LIT>".format(<EOL>hierarchical_level))] + tags<EOL><DEDENT>elif hierarchical_level is not None and not tags:<EOL><INDENT>tags = ["<STR_LIT>".format(hierarchical_level)]<EOL><DEDENT>elif hierarchical_level is None and not tags:<EOL><INDENT>tags = []<EOL><DEDENT>makedir(out_dir)<EOL>node = PlotExecutable(workflow.cp, '<STR_LIT>', ifos=workflow.ifos,<EOL>out_dir=out_dir, tags=tags).create_node()<EOL>node.add_input_opt('<STR_LIT>', trigger_file)<EOL>if hierarchical_level is not None:<EOL><INDENT>node.add_opt('<STR_LIT>', hierarchical_level)<EOL><DEDENT>node.new_output_file_opt(workflow.analysis_time, '<STR_LIT>', '<STR_LIT>')<EOL>workflow += node<EOL>return node.output_files[<NUM_LIT:0>]<EOL>
Creates a node in the workflow for plotting cumulative histogram of IFAR values.
f16000:m15
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()<EOL>magic = f.readline()<EOL>if not re.search(self.magic_re, magic):<EOL><INDENT>f.close()<EOL>raise LoadError(<EOL>"<STR_LIT>" %<EOL>filename)<EOL><DEDENT>try:<EOL><INDENT>while <NUM_LIT:1>:<EOL><INDENT>line = f.readline()<EOL>if line == "<STR_LIT>": break<EOL>if line.endswith("<STR_LIT:\n>"): line = line[:-<NUM_LIT:1>]<EOL>sline = line.strip()<EOL>if sline.startswith("<STR_LIT>"):<EOL><INDENT>line = sline[<NUM_LIT:10>:]<EOL><DEDENT>elif (sline.startswith(("<STR_LIT:#>", "<STR_LIT:$>")) or sline == "<STR_LIT>"):<EOL><INDENT>continue<EOL><DEDENT>domain, domain_specified, path, secure, expires, name, value =line.split("<STR_LIT:\t>")<EOL>secure = (secure == "<STR_LIT>")<EOL>domain_specified = (domain_specified == "<STR_LIT>")<EOL>if name == "<STR_LIT>":<EOL><INDENT>name = value<EOL>value = None<EOL><DEDENT>initial_dot = domain.startswith("<STR_LIT:.>")<EOL>assert domain_specified == initial_dot<EOL>discard = False<EOL>if expires == "<STR_LIT>":<EOL><INDENT>expires = None<EOL>discard = True<EOL><DEDENT>c = Cookie(<NUM_LIT:0>, name, value,<EOL>None, False,<EOL>domain, domain_specified, initial_dot,<EOL>path, False,<EOL>secure,<EOL>expires,<EOL>discard,<EOL>None,<EOL>None,<EOL>{})<EOL>if not ignore_discard and c.discard:<EOL><INDENT>continue<EOL><DEDENT>if not ignore_expires and c.is_expired(now):<EOL><INDENT>continue<EOL><DEDENT>self.set_cookie(c)<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>raise<EOL><DEDENT>except Exception:<EOL><INDENT>_warn_unhandled_exception()<EOL>raise LoadError("<STR_LIT>" %<EOL>(filename, line))<EOL><DEDENT>
This function is required to monkey patch MozillaCookieJar's _really_load function which does not understand the curl format cookie file created by ecp-cookie-init. It patches the code so that #HttpOnly_ get loaded. https://bugs.python.org/issue2190 https://bugs.python.org/file37625/httponly.patch
f16001:m0
def istext(s, text_characters=None, threshold=<NUM_LIT>):
text_characters = "<STR_LIT>".join(map(chr, range(<NUM_LIT:32>, <NUM_LIT>))) + "<STR_LIT>"<EOL>_null_trans = string.maketrans("<STR_LIT>", "<STR_LIT>")<EOL>if "<STR_LIT>" in s:<EOL><INDENT>return False<EOL><DEDENT>if not s:<EOL><INDENT>return True<EOL><DEDENT>t = s.translate(_null_trans, text_characters)<EOL>return len(t)/float(len(s)) <= threshold<EOL>
Determines if the string is a set of binary data or a text file. This is done by checking if a large proportion of characters are > 0X7E (0x7F is <DEL> and unprintable) or low bit control codes. In other words things that you wouldn't see (often) in a text file. (ASCII past 0x7F might appear, but rarely). Code modified from https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s12.html
f16001:m1
def resolve_url(url, directory=None, permissions=None):
u = urlparse(url)<EOL>if directory is None:<EOL><INDENT>directory = os.getcwd()<EOL><DEDENT>filename = os.path.join(directory,os.path.basename(u.path))<EOL>if u.scheme == '<STR_LIT>' or u.scheme == '<STR_LIT:file>':<EOL><INDENT>if os.path.isfile(u.path):<EOL><INDENT>if os.path.isfile(filename):<EOL><INDENT>src_inode = os.stat(u.path)[stat.ST_INO]<EOL>dst_inode = os.stat(filename)[stat.ST_INO]<EOL>if src_inode != dst_inode:<EOL><INDENT>shutil.copy(u.path, filename)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>shutil.copy(u.path, filename)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>errmsg = "<STR_LIT>" % (u.path, url)<EOL>raise ValueError(errmsg)<EOL><DEDENT><DEDENT>elif u.scheme == '<STR_LIT:http>' or u.scheme == '<STR_LIT>':<EOL><INDENT>s = requests.Session()<EOL>s.mount(str(u.scheme)+'<STR_LIT>',<EOL>requests.adapters.HTTPAdapter(max_retries=<NUM_LIT:5>))<EOL>cookie_dict = {}<EOL>ecp_file = '<STR_LIT>' % os.getuid()<EOL>if os.path.isfile(ecp_file):<EOL><INDENT>cj = cookielib.MozillaCookieJar()<EOL>cj.load(ecp_file, ignore_discard=True, ignore_expires=True)<EOL><DEDENT>else:<EOL><INDENT>cj = []<EOL><DEDENT>for c in cj:<EOL><INDENT>if c.domain == u.netloc:<EOL><INDENT>cookie_dict[c.name] = c.value<EOL><DEDENT>elif u.netloc == "<STR_LIT>" andc.domain == "<STR_LIT>":<EOL><INDENT>cookie_dict[c.name] = c.value<EOL><DEDENT><DEDENT>r = s.get(url, cookies=cookie_dict, allow_redirects=True)<EOL>if r.status_code != <NUM_LIT:200>:<EOL><INDENT>errmsg = "<STR_LIT>" % (url,<EOL>r.status_code)<EOL>raise ValueError(errmsg)<EOL><DEDENT>if u.netloc == '<STR_LIT>' or u.netloc == '<STR_LIT>':<EOL><INDENT>if istext(r.content):<EOL><INDENT>soup = BeautifulSoup(r.content, '<STR_LIT>')<EOL>desc = soup.findAll(attrs={"<STR_LIT>":"<STR_LIT>"})<EOL>if len(desc) anddesc[<NUM_LIT:0>]['<STR_LIT:content>'] == '<STR_LIT>':<EOL><INDENT>raise ValueError(ecp_cookie_error.format(url))<EOL><DEDENT><DEDENT><DEDENT>output_fp = open(filename, 
'<STR_LIT:w>')<EOL>output_fp.write(r.content)<EOL>output_fp.close()<EOL><DEDENT>else:<EOL><INDENT>errmsg = "<STR_LIT>" % (u.scheme)<EOL>errmsg += "<STR_LIT>"<EOL>raise ValueError(errmsg)<EOL><DEDENT>if not os.path.isfile(filename):<EOL><INDENT>errmsg = "<STR_LIT>" % (filename,url)<EOL>raise ValueError(errmsg)<EOL><DEDENT>if permissions:<EOL><INDENT>if os.access(filename, os.W_OK):<EOL><INDENT>os.chmod(filename, permissions)<EOL><DEDENT>else:<EOL><INDENT>s = os.stat(filename)[stat.ST_MODE]<EOL>if (s & permissions) != permissions:<EOL><INDENT>errmsg = "<STR_LIT>" % url<EOL>raise ValueError(errmsg)<EOL><DEDENT><DEDENT><DEDENT>return filename<EOL>
Resolves a URL to a local file, and returns the path to that file.
f16001:m2
def add_workflow_command_line_group(parser):
workflowArgs = parser.add_argument_group('<STR_LIT>',<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>workflowArgs.add_argument("<STR_LIT>", nargs="<STR_LIT:+>", action='<STR_LIT:store>',<EOL>metavar="<STR_LIT>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>")<EOL>workflowArgs.add_argument("<STR_LIT>", nargs="<STR_LIT:*>", action='<STR_LIT:store>',<EOL>metavar="<STR_LIT>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL>workflowArgs.add_argument("<STR_LIT>", nargs="<STR_LIT:*>", action='<STR_LIT:store>',<EOL>metavar="<STR_LIT>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL>
The standard way of initializing a ConfigParser object in workflow will be to do it from the command line. This is done by giving a --local-config-files filea.ini fileb.ini filec.ini command. You can also set config file override commands on the command line. This will be most useful when setting (for example) start and end times, or active ifos. This is done by --config-overrides section1:option1:value1 section2:option2:value2 ... This can also be given as --config-overrides section1:option1 where the value will be left as ''. To remove a configuration option, use the command line argument --config-delete section1:option1 which will delete option1 from [section1] or --config-delete section1 to delete all of the options in [section1] Deletes are implemented before overrides. This function returns an argparse OptionGroup to ensure these options are parsed correctly and can then be sent directly to initialize an WorkflowConfigParser. Parameters ----------- parser : argparse.ArgumentParser instance The initialized argparse instance to add the workflow option group to.
f16001:m3
def __init__(self, configFiles=None, overrideTuples=None, parsedFilePath=None, deleteTuples=None):
if configFiles is None:<EOL><INDENT>configFiles = []<EOL><DEDENT>if overrideTuples is None:<EOL><INDENT>overrideTuples = []<EOL><DEDENT>if deleteTuples is None:<EOL><INDENT>deleteTuples = []<EOL><DEDENT>glue.pipeline.DeepCopyableConfigParser.__init__(self)<EOL>self.optionxform = str<EOL>configFiles = [resolve_url(cFile) for cFile in configFiles]<EOL>self.read_ini_file(configFiles)<EOL>self.perform_exe_expansion()<EOL>self.split_multi_sections()<EOL>self.populate_shared_sections()<EOL>for delete in deleteTuples:<EOL><INDENT>if len(delete) == <NUM_LIT:1>:<EOL><INDENT>if self.remove_section(delete[<NUM_LIT:0>]) is False:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>" % delete )<EOL><DEDENT>else:<EOL><INDENT>logging.info("<STR_LIT>",<EOL>delete[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>elif len(delete) == <NUM_LIT:2>:<EOL><INDENT>if self.remove_option(delete[<NUM_LIT:0>],delete[<NUM_LIT:1>]) is False:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>" % delete )<EOL><DEDENT>else:<EOL><INDENT>logging.info("<STR_LIT>"<EOL>"<STR_LIT>", delete[<NUM_LIT:1>], delete[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>" % str(delete) )<EOL><DEDENT><DEDENT>for override in overrideTuples:<EOL><INDENT>if len(override) not in [<NUM_LIT:2>,<NUM_LIT:3>]:<EOL><INDENT>errmsg = "<STR_LIT>"<EOL>errmsg = "<STR_LIT>" % (str(override) )<EOL>raise ValueError(errmsg)<EOL><DEDENT>section = override[<NUM_LIT:0>]<EOL>option = override[<NUM_LIT:1>]<EOL>value = '<STR_LIT>'<EOL>if len(override) == <NUM_LIT:3>:<EOL><INDENT>value = override[<NUM_LIT:2>]<EOL><DEDENT>if not self.has_section(section):<EOL><INDENT>self.add_section(section)<EOL><DEDENT>self.set(section, option, value)<EOL>logging.info("<STR_LIT>"<EOL>"<STR_LIT>", section, option, value )<EOL><DEDENT>self.perform_extended_interpolation()<EOL>self.sanity_check_subsections()<EOL>if parsedFilePath:<EOL><INDENT>fp = 
open(parsedFilePath,'<STR_LIT:w>')<EOL>self.write(fp)<EOL>fp.close()<EOL><DEDENT>
Initialize an WorkflowConfigParser. This reads the input configuration files, overrides values if necessary and performs the interpolation. See https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html Parameters ----------- configFiles : Path to .ini file, or list of paths The file(s) to be read in and parsed. overrideTuples : List of (section, option, value) tuples Add the (section, option, value) triplets provided in this list to the provided .ini file(s). If the section, option pair is already present, it will be overwritten. parsedFilePath : Path, optional (default=None) If given, write the parsed .ini file back to disk at this location. deleteTuples : List of (section, option) tuples Delete the (section, option) pairs provided in this list from provided .ini file(s). If the section only is provided, the entire section will be deleted. Returns -------- WorkflowConfigParser Initialized WorkflowConfigParser instance.
f16001:c0:m0
@classmethod<EOL><INDENT>def from_args(cls, args):<DEDENT>
<EOL>confFiles = []<EOL>if args.config_files:<EOL><INDENT>confFiles += args.config_files<EOL><DEDENT>confDeletes = args.config_delete or []<EOL>parsedDeletes = []<EOL>for delete in confDeletes:<EOL><INDENT>splitDelete = delete.split("<STR_LIT::>")<EOL>if len(splitDelete) > <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>" % str(delete))<EOL><DEDENT>else:<EOL><INDENT>parsedDeletes.append(tuple(splitDelete))<EOL><DEDENT><DEDENT>confOverrides = args.config_overrides or []<EOL>parsedOverrides = []<EOL>for override in confOverrides:<EOL><INDENT>splitOverride = override.split("<STR_LIT::>")<EOL>if len(splitOverride) == <NUM_LIT:3>:<EOL><INDENT>parsedOverrides.append(tuple(splitOverride))<EOL><DEDENT>elif len(splitOverride) == <NUM_LIT:2>:<EOL><INDENT>parsedOverrides.append(tuple(splitOverride + ["<STR_LIT>"]))<EOL><DEDENT>elif len(splitOverride) > <NUM_LIT:3>:<EOL><INDENT>rec_value = '<STR_LIT::>'.join(splitOverride[<NUM_LIT:2>:])<EOL>parsedOverrides.append(tuple(splitOverride[:<NUM_LIT:2>] + [rec_value]))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>" % str(override))<EOL><DEDENT><DEDENT>return cls(confFiles, parsedOverrides, None, parsedDeletes)<EOL>
Initialize a WorkflowConfigParser instance using the command line values parsed in args. args must contain the values provided by the workflow_command_line_group() function. If you are not using the standard workflow command line interface, you should probably initialize directly using __init__() Parameters ----------- args : argparse.ArgumentParser The command line arguments parsed by argparse
f16001:c0:m1
def read_ini_file(self, cpFile):
<EOL>self.read(cpFile)<EOL>
Read a .ini file and return it as a ConfigParser class. This function does none of the parsing/combining of sections. It simply reads the file and returns it unedited Stub awaiting more functionality - see configparser_test.py Parameters ---------- cpFile : Path to .ini file, or list of paths The path(s) to a .ini file to be read in Returns ------- cp : ConfigParser The ConfigParser class containing the read in .ini file
f16001:c0:m2