_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def update(self, **params):
    """Update the current parameter positions and reset the stats.

    If any sampling transforms are specified, they are applied to the
    params before being stored.

    Parameters
    ----------
    \**params :
        Key/value pairs of the new parameter values.
    """
    self._current_stats = ModelStats()
    self._current_params = self._transform_params(**params)
def get_current_stats(self, names=None):
    """Return the current values of one or more stats as a tuple.

    This does no computation; it only returns what has already been
    calculated. A stat that has not been calculated yet is returned as
    ``numpy.nan``.

    Parameters
    ----------
    names : list of str, optional
        Names of the stats to retrieve. If ``None`` (the default),
        ``default_stats`` is used.

    Returns
    -------
    tuple :
        The current values of the requested stats, in the same order
        as ``names``.
    """
    requested = self.default_stats if names is None else names
    return self._current_stats.getstats(requested)
q31902 | BaseModel._trytoget | train | def _trytoget(self, statname, fallback, **kwargs):
"""Helper function to get a stat from ``_current_stats``.
If the statistic hasn't been calculated, ``_current_stats`` will raise
an ``AttributeError``. In that case, the ``fallback`` function will
be called. If that call is successful, the ``statname`` will be added
to ``_current_stats`` with the returned value.
Parameters
----------
statname : str
The stat to get from ``current_stats``.
fallback : method of self
The function to call if the property call fails.
\**kwargs :
Any other keyword arguments are passed through to the function.
Returns
-------
float :
The value of the property.
"""
try:
return getattr(self._current_stats, statname)
except AttributeError:
val = fallback(**kwargs)
setattr(self._current_stats, statname, val)
return val | python | {
"resource": ""
} |
q31903 | BaseModel._logjacobian | train | def _logjacobian(self):
"""Calculates the logjacobian of the current parameters."""
if self.sampling_transforms is None:
logj = 0.
else:
logj = self.sampling_transforms.logjacobian(
**self.current_params)
return logj | python | {
"resource": ""
} |
q31904 | BaseModel._logprior | train | def _logprior(self):
"""Calculates the log prior at the current parameters."""
logj = self.logjacobian
logp = self.prior_distribution(**self.current_params) + logj
if numpy.isnan(logp):
logp = -numpy.inf
return logp | python | {
"resource": ""
} |
def logposterior(self):
    """Return the log of the posterior at the current parameter values.

    The log prior is evaluated first; if it is ``-inf`` (possibly
    indicating a non-physical point), the likelihood is never called.
    """
    logp = self.logprior
    if logp == -numpy.inf:
        return logp
    return logp + self.loglikelihood
def prior_rvs(self, size=1, prior=None):
    """Draw random variates from the prior.

    If the ``sampling_params`` differ from the ``variable_params``, the
    variates are transformed to the sampling-parameter space before
    being returned.

    Parameters
    ----------
    size : int, optional
        Number of random values to return for each parameter.
        Default is 1.
    prior : JointDistribution, optional
        Draw values from the given prior rather than the saved one.

    Returns
    -------
    FieldArray
        A field array of the random values.
    """
    if prior is None:
        prior = self.prior_distribution
    draws = prior.rvs(size=size)
    if self.sampling_transforms is not None:
        # go to the sampling-parameter space, keeping only the
        # sampling parameters
        transformed = self.sampling_transforms.apply(draws)
        draws = FieldArray.from_arrays(
            [transformed[arg] for arg in self.sampling_params],
            names=self.sampling_params)
    return draws
q31907 | BaseModel._transform_params | train | def _transform_params(self, **params):
"""Applies all transforms to the given params.
Parameters
----------
\**params :
Key, value pairs of parameters to apply the transforms to.
Returns
-------
dict
A dictionary of the transformed parameters.
"""
# apply inverse transforms to go from sampling parameters to
# variable args
if self.sampling_transforms is not None:
params = self.sampling_transforms.apply(params, inverse=True)
# apply waveform transforms
if self.waveform_transforms is not None:
params = transforms.apply_transforms(params,
self.waveform_transforms,
inverse=False)
# apply boundary conditions
params = self.prior_distribution.apply_boundary_conditions(**params)
return params | python | {
"resource": ""
} |
def extra_args_from_config(cp, section, skip_args=None, dtypes=None):
    """Read any additional keyword arguments from a config-file section.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config file parser to read.
    section : str
        The name of the section to read.
    skip_args : list of str, optional
        Names of arguments to skip.
    dtypes : dict, optional
        A dictionary of arguments -> data types. An argument found in
        the dict is cast to the given datatype; otherwise its value is
        left as the string read from the config file.

    Returns
    -------
    dict
        Dictionary of keyword arguments read from the config file.
    """
    if dtypes is None:
        dtypes = {}
    if skip_args is None:
        skip_args = []
    kwargs = {}
    for opt in cp.options(section):
        if opt in skip_args:
            continue
        val = cp.get(section, opt)
        # cast the value if a datatype was specified for this option
        if opt in dtypes:
            val = dtypes[opt](val)
        kwargs[opt] = val
    return kwargs
def prior_from_config(cp, variable_params, prior_section,
                      constraint_section):
    """Build the prior from a config file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config file parser to read.
    variable_params : list
        List of model parameter names.
    prior_section : str
        Section to read prior(s) from.
    constraint_section : str
        Section to read constraint(s) from.

    Returns
    -------
    pycbc.distributions.JointDistribution
        The prior.
    """
    logging.info("Setting up priors for each parameter")
    # one distribution per variable parameter, plus any constraints
    param_dists = distributions.read_distributions_from_config(
        cp, prior_section)
    param_constraints = distributions.read_constraints_from_config(
        cp, constraint_section)
    return distributions.JointDistribution(
        variable_params, *param_dists, constraints=param_constraints)
def _init_args_from_config(cls, cp):
    """Load common model arguments from a config file.

    This retrieves the prior, variable parameters, static parameters,
    constraints, sampling transforms, and waveform transforms (if
    provided).

    Parameters
    ----------
    cp : ConfigParser
        Config parser to read.

    Returns
    -------
    dict :
        Dictionary of the arguments, with keys ``variable_params``,
        ``static_params``, ``prior``, and ``sampling_transforms``. If
        waveform transforms are in the config file, it will also have
        ``waveform_transforms``.
    """
    section = "model"
    prior_section = "prior"
    vparams_section = 'variable_params'
    sparams_section = 'static_params'
    constraint_section = 'constraint'
    # the [model] section's name must match this class's name
    name = cp.get(section, 'name')
    if name != cls.name:
        raise ValueError("section's {} name does not match mine {}".format(
            name, cls.name))
    # read variable and static parameters
    variable_params, static_params = distributions.read_params_from_config(
        cp, prior_section=prior_section, vargs_section=vparams_section,
        sargs_section=sparams_section)
    args = {
        'variable_params': variable_params,
        'static_params': static_params,
        'prior': cls.prior_from_config(cp, variable_params, prior_section,
                                       constraint_section),
    }
    # sampling transforms are optional; from_config raises ValueError
    # when none are present
    try:
        sampling_transforms = SamplingTransforms.from_config(
            cp, variable_params)
    except ValueError:
        sampling_transforms = None
    args['sampling_transforms'] = sampling_transforms
    # waveform transforms are optional too
    if any(cp.get_subsections('waveform_transforms')):
        logging.info("Loading waveform transforms")
        args['waveform_transforms'] = \
            transforms.read_transforms_from_config(
                cp, 'waveform_transforms')
    return args
def write_metadata(self, fp):
    """Write this model's metadata to the given file handler.

    Parameters
    ----------
    fp : pycbc.inference.io.BaseInferenceFile instance
        The inference file to write to.
    """
    attrs = fp.attrs
    attrs['model'] = self.name
    attrs['variable_params'] = list(self.variable_params)
    attrs['sampling_params'] = list(self.sampling_params)
    fp.write_kwargs_to_attrs(attrs, static_params=self.static_params)
def get_cov_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
                   lambda1=None, lambda2=None, quadparam1=None,
                   quadparam2=None):
    """Convert masses and spins to positions in the xi parameter space.

    Xi = Cartesian metric rotated to principal components.

    Parameters
    -----------
    mass1 : float
        Mass of heavier body.
    mass2 : float
        Mass of lighter body.
    spin1z : float
        Spin of body 1.
    spin2z : float
        Spin of body 2.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the
        metric and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    fUpper : float
        The value of fUpper to use when getting the mu coordinates from
        the lambda coordinates. This must be a key in
        metricParams.evals, metricParams.evecs and metricParams.evecsCV.

    Returns
    --------
    xis : list of floats or numpy.arrays
        Position of the system(s) in the xi coordinate system.
    """
    # masses -> lambdas -> mus ...
    mus = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams,
                          fUpper, lambda1=lambda1, lambda2=lambda2,
                          quadparam1=quadparam1, quadparam2=quadparam2)
    # ... then mus -> xis via the covariance eigenvectors
    return get_covaried_params(mus, metricParams.evecsCV[fUpper])
def get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
                    lambda1=None, lambda2=None, quadparam1=None,
                    quadparam2=None):
    """Convert masses and spins to positions in the mu parameter space.

    Mu = Cartesian metric, but not principal components.

    Parameters
    -----------
    mass1 : float
        Mass of heavier body.
    mass2 : float
        Mass of lighter body.
    spin1z : float
        Spin of body 1.
    spin2z : float
        Spin of body 2.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the
        metric and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    fUpper : float
        The value of fUpper to use when getting the mu coordinates from
        the lambda coordinates. This must be a key in
        metricParams.evals and metricParams.evecs.

    Returns
    --------
    mus : list of floats or numpy.arrays
        Position of the system(s) in the mu coordinate system.
    """
    # masses -> lambdas ...
    lambdas = get_chirp_params(mass1, mass2, spin1z, spin2z,
                               metricParams.f0, metricParams.pnOrder,
                               lambda1=lambda1, lambda2=lambda2,
                               quadparam1=quadparam1,
                               quadparam2=quadparam2)
    # ... then lambdas -> mus
    return get_mu_params(lambdas, metricParams, fUpper)
def get_mu_params(lambdas, metricParams, fUpper):
    """Rotate lambda coefficients into the mu coordinate system.

    Mu = Cartesian metric, but not principal components.

    Parameters
    -----------
    lambdas : list of floats or numpy.arrays
        Position of the system(s) in the lambda coefficients.
    metricParams : metricParameters instance
        Structure holding the eigenvalues and eigenvectors needed to
        manipulate the space.
    fUpper : float
        The value of fUpper to use when getting the mu coordinates from
        the lambda coordinates. This must be a key in
        metricParams.evals and metricParams.evecs.

    Returns
    --------
    mus : list of floats or numpy.arrays
        Position of the system(s) in the mu coordinate system.
    """
    lambdas = numpy.asarray(lambdas)
    # promote float inputs to a 2D column so the matrix algebra below
    # works uniformly; flatten again on output
    was_1d = lambdas.ndim == 1
    if was_1d:
        lambdas = lambdas[:, None]
    evecs = numpy.asarray(metricParams.evecs[fUpper])
    evals = metricParams.evals[fUpper]
    # ((lambdas.T).dot(evecs)).T == evecs.T.dot(lambdas)
    mus = evecs.T.dot(lambdas)
    mus = mus * numpy.sqrt(evals)[:, None]
    return mus.flatten() if was_1d else mus
def calc_point_dist(vsA, entryA):
    r"""Compute the metric distance between two points.

    Parameters
    ----------
    vsA : list or numpy.array or similar
        Point 1's position in the \chi_i coordinate system.
    entryA : list or numpy.array or similar
        Point 2's position in the \chi_i coordinate system.

    Returns
    --------
    val : float
        The metric distance between the two points (sum of squared
        coordinate differences).
    """
    diff = vsA - entryA
    return (diff * diff).sum()
def read_injections(sim_files, m_dist, s_dist, d_dist):
    """Read all the injections from the given per-set simulation files.

    The files must belong to individual sets, i.e. no files that
    combine all the injections in a run. Identifies injection
    strategies, finds parameter boundaries, and collects injections
    along with derived quantities.

    Parameters
    ----------
    sim_files: list
        List containing names of the simulation files.
    m_dist: list
        The mass distribution used in each simulation run.
    s_dist: list
        The spin distribution used in each simulation run.
    d_dist: list
        The distance distribution used in each simulation run.

    Returns
    -------
    injections: dictionary
        Contains the organized information about the injections, keyed
        by file index (as a string), plus an overall 'z_range'.
    """
    injections = {}
    min_d, max_d = 1e12, 0
    for i, sim_file in enumerate(sim_files):
        data = process_injections(sim_file)
        data['file_name'] = sim_file
        data['m_dist'] = m_dist[i]
        data['s_dist'] = s_dist[i]
        data['d_dist'] = d_dist[i]
        mass1, mass2 = data['mass1'], data['mass2']
        distance = data['distance']
        total_mass = mass1 + mass2
        data['chirp_mass'] = m1m2tomch(mass1, mass2)
        data['total_mass'] = total_mass
        data['mtot_range'] = [min(total_mass), max(total_mass)]
        data['m1_range'] = [min(mass1), max(mass1)]
        data['m2_range'] = [min(mass2), max(mass2)]
        data['d_range'] = [min(distance), max(distance)]
        # track the global distance extremes across all sets
        min_d = min(min_d, min(distance))
        max_d = max(max_d, max(distance))
        injections[str(i)] = data
    injections['z_range'] = [dlum_to_z(min_d), dlum_to_z(max_d)]
    return injections
def process_injections(hdffile):
    """Read an injection file and extract all injections along with the
    statistics of the found ones.

    Parameters
    ----------
    hdffile: hdf file
        File for which injections are to be processed.

    Returns
    -------
    data: dictionary
        Injection parameters read from the input file, plus 'ifar' and
        'stat' arrays (zero for injections not found after vetoes).
    """
    data = {}
    with h5py.File(hdffile, 'r') as inp:
        found_index = inp['found_after_vetoes/injection_index'][:]
        for param in _save_params:
            data[param] = inp['injections/' + param][:]
        # found injections get their ifar/stat; missed ones stay 0
        for name in ('ifar', 'stat'):
            vals = np.zeros_like(data[_save_params[0]])
            vals[found_index] = inp['found_after_vetoes/' + name][:]
            data[name] = vals
    return data
def astro_redshifts(min_z, max_z, nsamples):
    """Sample source redshifts with a redshift-independent rate, using
    standard cosmology, via rejection sampling.

    Parameters
    ----------
    min_z: float
        Minimum redshift.
    max_z: float
        Maximum redshift.
    nsamples: int
        Number of samples.

    Returns
    -------
    z_astro: array
        nsamples of redshift, between min_z and max_z, distributed
        according to standard cosmology.
    """
    dz, fac = 0.001, 3.0  # fac oversamples the proposals for rejection
    # interpolate the pdf instead of evaluating it for every proposal
    V = quad(contracted_dVdc, 0., max_z)[0]
    zbins = np.arange(min_z, max_z + dz / 2., dz)
    zcenter = (zbins[:-1] + zbins[1:]) / 2
    pdfz = cosmo.differential_comoving_volume(zcenter).value \
        / (1 + zcenter) / V
    int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)
    # rejection sampling: keep proposals that fall under the pdf curve
    rndz = np.random.uniform(min_z, max_z, int(fac * nsamples))
    pdf_zs = int_pdf(rndz)
    rndn = np.random.uniform(0, 1, int(fac * nsamples)) * max(pdf_zs)
    z_astro = rndz[np.where(pdf_zs - rndn > 0)]
    np.random.shuffle(z_astro)
    # NOTE(review): resize pads with zeros if fewer than nsamples
    # proposals were accepted -- relies on fac oversampling enough;
    # confirm this is the intended behavior.
    z_astro.resize(nsamples)
    return z_astro
def inj_mass_pdf(key, mass1, mass2, lomass, himass, lomass_2=0, himass_2=0):
    """Estimate the mass probability density of injections for a given
    injection strategy.

    Parameters
    ----------
    key: string
        Injection strategy: 'totalMass' (uniform in total mass, both
        components sharing the same bounds), 'componentMass' (each
        component independently uniform), or 'log' (each component
        independently uniform in log).
    mass1: array
        First mass of the injections.
    mass2: array
        Second mass of the injections.
    lomass: float
        Lower value of the (first) mass distribution.
    himass: float
        Higher value of the (first) mass distribution.
    lomass_2: float, optional
        Lower value of the second mass distribution (used by
        'componentMass' and 'log').
    himass_2: float, optional
        Higher value of the second mass distribution (used by
        'componentMass' and 'log').

    Returns
    -------
    pdf: array
        Probability density of the injections (zero outside the
        distribution's support).

    Raises
    ------
    ValueError
        If ``key`` is not a recognized injection strategy. (The
        original silently returned ``None`` here.)
    """
    mass1, mass2 = np.array(mass1), np.array(mass2)
    if key == 'totalMass':
        # uniform in total mass; the sign-sum is 3 only when the total
        # mass and both components are strictly inside their bounds
        bound = np.sign((lomass + himass) - (mass1 + mass2))
        bound += np.sign((himass - mass1) * (mass1 - lomass))
        bound += np.sign((himass - mass2) * (mass2 - lomass))
        pdf = 1. / (himass - lomass) / (mass1 + mass2 - 2 * lomass)
        pdf[np.where(bound != 3)] = 0
        return pdf
    if key == 'componentMass':
        # independent, uniform in each component mass
        bound = np.sign((himass - mass1) * (mass1 - lomass))
        bound += np.sign((himass_2 - mass2) * (mass2 - lomass_2))
        pdf = np.ones_like(mass1) / (himass - lomass) / (himass_2 - lomass_2)
        pdf[np.where(bound != 2)] = 0
        return pdf
    if key == 'log':
        # independent, uniform in the log of each component mass
        # (np.log used explicitly instead of a bare module-level `log`)
        bound = np.sign((himass - mass1) * (mass1 - lomass))
        bound += np.sign((himass_2 - mass2) * (mass2 - lomass_2))
        pdf = 1 / (np.log(himass) - np.log(lomass)) \
            / (np.log(himass_2) - np.log(lomass_2))
        pdf = pdf / (mass1 * mass2)
        pdf[np.where(bound != 2)] = 0
        return pdf
    raise ValueError("Unknown mass distribution: {}".format(key))
def inj_spin_pdf(key, high_spin, spinz):
    """Estimate the probability density of the injections for the spin
    distribution.

    Parameters
    ----------
    key: string
        Injection strategy: 'precessing', 'aligned' or 'disable_spin'.
    high_spin: float
        Maximum spin used in the strategy.
    spinz: array
        Spin of the injections (for one component).

    Returns
    -------
    pdf: array
        Probability density at the injection spins (zero outside the
        support).
    """
    # data from a disable_spin simulation: flat unit density
    if spinz[0] == 0:
        return np.ones_like(spinz)
    spinz = np.array(spinz)
    # the sign-sum is 2 only when |spin| is inside both the high_spin
    # and the physical |spin| <= 1 bounds
    bound = np.sign(np.absolute(high_spin) - np.absolute(spinz))
    bound += np.sign(1 - np.absolute(spinz))
    if key == 'precessing':
        # isotropically distributed total spin
        # NOTE(review): the parenthesis placement here looks suspect
        # (log of "high_spin - log|s|"); preserved as-is -- confirm
        # against the intended isotropic-spin density.
        pdf = (np.log(high_spin - np.log(abs(spinz)))/high_spin/2)
        pdf[np.where(bound != 2)] = 0
        return pdf
    if key == 'aligned':
        # aligned spins, uniform in spinz over [-high_spin, high_spin]
        pdf = np.ones_like(spinz) / 2 / high_spin
        pdf[np.where(bound != 2)] = 0
        return pdf
    if key == 'disable_spin':
        return np.ones_like(spinz)
def inj_distance_pdf(key, distance, low_dist, high_dist, mchirp=1):
    """Estimate the probability density of the injections for the
    distance distribution.

    Parameters
    ----------
    key: string
        Injection strategy: 'uniform' or 'dchirp' (uniform in
        chirp-mass-scaled distance).
    distance: array
        Array of distances.
    low_dist: float
        Lower value of distance used in the injection strategy.
    high_dist: float
        Higher value of distance used in the injection strategy.
    mchirp: array or float, optional
        Chirp mass(es); used only by the 'dchirp' strategy.

    Returns
    -------
    pdf: array
        Probability density at the injection distances (zero outside
        the support).
    """
    distance = np.array(distance)
    if key == 'uniform':
        # uniform in distance between the bounds
        pdf = np.ones_like(distance) / (high_dist - low_dist)
        inside = np.sign((high_dist - distance) * (distance - low_dist))
        pdf[np.where(inside != 1)] = 0
        return pdf
    if key == 'dchirp':
        # uniform in chirp-distance: bounds scale as (mchirp/mch_BNS)^(5/6)
        weight = (mchirp / _mch_BNS) ** (5. / 6)
        pdf = np.ones_like(distance) / weight / (high_dist - low_dist)
        inside = np.sign((weight * high_dist - distance) *
                         (distance - weight * low_dist))
        pdf[np.where(inside != 1)] = 0
        return pdf
def get_common_cbc_transforms(requested_params, variable_args,
                              valid_params=None):
    """Determine any additional parameters and transforms needed to
    produce the derived parameters the user asked for.

    First adds any base parameters required to calculate the derived
    parameters, then adds any sampling parameters required to calculate
    those base parameters.

    Parameters
    ----------
    requested_params : list
        List of parameters that user wants.
    variable_args : list
        List of parameters that InferenceFile has.
    valid_params : list
        List of parameters that can be accepted.

    Returns
    -------
    requested_params : list
        Updated list of parameters that user wants.
    all_c : list
        List of BaseTransforms to apply.
    """
    if not isinstance(variable_args, set):
        variable_args = set(variable_args)
    # pull parameter names out of any equations: keep alphanumeric /
    # underscore runs, split on everything else; this yields some
    # garbage, removed below via valid_params
    parsed_names = []
    for opt in requested_params:
        cleaned = "".join(ch if (ch.isalnum() or ch == "_") else " "
                          for ch in opt)
        parsed_names += cleaned.split(" ")
    requested_params = set(list(requested_params) + parsed_names)
    if valid_params:
        requested_params = requested_params.intersection(set(valid_params))
    # transforms producing requested derived params from base params
    from_base_c = []
    for converter in common_cbc_inverse_transforms:
        if (converter.outputs.issubset(variable_args) or
                converter.outputs.isdisjoint(requested_params)):
            continue
        intersect = converter.outputs.intersection(requested_params)
        if (not intersect or intersect.issubset(converter.inputs) or
                intersect.issubset(variable_args)):
            continue
        requested_params.update(converter.inputs)
        from_base_c.append(converter)
    # transforms producing the required base params from sampling params
    to_base_c = []
    for converter in common_cbc_forward_transforms:
        if (converter.inputs.issubset(variable_args) and
                converter.outputs.intersection(requested_params)):
            requested_params.update(converter.inputs)
            to_base_c.append(converter)
            variable_args.update(converter.outputs)
    # sampling -> base transforms first, then base -> derived
    return list(requested_params), to_base_c + from_base_c
def apply_transforms(samples, transforms, inverse=False):
    """Apply a list of BaseTransform instances to a mapping object.

    Parameters
    ----------
    samples : {FieldArray, dict}
        Mapping object to apply transforms to.
    transforms : list
        List of BaseTransform instances to apply. Nested transforms
        are assumed to be in order for forward transforms.
    inverse : bool, optional
        Apply inverse transforms; in this case the transforms are
        applied in the opposite order. Default is False.

    Returns
    -------
    samples : {FieldArray, dict}
        Mapping object with transforms applied. Same type as input.
    """
    ordered = transforms[::-1] if inverse else transforms
    for t in ordered:
        try:
            samples = (t.inverse_transform(samples) if inverse
                       else t.transform(samples))
        except NotImplementedError:
            # transforms without this direction are simply skipped
            continue
    return samples
def compute_jacobian(samples, transforms, inverse=False):
    """Compute the product of the jacobians of the given transforms at
    the given sample points.

    Parameters
    ----------
    samples : {FieldArray, dict}
        Mapping object specifying points at which to compute jacobians.
    transforms : list
        List of BaseTransform instances to apply. Nested transforms
        are assumed to be in order for forward transforms.
    inverse : bool, optional
        Compute inverse jacobians. Default is False.

    Returns
    -------
    float :
        The product of the jacobians of all of the transforms.
    """
    total = 1.
    for t in transforms:
        total *= (t.inverse_jacobian(samples) if inverse
                  else t.jacobian(samples))
    return total
def order_transforms(transforms):
    """Order transforms to ensure proper chaining.

    For example, if ``transforms = [B, A, C]`` and ``A`` produces
    outputs needed by ``B``, the transforms are re-ordered to
    ``[A, B, C]``.

    Parameters
    ----------
    transforms : list
        List of transform instances to order.

    Returns
    -------
    list :
        List of transforms ordered such that forward transforms can be
        carried out without error.

    Raises
    ------
    ValueError
        If the transforms have cyclic dependencies and therefore cannot
        be ordered. (The original looped forever in this case.)
    """
    # set of every output produced by a not-yet-ordered transform
    outputs = set().union(*[t.outputs for t in transforms])
    out = []
    remaining = list(transforms)
    while remaining:
        # pull out transforms that need no input produced by a
        # remaining transform
        leftover = []
        for t in remaining:
            if t.inputs.isdisjoint(outputs):
                out.append(t)
                outputs -= t.outputs
            else:
                leftover.append(t)
        if len(leftover) == len(remaining):
            # no progress this pass: there must be a dependency cycle
            raise ValueError("transforms have cyclic dependencies and "
                             "cannot be ordered")
        remaining = leftover
    return out
def read_transforms_from_config(cp, section="transforms"):
    """Return a list of PyCBC transform instances for a section in the
    given configuration file.

    If the transforms are nested (i.e., the output of one transform is
    the input of another), the returned list will be sorted by the
    order of the nests.

    Parameters
    ----------
    cp : WorflowConfigParser
        An open config file to read.
    section : {"transforms", string}
        Prefix on section names from which to retrieve the transforms.

    Returns
    -------
    list
        A list of the parsed transforms, ordered for forward
        application.
    """
    parsed = []
    for subsection in cp.get_subsections(section):
        name = cp.get_opt_tag(section, "name", subsection)
        parsed.append(transforms[name].from_config(cp, section, subsection))
    return order_transforms(parsed)
def format_output(old_maps, new_maps):
    """Merge the dict returned by ``transform`` into the same datatype
    as the input mapping.

    Parameters
    ----------
    old_maps : {FieldArray, dict}
        The mapping object to add new maps to.
    new_maps : dict
        A dict with parameter name as key and numpy.array as value.

    Returns
    -------
    {FieldArray, dict}
        The old_maps object with the new keys from new_maps.

    Raises
    ------
    TypeError
        If ``old_maps`` is neither a FieldArray nor a dict.
    """
    if isinstance(old_maps, record.FieldArray):
        # FieldArray: add each column; overwrite if it already exists
        for key, vals in new_maps.items():
            try:
                old_maps = old_maps.add_fields([vals], [key])
            except ValueError:
                old_maps[key] = vals
        return old_maps
    if isinstance(old_maps, dict):
        merged = old_maps.copy()
        merged.update(new_maps)
        return merged
    raise TypeError("Input type must be FieldArray or dict.")
def from_config(cls, cp, section, outputs, skip_opts=None,
                additional_opts=None):
    """Initialize a transform from the given config-file section.

    Parameters
    ----------
    cp : pycbc.workflow.WorkflowConfigParser
        A parsed configuration file that contains the transform options.
    section : str
        Name of the section in the configuration file.
    outputs : str
        The names of the parameters that are output by this
        transformation, separated by ``VARARGS_DELIM``. These must
        appear in the "tag" part of the section header.
    skip_opts : list, optional
        Do not read options in the given list.
    additional_opts : dict, optional
        Any additional arguments to pass to the class. If an option is
        provided that also exists in the config file, the value
        provided will be used instead of being read from the file.

    Returns
    -------
    cls
        An instance of the class.
    """
    tag = outputs
    if skip_opts is None:
        skip_opts = []
    if additional_opts is None:
        additional_opts = {}
    else:
        additional_opts = additional_opts.copy()
    outputs = set(outputs.split(VARARGS_DELIM))
    # FIX: `list + dict_keys` raises TypeError on python 3; coerce the
    # keys to a list before concatenating
    special_args = ['name'] + skip_opts + list(additional_opts.keys())
    # gather any extra arguments to pass to __init__
    extra_args = {}
    for opt in cp.options("-".join([section, tag])):
        if opt in special_args:
            continue
        val = cp.get_opt_tag(section, opt, tag)
        # cast the option to a float where possible
        try:
            val = float(val)
        except ValueError:
            pass
        extra_args.update({opt: val})
    extra_args.update(additional_opts)
    out = cls(**extra_args)
    # sanity check: the instance's outputs must match the section tag
    if outputs - out.outputs != set() or out.outputs - outputs != set():
        raise ValueError("outputs of class do not match outputs specified "
                         "in section")
    return out
def _createscratch(self, shape=1):
    """(Re)allocate the scratch FieldArray used for transforms, with
    one float field per input parameter."""
    dtype = [(param, float) for param in self.inputs]
    self._scratch = record.FieldArray(shape, dtype=dtype)
q31930 | CustomTransform._copytoscratch | train | def _copytoscratch(self, maps):
"""Copies the data in maps to the scratch space.
If the maps contain arrays that are not the same shape as the scratch
space, a new scratch space will be created.
"""
try:
for p in self.inputs:
self._scratch[p][:] = maps[p]
except ValueError:
# we'll get a ValueError if the scratch space isn't the same size
# as the maps; in that case, re-create the scratch space with the
# appropriate size and try again
invals = maps[list(self.inputs)[0]]
if isinstance(invals, numpy.ndarray):
shape = invals.shape
else:
shape = len(invals)
self._createscratch(shape)
for p in self.inputs:
self._scratch[p][:] = maps[p] | python | {
"resource": ""
} |
q31931 | CustomTransform._getslice | train | def _getslice(self, maps):
"""Determines how to slice the scratch for returning values."""
invals = maps[list(self.inputs)[0]]
if not isinstance(invals, (numpy.ndarray, list)):
getslice = 0
else:
getslice = slice(None, None)
return getslice | python | {
"resource": ""
} |
def transform(self, maps):
    """Apply the custom transform functions to the given maps object.

    Parameters
    ----------
    maps : dict, or FieldArray
        Mapping of input parameter values.

    Returns
    -------
    dict or FieldArray
        A map object containing the transformed variables, along with
        the original variables. The type of the output is the same as
        the input.

    Raises
    ------
    NotImplementedError
        If no transform functions were provided.
    """
    if self.transform_functions is None:
        raise NotImplementedError("no transform function(s) provided")
    self._copytoscratch(maps)
    # scalar inputs get index 0, array inputs a full slice, so the
    # output matches the input's scalar/array-ness
    sl = self._getslice(maps)
    results = {name: self._scratch[expr][sl]
               for name, expr in self.transform_functions.items()}
    return self.format_output(maps, results)
def from_config(cls, cp, section, outputs):
    """Load a CustomTransform from the given config file.

    Example section:

    .. code-block:: ini

        [{section}-outvar1+outvar2]
        name = custom
        inputs = inputvar1, inputvar2
        outvar1 = func1(inputs)
        outvar2 = func2(inputs)
        jacobian = func(inputs)
    """
    tag = outputs
    outputs = set(outputs.split(VARARGS_DELIM))
    # FIX: materialize the names to a list; a bare map() is a one-shot
    # iterator on python 3 and would be exhausted after its first use
    inputs = [opt.strip()
              for opt in cp.get_opt_tag(section, 'inputs', tag).split(',')]
    # one function string per output variable
    transform_functions = {var: cp.get_opt_tag(section, var, tag)
                           for var in outputs}
    subsection = '-'.join([section, tag])
    if cp.has_option(subsection, 'jacobian'):
        jacobian = cp.get_opt_tag(section, 'jacobian', tag)
    else:
        jacobian = None
    return cls(inputs, outputs, transform_functions, jacobian=jacobian)
def transform(self, maps):
    """Transform chirp mass and mass ratio to component masses.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mchirp`` and ``q``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    mchirp = maps[parameters.mchirp]
    q = maps[parameters.q]
    out = {
        parameters.mass1: conversions.mass1_from_mchirp_q(mchirp, q),
        parameters.mass2: conversions.mass2_from_mchirp_q(mchirp, q),
    }
    return self.format_output(maps, out)
def inverse_transform(self, maps):
    """Transform component masses to chirp mass and mass ratio.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mass1`` and ``mass2``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    mass1 = maps[parameters.mass1]
    mass2 = maps[parameters.mass2]
    out = {
        parameters.mchirp:
            conversions.mchirp_from_mass1_mass2(mass1, mass2),
        # q is defined here as mass1/mass2
        parameters.q: mass1 / mass2,
    }
    return self.format_output(maps, out)
def jacobian(self, maps):
    """Jacobian for transforming (mchirp, q) to (mass1, mass2)."""
    q = maps[parameters.q]
    scale = ((1. + q) / q**3.)**(2. / 5)
    return maps[parameters.mchirp] * scale
def inverse_jacobian(self, maps):
    """Jacobian for transforming (mass1, mass2) to (mchirp, q)."""
    mass1 = maps[parameters.mass1]
    mass2 = maps[parameters.mass2]
    mchirp = conversions.mchirp_from_mass1_mass2(mass1, mass2)
    return mchirp / mass2**2.
def transform(self, maps):
    """Transform chirp mass and symmetric mass ratio to component masses.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mchirp`` and ``eta``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    mchirp = maps[parameters.mchirp]
    eta = maps[parameters.eta]
    out = {
        parameters.mass1:
            conversions.mass1_from_mchirp_eta(mchirp, eta),
        parameters.mass2:
            conversions.mass2_from_mchirp_eta(mchirp, eta),
    }
    return self.format_output(maps, out)
def jacobian(self, maps):
    """Jacobian for transforming (mchirp, eta) to (mass1, mass2)."""
    mchirp = maps[parameters.mchirp]
    eta = maps[parameters.eta]
    # recover the component masses to evaluate the Jacobian
    mass1 = conversions.mass1_from_mchirp_eta(mchirp, eta)
    mass2 = conversions.mass2_from_mchirp_eta(mchirp, eta)
    mtotal = mass1 + mass2
    return mchirp * (mass1 - mass2) / mtotal**3
def inverse_jacobian(self, maps):
    """Jacobian for transforming (mass1, mass2) to (mchirp, eta)."""
    mass1 = maps[parameters.mass1]
    mass2 = maps[parameters.mass2]
    mchirp = conversions.mchirp_from_mass1_mass2(mass1, mass2)
    eta = conversions.eta_from_mass1_mass2(mass1, mass2)
    return -1. * mchirp / eta**(6./5)
def transform(self, maps):
    """Transform chirp distance to luminosity distance, given the chirp
    mass.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``chirp_distance`` and ``mchirp``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    dist = conversions.distance_from_chirp_distance_mchirp(
        maps[parameters.chirp_distance],
        maps[parameters.mchirp],
        ref_mass=self.ref_mass)
    return self.format_output(maps, {parameters.distance: dist})
def inverse_transform(self, maps):
    """Transform luminosity distance to chirp distance, given the chirp
    mass.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``distance`` and ``mchirp``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    chirp_dist = conversions.chirp_distance(
        maps[parameters.distance],
        maps[parameters.mchirp],
        ref_mass=self.ref_mass)
    return self.format_output(maps, {parameters.chirp_distance: chirp_dist})
def transform(self, maps):
    """ This function transforms from spherical to cartesian spins.

    Parameters
    ----------
    maps : a mapping object
        Must contain the spherical spin parameters named in
        ``self._inputs`` (magnitude, azimuthal angle, polar angle).

    Examples
    --------
    Convert a dict of numpy.array:

    >>> import numpy
    >>> from pycbc import transforms
    >>> t = transforms.SphericalSpin1ToCartesianSpin1()
    >>> t.transform({'spin1_a': numpy.array([0.1]), 'spin1_azimuthal': numpy.array([0.1]), 'spin1_polar': numpy.array([0.1])})
    {'spin1_a': array([ 0.1]), 'spin1_azimuthal': array([ 0.1]), 'spin1_polar': array([ 0.1]),
    'spin1x': array([ 0.00993347]), 'spin1y': array([ 0.00099667]), 'spin1z': array([ 0.09950042])}

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    # unpack the spherical parameter names: magnitude, azimuthal, polar
    a, az, po = self._inputs
    data = coordinates.spherical_to_cartesian(maps[a], maps[az], maps[po])
    # label the cartesian components with the configured output names
    out = {param : val for param, val in zip(self._outputs, data)}
    return self.format_output(maps, out)
def inverse_transform(self, maps):
    """ This function transforms from cartesian to spherical spins.

    Parameters
    ----------
    maps : a mapping object
        Must contain the cartesian spin components named in
        ``self._outputs``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    # the cartesian component names are the forward transform's outputs
    sx, sy, sz = self._outputs
    data = coordinates.cartesian_to_spherical(maps[sx], maps[sy], maps[sz])
    # bug fix: label the spherical results with the *input* (spherical)
    # parameter names; previously this zipped with self._outputs, which
    # mislabeled the spherical values with the cartesian names
    out = {param : val for param, val in zip(self._inputs, data)}
    return self.format_output(maps, out)
def transform(self, maps):
    """Transform luminosity distance to redshift.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``distance``.

    Examples
    --------
    Convert a dict of numpy.array:

    >>> import numpy
    >>> from pycbc import transforms
    >>> t = transforms.DistanceToRedshift()
    >>> t.transform({'distance': numpy.array([1000])})
    {'distance': array([1000]), 'redshift': 0.19650987609144363}

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    redshift = cosmology.redshift(maps[parameters.distance])
    return self.format_output(maps, {parameters.redshift: redshift})
def transform(self, maps):
    """Transform aligned mass-weighted spins (chi_eff, chi_a) to the
    aligned cartesian components spin1z and spin2z.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mass1``, ``mass2``, ``chi_eff`` and ``chi_a``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    m1 = maps[parameters.mass1]
    m2 = maps[parameters.mass2]
    chi_eff = maps[parameters.chi_eff]
    chi_a = maps["chi_a"]
    out = {
        parameters.spin1z:
            conversions.spin1z_from_mass1_mass2_chi_eff_chi_a(
                m1, m2, chi_eff, chi_a),
        parameters.spin2z:
            conversions.spin2z_from_mass1_mass2_chi_eff_chi_a(
                m1, m2, chi_eff, chi_a),
    }
    return self.format_output(maps, out)
def inverse_transform(self, maps):
    """Transform component masses and aligned cartesian spins to the
    mass-weighted spin parameters chi_eff and chi_a.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mass1``, ``mass2``, ``spin1z`` and ``spin2z``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    m1 = maps[parameters.mass1]
    m2 = maps[parameters.mass2]
    s1z = maps[parameters.spin1z]
    s2z = maps[parameters.spin2z]
    out = {
        parameters.chi_eff: conversions.chi_eff(m1, m2, s1z, s2z),
        "chi_a": conversions.chi_a(m1, m2, s1z, s2z),
    }
    return self.format_output(maps, out)
def transform(self, maps):
    """ This function transforms from mass-weighted spins to cartesian spins
    in the x-y plane.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mass1``, ``mass2``, ``xi1``, ``xi2``, ``phi_a``
        and ``phi_s``.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    # find primary and secondary masses
    # since functions in conversions.py map to primary/secondary masses
    m_p = conversions.primary_mass(maps["mass1"], maps["mass2"])
    m_s = conversions.secondary_mass(maps["mass1"], maps["mass2"])
    # find primary and secondary xi
    # can re-purpose spin functions for just a generic variable
    xi_p = conversions.primary_spin(maps["mass1"], maps["mass2"],
                                    maps["xi1"], maps["xi2"])
    xi_s = conversions.secondary_spin(maps["mass1"], maps["mass2"],
                                      maps["xi1"], maps["xi2"])
    # convert using convention of conversions.py that is mass1 > mass2
    spinx_p = conversions.spin1x_from_xi1_phi_a_phi_s(
                           xi_p, maps["phi_a"], maps["phi_s"])
    spiny_p = conversions.spin1y_from_xi1_phi_a_phi_s(
                           xi_p, maps["phi_a"], maps["phi_s"])
    spinx_s = conversions.spin2x_from_mass1_mass2_xi2_phi_a_phi_s(
                           m_p, m_s, xi_s, maps["phi_a"], maps["phi_s"])
    spiny_s = conversions.spin2y_from_mass1_mass2_xi2_phi_a_phi_s(
                           m_p, m_s, xi_s, maps["phi_a"], maps["phi_s"])
    # map parameters from primary/secondary to indices
    out = {}
    if isinstance(m_p, numpy.ndarray):
        mass1, mass2 = map(numpy.array, [maps["mass1"], maps["mass2"]])
        mask_mass1_gte_mass2 = mass1 >= mass2
        mask_mass1_lt_mass2 = mass1 < mass2
        # NOTE(review): concatenating the masked pieces re-orders samples
        # relative to the input arrays -- confirm callers do not rely on
        # element-wise alignment here
        out[parameters.spin1x] = numpy.concatenate((
                                    spinx_p[mask_mass1_gte_mass2],
                                    spinx_s[mask_mass1_lt_mass2]))
        out[parameters.spin1y] = numpy.concatenate((
                                    spiny_p[mask_mass1_gte_mass2],
                                    spiny_s[mask_mass1_lt_mass2]))
        out[parameters.spin2x] = numpy.concatenate((
                                    spinx_p[mask_mass1_lt_mass2],
                                    spinx_s[mask_mass1_gte_mass2]))
        # bug fix: spin2y previously concatenated the x-components
        # (spinx_*) instead of the y-components
        out[parameters.spin2y] = numpy.concatenate((
                                    spiny_p[mask_mass1_lt_mass2],
                                    spiny_s[mask_mass1_gte_mass2]))
    elif maps["mass1"] > maps["mass2"]:
        out[parameters.spin1x] = spinx_p
        out[parameters.spin1y] = spiny_p
        out[parameters.spin2x] = spinx_s
        out[parameters.spin2y] = spiny_s
    else:
        out[parameters.spin1x] = spinx_s
        out[parameters.spin1y] = spiny_s
        out[parameters.spin2x] = spinx_p
        out[parameters.spin2y] = spiny_p
    return self.format_output(maps, out)
def inverse_transform(self, maps):
    """ This function transforms from component masses and cartesian spins to
    mass-weighted spin parameters perpendicular with the angular momentum.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mass1``, ``mass2`` and the in-plane cartesian spin
        components.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    # compute the primary/secondary xi and the spin phase parameters from
    # the cartesian in-plane spin components
    out = {}
    xi1 = conversions.primary_xi(
                 maps[parameters.mass1], maps[parameters.mass2],
                 maps[parameters.spin1x], maps[parameters.spin1y],
                 maps[parameters.spin2x], maps[parameters.spin2y])
    xi2 = conversions.secondary_xi(
                 maps[parameters.mass1], maps[parameters.mass2],
                 maps[parameters.spin1x], maps[parameters.spin1y],
                 maps[parameters.spin2x], maps[parameters.spin2y])
    out["phi_a"] = conversions.phi_a(
                 maps[parameters.mass1], maps[parameters.mass2],
                 maps[parameters.spin1x], maps[parameters.spin1y],
                 maps[parameters.spin2x], maps[parameters.spin2y])
    out["phi_s"] = conversions.phi_s(
                 maps[parameters.spin1x], maps[parameters.spin1y],
                 maps[parameters.spin2x], maps[parameters.spin2y])
    # map parameters from primary/secondary to indices
    if isinstance(xi1, numpy.ndarray):
        mass1, mass2 = map(numpy.array, [maps[parameters.mass1],
                                         maps[parameters.mass2]])
        mask_mass1_gte_mass2 = mass1 >= mass2
        mask_mass1_lt_mass2 = mass1 < mass2
        # NOTE(review): concatenating the masked pieces re-orders samples
        # relative to the input arrays -- confirm callers do not rely on
        # element-wise alignment here
        out["xi1"] = numpy.concatenate((
                        xi1[mask_mass1_gte_mass2],
                        xi2[mask_mass1_lt_mass2]))
        # bug fix: xi2 previously duplicated the xi1 selection; body 2 is
        # the secondary where mass1 >= mass2 and the primary otherwise
        out["xi2"] = numpy.concatenate((
                        xi2[mask_mass1_gte_mass2],
                        xi1[mask_mass1_lt_mass2]))
    elif maps["mass1"] > maps["mass2"]:
        out["xi1"] = xi1
        out["xi2"] = xi2
    else:
        out["xi1"] = xi2
        out["xi2"] = xi1
    return self.format_output(maps, out)
def transform(self, maps):
    """Transform component masses and cartesian spins to chi_p.

    Parameters
    ----------
    maps : a mapping object
        Must provide ``mass1``, ``mass2`` and the in-plane cartesian spin
        components.

    Returns
    -------
    out : dict
        A dict with key as parameter name and value as numpy.array or float
        of transformed values.
    """
    chi_p = conversions.chi_p(
        maps[parameters.mass1], maps[parameters.mass2],
        maps[parameters.spin1x], maps[parameters.spin1y],
        maps[parameters.spin2x], maps[parameters.spin2y])
    return self.format_output(maps, {"chi_p": chi_p})
def from_config(cls, cp, section, outputs, skip_opts=None,
                additional_opts=None):
    """Initializes a Logistic transform from the given section.

    The section must specify an input and output variable name. The
    codomain of the output may be specified using `min-{output}`,
    `max-{output}`. Example:

    .. code-block:: ini

        [{section}-q]
        name = logistic
        inputvar = logitq
        outputvar = q
        min-q = 1
        max-q = 8

    Parameters
    ----------
    cp : pycbc.workflow.WorkflowConfigParser
        A parsed configuration file that contains the transform options.
    section : str
        Name of the section in the configuration file.
    outputs : str
        The names of the parameters that are output by this transformation,
        separated by `VARARGS_DELIM`. These must appear in the "tag" part
        of the section header.
    skip_opts : list, optional
        Do not read options in the given list.
    additional_opts : dict, optional
        Any additional arguments to pass to the class. If an option is
        provided that also exists in the config file, the value provided
        will be used instead of being read from the file.

    Returns
    -------
    cls
        An instance of the class.

    Raises
    ------
    ValueError
        If only one of ``min-{output}`` / ``max-{output}`` is provided.
    """
    # pull out the minimum, maximum values of the output variable
    outputvar = cp.get_opt_tag(section, 'output', outputs)
    if skip_opts is None:
        skip_opts = []
    # NOTE(review): a caller-supplied skip_opts list is mutated in place
    # by the appends below, while additional_opts is copied -- confirm
    # the asymmetry is intended
    if additional_opts is None:
        additional_opts = {}
    else:
        # copy so the caller's dict is not mutated when adding codomain
        additional_opts = additional_opts.copy()
    s = '-'.join([section, outputs])
    opt = 'min-{}'.format(outputvar)
    if cp.has_option(s, opt):
        a = cp.get_opt_tag(section, opt, outputs)
        # mark as consumed so the parent from_config does not re-read it
        skip_opts.append(opt)
    else:
        a = None
    opt = 'max-{}'.format(outputvar)
    if cp.has_option(s, opt):
        b = cp.get_opt_tag(section, opt, outputs)
        skip_opts.append(opt)
    else:
        b = None
    # min and max must be provided together, or not at all
    if a is None and b is not None or b is None and a is not None:
        raise ValueError("if providing a min(max)-{}, must also provide "
                         "a max(min)-{}".format(outputvar, outputvar))
    elif a is not None:
        # both bounds given: pass the codomain through to the constructor
        additional_opts.update({'codomain': (float(a), float(b))})
    return super(Logistic, cls).from_config(cp, section, outputs,
                                            skip_opts, additional_opts)
def get_options_from_group(option_group):
    """
    Take an option group and return all the options that are defined in that
    group.

    Parameters
    ----------
    option_group : argparse._ArgumentGroup
        The group whose registered actions are inspected.

    Returns
    -------
    list of str
        The long-form ("--"-prefixed) option strings, in registration
        order; short options ("-x") are skipped.
    """
    return [opt_string
            for action in option_group._group_actions
            for opt_string in action.option_strings
            if opt_string.startswith('--')]
def insert_base_bank_options(parser):
    """
    Adds essential common options for template bank generation to an
    ArgumentParser instance.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser the options are added to (in place).
    """
    def match_type(s):
        # accept only numbers strictly between 0 and 1
        err_msg = "must be a number between 0 and 1 excluded, not %r" % s
        try:
            match = float(s)
        except ValueError:
            raise argparse.ArgumentTypeError(err_msg)
        if match <= 0 or match >= 1:
            raise argparse.ArgumentTypeError(err_msg)
        return match

    parser.add_argument(
        '-m', '--min-match', type=match_type, required=True,
        help="Generate bank with specified minimum match. Required.")
    parser.add_argument(
        '-O', '--output-file', required=True,
        help="Output file name. Required.")
    parser.add_argument(
        '--f-low-column', type=str, metavar='NAME',
        help='If given, store the lower frequency cutoff into '
             'column NAME of the single-inspiral table.')
def insert_metric_calculation_options(parser):
    """
    Adds the options used to obtain a metric in the bank generation codes to an
    argparser as an OptionGroup. This should be used if you want to use these
    options in your code.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser the new argument group is added to (in place).

    Returns
    -------
    metricOpts : argparse._ArgumentGroup
        The newly created argument group.
    """
    # group the metric-related options so they appear together in --help
    metricOpts = parser.add_argument_group(
               "Options related to calculating the parameter space metric")
    metricOpts.add_argument("--pn-order", action="store", type=str,
                required=True,
                help="Determines the PN order to use. For a bank of "
                     "non-spinning templates, spin-related terms in the "
                     "metric will be zero. REQUIRED. "
                     "Choices: %s" %(pycbcValidOrdersHelpDescriptions))
    metricOpts.add_argument("--f0", action="store", type=positive_float,
                default=70.,\
                help="f0 is used as a dynamic scaling factor when "
                     "calculating integrals used in metric construction. "
                     "I.e. instead of integrating F(f) we integrate F(f/f0) "
                     "then rescale by powers of f0. The default value 70Hz "
                     "should be fine for most applications. OPTIONAL. "
                     "UNITS=Hz. **WARNING: If the ethinca metric is to be "
                     "calculated, f0 must be set equal to f-low**")
    metricOpts.add_argument("--f-low", action="store", type=positive_float,
                required=True,
                help="Lower frequency cutoff used in computing the "
                     "parameter space metric. REQUIRED. UNITS=Hz")
    metricOpts.add_argument("--f-upper", action="store", type=positive_float,
                required=True,
                help="Upper frequency cutoff used in computing the "
                     "parameter space metric. REQUIRED. UNITS=Hz")
    metricOpts.add_argument("--delta-f", action="store", type=positive_float,
                required=True,
                help="Frequency spacing used in computing the parameter "
                     "space metric: integrals of the form \int F(f) df "
                     "are approximated as \sum F(f) delta_f. REQUIRED. "
                     "UNITS=Hz")
    metricOpts.add_argument("--write-metric", action="store_true",
                default=False, help="If given write the metric components "
                     "to disk as they are calculated.")
    # return the group so callers can cross-reference its options
    return metricOpts
def verify_ethinca_metric_options(opts, parser):
    """
    Checks that the necessary options are given for the ethinca metric
    calculation.

    Parameters
    ----------
    opts : argparse.Values instance
        Result of parsing the input options with OptionParser
    parser : object
        The OptionParser instance.
    """
    # a filter cutoff, if supplied, must be one of the named formulae
    if opts.filter_cutoff is not None and \
            opts.filter_cutoff not in pnutils.named_frequency_cutoffs:
        parser.error("Need a valid cutoff formula to calculate ethinca or "
                     "assign filter f_final values! Possible values are "
                     +str(pnutils.named_frequency_cutoffs.keys()))
    # a frequency step is required whenever any ethinca output is requested
    if (opts.calculate_ethinca_metric or opts.calculate_time_metric_components)\
            and not opts.ethinca_frequency_step:
        parser.error("Need to specify a cutoff frequency step to calculate "
                     "ethinca!")
    # conversely, an ethinca PN order is meaningless without ethinca output
    if not (opts.calculate_ethinca_metric or
            opts.calculate_time_metric_components) and opts.ethinca_pn_order:
        parser.error("Can't specify an ethinca PN order if not "
                     "calculating ethinca metric!")
def check_ethinca_against_bank_params(ethincaParams, metricParams):
    """
    Cross-check the ethinca and bank layout metric calculation parameters
    and set the ethinca metric PN order equal to the bank PN order if not
    previously set.

    Parameters
    ----------
    ethincaParams: instance of ethincaParameters
    metricParams: instance of metricParameters

    Raises
    ------
    ValueError
        If the ethinca metric is requested but ``f0 != fLow``, or the
        ethinca fLow differs from the bank metric's fLow.
    """
    if not ethincaParams.doEthinca:
        # no ethinca metric requested: nothing to check
        return
    if metricParams.f0 != metricParams.fLow:
        raise ValueError("If calculating ethinca metric, f0 and f-low "
                         "must be equal!")
    if ethincaParams.fLow is not None and (
            ethincaParams.fLow != metricParams.fLow):
        raise ValueError("Ethinca metric calculation does not currently "
                         "support a f-low value different from the bank "
                         "metric!")
    # default the ethinca PN order to the bank's PN order
    if ethincaParams.pnOrder is None:
        ethincaParams.pnOrder = metricParams.pnOrder
def from_argparse(cls, opts):
    """
    Initialize an instance of the metricParameters class from an
    argparse.OptionParser instance. This assumes that
    insert_metric_calculation_options
    and
    verify_metric_calculation_options
    have already been called before initializing the class.
    """
    # collect positional and keyword arguments, then construct
    pos_args = (opts.pn_order, opts.f_low, opts.f_upper, opts.delta_f)
    kwargs = {'f0': opts.f0, 'write_metric': opts.write_metric}
    return cls(*pos_args, **kwargs)
def psd(self):
    """The PSD used in the metric calculation (a pyCBC FrequencySeries).

    Raises
    ------
    ValueError
        If no PSD has been stored on this instance yet.
    """
    # truthiness test: an unset (None/empty) PSD raises here
    if not self._psd:
        raise ValueError("The PSD has not been set in the metricParameters "
                         "instance.")
    return self._psd
def evals(self):
    """The eigenvalues of the parameter space.

    A dictionary of numpy.array keyed by upper-frequency cutoff: the
    ``f_upper`` entry (integrals over [f_low, f_upper)) is always present;
    when the upper cutoff is varied, additional float keys give the
    cutoff frequency. Together with the eigenvectors in ``evecs``, these
    rotate the coordinates so the metric becomes the identity matrix.

    Raises
    ------
    ValueError
        If the eigenvalues have not been computed yet.
    """
    if self._evals is None:
        raise ValueError("The metric eigenvalues have not been set in the "
                         "metricParameters instance.")
    return self._evals
def evecs(self):
    """The eigenvectors of the parameter space.

    A dictionary of numpy.matrix keyed as described under ``evals``. Each
    matrix holds the eigenvectors which, with the eigenvalues in
    ``evals``, rotate the coordinates so the metric becomes the identity
    matrix.

    Raises
    ------
    ValueError
        If the eigenvectors have not been computed yet.
    """
    if self._evecs is None:
        raise ValueError("The metric eigenvectors have not been set in the "
                         "metricParameters instance.")
    return self._evecs
def metric(self):
    """The metric of the parameter space.

    A dictionary of numpy.matrix keyed as described under ``evals``. Each
    matrix holds the metric of the parameter space in the Lambda_i
    coordinate system.

    Raises
    ------
    ValueError
        If the metric has not been computed yet.
    """
    if self._metric is None:
        # bug fix: the error message previously said "eigenvectors"
        # (copy-pasted from the evecs getter)
        raise ValueError("The metric has not been set in the "
                         "metricParameters instance.")
    return self._metric
def evecsCV(self):
    """The eigenvectors of the principal directions of the mu space.

    A dictionary of numpy.matrix keyed as described under ``evals``.

    Raises
    ------
    ValueError
        If the covariance eigenvectors have not been computed yet.
    """
    if self._evecsCV is None:
        raise ValueError("The covariance eigenvectors have not been set in "
                         "the metricParameters instance.")
    return self._evecsCV
def from_argparse(cls, opts, nonSpin=False):
    """
    Initialize an instance of the massRangeParameters class from an
    argparse.OptionParser instance. This assumes that
    insert_mass_range_option_group
    and
    verify_mass_range_options
    have already been called before initializing the class.

    Parameters
    ----------
    opts : argparse.Namespace
        The parsed options.
    nonSpin : boolean, optional (default=False)
        If True, the spin-related options are not read from ``opts``.

    Returns
    -------
    massRangeParameters
    """
    # keyword arguments common to the spinning and non-spinning cases;
    # previously these were duplicated across the two branches
    kwargs = dict(maxTotMass=opts.max_total_mass,
                  minTotMass=opts.min_total_mass,
                  maxEta=opts.max_eta,
                  minEta=opts.min_eta,
                  max_chirp_mass=opts.max_chirp_mass,
                  min_chirp_mass=opts.min_chirp_mass,
                  remnant_mass_threshold=opts.remnant_mass_threshold,
                  ns_eos=opts.ns_eos,
                  use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
                  delta_bh_spin=opts.delta_bh_spin,
                  delta_ns_mass=opts.delta_ns_mass)
    if not nonSpin:
        # spin options are only meaningful for a spinning bank
        kwargs.update(maxNSSpinMag=opts.max_ns_spin_mag,
                      maxBHSpinMag=opts.max_bh_spin_mag,
                      nsbhFlag=opts.nsbh_flag,
                      ns_bh_boundary_mass=opts.ns_bh_boundary_mass)
    return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
               opts.max_mass2, **kwargs)
def is_outside_range(self, mass1, mass2, spin1z, spin2z):
    """
    Test if a given location in mass1, mass2, spin1z, spin2z is within the
    range of parameters allowed by the massParams object.

    Parameters
    ----------
    mass1 : float
        Mass of the heavier body.
    mass2 : float
        Mass of the lighter body.
    spin1z : float
        Aligned spin component of body 1.
    spin2z : float
        Aligned spin component of body 2.

    Returns
    -------
    int
        1 if the point lies outside the allowed ranges, 0 otherwise.

    Notes
    -----
    All limits are applied with a 0.1% tolerance (the 1.001 factors), so
    points marginally beyond a boundary are still accepted.
    """
    # Mass1 test
    if mass1 * 1.001 < self.minMass1:
        return 1
    if mass1 > self.maxMass1 * 1.001:
        return 1
    # Mass2 test
    if mass2 * 1.001 < self.minMass2:
        return 1
    if mass2 > self.maxMass2 * 1.001:
        return 1
    # Spin1 test
    if self.nsbhFlag:
        # NSBH bank: body 1 is treated as the BH, so only the BH spin
        # magnitude limit applies
        if (abs(spin1z) > self.maxBHSpinMag * 1.001):
            return 1
    else:
        # otherwise the limit depends on which side of the NS/BH boundary
        # mass the body falls
        spin1zM = abs(spin1z)
        if not( (mass1 * 1.001 > self.ns_bh_boundary_mass \
                 and spin1zM <= self.maxBHSpinMag * 1.001) \
             or (mass1 < self.ns_bh_boundary_mass * 1.001 \
                 and spin1zM <= self.maxNSSpinMag * 1.001)):
            return 1
    # Spin2 test
    if self.nsbhFlag:
        # NSBH bank: body 2 is treated as the NS
        if (abs(spin2z) > self.maxNSSpinMag * 1.001):
            return 1
    else:
        spin2zM = abs(spin2z)
        if not( (mass2 * 1.001 > self.ns_bh_boundary_mass \
                 and spin2zM <= self.maxBHSpinMag * 1.001) \
             or (mass2 < self.ns_bh_boundary_mass * 1.001 and \
                 spin2zM <= self.maxNSSpinMag * 1.001)):
            return 1
    # Total mass test
    mTot = mass1 + mass2
    if mTot > self.maxTotMass * 1.001:
        return 1
    if mTot * 1.001 < self.minTotMass:
        return 1
    # Eta test
    eta = mass1 * mass2 / (mTot * mTot)
    if eta > self.maxEta * 1.001:
        return 1
    if eta * 1.001 < self.minEta:
        return 1
    # Chirp mass test (limits are optional and may be None)
    chirp_mass = mTot * eta**(3./5.)
    if self.min_chirp_mass is not None \
            and chirp_mass * 1.001 < self.min_chirp_mass:
        return 1
    if self.max_chirp_mass is not None \
            and chirp_mass > self.max_chirp_mass * 1.001:
        return 1
    return 0
def from_argparse(cls, opts):
    """
    Initialize an instance of the ethincaParameters class from an
    argparse.OptionParser instance. This assumes that
    insert_ethinca_metric_options
    and
    verify_ethinca_metric_options
    have already been called before initializing the class.
    """
    pos_args = (opts.ethinca_pn_order, opts.filter_cutoff,
                opts.ethinca_frequency_step)
    # fLow is deliberately left unset here; it is cross-checked against
    # the bank metric later
    return cls(*pos_args, fLow=None,
               full_ethinca=opts.calculate_ethinca_metric,
               time_ethinca=opts.calculate_time_metric_components)
def set_sim_data(inj, field, data):
    """Sets data of a SimInspiral instance.

    The field name is translated through ``sim_inspiral_map`` when an
    entry exists; ``tc`` is special-cased into the geocentric end-time
    columns.
    """
    try:
        sim_field = sim_inspiral_map[field]
    except KeyError:
        # no translation registered: use the name as-is
        sim_field = field
    if sim_field != 'tc':
        setattr(inj, sim_field, data)
    else:
        # split tc into integer seconds plus nanoseconds
        inj.geocent_end_time = int(data)
        inj.geocent_end_time_ns = int(1e9*(data % 1))
def get_hdf_injtype(sim_file):
    """Gets the HDFInjectionSet class to use with the given file.

    This looks for the ``injtype`` in the given file's top level ``attrs``.
    If that attribute isn't set, will default to
    :py:class:`CBCHDFInjectionSet`.

    Parameters
    ----------
    sim_file : str
        Name of the file. The file must already exist.

    Returns
    -------
    HDFInjectionSet :
        The type of HDFInjectionSet to use.
    """
    with h5py.File(sim_file, 'r') as fp:
        if 'injtype' in fp.attrs:
            ftype = fp.attrs['injtype']
        else:
            # older files did not record an injtype
            ftype = CBCHDFInjectionSet.injtype
    return hdfinjtypes[ftype]
def hdf_injtype_from_approximant(approximant):
    """Gets the HDFInjectionSet class to use with the given approximant.

    Parameters
    ----------
    approximant : str
        Name of the approximant.

    Returns
    -------
    HDFInjectionSet :
        The type of HDFInjectionSet to use.

    Raises
    ------
    ValueError
        If no registered injection class supports the approximant.
    """
    matches = [klass for klass in hdfinjtypes.values()
               if approximant in klass.supported_approximants()]
    if not matches:
        raise ValueError("Injection file type unknown for approximant {}"
                         .format(approximant))
    # preserve historical behavior: the last matching class wins
    return matches[-1]
def write(filename, samples, write_params=None, static_args=None):
    """Writes the injection samples to the given xml.

    Parameters
    ----------
    filename : str
        The name of the file to write to. A name ending in ``gz`` is
        written gzip-compressed.
    samples : io.FieldArray
        FieldArray of parameters.
    write_params : list, optional
        Only write the given parameter names. All given names must be keys
        in ``samples``. Default is to write all parameters in ``samples``.
    static_args : dict, optional
        Dictionary mapping static parameter names to values. These are
        set on every sim_inspiral row (after the per-sample fields, so a
        static arg with the same name as a sample field takes precedence).
    """
    # build an empty LIGO_LW document holding one sim_inspiral table
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    simtable = lsctables.New(lsctables.SimInspiralTable)
    xmldoc.childNodes[0].appendChild(simtable)
    if static_args is None:
        static_args = {}
    if write_params is None:
        write_params = samples.fieldnames
    # one table row per injection sample
    for ii in range(samples.size):
        sim = lsctables.SimInspiral()
        # initialize all elements to None
        for col in sim.__slots__:
            setattr(sim, col, None)
        for field in write_params:
            data = samples[ii][field]
            set_sim_data(sim, field, data)
        # set any static args
        for (field, value) in static_args.items():
            set_sim_data(sim, field, value)
        simtable.append(sim)
    ligolw_utils.write_filename(xmldoc, filename,
                                gz=filename.endswith('gz'))
def get_point_from_bins_and_idx(self, chi1_bin, chi2_bin, idx):
    """Find masses and spins given bin numbers and index.

    Given the chi1 bin, chi2 bin and an index, return the masses and spins
    of the point at that index. Will fail if no point exists there.

    Parameters
    -----------
    chi1_bin : int
        The bin number for chi1.
    chi2_bin : int
        The bin number for chi2.
    idx : int
        The index within the chi1, chi2 bin.

    Returns
    --------
    mass1 : float
        Mass of heavier body.
    mass2 : float
        Mass of lighter body.
    spin1z : float
        Spin of heavier body.
    spin2z : float
        Spin of lighter body.
    """
    # look the bin's arrays up once, then index into each of them
    entry = self.massbank[chi1_bin][chi2_bin]
    return (entry['mass1s'][idx], entry['mass2s'][idx],
            entry['spin1s'][idx], entry['spin2s'][idx])
def find_point_bin(self, chi_coords):
    """Locate the (chi1, chi2) bin indices containing a point.

    Parameters
    ----------
    chi_coords : numpy.array
        Position of the point in the chi coordinate system.

    Returns
    -------
    chi1_bin : int
        Index of the chi_1 bin.
    chi2_bin : int
        Index of the chi_2 bin.
    """
    offsets = (chi_coords[0] - self.chi1_min,
               chi_coords[1] - self.chi2_min)
    chi1_bin, chi2_bin = (int(off // self.bin_spacing) for off in offsets)
    # Let the bank validate/prepare the bin before handing it back.
    self.check_bin_existence(chi1_bin, chi2_bin)
    return chi1_bin, chi2_bin
def calc_point_distance(self, chi_coords):
    """Return the smallest **squared** metric distance to the bank.

    Searches the bin containing the point plus its neighbours (as given
    by ``bin_loop_order``).

    Parameters
    ----------
    chi_coords : numpy.array
        Position of the point in the chi coordinates.

    Returns
    -------
    min_dist : float
        Smallest squared metric distance between the point and the bank.
    indexes : tuple or None
        (chi1_bin, chi2_bin, idx) of the closest stored point, or None
        if the searched bins held no templates.
    """
    home1, home2 = self.find_point_bin(chi_coords)
    best = 1000000000
    best_loc = None
    for off1, off2 in self.bin_loop_order:
        bin1 = home1 + off1
        bin2 = home2 + off2
        for idx, tmplt_chis in enumerate(self.bank[bin1][bin2]):
            dist = coord_utils.calc_point_dist(chi_coords, tmplt_chis)
            if dist < best:
                best = dist
                best_loc = (bin1, bin2, idx)
    return best, best_loc
def calc_point_distance_vary(self, chi_coords, point_fupper, mus):
    """Return the smallest **squared** metric distance with varying cutoff.

    Like calc_point_distance but lets the metric vary with the upper
    frequency cutoff: slower, but more reliable when the cutoff can
    change a lot.

    Parameters
    ----------
    chi_coords : numpy.array
        Position of the point in the chi coordinates.
    point_fupper : float
        Upper frequency cutoff of the point; must be one of the values
        already calculated in the metric.
    mus : numpy.array
        2D array: axis 0 indexes the upper frequency cutoff, axis 1 the
        coordinates in the (non-covaried) mu space at that cutoff.

    Returns
    -------
    min_dist : float
        Smallest squared metric distance between the point and the bank.
    indexes : tuple or None
        (chi1_bin, chi2_bin, idx) of the closest stored point.
    """
    home1, home2 = self.find_point_bin(chi_coords)
    best = 1000000000
    best_loc = None
    for off1, off2 in self.bin_loop_order:
        bin1 = home1 + off1
        bin2 = home2 + off2
        curr_bank = self.massbank[bin1][bin2]
        # Empty bin: nothing to compare against.
        if not curr_bank['mass1s'].size:
            continue
        # Elementwise min/max against every stored cutoff
        # (*NOT* the same as .min/.max).
        f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts'])
        f_other = numpy.maximum(point_fupper, curr_bank['freqcuts'])
        # One metric index per template in this bin (a vector).
        freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper])
        # Point coordinates evaluated at each template's cutoff ...
        vecs1 = mus[freq_idxes, :]
        # ... and the matching stored template coordinates.
        range_idxes = numpy.arange(len(freq_idxes))
        vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :]
        # Squared Euclidean distance in mu space, reduced to 1D.
        dists = numpy.sum((vecs1 - vecs2) ** 2, axis=1)
        norm_upper = numpy.array([self.normalization_map[f]
                                  for f in f_upper])
        norm_other = numpy.array([self.normalization_map[f]
                                  for f in f_other])
        renormed = 1 - (1 - dists) * (norm_upper / norm_other)
        if renormed.min() < best:
            best = renormed.min()
            best_loc = (bin1, bin2, renormed.argmin())
    return best, best_loc
def add_point_by_chi_coords(self, chi_coords, mass1, mass2, spin1z, spin2z,
                            point_fupper=None, mus=None):
    """Add a template to the partitioned bank from precomputed chi coords.

    point_fupper and mus must be provided for all templates if the
    vary-fupper capability is desired. See add_point_by_masses for a
    version that performs the coordinate translations before calling
    this.

    Parameters
    ----------
    chi_coords : numpy.array
        Position of the point in the chi coordinates.
    mass1 : float
        Heavier component mass.
    mass2 : float
        Lighter component mass.
    spin1z : float
        Aligned spin of the heavier body.
    spin2z : float
        Aligned spin of the lighter body.
    point_fupper : float, optional
        Upper frequency cutoff for this point; must be one of the values
        already calculated in the metric.
    mus : numpy.array, optional
        2D array: axis 0 indexes the upper frequency cutoff, axis 1 the
        coordinates in the (non-covaried) mu space at that cutoff.
    """
    bin1, bin2 = self.find_point_bin(chi_coords)
    # deepcopy so later mutation of the caller's array cannot alter the bank
    self.bank[bin1][bin2].append(copy.deepcopy(chi_coords))
    curr = self.massbank[bin1][bin2]
    if not curr['mass1s'].size:
        # First template in this bin: create the arrays from scratch.
        curr['mass1s'] = numpy.array([mass1])
        curr['mass2s'] = numpy.array([mass2])
        curr['spin1s'] = numpy.array([spin1z])
        curr['spin2s'] = numpy.array([spin2z])
        if point_fupper is not None:
            curr['freqcuts'] = numpy.array([point_fupper])
        # curr['mus'] is a 3D array in the non-covaried Cartesian system:
        # axis 0 = template index, axis 1 = frequency cutoff index,
        # axis 2 = mu coordinate index.
        if mus is not None:
            curr['mus'] = numpy.array([mus[:, :]])
    else:
        for key, value in (('mass1s', mass1), ('mass2s', mass2),
                           ('spin1s', spin1z), ('spin2s', spin2z)):
            curr[key] = numpy.append(curr[key], numpy.array([value]))
        if point_fupper is not None:
            curr['freqcuts'] = numpy.append(curr['freqcuts'],
                                            numpy.array([point_fupper]))
        if mus is not None:
            # Append along axis 0 (the template index).
            curr['mus'] = numpy.append(curr['mus'],
                                       numpy.array([mus[:, :]]), axis=0)
def add_tmpltbank_from_xml_table(self, sngl_table, vary_fupper=False):
    """Add every template of a sngl_inspiral table to the bank.

    Parameters
    ----------
    sngl_table : sngl_inspiral_table
        List of sngl_inspiral templates.
    vary_fupper : bool
        If True, also store the extra information needed to compute
        distances with a varying upper frequency cutoff.
    """
    for row in sngl_table:
        self.add_point_by_masses(row.mass1, row.mass2,
                                 row.spin1z, row.spin2z,
                                 vary_fupper=vary_fupper)
def add_tmpltbank_from_hdf_file(self, hdf_fp, vary_fupper=False):
    """Add every template stored in an open HDF file to the bank.

    Parameters
    ----------
    hdf_fp : h5py.File object
        The template bank in HDF5 format; must provide the 'mass1',
        'mass2', 'spin1z' and 'spin2z' datasets.
    vary_fupper : bool
        If True, also store the extra information needed to compute
        distances with a varying upper frequency cutoff.
    """
    mass1s = hdf_fp['mass1'][:]
    mass2s = hdf_fp['mass2'][:]
    spin1zs = hdf_fp['spin1z'][:]
    spin2zs = hdf_fp['spin2z'][:]
    # zip instead of the Python-2-only xrange: works on Python 2 and 3.
    for m1, m2, s1z, s2z in zip(mass1s, mass2s, spin1zs, spin2zs):
        self.add_point_by_masses(m1, m2, s1z, s2z,
                                 vary_fupper=vary_fupper)
def output_all_points(self):
    """Return all points in the bank as four parallel lists.

    Returns
    -------
    mass1 : list
        mass1 values of every stored template.
    mass2 : list
        mass2 values.
    spin1z : list
        spin1z values.
    spin2z : list
        spin2z values.
    """
    mass1 = []
    mass2 = []
    spin1z = []
    spin2z = []
    # Iterate the stored arrays directly instead of indexing with the
    # Python-2-only xrange; bin order matches dict iteration order.
    for chi1_bin in self.massbank.values():
        for curr_bank in chi1_bin.values():
            mass1.extend(curr_bank['mass1s'])
            mass2.extend(curr_bank['mass2s'])
            spin1z.extend(curr_bank['spin1s'])
            spin2z.extend(curr_bank['spin2s'])
    return mass1, mass2, spin1z, spin2z
def _get_freq(freqfunc, m1, m2, s1z, s2z):
    """Evaluate a LALSimulation frequency function for one template.

    Parameters
    ----------
    freqfunc : lalsimulation FrequencyFunction wrapped object
        e.g. lalsimulation.fEOBNRv2RD.
    m1 : float-ish, i.e. castable to float
        First component mass in solar masses.
    m2 : float-ish
        Second component mass in solar masses.
    s1z : float-ish
        First component dimensionless spin S_1/m_1^2 projected onto L.
    s2z : float-ish
        Second component dimensionless spin S_2/m_2^2 projected onto L.

    Returns
    -------
    f : float
        Frequency in Hz.
    """
    # lalsimulation wants SI units (kg) and concrete Python scalars.
    m1kg, m2kg = (float(m) * lal.MSUN_SI for m in (m1, m2))
    return lalsimulation.SimInspiralGetFrequency(
        m1kg, m2kg, 0, 0, float(s1z), 0, 0, float(s2z), int(freqfunc))
def get_freq(freqfunc, m1, m2, s1z, s2z):
    """Evaluate the named LALSimulation frequency function (vectorized).

    Parameters
    ----------
    freqfunc : string
        Name of the frequency function to use, e.g. 'fEOBNRv2RD'.
    m1 : float or numpy.array
        First component mass in solar masses.
    m2 : float or numpy.array
        Second component mass in solar masses.
    s1z : float or numpy.array
        First component dimensionless spin S_1/m_1^2 projected onto L.
    s2z : float or numpy.array
        Second component dimensionless spin S_2/m_2^2 projected onto L.

    Returns
    -------
    f : float or numpy.array
        Frequency in Hz.
    """
    # Resolve the function by name on the lalsimulation module, then
    # defer to the vectorized wrapper.
    return _vec_get_freq(getattr(lalsimulation, freqfunc), m1, m2, s1z, s2z)
def frequency_cutoff_from_name(name, m1, m2, s1z, s2z):
    """Evaluate the named frequency cutoff function on a template.

    Parameters
    ----------
    name : string
        Name of the cutoff function.
    m1 : float or numpy.array
        First component mass in solar masses.
    m2 : float or numpy.array
        Second component mass in solar masses.
    s1z : float or numpy.array
        First component dimensionless spin S_1/m_1^2 projected onto L.
    s2z : float or numpy.array
        Second component dimensionless spin S_2/m_2^2 projected onto L.

    Returns
    -------
    f : float or numpy.array
        Frequency in Hz.
    """
    return named_frequency_cutoffs[name]({"mass1": m1, "mass2": m2,
                                          "spin1z": s1z, "spin2z": s2z})
def _get_imr_duration(m1, m2, s1z, s2z, f_low, approximant="SEOBNRv4"):
    """Approximate IMR template duration via lalsimulation formulae.

    Dispatches on the approximant name; raises RuntimeError for any
    approximant without a known duration formula.
    """
    m1, m2, s1z, s2z, f_low = (float(v) for v in (m1, m2, s1z, s2z, f_low))
    m1_si = m1 * lal.MSUN_SI
    m2_si = m2 * lal.MSUN_SI
    if approximant == "SEOBNRv2":
        chi = lalsimulation.SimIMRPhenomBComputeChi(m1, m2, s1z, s2z)
        time_length = lalsimulation.SimIMRSEOBNRv2ChirpTimeSingleSpin(
            m1_si, m2_si, chi, f_low)
    elif approximant == "IMRPhenomD":
        time_length = lalsimulation.SimIMRPhenomDChirpTime(
            m1_si, m2_si, s1z, s2z, f_low)
    elif approximant == "SEOBNRv4":
        # NB for no clear reason this function has f_low as first argument
        time_length = lalsimulation.SimIMRSEOBNRv4ROMTimeOfFrequency(
            f_low, m1_si, m2_si, s1z, s2z)
    else:
        raise RuntimeError("I can't calculate a duration for %s" % approximant)
    # FIXME Add an extra factor of 1.1 for 'safety' since the duration
    # functions are approximate
    return time_length * 1.1
def get_inspiral_tf(tc, mass1, mass2, spin1, spin2, f_low, n_points=100,
                    pn_2order=7, approximant='TaylorF2'):
    """Compute the time-frequency evolution of an inspiral signal.

    Return a tuple of time and frequency vectors tracking the evolution
    of an inspiral signal in the time-frequency plane.
    """
    def _log_track(f_lo, f_hi):
        # n_points log-spaced frequencies between f_lo and f_hi
        return numpy.logspace(numpy.log10(f_lo), numpy.log10(f_hi),
                              n_points)

    # The approximant string may be an expression of the template
    # parameters; evaluate it with those parameters in scope. A plain
    # approximant name raises NameError and is used as-is.
    class Params:
        pass
    params = Params()
    params.mass1 = mass1
    params.mass2 = mass2
    params.spin1z = spin1
    params.spin2z = spin2
    try:
        approximant = eval(approximant, {'__builtins__': None},
                           dict(params=params))
    except NameError:
        pass
    if approximant in ['TaylorF2', 'SPAtmplt']:
        from pycbc.waveform.spa_tmplt import findchirp_chirptime
        # FIXME spins are not taken into account
        track_f = _log_track(f_low, f_SchwarzISCO(mass1 + mass2))
        track_t = numpy.array([findchirp_chirptime(float(mass1),
                                                   float(mass2), float(f),
                                                   pn_2order)
                               for f in track_f])
    elif approximant in ['SEOBNRv2', 'SEOBNRv2_ROM_DoubleSpin',
                         'SEOBNRv2_ROM_DoubleSpin_HI']:
        f_high = get_final_freq('SEOBNRv2', mass1, mass2, spin1, spin2)
        track_f = _log_track(f_low, f_high)
        # use HI function as it has wider freq range validity
        track_t = numpy.array([
            lalsimulation.SimIMRSEOBNRv2ROMDoubleSpinHITimeOfFrequency(
                f, solar_mass_to_kg(mass1), solar_mass_to_kg(mass2),
                float(spin1), float(spin2)) for f in track_f])
    elif approximant in ['SEOBNRv4', 'SEOBNRv4_ROM']:
        f_high = get_final_freq('SEOBNRv4', mass1, mass2, spin1, spin2)
        # use frequency below final freq in case of rounding error
        track_f = _log_track(f_low, 0.999 * f_high)
        track_t = numpy.array([
            lalsimulation.SimIMRSEOBNRv4ROMTimeOfFrequency(
                f, solar_mass_to_kg(mass1), solar_mass_to_kg(mass2),
                float(spin1), float(spin2)) for f in track_f])
    else:
        raise ValueError('Approximant ' + approximant + ' not supported')
    return (tc - track_t, track_f)
def kerr_lightring_velocity(chi):
    """Return the (dimensionless) velocity at the Kerr light ring.

    Parameters
    ----------
    chi : float
        Dimensionless aligned spin. Values at or above 0.9996 are capped
        at 0.9996 because the root finder cannot solve the function
        beyond that spin.

    Returns
    -------
    float
        Velocity at the Kerr light ring.
    """
    # Collapse the two identical brentq branches into one, capping the
    # spin, and pass ``args`` as a proper one-element tuple (the original
    # passed a bare float and relied on scipy's lenient coercion).
    return brentq(kerr_lightring, 0, 0.8, args=(min(chi, 0.9996),))
def hybrid_meco_velocity(m1, m2, chi1, chi2, qm1=None, qm2=None):
    """Return the velocity of the hybrid MECO.

    Parameters
    ----------
    m1 : float
        Mass of the primary object in solar masses.
    m2 : float
        Mass of the secondary object in solar masses.
    chi1 : float
        Dimensionless spin of the primary object.
    chi2 : float
        Dimensionless spin of the secondary object.
    qm1 : {None, float}, optional
        Quadrupole-monopole term of the primary (1 for black holes);
        defaults to 1 when None.
    qm2 : {None, float}, optional
        Quadrupole-monopole term of the secondary (1 for black holes);
        defaults to 1 when None.

    Returns
    -------
    v : float
        The (dimensionless) velocity of the hybrid MECO.
    """
    qm1 = 1 if qm1 is None else qm1
    qm2 = 1 if qm2 is None else qm2
    # Search window: start at 0.1 to skip v=0; stop just below the Kerr
    # light-ring velocity of the mass-weighted spin.
    chi_eff = (chi1 * m1 + chi2 * m2) / (m1 + m2)
    v_hi = kerr_lightring_velocity(chi_eff) - 0.01
    result = minimize(hybridEnergy, 0.2,
                      args=(m1, m2, chi1, chi2, qm1, qm2),
                      bounds=[(0.1, v_hi)])
    return result.x.item()
def hybrid_meco_frequency(m1, m2, chi1, chi2, qm1=None, qm2=None):
    """Return the frequency (in Hz) of the hybrid MECO.

    Parameters
    ----------
    m1 : float
        Mass of the primary object in solar masses.
    m2 : float
        Mass of the secondary object in solar masses.
    chi1 : float
        Dimensionless spin of the primary object.
    chi2 : float
        Dimensionless spin of the secondary object.
    qm1 : {None, float}, optional
        Quadrupole-monopole term of the primary (1 for black holes);
        defaults to 1 when None.
    qm2 : {None, float}, optional
        Quadrupole-monopole term of the secondary (1 for black holes);
        defaults to 1 when None.

    Returns
    -------
    f : float
        The frequency (in Hz) of the hybrid MECO.
    """
    qm1 = 1 if qm1 is None else qm1
    qm2 = 1 if qm2 is None else qm2
    v_meco = hybrid_meco_velocity(m1, m2, chi1, chi2, qm1, qm2)
    return velocity_to_frequency(v_meco, m1 + m2)
def parse_parameters(self, parameters, array_class=None):
    """Determine which file fields must be loaded for some parameters.

    Parameters
    ----------
    parameters : (list of) strings
        The parameter(s) to retrieve; may be names of fields in
        ``samples_group``, virtual fields or methods of the array class
        (derivable from the stored fields), and/or functions of these.
    array_class : array class, optional
        Class used to parse the parameters; must provide a
        ``parse_parameters`` method. Defaults to ``FieldArray``.

    Returns
    -------
    list of str
        The fields to load from the file.
    """
    cls = FieldArray if array_class is None else array_class
    # The candidate fields are whatever is stored in the samples group.
    return cls.parse_parameters(parameters,
                                self[self.samples_group].keys())
q31987 | BaseInferenceFile._get_optional_args | train | def _get_optional_args(args, opts, err_on_missing=False, **kwargs):
"""Convenience function to retrieve arguments from an argparse
namespace.
Parameters
----------
args : list of str
List of arguments to retreive.
opts : argparse.namespace
Namespace to retreive arguments for.
err_on_missing : bool, optional
If an argument is not found in the namespace, raise an
AttributeError. Otherwise, just pass. Default is False.
\**kwargs :
All other keyword arguments are added to the return dictionary.
Any keyword argument that is the same as an argument in ``args``
will override what was retrieved from ``opts``.
Returns
-------
dict :
Dictionary mapping arguments to values retrieved from ``opts``. If
keyword arguments were provided, these will also be included in the
dictionary.
"""
parsed = {}
for arg in args:
try:
parsed[arg] = getattr(opts, arg)
except AttributeError as e:
if err_on_missing:
raise AttributeError(e)
else:
continue
parsed.update(kwargs)
return parsed | python | {
"resource": ""
} |
def samples_from_cli(self, opts, parameters=None, **kwargs):
    """Read samples using settings from parsed command-line options.

    Parameters
    ----------
    opts : argparse Namespace
        The options with the settings to use for loading samples (the
        sort of thing returned by ``ArgumentParser().parse_args``).
    parameters : (list of) str, optional
        Parameters to load; falls back to ``opts.parameters`` and, when
        that is also None, to the file's variable args.
    **kwargs :
        Passed through to ``read_samples``, overriding any same-named
        option.

    Returns
    -------
    FieldArray
        Array of the loaded samples.
    """
    if parameters is None:
        parameters = opts.parameters
    if parameters is None:
        parameters = self.variable_args
    # Pull any extra options recognised by the sampler-specific parser.
    _, extra_actions = self.extra_args_parser()
    extra_names = [action.dest for action in extra_actions]
    kwargs = self._get_optional_args(extra_names, opts, **kwargs)
    return self.read_samples(parameters, **kwargs)
def write_logevidence(self, lnz, dlnz):
    """Store the log evidence and its error in the file attrs.

    Results go to the file's 'log_evidence' and 'dlog_evidence'
    attributes.

    Parameters
    ----------
    lnz : float
        The log of the evidence.
    dlnz : float
        The error in the estimate of the log evidence.
    """
    self.attrs['log_evidence'] = lnz
    self.attrs['dlog_evidence'] = dlnz
def write_random_state(self, group=None, state=None):
    """Write the numpy random-generator state to the file.

    The state array is stored at ``<group>/random_state`` with the
    scalar parts of the state attached as attributes of that dataset.

    Parameters
    ----------
    group : str, optional
        Group to write the random state under; defaults to
        ``sampler_group``.
    state : tuple, optional
        The random state to write. If None, uses
        ``numpy.random.get_state()``.
    """
    if group is None:
        group = self.sampler_group
    if state is None:
        state = numpy.random.get_state()
    s, arr, pos, has_gauss, cached_gauss = state
    dataset_name = "/".join([group, "random_state"])
    # Create the dataset on first write; afterwards overwrite in place.
    if dataset_name not in self:
        self.create_dataset(dataset_name, arr.shape, fletcher32=True,
                            dtype=arr.dtype)
    self[dataset_name][:] = arr
    dset = self[dataset_name]
    dset.attrs["s"] = s
    dset.attrs["pos"] = pos
    dset.attrs["has_gauss"] = has_gauss
    dset.attrs["cached_gauss"] = cached_gauss
def read_random_state(self, group=None):
    """Read a stored numpy random-generator state from the file.

    Parameters
    ----------
    group : str, optional
        Group to read the random state from; defaults to
        ``sampler_group``.

    Returns
    -------
    tuple
        A 5-tuple that can be passed to ``numpy.random.set_state``.
    """
    if group is None:
        group = self.sampler_group
    dset = self["/".join([group, "random_state"])]
    return (dset.attrs["s"], dset[:], dset.attrs["pos"],
            dset.attrs["has_gauss"], dset.attrs["cached_gauss"])
def write_strain(self, strain_dict, group=None):
    """Write strain for each IFO to the file.

    Parameters
    ----------
    strain_dict : dict
        Mapping of IFO name to strain series; each series must provide
        ``delta_t`` and ``start_time`` (i.e. a time series — the
        original docstring said FrequencySeries, which looks like a
        copy-paste error; confirm with callers).
    group : {None, str}
        Parent group to write under; if None, writes to the top level.
    """
    subgroup = self.data_group + "/{ifo}/strain"
    path = subgroup if group is None else '/'.join([group, subgroup])
    for ifo, strain in strain_dict.items():
        key = path.format(ifo=ifo)
        self[key] = strain
        self[key].attrs['delta_t'] = strain.delta_t
        self[key].attrs['start_time'] = float(strain.start_time)
def write_stilde(self, stilde_dict, group=None):
    """Write frequency-domain strain for each IFO to the file.

    Parameters
    ----------
    stilde_dict : dict of FrequencySeries
        Mapping of IFO name to frequency-domain strain; each series must
        provide ``delta_f`` and ``epoch``.
    group : {None, str}
        Parent group to write under; if None, writes to the top level.
    """
    subgroup = self.data_group + "/{ifo}/stilde"
    path = subgroup if group is None else '/'.join([group, subgroup])
    for ifo, stilde in stilde_dict.items():
        key = path.format(ifo=ifo)
        self[key] = stilde
        self[key].attrs['delta_f'] = stilde.delta_f
        self[key].attrs['epoch'] = float(stilde.epoch)
def write_psd(self, psds, group=None):
    """Write the PSD for each IFO to the file.

    Parameters
    ----------
    psds : dict of FrequencySeries
        Mapping of IFO name to PSD; each must provide ``delta_f``.
    group : {None, str}
        Parent group to write under; defaults to ``data_group``.
    """
    subgroup = self.data_group + "/{ifo}/psds/0"
    path = subgroup if group is None else '/'.join([group, subgroup])
    for ifo in psds:
        key = path.format(ifo=ifo)
        self[key] = psds[ifo]
        self[key].attrs['delta_f'] = psds[ifo].delta_f
def write_injections(self, injection_file):
    """Copy injection parameters from the given HDF injection file.

    Everything in the injection file is copied to ``injections_group``.

    Parameters
    ----------
    injection_file : str
        Path to the HDF injection file.
    """
    try:
        with h5py.File(injection_file, "r") as fp:
            super(BaseInferenceFile, self).copy(fp, self.injections_group)
    except IOError:
        # Deliberately best-effort: an unreadable/non-HDF file is only
        # warned about. Use logging.warning; .warn is a deprecated alias.
        logging.warning("Could not read %s as an HDF file", injection_file)
def read_injections(self):
    """Load the injection parameters stored in the file.

    Returns
    -------
    FieldArray
        Array of the injection parameters.
    """
    injset = InjectionSet(self.filename, hdf_group=self.injections_group)
    try:
        return injset.table.view(FieldArray)
    finally:
        # InjectionSet opened a fresh handle on this file; close it.
        injset._injhandler.filehandler.close()
def write_command_line(self):
    """Record the current command line in the file's ``attrs['cmd']``.

    If 'cmd' already exists in the file (this can happen when resuming
    from a checkpoint), ``attrs['cmd']`` becomes a list with the current
    command line first, followed by all previous ones.
    """
    try:
        previous = self.attrs["cmd"]
    except KeyError:
        previous = []
    if isinstance(previous, str):
        # a single stored command line becomes a one-element history
        previous = [previous]
    elif isinstance(previous, numpy.ndarray):
        previous = previous.tolist()
    self.attrs["cmd"] = [" ".join(sys.argv)] + previous
def get_slice(self, thin_start=None, thin_interval=None, thin_end=None):
    """Build a slice for thinning arrays read from the file.

    Each argument falls back to the file's corresponding ``thin_*``
    attribute when not given.

    Parameters
    ----------
    thin_start : int, optional
        The starting index to use.
    thin_interval : int, optional
        The interval to use (rounded up to an integer when given
        explicitly).
    thin_end : int, optional
        The end index to use.

    Returns
    -------
    slice
        The slice implementing the requested thinning.
    """
    start = int(self.thin_start) if thin_start is None else int(thin_start)
    if thin_interval is None:
        step = self.thin_interval
    else:
        step = int(numpy.ceil(thin_interval))
    stop = self.thin_end if thin_end is None else int(thin_end)
    return slice(start, stop, step)
def copy_metadata(self, other):
    """Copy all top-level ``.attrs`` metadata into another file.

    Parameters
    ----------
    other : InferenceFile
        An open inference file to write the attributes to.
    """
    logging.info("Copying metadata")
    for key, value in self.attrs.items():
        other.attrs[key] = value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.