code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
self.logger.info('Generating TS cube')
schema = ConfigSchema(self.defaults['tscube'])
schema.add_option('make_plots', True)
schema.add_option('write_fits', True)
schema.add_option('write_npy', True)
config = schema.create_config(self.config['tscube'], **kwargs)
maps = self._make_ts_cube(prefix, **config)
if config['make_plots']:
plotter = plotting.AnalysisPlotter(self.config['plotting'],
fileio=self.config['fileio'],
logging=self.config['logging'])
plotter.make_tsmap_plots(maps, self.roi, suffix='tscube')
self.logger.info("Finished TS cube")
return maps | def tscube(self, prefix='', **kwargs) | Generate a spatial TS map for a source component with
properties defined by the `model` argument. This method uses
the `gttscube` ST application for source fitting and will
simultaneously fit the test source normalization as well as
the normalizations of any background components that are
currently free. The output of this method is a dictionary
containing `~fermipy.skymap.Map` objects with the TS and
amplitude of the best-fit test source. By default this method
will also save maps to FITS files and render them as image
files.
Parameters
----------
prefix : str
Optional string that will be prepended to all output files
(FITS and rendered images).
model : dict
Dictionary defining the properties of the test source.
do_sed : bool
Compute the energy bin-by-bin fits.
nnorm : int
Number of points in the likelihood v. normalization scan.
norm_sigma : float
Number of sigma to use for the scan range.
tol : float
Critetia for fit convergence (estimated vertical distance
to min < tol ).
tol_type : int
Absoulte (0) or relative (1) criteria for convergence.
max_iter : int
Maximum number of iterations for the Newton's method fitter
remake_test_source : bool
If true, recomputes the test source image (otherwise just shifts it)
st_scan_level : int
make_plots : bool
Write image files.
write_fits : bool
Write a FITS file with the results of the analysis.
Returns
-------
maps : dict
A dictionary containing the `~fermipy.skymap.Map` objects
for TS and source amplitude. | 4.433907 | 4.197336 | 1.056362 |
ewidth = utils.edge_to_width(ebins)
ectr = np.exp(utils.edge_to_center(np.log(ebins)))
r68 = psf.containment_angle(ectr, fraction=0.68)
if spatial_model != 'PointSource':
r68[r68 < spatial_size] = spatial_size
# * np.ones((len(ectr), 31))
theta_edges = np.linspace(0.0, 3.0, 31)[np.newaxis, :]
theta_edges = theta_edges * r68[:, np.newaxis]
theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])
domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)
if spatial_model == 'PointSource':
sig_pdf = domega * psf.interp(ectr[:, np.newaxis], theta)
elif spatial_model == 'RadialGaussian':
sig_pdf = domega * utils.convolve2d_gauss(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
theta, spatial_size / 1.5095921854516636, nstep=2000)
elif spatial_model == 'RadialDisk':
sig_pdf = domega * utils.convolve2d_disk(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
theta, spatial_size / 0.8246211251235321)
else:
raise ValueError('Invalid spatial model: {}'.format(spatial_model))
sig_pdf *= (np.pi / 180.)**2
sig_flux = fn.flux(ebins[:-1], ebins[1:])
# Background and signal counts
bkgc = bkg[..., np.newaxis] * domega * exp[..., np.newaxis] * \
ewidth[..., np.newaxis] * (np.pi / 180.)**2
sigc = sig_pdf * sig_flux[..., np.newaxis] * exp[..., np.newaxis]
return sigc, bkgc | def compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0, spatial_model='PointSource',
spatial_size=1E-3) | Calculate the observed signal and background counts given models
for the exposure, background intensity, PSF, and source flux.
Parameters
----------
ebins : `~numpy.ndarray`
Array of energy bin edges.
exp : `~numpy.ndarray`
Model for exposure.
psf : `~fermipy.irfs.PSFModel`
Model for average PSF.
bkg : `~numpy.ndarray`
Array of background intensities.
fn : `~fermipy.spectrum.SpectralFunction`
egy_dim : int
Index of energy dimension in ``bkg`` and ``exp`` arrays. | 3.479447 | 3.461125 | 1.005294 |
if sum_axes is None:
sum_axes = np.arange(sig.ndim)
sig = np.expand_dims(sig, -1)
bkg = np.expand_dims(bkg, -1)
sig_sum = np.apply_over_axes(np.sum, sig, sum_axes)
bkg_sum = np.apply_over_axes(np.sum, bkg, sum_axes)
bkg_fit_sum = None
if bkg_fit is not None:
bkg_fit = np.expand_dims(bkg_fit, -1)
bkg_fit_sum = np.apply_over_axes(np.sum, bkg_fit, sum_axes)
sig_rebin = sig
bkg_rebin = bkg
bkg_fit_rebin = bkg_fit
if rebin_axes:
sig_rebin = sig.copy()
bkg_rebin = bkg.copy()
if bkg_fit is not None:
bkg_fit_rebin = bkg_fit.copy()
for dim, rebin in zip(sum_axes, rebin_axes):
sig_rebin = sum_bins(sig_rebin, dim, rebin)
bkg_rebin = sum_bins(bkg_rebin, dim, rebin)
if bkg_fit is not None:
bkg_fit_rebin = sum_bins(bkg_fit_rebin, dim, rebin)
# Find approx solution using coarse binning and summed arrays
sig_scale = 10**np.linspace(0.0, 10.0, 51) * (min_counts / sig_sum)
vals_approx = _solve_norm(sig_rebin, bkg_rebin, ts_thresh, min_counts,
sig_scale, sum_axes, bkg_fit_rebin)
# Refine solution using an interval (0.1,10) around approx
# solution
sig_scale = (10**np.linspace(0.0, 1.0, 21) *
np.fmax(0.333 * vals_approx[..., None],
min_counts / sig_sum))
vals = _solve_norm(sig, bkg, ts_thresh, min_counts, sig_scale,
sum_axes, bkg_fit)
#sig_scale = 10**np.linspace(0.0, 10.0, 101)*(min_counts / sig_sum)
# vals = _solve_norm(sig, bkg, ts_thresh, min_counts, sig_scale2,
# sum_axes, bkg_fit)
return vals | def compute_norm(sig, bkg, ts_thresh, min_counts, sum_axes=None, bkg_fit=None,
rebin_axes=None) | Solve for the normalization of the signal distribution at which the
detection test statistic (twice delta-loglikelihood ratio) is >=
``ts_thresh`` AND the number of signal counts >= ``min_counts``.
This function uses the Asimov method to calculate the median
expected TS when the model for the background is fixed (no
uncertainty on the background amplitude).
Parameters
----------
sig : `~numpy.ndarray`
Array of signal amplitudes in counts.
bkg : `~numpy.ndarray`
Array of background amplitudes in counts.
ts_thresh : float
Test statistic threshold.
min_counts : float
Counts threshold.
sum_axes : list
Axes over which the source test statistic should be summed.
By default the summation will be performed over all
dimensions.
bkg_fit : `~numpy.ndarray`
Array of background amplitudes in counts for the fitting
model. If None then the fit model will be equal to the data
model. | 2.079542 | 2.081955 | 0.998841 |
irf = create_irf(event_class, event_type)
theta = np.degrees(np.arccos(cth))
m = np.zeros((len(dtheta), len(egy), len(cth)))
for i, x in enumerate(egy):
for j, y in enumerate(theta):
m[:, i, j] = irf.psf().value(dtheta, x, y, 0.0)
return m | def create_psf(event_class, event_type, dtheta, egy, cth) | Create an array of PSF response values versus energy and
inclination angle.
Parameters
----------
egy : `~numpy.ndarray`
Energy in MeV.
cth : `~numpy.ndarray`
Cosine of the incidence angle. | 2.99386 | 3.334359 | 0.897882 |
irf = create_irf(event_class, event_type)
theta = np.degrees(np.arccos(cth))
v = np.zeros((len(erec), len(egy), len(cth)))
m = (erec[:,None] / egy[None,:] < 3.0) & (erec[:,None] / egy[None,:] > 0.33333)
# m |= ((erec[:,None] / egy[None,:] < 3.0) &
# (erec[:,None] / egy[None,:] > 0.5) & (egy[None,:] < 10**2.5))
m = np.broadcast_to(m[:,:,None], v.shape)
try:
x = np.ones(v.shape)*erec[:,None,None]
y = np.ones(v.shape)*egy[None,:,None]
z = np.ones(v.shape)*theta[None,None,:]
v[m] = irf.edisp().value(np.ravel(x[m]), np.ravel(y[m]), np.ravel(z[m]), 0.0)
except:
for i, x in enumerate(egy):
for j, y in enumerate(theta):
m = (erec / x < 3.0) & (erec / x > 0.333)
v[m, i, j] = irf.edisp().value(erec[m], x, y, 0.0)
return v | def create_edisp(event_class, event_type, erec, egy, cth) | Create an array of energy response values versus energy and
inclination angle.
Parameters
----------
egy : `~numpy.ndarray`
Energy in MeV.
cth : `~numpy.ndarray`
Cosine of the incidence angle. | 2.670632 | 2.778268 | 0.961258 |
irf = create_irf(event_class, event_type)
irf.aeff().setPhiDependence(False)
theta = np.degrees(np.arccos(cth))
# Exposure Matrix
# Dimensions are Etrue and incidence angle
m = np.zeros((len(egy), len(cth)))
for i, x in enumerate(egy):
for j, y in enumerate(theta):
m[i, j] = irf.aeff().value(x, y, 0.0)
return m | def create_aeff(event_class, event_type, egy, cth) | Create an array of effective areas versus energy and incidence
angle. Binning in energy and incidence angle is controlled with
the egy and cth input parameters.
Parameters
----------
event_class : str
Event class string (e.g. P8R2_SOURCE_V6).
event_type : list
egy : array_like
Evaluation points in energy (MeV).
cth : array_like
Evaluation points in cosine of the incidence angle. | 5.178773 | 5.381136 | 0.962394 |
if npts is None:
npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.025))
exp = np.zeros((len(egy), len(cth_bins) - 1))
cth_bins = utils.split_bin_edges(cth_bins, npts)
cth = edge_to_center(cth_bins)
ltw = ltc.get_skydir_lthist(skydir, cth_bins).reshape(-1, npts)
for et in event_types:
aeff = create_aeff(event_class, et, egy, cth)
aeff = aeff.reshape(exp.shape + (npts,))
exp += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)
return exp | def calc_exp(skydir, ltc, event_class, event_types,
egy, cth_bins, npts=None) | Calculate the exposure on a 2D grid of energy and incidence angle.
Parameters
----------
npts : int
Number of points by which to sample the response in each
incidence angle bin. If None then npts will be automatically
set such that incidence angle is sampled on intervals of <
0.05 in Cos(Theta).
Returns
-------
exp : `~numpy.ndarray`
2D Array of exposures vs. energy and incidence angle. | 3.24624 | 3.418835 | 0.949517 |
if npts is None:
npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05))
wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1))
exps = np.zeros((len(egy), len(cth_bins) - 1))
cth_bins = utils.split_bin_edges(cth_bins, npts)
cth = edge_to_center(cth_bins)
ltw = ltc.get_skydir_lthist(skydir, cth_bins)
ltw = ltw.reshape(-1, npts)
for et in event_types:
rsp = rsp_fn(event_class, et, x, egy, cth)
aeff = create_aeff(event_class, et, egy, cth)
rsp = rsp.reshape(wrsp.shape + (npts,))
aeff = aeff.reshape(exps.shape + (npts,))
wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] *
ltw[np.newaxis, np.newaxis, :, :], axis=-1)
exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)
exps_inv = np.zeros_like(exps)
exps_inv[exps > 0] = 1./exps[exps>0]
wrsp *= exps_inv[np.newaxis, :, :]
return wrsp | def create_avg_rsp(rsp_fn, skydir, ltc, event_class, event_types, x,
egy, cth_bins, npts=None) | Calculate the weighted response function. | 2.629653 | 2.644124 | 0.994527 |
return create_avg_rsp(create_psf, skydir, ltc,
event_class, event_types,
dtheta, egy, cth_bins, npts) | def create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
egy, cth_bins, npts=None) | Generate model for exposure-weighted PSF averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
Energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle. | 4.343996 | 6.506533 | 0.667636 |
return create_avg_rsp(create_edisp, skydir, ltc,
event_class, event_types,
erec, egy, cth_bins, npts) | def create_avg_edisp(skydir, ltc, event_class, event_types, erec,
egy, cth_bins, npts=None) | Generate model for exposure-weighted DRM averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
True energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle. | 4.188628 | 7.403777 | 0.565742 |
#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
etrue_bins = 10**np.linspace(1.0, 6.5, nbin * 5.5 + 1)
etrue = 10**utils.edge_to_center(np.log10(etrue_bins))
psf = create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
etrue, cth_bins)
drm = calc_drm(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, nbin=nbin)
cnts = calc_counts(skydir, ltc, event_class, event_types,
etrue_bins, cth_bins, fn)
wts = drm * cnts[None, :, :]
wts_norm = np.sum(wts, axis=1)
wts_norm[wts_norm == 0] = 1.0
wts = wts / wts_norm[:, None, :]
wpsf = np.sum(wts[None, :, :, :] * psf[:, None, :, :], axis=2)
wts = np.sum(wts[None, :, :, :], axis=2)
if npts > 1:
shape = (wpsf.shape[0], int(wpsf.shape[1] / npts), npts, wpsf.shape[2])
wpsf = np.sum((wpsf * wts).reshape(shape), axis=2)
shape = (wts.shape[0], int(wts.shape[1] / npts), npts, wts.shape[2])
wpsf = wpsf / np.sum(wts.reshape(shape), axis=2)
return wpsf | def create_wtd_psf(skydir, ltc, event_class, event_types, dtheta,
egy_bins, cth_bins, fn, nbin=64, npts=1) | Create an exposure- and dispersion-weighted PSF model for a source
with spectral parameterization ``fn``. The calculation performed
by this method accounts for the influence of energy dispersion on
the PSF.
Parameters
----------
dtheta : `~numpy.ndarray`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
nbin : int
Number of bins per decade in true energy.
npts : int
Number of points by which to oversample each energy bin. | 2.613457 | 2.562814 | 1.019761 |
npts = int(np.ceil(128. / bins_per_dec(egy_bins)))
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
etrue_bins = 10**np.linspace(1.0, 6.5, nbin * 5.5 + 1)
egy = 10**utils.edge_to_center(np.log10(egy_bins))
egy_width = utils.edge_to_width(egy_bins)
etrue = 10**utils.edge_to_center(np.log10(etrue_bins))
edisp = create_avg_edisp(skydir, ltc, event_class, event_types,
egy, etrue, cth_bins)
edisp = edisp * egy_width[:, None, None]
edisp = sum_bins(edisp, 0, npts)
return edisp | def calc_drm(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, nbin=64) | Calculate the detector response matrix. | 3.795197 | 3.882791 | 0.97744 |
#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
exp = calc_exp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins)
dnde = fn.dnde(egy_bins)
cnts = loglog_quad(egy_bins, exp * dnde[:, None], 0)
cnts = sum_bins(cnts, 0, npts)
return cnts | def calc_counts(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, npts=1) | Calculate the expected counts vs. true energy and incidence angle
for a source with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinate.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
npts : int
Number of points by which to oversample each energy bin. | 5.06945 | 5.087697 | 0.996414 |
#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
# Split energy bins
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
etrue_bins = 10**np.linspace(1.0, 6.5, nbin * 5.5 + 1)
drm = calc_drm(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, nbin=nbin)
cnts_etrue = calc_counts(skydir, ltc, event_class, event_types,
etrue_bins, cth_bins, fn)
cnts = np.sum(cnts_etrue[None, :, :] * drm[:, :, :], axis=1)
cnts = sum_bins(cnts, 0, npts)
return cnts | def calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=16, npts=1) | Calculate the expected counts vs. observed energy and true
incidence angle for a source with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinate.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
nbin : int
Number of points per decade with which to sample true energy.
npts : int
Number of points by which to oversample each reconstructed energy bin. | 3.885319 | 3.972107 | 0.978151 |
cnts = calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=nbin)
flux = fn.flux(egy_bins[:-1], egy_bins[1:])
return cnts / flux[:, None] | def calc_wtd_exp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=16) | Calculate the effective exposure.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
nbin : int
Number of points per decade with which to sample true energy. | 3.648751 | 4.670244 | 0.781276 |
evals = np.sqrt(ebins[1:] * ebins[:-1])
exp = np.zeros((len(evals), ltc.hpx.npix))
for et in event_types:
aeff = create_aeff(event_class, et, evals, ltc.costh_center)
exp += np.sum(aeff.T[:, :, np.newaxis] *
ltc.data[:, np.newaxis, :], axis=0)
hpx = HPX(ltc.hpx.nside, ltc.hpx.nest,
ltc.hpx.coordsys, ebins=ebins)
return cls(exp, hpx) | def create(cls, ltc, event_class, event_types, ebins) | Create an exposure map from a livetime cube. This method will
generate an exposure map with the same geometry as the
livetime cube (nside, etc.).
Parameters
----------
ltc : `~fermipy.irfs.LTCube`
Livetime cube object.
event_class : str
Event class string.
event_types : list
List of event type strings, e.g. ['FRONT','BACK'].
ebins : `~numpy.ndarray`
Energy bin edges in MeV. | 4.021874 | 3.724232 | 1.07992 |
if scale_fn is None and self.scale_fn is not None:
scale_fn = self.scale_fn
if scale_fn is None:
scale_factor = 1.0
else:
dtheta = dtheta / scale_fn(self.energies[ebin])
scale_factor = 1. / scale_fn(self.energies[ebin])**2
vals = 10**np.interp(dtheta, self.dtheta, np.log10(self.val[:, ebin]))
return vals * scale_factor | def eval(self, ebin, dtheta, scale_fn=None) | Evaluate the PSF at the given energy bin index.
Parameters
----------
ebin : int
Index of energy bin.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV. | 2.681701 | 2.537277 | 1.056921 |
if scale_fn is None and self.scale_fn:
scale_fn = self.scale_fn
log_energies = np.log10(energies)
shape = (energies * dtheta).shape
scale_factor = np.ones(shape)
if scale_fn is not None:
dtheta = dtheta / scale_fn(energies)
scale_factor = 1. / scale_fn(energies)**2
vals = np.exp(self._psf_fn((dtheta, log_energies)))
return vals * scale_factor | def interp(self, energies, dtheta, scale_fn=None) | Evaluate the PSF model at an array of energies and angular
separations.
Parameters
----------
energies : array_like
Array of energies in MeV.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV. | 3.203911 | 3.206898 | 0.999068 |
npts = 4
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
egy = np.exp(utils.edge_to_center(np.log(egy_bins)))
log_energies = np.log10(egy)
vals = self.interp(egy[None, :], dtheta[:, None],
scale_fn=scale_fn)
wts = np.exp(self._wts_fn((log_energies,)))
wts = wts.reshape((1,) + wts.shape)
vals = np.sum(
(vals * wts).reshape((vals.shape[0], int(vals.shape[1] / npts), npts)), axis=2)
vals /= np.sum(wts.reshape(wts.shape[0],
int(wts.shape[1] / npts), npts), axis=2)
return vals | def interp_bin(self, egy_bins, dtheta, scale_fn=None) | Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.
Parameters
----------
egy_bins : array_like
Energy bin edges in MeV.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV. | 3.158872 | 3.385141 | 0.933158 |
if energies is None:
energies = self.energies
vals = self.interp(energies[np.newaxis, :], self.dtheta[:, np.newaxis],
scale_fn=scale_fn)
dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))
return self._calc_containment(dtheta, vals, fraction) | def containment_angle(self, energies=None, fraction=0.68, scale_fn=None) | Evaluate the PSF containment angle at a sequence of energies. | 3.969383 | 3.780724 | 1.0499 |
vals = self.interp_bin(egy_bins, self.dtheta, scale_fn=scale_fn)
dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))
return self._calc_containment(dtheta, vals, fraction) | def containment_angle_bin(self, egy_bins, fraction=0.68, scale_fn=None) | Evaluate the PSF containment angle averaged over energy bins. | 4.888392 | 4.686756 | 1.043022 |
if isinstance(event_types, int):
event_types = bitmask_to_bits(event_types)
if fn is None:
fn = spectrum.PowerLaw([1E-13, -2.0])
dtheta = np.logspace(-4, 1.75, ndtheta)
dtheta = np.insert(dtheta, 0, [0])
log_energies = np.log10(energies)
egy_bins = 10**utils.center_to_edge(log_energies)
if cth_bins is None:
cth_bins = np.array([0.2, 1.0])
if use_edisp:
psf = create_wtd_psf(skydir, ltc, event_class, event_types,
dtheta, egy_bins, cth_bins, fn, nbin=nbin)
wts = calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=nbin)
else:
psf = create_avg_psf(skydir, ltc, event_class, event_types,
dtheta, energies, cth_bins)
wts = calc_counts(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn)
exp = calc_exp(skydir, ltc, event_class, event_types,
energies, cth_bins)
return cls(dtheta, energies, cth_bins, np.squeeze(exp), np.squeeze(psf),
np.squeeze(wts)) | def create(cls, skydir, ltc, event_class, event_types, energies, cth_bins=None,
ndtheta=500, use_edisp=False, fn=None, nbin=64) | Create a PSFModel object. This class can be used to evaluate the
exposure-weighted PSF for a source with a given observing
profile and energy distribution.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
energies : `~numpy.ndarray`
Grid of energies at which the PSF will be pre-computed.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the inclination angle.
use_edisp : bool
Generate the PSF model accounting for the influence of
energy dispersion.
fn : `~fermipy.spectrum.SpectralFunction`
Model for the spectral energy distribution of the source. | 2.711222 | 2.623994 | 1.033243 |
if dry_run:
sys.stdout.write("rm %s\n" % filepath)
else:
try:
os.remove(filepath)
except OSError:
pass | def remove_file(filepath, dry_run=False) | Remove the file at filepath
Catches exception if the file does not exist.
If dry_run is True, print name of file to be removed, but do not remove it. | 2.450669 | 2.961616 | 0.827477 |
remove_file(logfile, dry_run)
for outfile in outfiles.values():
remove_file(outfile, dry_run) | def clean_job(logfile, outfiles, dry_run=False) | Removes log file and files created by failed jobs.
If dry_run is True, print name of files to be removed, but do not remove them. | 2.996577 | 3.791335 | 0.790375 |
if not os.path.exists(logfile):
return JobStatus.ready
if exited in open(logfile).read():
return JobStatus.failed
elif successful in open(logfile).read():
return JobStatus.done
return JobStatus.running | def check_log(logfile, exited='Exited with exit code',
successful='Successfully completed') | Check a log file to determine status of LSF job
Often logfile doesn't exist because the job hasn't begun
to run. It is unclear what you want to do in that case...
Parameters
----------
logfile : str
String with path to logfile
exited : str
Value to check for in existing logfile for exit with failure
successful : str
Value to check for in existing logfile for success
Returns str, one of 'Pending', 'Running', 'Done', 'Failed' | 2.907384 | 3.196163 | 0.909648 |
return check_log(job_details.logfile, cls.string_exited, cls.string_successful) | def check_job(cls, job_details) | Check the status of a specfic job | 17.748749 | 18.218981 | 0.97419 |
raise NotImplementedError("SysInterface.dispatch_job_hook") | def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout) | Hook to dispatch a single job | 23.162207 | 24.2691 | 0.954391 |
try:
job_details = link.jobs[key]
except KeyError:
print(key, link.jobs)
job_config = job_details.job_config
link.update_args(job_config)
logfile = job_config['logfile']
try:
self.dispatch_job_hook(link, key, job_config, logfile, stream)
job_details.status = JobStatus.running
except IOError:
job_details.status = JobStatus.failed
if job_archive is not None:
job_archive.register_job(job_details)
return job_details | def dispatch_job(self, link, key, job_archive, stream=sys.stdout) | Function to dispatch a single job
Parameters
----------
link : `Link`
Link object that sendes the job
key : str
Key used to identify this particular job
job_archive : `JobArchive`
Archive used to keep track of jobs
Returns `JobDetails` object | 3.541371 | 3.513665 | 1.007885 |
failed = False
if job_dict is None:
job_dict = link.jobs
for job_key, job_details in sorted(job_dict.items()):
job_config = job_details.job_config
# clean failed jobs
if job_details.status == JobStatus.failed:
clean_job(job_details.logfile,
job_details.outfiles, self._dry_run)
# clean_job(job_details.logfile, {}, self._dry_run)
job_config['logfile'] = job_details.logfile
new_job_details = self.dispatch_job(
link, job_key, job_archive, stream)
if new_job_details.status == JobStatus.failed:
failed = True
clean_job(new_job_details.logfile,
new_job_details.outfiles, self._dry_run)
link.jobs[job_key] = new_job_details
if failed:
return JobStatus.failed
return JobStatus.done | def submit_jobs(self, link, job_dict=None, job_archive=None, stream=sys.stdout) | Run the `Link` with all of the items job_dict as input.
If job_dict is None, the job_dict will be take from link.jobs
Returns a `JobStatus` enum | 2.947506 | 2.866267 | 1.028343 |
failed = False
if job_dict is None:
job_dict = link.jobs
for job_details in job_dict.values():
# clean failed jobs
if job_details.status == JobStatus.failed or clean_all:
# clean_job(job_details.logfile, job_details.outfiles, self._dry_run)
clean_job(job_details.logfile, {}, self._dry_run)
job_details.status = JobStatus.ready
if failed:
return JobStatus.failed
return JobStatus.done | def clean_jobs(self, link, job_dict=None, clean_all=False) | Clean up all the jobs associated with this link.
Returns a `JobStatus` enum | 3.669559 | 3.655482 | 1.003851 |
if not hasattr(get_function_spec, 'fndict'):
modelfile = os.path.join('$FERMIPY_ROOT',
'data', 'models.yaml')
modelfile = os.path.expandvars(modelfile)
get_function_spec.fndict = yaml.load(open(modelfile))
if not name in get_function_spec.fndict.keys():
raise Exception('Invalid Function Name: %s' % name)
return get_function_spec.fndict[name] | def get_function_spec(name) | Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
default : dict
Parameter defaults dictionary. | 3.190712 | 3.390457 | 0.941086 |
if spatial_model in ['SkyDirFunction', 'PointSource',
'Gaussian']:
return 'SkyDirFunction'
elif spatial_model in ['SpatialMap']:
return 'SpatialMap'
elif spatial_model in ['RadialGaussian', 'RadialDisk']:
try:
import pyLikelihood
if hasattr(pyLikelihood, 'RadialGaussian'):
return spatial_model
else:
return 'SpatialMap'
except Exception:
return spatial_model
else:
return spatial_model | def get_spatial_type(spatial_model) | Translate a spatial model string to a spatial type. | 3.725383 | 3.767278 | 0.988879 |
o = get_function_defaults(name)
pars_dict = pars_dict.copy()
for k in o.keys():
if not k in pars_dict:
continue
v = pars_dict[k]
if not isinstance(v, dict):
v = {'name': k, 'value': v}
o[k].update(v)
kw = dict(update_bounds=update_bounds,
rescale=rescale)
if 'min' in v or 'max' in v:
kw['update_bounds'] = False
if 'scale' in v:
kw['rescale'] = False
o[k] = make_parameter_dict(o[k], **kw)
return o | def create_pars_from_dict(name, pars_dict, rescale=True, update_bounds=False) | Create a dictionary for the parameters of a function.
Parameters
----------
name : str
Name of the function.
pars_dict : dict
Existing parameter dict that will be merged with the
default dictionary created by this method.
rescale : bool
Rescale parameter values. | 2.65844 | 2.89035 | 0.919764 |
o = copy.deepcopy(pdict)
o.setdefault('scale', 1.0)
if rescale:
value, scale = utils.scale_parameter(o['value'] * o['scale'])
o['value'] = np.abs(value) * np.sign(o['value'])
o['scale'] = np.abs(scale) * np.sign(o['scale'])
if 'error' in o:
o['error'] /= np.abs(scale)
if update_bounds:
o['min'] = o['value'] * 1E-3
o['max'] = o['value'] * 1E3
if fixed_par:
o['min'] = o['value']
o['max'] = o['value']
if float(o['min']) > float(o['value']):
o['min'] = o['value']
if float(o['max']) < float(o['value']):
o['max'] = o['value']
return o | def make_parameter_dict(pdict, fixed_par=False, rescale=True,
update_bounds=False) | Update a parameter dictionary. This function will automatically
set the parameter scale and bounds if they are not defined.
Bounds are also adjusted to ensure that they encompass the
parameter value. | 2.210523 | 2.225763 | 0.993153 |
o = {}
for pname, pdict in pars_dict.items():
o[pname] = {}
for k, v in pdict.items():
if k == 'free':
o[pname][k] = bool(int(v))
elif k == 'name':
o[pname][k] = v
else:
o[pname][k] = float(v)
return o | def cast_pars_dict(pars_dict) | Cast the bool and float elements of a parameters dict to
the appropriate python types. | 2.39161 | 2.376287 | 1.006449 |
hlist = []
nskip = 3
for fname in flist:
fin = fits.open(fname)
if len(hlist) == 0:
if fin[1].name == 'SKYMAP':
nskip = 4
start = 0
else:
start = nskip
for h in fin[start:]:
hlist.append(h)
hdulistout = fits.HDUList(hlist)
return hdulistout | def do_gather(flist) | Gather all the HDUs from a list of files | 3.535014 | 3.278249 | 1.078324 |
usage = "usage: %(prog)s [options] "
description = "Gather source maps from Fermi-LAT files."
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument('-o', '--output', default=None, type=str,
help='Output file.')
parser.add_argument('--clobber', default=False, action='store_true',
help='Overwrite output file.')
parser.add_argument('--gzip', action='store_true',
help='Compress output file')
parser.add_argument('--rm', action='store_true',
help='Remove input files.')
parser.add_argument('files', nargs='+', default=None,
help='List of input files.')
args = parser.parse_args()
hdulistout = do_gather(args.files)
if args.output:
hdulistout.writeto(args.output, clobber=args.clobber)
if args.gzip:
os.system('gzip -9 %s'%args.output)
if args.rm:
for farg in args.files:
flist = glob.glob(farg)
for ffound in flist:
os.path.unlink(ffound) | def main() | Main function for command line usage | 2.791026 | 2.765627 | 1.009184 |
parser = argparse.ArgumentParser(usage="job_archive.py [options]",
description="Browse a job archive")
parser.add_argument('--jobs', action='store', dest='job_archive_table',
type=str, default='job_archive_temp2.fits', help="Job archive file")
parser.add_argument('--files', action='store', dest='file_archive_table',
type=str, default='file_archive_temp2.fits', help="File archive file")
parser.add_argument('--base', action='store', dest='base_path',
type=str, default=os.path.abspath('.'), help="File archive base path")
args = parser.parse_args(sys.argv[1:])
job_ar = JobArchive.build_archive(**args.__dict__)
job_ar.table.pprint() | def main_browse() | Entry point for command line use for browsing a JobArchive | 3.090541 | 2.860411 | 1.080453 |
return self._counters[JobStatus.no_job] +\
self._counters[JobStatus.unknown] +\
self._counters[JobStatus.not_ready] +\
self._counters[JobStatus.ready] | def n_waiting(self) | Return the number of jobs in various waiting states | 5.009769 | 4.034485 | 1.241737 |
return self._counters[JobStatus.failed] + self._counters[JobStatus.partial_failed] | def n_failed(self) | Return the number of failed jobs | 10.399579 | 8.199286 | 1.268352 |
if self.n_total == 0:
return JobStatus.no_job
elif self.n_done == self.n_total:
return JobStatus.done
elif self.n_failed > 0:
# If more that a quater of the jobs fail, fail the whole thing
if self.n_failed > self.n_total / 4.:
return JobStatus.failed
return JobStatus.partial_failed
elif self.n_running > 0:
return JobStatus.running
elif self.n_pending > 0:
return JobStatus.pending
return JobStatus.ready | def get_status(self) | Return an overall status based
on the number of jobs in various states. | 2.971392 | 2.782555 | 1.067865 |
col_dbkey = Column(name='dbkey', dtype=int)
col_jobname = Column(name='jobname', dtype='S64')
col_jobkey = Column(name='jobkey', dtype='S64')
col_appname = Column(name='appname', dtype='S64')
col_logfile = Column(name='logfile', dtype='S256')
col_job_config = Column(name='job_config', dtype='S1024')
col_timestamp = Column(name='timestamp', dtype=int)
col_infile_refs = Column(name='infile_refs', dtype=int, shape=(2))
col_outfile_refs = Column(name='outfile_refs', dtype=int, shape=(2))
col_rmfile_refs = Column(name='rmfile_refs', dtype=int, shape=(2))
col_intfile_refs = Column(name='intfile_refs', dtype=int, shape=(2))
col_status = Column(name='status', dtype=int)
columns = [col_dbkey, col_jobname, col_jobkey, col_appname,
col_logfile, col_job_config, col_timestamp,
col_infile_refs, col_outfile_refs,
col_rmfile_refs, col_intfile_refs,
col_status]
table = Table(data=columns)
col_file_ids = Column(name='file_id', dtype=int)
table_ids = Table(data=[col_file_ids])
for val in job_dict.values():
val.append_to_tables(table, table_ids)
return table, table_ids | def make_tables(job_dict) | Build and return an `astropy.table.Table' to store `JobDetails` | 2.104938 | 2.022204 | 1.040912 |
file_dict = copy.deepcopy(self.file_dict)
if self.sub_file_dict is not None:
file_dict.update(self.sub_file_dict.file_dict)
infiles = file_dict.input_files
outfiles = file_dict.output_files
rmfiles = file_dict.temp_files
int_files = file_dict.internal_files
if self.infile_ids is None:
if infiles is not None:
self.infile_ids = np.zeros((len(infiles)), int)
filelist = file_archive.get_file_ids(
infiles, creator, FileStatus.expected, file_dict)
JobDetails._fill_array_from_list(filelist, self.infile_ids)
else:
self.infile_ids = np.zeros((0), int)
if self.outfile_ids is None:
if outfiles is not None:
self.outfile_ids = np.zeros((len(outfiles)), int)
filelist = file_archive.get_file_ids(
outfiles, creator, status, file_dict)
JobDetails._fill_array_from_list(filelist, self.outfile_ids)
else:
self.outfile_ids = np.zeros((0), int)
if self.rmfile_ids is None:
if rmfiles is not None:
self.rmfile_ids = np.zeros((len(rmfiles)), int)
filelist = file_archive.get_file_ids(rmfiles)
JobDetails._fill_array_from_list(filelist, self.rmfile_ids)
else:
self.rmfile_ids = np.zeros((0), int)
if self.intfile_ids is None:
if int_files is not None:
self.intfile_ids = np.zeros((len(int_files)), int)
filelist = file_archive.get_file_ids(
int_files, creator, status)
JobDetails._fill_array_from_list(filelist, self.intfile_ids)
else:
self.intfile_ids = np.zeros((0), int) | def get_file_ids(self, file_archive, creator=None, status=FileStatus.no_file) | Fill the file id arrays from the file lists
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
creator : int
A unique key for the job that created these file
status : `FileStatus`
Enumeration giving current status thse files | 1.75825 | 1.739347 | 1.010868 |
full_list = []
status_dict = {}
full_list += file_archive.get_file_paths(
file_id_array[self.infile_ids])
full_list += file_archive.get_file_paths(
file_id_array[self.outfile_ids])
full_list += file_archive.get_file_paths(
file_id_array[self.rmfile_ids])
full_list += file_archive.get_file_paths(
file_id_array[self.intfile_ids])
for filepath in full_list:
handle = file_archive.get_handle(filepath)
status_dict[filepath] = handle.status
if self.file_dict is None:
self.file_dict = FileDict()
self.file_dict.update(status_dict) | def get_file_paths(self, file_archive, file_id_array) | Get the full paths of the files used by this object from the the id arrays
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
file_id_array : `numpy.array`
Array that remaps the file indexes | 2.291612 | 2.472236 | 0.926939 |
for i, val in enumerate(the_list):
the_array[i] = val
return the_array | def _fill_array_from_list(the_list, the_array) | Fill an `array` from a `list` | 2.392482 | 2.687796 | 0.890128 |
ret_dict = {}
for row in table:
job_details = cls.create_from_row(row)
ret_dict[job_details.dbkey] = job_details
return ret_dict | def make_dict(cls, table) | Build a dictionary map int to `JobDetails` from an `astropy.table.Table` | 5.089123 | 3.557168 | 1.430667 |
kwargs = {}
for key in table_row.colnames:
kwargs[key] = table_row[key]
infile_refs = kwargs.pop('infile_refs')
outfile_refs = kwargs.pop('outfile_refs')
rmfile_refs = kwargs.pop('rmfile_refs')
intfile_refs = kwargs.pop('intfile_refs')
kwargs['infile_ids'] = np.arange(infile_refs[0], infile_refs[1])
kwargs['outfile_ids'] = np.arange(outfile_refs[0], outfile_refs[1])
kwargs['rmfile_ids'] = np.arange(rmfile_refs[0], rmfile_refs[1])
kwargs['intfile_ids'] = np.arange(intfile_refs[0], intfile_refs[1])
return cls(**kwargs) | def create_from_row(cls, table_row) | Create a `JobDetails` from an `astropy.table.row.Row` | 1.953408 | 1.835836 | 1.064043 |
infile_refs = np.zeros((2), int)
outfile_refs = np.zeros((2), int)
rmfile_refs = np.zeros((2), int)
intfile_refs = np.zeros((2), int)
f_ptr = len(table_ids['file_id'])
infile_refs[0] = f_ptr
if self.infile_ids is not None:
for fid in self.infile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
infile_refs[1] = f_ptr
outfile_refs[0] = f_ptr
if self.outfile_ids is not None:
for fid in self.outfile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
outfile_refs[1] = f_ptr
rmfile_refs[0] = f_ptr
if self.rmfile_ids is not None:
for fid in self.rmfile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
rmfile_refs[1] = f_ptr
intfile_refs[0] = f_ptr
if self.intfile_ids is not None:
for fid in self.intfile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
intfile_refs[1] = f_ptr
table.add_row(dict(dbkey=self.dbkey,
jobname=self.jobname,
jobkey=self.jobkey,
appname=self.appname,
logfile=self.logfile,
job_config=str(self.job_config),
timestamp=self.timestamp,
infile_refs=infile_refs,
outfile_refs=outfile_refs,
rmfile_refs=rmfile_refs,
intfile_refs=intfile_refs,
status=self.status)) | def append_to_tables(self, table, table_ids) | Add this instance as a row on a `astropy.table.Table` | 1.790865 | 1.759445 | 1.017858 |
try:
table[row_idx]['timestamp'] = self.timestamp
table[row_idx]['status'] = self.status
except IndexError:
print("Index error", len(table), row_idx) | def update_table_row(self, table, row_idx) | Add this instance as a row on a `astropy.table.Table` | 3.910009 | 3.72739 | 1.048994 |
self.status = checker_func(self.logfile)
return self.status | def check_status_logfile(self, checker_func) | Check on the status of this particular job using the logfile | 4.965977 | 3.599287 | 1.379711 |
for irow in range(len(self._table)):
job_details = self.make_job_details(irow)
self._cache[job_details.fullkey] = job_details | def _fill_cache(self) | Fill the cache from the `astropy.table.Table` | 6.346903 | 5.184328 | 1.224248 |
self._table_file = table_file
if os.path.exists(self._table_file):
self._table = Table.read(self._table_file, hdu='JOB_ARCHIVE')
self._table_ids = Table.read(self._table_file, hdu='FILE_IDS')
else:
self._table, self._table_ids = JobDetails.make_tables({})
self._table_id_array = self._table_ids['file_id'].data
self._fill_cache() | def _read_table_file(self, table_file) | Read an `astropy.table.Table` from table_file to set up the `JobArchive` | 3.602016 | 3.061105 | 1.176705 |
row = self._table[row_idx]
job_details = JobDetails.create_from_row(row)
job_details.get_file_paths(self._file_archive, self._table_id_array)
self._cache[job_details.fullkey] = job_details
return job_details | def make_job_details(self, row_idx) | Create a `JobDetails` from an `astropy.table.row.Row` | 5.387494 | 4.443768 | 1.212371 |
fullkey = JobDetails.make_fullkey(jobname, jobkey)
return self._cache[fullkey] | def get_details(self, jobname, jobkey) | Get the `JobDetails` associated to a particular job instance | 6.833763 | 6.690856 | 1.021359 |
# check to see if the job already exists
try:
job_details_old = self.get_details(job_details.jobname,
job_details.jobkey)
if job_details_old.status <= JobStatus.running:
job_details_old.status = job_details.status
job_details_old.update_table_row(
self._table, job_details_old.dbkey - 1)
job_details = job_details_old
except KeyError:
job_details.dbkey = len(self._table) + 1
job_details.get_file_ids(
self._file_archive, creator=job_details.dbkey)
job_details.append_to_tables(self._table, self._table_ids)
self._table_id_array = self._table_ids['file_id'].data
self._cache[job_details.fullkey] = job_details
return job_details | def register_job(self, job_details) | Register a job in this `JobArchive` | 4.210932 | 4.053267 | 1.038898 |
njobs = len(job_dict)
sys.stdout.write("Registering %i total jobs: " % njobs)
for i, job_details in enumerate(job_dict.values()):
if i % 10 == 0:
sys.stdout.write('.')
sys.stdout.flush()
self.register_job(job_details)
sys.stdout.write('!\n') | def register_jobs(self, job_dict) | Register a bunch of jobs in this archive | 2.5234 | 2.391738 | 1.055048 |
job_config = kwargs.get('job_config', None)
if job_config is None:
job_config = link.args
status = kwargs.get('status', JobStatus.unknown)
job_details = JobDetails(jobname=link.linkname,
jobkey=key,
appname=link.appname,
logfile=kwargs.get('logfile'),
jobconfig=job_config,
timestamp=get_timestamp(),
file_dict=copy.deepcopy(link.files),
sub_file_dict=copy.deepcopy(link.sub_files),
status=status)
self.register_job(job_details)
return job_details | def register_job_from_link(self, link, key, **kwargs) | Register a job in the `JobArchive` from a `Link` object | 3.508797 | 3.466669 | 1.012152 |
other = self.get_details(job_details.jobname,
job_details.jobkey)
other.timestamp = job_details.timestamp
other.status = job_details.status
other.update_table_row(self._table, other.dbkey - 1)
return other | def update_job(self, job_details) | Update a job in the `JobArchive` | 6.774049 | 6.309466 | 1.073633 |
jobnames = self.table[mask]['jobname']
jobkey = self.table[mask]['jobkey']
self.table[mask]['status'] = JobStatus.removed
for jobname, jobkey in zip(jobnames, jobkey):
fullkey = JobDetails.make_fullkey(jobname, jobkey)
self._cache.pop(fullkey).status = JobStatus.removed
self.write_table_file() | def remove_jobs(self, mask) | Mark all jobs that match a mask as 'removed' | 4.079302 | 3.753827 | 1.086705 |
try:
os.unlink('job_archive_temp.fits')
os.unlink('file_archive_temp.fits')
except OSError:
pass
cls._archive = cls(job_archive_table='job_archive_temp.fits',
file_archive_table='file_archive_temp.fits',
base_path=os.path.abspath('.') + '/')
return cls._archive | def build_temp_job_archive(cls) | Build and return a `JobArchive` using defualt locations of
persistent files. | 3.843075 | 3.587473 | 1.071249 |
if self._table is None:
raise RuntimeError("No table to write")
if self._table_ids is None:
raise RuntimeError("No ID table to write")
if job_table_file is not None:
self._table_file = job_table_file
if self._table_file is None:
raise RuntimeError("No output file specified for table")
write_tables_to_fits(self._table_file, [self._table, self._table_ids], clobber=True,
namelist=['JOB_ARCHIVE', 'FILE_IDS'])
self._file_archive.write_table_file(file_table_file) | def write_table_file(self, job_table_file=None, file_table_file=None) | Write the table to self._table_file | 3.219176 | 3.015193 | 1.067652 |
njobs = len(self.cache.keys())
status_vect = np.zeros((8), int)
sys.stdout.write("Updating status of %i jobs: " % njobs)
sys.stdout.flush()
for i, key in enumerate(self.cache.keys()):
if i % 200 == 0:
sys.stdout.write('.')
sys.stdout.flush()
job_details = self.cache[key]
if job_details.status in [JobStatus.pending, JobStatus.running]:
if checker_func:
job_details.check_status_logfile(checker_func)
job_details.update_table_row(self._table, job_details.dbkey - 1)
status_vect[job_details.status] += 1
sys.stdout.write("!\n")
sys.stdout.flush()
sys.stdout.write("Summary:\n")
sys.stdout.write(" Unknown: %i\n" % status_vect[JobStatus.unknown])
sys.stdout.write(" Not Ready: %i\n" %
status_vect[JobStatus.not_ready])
sys.stdout.write(" Ready: %i\n" % status_vect[JobStatus.ready])
sys.stdout.write(" Pending: %i\n" % status_vect[JobStatus.pending])
sys.stdout.write(" Running: %i\n" % status_vect[JobStatus.running])
sys.stdout.write(" Done: %i\n" % status_vect[JobStatus.done])
sys.stdout.write(" Failed: %i\n" % status_vect[JobStatus.failed])
sys.stdout.write(" Partial: %i\n" %
status_vect[JobStatus.partial_failed]) | def update_job_status(self, checker_func) | Update the status of all the jobs in the archive | 2.136437 | 2.092626 | 1.020936 |
if cls._archive is None:
cls._archive = cls(**kwargs)
return cls._archive | def build_archive(cls, **kwargs) | Return the singleton `JobArchive` instance, building it if needed | 3.952583 | 2.758859 | 1.432688 |
# Timer is running
if self._t0 is not None:
return self._time + self._get_time()
else:
return self._time | def elapsed_time(self) | Get the elapsed time. | 7.745003 | 6.548865 | 1.182648 |
if self._t0 is None:
raise RuntimeError('Timer not started.')
self._time += self._get_time()
self._t0 = None | def stop(self) | Stop the timer. | 6.719191 | 4.910707 | 1.368274 |
data = dict(Spatial_Filename=Spatial_Filename,
ra=0.0, dec=0.0,
SpatialType='SpatialMap',
Source_Name=name)
if spectrum is not None:
data.update(spectrum)
return roi_model.Source(name, data) | def make_spatialmap_source(name, Spatial_Filename, spectrum) | Construct and return a `fermipy.roi_model.Source` object | 4.926134 | 4.245527 | 1.160311 |
data = dict(Spatial_Filename=Spatial_Filename)
if spectrum is not None:
data.update(spectrum)
return roi_model.MapCubeSource(name, data) | def make_mapcube_source(name, Spatial_Filename, spectrum) | Construct and return a `fermipy.roi_model.MapCubeSource` object | 4.377075 | 3.183988 | 1.374715 |
data = dict(Spectrum_Filename=Spectrum_Filename)
if spectrum is not None:
data.update(spectrum)
return roi_model.IsoSource(name, data) | def make_isotropic_source(name, Spectrum_Filename, spectrum) | Construct and return a `fermipy.roi_model.IsoSource` object | 5.496774 | 3.467387 | 1.585278 |
data = dict(SpatialType='CompositeSource',
SpatialModel='CompositeSource',
SourceType='CompositeSource')
if spectrum is not None:
data.update(spectrum)
return roi_model.CompositeSource(name, data) | def make_composite_source(name, spectrum) | Construct and return a `fermipy.roi_model.CompositeSource` object | 6.543774 | 5.127343 | 1.27625 |
sources = {}
for source_name in source_names:
sources[source_name] = catalog_roi_model[source_name]
return sources | def make_catalog_sources(catalog_roi_model, source_names) | Construct and return dictionary of sources that are a subset of sources
in catalog_roi_model.
Parameters
----------
catalog_roi_model : dict or `fermipy.roi_model.ROIModel`
Input set of sources
source_names : list
Names of sourcs to extract
Returns dict mapping source_name to `fermipy.roi_model.Source` object | 2.150801 | 2.372647 | 0.906498 |
srcdict = OrderedDict()
try:
comp_info = comp_dict.info
except AttributeError:
comp_info = comp_dict
try:
spectrum = comp_dict.spectrum
except AttributeError:
spectrum = None
model_type = comp_info.model_type
if model_type == 'PointSource':
srcdict[comp_key] = make_point_source(comp_info.source_name,
comp_info.src_dict)
elif model_type == 'SpatialMap':
srcdict[comp_key] = make_spatialmap_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'MapCubeSource':
srcdict[comp_key] = make_mapcube_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'IsoSource':
srcdict[comp_key] = make_isotropic_source(comp_info.source_name,
comp_info.Spectral_Filename,
spectrum)
elif model_type == 'CompositeSource':
srcdict[comp_key] = make_composite_source(comp_info.source_name,
spectrum)
elif model_type == 'CatalogSources':
srcdict.update(make_catalog_sources(comp_info.roi_model,
comp_info.source_names))
else:
raise ValueError("Unrecognized model_type %s" % model_type)
return srcdict | def make_sources(comp_key, comp_dict) | Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
return `OrderedDict` maping comp_key to `fermipy.roi_model.Source` | 2.251637 | 2.122326 | 1.060929 |
self._source_info_dict.update(source_info_dict)
for key, value in source_info_dict.items():
self._sources.update(make_sources(key, value)) | def add_sources(self, source_info_dict) | Add all of the sources in source_info_dict to this factory | 3.027704 | 2.952786 | 1.025372 |
catalog_type = kwargs.get('catalog_type')
catalog_file = kwargs.get('catalog_file')
catalog_extdir = kwargs.get('catalog_extdir')
if catalog_type == '2FHL':
return catalog.Catalog2FHL(fitsfile=catalog_file, extdir=catalog_extdir)
elif catalog_type == '3FGL':
return catalog.Catalog3FGL(fitsfile=catalog_file, extdir=catalog_extdir)
elif catalog_type == '4FGLP':
return catalog.Catalog4FGLP(fitsfile=catalog_file, extdir=catalog_extdir)
elif catalog_type == 'FL8Y':
return catalog.CatalogFL8Y(fitsfile=catalog_file, extdir=catalog_extdir)
else:
table = Table.read(catalog_file)
return catalog.Catalog(table, extdir=catalog_extdir) | def build_catalog(**kwargs) | Build a `fermipy.catalog.Catalog` object
Parameters
----------
catalog_type : str
Specifies catalog type, options include 2FHL | 3FGL | 4FGLP
catalog_file : str
FITS file with catalog tables
catalog_extdir : str
Path to directory with extended source templates | 2.173869 | 1.708962 | 1.27204 |
data = dict(catalogs=cataloglist,
src_roiwidth=360.)
return roi_model.ROIModel(data, skydir=SkyCoord(0.0, 0.0, unit='deg')) | def make_fermipy_roi_model_from_catalogs(cataloglist) | Build and return a `fermipy.roi_model.ROIModel object from
a list of fermipy.catalog.Catalog` objects | 7.176908 | 7.620221 | 0.941824 |
if sources is None:
sources = {}
src_fact = cls()
src_fact.add_sources(sources)
ret_model = roi_model.ROIModel(
{}, skydir=SkyCoord(0.0, 0.0, unit='deg'))
for source in src_fact.sources.values():
ret_model.load_source(source,
build_index=False, merge_sources=False)
return ret_model | def make_roi(cls, sources=None) | Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources | 5.271457 | 4.740364 | 1.112036 |
roi_new = cls.make_roi()
for source_name in source_names:
try:
src_cp = roi.copy_source(source_name)
except Exception:
continue
roi_new.load_source(src_cp, build_index=False)
return roi_new | def copy_selected_sources(cls, roi, source_names) | Build and return a `fermipy.roi_model.ROIModel` object
by copying selected sources from another such object | 4.146867 | 4.049819 | 1.023963 |
d = yaml.load(open(yamlfile))
return MktimeFilterDict(d['aliases'], d['selections']) | def build_from_yamlfile(yamlfile) | Build a list of components from a yaml file | 18.902016 | 17.96627 | 1.052083 |
args = self._parser.parse_args(argv)
if not HAVE_ST:
raise RuntimeError(
"Trying to run fermipy analysis, but don't have ST")
if is_not_null(args.roi_baseline):
gta = GTAnalysis.create(args.roi_baseline, args.config)
else:
gta = GTAnalysis(args.config,
logging={'verbosity': 3},
fileio={'workdir_regex': '\.xml$|\.npy$'})
gta.print_roi()
test_source = args.target
gta.sed(test_source, outfile='sed_%s.fits' % 'FL8Y', make_plots=True)
gta.extension(test_source, make_plots=True)
return gta | def run_analysis(self, argv) | Run this analysis | 9.233802 | 9.179781 | 1.005885 |
jobs = []
for dirname in sorted(dirs):
o = dict(cfgfile=os.path.join(dirname, 'config.yaml'),
logfile=os.path.join(
dirname, os.path.splitext(runscript)[0] + '.log'),
runscript=os.path.join(dirname, runscript))
if not os.path.isfile(o['cfgfile']):
continue
if not os.path.isfile(o['runscript']):
continue
if not os.path.isfile(o['logfile']):
jobs.append(o)
continue
age = file_age_in_seconds(o['logfile']) / 60.
job_status = check_log(o['logfile'])
print(dirname, job_status, age)
if job_status is False or overwrite:
jobs.append(o)
elif job_status == 'Exited':
print("Job Exited. Resending command.")
jobs.append(o)
elif job_status == 'None' and age > max_job_age:
print(
"Job did not exit, but no activity on log file for > %.2f min. Resending command." % max_job_age)
jobs.append(o)
# elif job_status is True:
# print("Job Completed. Resending command.")
# jobs.append(o)
return jobs | def collect_jobs(dirs, runscript, overwrite=False, max_job_age=90) | Construct a list of job dictionaries. | 2.867116 | 2.809165 | 1.020629 |
if rebin > 1:
npix = npix * rebin
xpix = xpix * rebin + (rebin - 1.0) / 2.
ypix = ypix * rebin + (rebin - 1.0) / 2.
cdelt = cdelt / rebin
if spatial_model == 'RadialGaussian':
k = utils.make_cgauss_kernel(psf, sigma, npix, cdelt,
xpix, ypix, psf_scale_fn)
elif spatial_model == 'RadialDisk':
k = utils.make_cdisk_kernel(psf, sigma, npix, cdelt,
xpix, ypix, psf_scale_fn)
elif spatial_model == 'PointSource':
k = utils.make_psf_kernel(psf, npix, cdelt,
xpix, ypix, psf_scale_fn)
else:
raise Exception('Unsupported spatial model: %s', spatial_model)
if rebin > 1:
k = utils.sum_bins(k, 1, rebin)
k = utils.sum_bins(k, 2, rebin)
k *= psf.exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2
return k | def make_srcmap_old(psf, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,
cdelt=0.01, rebin=1, psf_scale_fn=None) | Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
rebin : int
Factor by which the original map will be oversampled in the
spatial dimension when computing the model.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV. | 2.115596 | 2.237235 | 0.94563 |
if spatial_model == 'RadialGaussian':
k = utils.make_radial_kernel(psf, utils.convolve2d_gauss,
sigma / 1.5095921854516636, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
elif spatial_model == 'RadialDisk':
k = utils.make_radial_kernel(psf, utils.convolve2d_disk,
sigma / 0.8246211251235321, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
elif spatial_model == 'PointSource':
k = utils.make_radial_kernel(psf, None, None, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
else:
raise Exception('Unsupported spatial model: %s', spatial_model)
if klims is not None:
exp = exp[klims[0]:klims[1] + 1, ...]
k *= exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2
return k | def make_srcmap(psf, exp, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,
cdelt=0.01, psf_scale_fn=None, klims=None, sparse=False) | Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
exp : `~numpy.ndarray`
Array of exposures.
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
klims : tuple
Indices of lower and upper range of energy.
sparse : bool
Skip pixels in which the source amplitude is small. | 2.502163 | 2.633793 | 0.950023 |
with fits.open(srcmap_file) as hdulist:
hdunames = [hdu.name.upper() for hdu in hdulist]
if not isinstance(names, list):
names = [names]
for name in names:
if not name.upper() in hdunames:
continue
del hdulist[name.upper()]
hdulist.writeto(srcmap_file, overwrite=True) | def delete_source_map(srcmap_file, names, logger=None) | Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted. | 2.241584 | 2.374878 | 0.943873 |
idx = []
for i in range(self.ndim):
if i == 0:
idx += [0]
else:
npix1 = int(self.shape[i])
pix0 = int(pix[i - 1]) - npix1 // 2
idx += [pix0]
return idx | def get_offsets(self, pix) | Get offset of the first pixel in each dimension in the
global coordinate system.
Parameters
----------
pix : `~numpy.ndarray`
Pixel coordinates in global coordinate system. | 4.163716 | 4.363596 | 0.954194 |
pix_offset = self.get_offsets(pix)
dpix = np.zeros(len(self.shape) - 1)
for i in range(len(self.shape) - 1):
x = self.rebin * (pix[i] - pix_offset[i + 1]
) + (self.rebin - 1.0) / 2.
dpix[i] = x - self._pix_ref[i]
pos = [pix_offset[i] + self.shape[i] // 2
for i in range(self.data.ndim)]
s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos)
k = np.zeros(self.data.shape)
for i in range(k.shape[0]):
k[i] = shift(self._data_spline[i], dpix, cval=np.nan,
order=2, prefilter=False)
for i in range(1, len(self.shape)):
k = utils.sum_bins(k, i, self.rebin)
k0 = np.ones(self.shape_out) * fill_value
if k[s1].size == 0 or k0[s0].size == 0:
return k0
k0[s0] = k[s1]
return k0 | def shift_to_coords(self, pix, fill_value=np.nan) | Create a new map that is shifted to the pixel coordinates
``pix``. | 3.298705 | 3.363761 | 0.98066 |
k0 = self._m0.shift_to_coords(pix)
k1 = self._m1.shift_to_coords(pix)
k0[np.isfinite(k1)] = k1[np.isfinite(k1)]
k0[~np.isfinite(k0)] = 0
return k0 | def create_map(self, pix) | Create a new map with reference pixel coordinates shifted
to the pixel coordinates ``pix``.
Parameters
----------
pix : `~numpy.ndarray`
Reference pixel of new map.
Returns
-------
out_map : `~numpy.ndarray`
The shifted map. | 3.50109 | 4.137265 | 0.846233 |
if vcs is None:
return None
tags = vcs.split('-')
# Bare version number
if len(tags) == 1:
return tags[0]
else:
return tags[0] + '+' + '.'.join(tags[1:]) | def render_pep440(vcs) | Convert git release tag into a form that is PEP440 compliant. | 3.731776 | 3.178349 | 1.174124 |
import re
dirname = os.path.abspath(os.path.dirname(__file__))
try:
f = open(os.path.join(dirname, "_version.py"), "rt")
for line in f.readlines():
m = re.match("__version__ = '([^']+)'", line)
if m:
ver = m.group(1)
return ver
except:
return None
return None | def read_release_version() | Read the release version from ``_version.py``. | 2.530577 | 2.484493 | 1.018549 |
dirname = os.path.abspath(os.path.dirname(__file__))
f = open(os.path.join(dirname, "_version.py"), "wt")
f.write("__version__ = '%s'\n" % version)
f.close() | def write_release_version(version) | Write the release version to ``_version.py``. | 2.108273 | 1.951954 | 1.080083 |
return os.path.join(basedir, outkey,
os.path.basename(origname).replace('.fits',
'_%s.fits' % outkey)) | def make_full_path(basedir, outkey, origname) | Make a full file path by combining tokens
Parameters
-----------
basedir : str
The top level output area
outkey : str
The key for the particular instance of the analysis
origname : str
Template for the output file name
Returns
-------
outpath : str
This will be <basedir>:<outkey>:<newname>.fits
Where newname = origname.replace('.fits', '_<outkey>.fits') | 2.851071 | 3.233214 | 0.881807 |
job_configs = {}
comp_file = args.get('comp', None)
if comp_file is not None:
comp_dict = yaml.safe_load(open(comp_file))
coordsys = comp_dict.pop('coordsys')
for v in comp_dict.values():
v['coordsys'] = coordsys
else:
return job_configs
datafile = args['data']
if datafile is None or datafile == 'None':
return job_configs
NAME_FACTORY.update_base_dict(args['data'])
inputfiles = create_inputlist(args['ft1file'])
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
data_ver = NAME_FACTORY.base_dict['data_ver']
for idx, infile in enumerate(inputfiles):
key = "%06i" % idx
key_scfile = "%03i" % (idx + 1)
output_dir = os.path.join(outdir_base, key)
try:
os.mkdir(output_dir)
except OSError:
pass
scfile = args['ft2file'].replace('.lst', '_%s.fits' % key_scfile)
logfile = make_nfs_path(os.path.join(output_dir,
'scatter_mk_%s_%s.log' % (data_ver, key)))
job_configs[key] = comp_dict.copy()
job_configs[key].update(dict(ft1file=infile,
scfile=scfile,
comp=args['comp'],
hpx_order_max=args['hpx_order_max'],
outdir=outdir_base,
outkey=key,
logfile=logfile,
pfiles=output_dir))
return job_configs | def build_job_configs(self, args) | Hook to build job configurations | 3.978924 | 3.941646 | 1.009458 |
data = args.get('data')
comp = args.get('comp')
ft1file = args.get('ft1file')
ft2file = args.get('ft2file')
scratch = args.get('scratch', None)
dry_run = args.get('dry_run', None)
self._set_link('split-and-mktime', SplitAndMktime_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_ccube', 9),
ft1file=ft1file,
ft2file=ft2file,
do_ltsum=args.get('do_ltsum', False),
scratch=scratch,
dry_run=dry_run)
self._set_link('coadd-split', CoaddSplit_SG,
comp=comp, data=data,
ft1file=ft1file)
self._set_link('ltsum', Gtltsum_SG,
comp=comp, data=data,
ft1file=args['ft1file'],
dry_run=dry_run)
self._set_link('expcube2', Gtexpcube2_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_expcube', 5),
dry_run=dry_run) | def _map_arguments(self, args) | Map from the top-level arguments to the arguments provided to
the indiviudal links | 3.217833 | 3.131141 | 1.027687 |
import matplotlib
try:
os.environ['DISPLAY']
except KeyError:
matplotlib.use('Agg')
else:
if backend is not None:
matplotlib.use(backend) | def init_matplotlib_backend(backend=None) | This function initializes the matplotlib backend. When no
DISPLAY is available the backend is automatically set to 'Agg'.
Parameters
----------
backend : str
matplotlib backend name. | 3.014151 | 3.319115 | 0.908119 |
infile = resolve_path(infile, workdir=workdir)
infile, ext = os.path.splitext(infile)
if os.path.isfile(infile + '.npy'):
infile += '.npy'
elif os.path.isfile(infile + '.yaml'):
infile += '.yaml'
else:
raise Exception('Input file does not exist.')
ext = os.path.splitext(infile)[1]
if ext == '.npy':
return infile, load_npy(infile)
elif ext == '.yaml':
return infile, load_yaml(infile)
else:
raise Exception('Unrecognized extension.') | def load_data(infile, workdir=None) | Load python data structure from either a YAML or numpy file. | 2.224995 | 2.032796 | 1.094549 |
files = []
with open(pathlist, 'r') as f:
files = [line.strip() for line in f]
newfiles = []
for f in files:
f = os.path.expandvars(f)
if os.path.isfile(f):
newfiles += [f]
else:
newfiles += [os.path.join(workdir, f)]
if randomize:
_, tmppath = tempfile.mkstemp(prefix=prefix, dir=workdir)
else:
tmppath = os.path.join(workdir, prefix)
tmppath += '.txt'
with open(tmppath, 'w') as tmpfile:
tmpfile.write("\n".join(newfiles))
return tmppath | def resolve_file_path_list(pathlist, workdir, prefix='',
randomize=False) | Resolve the path of each file name in the file ``pathlist`` and
write the updated paths to a new file. | 2.186681 | 2.172112 | 1.006708 |
if not os.path.isdir(path):
return []
o = [path]
if max_depth == 0:
return o
for subdir in os.listdir(path):
subdir = os.path.join(path, subdir)
if not os.path.isdir(subdir):
continue
o += [subdir]
if os.path.islink(subdir) and not followlinks:
continue
if max_depth > 0:
o += collect_dirs(subdir, max_depth=max_depth - 1)
return list(set(o)) | def collect_dirs(path, max_depth=1, followlinks=True) | Recursively find directories under the given path. | 1.909391 | 1.878907 | 1.016224 |
for p in patterns:
if re.findall(p, string):
return True
return False | def match_regex_list(patterns, string) | Perform a regex match of a string against a list of patterns.
Returns true if the string matches at least one pattern in the
list. | 4.892393 | 5.785648 | 0.845608 |
mask = np.empty(len(tab), dtype=bool)
mask.fill(False)
names = [name.lower().replace(' ', '') for name in names]
for colname in colnames:
if colname not in tab.columns:
continue
col = tab[[colname]].copy()
col[colname] = defchararray.replace(defchararray.lower(col[colname]).astype(str),
' ', '')
for name in names:
mask |= col[colname] == name
return mask | def find_rows_by_string(tab, names, colnames=['assoc']) | Find the rows in a table ``tab`` that match at least one of the
strings in ``names``. This method ignores whitespace and case
when matching strings.
Parameters
----------
tab : `astropy.table.Table`
Table that will be searched.
names : list
List of strings.
colname : str
Name of the table column that will be searched for matching string.
Returns
-------
mask : `~numpy.ndarray`
Boolean mask for rows with matching strings. | 3.658126 | 3.8745 | 0.944154 |
costh = np.cos(np.pi / 2. - lat0)
cosphi = np.cos(lon0)
sinth = np.sin(np.pi / 2. - lat0)
sinphi = np.sin(lon0)
xyz = lonlat_to_xyz(lon1, lat1)
x1 = xyz[0]
y1 = xyz[1]
z1 = xyz[2]
x1p = x1 * costh * cosphi + y1 * costh * sinphi - z1 * sinth
y1p = -x1 * sinphi + y1 * cosphi
z1p = x1 * sinth * cosphi + y1 * sinth * sinphi + z1 * costh
r = np.arctan2(np.sqrt(x1p ** 2 + y1p ** 2), z1p)
phi = np.arctan2(y1p, x1p)
return r * np.cos(phi), r * np.sin(phi) | def project(lon0, lat0, lon1, lat1) | This function performs a stereographic projection on the unit
vector (lon1,lat1) with the pole defined at the reference unit
vector (lon0,lat0). | 1.941522 | 1.913399 | 1.014698 |
return (np.sin(lat1) * np.sin(lat0) + np.cos(lat1) * np.cos(lat0) *
np.cos(lon1 - lon0)) | def separation_cos_angle(lon0, lat0, lon1, lat1) | Evaluate the cosine of the angular separation between two
direction vectors. | 1.890553 | 1.994576 | 0.947847 |
theta = np.array(np.pi / 2. - lat)
return np.vstack((np.sin(theta) * np.cos(lon),
np.sin(theta) * np.sin(lon),
np.cos(theta))).T | def angle_to_cartesian(lon, lat) | Convert spherical coordinates to cartesian unit vectors. | 2.158332 | 2.150686 | 1.003556 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.