| sentence1 | sentence2 | label |
|---|---|---|
def stack_nll(shape, components, ylims, weights=None):
"""Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : list of `~fermipy.castro.CastroData_Base`
The components to be stacked
ylims : tuple
Lower and upper bounds of the normalization scan
weights : array-like
Relative weights for the components; equal weights are used if None
Returns
-------
norm_vals : `numpy.ndarray`
N X M array of Normalization values
nll_vals : `numpy.ndarray`
N X M array of log-likelihood values
nll_offsets : `numpy.ndarray`
N array of maximum log-likelihood values in each bin
"""
n_bins = shape[0]
n_vals = shape[1]
if weights is None:
weights = np.ones((len(components)))
norm_vals = np.zeros(shape)
nll_vals = np.zeros(shape)
nll_offsets = np.zeros((n_bins))
for i in range(n_bins):
log_min = np.log10(ylims[0])
log_max = np.log10(ylims[1])
norm_vals[i, 1:] = np.logspace(log_min, log_max, n_vals - 1)
for c, w in zip(components, weights):
nll_vals[i] += w * c[i].interp(norm_vals[i]) - c.nll_offsets[i]
# Reset the offsets
nll_obj = LnLFn(norm_vals[i], nll_vals[i])
ll_offset = nll_obj.fn_mle()
nll_vals[i] -= ll_offset
nll_offsets[i] = -ll_offset
return norm_vals, nll_vals, nll_offsets | Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : list of `~fermipy.castro.CastroData_Base`
The components to be stacked
ylims : tuple
Lower and upper bounds of the normalization scan
weights : array-like
Relative weights for the components; equal weights are used if None
Returns
-------
norm_vals : `numpy.ndarray`
N X M array of Normalization values
nll_vals : `numpy.ndarray`
N X M array of log-likelihood values
nll_offsets : `numpy.ndarray`
N array of maximum log-likelihood values in each bin | entailment |
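The per-bin stacking logic above can be illustrated without fermipy. Below is a minimal numpy sketch, assuming two hypothetical components with quadratic (Gaussian) negative log-likelihoods; the `LnLFn`/`fn_mle` re-zeroing step is stood in for by a simple `argmin`.

```python
import numpy as np

# Shared normalization grid: a leading zero plus log-spaced values,
# mirroring the norm_vals construction in stack_nll.
norm_grid = np.insert(np.logspace(-14, -10, 24), 0, 0.0)

def quad_nll(norm, best, err):
    """Quadratic (Gaussian) negative log-likelihood around a best fit."""
    return 0.5 * ((norm - best) / err) ** 2

# Two hypothetical components with different best-fit norms and errors.
components = [(2e-12, 1e-12), (3e-12, 2e-12)]
weights = [1.0, 1.0]

nll = sum(w * quad_nll(norm_grid, best, err)
          for w, (best, err) in zip(weights, components))

# Re-zero at the minimum, standing in for the LnLFn / fn_mle step.
nll_offset = nll.min()
nll -= nll_offset
print(norm_grid[np.argmin(nll)], nll_offset)
```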
def create_from_yamlfile(cls, yamlfile):
"""Create a Castro data object from a yaml file contains
the likelihood data."""
data = load_yaml(yamlfile)
nebins = len(data)
emin = np.array([data[i]['emin'] for i in range(nebins)])
emax = np.array([data[i]['emax'] for i in range(nebins)])
ref_flux = np.array([data[i]['flux'][1] for i in range(nebins)])
ref_eflux = np.array([data[i]['eflux'][1] for i in range(nebins)])
conv = np.array([data[i]['eflux2npred'] for i in range(nebins)])
ref_npred = conv*ref_eflux
ones = np.ones(ref_flux.shape)
ref_spec = ReferenceSpec(emin, emax, ones, ref_flux, ref_eflux, ref_npred)
norm_data = np.array([data[i]['eflux'] for i in range(nebins)])
ll_data = np.array([data[i]['logLike'] for i in range(nebins)])
max_ll = ll_data.max(1)
nll_data = (max_ll - ll_data.T).T
return cls(norm_data, nll_data, ref_spec, 'eflux') | Create a Castro data object from a yaml file that contains
the likelihood data. | entailment |
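For reference, a sketch of the YAML layout that `create_from_yamlfile` appears to expect: a list with one mapping per energy bin, where `flux` and `eflux` are scan arrays whose second element (index 1) is taken as the reference value. The field names follow the keys read above; all numbers are purely illustrative.

```python
import yaml

# One mapping per energy bin; index 1 of 'flux'/'eflux' is the reference.
bins = [dict(emin=1000.0, emax=3162.0,
             flux=[5e-11, 1e-10, 2e-10],
             eflux=[1e-7, 2e-7, 4e-7],
             eflux2npred=50.0,
             logLike=[-1.0, 0.0, -2.5]),
        dict(emin=3162.0, emax=10000.0,
             flux=[2e-11, 4e-11, 8e-11],
             eflux=[8e-8, 1.6e-7, 3.2e-7],
             eflux2npred=20.0,
             logLike=[-0.5, 0.0, -3.0])]

with open('castro_like.yaml', 'w') as fh:
    yaml.safe_dump(bins, fh)
```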
def create_from_flux_points(cls, txtfile):
"""Create a Castro data object from a text file containing a
sequence of differential flux points."""
tab = Table.read(txtfile, format='ascii.ecsv')
dnde_unit = u.ph / (u.MeV * u.cm ** 2 * u.s)
loge = np.log10(np.array(tab['e_ref'].to(u.MeV)))
norm = np.array(tab['norm'].to(dnde_unit))
norm_errp = np.array(tab['norm_errp'].to(dnde_unit))
norm_errn = np.array(tab['norm_errn'].to(dnde_unit))
norm_err = 0.5 * (norm_errp + norm_errn)
dloge = loge[1:] - loge[:-1]
dloge = np.insert(dloge, 0, dloge[0])
emin = 10 ** (loge - dloge * 0.5)
emax = 10 ** (loge + dloge * 0.5)
ectr = 10 ** loge
deltae = emax - emin
flux = norm * deltae
eflux = norm * deltae * ectr
ones = np.ones(flux.shape)
ref_spec = ReferenceSpec(emin, emax, ones, ones, ones, ones)
spec_data = SpecData(ref_spec, norm, flux, eflux, norm_err)
stephi = np.linspace(0, 1, 11)
steplo = -np.linspace(0, 1, 11)[1:][::-1]
loscale = 3 * norm_err
hiscale = 3 * norm_err
loscale[loscale > norm] = norm[loscale > norm]
norm_vals_hi = norm[:, np.newaxis] + \
stephi[np.newaxis, :] * hiscale[:, np.newaxis]
norm_vals_lo = norm[:, np.newaxis] + \
steplo[np.newaxis, :] * loscale[:, np.newaxis]
norm_vals = np.hstack((norm_vals_lo, norm_vals_hi))
nll_vals = 0.5 * \
(norm_vals - norm[:, np.newaxis]) ** 2 / \
norm_err[:, np.newaxis] ** 2
norm_vals *= flux[:, np.newaxis] / norm[:, np.newaxis]
return cls(norm_vals, nll_vals, spec_data, 'flux') | Create a Castro data object from a text file containing a
sequence of differential flux points. | entailment |
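The likelihood scan built here is a symmetric Gaussian approximation around each flux point. A standalone numpy sketch of the grid construction (illustrative values, with the low-side scale clipped so the scan stays non-negative):

```python
import numpy as np

norm = np.array([1.0e-11, 5.0e-12])       # measured dnde values
norm_err = np.array([2.0e-12, 1.5e-12])   # symmetrized errors

stephi = np.linspace(0, 1, 11)
steplo = -np.linspace(0, 1, 11)[1:][::-1]
loscale = np.minimum(3 * norm_err, norm)  # clip the low side at zero norm

norm_vals = np.hstack((norm[:, None] + steplo[None, :] * loscale[:, None],
                       norm[:, None] + stephi[None, :] * 3 * norm_err[:, None]))
nll_vals = 0.5 * (norm_vals - norm[:, None]) ** 2 / norm_err[:, None] ** 2

print(norm_vals.shape)   # (2, 21): 21 scan points per energy bin
print(nll_vals[:, 10])   # ~0 at the measured value
```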
def create_from_tables(cls, norm_type='eflux',
tab_s="SCANDATA",
tab_e="EBOUNDS"):
"""Create a CastroData object from two tables
Parameters
----------
norm_type : str
Type of normalization to use. Valid options are:
* norm : Normalization w.r.t. the test source
* flux : Flux of the test source ( ph cm^-2 s^-1 )
* eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
* npred: Number of predicted photons (Not implemented)
* dnde : Differential flux of the test source ( ph cm^-2 s^-1
MeV^-1 )
tab_s : `~astropy.table.Table`
Table with the scan data
tab_e : `~astropy.table.Table`
Table with the energy binning and normalization data
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
if norm_type in ['flux', 'eflux', 'dnde']:
norm_vals = np.array(tab_s['norm_scan'] *
tab_e['ref_%s' % norm_type][:, np.newaxis])
elif norm_type == "norm":
norm_vals = np.array(tab_s['norm_scan'])
else:
raise ValueError('Unrecognized normalization type: %s' % norm_type)
nll_vals = -np.array(tab_s['dloglike_scan'])
rs = ReferenceSpec.create_from_table(tab_e)
return cls(norm_vals, nll_vals, rs, norm_type) | Create a CastroData object from two tables
Parameters
----------
norm_type : str
Type of normalization to use. Valid options are:
* norm : Normalization w.r.t. the test source
* flux : Flux of the test source ( ph cm^-2 s^-1 )
* eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
* npred: Number of predicted photons (Not implemented)
* dnde : Differential flux of the test source ( ph cm^-2 s^-1
MeV^-1 )
tab_s : `~astropy.table.Table`
Table with the scan data
tab_e : `~astropy.table.Table`
Table with the energy binning and normalization data
Returns
-------
castro : `~fermipy.castro.CastroData` | entailment |
def create_from_fits(cls, fitsfile, norm_type='eflux',
hdu_scan="SCANDATA",
hdu_energies="EBOUNDS",
irow=None):
"""Create a CastroData object from a tscube FITS file.
Parameters
----------
fitsfile : str
Name of the fits file
norm_type : str
Type of normalization to use. Valid options are:
* norm : Normalization w.r.t. the test source
* flux : Flux of the test source ( ph cm^-2 s^-1 )
* eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
* npred: Number of predicted photons (Not implemented)
* dnde : Differential flux of the test source ( ph cm^-2 s^-1
MeV^-1 )
hdu_scan : str
Name of the FITS HDU with the scan data
hdu_energies : str
Name of the FITS HDU with the energy binning and
normalization data
irow : int or None
If None, this assumes that there is a single row in
the scan data table. Otherwise, this specifies which row of
the table to use
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
if irow is not None:
tab_s = Table.read(fitsfile, hdu=hdu_scan)[irow]
else:
tab_s = Table.read(fitsfile, hdu=hdu_scan)
tab_e = Table.read(fitsfile, hdu=hdu_energies)
tab_s = convert_sed_cols(tab_s)
tab_e = convert_sed_cols(tab_e)
return cls.create_from_tables(norm_type, tab_s, tab_e) | Create a CastroData object from a tscube FITS file.
Parameters
----------
fitsfile : str
Name of the fits file
norm_type : str
Type of normalization to use. Valid options are:
* norm : Normalization w.r.t. the test source
* flux : Flux of the test source ( ph cm^-2 s^-1 )
* eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
* npred: Number of predicted photons (Not implemented)
* dnde : Differential flux of the test source ( ph cm^-2 s^-1
MeV^-1 )
hdu_scan : str
Name of the FITS HDU with the scan data
hdu_energies : str
Name of the FITS HDU with the energy binning and
normalization data
irow : int or None
If None, this assumes that there is a single row in
the scan data table. Otherwise, this specifies which row of
the table to use
Returns
-------
castro : `~fermipy.castro.CastroData` | entailment |
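A hedged usage sketch (assuming fermipy is installed and `tscube.fits` was produced with `SCANDATA` and `EBOUNDS` HDUs, e.g. by a tscube analysis):

```python
from fermipy.castro import CastroData

# Load the likelihood scan for the single-row case (irow=None) ...
castro = CastroData.create_from_fits('tscube.fits', norm_type='eflux')

# ... or, for a multi-row scan table, pick out one row explicitly.
castro_row0 = CastroData.create_from_fits('tscube.fits',
                                          norm_type='eflux', irow=0)
```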
def create_from_sedfile(cls, fitsfile, norm_type='eflux'):
"""Create a CastroData object from an SED fits file
Parameters
----------
fitsfile : str
Name of the fits file
norm_type : str
Type of normalization to use, options are:
* norm : Normalization w.r.t. the test source
* flux : Flux of the test source ( ph cm^-2 s^-1 )
* eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
* npred: Number of predicted photons (Not implemented)
* dnde : Differential flux of the test source ( ph cm^-2 s^-1
MeV^-1 )
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
tab_s = Table.read(fitsfile, hdu=1)
tab_s = convert_sed_cols(tab_s)
if norm_type in ['flux', 'eflux', 'dnde']:
ref_colname = 'ref_%s' % norm_type
norm_vals = np.array(tab_s['norm_scan'] *
tab_s[ref_colname][:, np.newaxis])
elif norm_type == "norm":
norm_vals = np.array(tab_s['norm_scan'])
else:
raise ValueError('Unrecognized normalization type: %s' % norm_type)
nll_vals = -np.array(tab_s['dloglike_scan'])
ref_spec = ReferenceSpec.create_from_table(tab_s)
spec_data = SpecData(ref_spec, tab_s['norm'], tab_s['norm_err'])
return cls(norm_vals, nll_vals, spec_data, norm_type) | Create a CastroData object from an SED fits file
Parameters
----------
fitsfile : str
Name of the fits file
norm_type : str
Type of normalization to use, options are:
* norm : Normalization w.r.t. the test source
* flux : Flux of the test source ( ph cm^-2 s^-1 )
* eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
* npred: Number of predicted photons (Not implemented)
* dnde : Differential flux of the test source ( ph cm^-2 s^-1
MeV^-1 )
Returns
-------
castro : `~fermipy.castro.CastroData` | entailment |
def create_from_stack(cls, shape, components, ylims, weights=None):
""" Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [`~fermipy.castro.CastroData_Base`]
The components to be stacked
ylims : tuple
Lower and upper bounds of the normalization scan
weights : array-like
Relative weights for the components; equal weights are used if None
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
if len(components) == 0:
return None
norm_vals, nll_vals, nll_offsets = CastroData_Base.stack_nll(shape,
components,
ylims,
weights)
return cls(norm_vals, nll_vals,
components[0].refSpec,
components[0].norm_type) | Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [`~fermipy.castro.CastroData_Base`]
The components to be stacked
ylims : tuple
Lower and upper bounds of the normalization scan
weights : array-like
Relative weights for the components; equal weights are used if None
Returns
-------
castro : `~fermipy.castro.CastroData` | entailment |
def spectrum_loglike(self, specType, params, scale=1E3):
""" return the log-likelihood for a particular spectrum
Parameters
----------
specType : str
The type of spectrum to try
params : array-like
The spectral parameters
scale : float
The energy scale or 'pivot' energy
"""
sfn = self.create_functor(specType, scale=scale)
return self.__call__(sfn(params)) | Return the log-likelihood for a particular spectrum
Parameters
----------
specType : str
The type of spectrum to try
params : array-like
The spectral parameters
scale : float
The energy scale or 'pivot' energy | entailment |
def create_functor(self, specType, initPars=None, scale=1E3):
"""Create a functor object that computes normalizations in a
sequence of energy bins for a given spectral model.
Parameters
----------
specType : str
The type of spectrum to use. This can be a string
corresponding to the spectral model class name or a
`~fermipy.spectrum.SpectralFunction` object.
initPars : `~numpy.ndarray`
Arrays of parameter values with which the spectral
function will be initialized.
scale : float
The 'pivot energy' or energy scale to use for the spectrum
Returns
-------
fn : `~fermipy.spectrum.SEDFunctor`
A functor object.
"""
emin = self._refSpec.emin
emax = self._refSpec.emax
fn = SpectralFunction.create_functor(specType,
self.norm_type,
emin,
emax,
scale=scale)
if initPars is None:
if specType == 'PowerLaw':
initPars = np.array([5e-13, -2.0])
elif specType == 'LogParabola':
initPars = np.array([5e-13, -2.0, 0.0])
elif specType == 'PLExpCutoff':
initPars = np.array([5e-13, -1.0, 1E4])
fn.params = initPars
return fn | Create a functor object that computes normalizations in a
sequence of energy bins for a given spectral model.
Parameters
----------
specType : str
The type of spectrum to use. This can be a string
corresponding to the spectral model class name or a
`~fermipy.spectrum.SpectralFunction` object.
initPars : `~numpy.ndarray`
Arrays of parameter values with which the spectral
function will be initialized.
scale : float
The 'pivot energy' or energy scale to use for the spectrum
Returns
-------
fn : `~fermipy.spectrum.SEDFunctor`
A functor object. | entailment |
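A hedged sketch of the intended workflow, assuming `castro` is an existing `CastroData` instance; the initial parameters mirror the `PowerLaw` defaults above:

```python
# Build a functor that maps spectral parameters to per-bin normalizations.
fn = castro.create_functor('PowerLaw', scale=1E3)

# Evaluate the stacked log-likelihood for an explicit parameter choice:
# [prefactor, index], as in the PowerLaw branch of create_functor.
loglike = castro.spectrum_loglike('PowerLaw', params=[5e-13, -2.0], scale=1E3)
```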
def create_from_fits(cls, fitsfile, norm_type='flux'):
"""Build a TSCube object from a fits file created by gttscube
Parameters
----------
fitsfile : str
Path to the tscube FITS file.
norm_type : str
String specifying the quantity used for the normalization
"""
tsmap = WcsNDMap.read(fitsfile)
tab_e = Table.read(fitsfile, 'EBOUNDS')
tab_s = Table.read(fitsfile, 'SCANDATA')
tab_f = Table.read(fitsfile, 'FITDATA')
tab_e = convert_sed_cols(tab_e)
tab_s = convert_sed_cols(tab_s)
tab_f = convert_sed_cols(tab_f)
emin = np.array(tab_e['e_min'])
emax = np.array(tab_e['e_max'])
try:
if str(tab_e['e_min'].unit) == 'keV':
emin /= 1000.
except Exception:
pass
try:
if str(tab_e['e_max'].unit) == 'keV':
emax /= 1000.
except Exception:
pass
nebins = len(tab_e)
npred = tab_e['ref_npred']
ndim = len(tsmap.data.shape)
if ndim == 2:
cube_shape = (tsmap.data.shape[0],
tsmap.data.shape[1], nebins)
elif ndim == 1:
cube_shape = (tsmap.data.shape[0], nebins)
else:
raise RuntimeError("Counts map has dimension %i" % (ndim))
refSpec = ReferenceSpec.create_from_table(tab_e)
nll_vals = -np.array(tab_s["dloglike_scan"])
norm_vals = np.array(tab_s["norm_scan"])
axis = MapAxis.from_edges(np.concatenate((emin, emax[-1:])),
interp='log')
geom_3d = tsmap.geom.to_cube([axis])
tscube = WcsNDMap(geom_3d,
np.rollaxis(tab_s["ts"].reshape(cube_shape), 2, 0))
ncube = WcsNDMap(geom_3d,
np.rollaxis(tab_s["norm"].reshape(cube_shape), 2, 0))
nmap = WcsNDMap(tsmap.geom,
tab_f['fit_norm'].reshape(tsmap.data.shape))
ref_colname = 'ref_%s' % norm_type
norm_vals *= tab_e[ref_colname][np.newaxis, :, np.newaxis]
return cls(tsmap, nmap, tscube, ncube,
norm_vals, nll_vals, refSpec,
norm_type) | Build a TSCube object from a fits file created by gttscube
Parameters
----------
fitsfile : str
Path to the tscube FITS file.
norm_type : str
String specifying the quantity used for the normalization | entailment |
def castroData_from_ipix(self, ipix, colwise=False):
""" Build a CastroData object for a particular pixel """
# pix = utils.skydir_to_pix
if colwise:
ipix = self._tsmap.ipix_swap_axes(ipix, colwise)
norm_d = self._norm_vals[ipix]
nll_d = self._nll_vals[ipix]
return CastroData(norm_d, nll_d, self._refSpec, self._norm_type) | Build a CastroData object for a particular pixel | entailment |
def castroData_from_pix_xy(self, xy, colwise=False):
""" Build a CastroData object for a particular pixel """
ipix = self._tsmap.xy_pix_to_ipix(xy, colwise)
return self.castroData_from_ipix(ipix) | Build a CastroData object for a particular pixel | entailment |
def find_and_refine_peaks(self, threshold, min_separation=1.0,
use_cumul=False):
"""Run a simple peak-finding algorithm, and fit the peaks to
paraboloids to extract their positions and error ellipses.
Parameters
----------
threshold : float
Peak threshold in TS.
min_separation : float
Radius of region size in degrees. Sets the minimum allowable
separation between peaks.
use_cumul : bool
If True, use the cumulative TS map (i.e., the TS summed
over the energy bins) instead of the TS map from the fit
to an index=2 power law.
Returns
-------
peaks : list
List of dictionaries containing the location and amplitude of
each peak. Output of `~fermipy.sourcefind.find_peaks`
"""
if use_cumul:
theMap = self._ts_cumul
else:
theMap = self._tsmap
peaks = find_peaks(theMap, threshold, min_separation)
for peak in peaks:
o, skydir = fit_error_ellipse(theMap, (peak['ix'], peak['iy']),
dpix=2)
peak['fit_loc'] = o
peak['fit_skydir'] = skydir
if o['fit_success']:
skydir = peak['fit_skydir']
else:
skydir = peak['skydir']
return peaks | Run a simple peak-finding algorithm, and fit the peaks to
paraboloids to extract their positions and error ellipses.
Parameters
----------
threshold : float
Peak threshold in TS.
min_separation : float
Radius of region size in degrees. Sets the minimum allowable
separation between peaks.
use_cumul : bool
If True, use the cumulative TS map (i.e., the TS summed
over the energy bins) instead of the TS map from the fit
to an index=2 power law.
Returns
-------
peaks : list
List of dictionaries containing the location and amplitude of
each peak. Output of `~fermipy.sourcefind.find_peaks` | entailment |
def make_lat_lons(cvects):
""" Convert from directional cosines to latitidue and longitude
Parameters
----------
cvects : directional cosine (i.e., x,y,z component) values
returns np.ndarray with the latitude values followed by the longitude values (in degrees)
"""
lats = np.degrees(np.arcsin(cvects[2]))
lons = np.degrees(np.arctan2(cvects[0], cvects[1]))
return np.hstack([lats, lons]) | Convert from directional cosines to latitude and longitude
Parameters
----------
cvects : directional cosine (i.e., x,y,z component) values
returns np.ndarray with the latitude values followed by the longitude values (in degrees) | entailment |
def make_cos_vects(lon_vect, lat_vect):
""" Convert from longitude (RA or GLON) and latitude (DEC or GLAT) values to directional cosines
Parameters
----------
lon_vect,lat_vect : np.ndarray(nsrc)
Input values
returns (np.ndarray(3,nsrc)) with the directional cosine (i.e., x,y,z component) values
"""
lon_rad = np.radians(lon_vect)
lat_rad = np.radians(lat_vect)
cvals = np.cos(lat_rad)
xvals = cvals * np.sin(lon_rad)
yvals = cvals * np.cos(lon_rad)
zvals = np.sin(lat_rad)
cvects = np.vstack([xvals, yvals, zvals])
return cvects | Convert from longitude (RA or GLON) and latitude (DEC or GLAT) values to directional cosines
Parameters
----------
lon_vect,lat_vect : np.ndarray(nsrc)
Input values
returns (np.ndarray(3,nsrc)) with the directional cosine (i.e., x,y,z component) values | entailment |
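A quick round trip with the two helpers above (assuming they are importable from this module):

```python
import numpy as np

lon = np.array([10.0, 120.0, 250.0])   # degrees
lat = np.array([-30.0, 0.0, 45.0])

cvects = make_cos_vects(lon, lat)
print(cvects.shape)                    # (3, 3): x, y, z rows
print(np.sum(cvects**2, axis=0))       # unit vectors: all 1.0

# make_lat_lons returns the latitudes followed by the longitudes;
# longitudes come back in the (-180, 180] branch of arctan2.
print(make_lat_lons(cvects))           # [-30. 0. 45. 10. 120. -110.]
```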
def find_matches_by_distance(cos_vects, cut_dist):
"""Find all the pairs of sources within a given distance of each
other.
Parameters
----------
cos_vects : np.ndarray(3,nsrc)
Directional cosines (i.e., x,y,z component) values of all the
sources
cut_dist : float
Angular cut in degrees that will be used to select pairs by
their separation.
Returns
-------
match_dict : dict((int,int):float).
Each entry gives a pair of source indices, and the
corresponding distance
"""
dist_rad = np.radians(cut_dist)
cos_t_cut = np.cos(dist_rad)
nsrc = cos_vects.shape[1]
match_dict = {}
for i, v1 in enumerate(cos_vects.T):
cos_t_vect = (v1 * cos_vects.T).sum(1)
cos_t_vect[cos_t_vect < -1.0] = -1.0
cos_t_vect[cos_t_vect > 1.0] = 1.0
mask = cos_t_vect > cos_t_cut
acos_t_vect = np.ndarray(nsrc)
# The 1e-6 is here b/c we use 0.0 for sources that failed the cut elsewhere.
# We should maybe do this better, but it works for now.
acos_t_vect[mask] = np.degrees(np.arccos(cos_t_vect[mask])) + 1e-6
for j in np.where(mask[:i])[0]:
match_dict[(j, i)] = acos_t_vect[j]
return match_dict | Find all the pairs of sources within a given distance of each
other.
Parameters
----------
cos_vects : np.ndarray(3,nsrc)
Directional cosines (i.e., x,y,z component) values of all the
sources
cut_dist : float
Angular cut in degrees that will be used to select pairs by
their separation.
Returns
-------
match_dict : dict((int,int):float).
Each entry gives a pair of source indices, and the
corresponding distance | entailment |
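A small sanity check (assuming the helpers above): two sources 0.4 degrees apart and one far away, with a 1-degree cut.

```python
import numpy as np

lons = np.array([10.0, 10.4, 200.0])
lats = np.array([0.0, 0.0, 0.0])
cvects = make_cos_vects(lons, lats)

matches = find_matches_by_distance(cvects, cut_dist=1.0)
print(matches)   # {(0, 1): ~0.4}; self-pairings are skipped by mask[:i]
```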
def find_matches_by_sigma(cos_vects, unc_vect, cut_sigma):
"""Find all the pairs of sources within a given distance of each
other.
Parameters
----------
cos_vects : np.ndarray(3,nsrc)
Directional cosines (i.e., x,y,z component) values of all the sources
unc_vect : np.ndarray(nsrc)
Uncertainties on the source positions
cut_sigma : float
Angular cut in positional errors standard deviations that will
be used to select pairs by their separation.
Returns
-------
match_dict : dict((int,int):float)
Each entry gives a pair of source indices, and the
corresponding sigma
"""
match_dict = {}
sig_2_vect = unc_vect * unc_vect
for i, v1 in enumerate(cos_vects.T):
cos_t_vect = (v1 * cos_vects.T).sum(1)
cos_t_vect[cos_t_vect < -1.0] = -1.0
cos_t_vect[cos_t_vect > 1.0] = 1.0
sig_2_i = sig_2_vect[i]
acos_t_vect = np.degrees(np.arccos(cos_t_vect))
total_unc = np.sqrt(sig_2_i + sig_2_vect)
sigma_vect = acos_t_vect / total_unc
mask = sigma_vect < cut_sigma
for j in np.where(mask[:i])[0]:
match_dict[(j, i)] = sigma_vect[j]
return match_dict | Find all the pairs of sources within a given number of sigma of
each other.
Parameters
----------
cos_vects : np.ndarray(3,nsrc)
Directional cosines (i.e., x,y,z component) values of all the sources
unc_vect : np.ndarray(nsrc)
Uncertainties on the source positions
cut_sigma : float
Angular cut in positional errors standard deviations that will
be used to select pairs by their separation.
Returns
-------
match_dict : dict((int,int):float)
Each entry gives a pair of source indices, and the
corresponding sigma | entailment |
def fill_edge_matrix(nsrcs, match_dict):
""" Create and fill a matrix with the graph 'edges' between sources.
Parameters
----------
nsrcs : int
number of sources (used to allocate the size of the matrix)
match_dict : dict((int,int):float)
Each entry gives a pair of source indices, and the
corresponding measure (either distance or sigma)
Returns
-------
e_matrix : `~numpy.ndarray`
numpy.ndarray((nsrcs,nsrcs)) filled with zeros except for the
matches, which are filled with the edge measures (either
distances or sigmas)
"""
e_matrix = np.zeros((nsrcs, nsrcs))
for k, v in match_dict.items():
e_matrix[k[0], k[1]] = v
return e_matrix | Create and fill a matrix with the graph 'edges' between sources.
Parameters
----------
nsrcs : int
number of sources (used to allocate the size of the matrix)
match_dict : dict((int,int):float)
Each entry gives a pair of source indices, and the
corresponding measure (either distance or sigma)
Returns
-------
e_matrix : `~numpy.ndarray`
numpy.ndarray((nsrcs,nsrcs)) filled with zeros except for the
matches, which are filled with the edge measures (either
distances or sigmas) | entailment |
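For example, a single matched pair among three sources yields a 3 x 3 matrix with one non-zero entry:

```python
e_matrix = fill_edge_matrix(3, {(0, 2): 0.35})
print(e_matrix)
# [[0.   0.   0.35]
#  [0.   0.   0.  ]
#  [0.   0.   0.  ]]
```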
def make_rev_dict_unique(cdict):
""" Make a reverse dictionary
Parameters
----------
cdict : dict(int:dict(int:True))
A dictionary of clusters. Each cluster is a source index and
the dictionary of other sources in the cluster.
Returns
-------
rev_dict : dict(int:dict(int:True))
A dictionary pointing from source index to the clusters it is
included in.
"""
rev_dict = {}
for k, v in cdict.items():
if k in rev_dict:
rev_dict[k][k] = True
else:
rev_dict[k] = {k: True}
for vv in v.keys():
if vv in rev_dict:
rev_dict[vv][k] = True
else:
rev_dict[vv] = {k: True}
return rev_dict | Make a reverse dictionary
Parameters
----------
cdict : dict(int:dict(int:True))
A dictionary of clusters. Each cluster is a source index and
the dictionary of other sources in the cluster.
Returns
-------
rev_dict : dict(int:dict(int:True))
A dictionary pointing from source index to the clusters it is
included in. | entailment |
def make_clusters(span_tree, cut_value):
""" Find clusters from the spanning tree
Parameters
----------
span_tree : a sparse nsrcs x nsrcs array
Filled with zeros except for the active edges, which are filled with the
edge measures (either distances or sigmas)
cut_value : float
Value used to form the clusters. All links with measures above this value will be cut.
returns cdict, rdict
cdict is a dictionary of clusters: each cluster is a source index and the list of other
sources in the cluster. rdict is the reverse dictionary from source index to cluster key.
"""
iv0, iv1 = span_tree.nonzero()
# This is the dictionary of all the pairings for each source
match_dict = {}
for i0, i1 in zip(iv0, iv1):
d = span_tree[i0, i1]
# Cut on the link distance
if d > cut_value:
continue
imin = int(min(i0, i1))
imax = int(max(i0, i1))
if imin in match_dict:
match_dict[imin][imax] = True
else:
match_dict[imin] = {imax: True}
working = True
while working:
working = False
rev_dict = make_rev_dict_unique(match_dict)
k_sort = sorted(rev_dict.keys())
for k in k_sort:
v = rev_dict[k]
# Multiple mappings
if len(v) > 1:
working = True
v_sort = sorted(v.keys())
cluster_idx = v_sort[0]
for vv in v_sort[1:]:
try:
to_merge = match_dict.pop(vv)
except KeyError:
continue
try:
match_dict[cluster_idx].update(to_merge)
match_dict[cluster_idx][vv] = True
except KeyError:
continue
# remove self references
try:
match_dict[cluster_idx].pop(cluster_idx)
except KeyError:
pass
# Convert to a int:list dictionary
cdict = {}
for k, v in match_dict.items():
cdict[k] = list(v.keys())
# make the reverse dictionary
rdict = make_reverse_dict(cdict)
return cdict, rdict | Find clusters from the spanning tree
Parameters
----------
span_tree : a sparse nsrcs x nsrcs array
Filled with zeros except for the active edges, which are filled with the
edge measures (either distances or sigmas)
cut_value : float
Value used to form the clusters. All links with measures above this value will be cut.
returns cdict, rdict
cdict is a dictionary of clusters: each cluster is a source index and the list of other
sources in the cluster. rdict is the reverse dictionary from source index to cluster key. | entailment |
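A hedged sketch of how these pieces appear to fit together, using `scipy.sparse.csgraph.minimum_spanning_tree` to build the spanning tree from the edge matrix; `cvects` and the helper functions are assumed from earlier in this module.

```python
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

# 1. Pair up sources within 1 degree of each other.
match_dict = find_matches_by_distance(cvects, cut_dist=1.0)

# 2. Build the edge matrix and reduce it to a minimum spanning tree.
nsrcs = cvects.shape[1]
e_matrix = fill_edge_matrix(nsrcs, match_dict)
span_tree = minimum_spanning_tree(csr_matrix(e_matrix))

# 3. Cut the tree into clusters; links longer than 0.5 degrees are dropped.
cdict, rdict = make_clusters(span_tree, cut_value=0.5)
```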
def select_from_cluster(idx_key, idx_list, measure_vect):
""" Select a single source from a cluster and make it the new cluster key
Parameters
----------
idx_key : int
index of the current key for a cluster
idx_list : [int,...]
list of the other source indices in the cluster
measure_vect : np.ndarray((nsrc),float)
vector of the measure used to select the best source in the cluster
returns best_idx:out_list
where best_idx is the index of the best source in the cluster and
out_list is the list of all the other indices
"""
best_idx = idx_key
best_measure = measure_vect[idx_key]
out_list = [idx_key] + idx_list
for idx, measure in zip(idx_list, measure_vect[idx_list]):
if measure < best_measure:
best_idx = idx
best_measure = measure
out_list.remove(best_idx)
return best_idx, out_list | Select a single source from a cluster and make it the new cluster key
Parameters
----------
idx_key : int
index of the current key for a cluster
idx_list : [int,...]
list of the other source indices in the cluster
measure_vect : np.ndarray((nsrc),float)
vector of the measure used to select the best source in the cluster
returns best_idx:out_list
where best_idx is the index of the best source in the cluster and
out_list is the list of all the other indices | entailment |
def find_centroid(cvects, idx_list, weights=None):
""" Find the centroid for a set of vectors
Parameters
----------
cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values
idx_list : [int,...]
list of the source indices in the cluster
weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting
returns (np.ndarray(3)) with the directional cosine (i.e., x,y,z component) values of the centroid
"""
if weights is None:
weighted = cvects.T[idx_list].sum(0)
sum_weights = float(len(idx_list))
else:
weighted = ((cvects * weights).T[idx_list]).sum(0)
sum_weights = weights[idx_list].sum()
weighted /= sum_weights
# make sure it is normalized
norm = np.sqrt((weighted * weighted).sum())
weighted /= norm
return weighted | Find the centroid for a set of vectors
Parameters
----------
cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values
idx_list : [int,...]
list of the source indices in the cluster
weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting
returns (np.ndarray(3)) with the directional cosine (i.e., x,y,z component) values of the centroid | entailment |
def count_sources_in_cluster(n_src, cdict, rev_dict):
""" Make a vector of sources in each cluster
Parameters
----------
n_src : number of sources
cdict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and
the list of other source in the cluster.
rev_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster. Note that the key
does not point to itself.
Returns
----------
`np.ndarray((n_src),int)` with the number of sources in the cluster
a given source belongs to.
"""
ret_val = np.zeros((n_src), int)
for i in range(n_src):
try:
key = rev_dict[i]
except KeyError:
key = i
try:
n = len(cdict[key])
except KeyError:
n = 0
ret_val[i] = n
return ret_val | Make a vector of sources in each cluster
Parameters
----------
n_src : number of sources
cdict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and
the list of other source in the cluster.
rev_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster. Note that the key
does not point to itself.
Returns
----------
`np.ndarray((n_src),int)` with the number of sources in the cluster
a given source belongs to. | entailment |
def find_dist_to_centroid(cvects, idx_list, weights=None):
""" Find the centroid for a set of vectors
Parameters
----------
cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values
idx_list : [int,...]
list of the source indices in the cluster
weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting
returns (np.ndarray(nsrc)) distances to the centroid (in degrees)
"""
centroid = find_centroid(cvects, idx_list, weights)
dist_vals = np.degrees(np.arccos((centroid * cvects.T[idx_list]).sum(1)))
return dist_vals, centroid | Find the centroid of a set of vectors and the distances to it
Parameters
----------
cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values
idx_list : [int,...]
list of the source indices in the cluster
weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting
returns (np.ndarray(nsrc)) distances to the centroid (in degrees) | entailment |
def find_dist_to_centroids(cluster_dict, cvects, weights=None):
""" Find the centroids and the distances to the centroid for all sources in a set of clusters
Parameters
----------
cluster_dict : dict(int:[int,...])
Each cluster is a source index and the list of other sources in the cluster.
cvects : np.ndarray(3,nsrc)
Directional cosines (i.e., x,y,z component) values of all the sources
weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting
Returns
----------
distances : ~numpy.ndarray(nsrc) with the distances to the centroid of the cluster. 0 for unclustered sources
cent_dict : dict(int:numpy.ndarray(2)), dictionary for the centroid locations
"""
distances = np.zeros((cvects.shape[1]))
cent_dict = {}
for k, v in cluster_dict.items():
l = [k] + v
distances[l], centroid = find_dist_to_centroid(cvects, l, weights)
cent_dict[k] = make_lat_lons(centroid)
return distances, cent_dict | Find the centroids and the distances to the centroid for all sources in a set of clusters
Parameters
----------
cluster_dict : dict(int:[int,...])
Each cluster is a source index and the list of other sources in the cluster.
cvects : np.ndarray(3,nsrc)
Directional cosines (i.e., x,y,z component) values of all the sources
weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting
Returns
----------
distances : ~numpy.ndarray(nsrc) with the distances to the centroid of the cluster. 0 for unclustered sources
cent_dict : dict(int:numpy.ndarray(2)), dictionary for the centroid locations | entailment |
def select_from_clusters(cluster_dict, measure_vect):
""" Select a single source from each cluster and make it the new cluster key
cluster_dict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster.
measure_vect : np.ndarray((nsrc),float)
vector of the measure used to select the best source in the cluster
returns dict(int:[int,...])
New dictionary of clusters keyed by the best source in each cluster
"""
out_dict = {}
for idx_key, idx_list in cluster_dict.items():
out_idx, out_list = select_from_cluster(
idx_key, idx_list, measure_vect)
out_dict[out_idx] = out_list
return out_dict | Select a single source from each cluster and make it the new cluster key
cluster_dict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster.
measure_vect : np.ndarray((nsrc),float)
vector of the measure used to select the best source in the cluster
returns dict(int:[int,...])
New dictionary of clusters keyed by the best source in each cluster | entailment |
def make_reverse_dict(in_dict, warn=True):
""" Build a reverse dictionary from a cluster dictionary
Parameters
----------
in_dict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and
the list of other sources in the cluster.
warn : bool
If True, print a warning when a source appears in more than one cluster.
Returns
-------
out_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster. Note that the key
does not point to itself.
"""
out_dict = {}
for k, v in in_dict.items():
for vv in v:
if vv in out_dict:
if warn:
print("Dictionary collision %i" % vv)
out_dict[vv] = k
return out_dict | Build a reverse dictionary from a cluster dictionary
Parameters
----------
in_dict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and
the list of other sources in the cluster.
warn : bool
If True, print a warning when a source appears in more than one cluster.
Returns
-------
out_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster. Note that the key
does not point to itself. | entailment |
def make_cluster_vector(rev_dict, n_src):
""" Converts the cluster membership dictionary to an array
Parameters
----------
rev_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster.
n_src : int
Number of sources in the array
Returns
-------
out_array : `numpy.ndarray`
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not.
"""
out_array = -1 * np.ones((n_src), int)
for k, v in rev_dict.items():
out_array[k] = v
# We need this to make sure the seed source points at itself
out_array[v] = v
return out_array | Converts the cluster membership dictionary to an array
Parameters
----------
rev_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster.
n_src : int
Number of sources in the array
Returns
-------
out_array : `numpy.ndarray`
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not. | entailment |
def make_cluster_name_vector(cluster_vect, src_names):
""" Converts the cluster membership dictionary to an array
Parameters
----------
cluster_vect : `numpy.ndarray`
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not.
src_names : `numpy.ndarray`
An array with the source names
Returns
-------
out_array : `numpy.ndarray`
An array filled with the name of the seed of a cluster if a source belongs to a cluster,
and with an empty string if it does not.
"""
out_array = np.where(cluster_vect >= 0, src_names[cluster_vect], "")
return out_array | Converts the cluster membership array to an array of seed source names
Parameters
----------
cluster_vect : `numpy.ndarray`
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not.
src_names : `numpy.ndarray`
An array with the source names
Returns
-------
out_array : `numpy.ndarray`
An array filled with the name of the seed of a cluster if a source belongs to a cluster,
and with an empty string if it does not. | entailment |
def make_dict_from_vector(in_array):
""" Converts the cluster membership array stored in a fits file back to a dictionary
Parameters
----------
in_array : `np.ndarray`
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not.
Returns
-------
returns dict(int:[int,...])
Dictionary of clusters keyed by the best source in each cluster
"""
out_dict = {}
for i, k in enumerate(in_array):
if k < 0:
continue
try:
out_dict[k].append(i)
except KeyError:
out_dict[k] = [i]
return out_dict | Converts the cluster membership array stored in a fits file back to a dictionary
Parameters
----------
in_array : `np.ndarray`
An array filled with the index of the seed of a cluster if a source belongs to a cluster,
and with -1 if it does not.
Returns
-------
returns dict(int:[int,...])
Dictionary of clusters keyed by the best source in each cluster | entailment |
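Round trip between the dictionary and array representations (assuming the helpers above):

```python
cdict = {2: [0, 5], 7: [3]}            # clusters seeded by sources 2 and 7
rev = make_reverse_dict(cdict)          # {0: 2, 5: 2, 3: 7}
vec = make_cluster_vector(rev, n_src=8)
print(vec)                              # [ 2 -1  2  7 -1  2 -1  7]
print(make_dict_from_vector(vec))       # {2: [0, 2, 5], 7: [3, 7]}
```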
def filter_and_copy_table(tab, to_remove):
""" Filter and copy a FITS table.
Parameters
----------
tab : FITS Table object
to_remove : [int,...]
list of indices to remove from the table
returns FITS Table object
"""
nsrcs = len(tab)
mask = np.zeros((nsrcs), '?')
mask[to_remove] = True
inv_mask = np.invert(mask)
out_tab = tab[inv_mask]
return out_tab | Filter and copy a FITS table.
Parameters
----------
tab : FITS Table object
to_remove : [int,...]
list of indices to remove from the table
returns FITS Table object | entailment |
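For example, with an in-memory astropy Table:

```python
from astropy.table import Table

tab = Table({'name': ['a', 'b', 'c', 'd'], 'ts': [25.0, 9.0, 100.0, 4.0]})
out = filter_and_copy_table(tab, to_remove=[1, 3])
print(list(out['name']))   # ['a', 'c']
```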
def baseline_roi_fit(gta, make_plots=False, minmax_npred=[1e3, np.inf]):
"""Do baseline fitting for a target Region of Interest
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
make_plots : bool
Flag to make standard analysis plots
minmax_npred : tuple or list
Range of number of predicted counts for which to free sources in initial fitting.
"""
gta.free_sources(False)
gta.write_roi('base_roi', make_plots=make_plots)
gta.free_sources(True, minmax_npred=minmax_npred)
gta.optimize()
gta.free_sources(False)
gta.print_roi() | Do baseline fitting for a target Region of Interest
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
make_plots : bool
Flag to make standard analysis plots
minmax_npred : tuple or list
Range of number of predicted counts for which to free sources in initial fitting. | entailment |
def localize_sources(gta, **kwargs):
"""Relocalize sources in the region of interest
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
kwargs :
These are passed to the gta.localize function
"""
# Localize all point sources
for src in sorted(gta.roi.sources, key=lambda t: t['ts'], reverse=True):
# for s in gta.roi.sources:
if not src['SpatialModel'] == 'PointSource':
continue
if src['offset_roi_edge'] > -0.1:
continue
gta.localize(src.name, **kwargs)
gta.optimize()
gta.print_roi() | Relocalize sources in the region of interest
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
kwargs :
These are passed to the gta.localize function | entailment |
def add_source_get_correlated(gta, name, src_dict, correl_thresh=0.25, non_null_src=False):
"""Add a source and get the set of correlated sources
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
name : str
Name of the source we are adding
src_dict : dict
Dictionary of the source parameters
correl_thresh : float
Threshold for considering a source to be correlated
non_null_src : bool
If True, don't zero the source
Returns
-------
cdict : dict
Dictionary with names and correlation factors of correlated sources
test_src_name : str
Name of the test source
"""
if gta.roi.has_source(name):
gta.zero_source(name)
gta.update_source(name)
test_src_name = "%s_test" % name
else:
test_src_name = name
gta.add_source(test_src_name, src_dict)
gta.free_norm(test_src_name)
gta.free_shape(test_src_name, free=False)
fit_result = gta.fit(covar=True)
mask = fit_result['is_norm']
src_names = np.array(fit_result['src_names'])[mask]
idx = (src_names == test_src_name).argmax()
correl_vals = fit_result['correlation'][idx][mask]
cdict = {}
for src_name, correl_val in zip(src_names, correl_vals):
if src_name == name:
continue
if np.fabs(correl_val) > correl_thresh:
cdict[src_name] = correl_val
if not non_null_src:
gta.zero_source(test_src_name)
gta.fit(covar=True)
return cdict, test_src_name | Add a source and get the set of correlated sources
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
name : str
Name of the source we are adding
src_dict : dict
Dictionary of the source parameters
correl_thresh : float
Threshold for considering a source to be correlated
non_null_src : bool
If True, don't zero the source
Returns
-------
cdict : dict
Dictionary with names and correlation factors of correlated sources
test_src_name : str
Name of the test source | entailment |
def build_profile_dict(basedir, profile_name):
"""Get the name and source dictionary for the test source.
Parameters
----------
basedir : str
Path to the analysis directory
profile_name : str
Key for the spatial form of the target
Returns
-------
profile_name : str
Name of this particular profile
src_name : str
Name of the source for this particular profile
profile_dict : dict
Dictionary with the source parameters
"""
profile_path = os.path.join(basedir, "profile_%s.yaml" % profile_name)
profile_config = load_yaml(profile_path)
src_name = profile_config['name']
profile_dict = profile_config['source_model']
return profile_name, src_name, profile_dict | Get the name and source dictionary for the test source.
Parameters
----------
basedir : str
Path to the analysis directory
profile_name : str
Key for the spatial form of the target
Returns
-------
profile_name : str
Name of this particular profile
src_name : str
Name of the source for this particular profile
profile_dict : dict
Dictionary with the source parameters | entailment |
def get_batch_job_args(job_time=1500):
""" Get the correct set of batch jobs arguments.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
Returns
-------
job_args : dict
Dictionary of arguments used to submit a batch job
"""
if DEFAULT_JOB_TYPE == 'slac':
from fermipy.jobs.slac_impl import get_slac_default_args
return get_slac_default_args(job_time)
elif DEFAULT_JOB_TYPE == 'native':
from fermipy.jobs.native_impl import get_native_default_args
return get_native_default_args()
return None | Get the correct set of batch jobs arguments.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
Returns
-------
job_args : dict
Dictionary of arguments used to submit a batch job | entailment |
def get_batch_job_interface(job_time=1500):
""" Create a batch job interface object.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
Returns
-------
job_interface : `SysInterface`
Object that manages interactions with batch farm
"""
batch_job_args = get_batch_job_args(job_time)
if DEFAULT_JOB_TYPE == 'slac':
from fermipy.jobs.slac_impl import SlacInterface
return SlacInterface(**batch_job_args)
elif DEFAULT_JOB_TYPE == 'native':
from fermipy.jobs.native_impl import NativeInterface
return NativeInterface(**batch_job_args)
return None | Create a batch job interface object.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
Returns
-------
job_interface : `SysInterface`
Object that manages interactions with batch farm | entailment |
def main():
import sys
import argparse
# Argument definition
usage = "usage: %(prog)s [options]"
description = "Collect all the new source"
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument("-i", "--input", type=argparse.FileType('r'), required=True,
help="Input file")
parser.add_argument("-e", "--extension", type=str, default="SKYMAP",
help="FITS HDU with HEALPix map")
parser.add_argument("--ebin", type=str, default=None,
help="Energy bin, integer or 'ALL'")
parser.add_argument("--zscale", type=str, default='log',
help="Scaling for color scale")
parser.add_argument("--zmin", type=float, default=None,
help="Minimum z-axis value")
parser.add_argument("--zmax", type=float, default=None,
help="Maximum z-axis value")
parser.add_argument("--cbar", action='store_true', default=False,
help="draw color bar")
parser.add_argument("-o", "--output", type=argparse.FileType('w'),
help="Output file. Leave blank for interactive.")
# Parse the command line
args = parser.parse_args(sys.argv[1:])
hpxmap = Map.read(args.input.name, hdu=args.extension)
outdata = []
if args.zscale == 'sqrt':
the_norm = PowerNorm(gamma=0.5)
elif args.zscale == 'log':
the_norm = LogNorm()
elif args.zscale == 'lin':
the_norm = Normalize()
else:
the_norm = Normalize()
fig, ax, im = hpxmap.plot(norm=the_norm, vmin=args.zmin, vmax=args.zmax)
outdata.append(fig)
if args.cbar:
cbar = plt.colorbar(im, orientation='horizontal', shrink=0.7, pad=0.15, fraction=0.05)
"""
if args.ebin == "ALL":
wcsproj = hpxmap.geom.make_wcs(
naxis=2, proj='MOL', energies=None, oversample=2)
mapping = HpxToWcsMapping(hpxmap.hpx, wcsproj)
for i, data in enumerate(hpxmap.counts):
ip = ImagePlotter(data=data, proj=hpxmap.hpx, mapping=mapping)
fig = plt.figure(i)
im, ax = ip.plot(zscale=args.zscale,
vmin=args.zmin, vmax=args.zmax)
outdata.append(fig)
elif args.ebin is None:
ip = ImagePlotter(data=hpxmap.counts, proj=hpxmap.hpx)
im, ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax)
outdata.append((im, ax))
else:
try:
ibin = int(args.ebin)
ip = ImagePlotter(data=hpxmap.counts[ibin], proj=hpxmap.hpx)
im, ax = ip.plot(zscale=args.zscale,
vmin=args.zmin, vmax=args.zmax)
outdata.append((im, ax))
except:
raise ValueError("--ebin argument must be an integer or 'ALL'")
"""
if args.output is None:
plt.show()
else:
if len(outdata) == 1:
plt.savefig(args.output.name)
else:
base, ext = os.path.splitext(args.output.name)
for i, fig in enumerate(outdata):
fig.savefig("%s_%02i%s" % (base, i, ext)) | if args.ebin == "ALL":
wcsproj = hpxmap.geom.make_wcs(
naxis=2, proj='MOL', energies=None, oversample=2)
mapping = HpxToWcsMapping(hpxmap.hpx, wcsproj)
for i, data in enumerate(hpxmap.counts):
ip = ImagePlotter(data=data, proj=hpxmap.hpx, mapping=mapping)
fig = plt.figure(i)
im, ax = ip.plot(zscale=args.zscale,
vmin=args.zmin, vmax=args.zmax)
outdata.append(fig)
elif args.ebin is None:
ip = ImagePlotter(data=hpxmap.counts, proj=hpxmap.hpx)
im, ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax)
outdata.append((im, ax))
else:
try:
ibin = int(args.ebin)
ip = ImagePlotter(data=hpxmap.counts[ibin], proj=hpxmap.hpx)
im, ax = ip.plot(zscale=args.zscale,
vmin=args.zmin, vmax=args.zmax)
outdata.append((im, ax))
except:
raise ValueError("--ebin argument must be an integer or 'ALL'") | entailment |
def register_classes():
"""Register these classes with the `LinkFactory` """
CopyBaseROI.register_class()
CopyBaseROI_SG.register_class()
SimulateROI.register_class()
SimulateROI_SG.register_class()
RandomDirGen.register_class()
RandomDirGen_SG.register_class() | Register these classes with the `LinkFactory` | entailment |
def copy_analysis_files(cls, orig_dir, dest_dir, copyfiles):
""" Copy a list of files from orig_dir to dest_dir"""
for pattern in copyfiles:
glob_path = os.path.join(orig_dir, pattern)
files = glob.glob(glob_path)
for ff in files:
f = os.path.basename(ff)
orig_path = os.path.join(orig_dir, f)
dest_path = os.path.join(dest_dir, f)
try:
copyfile(orig_path, dest_path)
except IOError:
sys.stderr.write("WARNING: failed to copy %s\n" % orig_path) | Copy a list of files from orig_dir to dest_dir | entailment |
def copy_target_dir(cls, orig_dir, dest_dir, roi_baseline, extracopy):
""" Create and populate directoris for target analysis
"""
try:
os.makedirs(dest_dir)
except OSError:
pass
copyfiles = ['%s.fits' % roi_baseline,
'%s.npy' % roi_baseline,
'%s_*.xml' % roi_baseline] + cls.copyfiles
if isinstance(extracopy, list):
copyfiles += extracopy
cls.copy_analysis_files(orig_dir, dest_dir, copyfiles) | Create and populate directories for target analysis | entailment |
def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
name_keys = dict(target_type=args.ttype,
target_name=args.target,
sim_name=args.sim,
fullpath=True)
orig_dir = NAME_FACTORY.targetdir(**name_keys)
dest_dir = NAME_FACTORY.sim_targetdir(**name_keys)
self.copy_target_dir(orig_dir, dest_dir,
args.roi_baseline, args.extracopy) | Run this analysis | entailment |
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
ttype = args['ttype']
(sim_targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
targets = load_yaml(sim_targets_yaml)
base_config = dict(ttype=ttype,
roi_baseline=args['roi_baseline'],
extracopy=args['extracopy'],
sim=sim)
for target_name in targets.keys():
targetdir = NAME_FACTORY.sim_targetdir(target_type=ttype,
target_name=target_name,
sim_name=sim)
logfile = os.path.join(targetdir, 'copy_base_dir.log')
job_config = base_config.copy()
job_config.update(dict(target=target_name,
logfile=logfile))
job_configs[target_name] = job_config
return job_configs | Hook to build job configurations | entailment |
def _make_wcsgeom_from_config(config):
"""Build a `WCS.Geom` object from a fermipy coniguration file"""
binning = config['binning']
binsz = binning['binsz']
coordsys = binning.get('coordsys', 'GAL')
roiwidth = binning['roiwidth']
proj = binning.get('proj', 'AIT')
ra = config['selection']['ra']
dec = config['selection']['dec']
npix = int(np.round(roiwidth / binsz))
skydir = SkyCoord(ra * u.deg, dec * u.deg)
wcsgeom = WcsGeom.create(npix=npix, binsz=binsz,
proj=proj, coordsys=coordsys,
skydir=skydir)
return wcsgeom | Build a `WcsGeom` object from a fermipy configuration file | entailment |
def _build_skydir_dict(wcsgeom, rand_config):
"""Build a dictionary of random directions"""
step_x = rand_config['step_x']
step_y = rand_config['step_y']
max_x = rand_config['max_x']
max_y = rand_config['max_y']
seed = rand_config['seed']
nsims = rand_config['nsims']
cdelt = wcsgeom.wcs.wcs.cdelt
pixstep_x = step_x / cdelt[0]
pixstep_y = -1. * step_y / cdelt[1]
pixmax_x = max_x / cdelt[0]
pixmax_y = max_y / cdelt[0]
nstep_x = int(np.ceil(2. * pixmax_x / pixstep_x)) + 1
nstep_y = int(np.ceil(2. * pixmax_y / pixstep_y)) + 1
center = np.array(wcsgeom._center_pix)
grid = np.meshgrid(np.linspace(-1 * pixmax_x, pixmax_x, nstep_x),
np.linspace(-1 * pixmax_y, pixmax_y, nstep_y))
grid[0] += center[0]
grid[1] += center[1]
test_grid = wcsgeom.pix_to_coord(grid)
glat_vals = test_grid[0].flat
glon_vals = test_grid[1].flat
conv_vals = SkyCoord(glat_vals * u.deg, glon_vals *
u.deg, frame=Galactic).transform_to(ICRS)
ra_vals = conv_vals.ra.deg[seed:nsims]
dec_vals = conv_vals.dec.deg[seed:nsims]
o_dict = {}
for i, (ra, dec) in enumerate(zip(ra_vals, dec_vals)):
key = i + seed
o_dict[key] = dict(ra=ra, dec=dec)
return o_dict | Build a dictionary of random directions | entailment |
def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
if is_null(args.config):
raise ValueError("Config yaml file must be specified")
if is_null(args.rand_config):
raise ValueError(
"Random direction config yaml file must be specified")
config = load_yaml(args.config)
rand_config = load_yaml(args.rand_config)
wcsgeom = self._make_wcsgeom_from_config(config)
dir_dict = self._build_skydir_dict(wcsgeom, rand_config)
if is_not_null(args.outfile):
write_yaml(dir_dict, args.outfile) | Run this analysis | entailment |
def _clone_config_and_srcmaps(config_path, seed):
"""Clone the configuration"""
workdir = os.path.dirname(config_path)
new_config_path = config_path.replace('.yaml', '_%06i.yaml' % seed)
config = load_yaml(config_path)
comps = config.get('components', [config])
for i, comp in enumerate(comps):
comp_name = "%02i" % i
if 'gtlike' not in comp:
comp['gtlike'] = {}
orig_srcmap = os.path.abspath(os.path.join(workdir, 'srcmap_%s.fits' % (comp_name)))
new_srcmap = os.path.abspath(os.path.join(workdir, 'srcmap_%06i_%s.fits' % (seed, comp_name)))
comp['gtlike']['srcmap'] = new_srcmap
comp['gtlike']['use_external_srcmap'] = True
copyfile(orig_srcmap, new_srcmap)
write_yaml(config, new_config_path)
return new_config_path | Clone the configuration | entailment |
def _run_simulation(gta, roi_baseline,
injected_name, test_sources, current_seed, seed, non_null_src):
"""Simulate a realization of this analysis"""
gta.load_roi('sim_baseline_%06i.npy' % current_seed)
gta.set_random_seed(seed)
gta.simulate_roi()
if injected_name:
gta.zero_source(injected_name)
gta.optimize()
gta.find_sources(sqrt_ts_threshold=5.0, search_skydir=gta.roi.skydir,
search_minmax_radius=[1.0, np.nan])
gta.optimize()
gta.free_sources(skydir=gta.roi.skydir, distance=1.0, pars='norm')
gta.fit(covar=True)
gta.write_roi('sim_refit_%06i' % current_seed)
for test_source in test_sources:
test_source_name = test_source['name']
sedfile = "sed_%s_%06i.fits" % (test_source_name, seed)
correl_dict, test_src_name = add_source_get_correlated(gta, test_source_name,
test_source['source_model'],
correl_thresh=0.25,
non_null_src=non_null_src)
# Write the list of correlated sources
correl_yaml = os.path.join(gta.workdir,
"correl_%s_%06i.yaml" % (test_source_name, seed))
write_yaml(correl_dict, correl_yaml)
gta.free_sources(False)
for src_name in correl_dict.keys():
gta.free_source(src_name, pars='norm')
gta.sed(test_source_name, prefix=test_source_name, outfile=sedfile)
# Set things back to how they were
gta.delete_source(test_source_name)
gta.load_xml('sim_refit_%06i' % current_seed) | Simulate a realization of this analysis | entailment |
def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
if not HAVE_ST:
raise RuntimeError(
"Trying to run fermipy analysis, but don't have ST")
workdir = os.path.dirname(args.config)
_config_file = self._clone_config_and_srcmaps(args.config, args.seed)
gta = GTAnalysis(_config_file, logging={'verbosity': 3},
fileio={'workdir_regex': r'\.xml$|\.npy$'})
gta.load_roi(args.roi_baseline)
simfile = os.path.join(workdir, 'sim_%s_%s.yaml' %
(args.sim, args.sim_profile))
mcube_file = "%s_%s_%06i" % (args.sim, args.sim_profile, args.seed)
sim_config = utils.load_yaml(simfile)
injected_source = sim_config.get('injected_source', None)
if injected_source is not None:
src_dict = injected_source['source_model']
src_dict['ra'] = gta.config['selection']['ra']
src_dict['dec'] = gta.config['selection']['dec']
injected_name = injected_source['name']
gta.add_source(injected_name, src_dict)
gta.write_model_map(mcube_file)
mc_spec_dict = dict(true_counts=gta.model_counts_spectrum(injected_name),
energies=gta.energies,
model=src_dict)
mcspec_file = os.path.join(workdir,
"mcspec_%s_%06i.yaml" % (mcube_file, args.seed))
utils.write_yaml(mc_spec_dict, mcspec_file)
else:
injected_name = None
gta.write_roi('sim_baseline_%06i' % args.seed)
test_sources = []
for profile in args.profiles:
profile_path = os.path.join(workdir, 'profile_%s.yaml' % profile)
test_source = load_yaml(profile_path)
test_sources.append(test_source)
first = args.seed
last = first + args.nsims
for seed in range(first, last):
self._run_simulation(gta, args.roi_baseline,
injected_name, test_sources, first, seed,
non_null_src=args.non_null_src) | Run this analysis | entailment |
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
ttype = args['ttype']
(targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
if targets_yaml is None:
return job_configs
config_yaml = 'config.yaml'
config_override = args.get('config')
if is_not_null(config_override):
config_yaml = config_override
rand_yaml = NAME_FACTORY.resolve_randconfig(args)
targets = load_yaml(targets_yaml)
base_config = dict(rand_config=rand_yaml)
for target_name in targets.keys():
name_keys = dict(target_type=ttype,
target_name=target_name,
sim_name=sim,
fullpath=True)
simdir = NAME_FACTORY.sim_targetdir(**name_keys)
config_path = os.path.join(simdir, config_yaml)
outfile = os.path.join(simdir, 'skydirs.yaml')
logfile = make_nfs_path(outfile.replace('yaml', 'log'))
job_config = base_config.copy()
job_config.update(dict(config=config_path,
outfile=outfile,
logfile=logfile))
job_configs[target_name] = job_config
return job_configs | Hook to build job configurations | entailment |
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
ttype = args['ttype']
(targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
if targets_yaml is None:
return job_configs
config_yaml = 'config.yaml'
config_override = args.get('config')
if is_not_null(config_override):
config_yaml = config_override
targets = load_yaml(targets_yaml)
nsims_job = args['nsims_job']
first_seed = args['seed']
nsims = args['nsims']
last_seed = first_seed + nsims
base_config = dict(sim_profile=args['sim_profile'],
roi_baseline=args['roi_baseline'],
non_null_src=args['non_null_src'],
sim=sim)
for target_name, target_list in targets.items():
name_keys = dict(target_type=ttype,
target_name=target_name,
sim_name=sim,
fullpath=True)
simdir = NAME_FACTORY.sim_targetdir(**name_keys)
config_path = os.path.join(simdir, config_yaml)
job_config = base_config.copy()
job_config.update(dict(config=config_path,
profiles=target_list))
current_seed = first_seed
while current_seed < last_seed:
fullkey = "%s_%06i" % (target_name, current_seed)
logfile = make_nfs_path(os.path.join(simdir, "%s_%s_%06i.log" % (self.linkname,
target_name, current_seed)))
if nsims_job <= 0 or current_seed + nsims_job >= last_seed:
nsims_current = last_seed - current_seed
else:
nsims_current = nsims_job
job_config.update(dict(seed=current_seed,
nsims=nsims_current,
logfile=logfile))
job_configs[fullkey] = job_config.copy()
current_seed += nsims_current
return job_configs | Hook to build job configurations | entailment |
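The seed-chunking loop above is easy to verify in isolation. A standalone sketch with invented numbers (25 simulations starting at seed 0, at most 10 per job) shows that the final job absorbs the remainder:

# Standalone sketch of the seed partitioning; the numbers are invented.
first_seed, nsims, nsims_job = 0, 25, 10
last_seed = first_seed + nsims
current_seed = first_seed
while current_seed < last_seed:
    if nsims_job <= 0 or current_seed + nsims_job >= last_seed:
        nsims_current = last_seed - current_seed
    else:
        nsims_current = nsims_job
    print("seed=%i nsims=%i" % (current_seed, nsims_current))
    current_seed += nsims_current
# -> seed=0 nsims=10, seed=10 nsims=10, seed=20 nsims=5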
def get_branches(aliases):
"""Get unique branch names from an alias dictionary."""
ignore = ['pow', 'log10', 'sqrt', 'max']
branches = []
for k, v in aliases.items():
        tokens = re.sub(r'[\(\)\+\*\/\,\=\<\>\&\!\-\|]', ' ', v).split()
for t in tokens:
if bool(re.search(r'^\d', t)) or len(t) <= 3:
continue
if bool(re.search(r'[a-zA-Z]', t)) and t not in ignore:
branches += [t]
return list(set(branches)) | Get unique branch names from an alias dictionary. | entailment |
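A minimal usage sketch for get_branches; the alias expressions below are invented for illustration:

aliases = {
    'good_evt': '(FswGamState==0)&&(TkrNumTracks>0)',
    'loge': 'log10(EvtEnergyCorr)',
}
print(sorted(get_branches(aliases)))
# -> ['EvtEnergyCorr', 'FswGamState', 'TkrNumTracks']
# Numeric tokens, short tokens, and names in the ignore list (e.g. log10)
# are dropped.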
def load_friend_chains(chain, friend_chains, txt, nfiles=None):
"""Load a list of trees from a file and add them as friends to the
chain."""
    # Arguments ending in .root are treated as a single ROOT file
    if re.search(r'\.root', txt) is not None:
c = ROOT.TChain(chain.GetName())
c.SetDirectory(0)
c.Add(txt)
friend_chains.append(c)
chain.AddFriend(c, rand_str())
return
files = np.loadtxt(txt, unpack=True, dtype='str')
if files.ndim == 0:
files = np.array([files])
if nfiles is not None:
files = files[:nfiles]
print("Loading %i files..." % len(files))
c = ROOT.TChain(chain.GetName())
c.SetDirectory(0)
for f in files:
c.Add(f)
friend_chains.append(c)
chain.AddFriend(c, rand_str())
return | Load a list of trees from a file and add them as friends to the
chain. | entailment |
def find_and_read_ebins(hdulist):
""" Reads and returns the energy bin edges.
    This works for both the case where the energies are in the ENERGIES HDU
    and the case where they are in the EBOUNDS HDU
"""
from fermipy import utils
ebins = None
if 'ENERGIES' in hdulist:
hdu = hdulist['ENERGIES']
ectr = hdu.data.field(hdu.columns[0].name)
ebins = np.exp(utils.center_to_edge(np.log(ectr)))
    elif 'EBOUNDS' in hdulist:
        hdu = hdulist['EBOUNDS']
        # E_MIN/E_MAX are assumed to be stored in keV; divide by 1E3 to
        # convert to MeV
        emin = hdu.data.field('E_MIN') / 1E3
        emax = hdu.data.field('E_MAX') / 1E3
        ebins = np.append(emin, emax[-1])
    return ebins | Reads and returns the energy bin edges.
    This works for both the case where the energies are in the ENERGIES HDU
    and the case where they are in the EBOUNDS HDU | entailment |
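A minimal usage sketch, assuming a counts-cube file that carries either an ENERGIES or an EBOUNDS extension (the file name is illustrative):

from astropy.io import fits

with fits.open('ccube.fits') as hdulist:
    ebins = find_and_read_ebins(hdulist)
    if ebins is not None:
        print('%i energy bins' % (len(ebins) - 1))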
def read_energy_bounds(hdu):
""" Reads and returns the energy bin edges from a FITs HDU
"""
nebins = len(hdu.data)
ebin_edges = np.ndarray((nebins + 1))
    try:
        # E_MIN/E_MAX columns are assumed to be in keV; subtracting 3 in
        # log10 space converts them to log10(MeV)
        ebin_edges[0:-1] = np.log10(hdu.data.field("E_MIN")) - 3.
        ebin_edges[-1] = np.log10(hdu.data.field("E_MAX")[-1]) - 3.
    except KeyError:
        # Fall back to alternate column names (values already in MeV)
        ebin_edges[0:-1] = np.log10(hdu.data.field("energy_MIN"))
        ebin_edges[-1] = np.log10(hdu.data.field("energy_MAX")[-1])
    return ebin_edges | Reads and returns the energy bin edges from a FITS HDU | entailment |
def read_spectral_data(hdu):
""" Reads and returns the energy bin edges, fluxes and npreds from
    a FITS HDU
"""
ebins = read_energy_bounds(hdu)
fluxes = np.ndarray((len(ebins)))
try:
fluxes[0:-1] = hdu.data.field("E_MIN_FL")
fluxes[-1] = hdu.data.field("E_MAX_FL")[-1]
npreds = hdu.data.field("NPRED")
    except KeyError:
fluxes = np.ones((len(ebins)))
npreds = np.ones((len(ebins)))
return ebins, fluxes, npreds | Reads and returns the energy bin edges, fluxes and npreds from
    a FITS HDU | entailment |
def make_energies_hdu(energy_vals, extname="ENERGIES"):
""" Builds and returns a FITs HDU with the energy values
extname : The HDU extension name
"""
cols = [fits.Column("Energy", "D", unit='MeV', array=energy_vals)]
hdu = fits.BinTableHDU.from_columns(cols, name=extname)
    return hdu | Builds and returns a FITS HDU with the energy values
extname : The HDU extension name | entailment |
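A short usage sketch: build an ENERGIES extension from a log-spaced grid and write it out. The grid and file name are illustrative.

import numpy as np
from astropy.io import fits

energies = np.logspace(2, 5, 25)  # 100 MeV to 100 GeV
hdu = make_energies_hdu(energies)
fits.HDUList([fits.PrimaryHDU(), hdu]).writeto('energies.fits', overwrite=True)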
def read_projection_from_fits(fitsfile, extname=None):
"""
Load a WCS or HPX projection.
"""
f = fits.open(fitsfile)
nhdu = len(f)
# Try and get the energy bounds
try:
ebins = find_and_read_ebins(f)
    except Exception:
ebins = None
if extname is None:
# If there is an image in the Primary HDU we can return a WCS-based
# projection
if f[0].header['NAXIS'] != 0:
proj = WCS(f[0].header)
return proj, f, f[0]
else:
if f[extname].header['XTENSION'] == 'IMAGE':
proj = WCS(f[extname].header)
return proj, f, f[extname]
elif extname in ['SKYMAP', 'SKYMAP2']:
proj = HPX.create_from_hdu(f[extname], ebins)
return proj, f, f[extname]
elif f[extname].header['XTENSION'] == 'BINTABLE':
try:
if f[extname].header['PIXTYPE'] == 'HEALPIX':
proj = HPX.create_from_hdu(f[extname], ebins)
return proj, f, f[extname]
            except KeyError:
pass
return None, f, None
# Loop on HDU and look for either an image or a table with HEALPix data
for i in range(1, nhdu):
# if there is an image we can return a WCS-based projection
if f[i].header['XTENSION'] == 'IMAGE':
proj = WCS(f[i].header)
return proj, f, f[i]
elif f[i].header['XTENSION'] == 'BINTABLE':
if f[i].name in ['SKYMAP', 'SKYMAP2']:
proj = HPX.create_from_hdu(f[i], ebins)
return proj, f, f[i]
try:
if f[i].header['PIXTYPE'] == 'HEALPIX':
proj = HPX.create_from_hdu(f[i], ebins)
return proj, f, f[i]
        except KeyError:
pass
return None, f, None | Load a WCS or HPX projection. | entailment |
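Since the projection type depends on what the file contains, callers should check the returned object before using it. A usage sketch with an illustrative file name:

proj, f, hdu = read_projection_from_fits('skymap.fits')
if proj is None:
    print('No WCS or HEALPix projection found')
else:
    print(type(proj).__name__, hdu.name)
f.close()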
def write_tables_to_fits(filepath, tablelist, clobber=False,
namelist=None, cardslist=None, hdu_list=None):
"""
Write some astropy.table.Table objects to a single fits file
"""
outhdulist = [fits.PrimaryHDU()]
rmlist = []
for i, table in enumerate(tablelist):
ft_name = "%s._%i" % (filepath, i)
rmlist.append(ft_name)
try:
os.unlink(ft_name)
        except OSError:
pass
table.write(ft_name, format="fits")
ft_in = fits.open(ft_name)
if namelist:
ft_in[1].name = namelist[i]
if cardslist:
for k, v in cardslist[i].items():
ft_in[1].header[k] = v
ft_in[1].update()
outhdulist += [ft_in[1]]
if hdu_list is not None:
for h in hdu_list:
outhdulist.append(h)
fits.HDUList(outhdulist).writeto(filepath, overwrite=clobber)
for rm in rmlist:
os.unlink(rm) | Write some astropy.table.Table objects to a single fits file | entailment |
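A usage sketch writing two tables to named extensions; the column data are invented for illustration:

from astropy.table import Table

t1 = Table(dict(e_min=[100., 300.], e_max=[300., 1000.]))
t2 = Table(dict(norm=[1.0, 0.5]))
write_tables_to_fits('output.fits', [t1, t2], clobber=True,
                     namelist=['EBOUNDS', 'NORMS'])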
def update_docstring(docstring, options_dict):
"""Update a method docstring by inserting option docstrings defined in
the options dictionary. The input docstring should define `{options}`
at the location where the options docstring block should be inserted.
Parameters
----------
docstring : str
Existing method docstring.
options_dict : dict
Dictionary defining the options that will be appended to the
method docstring. Dictionary keys are mapped to option names
and each element of the dictionary should have the format
(default value, docstring, type).
Returns
-------
docstring : str
Updated method docstring.
"""
options_str = []
for i, (k, v) in enumerate(sorted(options_dict.items())):
option_str = ''
if i == 0:
option_str += '%s : %s\n' % (k, v[2].__name__)
else:
option_str += ' ' * 8 + '%s : %s\n' % (k, v[2].__name__)
option_doc = v[1]
option_doc += ' (default : %s)' % v[0]
option_doc = textwrap.wrap(option_doc, 72 - 12)
option_str += ' ' * 12 + ('\n' + ' ' * 12).join(option_doc)
options_str += [option_str]
options_str = '\n\n'.join(options_str)
return docstring.format(options=options_str) | Update a method docstring by inserting option docstrings defined in
the options dictionary. The input docstring should define `{options}`
at the location where the options docstring block should be inserted.
Parameters
----------
docstring : str
Existing method docstring.
options_dict : dict
Dictionary defining the options that will be appended to the
method docstring. Dictionary keys are mapped to option names
and each element of the dictionary should have the format
(default value, docstring, type).
Returns
-------
docstring : str
Updated method docstring. | entailment |
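A worked sketch of the substitution; the option entries follow the (default value, docstring, type) convention described above, and the names are invented:

def method(self, **kwargs):
    """Do something.

    Parameters
    ----------
    {options}
    """

opts = {'binsz': (0.1, 'Image pixel size in degrees.', float),
        'model': (None, 'Dictionary defining the test source.', dict)}
method.__doc__ = update_docstring(method.__doc__, opts)
print(method.__doc__)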
def convolve_map(m, k, cpix, threshold=0.001, imin=0, imax=None, wmap=None):
"""
Perform an energy-dependent convolution on a sequence of 2-D spatial maps.
Parameters
----------
m : `~numpy.ndarray`
3-D map containing a sequence of 2-D spatial maps. First
dimension should be energy.
k : `~numpy.ndarray`
3-D map containing a sequence of convolution kernels (PSF) for
each slice in m. This map should have the same dimension as m.
cpix : list
Indices of kernel reference pixel in the two spatial dimensions.
    threshold : float
        Kernel amplitude threshold.  The kernel is cropped where it
        falls below this fraction of its value at the reference pixel.
imin : int
Minimum index in energy dimension.
imax : int
Maximum index in energy dimension.
wmap : `~numpy.ndarray`
3-D map containing a sequence of 2-D spatial maps of weights. First
dimension should be energy. This map should have the same dimension as m.
"""
islice = slice(imin, imax)
o = np.zeros(m[islice, ...].shape)
ix = int(cpix[0])
iy = int(cpix[1])
# Loop over energy
for i in range(m[islice, ...].shape[0]):
ks = k[islice, ...][i, ...]
ms = m[islice, ...][i, ...]
mx = ks[ix, :] > ks[ix, iy] * threshold
my = ks[:, iy] > ks[ix, iy] * threshold
nx = int(max(3, np.round(np.sum(mx) / 2.)))
ny = int(max(3, np.round(np.sum(my) / 2.)))
# Ensure that there is an odd number of pixels in the kernel
# array
if ix + nx + 1 >= ms.shape[0] or ix - nx < 0:
nx -= 1
ny -= 1
sx = slice(ix - nx, ix + nx + 1)
sy = slice(iy - ny, iy + ny + 1)
ks = ks[sx, sy]
# origin = [0, 0]
# if ks.shape[0] % 2 == 0: origin[0] += 1
# if ks.shape[1] % 2 == 0: origin[1] += 1
# o[i,...] = ndimage.convolve(ms, ks, mode='constant',
# origin=origin, cval=0.0)
o[i, ...] = scipy.signal.fftconvolve(ms, ks, mode='same')
if wmap is not None:
o[i, ...] *= wmap[islice, ...][i, ...]
return o | Perform an energy-dependent convolution on a sequence of 2-D spatial maps.
Parameters
----------
m : `~numpy.ndarray`
3-D map containing a sequence of 2-D spatial maps. First
dimension should be energy.
k : `~numpy.ndarray`
3-D map containing a sequence of convolution kernels (PSF) for
each slice in m. This map should have the same dimension as m.
cpix : list
Indices of kernel reference pixel in the two spatial dimensions.
threshold : float
        Kernel amplitude threshold.  The kernel is cropped where it
        falls below this fraction of its value at the reference pixel.
imin : int
Minimum index in energy dimension.
imax : int
Maximum index in energy dimension.
wmap : `~numpy.ndarray`
3-D map containing a sequence of 2-D spatial maps of weights. First
dimension should be energy. This map should have the same dimension as m. | entailment |
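A toy sketch with synthetic maps and Gaussian kernels; the map size, kernel widths, and counts are all invented. Note that convolve_map relies on scipy.signal.fftconvolve internally.

import numpy as np

x, y = np.mgrid[0:32, 0:32]
kern = np.array([np.exp(-0.5 * ((x - 16.) ** 2 + (y - 16.) ** 2) / s ** 2)
                 for s in (1.5, 3.0)])
kern /= kern.sum(axis=(1, 2), keepdims=True)  # normalize each kernel slice
maps = np.random.poisson(5.0, size=(2, 32, 32)).astype(float)
smoothed = convolve_map(maps, kern, cpix=[16, 16])
print(smoothed.shape)  # (2, 32, 32)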
def convolve_map_hpx_gauss(m, sigmas, imin=0, imax=None, wmap=None):
"""
Perform an energy-dependent convolution on a sequence of 2-D spatial maps.
Parameters
----------
m : `HpxMap`
2-D map containing a sequence of 1-D HEALPix maps. First
dimension should be energy.
sigmas : `~numpy.ndarray`
        1-D array containing a sequence of Gaussian widths for smoothing, in radians
imin : int
Minimum index in energy dimension.
imax : int
Maximum index in energy dimension.
wmap : `~numpy.ndarray`
2-D map containing a sequence of 1-D HEALPix maps of weights. First
dimension should be energy. This map should have the same dimension as m.
"""
islice = slice(imin, imax)
o = np.zeros(m.data.shape)
nside = m.geom.nside
nest = m.geom.nest
# Loop over energy
for i, ms in enumerate(m.data[islice, ...]):
sigma = sigmas[islice][i]
# Need to be in RING scheme
if nest:
ms = hp.pixelfunc.reorder(ms, n2r=True)
o[islice, ...][i] = hp.sphtfunc.smoothing(ms, sigma=sigma)
if nest:
o[islice, ...][i] = hp.pixelfunc.reorder(
o[islice, ...][i], r2n=True)
if wmap is not None:
o[islice, ...][i] *= wmap.data[islice, ...][i]
return HpxNDMap(m.geom, o) | Perform an energy-dependent convolution on a sequence of 2-D spatial maps.
Parameters
----------
m : `HpxMap`
2-D map containing a sequence of 1-D HEALPix maps. First
dimension should be energy.
sigmas : `~numpy.ndarray`
        1-D array containing a sequence of Gaussian widths for smoothing, in radians
imin : int
Minimum index in energy dimension.
imax : int
Maximum index in energy dimension.
wmap : `~numpy.ndarray`
2-D map containing a sequence of 1-D HEALPix maps of weights. First
dimension should be energy. This map should have the same dimension as m. | entailment |
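The core operation above is healpy's spherical-harmonic smoothing. A minimal sketch of that call on a single RING-ordered layer (sigma in radians; nside and counts invented):

import numpy as np
import healpy as hp

nside = 64
layer = np.random.poisson(2.0, hp.nside2npix(nside)).astype(float)
smoothed = hp.sphtfunc.smoothing(layer, sigma=np.radians(0.5))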
def get_source_kernel(gta, name, kernel=None):
"""Get the PDF for the given source."""
sm = []
zs = 0
for c in gta.components:
z = c.model_counts_map(name).data.astype('float')
if kernel is not None:
shape = (z.shape[0],) + kernel.shape
z = np.apply_over_axes(np.sum, z, axes=[1, 2]) * np.ones(
shape) * kernel[np.newaxis, :, :]
zs += np.sum(z)
else:
zs += np.sum(z)
sm.append(z)
sm2 = 0
for i, m in enumerate(sm):
sm[i] /= zs
sm2 += np.sum(sm[i] ** 2)
for i, m in enumerate(sm):
sm[i] /= sm2
return sm | Get the PDF for the given source. | entailment |
def residmap(self, prefix='', **kwargs):
"""Generate 2-D spatial residual maps using the current ROI
model and the convolution kernel defined with the `model`
argument.
Parameters
----------
prefix : str
String that will be prefixed to the output residual map files.
{options}
Returns
-------
maps : dict
A dictionary containing the `~fermipy.utils.Map` objects
for the residual significance and amplitude.
"""
timer = Timer.create(start=True)
self.logger.info('Generating residual maps')
schema = ConfigSchema(self.defaults['residmap'])
config = schema.create_config(self.config['residmap'], **kwargs)
# Defining default properties of test source model
config['model'].setdefault('Index', 2.0)
config['model'].setdefault('SpectrumType', 'PowerLaw')
config['model'].setdefault('SpatialModel', 'PointSource')
config['model'].setdefault('Prefactor', 1E-13)
o = self._make_residual_map(prefix, **config)
if config['make_plots']:
plotter = plotting.AnalysisPlotter(self.config['plotting'],
fileio=self.config['fileio'],
logging=self.config['logging'])
plotter.make_residmap_plots(o, self.roi)
self.logger.info('Finished residual maps')
outfile = utils.format_filename(self.workdir, 'residmap',
prefix=[o['name']])
if config['write_fits']:
o['file'] = os.path.basename(outfile) + '.fits'
self._make_residmap_fits(o, outfile + '.fits')
if config['write_npy']:
np.save(outfile + '.npy', o)
self.logger.info('Execution time: %.2f s', timer.elapsed_time)
return o | Generate 2-D spatial residual maps using the current ROI
model and the convolution kernel defined with the `model`
argument.
Parameters
----------
prefix : str
String that will be prefixed to the output residual map files.
{options}
Returns
-------
maps : dict
A dictionary containing the `~fermipy.utils.Map` objects
for the residual significance and amplitude. | entailment |
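A usage sketch within an interactive fermipy session. The model dict overrides the defaults set above; 'sigma' is assumed to be the key holding the significance map in the returned dictionary.

maps = gta.residmap('fit1',
                    model=dict(Index=2.0, SpatialModel='PointSource'),
                    make_plots=True)
sigma_map = maps['sigma']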
def create(appname, **kwargs):
"""Create a `Link` of a particular class, using the kwargs as options"""
if appname in LinkFactory._class_dict:
return LinkFactory._class_dict[appname].create(**kwargs)
else:
raise KeyError(
"Could not create object associated to app %s" % appname) | Create a `Link` of a particular class, using the kwargs as options | entailment |
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
    the individual links """
comp_file = args.get('comp', None)
datafile = args.get('data', None)
if is_null(comp_file):
return
if is_null(datafile):
return
NAME_FACTORY.update_base_dict(datafile)
outdir = args.get('outdir', None)
outkey = args.get('outkey', None)
ft1file = args['ft1file']
if is_null(outdir) or is_null(outkey):
return
pfiles = os.path.join(outdir, outkey)
    with open(comp_file) as comp_fd:
        self.comp_dict = yaml.safe_load(comp_fd)
coordsys = self.comp_dict.pop('coordsys')
full_out_dir = make_nfs_path(os.path.join(outdir, outkey))
for key_e, comp_e in sorted(self.comp_dict.items()):
emin = math.pow(10., comp_e['log_emin'])
emax = math.pow(10., comp_e['log_emax'])
enumbins = comp_e['enumbins']
zmax = comp_e['zmax']
zcut = "zmax%i" % comp_e['zmax']
evclassstr = NAME_FACTORY.base_dict['evclass']
kwargs_select = dict(zcut=zcut,
ebin=key_e,
psftype='ALL',
coordsys=coordsys,
mktime='none')
selectfile_energy = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_select))
linkname = 'select-energy-%s-%s' % (key_e, zcut)
self._set_link(linkname, Gtlink_select,
infile=ft1file,
outfile=selectfile_energy,
zmax=zmax,
emin=emin,
emax=emax,
evclass=NAME_FACTORY.evclassmask(evclassstr),
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname))
if 'evtclasses' in comp_e:
evtclasslist_vals = comp_e['evtclasses']
else:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
for evtclassval in evtclasslist_vals:
for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
linkname_select = 'select-type-%s-%s-%s-%s' % (
key_e, zcut, evtclassval, psf_type)
linkname_bin = 'bin-%s-%s-%s-%s' % (key_e, zcut, evtclassval, psf_type)
hpx_order = psf_dict['hpx_order']
kwargs_bin = kwargs_select.copy()
kwargs_bin['psftype'] = psf_type
selectfile_psf = make_full_path(
outdir, outkey, NAME_FACTORY.select(**kwargs_bin))
binfile = make_full_path(outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))
self._set_link(linkname_select, Gtlink_select,
infile=selectfile_energy,
outfile=selectfile_psf,
zmax=zmax,
emin=emin,
emax=emax,
evtype=EVT_TYPE_DICT[psf_type],
evclass=NAME_FACTORY.evclassmask(evtclassval),
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_select))
self._set_link(linkname_bin, Gtlink_bin,
coordsys=coordsys,
hpx_order=hpx_order,
evfile=selectfile_psf,
outfile=binfile,
emin=emin,
emax=emax,
enumbins=enumbins,
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_bin)) | Map from the top-level arguments to the arguments provided to
    the individual links | entailment |
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
    the individual links """
data = args.get('data')
comp = args.get('comp')
ft1file = args.get('ft1file')
scratch = args.get('scratch', None)
dry_run = args.get('dry_run', None)
self._set_link('split-and-bin', SplitAndBin_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_ccube', 9),
ft1file=ft1file,
scratch=scratch,
dry_run=dry_run)
self._set_link('coadd-split', CoaddSplit_SG,
comp=comp, data=data,
ft1file=ft1file)
self._set_link('expcube2', Gtexpcube2_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_expcube', 5),
dry_run=dry_run) | Map from the top-level arguments to the arguments provided to
    the individual links | entailment |
def _replace_none(self, aDict):
""" Replace all None values in a dict with 'none' """
for k, v in aDict.items():
if v is None:
aDict[k] = 'none' | Replace all None values in a dict with 'none' | entailment |
def irfs(self, **kwargs):
""" Get the name of IFRs associted with a particular dataset
"""
dsval = kwargs.get('dataset', self.dataset(**kwargs))
tokens = dsval.split('_')
irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])],
EVCLASS_NAME_DICTIONARY[tokens[3]],
kwargs.get('irf_ver'))
    return irf_name | Get the name of the IRFs associated with a particular dataset | entailment |
def dataset(self, **kwargs):
""" Return a key that specifies the data selection
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.dataset_format.format(**kwargs_copy)
except KeyError:
return None | Return a key that specifies the data selection | entailment |
def component(self, **kwargs):
""" Return a key that specifies data the sub-selection
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.component_format.format(**kwargs_copy)
except KeyError:
        return None | Return a key that specifies the data sub-selection | entailment |
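The NameFactory key and file-name accessors in this section all follow the same pattern: a class-level format string filled from the base dictionary merged with per-call overrides, returning None when a required key is missing. A standalone sketch with an invented format:

class MiniNameFactory(object):
    dataset_format = '{data_pass}_{data_time}_{evclass}'

    def __init__(self, **kwargs):
        self.base_dict = kwargs

    def dataset(self, **kwargs):
        kwargs_copy = self.base_dict.copy()
        kwargs_copy.update(**kwargs)
        try:
            return MiniNameFactory.dataset_format.format(**kwargs_copy)
        except KeyError:
            return None

nf = MiniNameFactory(data_pass='P8', data_time='8years')
print(nf.dataset(evclass='source'))  # -> P8_8years_source
print(nf.dataset())                  # -> None (evclass missing)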
def sourcekey(self, **kwargs):
""" Return a key that specifies the name and version of a source or component
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.sourcekey_format.format(**kwargs_copy)
except KeyError:
return None | Return a key that specifies the name and version of a source or component | entailment |
def galprop_ringkey(self, **kwargs):
""" return the sourcekey for galprop input maps : specifies the component and ring
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.galprop_ringkey_format.format(**kwargs_copy)
except KeyError:
return None | return the sourcekey for galprop input maps : specifies the component and ring | entailment |
def galprop_sourcekey(self, **kwargs):
""" return the sourcekey for merged galprop maps :
specifies the merged component and merging scheme
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.galprop_sourcekey_format.format(**kwargs_copy)
except KeyError:
return None | return the sourcekey for merged galprop maps :
specifies the merged component and merging scheme | entailment |
def merged_sourcekey(self, **kwargs):
""" return the sourcekey for merged sets of point sources :
specifies the catalog and merging rule
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.merged_sourcekey_format.format(**kwargs_copy)
except KeyError:
return None | return the sourcekey for merged sets of point sources :
specifies the catalog and merging rule | entailment |
def galprop_gasmap(self, **kwargs):
""" return the file name for Galprop input gasmaps
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.galprop_gasmap_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for Galprop input gasmaps | entailment |
def merged_gasmap(self, **kwargs):
""" return the file name for Galprop merged gasmaps
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.merged_gasmap_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for Galprop merged gasmaps | entailment |
def diffuse_template(self, **kwargs):
""" return the file name for other diffuse map templates
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.diffuse_template_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for other diffuse map templates | entailment |
def spectral_template(self, **kwargs):
""" return the file name for spectral templates
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = NameFactory.spectral_template_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for spectral templates | entailment |
def srcmdl_xml(self, **kwargs):
""" return the file name for source model xml files
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = NameFactory.srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for source model xml files | entailment |
def nested_srcmdl_xml(self, **kwargs):
""" return the file name for source model xml files of nested sources
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.nested_srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for source model xml files of nested sources | entailment |
def ft1file(self, **kwargs):
""" return the name of the input ft1 file list
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ft1file_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of the input ft1 file list | entailment |
def ft2file(self, **kwargs):
""" return the name of the input ft2 file list
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['data_time'] = kwargs.get(
'data_time', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ft2file_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of the input ft2 file list | entailment |
def ltcube(self, **kwargs):
""" return the name of a livetime cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
localpath = NameFactory.ltcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a livetime cube file | entailment |
def select(self, **kwargs):
""" return the name of a selected events ft1file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.select_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a selected events ft1file | entailment |
def mktime(self, **kwargs):
""" return the name of a selected events ft1file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.mktime_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
    return localpath | return the name of a mktime-filtered events ft1file | entailment |
def ccube(self, **kwargs):
""" return the name of a counts cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
localpath = NameFactory.ccube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a counts cube file | entailment |
def bexpcube(self, **kwargs):
""" return the name of a binned exposure cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.bexpcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a binned exposure cube file | entailment |
def srcmaps(self, **kwargs):
""" return the name of a source map file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.srcmaps_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a source map file | entailment |
def mcube(self, **kwargs):
""" return the name of a model cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.mcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a model cube file | entailment |
def ltcube_sun(self, **kwargs):
""" return the name of a livetime cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ltcubesun_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a livetime cube file | entailment |
def ltcube_moon(self, **kwargs):
""" return the name of a livetime cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ltcubemoon_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a livetime cube file | entailment |
def bexpcube_sun(self, **kwargs):
""" return the name of a binned exposure cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.bexpcubesun_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a binned exposure cube file | entailment |
def bexpcube_moon(self, **kwargs):
""" return the name of a binned exposure cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.bexpcubemoon_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a binned exposure cube file | entailment |
def angprofile(self, **kwargs):
""" return the file name for sun or moon angular profiles
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.angprofile_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for sun or moon angular profiles | entailment |
def template_sunmoon(self, **kwargs):
""" return the file name for sun or moon template files
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for sun or moon template files | entailment |
def residual_cr(self, **kwargs):
"""Return the name of the residual CR analysis output files"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
localpath = NameFactory.residual_cr_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | Return the name of the residual CR analysis output files | entailment |
def galprop_rings_yaml(self, **kwargs):
""" return the name of a galprop rings merging yaml file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.galprop_rings_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a galprop rings merging yaml file | entailment |
def catalog_split_yaml(self, **kwargs):
""" return the name of a catalog split yaml file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.catalog_split_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a catalog split yaml file | entailment |
def model_yaml(self, **kwargs):
""" return the name of a model yaml file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.model_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a model yaml file | entailment |