| sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (stringclasses, 1 value: entailment) |
|---|---|---|
def build_from_yamlfile(yamlfile):
    """ Build a dictionary of mktime filters from a yaml file
    """
    # safe_load avoids executing arbitrary YAML tags; the context manager
    # closes the file handle promptly.
    with open(yamlfile) as f:
        d = yaml.safe_load(f)
    return MktimeFilterDict(d['aliases'], d['selections'])
|
Build a dictionary of mktime filters from a yaml file
|
entailment
|
def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
if not HAVE_ST:
raise RuntimeError(
"Trying to run fermipy analysis, but don't have ST")
if is_not_null(args.roi_baseline):
gta = GTAnalysis.create(args.roi_baseline, args.config)
else:
        gta = GTAnalysis(args.config,
                         logging={'verbosity': 3},
                         fileio={'workdir_regex': r'\.xml$|\.npy$'})
gta.print_roi()
test_source = args.target
gta.sed(test_source, outfile='sed_%s.fits' % 'FL8Y', make_plots=True)
gta.extension(test_source, make_plots=True)
return gta
|
Run this analysis
|
entailment
|
def collect_jobs(dirs, runscript, overwrite=False, max_job_age=90):
"""Construct a list of job dictionaries."""
jobs = []
for dirname in sorted(dirs):
o = dict(cfgfile=os.path.join(dirname, 'config.yaml'),
logfile=os.path.join(
dirname, os.path.splitext(runscript)[0] + '.log'),
runscript=os.path.join(dirname, runscript))
if not os.path.isfile(o['cfgfile']):
continue
if not os.path.isfile(o['runscript']):
continue
if not os.path.isfile(o['logfile']):
jobs.append(o)
continue
age = file_age_in_seconds(o['logfile']) / 60.
job_status = check_log(o['logfile'])
print(dirname, job_status, age)
if job_status is False or overwrite:
jobs.append(o)
elif job_status == 'Exited':
print("Job Exited. Resending command.")
jobs.append(o)
elif job_status == 'None' and age > max_job_age:
print(
"Job did not exit, but no activity on log file for > %.2f min. Resending command." % max_job_age)
jobs.append(o)
# elif job_status is True:
# print("Job Completed. Resending command.")
# jobs.append(o)
return jobs
|
Construct a list of job dictionaries.
|
entailment
|
def make_srcmap_old(psf, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,
cdelt=0.01, rebin=1, psf_scale_fn=None):
"""Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
rebin : int
Factor by which the original map will be oversampled in the
spatial dimension when computing the model.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
"""
if rebin > 1:
npix = npix * rebin
xpix = xpix * rebin + (rebin - 1.0) / 2.
ypix = ypix * rebin + (rebin - 1.0) / 2.
cdelt = cdelt / rebin
if spatial_model == 'RadialGaussian':
k = utils.make_cgauss_kernel(psf, sigma, npix, cdelt,
xpix, ypix, psf_scale_fn)
elif spatial_model == 'RadialDisk':
k = utils.make_cdisk_kernel(psf, sigma, npix, cdelt,
xpix, ypix, psf_scale_fn)
elif spatial_model == 'PointSource':
k = utils.make_psf_kernel(psf, npix, cdelt,
xpix, ypix, psf_scale_fn)
    else:
        raise Exception('Unsupported spatial model: %s' % spatial_model)
if rebin > 1:
k = utils.sum_bins(k, 1, rebin)
k = utils.sum_bins(k, 2, rebin)
k *= psf.exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2
return k
|
Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
rebin : int
Factor by which the original map will be oversampled in the
spatial dimension when computing the model.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
|
entailment
|
def make_srcmap(psf, exp, spatial_model, sigma, npix=500, xpix=0.0, ypix=0.0,
cdelt=0.01, psf_scale_fn=None, klims=None, sparse=False):
"""Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
exp : `~numpy.ndarray`
Array of exposures.
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
klims : tuple
Indices of lower and upper range of energy.
sparse : bool
Skip pixels in which the source amplitude is small.
"""
    if spatial_model == 'RadialGaussian':
        # 1.5095... = sqrt(-2 ln 0.32): converts a 68% containment radius
        # to the sigma of a 2D Gaussian.
        k = utils.make_radial_kernel(psf, utils.convolve2d_gauss,
                                     sigma / 1.5095921854516636, npix, cdelt,
                                     xpix, ypix, psf_scale_fn, klims=klims,
                                     sparse=sparse)
    elif spatial_model == 'RadialDisk':
        # 0.8246... = sqrt(0.68): converts a 68% containment radius to the
        # radius of a uniform disk.
        k = utils.make_radial_kernel(psf, utils.convolve2d_disk,
                                     sigma / 0.8246211251235321, npix, cdelt,
                                     xpix, ypix, psf_scale_fn, klims=klims,
                                     sparse=sparse)
elif spatial_model == 'PointSource':
k = utils.make_radial_kernel(psf, None, None, npix, cdelt,
xpix, ypix, psf_scale_fn, klims=klims,
sparse=sparse)
    else:
        raise Exception('Unsupported spatial model: %s' % spatial_model)
if klims is not None:
exp = exp[klims[0]:klims[1] + 1, ...]
k *= exp[:, np.newaxis, np.newaxis] * np.radians(cdelt) ** 2
return k
|
Compute the source map for a given spatial model.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
exp : `~numpy.ndarray`
Array of exposures.
spatial_model : str
Spatial model.
sigma : float
Spatial size parameter for extended models.
xpix : float
Source position in pixel coordinates in X dimension.
ypix : float
Source position in pixel coordinates in Y dimension.
psf_scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
klims : tuple
Indices of lower and upper range of energy.
sparse : bool
Skip pixels in which the source amplitude is small.
|
entailment
|
def delete_source_map(srcmap_file, names, logger=None):
"""Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted.
"""
with fits.open(srcmap_file) as hdulist:
hdunames = [hdu.name.upper() for hdu in hdulist]
if not isinstance(names, list):
names = [names]
for name in names:
if not name.upper() in hdunames:
continue
del hdulist[name.upper()]
hdulist.writeto(srcmap_file, overwrite=True)
|
Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted.
|
entailment
|
def get_offsets(self, pix):
"""Get offset of the first pixel in each dimension in the
global coordinate system.
Parameters
----------
pix : `~numpy.ndarray`
Pixel coordinates in global coordinate system.
"""
idx = []
for i in range(self.ndim):
if i == 0:
idx += [0]
else:
npix1 = int(self.shape[i])
pix0 = int(pix[i - 1]) - npix1 // 2
idx += [pix0]
return idx
|
Get offset of the first pixel in each dimension in the
global coordinate system.
Parameters
----------
pix : `~numpy.ndarray`
Pixel coordinates in global coordinate system.
|
entailment
|
def shift_to_coords(self, pix, fill_value=np.nan):
"""Create a new map that is shifted to the pixel coordinates
``pix``."""
pix_offset = self.get_offsets(pix)
dpix = np.zeros(len(self.shape) - 1)
for i in range(len(self.shape) - 1):
x = self.rebin * (pix[i] - pix_offset[i + 1]
) + (self.rebin - 1.0) / 2.
dpix[i] = x - self._pix_ref[i]
pos = [pix_offset[i] + self.shape[i] // 2
for i in range(self.data.ndim)]
s0, s1 = utils.overlap_slices(self.shape_out, self.shape, pos)
k = np.zeros(self.data.shape)
for i in range(k.shape[0]):
k[i] = shift(self._data_spline[i], dpix, cval=np.nan,
order=2, prefilter=False)
for i in range(1, len(self.shape)):
k = utils.sum_bins(k, i, self.rebin)
k0 = np.ones(self.shape_out) * fill_value
if k[s1].size == 0 or k0[s0].size == 0:
return k0
k0[s0] = k[s1]
return k0
|
Create a new map that is shifted to the pixel coordinates
``pix``.
|
entailment
|
def create_map(self, pix):
"""Create a new map with reference pixel coordinates shifted
to the pixel coordinates ``pix``.
Parameters
----------
pix : `~numpy.ndarray`
Reference pixel of new map.
Returns
-------
out_map : `~numpy.ndarray`
The shifted map.
"""
k0 = self._m0.shift_to_coords(pix)
k1 = self._m1.shift_to_coords(pix)
k0[np.isfinite(k1)] = k1[np.isfinite(k1)]
k0[~np.isfinite(k0)] = 0
return k0
|
Create a new map with reference pixel coordinates shifted
to the pixel coordinates ``pix``.
Parameters
----------
pix : `~numpy.ndarray`
Reference pixel of new map.
Returns
-------
out_map : `~numpy.ndarray`
The shifted map.
|
entailment
|
def render_pep440(vcs):
"""Convert git release tag into a form that is PEP440 compliant."""
if vcs is None:
return None
tags = vcs.split('-')
# Bare version number
if len(tags) == 1:
return tags[0]
else:
return tags[0] + '+' + '.'.join(tags[1:])
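# Illustrative usage sketch (hypothetical tag strings): a 'git describe'
# style tag maps onto a PEP440 local version segment:
#   >>> render_pep440('1.2.3-4-gabc1234')
#   '1.2.3+4.gabc1234'
#   >>> render_pep440('1.2.3')
#   '1.2.3'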
|
Convert git release tag into a form that is PEP440 compliant.
|
entailment
|
def read_release_version():
    """Read the release version from ``_version.py``."""
    import re
    dirname = os.path.abspath(os.path.dirname(__file__))
    try:
        # Use a context manager so the file is closed, and catch only
        # I/O errors rather than using a bare except.
        with open(os.path.join(dirname, "_version.py"), "rt") as f:
            for line in f:
                m = re.match("__version__ = '([^']+)'", line)
                if m:
                    return m.group(1)
    except IOError:
        return None
    return None
|
Read the release version from ``_version.py``.
|
entailment
|
def write_release_version(version):
    """Write the release version to ``_version.py``."""
    dirname = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(dirname, "_version.py"), "wt") as f:
        f.write("__version__ = '%s'\n" % version)
|
Write the release version to ``_version.py``.
|
entailment
|
def make_full_path(basedir, outkey, origname):
"""Make a full file path by combining tokens
Parameters
-----------
basedir : str
The top level output area
outkey : str
The key for the particular instance of the analysis
origname : str
Template for the output file name
Returns
-------
outpath : str
        This will be <basedir>/<outkey>/<newname>
        where newname = basename(origname).replace('.fits', '_<outkey>.fits')
"""
return os.path.join(basedir, outkey,
os.path.basename(origname).replace('.fits',
'_%s.fits' % outkey))
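# Illustrative usage sketch (hypothetical paths):
#   >>> make_full_path('/data', 'v01', '/some/dir/sed.fits')
#   '/data/v01/sed_v01.fits'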
|
Make a full file path by combining tokens
Parameters
-----------
basedir : str
The top level output area
outkey : str
The key for the particular instance of the analysis
origname : str
Template for the output file name
Returns
-------
outpath : str
This will be <basedir>/<outkey>/<newname>
where newname = basename(origname).replace('.fits', '_<outkey>.fits')
|
entailment
|
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
    the individual links """
comp_file = args.get('comp', None)
datafile = args.get('data', None)
if is_null(comp_file):
return
if is_null(datafile):
return
NAME_FACTORY.update_base_dict(args['data'])
outdir = args.get('outdir')
outkey = args.get('outkey')
ft1file = args['ft1file']
ft2file = args['ft2file']
if is_null(outdir) or is_null(outkey):
return
pfiles = os.path.join(outdir, outkey)
    with open(comp_file) as f:
        self.comp_dict = yaml.safe_load(f)
    coordsys = self.comp_dict.pop('coordsys')
full_out_dir = make_nfs_path(os.path.join(outdir, outkey))
for key_e, comp_e in sorted(self.comp_dict.items()):
emin = math.pow(10., comp_e['log_emin'])
emax = math.pow(10., comp_e['log_emax'])
enumbins = comp_e['enumbins']
zmax = comp_e['zmax']
zcut = "zmax%i" % comp_e['zmax']
evclassstr = NAME_FACTORY.base_dict['evclass']
kwargs_select = dict(zcut=zcut,
ebin=key_e,
psftype='ALL',
coordsys=coordsys)
linkname = 'select-energy-%s-%s' % (key_e, zcut)
selectfile_energy = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_select))
self._set_link(linkname, Gtlink_select,
infile=ft1file,
outfile=selectfile_energy,
zmax=zmax,
emin=emin,
emax=emax,
evclass=NAME_FACTORY.evclassmask(evclassstr),
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname))
if 'mktimefilters' in comp_e:
mktimefilters = comp_e['mktimefilters']
else:
mktimefilters = ['none']
for mktimekey in mktimefilters:
kwargs_mktime = kwargs_select.copy()
kwargs_mktime['mktime'] = mktimekey
filterstring = MKTIME_DICT[mktimekey]
mktime_file = make_full_path(outdir, outkey, NAME_FACTORY.mktime(**kwargs_mktime))
ltcube_file = make_full_path(outdir, outkey, NAME_FACTORY.ltcube(**kwargs_mktime))
linkname_mktime = 'mktime-%s-%s-%s' % (key_e, zcut, mktimekey)
linkname_ltcube = 'ltcube-%s-%s-%s' % (key_e, zcut, mktimekey)
self._set_link(linkname_mktime, Gtlink_mktime,
evfile=selectfile_energy,
outfile=mktime_file,
scfile=ft2file,
filter=filterstring,
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_mktime))
self._set_link(linkname_ltcube, Gtlink_ltcube,
evfile=mktime_file,
outfile=ltcube_file,
scfile=ft2file,
zmax=zmax,
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_ltcube))
if 'evtclasses' in comp_e:
evtclasslist_vals = comp_e['evtclasses']
else:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
for evtclassval in evtclasslist_vals:
for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
linkname_select = 'select-type-%s-%s-%s-%s-%s' % (
key_e, zcut, mktimekey, evtclassval, psf_type)
linkname_bin = 'bin-%s-%s-%s-%s-%s' % (key_e,
zcut, mktimekey,
evtclassval, psf_type)
kwargs_bin = kwargs_mktime.copy()
kwargs_bin['psftype'] = psf_type
kwargs_bin['coordsys'] = coordsys
kwargs_bin['evclass'] = evtclassval
selectfile_psf = make_full_path(
outdir, outkey, NAME_FACTORY.select(**kwargs_bin))
binfile_psf = make_full_path(
outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))
hpx_order_psf = min(args['hpx_order_max'], psf_dict['hpx_order'])
self._set_link(linkname_select, Gtlink_select,
infile=selectfile_energy,
outfile=selectfile_psf,
zmax=zmax,
emin=emin,
emax=emax,
evtype=EVT_TYPE_DICT[psf_type],
evclass=NAME_FACTORY.evclassmask(evtclassval),
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_select))
self._set_link(linkname_bin, Gtlink_bin,
coordsys=coordsys,
hpx_order=hpx_order_psf,
evfile=selectfile_psf,
outfile=binfile_psf,
emin=emin,
emax=emax,
enumbins=enumbins,
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_bin))
|
Map from the top-level arguments to the arguments provided to
the individual links
|
entailment
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
comp_file = args.get('comp', None)
    if comp_file is not None:
        with open(comp_file) as f:
            comp_dict = yaml.safe_load(f)
        coordsys = comp_dict.pop('coordsys')
for v in comp_dict.values():
v['coordsys'] = coordsys
else:
return job_configs
datafile = args['data']
if datafile is None or datafile == 'None':
return job_configs
NAME_FACTORY.update_base_dict(args['data'])
inputfiles = create_inputlist(args['ft1file'])
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
data_ver = NAME_FACTORY.base_dict['data_ver']
for idx, infile in enumerate(inputfiles):
key = "%06i" % idx
key_scfile = "%03i" % (idx + 1)
output_dir = os.path.join(outdir_base, key)
try:
os.mkdir(output_dir)
except OSError:
pass
scfile = args['ft2file'].replace('.lst', '_%s.fits' % key_scfile)
logfile = make_nfs_path(os.path.join(output_dir,
'scatter_mk_%s_%s.log' % (data_ver, key)))
job_configs[key] = comp_dict.copy()
job_configs[key].update(dict(ft1file=infile,
scfile=scfile,
comp=args['comp'],
hpx_order_max=args['hpx_order_max'],
outdir=outdir_base,
outkey=key,
logfile=logfile,
pfiles=output_dir))
return job_configs
|
Hook to build job configurations
|
entailment
|
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
    the individual links """
data = args.get('data')
comp = args.get('comp')
ft1file = args.get('ft1file')
ft2file = args.get('ft2file')
scratch = args.get('scratch', None)
dry_run = args.get('dry_run', None)
self._set_link('split-and-mktime', SplitAndMktime_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_ccube', 9),
ft1file=ft1file,
ft2file=ft2file,
do_ltsum=args.get('do_ltsum', False),
scratch=scratch,
dry_run=dry_run)
self._set_link('coadd-split', CoaddSplit_SG,
comp=comp, data=data,
ft1file=ft1file)
self._set_link('ltsum', Gtltsum_SG,
comp=comp, data=data,
                   ft1file=ft1file,
dry_run=dry_run)
self._set_link('expcube2', Gtexpcube2_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_expcube', 5),
dry_run=dry_run)
|
Map from the top-level arguments to the arguments provided to
the individual links
|
entailment
|
def init_matplotlib_backend(backend=None):
"""This function initializes the matplotlib backend. When no
DISPLAY is available the backend is automatically set to 'Agg'.
Parameters
----------
backend : str
matplotlib backend name.
"""
import matplotlib
try:
os.environ['DISPLAY']
except KeyError:
matplotlib.use('Agg')
else:
if backend is not None:
matplotlib.use(backend)
|
This function initializes the matplotlib backend. When no
DISPLAY is available the backend is automatically set to 'Agg'.
Parameters
----------
backend : str
matplotlib backend name.
|
entailment
|
def load_data(infile, workdir=None):
"""Load python data structure from either a YAML or numpy file. """
infile = resolve_path(infile, workdir=workdir)
    # Strip any extension, then look for the file with a supported extension.
    infile = os.path.splitext(infile)[0]
    if os.path.isfile(infile + '.npy'):
        infile += '.npy'
    elif os.path.isfile(infile + '.yaml'):
        infile += '.yaml'
    else:
        raise Exception('Input file does not exist: %s' % infile)
    ext = os.path.splitext(infile)[1]
    if ext == '.npy':
        return infile, load_npy(infile)
    elif ext == '.yaml':
        return infile, load_yaml(infile)
    else:
        raise Exception('Unrecognized extension: %s' % ext)
|
Load python data structure from either a YAML or numpy file.
|
entailment
|
def resolve_file_path_list(pathlist, workdir, prefix='',
randomize=False):
"""Resolve the path of each file name in the file ``pathlist`` and
write the updated paths to a new file.
"""
files = []
with open(pathlist, 'r') as f:
files = [line.strip() for line in f]
newfiles = []
for f in files:
f = os.path.expandvars(f)
if os.path.isfile(f):
newfiles += [f]
else:
newfiles += [os.path.join(workdir, f)]
if randomize:
_, tmppath = tempfile.mkstemp(prefix=prefix, dir=workdir)
else:
tmppath = os.path.join(workdir, prefix)
tmppath += '.txt'
with open(tmppath, 'w') as tmpfile:
tmpfile.write("\n".join(newfiles))
return tmppath
|
Resolve the path of each file name in the file ``pathlist`` and
write the updated paths to a new file.
|
entailment
|
def collect_dirs(path, max_depth=1, followlinks=True):
"""Recursively find directories under the given path."""
if not os.path.isdir(path):
return []
o = [path]
if max_depth == 0:
return o
for subdir in os.listdir(path):
subdir = os.path.join(path, subdir)
if not os.path.isdir(subdir):
continue
o += [subdir]
if os.path.islink(subdir) and not followlinks:
continue
if max_depth > 0:
o += collect_dirs(subdir, max_depth=max_depth - 1)
return list(set(o))
|
Recursively find directories under the given path.
|
entailment
|
def match_regex_list(patterns, string):
"""Perform a regex match of a string against a list of patterns.
Returns true if the string matches at least one pattern in the
list."""
for p in patterns:
if re.findall(p, string):
return True
return False
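# Illustrative usage sketch (hypothetical patterns and strings):
#   >>> match_regex_list([r'^3FGL', r'PSR'], '3FGL J0534.5+2201')
#   True
#   >>> match_regex_list([r'^3FGL'], 'Crab')
#   False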
|
Perform a regex match of a string against a list of patterns.
Returns true if the string matches at least one pattern in the
list.
|
entailment
|
def find_rows_by_string(tab, names, colnames=['assoc']):
"""Find the rows in a table ``tab`` that match at least one of the
strings in ``names``. This method ignores whitespace and case
when matching strings.
Parameters
----------
tab : `astropy.table.Table`
Table that will be searched.
names : list
List of strings.
    colnames : list
        Names of the table columns that will be searched for matching strings.
Returns
-------
mask : `~numpy.ndarray`
Boolean mask for rows with matching strings.
"""
mask = np.empty(len(tab), dtype=bool)
mask.fill(False)
names = [name.lower().replace(' ', '') for name in names]
for colname in colnames:
if colname not in tab.columns:
continue
col = tab[[colname]].copy()
col[colname] = defchararray.replace(defchararray.lower(col[colname]).astype(str),
' ', '')
for name in names:
mask |= col[colname] == name
return mask
|
Find the rows in a table ``tab`` that match at least one of the
strings in ``names``. This method ignores whitespace and case
when matching strings.
Parameters
----------
tab : `astropy.table.Table`
Table that will be searched.
names : list
List of strings.
colnames : list
Names of the table columns that will be searched for matching strings.
Returns
-------
mask : `~numpy.ndarray`
Boolean mask for rows with matching strings.
|
entailment
|
def project(lon0, lat0, lon1, lat1):
"""This function performs a stereographic projection on the unit
vector (lon1,lat1) with the pole defined at the reference unit
vector (lon0,lat0)."""
costh = np.cos(np.pi / 2. - lat0)
cosphi = np.cos(lon0)
sinth = np.sin(np.pi / 2. - lat0)
sinphi = np.sin(lon0)
xyz = lonlat_to_xyz(lon1, lat1)
x1 = xyz[0]
y1 = xyz[1]
z1 = xyz[2]
x1p = x1 * costh * cosphi + y1 * costh * sinphi - z1 * sinth
y1p = -x1 * sinphi + y1 * cosphi
z1p = x1 * sinth * cosphi + y1 * sinth * sinphi + z1 * costh
r = np.arctan2(np.sqrt(x1p ** 2 + y1p ** 2), z1p)
phi = np.arctan2(y1p, x1p)
return r * np.cos(phi), r * np.sin(phi)
|
This function performs a stereographic projection on the unit
vector (lon1,lat1) with the pole defined at the reference unit
vector (lon0,lat0).
|
entailment
|
def separation_cos_angle(lon0, lat0, lon1, lat1):
"""Evaluate the cosine of the angular separation between two
direction vectors."""
return (np.sin(lat1) * np.sin(lat0) + np.cos(lat1) * np.cos(lat0) *
np.cos(lon1 - lon0))
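# Illustrative usage sketch: coordinates are in radians, so two directions
# 90 deg apart on the equator have a cosine of ~0:
#   >>> separation_cos_angle(0.0, 0.0, np.pi / 2., 0.0)  # ~0 (within rounding)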
|
Evaluate the cosine of the angular separation between two
direction vectors.
|
entailment
|
def angle_to_cartesian(lon, lat):
"""Convert spherical coordinates to cartesian unit vectors."""
theta = np.array(np.pi / 2. - lat)
return np.vstack((np.sin(theta) * np.cos(lon),
np.sin(theta) * np.sin(lon),
np.cos(theta))).T
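# Illustrative usage sketch: lon/lat are in radians; (0, 0) maps to the
# x-axis unit vector (the z component is ~0 up to rounding):
#   >>> angle_to_cartesian(0.0, 0.0)
#   array([[1., 0., ~0.]])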
|
Convert spherical coordinates to cartesian unit vectors.
|
entailment
|
def create_model_name(src):
"""Generate a name for a source object given its spatial/spectral
properties.
Parameters
----------
src : `~fermipy.roi_model.Source`
A source object.
Returns
-------
name : str
A source name.
"""
o = ''
spatial_type = src['SpatialModel'].lower()
o += spatial_type
if spatial_type == 'gaussian':
o += '_s%04.2f' % src['SpatialWidth']
if src['SpectrumType'] == 'PowerLaw':
o += '_powerlaw_%04.2f' % float(src.spectral_pars['Index']['value'])
else:
o += '_%s' % (src['SpectrumType'].lower())
return o
|
Generate a name for a source object given its spatial/spectral
properties.
Parameters
----------
src : `~fermipy.roi_model.Source`
A source object.
Returns
-------
name : str
A source name.
|
entailment
|
def cov_to_correlation(cov):
"""Compute the correlation matrix given the covariance matrix.
Parameters
----------
cov : `~numpy.ndarray`
N x N matrix of covariances among N parameters.
Returns
-------
corr : `~numpy.ndarray`
N x N matrix of correlations among N parameters.
"""
err = np.sqrt(np.diag(cov))
errinv = np.ones_like(err) * np.nan
m = np.isfinite(err) & (err != 0)
errinv[m] = 1. / err[m]
corr = np.array(cov)
return corr * np.outer(errinv, errinv)
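# Illustrative usage sketch: a covariance matrix of two perfectly
# correlated parameters maps to a correlation matrix of ones:
#   >>> cov_to_correlation(np.array([[4., 2.], [2., 1.]]))
#   array([[1., 1.],
#          [1., 1.]])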
|
Compute the correlation matrix given the covariance matrix.
Parameters
----------
cov : `~numpy.ndarray`
N x N matrix of covariances among N parameters.
Returns
-------
corr : `~numpy.ndarray`
N x N matrix of correlations among N parameters.
|
entailment
|
def ellipse_to_cov(sigma_maj, sigma_min, theta):
"""Compute the covariance matrix in two variables x and y given
the std. deviation along the semi-major and semi-minor axes and
the rotation angle of the error ellipse.
Parameters
----------
sigma_maj : float
Std. deviation along major axis of error ellipse.
sigma_min : float
Std. deviation along minor axis of error ellipse.
theta : float
Rotation angle in radians from x-axis to ellipse major axis.
"""
cth = np.cos(theta)
sth = np.sin(theta)
covxx = cth**2 * sigma_maj**2 + sth**2 * sigma_min**2
covyy = sth**2 * sigma_maj**2 + cth**2 * sigma_min**2
covxy = cth * sth * sigma_maj**2 - cth * sth * sigma_min**2
return np.array([[covxx, covxy], [covxy, covyy]])
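# Illustrative usage sketch: with theta=0 the ellipse axes align with x/y
# and the covariance matrix is diagonal:
#   >>> ellipse_to_cov(2.0, 1.0, 0.0)
#   array([[4., 0.],
#          [0., 1.]])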
|
Compute the covariance matrix in two variables x and y given
the std. deviation along the semi-major and semi-minor axes and
the rotation angle of the error ellipse.
Parameters
----------
sigma_maj : float
Std. deviation along major axis of error ellipse.
sigma_min : float
Std. deviation along minor axis of error ellipse.
theta : float
Rotation angle in radians from x-axis to ellipse major axis.
|
entailment
|
def onesided_cl_to_dlnl(cl):
"""Compute the delta-loglikehood values that corresponds to an
upper limit of the given confidence level.
Parameters
----------
cl : float
Confidence level.
Returns
-------
dlnl : float
Delta-loglikelihood value with respect to the maximum of the
likelihood function.
"""
alpha = 1.0 - cl
return 0.5 * np.power(np.sqrt(2.) * special.erfinv(1 - 2 * alpha), 2.)
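# Illustrative usage sketch: for a one-sided 95% CL the threshold is
# 0.5 * z**2 with z ~ 1.645:
#   >>> onesided_cl_to_dlnl(0.95)  # ~1.3528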
|
Compute the delta-loglikelihood value that corresponds to an
upper limit of the given confidence level.
Parameters
----------
cl : float
Confidence level.
Returns
-------
dlnl : float
Delta-loglikelihood value with respect to the maximum of the
likelihood function.
|
entailment
|
def find_function_root(fn, x0, xb, delta=0.0, bounds=None):
"""Find the root of a function: f(x)+delta in the interval encompassed
by x0 and xb.
Parameters
----------
fn : function
Python function.
x0 : float
Fixed bound for the root search. This will either be used as
the lower or upper bound depending on the relative value of xb.
xb : float
Upper or lower bound for the root search. If a root is not
found in the interval [x0,xb]/[xb,x0] this value will be
increased/decreased until a change in sign is found.
"""
if x0 == xb:
return np.nan
for i in range(10):
if np.sign(fn(xb) + delta) != np.sign(fn(x0) + delta):
break
if bounds is not None and (xb < bounds[0] or xb > bounds[1]):
break
if xb < x0:
xb *= 0.5
else:
xb *= 2.0
# Failed to find a root
if np.sign(fn(xb) + delta) == np.sign(fn(x0) + delta):
return np.nan
if x0 == 0:
xtol = 1e-10 * np.abs(xb)
else:
xtol = 1e-10 * np.abs(xb + x0)
return brentq(lambda t: fn(t) + delta, x0, xb, xtol=xtol)
|
Find the root of a function: f(x)+delta in the interval encompassed
by x0 and xb.
Parameters
----------
fn : function
Python function.
x0 : float
Fixed bound for the root search. This will either be used as
the lower or upper bound depending on the relative value of xb.
xb : float
Upper or lower bound for the root search. If a root is not
found in the interval [x0,xb]/[xb,x0] this value will be
increased/decreased until a change in sign is found.
|
entailment
|
def get_parameter_limits(xval, loglike, cl_limit=0.95, cl_err=0.68269, tol=1E-2,
bounds=None):
"""Compute upper/lower limits, peak position, and 1-sigma errors
from a 1-D likelihood function. This function uses the
delta-loglikelihood method to evaluate parameter limits by
searching for the point at which the change in the log-likelihood
value with respect to the maximum equals a specific value. A
cubic spline fit to the log-likelihood values is used to
improve the accuracy of the calculation.
Parameters
----------
xval : `~numpy.ndarray`
Array of parameter values.
loglike : `~numpy.ndarray`
Array of log-likelihood values.
cl_limit : float
Confidence level to use for limit calculation.
cl_err : float
Confidence level to use for two-sided confidence interval
calculation.
tol : float
Absolute precision of likelihood values.
Returns
-------
x0 : float
Coordinate at maximum of likelihood function.
err_lo : float
Lower error for two-sided confidence interval with CL
``cl_err``. Corresponds to point (x < x0) at which the
log-likelihood falls by a given value with respect to the
maximum (0.5 for 1 sigma). Set to nan if the change in the
log-likelihood function at the lower bound of the ``xval``
        input array is less than the value for the given CL.
err_hi : float
Upper error for two-sided confidence interval with CL
``cl_err``. Corresponds to point (x > x0) at which the
log-likelihood falls by a given value with respect to the
maximum (0.5 for 1 sigma). Set to nan if the change in the
log-likelihood function at the upper bound of the ``xval``
input array is less than the value for the given CL.
err : float
Symmetric 1-sigma error. Average of ``err_lo`` and ``err_hi``
if both are defined.
ll : float
Lower limit evaluated at confidence level ``cl_limit``.
ul : float
Upper limit evaluated at confidence level ``cl_limit``.
lnlmax : float
Log-likelihood value at ``x0``.
"""
dlnl_limit = onesided_cl_to_dlnl(cl_limit)
dlnl_err = twosided_cl_to_dlnl(cl_err)
try:
# Pad the likelihood function
# if len(xval) >= 3 and np.max(loglike) - loglike[-1] < 1.5*dlnl_limit:
# p = np.polyfit(xval[-3:], loglike[-3:], 2)
# x = np.linspace(xval[-1], 10 * xval[-1], 3)[1:]
# y = np.polyval(p, x)
# x = np.concatenate((xval, x))
# y = np.concatenate((loglike, y))
# else:
x, y = xval, loglike
spline = UnivariateSpline(x, y, k=2,
#k=min(len(xval) - 1, 3),
w=(1 / tol) * np.ones(len(x)))
    except Exception:
        print("Failed to create spline: ", xval, loglike)
return {'x0': np.nan, 'ul': np.nan, 'll': np.nan,
'err_lo': np.nan, 'err_hi': np.nan, 'err': np.nan,
'lnlmax': np.nan}
sd = spline.derivative()
imax = np.argmax(loglike)
ilo = max(imax - 1, 0)
ihi = min(imax + 1, len(xval) - 1)
# Find the peak
x0 = xval[imax]
# Refine the peak position
if np.sign(sd(xval[ilo])) != np.sign(sd(xval[ihi])):
x0 = find_function_root(sd, xval[ilo], xval[ihi])
lnlmax = float(spline(x0))
def fn(t): return spline(t) - lnlmax
fn_val = fn(xval)
if np.any(fn_val[imax:] < -dlnl_limit):
xhi = xval[imax:][fn_val[imax:] < -dlnl_limit][0]
else:
xhi = xval[-1]
# EAC: brute force check that xhi is greater than x0
# The fabs is here in case x0 is negative
if xhi <= x0:
xhi = x0 + np.fabs(x0)
if np.any(fn_val[:imax] < -dlnl_limit):
xlo = xval[:imax][fn_val[:imax] < -dlnl_limit][-1]
else:
xlo = xval[0]
# EAC: brute force check that xlo is less than x0
# The fabs is here in case x0 is negative
if xlo >= x0:
xlo = x0 - 0.5*np.fabs(x0)
ul = find_function_root(fn, x0, xhi, dlnl_limit, bounds=bounds)
ll = find_function_root(fn, x0, xlo, dlnl_limit, bounds=bounds)
err_lo = np.abs(x0 - find_function_root(fn, x0, xlo, dlnl_err,
bounds=bounds))
err_hi = np.abs(x0 - find_function_root(fn, x0, xhi, dlnl_err,
bounds=bounds))
err = np.nan
if np.isfinite(err_lo) and np.isfinite(err_hi):
err = 0.5 * (err_lo + err_hi)
elif np.isfinite(err_hi):
err = err_hi
elif np.isfinite(err_lo):
err = err_lo
o = {'x0': x0, 'ul': ul, 'll': ll,
'err_lo': err_lo, 'err_hi': err_hi, 'err': err,
'lnlmax': lnlmax}
return o
|
Compute upper/lower limits, peak position, and 1-sigma errors
from a 1-D likelihood function. This function uses the
delta-loglikelihood method to evaluate parameter limits by
searching for the point at which the change in the log-likelihood
value with respect to the maximum equals a specific value. A
cubic spline fit to the log-likelihood values is used to
improve the accuracy of the calculation.
Parameters
----------
xval : `~numpy.ndarray`
Array of parameter values.
loglike : `~numpy.ndarray`
Array of log-likelihood values.
cl_limit : float
Confidence level to use for limit calculation.
cl_err : float
Confidence level to use for two-sided confidence interval
calculation.
tol : float
Absolute precision of likelihood values.
Returns
-------
x0 : float
Coordinate at maximum of likelihood function.
err_lo : float
Lower error for two-sided confidence interval with CL
``cl_err``. Corresponds to point (x < x0) at which the
log-likelihood falls by a given value with respect to the
maximum (0.5 for 1 sigma). Set to nan if the change in the
log-likelihood function at the lower bound of the ``xval``
input array is less than the value for the given CL.
err_hi : float
Upper error for two-sided confidence interval with CL
``cl_err``. Corresponds to point (x > x0) at which the
log-likelihood falls by a given value with respect to the
maximum (0.5 for 1 sigma). Set to nan if the change in the
log-likelihood function at the upper bound of the ``xval``
input array is less than the value for the given CL.
err : float
Symmetric 1-sigma error. Average of ``err_lo`` and ``err_hi``
if both are defined.
ll : float
Lower limit evaluated at confidence level ``cl_limit``.
ul : float
Upper limit evaluated at confidence level ``cl_limit``.
lnlmax : float
Log-likelihood value at ``x0``.
|
entailment
|
def parabola(xy, amplitude, x0, y0, sx, sy, theta):
"""Evaluate a 2D parabola given by:
f(x,y) = f_0 - (1/2) * \delta^T * R * \Sigma * R^T * \delta
where
\delta = [(x - x_0), (y - y_0)]
    and R is the matrix for a 2D rotation by angle \theta and \Sigma
    is the inverse covariance matrix:
    \Sigma = [[1/\sigma_x^2, 0           ],
              [0,            1/\sigma_y^2]]
Parameters
----------
xy : tuple
Tuple containing x and y arrays for the values at which the
parabola will be evaluated.
amplitude : float
Constant offset value.
x0 : float
Centroid in x coordinate.
y0 : float
Centroid in y coordinate.
sx : float
Standard deviation along first axis (x-axis when theta=0).
sy : float
Standard deviation along second axis (y-axis when theta=0).
theta : float
Rotation angle in radians.
Returns
-------
vals : `~numpy.ndarray`
Values of the parabola evaluated at the points defined in the
`xy` input tuple.
"""
x = xy[0]
y = xy[1]
cth = np.cos(theta)
sth = np.sin(theta)
a = (cth ** 2) / (2 * sx ** 2) + (sth ** 2) / (2 * sy ** 2)
b = -(np.sin(2 * theta)) / (4 * sx ** 2) + (np.sin(2 * theta)) / (
4 * sy ** 2)
c = (sth ** 2) / (2 * sx ** 2) + (cth ** 2) / (2 * sy ** 2)
vals = amplitude - (a * ((x - x0) ** 2) +
2 * b * (x - x0) * (y - y0) +
c * ((y - y0) ** 2))
return vals
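# Illustrative usage sketch: at the centroid both delta terms vanish and
# the function returns the amplitude:
#   >>> parabola((np.array([1.0]), np.array([2.0])),
#   ...          5.0, 1.0, 2.0, 0.5, 0.3, 0.0)
#   array([5.])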
|
Evaluate a 2D parabola given by:
f(x,y) = f_0 - (1/2) * \delta^T * R * \Sigma * R^T * \delta
where
\delta = [(x - x_0), (y - y_0)]
and R is the matrix for a 2D rotation by angle \theta and \Sigma
is the inverse covariance matrix:
\Sigma = [[1/\sigma_x^2, 0           ],
          [0,            1/\sigma_y^2]]
Parameters
----------
xy : tuple
Tuple containing x and y arrays for the values at which the
parabola will be evaluated.
amplitude : float
Constant offset value.
x0 : float
Centroid in x coordinate.
y0 : float
Centroid in y coordinate.
sx : float
Standard deviation along first axis (x-axis when theta=0).
sy : float
Standard deviation along second axis (y-axis when theta=0).
theta : float
Rotation angle in radians.
Returns
-------
vals : `~numpy.ndarray`
Values of the parabola evaluated at the points defined in the
`xy` input tuple.
|
entailment
|
def get_region_mask(z, delta, xy=None):
"""Get mask of connected region within delta of max(z)."""
if xy is None:
ix, iy = np.unravel_index(np.argmax(z), z.shape)
else:
ix, iy = xy
mz = (z > z[ix, iy] - delta)
labels = label(mz)[0]
mz &= labels == labels[ix, iy]
return mz
|
Get mask of connected region within delta of max(z).
|
entailment
|
def fit_parabola(z, ix, iy, dpix=3, zmin=None):
"""Fit a parabola to a 2D numpy array. This function will fit a
parabola with the functional form described in
`~fermipy.utils.parabola` to a 2D slice of the input array `z`.
The fit region encompasses pixels that are within `dpix` of the
    pixel coordinate (ix,iy) OR that have a value relative to the peak
value greater than `zmin`.
Parameters
----------
z : `~numpy.ndarray`
ix : int
X index of center pixel of fit region in array `z`.
iy : int
Y index of center pixel of fit region in array `z`.
dpix : int
Max distance from center pixel of fit region.
zmin : float
"""
offset = make_pixel_distance(z.shape, iy, ix)
x, y = np.meshgrid(np.arange(z.shape[0]), np.arange(z.shape[1]),
indexing='ij')
m = (offset <= dpix)
if np.sum(m) < 9:
m = (offset <= dpix + 0.5)
if zmin is not None:
m |= get_region_mask(z, np.abs(zmin), (ix, iy))
sx = get_bounded_slice(ix, dpix, z.shape[0])
sy = get_bounded_slice(iy, dpix, z.shape[1])
coeffx = poly_to_parabola(np.polyfit(x[sx, iy], z[sx, iy], 2))
coeffy = poly_to_parabola(np.polyfit(y[ix, sy], z[ix, sy], 2))
#p0 = [coeffx[2], coeffx[0], coeffy[0], coeffx[1], coeffy[1], 0.0]
p0 = [coeffx[2], float(ix), float(iy), coeffx[1], coeffy[1], 0.0]
o = {'fit_success': True, 'p0': p0}
def curve_fit_fn(*args):
return np.ravel(parabola(*args))
try:
bounds = (-np.inf * np.ones(6), np.inf * np.ones(6))
bounds[0][1] = -0.5
bounds[0][2] = -0.5
bounds[1][1] = z.shape[0] - 0.5
bounds[1][2] = z.shape[1] - 0.5
popt, pcov = scipy.optimize.curve_fit(curve_fit_fn,
(np.ravel(x[m]), np.ravel(y[m])),
np.ravel(z[m]), p0, bounds=bounds)
except Exception:
popt = copy.deepcopy(p0)
o['fit_success'] = False
fm = parabola((x[m], y[m]), *popt)
df = fm - z[m]
rchi2 = np.sum(df ** 2) / len(fm)
o['rchi2'] = rchi2
o['x0'] = popt[1]
o['y0'] = popt[2]
o['sigmax'] = np.abs(popt[3])
o['sigmay'] = np.abs(popt[4])
o['sigma'] = np.sqrt(o['sigmax'] ** 2 + o['sigmay'] ** 2)
o['z0'] = popt[0]
o['theta'] = popt[5]
o['popt'] = popt
o['mask'] = m
a = max(o['sigmax'], o['sigmay'])
b = min(o['sigmax'], o['sigmay'])
o['eccentricity'] = np.sqrt(1 - b ** 2 / a ** 2)
o['eccentricity2'] = np.sqrt(a ** 2 / b ** 2 - 1)
return o
|
Fit a parabola to a 2D numpy array. This function will fit a
parabola with the functional form described in
`~fermipy.utils.parabola` to a 2D slice of the input array `z`.
The fit region encompasses pixels that are within `dpix` of the
pixel coordinate (ix,iy) OR that have a value relative to the peak
value greater than `zmin`.
Parameters
----------
z : `~numpy.ndarray`
ix : int
X index of center pixel of fit region in array `z`.
iy : int
Y index of center pixel of fit region in array `z`.
dpix : int
Max distance from center pixel of fit region.
zmin : float
|
entailment
|
def split_bin_edges(edges, npts=2):
"""Subdivide an array of bins by splitting each bin into ``npts``
subintervals.
Parameters
----------
edges : `~numpy.ndarray`
Bin edge array.
npts : int
Number of intervals into which each bin will be subdivided.
Returns
-------
edges : `~numpy.ndarray`
Subdivided bin edge array.
"""
if npts < 2:
return edges
x = (edges[:-1, None] +
(edges[1:, None] - edges[:-1, None]) *
np.linspace(0.0, 1.0, npts + 1)[None, :])
return np.unique(np.ravel(x))
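# Illustrative usage sketch: splitting each of two unit bins in half:
#   >>> split_bin_edges(np.array([0., 1., 2.]), npts=2)
#   array([0. , 0.5, 1. , 1.5, 2. ])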
|
Subdivide an array of bins by splitting each bin into ``npts``
subintervals.
Parameters
----------
edges : `~numpy.ndarray`
Bin edge array.
npts : int
Number of intervals into which each bin will be subdivided.
Returns
-------
edges : `~numpy.ndarray`
Subdivided bin edge array.
|
entailment
|
def val_to_bin(edges, x):
"""Convert axis coordinate to bin index."""
ibin = np.digitize(np.array(x, ndmin=1), edges) - 1
return ibin
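# Illustrative usage sketch: a value is mapped to the bin containing it:
#   >>> val_to_bin(np.array([0., 1., 2., 3.]), 1.5)
#   array([1])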
|
Convert axis coordinate to bin index.
|
entailment
|
def val_to_edge(edges, x):
"""Convert axis coordinate to bin index."""
edges = np.array(edges)
w = edges[1:] - edges[:-1]
w = np.insert(w, 0, w[0])
ibin = np.digitize(np.array(x, ndmin=1), edges - 0.5 * w) - 1
ibin[ibin < 0] = 0
return ibin
|
Convert axis coordinate to the index of the nearest bin edge.
|
entailment
|
def val_to_bin_bounded(edges, x):
"""Convert axis coordinate to bin index."""
nbins = len(edges) - 1
ibin = val_to_bin(edges, x)
ibin[ibin < 0] = 0
ibin[ibin > nbins - 1] = nbins - 1
return ibin
|
Convert axis coordinate to bin index, clipped to the valid range of bins.
|
entailment
|
def extend_array(edges, binsz, lo, hi):
"""Extend an array to encompass lo and hi values."""
numlo = int(np.ceil((edges[0] - lo) / binsz))
numhi = int(np.ceil((hi - edges[-1]) / binsz))
edges = copy.deepcopy(edges)
if numlo > 0:
edges_lo = np.linspace(edges[0] - numlo * binsz, edges[0], numlo + 1)
edges = np.concatenate((edges_lo[:-1], edges))
if numhi > 0:
edges_hi = np.linspace(edges[-1], edges[-1] + numhi * binsz, numhi + 1)
edges = np.concatenate((edges, edges_hi[1:]))
return edges
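# Illustrative usage sketch: extending [2, 3, 4] with unit bins to cover
# the interval [0, 6]:
#   >>> extend_array(np.array([2., 3., 4.]), 1.0, 0.0, 6.0)
#   array([0., 1., 2., 3., 4., 5., 6.])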
|
Extend an array to encompass lo and hi values.
|
entailment
|
def fits_recarray_to_dict(table):
"""Convert a FITS recarray to a python dictionary."""
cols = {}
for icol, col in enumerate(table.columns.names):
col_data = table.data[col]
if type(col_data[0]) == np.float32:
cols[col] = np.array(col_data, dtype=float)
elif type(col_data[0]) == np.float64:
cols[col] = np.array(col_data, dtype=float)
elif type(col_data[0]) == str:
cols[col] = np.array(col_data, dtype=str)
elif type(col_data[0]) == np.string_:
cols[col] = np.array(col_data, dtype=str)
elif type(col_data[0]) == np.int16:
cols[col] = np.array(col_data, dtype=int)
elif type(col_data[0]) == np.ndarray:
cols[col] = np.array(col_data)
else:
raise Exception(
'Unrecognized column type: %s %s' % (col, str(type(col_data))))
return cols
|
Convert a FITS recarray to a python dictionary.
|
entailment
|
def prettify_xml(elem):
"""Return a pretty-printed XML string for the Element.
"""
from xml.dom import minidom
import xml.etree.cElementTree as et
rough_string = et.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
|
Return a pretty-printed XML string for the Element.
|
entailment
|
def merge_dict(d0, d1, add_new_keys=False, append_arrays=False):
"""Recursively merge the contents of python dictionary d0 with
the contents of another python dictionary, d1.
Parameters
----------
d0 : dict
The input dictionary.
d1 : dict
Dictionary to be merged with the input dictionary.
    add_new_keys : bool
Do not skip keys that only exist in d1.
append_arrays : bool
If an element is a numpy array set the value of that element by
concatenating the two arrays.
"""
    if d0 is None and d1 is None:
        return {}
    elif d1 is None:
        return d0
    elif d0 is None:
        return d1
od = {}
for k, v in d0.items():
t0 = None
t1 = None
if k in d0:
t0 = type(d0[k])
if k in d1:
t1 = type(d1[k])
if k not in d1:
od[k] = copy.deepcopy(d0[k])
elif isinstance(v, dict) and isinstance(d1[k], dict):
od[k] = merge_dict(d0[k], d1[k], add_new_keys, append_arrays)
elif isinstance(v, list) and isstr(d1[k]):
od[k] = d1[k].split(',')
elif isinstance(v, dict) and d1[k] is None:
od[k] = copy.deepcopy(d0[k])
elif isinstance(v, np.ndarray) and append_arrays:
od[k] = np.concatenate((v, d1[k]))
elif (d0[k] is not None and d1[k] is not None) and t0 != t1:
if t0 == dict or t0 == list:
raise Exception('Conflicting types in dictionary merge for '
'key %s %s %s' % (k, t0, t1))
od[k] = t0(d1[k])
else:
od[k] = copy.copy(d1[k])
if add_new_keys:
for k, v in d1.items():
if k not in d0:
od[k] = copy.deepcopy(d1[k])
return od
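# Illustrative usage sketch: nested dictionaries are merged recursively
# and, by default, keys that exist only in d1 are skipped:
#   >>> merge_dict({'a': 1, 'b': {'c': 2}}, {'b': {'c': 3}, 'd': 4})
#   {'a': 1, 'b': {'c': 3}}
#   >>> merge_dict({'a': 1}, {'d': 4}, add_new_keys=True)
#   {'a': 1, 'd': 4}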
|
Recursively merge the contents of python dictionary d0 with
the contents of another python dictionary, d1.
Parameters
----------
d0 : dict
The input dictionary.
d1 : dict
Dictionary to be merged with the input dictionary.
add_new_keys : bool
Do not skip keys that only exist in d1.
append_arrays : bool
If an element is a numpy array set the value of that element by
concatenating the two arrays.
|
entailment
|
def tolist(x):
""" convenience function that takes in a
nested structure of lists and dictionaries
and converts everything to its base objects.
    This is useful for dumping a file to yaml.
(a) numpy arrays into python lists
>>> type(tolist(np.asarray(123))) == int
True
>>> tolist(np.asarray([1,2,3])) == [1,2,3]
True
(b) numpy strings into python strings.
>>> tolist([np.asarray('cat')])==['cat']
True
(c) an ordered dict to a dict
>>> ordered=OrderedDict(a=1, b=2)
>>> type(tolist(ordered)) == dict
True
(d) converts unicode to regular strings
>>> type(u'a') == str
False
>>> type(tolist(u'a')) == str
True
    (e) converts numbers & bools in strings to real representation,
(i.e. '123' -> 123)
>>> type(tolist(np.asarray('123'))) == int
True
>>> type(tolist('123')) == int
True
>>> tolist('False') == False
True
"""
    if isinstance(x, list):
        # In Python 3, map() returns an iterator; build a list so the
        # doctests above (and YAML dumping) get a real list back.
        return [tolist(xi) for xi in x]
elif isinstance(x, dict):
return dict((tolist(k), tolist(v)) for k, v in x.items())
elif isinstance(x, np.ndarray) or isinstance(x, np.number):
# note, call tolist again to convert strings of numbers to numbers
return tolist(x.tolist())
elif isinstance(x, OrderedDict):
return dict(x)
elif isinstance(x, np.bool_):
return bool(x)
    elif isstr(x):
        # isstr() also covers unicode & numpy strings (np.str_ subclasses
        # str); convert them to plain str.
        x = str(x)
try:
return int(x)
except:
try:
return float(x)
except:
if x == 'True':
return True
elif x == 'False':
return False
else:
return x
else:
return x
|
convenience function that takes in a
nested structure of lists and dictionaries
and converts everything to its base objects.
This is useful for dumping a file to yaml.
(a) numpy arrays into python lists
>>> type(tolist(np.asarray(123))) == int
True
>>> tolist(np.asarray([1,2,3])) == [1,2,3]
True
(b) numpy strings into python strings.
>>> tolist([np.asarray('cat')])==['cat']
True
(c) an ordered dict to a dict
>>> ordered=OrderedDict(a=1, b=2)
>>> type(tolist(ordered)) == dict
True
(d) converts unicode to regular strings
>>> type(u'a') == str
False
>>> type(tolist(u'a')) == str
True
(e) converts numbers & bools in strings to real representation,
(i.e. '123' -> 123)
>>> type(tolist(np.asarray('123'))) == int
True
>>> type(tolist('123')) == int
True
>>> tolist('False') == False
True
|
entailment
|
def convolve2d_disk(fn, r, sig, nstep=200):
"""Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    an azimuthally symmetric function in two dimensions and g is a
step function given by:
g(r) = H(1-r/s)
Parameters
----------
fn : function
Input function that takes a single radial coordinate parameter.
r : `~numpy.ndarray`
Array of points at which the convolution is to be evaluated.
sig : float
Radius parameter of the step function.
nstep : int
        Number of sampling points for numeric integration.
"""
r = np.array(r, ndmin=1)
sig = np.array(sig, ndmin=1)
rmin = r - sig
rmax = r + sig
rmin[rmin < 0] = 0
delta = (rmax - rmin) / nstep
redge = rmin[..., np.newaxis] + \
delta[..., np.newaxis] * np.linspace(0, nstep, nstep + 1)
rp = 0.5 * (redge[..., 1:] + redge[..., :-1])
dr = redge[..., 1:] - redge[..., :-1]
fnv = fn(rp)
r = r.reshape(r.shape + (1,))
cphi = -np.ones(dr.shape)
m = ((rp + r) / sig < 1) | (r == 0)
rrp = r * rp
sx = r ** 2 + rp ** 2 - sig ** 2
cphi[~m] = sx[~m] / (2 * rrp[~m])
dphi = 2 * np.arccos(cphi)
v = rp * fnv * dphi * dr / (np.pi * sig * sig)
s = np.sum(v, axis=-1)
return s
|
Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
an azimuthally symmetric function in two dimensions and g is a
step function given by:
g(r) = H(1-r/s)
Parameters
----------
fn : function
Input function that takes a single radial coordinate parameter.
r : `~numpy.ndarray`
Array of points at which the convolution is to be evaluated.
sig : float
Radius parameter of the step function.
nstep : int
Number of sampling points for numeric integration.
|
entailment
|
def convolve2d_gauss(fn, r, sig, nstep=200):
"""Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    an azimuthally symmetric function in two dimensions and g is a
2D gaussian with standard deviation s given by:
g(r) = 1/(2*pi*s^2) Exp[-r^2/(2*s^2)]
Parameters
----------
fn : function
Input function that takes a single radial coordinate parameter.
r : `~numpy.ndarray`
Array of points at which the convolution is to be evaluated.
sig : float
Width parameter of the gaussian.
nstep : int
        Number of sampling points for numeric integration.
"""
r = np.array(r, ndmin=1)
sig = np.array(sig, ndmin=1)
rmin = r - 10 * sig
rmax = r + 10 * sig
rmin[rmin < 0] = 0
delta = (rmax - rmin) / nstep
redge = (rmin[..., np.newaxis] +
delta[..., np.newaxis] *
np.linspace(0, nstep, nstep + 1))
rp = 0.5 * (redge[..., 1:] + redge[..., :-1])
dr = redge[..., 1:] - redge[..., :-1]
fnv = fn(rp)
r = r.reshape(r.shape + (1,))
sig2 = sig * sig
x = r * rp / (sig2)
if 'je_fn' not in convolve2d_gauss.__dict__:
t = 10 ** np.linspace(-8, 8, 1000)
t = np.insert(t, 0, [0])
je = special.ive(0, t)
convolve2d_gauss.je_fn = UnivariateSpline(t, je, k=2, s=0)
je = convolve2d_gauss.je_fn(x.flat).reshape(x.shape)
#je2 = special.ive(0,x)
v = (rp * fnv / (sig2) * je * np.exp(x - (r * r + rp * rp) /
(2 * sig2)) * dr)
s = np.sum(v, axis=-1)
return s
|
Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
an azimuthally symmetric function in two dimensions and g is a
2D gaussian with standard deviation s given by:
g(r) = 1/(2*pi*s^2) Exp[-r^2/(2*s^2)]
Parameters
----------
fn : function
Input function that takes a single radial coordinate parameter.
r : `~numpy.ndarray`
Array of points at which the convolution is to be evaluated.
sig : float
Width parameter of the gaussian.
nstep : int
Number of sampling points for numeric integration.
|
entailment
|
def make_pixel_distance(shape, xpix=None, ypix=None):
"""Fill a 2D array with dimensions `shape` with the distance of each
pixel from a reference direction (xpix,ypix) in pixel coordinates.
Pixel coordinates are defined such that (0,0) is located at the
center of the corner pixel.
"""
if np.isscalar(shape):
shape = [shape, shape]
if xpix is None:
xpix = (shape[1] - 1.0) / 2.
if ypix is None:
ypix = (shape[0] - 1.0) / 2.
dx = np.linspace(0, shape[1] - 1, shape[1]) - xpix
dy = np.linspace(0, shape[0] - 1, shape[0]) - ypix
dxy = np.zeros(shape)
dxy += np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)
return dxy
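# Illustrative usage sketch: a 3x3 grid with the reference direction at
# the central pixel:
#   >>> make_pixel_distance(3)
#   array([[1.414..., 1., 1.414...],
#          [1.      , 0., 1.      ],
#          [1.414..., 1., 1.414...]])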
|
Fill a 2D array with dimensions `shape` with the distance of each
pixel from a reference direction (xpix,ypix) in pixel coordinates.
Pixel coordinates are defined such that (0,0) is located at the
center of the corner pixel.
|
entailment
|
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=None, ypix=None):
"""Make kernel for a 2D gaussian.
Parameters
----------
sigma : float
Standard deviation in degrees.
"""
sigma /= cdelt
def fn(t, s): return 1. / (2 * np.pi * s ** 2) * np.exp(
-t ** 2 / (s ** 2 * 2.0))
dxy = make_pixel_distance(npix, xpix, ypix)
k = fn(dxy, sigma)
k /= (np.sum(k) * np.radians(cdelt) ** 2)
return k
|
Make kernel for a 2D gaussian.
Parameters
----------
sigma : float
Standard deviation in degrees.
|
entailment
|
def make_disk_kernel(radius, npix=501, cdelt=0.01, xpix=None, ypix=None):
"""Make kernel for a 2D disk.
Parameters
----------
radius : float
Disk radius in deg.
"""
radius /= cdelt
def fn(t, s): return 0.5 * (np.sign(s - t) + 1.0)
dxy = make_pixel_distance(npix, xpix, ypix)
k = fn(dxy, radius)
k /= (np.sum(k) * np.radians(cdelt) ** 2)
return k
|
Make kernel for a 2D disk.
Parameters
----------
radius : float
Disk radius in deg.
|
entailment
|
def make_cdisk_kernel(psf, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
normalize=False):
"""Make a kernel for a PSF-convolved 2D disk.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
sigma : float
68% containment radius in degrees.
"""
    # 0.8246... = sqrt(0.68): converts the 68% containment radius to the
    # radius of a uniform disk.
    sigma /= 0.8246211251235321
dtheta = psf.dtheta
egy = psf.energies
x = make_pixel_distance(npix, xpix, ypix)
x *= cdelt
k = np.zeros((len(egy), npix, npix))
for i in range(len(egy)):
def fn(t): return psf.eval(i, t, scale_fn=psf_scale_fn)
psfc = convolve2d_disk(fn, dtheta, sigma)
k[i] = np.interp(np.ravel(x), dtheta, psfc).reshape(x.shape)
if normalize:
k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
return k
|
Make a kernel for a PSF-convolved 2D disk.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
sigma : float
68% containment radius in degrees.
|
entailment
|
def make_radial_kernel(psf, fn, sigma, npix, cdelt, xpix, ypix, psf_scale_fn=None,
normalize=False, klims=None, sparse=False):
"""Make a kernel for a general radially symmetric 2D function.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
fn : callable
Function that evaluates the kernel at a radial coordinate r.
sigma : float
68% containment radius in degrees.
"""
if klims is None:
egy = psf.energies
else:
egy = psf.energies[klims[0]:klims[1] + 1]
ang_dist = make_pixel_distance(npix, xpix, ypix) * cdelt
max_ang_dist = np.max(ang_dist) + cdelt
#dtheta = np.linspace(0.0, (np.max(ang_dist) * 1.05)**0.5, 200)**2.0
# z = create_kernel_function_lookup(psf, fn, sigma, egy,
# dtheta, psf_scale_fn)
shape = (len(egy), npix, npix)
k = np.zeros(shape)
r99 = psf.containment_angle(energies=egy, fraction=0.997)
r34 = psf.containment_angle(energies=egy, fraction=0.34)
rmin = np.maximum(r34 / 4., 0.01)
rmax = np.maximum(r99, 0.1)
if sigma is not None:
rmin = np.maximum(rmin, 0.5 * sigma)
rmax = np.maximum(rmax, 2.0 * r34 + 3.0 * sigma)
rmax = np.minimum(rmax, max_ang_dist)
for i in range(len(egy)):
rebin = min(int(np.ceil(cdelt / rmin[i])), 8)
if sparse:
dtheta = np.linspace(0.0, rmax[i]**0.5, 100)**2.0
else:
dtheta = np.linspace(0.0, max_ang_dist**0.5, 200)**2.0
z = eval_radial_kernel(psf, fn, sigma, i, dtheta, psf_scale_fn)
xdist = make_pixel_distance(npix * rebin,
xpix * rebin + (rebin - 1.0) / 2.,
ypix * rebin + (rebin - 1.0) / 2.)
xdist *= cdelt / float(rebin)
#x = val_to_pix(dtheta, np.ravel(xdist))
if sparse:
m = np.ravel(xdist) < rmax[i]
kk = np.zeros(xdist.size)
#kk[m] = map_coordinates(z, [x[m]], order=2, prefilter=False)
kk[m] = np.interp(np.ravel(xdist)[m], dtheta, z)
kk = kk.reshape(xdist.shape)
else:
kk = np.interp(np.ravel(xdist), dtheta, z).reshape(xdist.shape)
# kk = map_coordinates(z, [x], order=2,
# prefilter=False).reshape(xdist.shape)
if rebin > 1:
kk = sum_bins(kk, 0, rebin)
kk = sum_bins(kk, 1, rebin)
k[i] = kk / float(rebin)**2
k = k.reshape((len(egy),) + ang_dist.shape)
if normalize:
k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
return k
|
Make a kernel for a general radially symmetric 2D function.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
fn : callable
Function that evaluates the kernel at a radial coordinate r.
sigma : float
68% containment radius in degrees.
|
entailment
|
def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False):
"""
Generate a kernel for a point-source.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
npix : int
Number of pixels in X and Y dimensions.
cdelt : float
Pixel size in degrees.
"""
egy = psf.energies
x = make_pixel_distance(npix, xpix, ypix)
x *= cdelt
k = np.zeros((len(egy), npix, npix))
for i in range(len(egy)):
k[i] = psf.eval(i, x, scale_fn=psf_scale_fn)
if normalize:
k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
return k
|
Generate a kernel for a point-source.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
npix : int
Number of pixels in X and Y dimensions.
cdelt : float
Pixel size in degrees.
|
entailment
|
def overlap_slices(large_array_shape, small_array_shape, position):
"""
Modified version of `~astropy.nddata.utils.overlap_slices`.
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Parameters
----------
large_array_shape : tuple
Shape of the large array.
small_array_shape : tuple
Shape of the small array.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
slices_large : tuple of slices
Slices in all directions for the large array, such that
``large_array[slices_large]`` extracts the region of the large array
that overlaps with the small array.
slices_small : slice
Slices in all directions for the small array, such that
``small_array[slices_small]`` extracts the region that is inside the
large array.
"""
# Get edge coordinates
edges_min = [int(pos - small_shape // 2) for (pos, small_shape) in
zip(position, small_array_shape)]
edges_max = [int(pos + (small_shape - small_shape // 2)) for
(pos, small_shape) in
zip(position, small_array_shape)]
# Set up slices
slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
slices_small = tuple(slice(max(0, -edge_min),
min(large_shape - edge_min,
edge_max - edge_min))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
return slices_large, slices_small
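# Illustrative usage sketch: a 3x3 array centered on the corner pixel
# (0, 0) of a 10x10 array overlaps only in a 2x2 block:
#   >>> overlap_slices((10, 10), (3, 3), (0, 0))
#   ((slice(0, 2, None), slice(0, 2, None)),
#    (slice(1, 3, None), slice(1, 3, None)))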
|
Modified version of `~astropy.nddata.utils.overlap_slices`.
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut off appropriately.
Parameters
----------
large_array_shape : tuple
Shape of the large array.
small_array_shape : tuple
Shape of the small array.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
slices_large : tuple of slices
Slices in all directions for the large array, such that
``large_array[slices_large]`` extracts the region of the large array
that overlaps with the small array.
slices_small : tuple of slices
Slices in all directions for the small array, such that
``small_array[slices_small]`` extracts the region that is inside the
large array.
|
entailment
|
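A minimal usage sketch for overlap_slices above, pasting a small array into a large one at a corner position so the boundary clipping is visible; the shapes and position are arbitrary, and only numpy is required.

import numpy as np

large = np.zeros((5, 5))
small = np.ones((3, 3))
# Center the small array on pixel (0, 0); part of it falls outside the grid.
slc_lg, slc_sm = overlap_slices(large.shape, small.shape, (0, 0))
large[slc_lg] += small[slc_sm]
# Only the overlapping 2x2 corner block is filled:
# large[:2, :2] == 1, everything else == 0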
def make_library(**kwargs):
"""Build and return a ModelManager object and fill the associated model library
"""
library_yaml = kwargs.pop('library', 'models/library.yaml')
comp_yaml = kwargs.pop('comp', 'config/binning.yaml')
basedir = kwargs.pop('basedir', os.path.abspath('.'))
model_man = kwargs.get('ModelManager', ModelManager(basedir=basedir))
model_comp_dict = model_man.make_library(library_yaml, library_yaml, comp_yaml)
return dict(model_comp_dict=model_comp_dict,
ModelManager=model_man)
|
Build and return a ModelManager object and fill the associated model library
|
entailment
|
def edisp_disable_list(self):
""" Return the list of source for which energy dispersion should be turned off """
l = []
for model_comp in self.model_components.values():
if model_comp.edisp_disable:
l += [model_comp.info.source_name]
return l
|
Return the list of sources for which energy dispersion should be turned off
|
entailment
|
def make_srcmap_manifest(self, components, name_factory):
""" Build a yaml file that specfies how to make the srcmap files for a particular model
Parameters
----------
components : list
The binning components used in this analysis
name_factory : `NameFactory`
Object that handles naming conventions
Returns a dictionary that contains information about where to find the
source maps for each component of the model
"""
ret_dict = {}
for comp in components:
compkey = comp.make_key('{ebin_name}_{evtype_name}')
zcut = "zmax%i" % comp.zmax
name_keys = dict(modelkey=self.model_name,
zcut=zcut,
ebin=comp.ebin_name,
mktime='none',
psftype=comp.evtype_name,
coordsys=comp.coordsys)
outsrcmap = name_factory.merged_srcmaps(**name_keys)
ccube = name_factory.ccube(**name_keys)
src_dict = {}
for comp_name, model_comp in self.model_components.items():
comp_info = model_comp.info
model_type = comp_info.model_type
name_keys['sourcekey'] = comp_name
if model_type in ['CatalogSources']:
#sourcekey = comp_info.comp_key
sources = comp_info.source_names
name_keys['sourcekey'] = comp_info.catalog_info.catalog_name
elif model_type in ['CompositeSource']:
#sourcekey = comp_info.sourcekey
name_keys['sourcekey'] = comp_info.sourcekey
sources = [comp_info.source_name]
else:
#sourcekey = comp_name
sources = [comp_info.source_name]
src_dict[comp_name] = dict(sourcekey=comp_name,
srcmap_file=name_factory.srcmaps(**name_keys),
source_names=sources)
comp_dict = dict(outsrcmap=outsrcmap,
ccube=ccube,
source_dict=src_dict)
ret_dict[compkey] = comp_dict
return ret_dict
|
Build a yaml file that specifies how to make the srcmap files for a particular model
Parameters
----------
components : list
The binning components used in this analysis
name_factory : `NameFactory`
Object that handles naming conventions
Returns a dictionary that contains information about where to find the
source maps for each component of the model
|
entailment
|
def make_model_rois(self, components, name_factory):
""" Make the fermipy roi_model objects for each of a set of binning components """
ret_dict = {}
# Figure out which sources need to be split by components
master_roi_source_info = {}
sub_comp_sources = {}
for comp_name, model_comp in self.model_components.items():
comp_info = model_comp.info
if comp_info.components is None:
master_roi_source_info[comp_name] = model_comp
else:
sub_comp_sources[comp_name] = model_comp
# Build the xml for the master
master_roi = SourceFactory.make_roi(master_roi_source_info)
master_xml_mdl = name_factory.master_srcmdl_xml(
modelkey=self.model_name)
print("Writing master ROI model to %s" % master_xml_mdl)
master_roi.write_xml(master_xml_mdl)
ret_dict['master'] = master_roi
# Now deal with the components
for comp in components:
zcut = "zmax%i" % comp.zmax
compkey = "%s_%s" % (zcut, comp.make_key(
'{ebin_name}_{evtype_name}'))
# name_keys = dict(zcut=zcut,
# modelkey=self.model_name,
# component=compkey)
comp_roi_source_info = {}
for comp_name, model_comp in sub_comp_sources.items():
comp_info = model_comp.info
if comp_info.selection_dependent:
key = comp.make_key('{ebin_name}_{evtype_name}')
elif comp_info.moving:
key = zcut
info_clone = comp_info.components[key].clone_and_merge_sub(key)
comp_roi_source_info[comp_name] =\
ModelComponent(info=info_clone,
spectrum=model_comp.spectrum)
# Build the xml for the component
comp_roi = SourceFactory.make_roi(comp_roi_source_info)
comp_xml_mdl = name_factory.comp_srcmdl_xml(modelkey=self.model_name,
component=compkey)
print("Writing component ROI model to %s" % comp_xml_mdl)
comp_roi.write_xml(comp_xml_mdl)
ret_dict[compkey] = comp_roi
return ret_dict
|
Make the fermipy roi_model objects for each of a set of binning components
|
entailment
|
def read_model_yaml(self, modelkey):
""" Read the yaml file for the diffuse components
"""
model_yaml = self._name_factory.model_yaml(modelkey=modelkey,
fullpath=True)
model = yaml.safe_load(open(model_yaml))
return model
|
Read the yaml file for the diffuse components
|
entailment
|
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):
""" Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions
"""
ret_dict = {}
#catalog_dict = yaml.safe_load(open(catalog_yaml))
components_dict = Component.build_from_yamlfile(binning_yaml)
diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm,
DiffuseModelManager=self._dmm,
library=diffuse_yaml,
components=components_dict)
catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml,
CatalogSourceManager=self._csm)
ret_dict.update(diffuse_ret_dict['comp_info_dict'])
ret_dict.update(catalog_ret_dict['comp_info_dict'])
self._library.update(ret_dict)
return ret_dict
|
Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions
|
entailment
|
def make_model_info(self, modelkey):
""" Build a dictionary with the information for a particular model.
Parameters
----------
modelkey : str
Key used to identify this particular model
Return `ModelInfo`
"""
model = self.read_model_yaml(modelkey)
sources = model['sources']
components = OrderedDict()
spec_model_yaml = self._name_factory.fullpath(localpath=model['spectral_models'])
self._spec_lib.update(yaml.safe_load(open(spec_model_yaml)))
for source, source_info in sources.items():
model_type = source_info.get('model_type', None)
par_overrides = source_info.get('par_overides', None)
version = source_info['version']
spec_type = source_info['SpectrumType']
edisp_disable = source_info.get('edisp_disable', False)
sourcekey = "%s_%s" % (source, version)
if model_type == 'galprop_rings':
comp_info_dict = self.gmm.diffuse_comp_info_dicts(version)
def_spec_type = spec_type['default']
for comp_key, comp_info in comp_info_dict.items():
model_comp = ModelComponent(info=comp_info,
spectrum=\
self._spec_lib[spec_type.get(comp_key,
def_spec_type)],
par_overrides=par_overrides,
edisp_disable=edisp_disable)
components[comp_key] = model_comp
elif model_type == 'Catalog':
comp_info_dict = self.csm.split_comp_info_dict(source, version)
def_spec_type = spec_type['default']
for comp_key, comp_info in comp_info_dict.items():
model_comp = ModelComponent(info=comp_info,
spectrum=\
self._spec_lib[spec_type.get(comp_key,
def_spec_type)],
par_overrides=par_overrides,
edisp_disable=edisp_disable)
components[comp_key] = model_comp
else:
comp_info = self.dmm.diffuse_comp_info(sourcekey)
model_comp = ModelComponent(info=comp_info,
spectrum=self._spec_lib[spec_type],
par_overrides=par_overrides,
edisp_disable=edisp_disable)
components[sourcekey] = model_comp
ret_val = ModelInfo(model_name=modelkey,
model_components=components)
self._models[modelkey] = ret_val
return ret_val
|
Build a dictionary with the information for a particular model.
Parameters
----------
modelkey : str
Key used to identify this particular model
Return `ModelInfo`
|
entailment
|
def make_srcmap_manifest(self, modelkey, components, data):
"""Build a yaml file that specfies how to make the srcmap files for a particular model
Parameters
----------
modelkey : str
Key used to identify this particular model
components : list
The binning components used in this analysis
data : str
Path to file containing dataset definition
"""
try:
model_info = self._models[modelkey]
except KeyError:
model_info = self.make_model_info(modelkey)
self._name_factory.update_base_dict(data)
outfile = os.path.join('analysis', 'model_%s' %
modelkey, 'srcmap_manifest_%s.yaml' % modelkey)
manifest = model_info.make_srcmap_manifest(
components, self._name_factory)
outdir = os.path.dirname(outfile)
try:
os.makedirs(outdir)
except OSError:
pass
utils.write_yaml(manifest, outfile)
|
Build a yaml file that specifies how to make the srcmap files for a particular model
Parameters
----------
modelkey : str
Key used to identify this particular model
components : list
The binning components used in this analysis
data : str
Path to file containing dataset definition
|
entailment
|
def make_fermipy_config_yaml(self, modelkey, components, data, **kwargs):
"""Build a fermipy top-level yaml configuration file
Parameters
----------
modelkey : str
Key used to identify this particular model
components : list
The binning components used in this analysis
data : str
Path to file containing dataset definition
"""
model_dir = os.path.join('analysis', 'model_%s' % modelkey)
hpx_order = kwargs.get('hpx_order', 9)
self._name_factory.update_base_dict(data)
try:
model_info = self._models[modelkey]
except KeyError:
model_info = self.make_model_info(modelkey)
model_rois = model_info.make_model_rois(components, self._name_factory)
#source_names = model_info.component_names
master_xml_mdl = self._name_factory.master_srcmdl_xml(modelkey=modelkey, fullpath=True)
master_data = dict(scfile=self._name_factory.ft2file(fullpath=True),
cacheft1=False)
master_binning = dict(projtype='HPX',
roiwidth=360.,
binsperdec=4,
coordsys='GAL',
hpx_ordering_scheme="RING",
hpx_order=hpx_order,
hpx_ebin=True)
# master_fileio = dict(outdir=model_dir,
# logfile=os.path.join(model_dir, 'fermipy.log'))
master_fileio = dict(logfile='fermipy.log')
master_gtlike = dict(irfs=self._name_factory.irfs(**kwargs),
edisp_disable=model_info.edisp_disable_list(),
use_external_srcmap=True)
master_selection = dict(glat=0., glon=0., radius=180.)
master_model = dict(catalogs=[os.path.join(model_dir, master_xml_mdl)])
master_plotting = dict(label_ts_threshold=1e9)
master = dict(data=master_data,
binning=master_binning,
fileio=master_fileio,
selection=master_selection,
gtlike=master_gtlike,
model=master_model,
plotting=master_plotting,
components=[])
fermipy_dict = master
#comp_rois = {}
for comp in components:
zcut = "zmax%i" % comp.zmax
compkey = "%s_%s" % (zcut, comp.make_key(
'{ebin_name}_{evtype_name}'))
comp_roi = model_rois[compkey]
name_keys = dict(zcut=zcut,
modelkey=modelkey,
component=compkey,
mktime='none',
coordsys=comp.coordsys,
fullpath=True)
comp_data = dict(ltcube=self._name_factory.ltcube(**name_keys))
comp_selection = dict(logemin=comp.log_emin,
logemax=comp.log_emax,
zmax=comp.zmax,
evtype=comp.evtype)
comp_binning = dict(enumbins=comp.enumbins,
hpx_order=min(comp.hpx_order, hpx_order),
coordsys=comp.coordsys)
comp_gtlike = dict(srcmap=self._name_factory.merged_srcmaps(**name_keys),
bexpmap=self._name_factory.bexpcube(**name_keys),
use_external_srcmap=True)
comp_diffuse_xml = [self._name_factory.comp_srcmdl_xml(**name_keys)]
#comp_roi_source_info = {}
#diffuse_srcs = []
#for src in comp_roi.diffuse_sources:
# if isinstance(src, MapCubeSource):
# diffuse_srcs.append(dict(name=src.name, file=src.mapcube))
# elif isinstance(src, IsoSource):
# diffuse_srcs.append(dict(name=src.name, file=src.fileFunction))
# else:
# pass
comp_model = dict(diffuse_xml=comp_diffuse_xml)
sub_dict = dict(data=comp_data,
binning=comp_binning,
selection=comp_selection,
gtlike=comp_gtlike,
model=comp_model)
fermipy_dict['components'].append(sub_dict)
# Add placeholder diffuse sources
#fermipy_dict['model']['diffuse'] = diffuse_srcs
outfile = os.path.join(model_dir, 'config.yaml')
print("Writing fermipy config file %s" % outfile)
utils.write_yaml(fermipy_dict, outfile)
return fermipy_dict
|
Build a fermipy top-level yaml configuration file
Parameters
----------
modelkey : str
Key used to identify this particular model
components : list
The binning components used in this analysis
data : str
Path to file containing dataset definition
|
entailment
|
def get_sub_comp_info(source_info, comp):
"""Build and return information about a sub-component for a particular selection
"""
sub_comps = source_info.get('components', None)
if sub_comps is None:
return source_info.copy()
moving = source_info.get('moving', False)
selection_dependent = source_info.get('selection_dependent', False)
if selection_dependent:
key = comp.make_key('{ebin_name}_{evtype_name}')
elif moving:
key = "zmax%i" % comp.zmax
ret_dict = source_info.copy()
ret_dict.update(sub_comps[key])
return ret_dict
|
Build and return information about a sub-component for a particular selection
|
entailment
|
def replace_aliases(cut_dict, aliases):
"""Substitute aliases in a cut dictionary."""
for k, v in cut_dict.items():
for k0, v0 in aliases.items():
cut_dict[k] = cut_dict[k].replace(k0, '(%s)' % v0)
|
Substitute aliases in a cut dictionary.
|
entailment
|
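A minimal sketch of replace_aliases above; the cut and alias strings are invented for illustration. Note that the dictionary is modified in place.

cuts = {'sel': 'GoodEvt && Energy > 100'}
aliases = {'GoodEvt': 'Quality == 1'}
replace_aliases(cuts, aliases)
# cuts['sel'] is now '(Quality == 1) && Energy > 100'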
def get_files(files, extnames=['.root']):
"""Extract a list of file paths from a list containing both paths
and file lists with one path per line."""
files_out = []
for f in files:
mime = mimetypes.guess_type(f)
if os.path.splitext(f)[1] in extnames:
files_out += [f]
elif mime[0] == 'text/plain':
files_out += list(np.loadtxt(f, unpack=True, dtype='str'))
else:
raise Exception('Unrecognized input type.')
return files_out
|
Extract a list of file paths from a list containing both paths
and file lists with one path per line.
|
entailment
|
def get_cuts_from_xml(xmlfile):
"""Extract event selection strings from the XML file."""
root = ElementTree.ElementTree(file=xmlfile).getroot()
event_maps = root.findall('EventMap')
alias_maps = root.findall('AliasDict')[0]
event_classes = {}
event_types = {}
event_aliases = {}
for m in event_maps:
if m.attrib['altName'] == 'EVENT_CLASS':
for c in m.findall('EventCategory'):
event_classes[c.attrib['name']] = strip(
c.find('ShortCut').text)
elif m.attrib['altName'] == 'EVENT_TYPE':
for c in m.findall('EventCategory'):
event_types[c.attrib['name']] = strip(c.find('ShortCut').text)
for m in alias_maps.findall('Alias'):
event_aliases[m.attrib['name']] = strip(m.text)
replace_aliases(event_aliases, event_aliases.copy())
replace_aliases(event_aliases, event_aliases.copy())
replace_aliases(event_classes, event_aliases)
replace_aliases(event_types, event_aliases)
event_selections = {}
event_selections.update(event_classes)
event_selections.update(event_types)
event_selections.update(event_aliases)
return event_selections
|
Extract event selection strings from the XML file.
|
entailment
|
def set_event_list(tree, selection=None, fraction=None, start_fraction=None):
"""
Set the event list for a tree or chain.
Parameters
----------
tree : `ROOT.TTree`
Input tree/chain.
selection : str
Cut string defining the event list.
    fraction : float
        Fraction of the total file to include in the event list
        starting from the *end* of the file.
    start_fraction : float
        Fraction of the file at which the event list should start;
        combined with ``fraction`` this selects a contiguous slice.
"""
import ROOT
elist = rand_str()
if selection is None:
cuts = ''
else:
cuts = selection
if fraction is None or fraction >= 1.0:
n = tree.Draw(">>%s" % elist, cuts, "goff")
tree.SetEventList(ROOT.gDirectory.Get(elist))
elif start_fraction is None:
nentries = int(tree.GetEntries())
first_entry = min(int((1.0 - fraction) * nentries), nentries)
n = tree.Draw(">>%s" % elist, cuts, "goff", nentries, first_entry)
tree.SetEventList(ROOT.gDirectory.Get(elist))
else:
nentries = int(tree.GetEntries())
first_entry = min(int(start_fraction * nentries), nentries)
n = first_entry + int(nentries * fraction)
n = tree.Draw(">>%s" % elist, cuts, "goff",
n - first_entry, first_entry)
tree.SetEventList(ROOT.gDirectory.Get(elist))
return n
|
Set the event list for a tree or chain.
Parameters
----------
tree : `ROOT.TTree`
Input tree/chain.
selection : str
Cut string defining the event list.
fraction : float
    Fraction of the total file to include in the event list
    starting from the *end* of the file.
start_fraction : float
    Fraction of the file at which the event list should start;
    combined with ``fraction`` this selects a contiguous slice.
|
entailment
|
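A hedged usage sketch for set_event_list above; it requires PyROOT, and the tuple name, file name, and cut string are placeholders.

import ROOT

chain = ROOT.TChain('MeritTuple')
chain.Add('merit.root')
# Select events passing the cut from the last 50% of the chain.
n = set_event_list(chain, selection='TkrNumTracks > 0', fraction=0.5)
print('%i events selected' % n)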
def find_sources(self, prefix='', **kwargs):
"""An iterative source-finding algorithm that uses likelihood
ratio (TS) maps of the region of interest to find new sources.
After each iteration a new TS map is generated incorporating
sources found in the previous iteration. The method stops
when the number of iterations exceeds ``max_iter`` or no
sources exceeding ``sqrt_ts_threshold`` are found.
Parameters
----------
{options}
tsmap : dict
Keyword arguments dictionary for tsmap method.
tscube : dict
Keyword arguments dictionary for tscube method.
Returns
-------
peaks : list
List of peak objects.
sources : list
List of source objects.
"""
timer = Timer.create(start=True)
self.logger.info('Starting.')
schema = ConfigSchema(self.defaults['sourcefind'],
tsmap=self.defaults['tsmap'],
tscube=self.defaults['tscube'])
schema.add_option('search_skydir', None, '', SkyCoord)
schema.add_option('search_minmax_radius', [None, 1.0], '', list)
config = utils.create_dict(self.config['sourcefind'],
tsmap=self.config['tsmap'],
tscube=self.config['tscube'])
config = schema.create_config(config, **kwargs)
# Defining default properties of test source model
config['model'].setdefault('Index', 2.0)
config['model'].setdefault('SpectrumType', 'PowerLaw')
config['model'].setdefault('SpatialModel', 'PointSource')
config['model'].setdefault('Prefactor', 1E-13)
o = {'sources': [], 'peaks': []}
for i in range(config['max_iter']):
srcs, peaks = self._find_sources_iterate(prefix, i, **config)
self.logger.info('Found %i sources in iteration %i.' %
(len(srcs), i))
o['sources'] += srcs
o['peaks'] += peaks
if len(srcs) == 0:
break
self.logger.info('Done.')
self.logger.info('Execution time: %.2f s', timer.elapsed_time)
return o
|
An iterative source-finding algorithm that uses likelihood
ratio (TS) maps of the region of interest to find new sources.
After each iteration a new TS map is generated incorporating
sources found in the previous iteration. The method stops
when the number of iterations exceeds ``max_iter`` or no
sources exceeding ``sqrt_ts_threshold`` are found.
Parameters
----------
{options}
tsmap : dict
Keyword arguments dictionary for tsmap method.
tscube : dict
Keyword arguments dictionary for tscube method.
Returns
-------
peaks : list
List of peak objects.
sources : list
List of source objects.
|
entailment
|
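A usage sketch, assuming gta is an initialized GTAnalysis instance; the threshold and iteration values are illustrative.

res = gta.find_sources(sqrt_ts_threshold=5.0, max_iter=3)
print('Found %i new sources' % len(res['sources']))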
def localize(self, name, **kwargs):
"""Find the best-fit position of a source. Localization is
performed in two steps. First a TS map is computed centered
on the source with half-width set by ``dtheta_max``. A fit is
then performed to the maximum TS peak in this map. The source
position is then further refined by scanning the likelihood in
the vicinity of the peak found in the first step. The size of
the scan region is set to encompass the 99% positional
uncertainty contour as determined from the peak fit.
Parameters
----------
name : str
Source name.
{options}
optimizer : dict
Dictionary that overrides the default optimizer settings.
Returns
-------
localize : dict
Dictionary containing results of the localization
analysis.
"""
timer = Timer.create(start=True)
name = self.roi.get_source_by_name(name).name
schema = ConfigSchema(self.defaults['localize'],
optimizer=self.defaults['optimizer'])
schema.add_option('use_cache', True)
schema.add_option('prefix', '')
config = utils.create_dict(self.config['localize'],
optimizer=self.config['optimizer'])
config = schema.create_config(config, **kwargs)
self.logger.info('Running localization for %s' % name)
free_state = FreeParameterState(self)
loc = self._localize(name, **config)
free_state.restore()
self.logger.info('Finished localization.')
if config['make_plots']:
self._plotter.make_localization_plots(loc, self.roi,
prefix=config['prefix'])
outfile = \
utils.format_filename(self.workdir, 'loc',
prefix=[config['prefix'],
name.lower().replace(' ', '_')])
if config['write_fits']:
loc['file'] = os.path.basename(outfile) + '.fits'
self._make_localize_fits(loc, outfile + '.fits',
**config)
if config['write_npy']:
np.save(outfile + '.npy', dict(loc))
self.logger.info('Execution time: %.2f s', timer.elapsed_time)
return loc
|
Find the best-fit position of a source. Localization is
performed in two steps. First a TS map is computed centered
on the source with half-width set by ``dtheta_max``. A fit is
then performed to the maximum TS peak in this map. The source
position is then further refined by scanning the likelihood in
the vicinity of the peak found in the first step. The size of
the scan region is set to encompass the 99% positional
uncertainty contour as determined from the peak fit.
Parameters
----------
name : str
Source name.
{options}
optimizer : dict
Dictionary that overrides the default optimizer settings.
Returns
-------
localize : dict
Dictionary containing results of the localization
analysis.
|
entailment
|
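A usage sketch, assuming gta is an initialized GTAnalysis and 'mysrc' is a source in the ROI; the result keys shown here are assumptions about the returned dictionary.

loc = gta.localize('mysrc', dtheta_max=0.5, make_plots=False)
if loc['fit_success']:
    print(loc['skydir'])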
def _fit_position_tsmap(self, name, **kwargs):
"""Localize a source from its TS map."""
prefix = kwargs.get('prefix', '')
dtheta_max = kwargs.get('dtheta_max', 0.5)
zmin = kwargs.get('zmin', -3.0)
kw = {
'map_size': 2.0 * dtheta_max,
'write_fits': kwargs.get('write_fits', False),
'write_npy': kwargs.get('write_npy', False),
'use_pylike': kwargs.get('use_pylike', True),
'max_kernel_radius': self.config['tsmap']['max_kernel_radius'],
'loglevel': logging.DEBUG
}
src = self.roi.copy_source(name)
if src['SpatialModel'] in ['RadialDisk', 'RadialGaussian']:
kw['max_kernel_radius'] = max(kw['max_kernel_radius'],
2.0 * src['SpatialWidth'])
skydir = kwargs.get('skydir', src.skydir)
tsmap = self.tsmap(utils.join_strings([prefix, name.lower().
replace(' ', '_')]),
model=src.data,
map_skydir=skydir,
exclude=[name],
make_plots=False, **kw)
# Find peaks with TS > 4
peaks = find_peaks(tsmap['ts'], 4.0, 0.2)
peak_best = None
o = {}
for p in sorted(peaks, key=lambda t: t['amp'], reverse=True):
xy = p['ix'], p['iy']
ts_value = tsmap['ts'].data[xy[1], xy[0]]
posfit = fit_error_ellipse(tsmap['ts'], xy=xy, dpix=2,
zmin=max(zmin, -ts_value * 0.5))
offset = posfit['skydir'].separation(self.roi[name].skydir).deg
if posfit['fit_success'] and posfit['fit_inbounds']:
peak_best = p
break
if peak_best is None:
ts_value = np.max(tsmap['ts'].data)
posfit = fit_error_ellipse(tsmap['ts'], dpix=2,
zmin=max(zmin, -ts_value * 0.5))
o.update(posfit)
pix = posfit['skydir'].to_pixel(self.geom.wcs)
o['xpix'] = float(pix[0])
o['ypix'] = float(pix[1])
o['skydir'] = posfit['skydir'].transform_to('icrs')
o['pos_offset'] = posfit['skydir'].separation(
self.roi[name].skydir).deg
o['loglike'] = 0.5 * posfit['zoffset']
o['tsmap'] = tsmap['ts']
return o
|
Localize a source from its TS map.
|
entailment
|
def make_nfs_path(path):
"""Make a nfs version of a file path.
This just puts /nfs at the beginning instead of /gpfs"""
if os.path.isabs(path):
fullpath = path
else:
fullpath = os.path.abspath(path)
if len(fullpath) < 6:
return fullpath
if fullpath[0:6] == '/gpfs/':
fullpath = fullpath.replace('/gpfs/', '/nfs/')
return fullpath
|
Make an NFS version of a file path.
This just puts /nfs at the beginning instead of /gpfs
|
entailment
|
def make_gpfs_path(path):
"""Make a gpfs version of a file path.
This just puts /gpfs at the beginning instead of /nfs"""
    if os.path.isabs(path):
        fullpath = path
    else:
        fullpath = os.path.abspath(path)
if len(fullpath) < 5:
return fullpath
if fullpath[0:5] == '/nfs/':
fullpath = fullpath.replace('/nfs/', '/gpfs/')
return fullpath
|
Make a gpfs version of a file path.
This just puts /gpfs at the beginning instead of /nfs
|
entailment
|
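A quick round-trip sketch for the two path helpers above; the path is hypothetical.

p = make_gpfs_path('/nfs/farm/g/glast/u/someuser/data')
print(p)                  # '/gpfs/farm/g/glast/u/someuser/data'
print(make_nfs_path(p))   # back to the '/nfs/...' form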
def get_lsf_status():
"""Count and print the number of jobs in various LSF states
"""
status_count = {'RUN': 0,
'PEND': 0,
'SUSP': 0,
'USUSP': 0,
'NJOB': 0,
'UNKNWN': 0}
try:
subproc = subprocess.Popen(['bjobs'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
subproc.stderr.close()
output = subproc.stdout.readlines()
except OSError:
return status_count
for line in output[1:]:
line = line.strip().split()
# Protect against format of multiproc jobs
if len(line) < 5:
continue
status_count['NJOB'] += 1
for k in status_count:
if line[2] == k:
status_count[k] += 1
return status_count
|
Count the number of jobs in various LSF states
|
entailment
|
def build_bsub_command(command_template, lsf_args):
"""Build and return a lsf batch command template
The structure will be 'bsub -s <key> <value> <command_template>'
where <key> and <value> refer to items in lsf_args
"""
if command_template is None:
return ""
full_command = 'bsub -o {logfile}'
for key, value in lsf_args.items():
full_command += ' -%s' % key
if value is not None:
full_command += ' %s' % value
full_command += ' %s' % command_template
return full_command
|
Build and return an LSF batch command template
The structure will be 'bsub -o <logfile> -<key> <value> ... <command_template>'
where <key> and <value> refer to items in lsf_args
|
entailment
|
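A sketch of build_bsub_command above; the command template and LSF arguments are illustrative. Note the returned string is itself a template that still needs .format().

lsf_args = {'W': 1500, 'R': '"select[rhel60]"'}
tmpl = build_bsub_command('gtselect infile={infile}', lsf_args)
# tmpl: 'bsub -o {logfile} -W 1500 -R "select[rhel60]" gtselect infile={infile}'
cmd = tmpl.format(logfile='job.log', infile='events.fits')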
def get_slac_default_args(job_time=1500):
""" Create a batch job interface object.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
"""
slac_default_args = dict(lsf_args={'W': job_time,
'R': '\"select[rhel60&&!fell]\"'},
max_jobs=500,
time_per_cycle=15,
jobs_per_cycle=20,
max_job_age=90,
no_batch=False)
return slac_default_args.copy()
|
Get the default arguments for the SLAC batch farm interface.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
|
entailment
|
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
"""Send a single job to the LSF batch
Parameters
----------
link : `fermipy.jobs.chain.Link`
The link used to invoke the command we are running
key : str
A string that identifies this particular instance of the job
job_config : dict
A dictionary with the arguments for the job. Used with
the self._command_template job template
logfile : str
The logfile for this job; may be used to check for success/failure
"""
full_sub_dict = job_config.copy()
if self._no_batch:
full_command = "%s >& %s" % (
link.command_template().format(**full_sub_dict), logfile)
else:
full_sub_dict['logfile'] = logfile
full_command_template = build_bsub_command(
link.command_template(), self._lsf_args)
full_command = full_command_template.format(**full_sub_dict)
logdir = os.path.dirname(logfile)
        if self._dry_run:
            stream.write("%s\n" % full_command)
            return 0
try:
os.makedirs(logdir)
except OSError:
pass
proc = subprocess.Popen(full_command.split(),
stderr=stream,
stdout=stream)
proc.communicate()
return proc.returncode
|
Send a single job to the LSF batch
Parameters
----------
link : `fermipy.jobs.chain.Link`
The link used to invoke the command we are running
key : str
A string that identifies this particular instance of the job
job_config : dict
A dictionary with the arguments for the job. Used with
the self._command_template job template
logfile : str
The logfile for this job; may be used to check for success/failure
|
entailment
|
def submit_jobs(self, link, job_dict=None, job_archive=None, stream=sys.stdout):
"""Submit all the jobs in job_dict """
if link is None:
return JobStatus.no_job
if job_dict is None:
job_keys = link.jobs.keys()
else:
job_keys = sorted(job_dict.keys())
# copy & reverse the keys b/c we will be popping item off the back of
# the list
unsubmitted_jobs = job_keys
unsubmitted_jobs.reverse()
failed = False
if unsubmitted_jobs:
if stream != sys.stdout:
sys.stdout.write('Submitting jobs (%i): ' %
len(unsubmitted_jobs))
sys.stdout.flush()
while unsubmitted_jobs:
status = get_lsf_status()
njob_to_submit = min(self._max_jobs - status['NJOB'],
self._jobs_per_cycle,
len(unsubmitted_jobs))
if self._dry_run:
njob_to_submit = len(unsubmitted_jobs)
for i in range(njob_to_submit):
job_key = unsubmitted_jobs.pop()
# job_details = job_dict[job_key]
job_details = link.jobs[job_key]
job_config = job_details.job_config
if job_details.status == JobStatus.failed:
clean_job(job_details.logfile, {}, self._dry_run)
# clean_job(job_details.logfile,
# job_details.outfiles, self.args['dry_run'])
job_config['logfile'] = job_details.logfile
new_job_details = self.dispatch_job(
link, job_key, job_archive, stream)
if new_job_details.status == JobStatus.failed:
failed = True
clean_job(new_job_details.logfile,
new_job_details.outfiles, self._dry_run)
link.jobs[job_key] = new_job_details
if unsubmitted_jobs:
if stream != sys.stdout:
sys.stdout.write('.')
sys.stdout.flush()
stream.write('Sleeping %.0f seconds between submission cycles\n' %
self._time_per_cycle)
time.sleep(self._time_per_cycle)
if failed:
return JobStatus.failed
if stream != sys.stdout:
sys.stdout.write('!\n')
return JobStatus.done
|
Submit all the jobs in job_dict
|
entailment
|
def create_sc_table(scfile, colnames=None):
"""Load an FT2 file from a file or list of files."""
if utils.is_fits_file(scfile) and colnames is None:
return create_table_from_fits(scfile, 'SC_DATA')
if utils.is_fits_file(scfile):
files = [scfile]
else:
files = [line.strip() for line in open(scfile, 'r')]
tables = [create_table_from_fits(f, 'SC_DATA', colnames)
for f in files]
return vstack(tables)
|
Load an FT2 file from a file or list of files.
|
entailment
|
def create_table_from_fits(fitsfile, hduname, colnames=None):
"""Memory efficient function for loading a table from a FITS
file."""
if colnames is None:
return Table.read(fitsfile, hduname)
cols = []
with fits.open(fitsfile, memmap=True) as h:
for k in colnames:
data = h[hduname].data.field(k)
cols += [Column(name=k, data=data)]
return Table(cols)
|
Memory efficient function for loading a table from a FITS
file.
|
entailment
|
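A usage sketch for create_table_from_fits above; the file name is a placeholder, and START/STOP/LIVETIME are standard FT2 SC_DATA columns.

tab = create_table_from_fits('ft2.fits', 'SC_DATA',
                             colnames=['START', 'STOP', 'LIVETIME'])
print(tab.colnames)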
def get_spectral_index(src, egy):
"""Compute the local spectral index of a source."""
delta = 1E-5
f0 = src.spectrum()(pyLike.dArg(egy * (1 - delta)))
f1 = src.spectrum()(pyLike.dArg(egy * (1 + delta)))
if f0 > 0 and f1 > 0:
gamma = np.log10(f0 / f1) / np.log10((1 - delta) / (1 + delta))
else:
gamma = np.nan
return gamma
|
Compute the local spectral index of a source.
|
entailment
|
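The two-point logarithmic derivative above can be checked against a plain power law; this standalone sketch reproduces the same formula without the pyLikelihood wrapper.

import numpy as np

def local_index(dnde, egy, delta=1e-5):
    # Same finite-difference estimate as get_spectral_index above.
    f0, f1 = dnde(egy * (1 - delta)), dnde(egy * (1 + delta))
    return np.log10(f0 / f1) / np.log10((1 - delta) / (1 + delta))

def powerlaw(e):
    return 1e-12 * (e / 1000.0) ** -2.3

print(local_index(powerlaw, 1000.0))  # approximately -2.3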
def create(cls, infile, config=None, params=None, mask=None):
"""Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overridden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
"""
infile = os.path.abspath(infile)
roi_file, roi_data = utils.load_data(infile)
if config is None:
config = roi_data['config']
validate = False
else:
validate = True
gta = cls(config, validate=validate)
gta.setup(init_sources=False)
gta.load_roi(infile, params=params, mask=mask)
return gta
|
Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overridden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
|
entailment
|
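A usage sketch; the file names are placeholders, and fit1.npy stands for an output of write_roi.

gta = GTAnalysis.create('fit1.npy', config='config.yaml')
fit_res = gta.fit()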
def clone(self, config, **kwargs):
"""Make a clone of this analysis instance."""
gta = GTAnalysis(config, **kwargs)
gta._roi = copy.deepcopy(self.roi)
return gta
|
Make a clone of this analysis instance.
|
entailment
|
def set_random_seed(self, seed):
"""Set the seed for the random number generator"""
self.config['mc']['seed'] = seed
np.random.seed(seed)
|
Set the seed for the random number generator
|
entailment
|
def reload_source(self, name, init_source=True):
"""Delete and reload a source in the model. This will update
the spatial model of this source to the one defined in the XML
model."""
for c in self.components:
c.reload_source(name)
if init_source:
self._init_source(name)
self.like.model = self.like.components[0].model
|
Delete and reload a source in the model. This will update
the spatial model of this source to the one defined in the XML
model.
|
entailment
|
def set_source_morphology(self, name, **kwargs):
"""Set the spatial model of a source.
Parameters
----------
name : str
Source name.
spatial_model : str
Spatial model name (PointSource, RadialGaussian, etc.).
spatial_pars : dict
Dictionary of spatial parameters (optional).
use_cache : bool
Generate the spatial model by interpolating the cached source
map.
        use_pylike : bool
            Delete and re-add the source with pyLikelihood rather than
            updating the cached source map in place.
"""
name = self.roi.get_source_by_name(name).name
src = self.roi[name]
spatial_model = kwargs.get('spatial_model', src['SpatialModel'])
spatial_pars = kwargs.get('spatial_pars', {})
use_pylike = kwargs.get('use_pylike', True)
psf_scale_fn = kwargs.get('psf_scale_fn', None)
update_source = kwargs.get('update_source', False)
if hasattr(pyLike.BinnedLikelihood, 'setSourceMapImage') and not use_pylike:
src.set_spatial_model(spatial_model, spatial_pars)
self._update_srcmap(src.name, src, psf_scale_fn=psf_scale_fn)
else:
src = self.delete_source(name, loglevel=logging.DEBUG,
save_template=False)
src.set_spatial_model(spatial_model, spatial_pars)
self.add_source(src.name, src, init_source=False,
use_pylike=use_pylike, loglevel=logging.DEBUG)
if update_source:
self.update_source(name)
|
Set the spatial model of a source.
Parameters
----------
name : str
Source name.
spatial_model : str
Spatial model name (PointSource, RadialGaussian, etc.).
spatial_pars : dict
Dictionary of spatial parameters (optional).
use_cache : bool
Generate the spatial model by interpolating the cached source
map.
use_pylike : bool
    Delete and re-add the source with pyLikelihood rather than
    updating the cached source map in place.
|
entailment
|
def set_source_spectrum(self, name, spectrum_type='PowerLaw',
spectrum_pars=None, update_source=True):
"""Set the spectral model of a source. This function can be
used to change the spectral type of a source or modify its
spectral parameters. If called with
spectrum_type='FileFunction' and spectrum_pars=None, the
source spectrum will be replaced with a FileFunction with the
same differential flux distribution as the original spectrum.
Parameters
----------
name : str
Source name.
spectrum_type : str
Spectrum type (PowerLaw, etc.).
spectrum_pars : dict
Dictionary of spectral parameters (optional).
update_source : bool
Recompute all source characteristics (flux, TS, NPred)
using the new spectral model of the source.
"""
name = self.roi.get_source_by_name(name).name
src = self.roi[name]
spectrum_pars = {} if spectrum_pars is None else spectrum_pars
if (self.roi[name]['SpectrumType'] == 'PowerLaw' and
spectrum_type == 'LogParabola'):
spectrum_pars.setdefault('beta', {'value': 0.0, 'scale': 1.0,
'min': 0.0, 'max': 1.0})
spectrum_pars.setdefault('Eb', src.spectral_pars['Scale'])
spectrum_pars.setdefault('norm', src.spectral_pars['Prefactor'])
if 'alpha' not in spectrum_pars:
spectrum_pars['alpha'] = src.spectral_pars['Index']
spectrum_pars['alpha']['value'] *= -1.0
if spectrum_pars['alpha']['scale'] == -1.0:
spectrum_pars['alpha']['value'] *= -1.0
spectrum_pars['alpha']['scale'] *= -1.0
if spectrum_type == 'FileFunction':
self._create_filefunction(name, spectrum_pars)
else:
fn = gtutils.create_spectrum_from_dict(spectrum_type,
spectrum_pars)
self.like.setSpectrum(str(name), fn)
# Get parameters
src = self.components[0].like.logLike.getSource(str(name))
pars_dict = gtutils.get_function_pars_dict(src.spectrum())
self.roi[name]['SpectrumType'] = spectrum_type
self.roi[name].set_spectral_pars(pars_dict)
for c in self.components:
c.roi[name]['SpectrumType'] = spectrum_type
c.roi[name].set_spectral_pars(pars_dict)
if update_source:
self.update_source(name)
|
Set the spectral model of a source. This function can be
used to change the spectral type of a source or modify its
spectral parameters. If called with
spectrum_type='FileFunction' and spectrum_pars=None, the
source spectrum will be replaced with a FileFunction with the
same differential flux distribution as the original spectrum.
Parameters
----------
name : str
Source name.
spectrum_type : str
Spectrum type (PowerLaw, etc.).
spectrum_pars : dict
Dictionary of spectral parameters (optional).
update_source : bool
Recompute all source characteristics (flux, TS, NPred)
using the new spectral model of the source.
|
entailment
|
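A usage sketch for a hypothetical source named 'mysrc':

# Switch from PowerLaw to LogParabola; the alpha/beta/Eb seeding shown in
# the code above is applied automatically.
gta.set_source_spectrum('mysrc', spectrum_type='LogParabola')
# Or freeze the current shape into a FileFunction with the same dN/dE:
gta.set_source_spectrum('mysrc', spectrum_type='FileFunction')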
def set_source_dnde(self, name, dnde, update_source=True):
"""Set the differential flux distribution of a source with the
FileFunction spectral type.
Parameters
----------
name : str
Source name.
dnde : `~numpy.ndarray`
Array of differential flux values (cm^{-2} s^{-1} MeV^{-1}).
"""
name = self.roi.get_source_by_name(name).name
if self.roi[name]['SpectrumType'] != 'FileFunction':
msg = 'Wrong spectral type: %s' % self.roi[name]['SpectrumType']
self.logger.error(msg)
raise Exception(msg)
xy = self.get_source_dnde(name)
if len(dnde) != len(xy[0]):
msg = 'Wrong length for dnde array: %i' % len(dnde)
self.logger.error(msg)
raise Exception(msg)
for c in self.components:
src = c.like.logLike.getSource(str(name))
spectrum = src.spectrum()
file_function = pyLike.FileFunction_cast(spectrum)
file_function.setSpectrum(10**xy[0], dnde)
if update_source:
self.update_source(name)
|
Set the differential flux distribution of a source with the
FileFunction spectral type.
Parameters
----------
name : str
Source name.
dnde : `~numpy.ndarray`
Array of differential flux values (cm^{-2} s^{-1} MeV^{-1}).
|
entailment
|
def get_source_dnde(self, name):
"""Return differential flux distribution of a source. For
sources with FileFunction spectral type this returns the
internal differential flux array.
Returns
-------
loge : `~numpy.ndarray`
Array of energies at which the differential flux is
evaluated (log10(E/MeV)).
dnde : `~numpy.ndarray`
Array of differential flux values (cm^{-2} s^{-1} MeV^{-1})
evaluated at energies in ``loge``.
"""
name = self.roi.get_source_by_name(name).name
        if self.roi[name]['SpectrumType'] == 'FileFunction':
src = self.components[0].like.logLike.getSource(str(name))
spectrum = src.spectrum()
file_function = pyLike.FileFunction_cast(spectrum)
loge = file_function.log_energy()
logdnde = file_function.log_dnde()
loge = np.log10(np.exp(loge))
dnde = np.exp(logdnde)
return loge, dnde
else:
ebinsz = (self.log_energies[-1] -
self.log_energies[0]) / self.enumbins
loge = utils.extend_array(self.log_energies, ebinsz, 0.5, 6.5)
dnde = np.array([self.like[name].spectrum()(pyLike.dArg(10 ** egy))
for egy in loge])
return loge, dnde
|
Return differential flux distribution of a source. For
sources with FileFunction spectral type this returns the
internal differential flux array.
Returns
-------
loge : `~numpy.ndarray`
Array of energies at which the differential flux is
evaluated (log10(E/MeV)).
dnde : `~numpy.ndarray`
Array of differential flux values (cm^{-2} s^{-1} MeV^{-1})
evaluated at energies in ``loge``.
|
entailment
|
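A usage sketch combining the two methods above; 'mysrc' is assumed to already have the FileFunction spectral type (see set_source_spectrum).

loge, dnde = gta.get_source_dnde('mysrc')
gta.set_source_dnde('mysrc', dnde * 1.2)  # scale the spectrum up by 20%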
def _create_filefunction(self, name, spectrum_pars):
"""Replace the spectrum of an existing source with a
FileFunction."""
spectrum_pars = {} if spectrum_pars is None else spectrum_pars
if 'loge' in spectrum_pars:
loge = spectrum_pars.get('loge')
else:
ebinsz = (self.log_energies[-1] -
self.log_energies[0]) / self.enumbins
loge = utils.extend_array(self.log_energies, ebinsz, 0.5, 6.5)
        # Get the differential flux values
        if 'dnde' in spectrum_pars:
            dnde = spectrum_pars['dnde']
else:
dnde = np.array([self.like[name].spectrum()(pyLike.dArg(10 ** egy))
for egy in loge])
filename = \
os.path.join(self.workdir,
'%s_filespectrum.txt' % (name.lower().replace(' ', '_')))
# Create file spectrum txt file
np.savetxt(filename, np.vstack((10**loge, dnde)).T)
self.like.setSpectrum(name, str('FileFunction'))
self.roi[name]['Spectrum_Filename'] = filename
# Update
for c in self.components:
src = c.like.logLike.getSource(str(name))
spectrum = src.spectrum()
spectrum.getParam(str('Normalization')).setBounds(1E-3, 1E3)
file_function = pyLike.FileFunction_cast(spectrum)
file_function.readFunction(str(filename))
c.roi[name]['Spectrum_Filename'] = filename
|
Replace the spectrum of an existing source with a
FileFunction.
|
entailment
|
def stage_output(self):
"""Copy data products to final output directory."""
if self.workdir == self.outdir:
return
elif not os.path.isdir(self.workdir):
self.logger.error('Working directory does not exist.')
return
regex = self.config['fileio']['outdir_regex']
savefits = self.config['fileio']['savefits']
files = os.listdir(self.workdir)
self.logger.info('Staging files to %s', self.outdir)
fitsfiles = []
for c in self.components:
for f in c.files.values():
if f is None:
continue
fitsfiles += [os.path.basename(f)]
for f in files:
wpath = os.path.join(self.workdir, f)
opath = os.path.join(self.outdir, f)
if not utils.match_regex_list(regex, os.path.basename(f)):
continue
if os.path.isfile(opath) and filecmp.cmp(wpath, opath, False):
continue
if not savefits and f in fitsfiles:
continue
            self.logger.debug('Copying ' + f)
shutil.copy(wpath, self.outdir)
self.logger.info('Finished.')
|
Copy data products to final output directory.
|
entailment
|
def stage_input(self):
"""Copy input files to working directory."""
if self.workdir == self.outdir:
return
elif not os.path.isdir(self.workdir):
self.logger.error('Working directory does not exist.')
return
self.logger.info('Staging files to %s', self.workdir)
files = [os.path.join(self.outdir, f)
for f in os.listdir(self.outdir)]
regex = copy.deepcopy(self.config['fileio']['workdir_regex'])
for f in files:
if not os.path.isfile(f):
continue
if not utils.match_regex_list(regex, os.path.basename(f)):
continue
self.logger.debug('Copying ' + os.path.basename(f))
shutil.copy(f, self.workdir)
for c in self.components:
for f in c.files.values():
if f is None:
continue
wpath = os.path.join(self.workdir, os.path.basename(f))
opath = os.path.join(self.outdir, os.path.basename(f))
if os.path.isfile(wpath):
continue
elif os.path.isfile(opath):
self.logger.debug('Copying ' + os.path.basename(f))
shutil.copy(opath, self.workdir)
self.logger.info('Finished.')
|
Copy input files to working directory.
|
entailment
|
def setup(self, init_sources=True, overwrite=False, **kwargs):
"""Run pre-processing for each analysis component and
construct a joint likelihood object. This function performs
the following tasks: data selection (gtselect, gtmktime),
data binning (gtbin), and model generation (gtexpcube2, gtsrcmaps).
Parameters
----------
init_sources : bool
Choose whether to compute properties (flux, TS, etc.) for
individual sources.
overwrite : bool
Run all pre-processing steps even if the output file of
that step is present in the working directory. By default
this function will skip any steps for which the output file
already exists.
"""
loglevel = kwargs.get('loglevel', self.loglevel)
self.logger.log(loglevel, 'Running setup.')
# Make spatial maps for extended sources
for s in self.roi.sources:
if s.diffuse:
continue
if not s.extended:
continue
self.make_template(s)
# Run setup for each component
for i, c in enumerate(self.components):
c.setup(overwrite=overwrite)
# Create likelihood
self._create_likelihood()
# Determine tmin, tmax
for i, c in enumerate(self._components):
self._tmin = (c.tmin if self._tmin is None
else min(self._tmin, c.tmin))
            self._tmax = (c.tmax if self._tmax is None
                          else max(self._tmax, c.tmax))
if init_sources:
self.logger.log(loglevel, 'Initializing source properties')
for name in self.like.sourceNames():
self.logger.debug('Initializing source %s', name)
self._init_source(name)
self._update_roi()
self.logger.log(loglevel, 'Finished setup.')
|
Run pre-processing for each analysis component and
construct a joint likelihood object. This function performs
the following tasks: data selection (gtselect, gtmktime),
data binning (gtbin), and model generation (gtexpcube2, gtsrcmaps).
Parameters
----------
init_sources : bool
Choose whether to compute properties (flux, TS, etc.) for
individual sources.
overwrite : bool
Run all pre-processing steps even if the output file of
that step is present in the working directory. By default
this function will skip any steps for which the output file
already exists.
|
entailment
|
def _create_likelihood(self, srcmdl=None):
"""Instantiate the likelihood object for each component and
create a SummedLikelihood."""
self._like = SummedLikelihood()
for c in self.components:
c._create_binned_analysis(srcmdl)
self._like.addComponent(c.like)
self.like.model = self.like.components[0].model
self._fitcache = None
self._init_roi_model()
|
Instantiate the likelihood object for each component and
create a SummedLikelihood.
|
entailment
|
def generate_model(self, model_name=None):
"""Generate model maps for all components. model_name should
be a unique identifier for the model. If model_name is None
then the model maps will be generated using the current
parameters of the ROI."""
for i, c in enumerate(self._components):
c.generate_model(model_name=model_name)
|
Generate model maps for all components. model_name should
be a unique identifier for the model. If model_name is None
then the model maps will be generated using the current
parameters of the ROI.
|
entailment
|
def set_energy_range(self, logemin, logemax):
"""Set the energy bounds of the analysis. This restricts the
evaluation of the likelihood to the data that falls in this
range. Input values will be rounded to the closest bin edge
value. If either argument is None then the lower or upper
bound of the analysis instance will be used.
Parameters
----------
logemin : float
Lower energy bound in log10(E/MeV).
logemax : float
Upper energy bound in log10(E/MeV).
Returns
-------
eminmax : array
Minimum and maximum energy in log10(E/MeV).
"""
if logemin is None:
logemin = self.log_energies[0]
else:
imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
logemin = self.log_energies[imin]
if logemax is None:
logemax = self.log_energies[-1]
else:
imax = int(utils.val_to_edge(self.log_energies, logemax)[0])
logemax = self.log_energies[imax]
self._loge_bounds = np.array([logemin, logemax])
self._roi_data['loge_bounds'] = np.copy(self.loge_bounds)
for c in self.components:
c.set_energy_range(logemin, logemax)
return self._loge_bounds
|
Set the energy bounds of the analysis. This restricts the
evaluation of the likelihood to the data that falls in this
range. Input values will be rounded to the closest bin edge
value. If either argument is None then the lower or upper
bound of the analysis instance will be used.
Parameters
----------
logemin : float
Lower energy bound in log10(E/MeV).
logemax : float
Upper energy bound in log10(E/MeV).
Returns
-------
eminmax : array
Minimum and maximum energy in log10(E/MeV).
|
entailment
|
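A usage sketch; the bounds are illustrative and, as described above, will be snapped to the nearest bin edges.

bounds = gta.set_energy_range(2.5, 4.0)   # roughly 316 MeV to 10 GeV
print(bounds)
gta.set_energy_range(None, None)          # restore the full range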
def model_counts_map(self, name=None, exclude=None, use_mask=False):
"""Return the model counts map for a single source, a list of
sources, or for the sum of all sources in the ROI. The
exclude parameter can be used to exclude one or more
components when generating the model map.
Parameters
----------
name : str or list of str
Parameter controlling the set of sources for which the
model counts map will be calculated. If name=None the
model map will be generated for all sources in the ROI.
exclude : str or list of str
List of sources that will be excluded when calculating the
model map.
use_mask : bool
        Parameter that specifies whether the model counts map should
        include masked pixels (i.e., ones whose weights are <= 0).
Returns
-------
map : `~gammapy.maps.Map`
"""
maps = [c.model_counts_map(name, exclude, use_mask=use_mask)
for c in self.components]
return skymap.coadd_maps(self.geom, maps)
|
Return the model counts map for a single source, a list of
sources, or for the sum of all sources in the ROI. The
exclude parameter can be used to exclude one or more
components when generating the model map.
Parameters
----------
name : str or list of str
Parameter controlling the set of sources for which the
model counts map will be calculated. If name=None the
model map will be generated for all sources in the ROI.
exclude : str or list of str
List of sources that will be excluded when calculating the
model map.
use_mask : bool
Parameter that specifies whether the model counts map should
include masked pixels (i.e., ones whose weights are <= 0).
Returns
-------
map : `~gammapy.maps.Map`
|
entailment
|
def model_counts_spectrum(self, name, logemin=None, logemax=None,
summed=False, weighted=False):
"""Return the predicted number of model counts versus energy
for a given source and energy range. If summed=True return
the counts spectrum summed over all components otherwise
return a list of model spectra. If weighted=True return
the weighted version of the counts spectrum
"""
if logemin is None:
logemin = self.log_energies[0]
if logemax is None:
logemax = self.log_energies[-1]
if summed:
cs = np.zeros(self.enumbins)
imin = utils.val_to_bin_bounded(self.log_energies,
logemin + 1E-7)[0]
imax = utils.val_to_bin_bounded(self.log_energies,
logemax - 1E-7)[0] + 1
for c in self.components:
ecenter = 0.5 * (c.log_energies[:-1] + c.log_energies[1:])
counts = c.model_counts_spectrum(name, self.log_energies[0],
self.log_energies[-1], weighted)
cs += np.histogram(ecenter,
weights=counts,
bins=self.log_energies)[0]
return cs[imin:imax]
else:
cs = []
for c in self.components:
cs += [c.model_counts_spectrum(name, logemin,
logemax, weighted=weighted)]
return cs
|
Return the predicted number of model counts versus energy
for a given source and energy range. If summed=True return
the counts spectrum summed over all components otherwise
return a list of model spectra. If weighted=True return
the weighted version of the counts spectrum
|
entailment
|
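A standalone sketch of the re-binning trick used in the summed=True branch above: per-component counts are accumulated onto a common grid via a weighted histogram of the component bin centers. All numbers are invented.

import numpy as np

edges = np.linspace(2.0, 5.0, 7)        # coarse common log10(E/MeV) edges
comp_edges = np.linspace(2.0, 5.0, 13)  # finer component binning
comp_counts = np.ones(12)               # one count per fine bin
centers = 0.5 * (comp_edges[:-1] + comp_edges[1:])
summed = np.histogram(centers, weights=comp_counts, bins=edges)[0]
print(summed)                           # two fine bins per coarse bin -> all 2.0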
def get_sources(self, cuts=None, distance=None, skydir=None,
minmax_ts=None, minmax_npred=None, exclude=None,
square=False):
"""Retrieve list of sources in the ROI satisfying the given
selections.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
coordsys = self.config['binning']['coordsys']
return self.roi.get_sources(skydir, distance, cuts,
minmax_ts, minmax_npred,
exclude, square,
coordsys=coordsys)
|
Retrieve list of sources in the ROI satisfying the given
selections.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
|
entailment
|
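A usage sketch with illustrative selection values:

# Sources within 3 deg of the ROI center with TS >= 25.
srcs = gta.get_sources(distance=3.0, minmax_ts=[25, None])
for s in srcs:
    print(s.name)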
def add_source(self, name, src_dict, free=None, init_source=True,
save_source_maps=True, use_pylike=True,
use_single_psf=False, **kwargs):
"""Add a source to the ROI model. This function may be called
either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or source object defining the source properties
(coordinates, spectral parameters, etc.).
free : bool
Initialize the source with a free normalization parameter.
use_pylike : bool
Create source maps with pyLikelihood.
use_single_psf : bool
Use the PSF model calculated for the ROI center. If false
then a new model will be generated using the position of
the source.
"""
if self.roi.has_source(name):
msg = 'Source %s already exists.' % name
self.logger.error(msg)
raise Exception(msg)
loglevel = kwargs.pop('loglevel', self.loglevel)
self.logger.log(loglevel, 'Adding source ' + name)
src = self.roi.create_source(name, src_dict, rescale=True)
self.make_template(src)
for c in self.components:
c.add_source(name, src_dict, free=free,
save_source_maps=save_source_maps,
use_pylike=use_pylike,
use_single_psf=use_single_psf)
if self._like is None:
return
if self.config['gtlike']['edisp'] and src.name not in \
self.config['gtlike']['edisp_disable']:
self.set_edisp_flag(src.name, True)
self.like.syncSrcParams(str(name))
self.like.model = self.like.components[0].model
# if free is not None:
# self.free_norm(name, free, loglevel=logging.DEBUG)
if init_source:
self._init_source(name)
self._update_roi()
if self._fitcache is not None:
self._fitcache.update_source(name)
|
Add a source to the ROI model. This function may be called
either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or source object defining the source properties
(coordinates, spectral parameters, etc.).
free : bool
Initialize the source with a free normalization parameter.
use_pylike : bool
Create source maps with pyLikelihood.
use_single_psf : bool
Use the PSF model calculated for the ROI center. If false
then a new model will be generated using the position of
the source.
|
entailment
|
def add_sources_from_roi(self, names, roi, free=False, **kwargs):
"""Add multiple sources to the current ROI model copied from another ROI model.
Parameters
----------
names : list
List of str source names to add.
roi : `~fermipy.roi_model.ROIModel` object
The roi model from which to add sources.
free : bool
Initialize the source with a free normalization parameter.
"""
for name in names:
self.add_source(name, roi[name].data, free=free, **kwargs)
|
Add multiple sources to the current ROI model copied from another ROI model.
Parameters
----------
names : list
List of str source names to add.
roi : `~fermipy.roi_model.ROIModel` object
The roi model from which to add sources.
free : bool
Initialize the source with a free normalization parameter.
|
entailment
|
def delete_source(self, name, save_template=True, delete_source_map=False,
build_fixed_wts=True, **kwargs):
"""Delete a source from the ROI model.
Parameters
----------
name : str
Source name.
save_template : bool
Keep the SpatialMap FITS template associated with this
source.
delete_source_map : bool
Delete the source map associated with this source from the
source maps file.
Returns
-------
src : `~fermipy.roi_model.Model`
The deleted source object.
"""
if not self.roi.has_source(name):
self.logger.error('No source with name: %s', name)
return
loglevel = kwargs.pop('loglevel', self.loglevel)
self.logger.log(loglevel, 'Deleting source %s', name)
# STs require a source to be freed before deletion
if self.like is not None:
self.free_norm(name, loglevel=logging.DEBUG)
for c in self.components:
c.delete_source(name, save_template=save_template,
delete_source_map=delete_source_map,
build_fixed_wts=build_fixed_wts)
src = self.roi.get_source_by_name(name)
self.roi.delete_sources([src])
if self.like is not None:
self.like.model = self.like.components[0].model
self._update_roi()
return src
|
Delete a source from the ROI model.
Parameters
----------
name : str
Source name.
save_template : bool
Keep the SpatialMap FITS template associated with this
source.
delete_source_map : bool
Delete the source map associated with this source from the
source maps file.
Returns
-------
src : `~fermipy.roi_model.Model`
The deleted source object.
|
entailment
|