| sentence1 | sentence2 | label |
|---|---|---|
def main():
gtselect_keys = ['tmin', 'tmax', 'emin', 'emax', 'zmax', 'evtype', 'evclass',
'phasemin', 'phasemax', 'convtype', 'rad', 'ra', 'dec']
gtmktime_keys = ['roicut', 'filter']
usage = "usage: %(prog)s [options] "
description = "Run gtselect and gtmktime on one or more FT1 files. "
"Note that gtmktime will be skipped if no FT2 file is provided."
parser = argparse.ArgumentParser(usage=usage, description=description)
add_lsf_args(parser)
for k in gtselect_keys:
if k in ['evtype', 'evclass', 'convtype']:
parser.add_argument('--%s' % k, default=None, type=int, help='')
else:
parser.add_argument('--%s' % k, default=None, type=float, help='')
for k in gtmktime_keys:
parser.add_argument('--%s' % k, default=None, type=str, help='')
parser.add_argument('--rock_angle', default=None, type=float, help='')
parser.add_argument('--outdir', default=None, type=str,
help='Path to output directory used when merge=False.')
parser.add_argument('--output', default=None, type=str,
help='Path to output file used when merge=True.')
parser.add_argument('--scfile', default=None, type=str, help='')
parser.add_argument('--dry_run', default=False, action='store_true')
parser.add_argument('--overwrite', default=False, action='store_true')
parser.add_argument('--merge', default=False, action='store_true',
help='Merge input FT1 files into N files where N is determined '
'by files_per_split.')
parser.add_argument('--files_per_split', default=100,
type=int, help='Set the number of files to combine in each '
'split of the input file list.')
parser.add_argument('--file_idx_min', default=None,
type=int, help='Set the number of files to assign to '
'each batch job.')
parser.add_argument('--file_idx_max', default=None,
type=int, help='Set the number of files to assign to '
'each batch job.')
parser.add_argument('files', nargs='+', default=None,
help='List of files.')
args = parser.parse_args()
batch = vars(args).pop('batch')
files = vars(args).pop('files')
if args.outdir is not None:
args.outdir = os.path.abspath(args.outdir)
files = [os.path.abspath(f) for f in files]
ft1_files = get_files(files, ['.fit', '.fits'])
for i, f in enumerate(ft1_files):
if re.search(r'^root://', f) is None:
ft1_files[i] = os.path.abspath(f)
input_files = []
output_files = []
files_idx_min = []
files_idx_max = []
opts = []
if args.file_idx_min is not None and args.file_idx_max is not None:
files_idx_min = [args.file_idx_min]
files_idx_max = [args.file_idx_max]
input_files = [files]
output_files = [args.output]
elif args.merge:
if not args.output:
raise Exception('No output file defined.')
nfiles = len(ft1_files)
njob = int(np.ceil(nfiles / float(args.files_per_split)))
for ijob, i in enumerate(range(0, nfiles, args.files_per_split)):
if args.outdir is not None:
mkdir(args.outdir)
outdir = os.path.abspath(args.outdir)
else:
outdir = os.path.dirname(os.path.dirname(args.output))
outfile = os.path.splitext(os.path.basename(args.output))[0]
outfile += '_%03i.fits' % (ijob)
outfile = os.path.join(outdir, outfile)
input_files += [files]
output_files += [outfile]
files_idx_min += [i]
files_idx_max += [i + args.files_per_split]
opts += [vars(args).copy()]
opts[-1]['output'] = outfile
opts[-1]['file_idx_min'] = i
opts[-1]['file_idx_max'] = i + args.files_per_split
else:
input_files = ft1_files
files_idx_min = [i for i in range(len(ft1_files))]
files_idx_max = [i + 1 for i in range(len(ft1_files))]
output_files = [os.path.join(
args.outdir, os.path.basename(x)) for x in ft1_files]
opts = [vars(args).copy() for x in ft1_files]
if batch:
submit_jobs('fermipy-select',
input_files, opts, output_files, overwrite=args.overwrite,
dry_run=args.dry_run)
sys.exit(0)
logger = Logger.configure(os.path.basename(__file__), None, logging.INFO)
logger.info('Starting.')
if args.scfile is not None:
args.scfile = os.path.abspath(args.scfile)
cwd = os.getcwd()
user = os.environ['USER']
tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')
os.chdir(tmpdir)
logger.info('tmpdir %s', tmpdir)
logger.info('outdir %s', args.outdir)
logger.info('output %s', args.output)
for infiles, outfile, idx_min, idx_max in zip(input_files, output_files,
files_idx_min, files_idx_max):
logger.info('infiles %s', pprint.pformat(infiles))
logger.info('outfile %s', outfile)
infiles = get_files(infiles, ['.fit', '.fits'])
if idx_min is not None:
infiles = infiles[idx_min:idx_max]
for i, f in enumerate(infiles):
if re.search(r'^root://', f) is None:
continue
os.system('xrdcp %s %s' % (f, f.split('/')[-1]))
infiles[i] = os.path.join(tmpdir, f.split('/')[-1])
kw = {k: args.__dict__[k] for k in gtselect_keys}
if kw['emax'] is None:
kw['emax'] = 1E6
create_filelist(infiles, 'list.txt')
kw['infile'] = 'list.txt'
kw['outfile'] = 'out.fits'
staged_outfile = kw['outfile']
run_gtapp('gtselect', logger, kw)
kw = {k: args.__dict__[k] for k in gtmktime_keys}
if kw['roicut'] is None:
kw['roicut'] = 'no'
if kw['filter'] is None:
kw['filter'] = 'DATA_QUAL==1 && LAT_CONFIG==1'
if args.rock_angle is not None:
kw['filter'] += ' && ABS(ROCK_ANGLE)<%(rock)s ' % dict(
rock=args.rock_angle)
kw['evfile'] = 'out.fits'
kw['outfile'] = 'out_filtered.fits'
if args.scfile is not None:
kw['scfile'] = args.scfile
staged_outfile = kw['outfile']
run_gtapp('gtmktime', logger, kw)
logger.info('cp %s %s', staged_outfile, outfile)
shutil.copy(staged_outfile, outfile)
os.chdir(cwd)
logger.info('Deleting %s', tmpdir)
shutil.rmtree(tmpdir)
logger.info('Done.')
|
Note that gtmktime will be skipped if no FT2 file is provided.
|
entailment
|
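A minimal sketch of the merge bookkeeping in `main()` above: the input FT1 list is chunked into groups of `files_per_split` files and each chunk gets an indexed output name, mirroring the loop over `range(0, nfiles, files_per_split)`. The helper name `chunk_file_list` is hypothetical and not part of fermipy.

```python
import os

def chunk_file_list(ft1_files, files_per_split, output):
    """Mirror the merge loop above: one (file slice, outfile, idx_min, idx_max) per job."""
    base = os.path.splitext(os.path.basename(output))[0]
    outdir = os.path.dirname(os.path.abspath(output))
    jobs = []
    for ijob, i in enumerate(range(0, len(ft1_files), files_per_split)):
        outfile = os.path.join(outdir, '%s_%03i.fits' % (base, ijob))
        jobs.append((ft1_files[i:i + files_per_split], outfile, i, i + files_per_split))
    return jobs

# Example: 250 input files split into chunks of 100 -> 3 jobs
files = ['ft1_%04i.fits' % i for i in range(250)]
for chunk, outfile, imin, imax in chunk_file_list(files, 100, 'merged/ft1_merged.fits'):
    print(len(chunk), os.path.basename(outfile), imin, imax)
```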
def select_extended(cat_table):
"""Select only rows representing extended sources from a catalog table
"""
try:
l = [len(row.strip()) > 0 for row in cat_table['Extended_Source_Name'].data]
return np.array(l, bool)
except KeyError:
return cat_table['Extended']
|
Select only rows representing extended sources from a catalog table
|
entailment
|
def make_mask(cat_table, cut):
"""Mask a bit mask selecting the rows that pass a selection
"""
cut_var = cut['cut_var']
min_val = cut.get('min_val', None)
max_val = cut.get('max_val', None)
nsrc = len(cat_table)
if min_val is None:
min_mask = np.ones((nsrc), bool)
else:
min_mask = cat_table[cut_var] >= min_val
if max_val is None:
max_mask = np.ones((nsrc), bool)
else:
max_mask = cat_table[cut_var] <= max_val
full_mask = min_mask * max_mask
return full_mask
|
Make a bit mask selecting the rows that pass a selection
|
entailment
|
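A short usage sketch for `make_mask`, assuming the function above is importable; the toy table and cut values are invented for illustration.

```python
import numpy as np
from astropy.table import Table

# Toy catalog with a test-statistic column
cat_table = Table({'Source_Name': ['srcA', 'srcB', 'srcC'],
                   'TS_value': [12.0, 150.0, 40.0]})

# Keep sources with TS_value >= 25 (no upper bound, so max_mask is all True)
cut = {'cut_var': 'TS_value', 'min_val': 25.0}
mask = make_mask(cat_table, cut)             # [False, True, True]
print(list(cat_table['Source_Name'][mask]))  # ['srcB', 'srcC']
```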
def select_sources(cat_table, cuts):
"""Select only rows passing a set of cuts from catalog table
"""
nsrc = len(cat_table)
full_mask = np.ones((nsrc), bool)
for cut in cuts:
if cut == 'mask_extended':
full_mask *= mask_extended(cat_table)
elif cut == 'select_extended':
full_mask *= select_extended(cat_table)
else:
full_mask *= make_mask(cat_table, cut)
lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]
return lout
|
Select only rows passing a set of cuts from catalog table
|
entailment
|
def make_catalog_comp_dict(**kwargs):
"""Build and return the information about the catalog components
"""
library_yamlfile = kwargs.pop('library', 'models/library.yaml')
csm = kwargs.pop('CatalogSourceManager', CatalogSourceManager(**kwargs))
if library_yamlfile is None or library_yamlfile == 'None':
yamldict = {}
else:
yamldict = yaml.safe_load(open(library_yamlfile))
catalog_info_dict, comp_info_dict = csm.make_catalog_comp_info_dict(yamldict)
return dict(catalog_info_dict=catalog_info_dict,
comp_info_dict=comp_info_dict,
CatalogSourceManager=csm)
|
Build and return the information about the catalog components
|
entailment
|
def read_catalog_info_yaml(self, splitkey):
""" Read the yaml file for a particular split key
"""
catalog_info_yaml = self._name_factory.catalog_split_yaml(sourcekey=splitkey,
fullpath=True)
yaml_dict = yaml.safe_load(open(catalog_info_yaml))
# resolve env vars
yaml_dict['catalog_file'] = os.path.expandvars(yaml_dict['catalog_file'])
yaml_dict['catalog_extdir'] = os.path.expandvars(yaml_dict['catalog_extdir'])
return yaml_dict
|
Read the yaml file for a particular split key
|
entailment
|
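The environment-variable expansion done in `read_catalog_info_yaml` can be shown in isolation; the YAML snippet and the `FERMI_DIR` variable here are made up for the example.

```python
import os
import yaml

os.environ.setdefault('FERMI_DIR', '/opt/fermi')
text = """
catalog_file: $FERMI_DIR/catalogs/gll_psc_v16.fit
catalog_extdir: ${FERMI_DIR}/catalogs/Extended_archive_v15
"""
yaml_dict = yaml.safe_load(text)
for key in ('catalog_file', 'catalog_extdir'):
    yaml_dict[key] = os.path.expandvars(yaml_dict[key])
print(yaml_dict['catalog_file'])  # /opt/fermi/catalogs/gll_psc_v16.fit
```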
def build_catalog_info(self, catalog_info):
""" Build a CatalogInfo object """
cat = SourceFactory.build_catalog(**catalog_info)
catalog_info['catalog'] = cat
# catalog_info['catalog_table'] =
# Table.read(catalog_info['catalog_file'])
catalog_info['catalog_table'] = cat.table
catalog_info['roi_model'] =\
SourceFactory.make_fermipy_roi_model_from_catalogs([cat])
catalog_info['srcmdl_name'] =\
self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name'])
return CatalogInfo(**catalog_info)
|
Build a CatalogInfo object
|
entailment
|
def catalog_components(self, catalog_name, split_ver):
""" Return the set of merged components for a particular split key """
return sorted(self._split_comp_info_dicts["%s_%s" % (catalog_name, split_ver)].keys())
|
Return the set of merged components for a particular split key
|
entailment
|
def split_comp_info(self, catalog_name, split_ver, split_key):
""" Return the info for a particular split key """
return self._split_comp_info_dicts["%s_%s" % (catalog_name, split_ver)][split_key]
|
Return the info for a particular split key
|
entailment
|
def make_catalog_comp_info(self, full_cat_info, split_key, rule_key, rule_val, sources):
""" Make the information about a single merged component
Parameters
----------
full_cat_info : `_model_component.CatalogInfo`
Information about the full catalog
split_key : str
Key identifying the version of the splitting used
rule_key : str
Key identifying the specific rule for this component
rule_val : dict
Dictionary defining the cuts (and merge flag) for this component
sources : list
List of the names of the sources in this component
Returns `CompositeSourceInfo` or `CatalogSourcesInfo`
"""
merge = rule_val.get('merge', True)
sourcekey = "%s_%s_%s" % (
full_cat_info.catalog_name, split_key, rule_key)
srcmdl_name = self._name_factory.srcmdl_xml(sourcekey=sourcekey)
srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name)
kwargs = dict(source_name="%s_%s" % (full_cat_info.catalog_name, rule_key),
source_ver=split_key,
sourcekey=sourcekey,
srcmdl_name=srcmdl_name,
source_names=sources,
catalog_info=full_cat_info,
roi_model=SourceFactory.copy_selected_sources(full_cat_info.roi_model,
sources))
if merge:
return CompositeSourceInfo(**kwargs)
return CatalogSourcesInfo(**kwargs)
|
Make the information about a single merged component
Parameters
----------
full_cat_info : `_model_component.CatalogInfo`
Information about the full catalog
split_key : str
Key identifying the version of the splitting used
rule_key : str
Key identifying the specific rule for this component
rule_val : dict
Dictionary defining the cuts (and merge flag) for this component
sources : list
List of the names of the sources in this component
Returns `CompositeSourceInfo` or `CatalogSourcesInfo`
|
entailment
|
def make_catalog_comp_info_dict(self, catalog_sources):
""" Make the information about the catalog components
Parameters
----------
catalog_sources : dict
Dictionary with catalog source definitions
Returns
-------
catalog_ret_dict : dict
Dictionary mapping catalog_name to `model_component.CatalogInfo`
split_ret_dict : dict
Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
"""
catalog_ret_dict = {}
split_ret_dict = {}
for key, value in catalog_sources.items():
if value is None:
continue
if value['model_type'] != 'catalog':
continue
versions = value['versions']
for version in versions:
ver_key = "%s_%s" % (key, version)
source_dict = self.read_catalog_info_yaml(ver_key)
try:
full_cat_info = catalog_ret_dict[key]
except KeyError:
full_cat_info = self.build_catalog_info(source_dict)
catalog_ret_dict[key] = full_cat_info
try:
all_sources = [x.strip() for x in full_cat_info.catalog_table[
'Source_Name'].astype(str).tolist()]
except KeyError:
print(full_cat_info.catalog_table.colnames)
used_sources = []
rules_dict = source_dict['rules_dict']
split_dict = {}
for rule_key, rule_val in rules_dict.items():
# full_key =\
# self._name_factory.merged_sourcekey(catalog=ver_key,
# rulekey=rule_key)
sources = select_sources(
full_cat_info.catalog_table, rule_val['cuts'])
used_sources.extend(sources)
split_dict[rule_key] = self.make_catalog_comp_info(
full_cat_info, version, rule_key, rule_val, sources)
# Now deal with the remainder
for source in used_sources:
try:
all_sources.remove(source)
except ValueError:
continue
rule_val = dict(cuts=[],
merge=source_dict['remainder'].get('merge', False))
split_dict['remain'] = self.make_catalog_comp_info(
full_cat_info, version, 'remain', rule_val, all_sources)
# Merge in the info for this version of splits
split_ret_dict[ver_key] = split_dict
self._catalog_comp_info_dicts.update(catalog_ret_dict)
self._split_comp_info_dicts.update(split_ret_dict)
return (catalog_ret_dict, split_ret_dict)
|
Make the information about the catalog components
Parameters
----------
catalog_sources : dict
Dictionary with catalog source definitions
Returns
-------
catalog_ret_dict : dict
Dictionary mapping catalog_name to `model_component.CatalogInfo`
split_ret_dict : dict
Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
|
entailment
|
def extract_images_from_tscube(infile, outfile):
""" Extract data from table HDUs in TSCube file and convert them to FITS images
"""
inhdulist = fits.open(infile)
wcs = pywcs.WCS(inhdulist[0].header)
map_shape = inhdulist[0].data.shape
t_eng = Table.read(infile, "EBOUNDS")
t_scan = Table.read(infile, "SCANDATA")
t_fit = Table.read(infile, "FITDATA")
n_ebin = len(t_eng)
energies = np.ndarray((n_ebin + 1))
energies[0:-1] = t_eng["E_MIN"]
energies[-1] = t_eng["E_MAX"][-1]
cube_shape = (n_ebin, map_shape[1], map_shape[0])
wcs_cube = wcs_utils.wcs_add_energy_axis(wcs, energies)
outhdulist = [inhdulist[0], inhdulist["EBOUNDS"]]
FIT_COLNAMES = ['FIT_TS', 'FIT_STATUS', 'FIT_NORM',
'FIT_NORM_ERR', 'FIT_NORM_ERRP', 'FIT_NORM_ERRN']
SCAN_COLNAMES = ['TS', 'BIN_STATUS', 'NORM', 'NORM_UL',
'NORM_ERR', 'NORM_ERRP', 'NORM_ERRN', 'LOGLIKE']
for c in FIT_COLNAMES:
data = t_fit[c].data.reshape(map_shape)
hdu = fits.ImageHDU(data, wcs.to_header(), name=c)
outhdulist.append(hdu)
for c in SCAN_COLNAMES:
data = t_scan[c].data.swapaxes(0, 1).reshape(cube_shape)
hdu = fits.ImageHDU(data, wcs_cube.to_header(), name=c)
outhdulist.append(hdu)
hdulist = fits.HDUList(outhdulist)
hdulist.writeto(outfile, overwrite=True)
return hdulist
|
Extract data from table HDUs in TSCube file and convert them to FITS images
|
entailment
|
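A minimal sketch of the reshaping step at the heart of `extract_images_from_tscube`: a flat per-pixel table column is reshaped to the map geometry and wrapped in an `ImageHDU` that carries the WCS header. The WCS keywords and data here are invented, not taken from a real TSCube file.

```python
import numpy as np
from astropy.io import fits
from astropy import wcs as pywcs

# Fake 10x10 map geometry with a simple celestial WCS
wcs = pywcs.WCS(naxis=2)
wcs.wcs.ctype = ['RA---CAR', 'DEC--CAR']
wcs.wcs.crpix = [5.5, 5.5]
wcs.wcs.cdelt = [-0.1, 0.1]
wcs.wcs.crval = [83.6, 22.0]

map_shape = (10, 10)
fit_ts_column = np.random.uniform(0.0, 25.0, size=map_shape[0] * map_shape[1])

# Reshape the flat column to the map and attach the WCS header
hdu = fits.ImageHDU(fit_ts_column.reshape(map_shape),
                    wcs.to_header(), name='FIT_TS')
fits.HDUList([fits.PrimaryHDU(), hdu]).writeto('fit_ts_image.fits', overwrite=True)
```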
def convert_tscube_old(infile, outfile):
"""Convert between old and new TSCube formats."""
inhdulist = fits.open(infile)
# If already in the new-style format just write and exit
if 'DLOGLIKE_SCAN' in inhdulist['SCANDATA'].columns.names:
if infile != outfile:
inhdulist.writeto(outfile, overwrite=True)
return
# Get stuff out of the input file
nrows = inhdulist['SCANDATA']._nrows
nebins = inhdulist['EBOUNDS']._nrows
npts = inhdulist['SCANDATA'].data.field('NORMSCAN').shape[1] // nebins
emin = inhdulist['EBOUNDS'].data.field('e_min') / 1E3
emax = inhdulist['EBOUNDS'].data.field('e_max') / 1E3
eref = np.sqrt(emin * emax)
dnde_emin = inhdulist['EBOUNDS'].data.field('E_MIN_FL')
dnde_emax = inhdulist['EBOUNDS'].data.field('E_MAX_FL')
index = np.log(dnde_emin / dnde_emax) / np.log(emin / emax)
flux = PowerLaw.eval_flux(emin, emax, [dnde_emin, index], emin)
eflux = PowerLaw.eval_eflux(emin, emax, [dnde_emin, index], emin)
dnde = PowerLaw.eval_dnde(np.sqrt(emin * emax), [dnde_emin, index], emin)
ts_map = inhdulist['PRIMARY'].data.reshape((nrows))
ok_map = inhdulist['TSMAP_OK'].data.reshape((nrows))
n_map = inhdulist['N_MAP'].data.reshape((nrows))
errp_map = inhdulist['ERRP_MAP'].data.reshape((nrows))
errn_map = inhdulist['ERRN_MAP'].data.reshape((nrows))
err_map = np.ndarray((nrows))
m = errn_map > 0
err_map[m] = 0.5 * (errp_map[m] + errn_map[m])
err_map[~m] = errp_map[~m]
ul_map = n_map + 2.0 * errp_map
ncube = np.rollaxis(inhdulist['N_CUBE'].data,
0, 3).reshape((nrows, nebins))
errpcube = np.rollaxis(
inhdulist['ERRPCUBE'].data, 0, 3).reshape((nrows, nebins))
errncube = np.rollaxis(
inhdulist['ERRNCUBE'].data, 0, 3).reshape((nrows, nebins))
tscube = np.rollaxis(inhdulist['TSCUBE'].data,
0, 3).reshape((nrows, nebins))
nll_cube = np.rollaxis(
inhdulist['NLL_CUBE'].data, 0, 3).reshape((nrows, nebins))
ok_cube = np.rollaxis(
inhdulist['TSCUBE_OK'].data, 0, 3).reshape((nrows, nebins))
ul_cube = ncube + 2.0 * errpcube
m = errncube > 0
errcube = np.ndarray((nrows, nebins))
errcube[m] = 0.5 * (errpcube[m] + errncube[m])
errcube[~m] = errpcube[~m]
norm_scan = inhdulist['SCANDATA'].data.field(
'NORMSCAN').reshape((nrows, npts, nebins)).swapaxes(1, 2)
nll_scan = inhdulist['SCANDATA'].data.field(
'NLL_SCAN').reshape((nrows, npts, nebins)).swapaxes(1, 2)
# Adjust the "EBOUNDS" hdu
columns = inhdulist['EBOUNDS'].columns
columns.add_col(fits.Column(name=str('e_ref'),
format='E', array=eref * 1E3,
unit='keV'))
columns.add_col(fits.Column(name=str('ref_flux'),
format='D', array=flux,
unit='ph / (cm2 s)'))
columns.add_col(fits.Column(name=str('ref_eflux'),
format='D', array=eflux,
unit='MeV / (cm2 s)'))
columns.add_col(fits.Column(name=str('ref_dnde'),
format='D', array=dnde,
unit='ph / (MeV cm2 s)'))
columns.change_name('E_MIN_FL', str('ref_dnde_e_min'))
columns.change_unit('ref_dnde_e_min', 'ph / (MeV cm2 s)')
columns.change_name('E_MAX_FL', str('ref_dnde_e_max'))
columns.change_unit('ref_dnde_e_max', 'ph / (MeV cm2 s)')
columns.change_name('NPRED', str('ref_npred'))
hdu_e = fits.BinTableHDU.from_columns(columns, name='EBOUNDS')
# Make the "FITDATA" hdu
columns = fits.ColDefs([])
columns.add_col(fits.Column(
name=str('fit_ts'), format='E', array=ts_map))
columns.add_col(fits.Column(
name=str('fit_status'), format='E', array=ok_map))
columns.add_col(fits.Column(
name=str('fit_norm'), format='E', array=n_map))
columns.add_col(fits.Column(
name=str('fit_norm_err'), format='E', array=err_map))
columns.add_col(fits.Column(
name=str('fit_norm_errp'), format='E', array=errp_map))
columns.add_col(fits.Column(
name=str('fit_norm_errn'), format='E', array=errn_map))
hdu_f = fits.BinTableHDU.from_columns(columns, name='FITDATA')
# Make the "SCANDATA" hdu
columns = fits.ColDefs([])
columns.add_col(fits.Column(name=str('ts'),
format='%iE' % nebins, array=tscube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('bin_status'),
format='%iE' % nebins, array=ok_cube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('norm'),
format='%iE' % nebins, array=ncube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('norm_ul'),
format='%iE' % nebins, array=ul_cube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('norm_err'),
format='%iE' % nebins, array=errcube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('norm_errp'),
format='%iE' % nebins, array=errpcube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('norm_errn'),
format='%iE' % nebins, array=errncube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('loglike'),
format='%iE' % nebins, array=nll_cube,
dim=str('(%i)' % nebins)))
columns.add_col(fits.Column(name=str('norm_scan'),
format='%iE' % (nebins * npts),
array=norm_scan,
dim=str('(%i,%i)' % (npts, nebins))))
columns.add_col(fits.Column(name=str('dloglike_scan'),
format='%iE' % (nebins * npts),
array=nll_scan,
dim=str('(%i,%i)' % (npts, nebins))))
hdu_s = fits.BinTableHDU.from_columns(columns, name='SCANDATA')
hdulist = fits.HDUList([inhdulist[0],
hdu_s,
hdu_f,
inhdulist["BASELINE"],
hdu_e])
hdulist['SCANDATA'].header['UL_CONF'] = 0.95
hdulist.writeto(outfile, overwrite=True)
return hdulist
|
Convert between old and new TSCube formats.
|
entailment
|
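The two-point spectral index used in `convert_tscube_old` follows from evaluating a power law dnde = K * E**index at the two bin edges; a quick numeric check with made-up values (fermipy's convention keeps the exponent itself, so a falling spectrum has index = -2):

```python
import numpy as np

emin, emax = 1e2, 1e3              # MeV
index_true = -2.0
dnde_emin = 1e-9 * emin**index_true
dnde_emax = 1e-9 * emax**index_true

# Same estimator as in convert_tscube_old
index = np.log(dnde_emin / dnde_emax) / np.log(emin / emax)
print(index)  # -2.0, recovering the injected exponent
```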
def truncate_array(array1, array2, position):
"""Truncate array1 by finding the overlap with array2 when the
array1 center is located at the given position in array2."""
slices = []
for i in range(array1.ndim):
xmin = 0
xmax = array1.shape[i]
dxlo = array1.shape[i] // 2
dxhi = array1.shape[i] - dxlo
if position[i] - dxlo < 0:
xmin = max(dxlo - position[i], 0)
if position[i] + dxhi > array2.shape[i]:
xmax = array1.shape[i] - (position[i] + dxhi - array2.shape[i])
xmax = max(xmax, 0)
slices += [slice(xmin, xmax)]
return array1[tuple(slices)]
|
Truncate array1 by finding the overlap with array2 when the
array1 center is located at the given position in array2.
|
entailment
|
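A small demonstration of `truncate_array` (assuming the function above, with the tuple-of-slices indexing, is in scope): a 5x5 kernel centered near the corner of an 8x8 map keeps only its overlapping 4x4 region.

```python
import numpy as np

kernel = np.ones((5, 5))    # array1: e.g. a source-template postage stamp
counts = np.zeros((8, 8))   # array2: the larger map

# Center the kernel at pixel (1, 1): two rows/columns fall off the map edge
clipped = truncate_array(kernel, counts, (1, 1))
print(clipped.shape)  # (4, 4)
```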
def _sum_wrapper(fn):
"""
Wrapper to perform row-wise aggregation of list arguments and pass
them to a function. The return value of the function is summed
over the argument groups. Non-list arguments will be
automatically cast to a list.
"""
def wrapper(*args, **kwargs):
v = 0
new_args = _cast_args_to_list(args)
for arg in zip(*new_args):
v += fn(*arg, **kwargs)
return v
return wrapper
|
Wrapper to perform row-wise aggregation of list arguments and pass
them to a function. The return value of the function is summed
over the argument groups. Non-list arguments will be
automatically cast to a list.
|
entailment
|
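A usage sketch for `_sum_wrapper`. The helper `_cast_args_to_list` is not shown in this excerpt, so a minimal stand-in is defined here under the assumption that it wraps non-list arguments in single-element lists; both it and `_sum_wrapper` above are assumed to live in the same module.

```python
import numpy as np

def _cast_args_to_list(args):
    """Assumed behavior: promote non-list arguments to single-element lists."""
    return [arg if isinstance(arg, list) else [arg] for arg in args]

def dot(a, b):
    return np.sum(a * b)

summed_dot = _sum_wrapper(dot)

# Row-wise aggregation over two per-component lists, summed into one scalar
a = [np.array([1.0, 2.0]), np.array([3.0])]
b = [np.array([1.0, 1.0]), np.array([2.0])]
print(summed_dot(a, b))  # (1*1 + 2*1) + (3*2) = 9.0
```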
def _amplitude_bounds(counts, bkg, model):
"""
Compute bounds for the root of `_f_cash_root_cython`.
Parameters
----------
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
if isinstance(counts, list):
counts = np.concatenate([t.flat for t in counts])
bkg = np.concatenate([t.flat for t in bkg])
model = np.concatenate([t.flat for t in model])
s_model = np.sum(model)
s_counts = np.sum(counts)
sn = bkg / model
imin = np.argmin(sn)
sn_min = sn[imin]
c_min = counts[imin]
b_min = c_min / s_model - sn_min
b_max = s_counts / s_model - sn_min
return max(b_min, 0), b_max
|
Compute bounds for the root of `_f_cash_root_cython`.
Parameters
----------
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
|
entailment
|
def _f_cash_root(x, counts, bkg, model):
"""
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return np.sum(model * (counts / (x * model + bkg) - 1.0))
|
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
|
entailment
|
def _root_amplitude_brentq(counts, bkg, model, root_fn=_f_cash_root):
"""Fit amplitude by finding roots using Brent algorithm.
See Appendix A Stewart (2009).
Parameters
----------
counts : `~numpy.ndarray`
Slice of count map.
bkg : `~numpy.ndarray`
Slice of background map.
model : `~numpy.ndarray`
Model template to fit.
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
"""
# Compute amplitude bounds and assert counts > 0
amplitude_min, amplitude_max = _amplitude_bounds(counts, bkg, model)
if not np.sum(counts) > 0:
return amplitude_min, 0
args = (counts, bkg, model)
if root_fn(0.0, *args) < 0:
return 0.0, 1
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
result = brentq(root_fn, amplitude_min, amplitude_max, args=args,
maxiter=MAX_NITER, full_output=True, rtol=1E-4)
return result[0], result[1].iterations
except (RuntimeError, ValueError):
# Where the root finding fails NaN is set as amplitude
return np.nan, MAX_NITER
|
Fit amplitude by finding roots using Brent algorithm.
See Appendix A Stewart (2009).
Parameters
----------
counts : `~numpy.ndarray`
Slice of count map.
bkg : `~numpy.ndarray`
Slice of background map.
model : `~numpy.ndarray`
Model template to fit.
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
|
entailment
|
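A standalone numeric check of the Brent-based amplitude fit: for one counts/background/model set, the root of the Stewart (2009) estimating equation recovers the injected amplitude. This re-implements only the `scipy.optimize.brentq` call on toy data; it is a sketch, not the fermipy entry point.

```python
import numpy as np
from scipy.optimize import brentq

def f_cash_root(x, counts, bkg, model):
    # Stationarity condition of the Cash statistic: zero at the ML amplitude
    return np.sum(model * (counts / (x * model + bkg) - 1.0))

rng = np.random.default_rng(42)
model = np.full(100, 2.0)   # source template counts per pixel at unit normalization
bkg = np.full(100, 5.0)     # background counts per pixel
amp_true = 1.5
counts = rng.poisson(bkg + amp_true * model).astype(float)

amp_fit = brentq(f_cash_root, 0.0, 100.0, args=(counts, bkg, model), rtol=1e-4)
print(amp_fit)  # close to 1.5, up to Poisson fluctuations
```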
def poisson_log_like(counts, model):
"""Compute the Poisson log-likelihood function for the given
counts and model arrays."""
loglike = np.array(model)
m = counts > 0
loglike[m] -= counts[m] * np.log(model[m])
return loglike
|
Compute the Poisson log-likelihood function for the given
counts and model arrays.
|
entailment
|
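`poisson_log_like` drops the constant log(n!) term, so twice its sum is the Cash statistic. A quick check (assuming the function above is in scope) that the statistic is minimized near the true model normalization on toy data:

```python
import numpy as np

rng = np.random.default_rng(0)
bkg = np.full(500, 4.0)
counts = rng.poisson(bkg).astype(float)

# Cash statistic as a function of a global scale factor on the model
scales = np.linspace(0.5, 1.5, 101)
cash = [2.0 * np.sum(poisson_log_like(counts, s * bkg)) for s in scales]
print(scales[int(np.argmin(cash))])  # close to 1.0 for this realization
```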
def f_cash(x, counts, bkg, model):
"""
Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return 2.0 * poisson_log_like(counts, bkg + x * model)
|
Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
|
entailment
|
def _ts_value(position, counts, bkg, model, C_0_map):
"""
Compute TS value at a given pixel position using the approach described
in Stewart (2009).
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source model map.
Returns
-------
TS : float
TS value at the given pixel position.
"""
extract_fn = _collect_wrapper(extract_large_array)
truncate_fn = _collect_wrapper(extract_small_array)
# Get data slices
counts_slice = extract_fn(counts, model, position)
bkg_slice = extract_fn(bkg, model, position)
C_0_slice = extract_fn(C_0_map, model, position)
model_slice = truncate_fn(model, counts, position)
# Flattened Arrays
counts_ = np.concatenate([t.flat for t in counts_slice])
bkg_ = np.concatenate([t.flat for t in bkg_slice])
model_ = np.concatenate([t.flat for t in model_slice])
C_0_ = np.concatenate([t.flat for t in C_0_slice])
C_0 = np.sum(C_0_)
root_fn = _sum_wrapper(_f_cash_root)
amplitude, niter = _root_amplitude_brentq(counts_, bkg_, model_,
root_fn=_f_cash_root)
if niter > MAX_NITER:
print('Exceeded maximum number of function evaluations!')
return np.nan, amplitude, niter
with np.errstate(invalid='ignore', divide='ignore'):
C_1 = f_cash_sum(amplitude, counts_, bkg_, model_)
# Compute and return TS value
return (C_0 - C_1) * np.sign(amplitude), amplitude, niter
|
Compute TS value at a given pixel position using the approach described
in Stewart (2009).
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source model map.
Returns
-------
TS : float
TS value at the given pixel position.
|
entailment
|
def _ts_value_newton(position, counts, bkg, model, C_0_map):
"""
Compute TS value at a given pixel position using the newton
method.
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source model map.
Returns
-------
TS : float
TS value at the given pixel position.
amp : float
Best-fit amplitude of the test source.
niter : int
Number of fit iterations.
"""
extract_fn = _collect_wrapper(extract_large_array)
truncate_fn = _collect_wrapper(extract_small_array)
# Get data slices
counts_slice = extract_fn(counts, model, position)
bkg_slice = extract_fn(bkg, model, position)
C_0_map_slice = extract_fn(C_0_map, model, position)
model_slice = truncate_fn(model, counts, position)
# Mask of pixels with > 0 counts
mask = [c > 0 for c in counts_slice]
# Sum of background and model in empty pixels
bkg_sum = np.sum(np.array([np.sum(t[~m])
for t, m in zip(bkg_slice, mask)]))
model_sum = np.sum(np.array([np.sum(t[~m])
for t, m in zip(model_slice, mask)]))
# Flattened Arrays
counts_ = np.concatenate([t[m].flat for t, m in zip(counts_slice, mask)])
bkg_ = np.concatenate([t[m].flat for t, m in zip(bkg_slice, mask)])
model_ = np.concatenate([t[m].flat for t, m in zip(model_slice, mask)])
C_0 = np.sum(np.array([np.sum(t) for t in C_0_map_slice]))
amplitude, niter = _fit_amplitude_newton(counts_, bkg_, model_,
model_sum)
if niter > MAX_NITER:
print('Exceeded maximum number of function evaluations!')
return np.nan, amplitude, niter
with np.errstate(invalid='ignore', divide='ignore'):
C_1 = f_cash_sum(amplitude, counts_, bkg_, model_, bkg_sum, model_sum)
# Compute and return TS value
return (C_0 - C_1) * np.sign(amplitude), amplitude, niter
|
Compute TS value at a given pixel position using the newton
method.
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source model map.
Returns
-------
TS : float
TS value at the given pixel position.
amp : float
Best-fit amplitude of the test source.
niter : int
Number of fit iterations.
|
entailment
|
def tsmap(self, prefix='', **kwargs):
"""Generate a spatial TS map for a source component with
properties defined by the `model` argument. The TS map will
have the same geometry as the ROI. The output of this method
is a dictionary containing `~fermipy.skymap.Map` objects with
the TS and amplitude of the best-fit test source. By default
this method will also save maps to FITS files and render them
as image files.
This method uses a simplified likelihood fitting
implementation that only fits for the normalization of the
test source. Before running this method it is recommended to
first optimize the ROI model (e.g. by running
:py:meth:`~fermipy.gtanalysis.GTAnalysis.optimize`).
Parameters
----------
prefix : str
Optional string that will be prepended to all output files.
{options}
Returns
-------
tsmap : dict
A dictionary containing the `~fermipy.skymap.Map` objects
for TS and source amplitude.
"""
timer = Timer.create(start=True)
schema = ConfigSchema(self.defaults['tsmap'])
schema.add_option('loglevel', logging.INFO)
schema.add_option('map_skydir', None, '', astropy.coordinates.SkyCoord)
schema.add_option('map_size', 1.0)
schema.add_option('threshold', 1E-2, '', float)
schema.add_option('use_pylike', True, '', bool)
schema.add_option('outfile', None, '', str)
config = schema.create_config(self.config['tsmap'], **kwargs)
# Defining default properties of test source model
config['model'].setdefault('Index', 2.0)
config['model'].setdefault('SpectrumType', 'PowerLaw')
config['model'].setdefault('SpatialModel', 'PointSource')
self.logger.log(config['loglevel'], 'Generating TS map')
o = self._make_tsmap_fast(prefix, **config)
if config['make_plots']:
plotter = plotting.AnalysisPlotter(self.config['plotting'],
fileio=self.config['fileio'],
logging=self.config['logging'])
plotter.make_tsmap_plots(o, self.roi)
self.logger.log(config['loglevel'], 'Finished TS map')
outfile = config.get('outfile', None)
if outfile is None:
outfile = utils.format_filename(self.workdir, 'tsmap',
prefix=[o['name']])
else:
outfile = os.path.join(self.workdir,
os.path.splitext(outfile)[0])
if config['write_fits']:
o['file'] = os.path.basename(outfile) + '.fits'
self._make_tsmap_fits(o, outfile + '.fits')
if config['write_npy']:
np.save(outfile + '.npy', o)
self.logger.log(config['loglevel'],
'Execution time: %.2f s', timer.elapsed_time)
return o
|
Generate a spatial TS map for a source component with
properties defined by the `model` argument. The TS map will
have the same geometry as the ROI. The output of this method
is a dictionary containing `~fermipy.skymap.Map` objects with
the TS and amplitude of the best-fit test source. By default
this method will also save maps to FITS files and render them
as image files.
This method uses a simplified likelihood fitting
implementation that only fits for the normalization of the
test source. Before running this method it is recommended to
first optimize the ROI model (e.g. by running
:py:meth:`~fermipy.gtanalysis.GTAnalysis.optimize`).
Parameters
----------
prefix : str
Optional string that will be prepended to all output files.
{options}
Returns
-------
tsmap : dict
A dictionary containing the `~fermipy.skymap.Map` objects
for TS and source amplitude.
|
entailment
|
def _make_tsmap_fast(self, prefix, **kwargs):
"""
Make a TS map from a GTAnalysis instance. This is a
simplified implementation optimized for speed that only fits
for the source normalization (all background components are
kept fixed). The spectral/spatial characteristics of the test
source can be defined with the src_dict argument. By default
this method will generate a TS map for a point source with an
index=2.0 power-law spectrum.
Parameters
----------
model : dict or `~fermipy.roi_model.Source`
Dictionary or Source object defining the properties of the
test source that will be used in the scan.
"""
loglevel = kwargs.get('loglevel', self.loglevel)
src_dict = copy.deepcopy(kwargs.setdefault('model', {}))
src_dict = {} if src_dict is None else src_dict
multithread = kwargs.setdefault('multithread', False)
threshold = kwargs.setdefault('threshold', 1E-2)
max_kernel_radius = kwargs.get('max_kernel_radius')
loge_bounds = kwargs.setdefault('loge_bounds', None)
use_pylike = kwargs.setdefault('use_pylike', True)
if loge_bounds:
if len(loge_bounds) != 2:
raise Exception('Wrong size of loge_bounds array.')
loge_bounds[0] = (loge_bounds[0] if loge_bounds[0] is not None
else self.log_energies[0])
loge_bounds[1] = (loge_bounds[1] if loge_bounds[1] is not None
else self.log_energies[-1])
else:
loge_bounds = [self.log_energies[0], self.log_energies[-1]]
# Put the test source at the pixel closest to the ROI center
xpix, ypix = (np.round((self.npix - 1.0) / 2.),
np.round((self.npix - 1.0) / 2.))
cpix = np.array([xpix, ypix])
map_geom = self._geom.to_image()
frame = coordsys_to_frame(map_geom.coordsys)
skydir = SkyCoord(*map_geom.pix_to_coord((cpix[0], cpix[1])),
frame=frame, unit='deg')
skydir = skydir.transform_to('icrs')
src_dict['ra'] = skydir.ra.deg
src_dict['dec'] = skydir.dec.deg
src_dict.setdefault('SpatialModel', 'PointSource')
src_dict.setdefault('SpatialWidth', 0.3)
src_dict.setdefault('Index', 2.0)
src_dict.setdefault('Prefactor', 1E-13)
counts = []
bkg = []
model = []
c0_map = []
eslices = []
enumbins = []
model_npred = 0
for c in self.components:
imin = utils.val_to_edge(c.log_energies, loge_bounds[0])[0]
imax = utils.val_to_edge(c.log_energies, loge_bounds[1])[0]
eslice = slice(imin, imax)
bm = c.model_counts_map(exclude=kwargs['exclude']).data.astype('float')[
eslice, ...]
cm = c.counts_map().data.astype('float')[eslice, ...]
bkg += [bm]
counts += [cm]
c0_map += [cash(cm, bm)]
eslices += [eslice]
enumbins += [cm.shape[0]]
self.add_source('tsmap_testsource', src_dict, free=True,
init_source=False, use_single_psf=True,
use_pylike=use_pylike,
loglevel=logging.DEBUG)
src = self.roi['tsmap_testsource']
# self.logger.info(str(src_dict))
modelname = utils.create_model_name(src)
for c, eslice in zip(self.components, eslices):
mm = c.model_counts_map('tsmap_testsource').data.astype('float')[
eslice, ...]
model_npred += np.sum(mm)
model += [mm]
self.delete_source('tsmap_testsource', loglevel=logging.DEBUG)
for i, mm in enumerate(model):
dpix = 3
for j in range(mm.shape[0]):
ix, iy = np.unravel_index(
np.argmax(mm[j, ...]), mm[j, ...].shape)
mx = mm[j, ix, :] > mm[j, ix, iy] * threshold
my = mm[j, :, iy] > mm[j, ix, iy] * threshold
dpix = max(dpix, np.round(np.sum(mx) / 2.))
dpix = max(dpix, np.round(np.sum(my) / 2.))
if max_kernel_radius is not None and \
dpix > int(max_kernel_radius / self.components[i].binsz):
dpix = int(max_kernel_radius / self.components[i].binsz)
xslice = slice(max(int(xpix - dpix), 0),
min(int(xpix + dpix + 1), self.npix))
model[i] = model[i][:, xslice, xslice]
ts_values = np.zeros((self.npix, self.npix))
amp_values = np.zeros((self.npix, self.npix))
wrap = functools.partial(_ts_value_newton, counts=counts,
bkg=bkg, model=model,
C_0_map=c0_map)
if kwargs['map_skydir'] is not None:
map_offset = wcs_utils.skydir_to_pix(kwargs['map_skydir'],
map_geom.wcs)
map_delta = 0.5 * kwargs['map_size'] / self.components[0].binsz
xmin = max(int(np.ceil(map_offset[1] - map_delta)), 0)
xmax = min(int(np.floor(map_offset[1] + map_delta)) + 1, self.npix)
ymin = max(int(np.ceil(map_offset[0] - map_delta)), 0)
ymax = min(int(np.floor(map_offset[0] + map_delta)) + 1, self.npix)
xslice = slice(xmin, xmax)
yslice = slice(ymin, ymax)
xyrange = [range(xmin, xmax), range(ymin, ymax)]
wcs = map_geom.wcs.deepcopy()
npix = (ymax - ymin, xmax - xmin)
crpix = (map_geom._crpix[0] - ymin, map_geom._crpix[1] - xmin)
wcs.wcs.crpix[0] -= ymin
wcs.wcs.crpix[1] -= xmin
# FIXME: We should implement this with a proper cutout method
map_geom = WcsGeom(wcs, npix, crpix=crpix)
else:
xyrange = [range(self.npix), range(self.npix)]
xslice = slice(0, self.npix)
yslice = slice(0, self.npix)
positions = []
for i, j in itertools.product(xyrange[0], xyrange[1]):
p = [[k // 2, i, j] for k in enumbins]
positions += [p]
self.logger.log(loglevel, 'Fitting test source.')
if multithread:
pool = Pool()
results = pool.map(wrap, positions)
pool.close()
pool.join()
else:
results = map(wrap, positions)
for i, r in enumerate(results):
ix = positions[i][0][1]
iy = positions[i][0][2]
ts_values[ix, iy] = r[0]
amp_values[ix, iy] = r[1]
ts_values = ts_values[xslice, yslice]
amp_values = amp_values[xslice, yslice]
ts_map = WcsNDMap(map_geom, ts_values)
sqrt_ts_map = WcsNDMap(map_geom, ts_values**0.5)
npred_map = WcsNDMap(map_geom, amp_values * model_npred)
amp_map = WcsNDMap(map_geom, amp_values * src.get_norm())
o = {'name': utils.join_strings([prefix, modelname]),
'src_dict': copy.deepcopy(src_dict),
'file': None,
'ts': ts_map,
'sqrt_ts': sqrt_ts_map,
'npred': npred_map,
'amplitude': amp_map,
'loglike': -self.like(),
'config': kwargs
}
return o
|
Make a TS map from a GTAnalysis instance. This is a
simplified implementation optimized for speed that only fits
for the source normalization (all background components are
kept fixed). The spectral/spatial characteristics of the test
source can be defined with the src_dict argument. By default
this method will generate a TS map for a point source with an
index=2.0 power-law spectrum.
Parameters
----------
model : dict or `~fermipy.roi_model.Source`
Dictionary or Source object defining the properties of the
test source that will be used in the scan.
|
entailment
|
def tscube(self, prefix='', **kwargs):
"""Generate a spatial TS map for a source component with
properties defined by the `model` argument. This method uses
the `gttscube` ST application for source fitting and will
simultaneously fit the test source normalization as well as
the normalizations of any background components that are
currently free. The output of this method is a dictionary
containing `~fermipy.skymap.Map` objects with the TS and
amplitude of the best-fit test source. By default this method
will also save maps to FITS files and render them as image
files.
Parameters
----------
prefix : str
Optional string that will be prepended to all output files
(FITS and rendered images).
model : dict
Dictionary defining the properties of the test source.
do_sed : bool
Compute the energy bin-by-bin fits.
nnorm : int
Number of points in the likelihood v. normalization scan.
norm_sigma : float
Number of sigma to use for the scan range.
tol : float
Criteria for fit convergence (estimated vertical distance
to min < tol).
tol_type : int
Absolute (0) or relative (1) criteria for convergence.
max_iter : int
Maximum number of iterations for the Newton's method fitter
remake_test_source : bool
If true, recomputes the test source image (otherwise just shifts it)
st_scan_level : int
make_plots : bool
Write image files.
write_fits : bool
Write a FITS file with the results of the analysis.
Returns
-------
maps : dict
A dictionary containing the `~fermipy.skymap.Map` objects
for TS and source amplitude.
"""
self.logger.info('Generating TS cube')
schema = ConfigSchema(self.defaults['tscube'])
schema.add_option('make_plots', True)
schema.add_option('write_fits', True)
schema.add_option('write_npy', True)
config = schema.create_config(self.config['tscube'], **kwargs)
maps = self._make_ts_cube(prefix, **config)
if config['make_plots']:
plotter = plotting.AnalysisPlotter(self.config['plotting'],
fileio=self.config['fileio'],
logging=self.config['logging'])
plotter.make_tsmap_plots(maps, self.roi, suffix='tscube')
self.logger.info("Finished TS cube")
return maps
|
Generate a spatial TS map for a source component with
properties defined by the `model` argument. This method uses
the `gttscube` ST application for source fitting and will
simultaneously fit the test source normalization as well as
the normalizations of any background components that are
currently free. The output of this method is a dictionary
containing `~fermipy.skymap.Map` objects with the TS and
amplitude of the best-fit test source. By default this method
will also save maps to FITS files and render them as image
files.
Parameters
----------
prefix : str
Optional string that will be prepended to all output files
(FITS and rendered images).
model : dict
Dictionary defining the properties of the test source.
do_sed : bool
Compute the energy bin-by-bin fits.
nnorm : int
Number of points in the likelihood v. normalization scan.
norm_sigma : float
Number of sigma to use for the scan range.
tol : float
Criteria for fit convergence (estimated vertical distance
to min < tol).
tol_type : int
Absolute (0) or relative (1) criteria for convergence.
max_iter : int
Maximum number of iterations for the Newton's method fitter
remake_test_source : bool
If true, recomputes the test source image (otherwise just shifts it)
st_scan_level : int
make_plots : bool
Write image files.
write_fits : bool
Write a FITS file with the results of the analysis.
Returns
-------
maps : dict
A dictionary containing the `~fermipy.skymap.Map` objects
for TS and source amplitude.
|
entailment
|
def compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0, spatial_model='PointSource',
spatial_size=1E-3):
"""Calculate the observed signal and background counts given models
for the exposure, background intensity, PSF, and source flux.
Parameters
----------
ebins : `~numpy.ndarray`
Array of energy bin edges.
exp : `~numpy.ndarray`
Model for exposure.
psf : `~fermipy.irfs.PSFModel`
Model for average PSF.
bkg : `~numpy.ndarray`
Array of background intensities.
fn : `~fermipy.spectrum.SpectralFunction`
egy_dim : int
Index of energy dimension in ``bkg`` and ``exp`` arrays.
"""
ewidth = utils.edge_to_width(ebins)
ectr = np.exp(utils.edge_to_center(np.log(ebins)))
r68 = psf.containment_angle(ectr, fraction=0.68)
if spatial_model != 'PointSource':
r68[r68 < spatial_size] = spatial_size
# * np.ones((len(ectr), 31))
theta_edges = np.linspace(0.0, 3.0, 31)[np.newaxis, :]
theta_edges = theta_edges * r68[:, np.newaxis]
theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])
domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)
if spatial_model == 'PointSource':
sig_pdf = domega * psf.interp(ectr[:, np.newaxis], theta)
elif spatial_model == 'RadialGaussian':
sig_pdf = domega * utils.convolve2d_gauss(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
theta, spatial_size / 1.5095921854516636, nstep=2000)
elif spatial_model == 'RadialDisk':
sig_pdf = domega * utils.convolve2d_disk(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
theta, spatial_size / 0.8246211251235321)
else:
raise ValueError('Invalid spatial model: {}'.format(spatial_model))
sig_pdf *= (np.pi / 180.)**2
sig_flux = fn.flux(ebins[:-1], ebins[1:])
# Background and signal counts
bkgc = bkg[..., np.newaxis] * domega * exp[..., np.newaxis] * \
ewidth[..., np.newaxis] * (np.pi / 180.)**2
sigc = sig_pdf * sig_flux[..., np.newaxis] * exp[..., np.newaxis]
return sigc, bkgc
|
Calculate the observed signal and background counts given models
for the exposure, background intensity, PSF, and source flux.
Parameters
----------
ebins : `~numpy.ndarray`
Array of energy bin edges.
exp : `~numpy.ndarray`
Model for exposure.
psf : `~fermipy.irfs.PSFModel`
Model for average PSF.
bkg : `~numpy.ndarray`
Array of background intensities.
fn : `~fermipy.spectrum.SpectralFunction`
egy_dim : int
Index of energy dimension in ``bkg`` and ``exp`` arrays.
|
entailment
|
def compute_norm(sig, bkg, ts_thresh, min_counts, sum_axes=None, bkg_fit=None,
rebin_axes=None):
"""Solve for the normalization of the signal distribution at which the
detection test statistic (twice delta-loglikelihood ratio) is >=
``ts_thresh`` AND the number of signal counts >= ``min_counts``.
This function uses the Asimov method to calculate the median
expected TS when the model for the background is fixed (no
uncertainty on the background amplitude).
Parameters
----------
sig : `~numpy.ndarray`
Array of signal amplitudes in counts.
bkg : `~numpy.ndarray`
Array of background amplitudes in counts.
ts_thresh : float
Test statistic threshold.
min_counts : float
Counts threshold.
sum_axes : list
Axes over which the source test statistic should be summed.
By default the summation will be performed over all
dimensions.
bkg_fit : `~numpy.ndarray`
Array of background amplitudes in counts for the fitting
model. If None then the fit model will be equal to the data
model.
"""
if sum_axes is None:
sum_axes = np.arange(sig.ndim)
sig = np.expand_dims(sig, -1)
bkg = np.expand_dims(bkg, -1)
sig_sum = np.apply_over_axes(np.sum, sig, sum_axes)
bkg_sum = np.apply_over_axes(np.sum, bkg, sum_axes)
bkg_fit_sum = None
if bkg_fit is not None:
bkg_fit = np.expand_dims(bkg_fit, -1)
bkg_fit_sum = np.apply_over_axes(np.sum, bkg_fit, sum_axes)
sig_rebin = sig
bkg_rebin = bkg
bkg_fit_rebin = bkg_fit
if rebin_axes:
sig_rebin = sig.copy()
bkg_rebin = bkg.copy()
if bkg_fit is not None:
bkg_fit_rebin = bkg_fit.copy()
for dim, rebin in zip(sum_axes, rebin_axes):
sig_rebin = sum_bins(sig_rebin, dim, rebin)
bkg_rebin = sum_bins(bkg_rebin, dim, rebin)
if bkg_fit is not None:
bkg_fit_rebin = sum_bins(bkg_fit_rebin, dim, rebin)
# Find approx solution using coarse binning and summed arrays
sig_scale = 10**np.linspace(0.0, 10.0, 51) * (min_counts / sig_sum)
vals_approx = _solve_norm(sig_rebin, bkg_rebin, ts_thresh, min_counts,
sig_scale, sum_axes, bkg_fit_rebin)
# Refine solution using an interval (0.1,10) around approx
# solution
sig_scale = (10**np.linspace(0.0, 1.0, 21) *
np.fmax(0.333 * vals_approx[..., None],
min_counts / sig_sum))
vals = _solve_norm(sig, bkg, ts_thresh, min_counts, sig_scale,
sum_axes, bkg_fit)
#sig_scale = 10**np.linspace(0.0, 10.0, 101)*(min_counts / sig_sum)
# vals = _solve_norm(sig, bkg, ts_thresh, min_counts, sig_scale2,
# sum_axes, bkg_fit)
return vals
|
Solve for the normalization of the signal distribution at which the
detection test statistic (twice delta-loglikelihood ratio) is >=
``ts_thresh`` AND the number of signal counts >= ``min_counts``.
This function uses the Asimov method to calculate the median
expected TS when the model for the background is fixed (no
uncertainty on the background amplitude).
Parameters
----------
sig : `~numpy.ndarray`
Array of signal amplitudes in counts.
bkg : `~numpy.ndarray`
Array of background amplitudes in counts.
ts_thresh : float
Test statistic threshold.
min_counts : float
Counts threshold.
sum_axes : list
Axes over which the source test statistic should be summed.
By default the summation will be performed over all
dimensions.
bkg_fit : `~numpy.ndarray`
Array of background amplitudes in counts for the fitting
model. If None then the fit model will be equal to the data
model.
|
entailment
|
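The Asimov median expected TS referenced in `compute_norm` has a closed form when the background is fixed: TS = 2 * sum((s + b) * ln(1 + s/b) - s) over bins. The sketch below checks that this matches a direct delta-Cash evaluation on the Asimov dataset n = s + b; the bin values are invented.

```python
import numpy as np

def asimov_ts(sig, bkg):
    """Median expected detection TS for a fixed background model."""
    return 2.0 * np.sum((sig + bkg) * np.log1p(sig / bkg) - sig)

def cash(counts, model):
    # Cash statistic up to the constant log(n!) term
    return 2.0 * np.sum(model - counts * np.log(model))

sig = np.array([3.0, 5.0, 2.0])
bkg = np.array([10.0, 20.0, 8.0])
n_asimov = sig + bkg

# Delta-Cash between background-only and signal+background on the Asimov data
ts_direct = cash(n_asimov, bkg) - cash(n_asimov, sig + bkg)
print(asimov_ts(sig, bkg), ts_direct)  # the two values agree
```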
def create_psf(event_class, event_type, dtheta, egy, cth):
"""Create an array of PSF response values versus energy and
inclination angle.
Parameters
----------
egy : `~numpy.ndarray`
Energy in MeV.
cth : `~numpy.ndarray`
Cosine of the incidence angle.
"""
irf = create_irf(event_class, event_type)
theta = np.degrees(np.arccos(cth))
m = np.zeros((len(dtheta), len(egy), len(cth)))
for i, x in enumerate(egy):
for j, y in enumerate(theta):
m[:, i, j] = irf.psf().value(dtheta, x, y, 0.0)
return m
|
Create an array of PSF response values versus energy and
inclination angle.
Parameters
----------
egy : `~numpy.ndarray`
Energy in MeV.
cth : `~numpy.ndarray`
Cosine of the incidence angle.
|
entailment
|
def create_edisp(event_class, event_type, erec, egy, cth):
"""Create an array of energy response values versus energy and
inclination angle.
Parameters
----------
egy : `~numpy.ndarray`
Energy in MeV.
cth : `~numpy.ndarray`
Cosine of the incidence angle.
"""
irf = create_irf(event_class, event_type)
theta = np.degrees(np.arccos(cth))
v = np.zeros((len(erec), len(egy), len(cth)))
m = (erec[:,None] / egy[None,:] < 3.0) & (erec[:,None] / egy[None,:] > 0.33333)
# m |= ((erec[:,None] / egy[None,:] < 3.0) &
# (erec[:,None] / egy[None,:] > 0.5) & (egy[None,:] < 10**2.5))
m = np.broadcast_to(m[:,:,None], v.shape)
try:
x = np.ones(v.shape)*erec[:,None,None]
y = np.ones(v.shape)*egy[None,:,None]
z = np.ones(v.shape)*theta[None,None,:]
v[m] = irf.edisp().value(np.ravel(x[m]), np.ravel(y[m]), np.ravel(z[m]), 0.0)
except Exception:  # fall back to the slower per-bin loop below
for i, x in enumerate(egy):
for j, y in enumerate(theta):
m = (erec / x < 3.0) & (erec / x > 0.333)
v[m, i, j] = irf.edisp().value(erec[m], x, y, 0.0)
return v
|
Create an array of energy response values versus energy and
inclination angle.
Parameters
----------
egy : `~numpy.ndarray`
Energy in MeV.
cth : `~numpy.ndarray`
Cosine of the incidence angle.
|
entailment
|
def create_aeff(event_class, event_type, egy, cth):
"""Create an array of effective areas versus energy and incidence
angle. Binning in energy and incidence angle is controlled with
the egy and cth input parameters.
Parameters
----------
event_class : str
Event class string (e.g. P8R2_SOURCE_V6).
event_type : list
egy : array_like
Evaluation points in energy (MeV).
cth : array_like
Evaluation points in cosine of the incidence angle.
"""
irf = create_irf(event_class, event_type)
irf.aeff().setPhiDependence(False)
theta = np.degrees(np.arccos(cth))
# Exposure Matrix
# Dimensions are Etrue and incidence angle
m = np.zeros((len(egy), len(cth)))
for i, x in enumerate(egy):
for j, y in enumerate(theta):
m[i, j] = irf.aeff().value(x, y, 0.0)
return m
|
Create an array of effective areas versus energy and incidence
angle. Binning in energy and incidence angle is controlled with
the egy and cth input parameters.
Parameters
----------
event_class : str
Event class string (e.g. P8R2_SOURCE_V6).
event_type : list
egy : array_like
Evaluation points in energy (MeV).
cth : array_like
Evaluation points in cosine of the incidence angle.
|
entailment
|
def calc_exp(skydir, ltc, event_class, event_types,
egy, cth_bins, npts=None):
"""Calculate the exposure on a 2D grid of energy and incidence angle.
Parameters
----------
npts : int
Number of points by which to sample the response in each
incidence angle bin. If None then npts will be automatically
set such that incidence angle is sampled on intervals of <
0.05 in Cos(Theta).
Returns
-------
exp : `~numpy.ndarray`
2D Array of exposures vs. energy and incidence angle.
"""
if npts is None:
npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.025))
exp = np.zeros((len(egy), len(cth_bins) - 1))
cth_bins = utils.split_bin_edges(cth_bins, npts)
cth = edge_to_center(cth_bins)
ltw = ltc.get_skydir_lthist(skydir, cth_bins).reshape(-1, npts)
for et in event_types:
aeff = create_aeff(event_class, et, egy, cth)
aeff = aeff.reshape(exp.shape + (npts,))
exp += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)
return exp
|
Calculate the exposure on a 2D grid of energy and incidence angle.
Parameters
----------
npts : int
Number of points by which to sample the response in each
incidence angle bin. If None then npts will be automatically
set such that incidence angle is sampled on intervals of <
0.05 in Cos(Theta).
Returns
-------
exp : `~numpy.ndarray`
2D Array of exposures vs. energy and incidence angle.
|
entailment
|
def create_avg_rsp(rsp_fn, skydir, ltc, event_class, event_types, x,
egy, cth_bins, npts=None):
"""Calculate the weighted response function.
"""
if npts is None:
npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05))
wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1))
exps = np.zeros((len(egy), len(cth_bins) - 1))
cth_bins = utils.split_bin_edges(cth_bins, npts)
cth = edge_to_center(cth_bins)
ltw = ltc.get_skydir_lthist(skydir, cth_bins)
ltw = ltw.reshape(-1, npts)
for et in event_types:
rsp = rsp_fn(event_class, et, x, egy, cth)
aeff = create_aeff(event_class, et, egy, cth)
rsp = rsp.reshape(wrsp.shape + (npts,))
aeff = aeff.reshape(exps.shape + (npts,))
wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] *
ltw[np.newaxis, np.newaxis, :, :], axis=-1)
exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)
exps_inv = np.zeros_like(exps)
exps_inv[exps > 0] = 1./exps[exps>0]
wrsp *= exps_inv[np.newaxis, :, :]
return wrsp
|
Calculate the weighted response function.
|
entailment
|
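Stripped of the IRF machinery, the averaging in `create_avg_rsp` is a livetime- and effective-area-weighted mean of the response over incidence angle, sketched here with made-up numbers.

```python
import numpy as np

# Response evaluated at three incidence-angle points inside one cos(theta) bin
rsp = np.array([0.90, 1.00, 1.10])
aeff = np.array([7000.0, 8000.0, 8500.0])   # effective area (cm^2)
ltw = np.array([1.0e5, 2.0e5, 1.5e5])       # livetime per sub-bin (s)

weights = aeff * ltw                        # exposure per sub-bin
avg_rsp = np.sum(rsp * weights) / np.sum(weights)
print(avg_rsp)  # exposure-weighted average response
```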
def create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
egy, cth_bins, npts=None):
"""Generate model for exposure-weighted PSF averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
Energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle.
"""
return create_avg_rsp(create_psf, skydir, ltc,
event_class, event_types,
dtheta, egy, cth_bins, npts)
|
Generate model for exposure-weighted PSF averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
Energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle.
|
entailment
|
def create_avg_edisp(skydir, ltc, event_class, event_types, erec,
egy, cth_bins, npts=None):
"""Generate model for exposure-weighted DRM averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
True energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle.
"""
return create_avg_rsp(create_edisp, skydir, ltc,
event_class, event_types,
erec, egy, cth_bins, npts)
|
Generate model for exposure-weighted DRM averaged over incidence
angle.
Parameters
----------
egy : `~numpy.ndarray`
True energies in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the incidence angle.
|
entailment
|
def create_wtd_psf(skydir, ltc, event_class, event_types, dtheta,
egy_bins, cth_bins, fn, nbin=64, npts=1):
"""Create an exposure- and dispersion-weighted PSF model for a source
with spectral parameterization ``fn``. The calculation performed
by this method accounts for the influence of energy dispersion on
the PSF.
Parameters
----------
dtheta : `~numpy.ndarray`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
nbin : int
Number of bins per decade in true energy.
npts : int
Number of points by which to oversample each energy bin.
"""
#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
etrue = 10**utils.edge_to_center(np.log10(etrue_bins))
psf = create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
etrue, cth_bins)
drm = calc_drm(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, nbin=nbin)
cnts = calc_counts(skydir, ltc, event_class, event_types,
etrue_bins, cth_bins, fn)
wts = drm * cnts[None, :, :]
wts_norm = np.sum(wts, axis=1)
wts_norm[wts_norm == 0] = 1.0
wts = wts / wts_norm[:, None, :]
wpsf = np.sum(wts[None, :, :, :] * psf[:, None, :, :], axis=2)
wts = np.sum(wts[None, :, :, :], axis=2)
if npts > 1:
shape = (wpsf.shape[0], int(wpsf.shape[1] / npts), npts, wpsf.shape[2])
wpsf = np.sum((wpsf * wts).reshape(shape), axis=2)
shape = (wts.shape[0], int(wts.shape[1] / npts), npts, wts.shape[2])
wpsf = wpsf / np.sum(wts.reshape(shape), axis=2)
return wpsf
|
Create an exposure- and dispersion-weighted PSF model for a source
with spectral parameterization ``fn``. The calculation performed
by this method accounts for the influence of energy dispersion on
the PSF.
Parameters
----------
dtheta : `~numpy.ndarray`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
nbin : int
Number of bins per decade in true energy.
npts : int
Number of points by which to oversample each energy bin.
|
entailment
|
def calc_drm(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, nbin=64):
"""Calculate the detector response matrix."""
npts = int(np.ceil(128. / bins_per_dec(egy_bins)))
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
egy = 10**utils.edge_to_center(np.log10(egy_bins))
egy_width = utils.edge_to_width(egy_bins)
etrue = 10**utils.edge_to_center(np.log10(etrue_bins))
edisp = create_avg_edisp(skydir, ltc, event_class, event_types,
egy, etrue, cth_bins)
edisp = edisp * egy_width[:, None, None]
edisp = sum_bins(edisp, 0, npts)
return edisp
|
Calculate the detector response matrix.
|
entailment
|
def calc_counts(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, npts=1):
"""Calculate the expected counts vs. true energy and incidence angle
for a source with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinate.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
npts : int
Number of points by which to oversample each energy bin.
"""
#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
exp = calc_exp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins)
dnde = fn.dnde(egy_bins)
cnts = loglog_quad(egy_bins, exp * dnde[:, None], 0)
cnts = sum_bins(cnts, 0, npts)
return cnts
|
Calculate the expected counts vs. true energy and incidence angle
for a source with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinate.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
npts : int
Number of points by which to oversample each energy bin.
|
entailment
|
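Several of the functions above (e.g. ``calc_counts`` and ``create_wtd_psf``) oversample each energy bin into ``npts`` sub-bins and later collapse the fine grid back onto the coarse one. Below is a minimal, self-contained numpy sketch of that pattern; ``split_log_edges`` is my own stand-in for ``utils.split_bin_edges``, which is not shown here.
import numpy as np

def split_log_edges(log_edges, npts):
    # Split every bin defined by log_edges into npts equal sub-bins (in log space).
    parts = [np.linspace(lo, hi, npts + 1)[:-1]
             for lo, hi in zip(log_edges[:-1], log_edges[1:])]
    return np.append(np.concatenate(parts), log_edges[-1])

egy_bins = np.logspace(2, 5, 4)                               # 3 coarse bins, 100 MeV - 100 GeV
fine_bins = np.exp(split_log_edges(np.log(egy_bins), npts=4))
print(len(egy_bins) - 1, len(fine_bins) - 1)                  # 3 coarse bins -> 12 fine bins

# Quantities computed on the fine grid are summed back onto the coarse grid.
fine_vals = np.ones(len(fine_bins) - 1)
coarse_vals = fine_vals.reshape(-1, 4).sum(axis=1)
print(coarse_vals)                                            # [4. 4. 4.]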
def calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=16, npts=1):
"""Calculate the expected counts vs. observed energy and true
incidence angle for a source with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinate.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
nbin : int
Number of points per decade with which to sample true energy.
npts : int
Number of points by which to oversample each reconstructed energy bin.
"""
#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
# Split energy bins
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
    etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
drm = calc_drm(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, nbin=nbin)
cnts_etrue = calc_counts(skydir, ltc, event_class, event_types,
etrue_bins, cth_bins, fn)
cnts = np.sum(cnts_etrue[None, :, :] * drm[:, :, :], axis=1)
cnts = sum_bins(cnts, 0, npts)
return cnts
|
Calculate the expected counts vs. observed energy and true
incidence angle for a source with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinate.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
egy_bins : `~numpy.ndarray`
Bin edges in observed energy in MeV.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the true incidence angle.
nbin : int
Number of points per decade with which to sample true energy.
npts : int
Number of points by which to oversample each reconstructed energy bin.
|
entailment
|
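The heart of ``calc_counts_edisp`` is the contraction of the detector response matrix with the true-energy counts. A small numpy illustration with random stand-in arrays (all shapes here are assumptions made for the example):
import numpy as np

n_eobs, n_etrue, n_cth = 40, 300, 4                # observed-energy, true-energy, cos(theta) bins
drm = np.random.rand(n_eobs, n_etrue, n_cth)       # response matrix per angle bin
cnts_true = np.random.rand(n_etrue, n_cth)         # counts vs. true energy and angle

# Fold the true-energy counts through the response matrix by summing
# over the true-energy axis (axis=1), exactly as in calc_counts_edisp.
cnts_obs = np.sum(cnts_true[None, :, :] * drm, axis=1)
print(cnts_obs.shape)                              # (40, 4)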
def calc_wtd_exp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=16):
"""Calculate the effective exposure.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
nbin : int
Number of points per decade with which to sample true energy.
"""
cnts = calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=nbin)
flux = fn.flux(egy_bins[:-1], egy_bins[1:])
return cnts / flux[:, None]
|
Calculate the effective exposure.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
nbin : int
Number of points per decade with which to sample true energy.
|
entailment
|
def create(cls, ltc, event_class, event_types, ebins):
"""Create an exposure map from a livetime cube. This method will
generate an exposure map with the same geometry as the
livetime cube (nside, etc.).
Parameters
----------
ltc : `~fermipy.irfs.LTCube`
Livetime cube object.
event_class : str
Event class string.
event_types : list
List of event type strings, e.g. ['FRONT','BACK'].
ebins : `~numpy.ndarray`
Energy bin edges in MeV.
"""
evals = np.sqrt(ebins[1:] * ebins[:-1])
exp = np.zeros((len(evals), ltc.hpx.npix))
for et in event_types:
aeff = create_aeff(event_class, et, evals, ltc.costh_center)
exp += np.sum(aeff.T[:, :, np.newaxis] *
ltc.data[:, np.newaxis, :], axis=0)
hpx = HPX(ltc.hpx.nside, ltc.hpx.nest,
ltc.hpx.coordsys, ebins=ebins)
return cls(exp, hpx)
|
Create an exposure map from a livetime cube. This method will
generate an exposure map with the same geometry as the
livetime cube (nside, etc.).
Parameters
----------
ltc : `~fermipy.irfs.LTCube`
Livetime cube object.
event_class : str
Event class string.
event_types : list
List of event type strings, e.g. ['FRONT','BACK'].
ebins : `~numpy.ndarray`
Energy bin edges in MeV.
|
entailment
|
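The exposure sum in ``create`` broadcasts an effective-area table against the livetime cube and sums over the cos(theta) axis. A toy numpy version of that contraction with made-up shapes:
import numpy as np

n_e, n_cth, n_pix = 8, 40, 12 * 16 ** 2            # energies, cos(theta) bins, HEALPix pixels
aeff = np.random.rand(n_e, n_cth)                  # effective area per energy and angle
livetime = np.random.rand(n_cth, n_pix)            # livetime per angle bin and pixel

# exposure[e, pix] = sum_cth aeff[e, cth] * livetime[cth, pix]
exp = np.sum(aeff.T[:, :, np.newaxis] * livetime[:, np.newaxis, :], axis=0)
print(exp.shape)                                   # (8, 3072)

# The same contraction written as an einsum, for comparison.
np.testing.assert_allclose(exp, np.einsum('ec,cp->ep', aeff, livetime))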
def eval(self, ebin, dtheta, scale_fn=None):
"""Evaluate the PSF at the given energy bin index.
Parameters
----------
ebin : int
Index of energy bin.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
"""
if scale_fn is None and self.scale_fn is not None:
scale_fn = self.scale_fn
if scale_fn is None:
scale_factor = 1.0
else:
dtheta = dtheta / scale_fn(self.energies[ebin])
scale_factor = 1. / scale_fn(self.energies[ebin])**2
vals = 10**np.interp(dtheta, self.dtheta, np.log10(self.val[:, ebin]))
return vals * scale_factor
|
Evaluate the PSF at the given energy bin index.
Parameters
----------
ebin : int
Index of energy bin.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
|
entailment
|
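The scale function in ``eval`` (and ``interp``) shrinks the angular offset by s(E) and rescales the amplitude by 1/s(E)^2, which leaves the solid-angle normalization unchanged. A standalone numpy check with a toy Gaussian PSF (the PSF shape and scale value are invented for the example):
import numpy as np

def toy_psf(dtheta, sigma=0.5):
    # Toy 2D Gaussian PSF (deg^-2), normalized over solid angle in the small-angle limit.
    return np.exp(-0.5 * (dtheta / sigma) ** 2) / (2. * np.pi * np.radians(sigma) ** 2)

def scaled_psf(dtheta, scale):
    # Same transformation as PSF.eval: rescale the offset, then the amplitude.
    return toy_psf(dtheta / scale) / scale ** 2

dtheta = np.linspace(0., 5., 2001)
domega = 2. * np.pi * np.radians(dtheta) * np.radians(dtheta[1] - dtheta[0])
print(np.sum(toy_psf(dtheta) * domega))            # ~1.0
print(np.sum(scaled_psf(dtheta, 0.7) * domega))    # still ~1.0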
def interp(self, energies, dtheta, scale_fn=None):
"""Evaluate the PSF model at an array of energies and angular
separations.
Parameters
----------
energies : array_like
Array of energies in MeV.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
"""
if scale_fn is None and self.scale_fn:
scale_fn = self.scale_fn
log_energies = np.log10(energies)
shape = (energies * dtheta).shape
scale_factor = np.ones(shape)
if scale_fn is not None:
dtheta = dtheta / scale_fn(energies)
scale_factor = 1. / scale_fn(energies)**2
vals = np.exp(self._psf_fn((dtheta, log_energies)))
return vals * scale_factor
|
Evaluate the PSF model at an array of energies and angular
separations.
Parameters
----------
energies : array_like
Array of energies in MeV.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
|
entailment
|
def interp_bin(self, egy_bins, dtheta, scale_fn=None):
"""Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.
Parameters
----------
egy_bins : array_like
Energy bin edges in MeV.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
"""
npts = 4
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
egy = np.exp(utils.edge_to_center(np.log(egy_bins)))
log_energies = np.log10(egy)
vals = self.interp(egy[None, :], dtheta[:, None],
scale_fn=scale_fn)
wts = np.exp(self._wts_fn((log_energies,)))
wts = wts.reshape((1,) + wts.shape)
vals = np.sum(
(vals * wts).reshape((vals.shape[0], int(vals.shape[1] / npts), npts)), axis=2)
vals /= np.sum(wts.reshape(wts.shape[0],
int(wts.shape[1] / npts), npts), axis=2)
return vals
|
Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.
Parameters
----------
egy_bins : array_like
Energy bin edges in MeV.
dtheta : array_like
Array of angular separations in degrees.
scale_fn : callable
Function that evaluates the PSF scaling function.
Argument is energy in MeV.
|
entailment
|
def containment_angle(self, energies=None, fraction=0.68, scale_fn=None):
"""Evaluate the PSF containment angle at a sequence of energies."""
if energies is None:
energies = self.energies
vals = self.interp(energies[np.newaxis, :], self.dtheta[:, np.newaxis],
scale_fn=scale_fn)
dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))
return self._calc_containment(dtheta, vals, fraction)
|
Evaluate the PSF containment angle at a sequence of energies.
|
entailment
|
def containment_angle_bin(self, egy_bins, fraction=0.68, scale_fn=None):
"""Evaluate the PSF containment angle averaged over energy bins."""
vals = self.interp_bin(egy_bins, self.dtheta, scale_fn=scale_fn)
dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))
return self._calc_containment(dtheta, vals, fraction)
|
Evaluate the PSF containment angle averaged over energy bins.
|
entailment
|
def create(cls, skydir, ltc, event_class, event_types, energies, cth_bins=None,
ndtheta=500, use_edisp=False, fn=None, nbin=64):
"""Create a PSFModel object. This class can be used to evaluate the
exposure-weighted PSF for a source with a given observing
profile and energy distribution.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
energies : `~numpy.ndarray`
Grid of energies at which the PSF will be pre-computed.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the inclination angle.
use_edisp : bool
Generate the PSF model accounting for the influence of
energy dispersion.
fn : `~fermipy.spectrum.SpectralFunction`
Model for the spectral energy distribution of the source.
"""
if isinstance(event_types, int):
event_types = bitmask_to_bits(event_types)
if fn is None:
fn = spectrum.PowerLaw([1E-13, -2.0])
dtheta = np.logspace(-4, 1.75, ndtheta)
dtheta = np.insert(dtheta, 0, [0])
log_energies = np.log10(energies)
egy_bins = 10**utils.center_to_edge(log_energies)
if cth_bins is None:
cth_bins = np.array([0.2, 1.0])
if use_edisp:
psf = create_wtd_psf(skydir, ltc, event_class, event_types,
dtheta, egy_bins, cth_bins, fn, nbin=nbin)
wts = calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=nbin)
else:
psf = create_avg_psf(skydir, ltc, event_class, event_types,
dtheta, energies, cth_bins)
wts = calc_counts(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn)
exp = calc_exp(skydir, ltc, event_class, event_types,
energies, cth_bins)
return cls(dtheta, energies, cth_bins, np.squeeze(exp), np.squeeze(psf),
np.squeeze(wts))
|
Create a PSFModel object. This class can be used to evaluate the
exposure-weighted PSF for a source with a given observing
profile and energy distribution.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
energies : `~numpy.ndarray`
Grid of energies at which the PSF will be pre-computed.
cth_bins : `~numpy.ndarray`
Bin edges in cosine of the inclination angle.
use_edisp : bool
Generate the PSF model accounting for the influence of
energy dispersion.
fn : `~fermipy.spectrum.SpectralFunction`
Model for the spectral energy distribution of the source.
|
entailment
|
def remove_file(filepath, dry_run=False):
"""Remove the file at filepath
Catches exception if the file does not exist.
If dry_run is True, print name of file to be removed, but do not remove it.
"""
if dry_run:
sys.stdout.write("rm %s\n" % filepath)
else:
try:
os.remove(filepath)
except OSError:
pass
|
Remove the file at filepath
Catches exception if the file does not exist.
If dry_run is True, print name of file to be removed, but do not remove it.
|
entailment
|
def clean_job(logfile, outfiles, dry_run=False):
"""Removes log file and files created by failed jobs.
If dry_run is True, print name of files to be removed, but do not remove them.
"""
remove_file(logfile, dry_run)
for outfile in outfiles.values():
remove_file(outfile, dry_run)
|
Removes log file and files created by failed jobs.
If dry_run is True, print name of files to be removed, but do not remove them.
|
entailment
|
def check_log(logfile, exited='Exited with exit code',
successful='Successfully completed'):
"""Check a log file to determine status of LSF job
    The logfile may not exist if the job has not yet begun to run;
    in that case the job is treated as ready.
Parameters
----------
logfile : str
String with path to logfile
exited : str
Value to check for in existing logfile for exit with failure
successful : str
Value to check for in existing logfile for success
    Returns a `JobStatus` enum value: ready, running, done, or failed.
"""
if not os.path.exists(logfile):
return JobStatus.ready
    content = open(logfile).read()
    if exited in content:
        return JobStatus.failed
    elif successful in content:
        return JobStatus.done
return JobStatus.running
|
Check a log file to determine status of LSF job
The logfile may not exist if the job has not yet begun to run;
in that case the job is treated as ready.
Parameters
----------
logfile : str
String with path to logfile
exited : str
Value to check for in existing logfile for exit with failure
successful : str
Value to check for in existing logfile for success
Returns a `JobStatus` enum value: ready, running, done, or failed.
|
entailment
|
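A quick usage sketch for ``check_log``, writing a fake LSF log to a temporary file. It assumes ``check_log`` and ``JobStatus`` are importable from the surrounding module:
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.log', delete=False) as tmp:
    tmp.write("... job output ...\nSuccessfully completed.\n")
    logfile = tmp.name

print(check_log(logfile))          # JobStatus.done
print(check_log('no_such.log'))    # JobStatus.ready -- log not written yet
os.remove(logfile)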
def check_job(cls, job_details):
""" Check the status of a specfic job """
return check_log(job_details.logfile, cls.string_exited, cls.string_successful)
|
Check the status of a specific job
|
entailment
|
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
"""Hook to dispatch a single job"""
raise NotImplementedError("SysInterface.dispatch_job_hook")
|
Hook to dispatch a single job
|
entailment
|
def dispatch_job(self, link, key, job_archive, stream=sys.stdout):
"""Function to dispatch a single job
Parameters
----------
link : `Link`
        Link object that sends the job
key : str
Key used to identify this particular job
job_archive : `JobArchive`
Archive used to keep track of jobs
Returns `JobDetails` object
"""
try:
job_details = link.jobs[key]
except KeyError:
        print(key, link.jobs)
        raise
job_config = job_details.job_config
link.update_args(job_config)
logfile = job_config['logfile']
try:
self.dispatch_job_hook(link, key, job_config, logfile, stream)
job_details.status = JobStatus.running
except IOError:
job_details.status = JobStatus.failed
if job_archive is not None:
job_archive.register_job(job_details)
return job_details
|
Function to dispatch a single job
Parameters
----------
link : `Link`
    Link object that sends the job
key : str
Key used to identify this particular job
job_archive : `JobArchive`
Archive used to keep track of jobs
Returns `JobDetails` object
|
entailment
|
def submit_jobs(self, link, job_dict=None, job_archive=None, stream=sys.stdout):
"""Run the `Link` with all of the items job_dict as input.
    If job_dict is None, the job_dict will be taken from link.jobs
Returns a `JobStatus` enum
"""
failed = False
if job_dict is None:
job_dict = link.jobs
for job_key, job_details in sorted(job_dict.items()):
job_config = job_details.job_config
# clean failed jobs
if job_details.status == JobStatus.failed:
clean_job(job_details.logfile,
job_details.outfiles, self._dry_run)
# clean_job(job_details.logfile, {}, self._dry_run)
job_config['logfile'] = job_details.logfile
new_job_details = self.dispatch_job(
link, job_key, job_archive, stream)
if new_job_details.status == JobStatus.failed:
failed = True
clean_job(new_job_details.logfile,
new_job_details.outfiles, self._dry_run)
link.jobs[job_key] = new_job_details
if failed:
return JobStatus.failed
return JobStatus.done
|
Run the `Link` with all of the items job_dict as input.
If job_dict is None, the job_dict will be taken from link.jobs
Returns a `JobStatus` enum
|
entailment
|
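``submit_jobs`` and ``dispatch_job`` only rely on ``dispatch_job_hook``, so a concrete batch interface just overrides that one method. A hypothetical minimal subclass (the rest of ``SysInterface`` is assumed from the methods above):
import sys

class EchoInterface(SysInterface):
    """Dry-run dispatcher that records what it would have submitted."""

    def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
        # Instead of submitting to a batch system, just echo the job.
        stream.write('submit %s[%s] -> log %s\n' % (link.appname, key, logfile))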
def clean_jobs(self, link, job_dict=None, clean_all=False):
""" Clean up all the jobs associated with this link.
Returns a `JobStatus` enum
"""
failed = False
if job_dict is None:
job_dict = link.jobs
for job_details in job_dict.values():
# clean failed jobs
if job_details.status == JobStatus.failed or clean_all:
# clean_job(job_details.logfile, job_details.outfiles, self._dry_run)
clean_job(job_details.logfile, {}, self._dry_run)
job_details.status = JobStatus.ready
if failed:
return JobStatus.failed
return JobStatus.done
|
Clean up all the jobs associated with this link.
Returns a `JobStatus` enum
|
entailment
|
def get_function_spec(name):
"""Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
default : dict
Parameter defaults dictionary.
"""
if not hasattr(get_function_spec, 'fndict'):
modelfile = os.path.join('$FERMIPY_ROOT',
'data', 'models.yaml')
modelfile = os.path.expandvars(modelfile)
        # Use the safe loader; yaml.load without an explicit Loader is deprecated.
        get_function_spec.fndict = yaml.safe_load(open(modelfile))
    if name not in get_function_spec.fndict:
raise Exception('Invalid Function Name: %s' % name)
return get_function_spec.fndict[name]
|
Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
default : dict
Parameter defaults dictionary.
|
entailment
|
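``get_function_spec`` caches the parsed YAML as an attribute of the function itself, so the model file is read only once per process. The same pattern in miniature (no YAML needed for the illustration):
def get_spec(name):
    # Load the definitions on the first call and cache them on the function object.
    if not hasattr(get_spec, 'cache'):
        print('loading definitions...')
        get_spec.cache = {'PowerLaw': {'norm_par': 'Prefactor'}}
    if name not in get_spec.cache:
        raise KeyError('Invalid function name: %s' % name)
    return get_spec.cache[name]

get_spec('PowerLaw')   # prints 'loading definitions...'
get_spec('PowerLaw')   # second call is served from the cache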
def get_spatial_type(spatial_model):
"""Translate a spatial model string to a spatial type."""
if spatial_model in ['SkyDirFunction', 'PointSource',
'Gaussian']:
return 'SkyDirFunction'
elif spatial_model in ['SpatialMap']:
return 'SpatialMap'
elif spatial_model in ['RadialGaussian', 'RadialDisk']:
try:
import pyLikelihood
if hasattr(pyLikelihood, 'RadialGaussian'):
return spatial_model
else:
return 'SpatialMap'
except Exception:
return spatial_model
else:
return spatial_model
|
Translate a spatial model string to a spatial type.
|
entailment
|
def create_pars_from_dict(name, pars_dict, rescale=True, update_bounds=False):
"""Create a dictionary for the parameters of a function.
Parameters
----------
name : str
Name of the function.
pars_dict : dict
Existing parameter dict that will be merged with the
default dictionary created by this method.
rescale : bool
Rescale parameter values.
"""
o = get_function_defaults(name)
pars_dict = pars_dict.copy()
for k in o.keys():
        if k not in pars_dict:
continue
v = pars_dict[k]
if not isinstance(v, dict):
v = {'name': k, 'value': v}
o[k].update(v)
kw = dict(update_bounds=update_bounds,
rescale=rescale)
if 'min' in v or 'max' in v:
kw['update_bounds'] = False
if 'scale' in v:
kw['rescale'] = False
o[k] = make_parameter_dict(o[k], **kw)
return o
|
Create a dictionary for the parameters of a function.
Parameters
----------
name : str
Name of the function.
pars_dict : dict
Existing parameter dict that will be merged with the
default dictionary created by this method.
rescale : bool
Rescale parameter values.
|
entailment
|
def make_parameter_dict(pdict, fixed_par=False, rescale=True,
update_bounds=False):
"""
Update a parameter dictionary. This function will automatically
set the parameter scale and bounds if they are not defined.
Bounds are also adjusted to ensure that they encompass the
parameter value.
"""
o = copy.deepcopy(pdict)
o.setdefault('scale', 1.0)
if rescale:
value, scale = utils.scale_parameter(o['value'] * o['scale'])
o['value'] = np.abs(value) * np.sign(o['value'])
o['scale'] = np.abs(scale) * np.sign(o['scale'])
if 'error' in o:
o['error'] /= np.abs(scale)
if update_bounds:
o['min'] = o['value'] * 1E-3
o['max'] = o['value'] * 1E3
if fixed_par:
o['min'] = o['value']
o['max'] = o['value']
if float(o['min']) > float(o['value']):
o['min'] = o['value']
if float(o['max']) < float(o['value']):
o['max'] = o['value']
return o
|
Update a parameter dictionary. This function will automatically
set the parameter scale and bounds if they are not defined.
Bounds are also adjusted to ensure that they encompass the
parameter value.
|
entailment
|
def cast_pars_dict(pars_dict):
"""Cast the bool and float elements of a parameters dict to
the appropriate python types.
"""
o = {}
for pname, pdict in pars_dict.items():
o[pname] = {}
for k, v in pdict.items():
if k == 'free':
o[pname][k] = bool(int(v))
elif k == 'name':
o[pname][k] = v
else:
o[pname][k] = float(v)
return o
|
Cast the bool and float elements of a parameters dict to
the appropriate python types.
|
entailment
|
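A usage sketch for ``cast_pars_dict`` (assuming the function above is importable); the parameter dictionary below mimics values read back as strings, e.g. from XML, and is invented for the example:
pars = {
    'Prefactor': {'name': 'Prefactor', 'value': '1e-11', 'scale': '1.0',
                  'min': '1e-14', 'max': '1e-8', 'free': '1'},
    'Index': {'name': 'Index', 'value': '-2.1', 'scale': '1.0',
              'min': '-5', 'max': '5', 'free': '0'},
}
cast = cast_pars_dict(pars)
print(cast['Prefactor']['free'])          # True (bool)
print(type(cast['Index']['value']))       # <class 'float'>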
def do_gather(flist):
""" Gather all the HDUs from a list of files"""
hlist = []
nskip = 3
for fname in flist:
fin = fits.open(fname)
if len(hlist) == 0:
if fin[1].name == 'SKYMAP':
nskip = 4
start = 0
else:
start = nskip
for h in fin[start:]:
hlist.append(h)
hdulistout = fits.HDUList(hlist)
return hdulistout
|
Gather all the HDUs from a list of files
|
entailment
|
def main():
""" Main function for command line usage """
usage = "usage: %(prog)s [options] "
description = "Gather source maps from Fermi-LAT files."
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument('-o', '--output', default=None, type=str,
help='Output file.')
parser.add_argument('--clobber', default=False, action='store_true',
help='Overwrite output file.')
parser.add_argument('--gzip', action='store_true',
help='Compress output file')
parser.add_argument('--rm', action='store_true',
help='Remove input files.')
parser.add_argument('files', nargs='+', default=None,
help='List of input files.')
args = parser.parse_args()
hdulistout = do_gather(args.files)
if args.output:
        hdulistout.writeto(args.output, overwrite=args.clobber)
if args.gzip:
os.system('gzip -9 %s'%args.output)
if args.rm:
for farg in args.files:
flist = glob.glob(farg)
for ffound in flist:
                os.unlink(ffound)
|
Main function for command line usage
|
entailment
|
def main_browse():
"""Entry point for command line use for browsing a JobArchive """
parser = argparse.ArgumentParser(usage="job_archive.py [options]",
description="Browse a job archive")
parser.add_argument('--jobs', action='store', dest='job_archive_table',
type=str, default='job_archive_temp2.fits', help="Job archive file")
parser.add_argument('--files', action='store', dest='file_archive_table',
type=str, default='file_archive_temp2.fits', help="File archive file")
parser.add_argument('--base', action='store', dest='base_path',
type=str, default=os.path.abspath('.'), help="File archive base path")
args = parser.parse_args(sys.argv[1:])
job_ar = JobArchive.build_archive(**args.__dict__)
job_ar.table.pprint()
|
Entry point for command line use for browsing a JobArchive
|
entailment
|
def n_waiting(self):
"""Return the number of jobs in various waiting states"""
return self._counters[JobStatus.no_job] +\
self._counters[JobStatus.unknown] +\
self._counters[JobStatus.not_ready] +\
self._counters[JobStatus.ready]
|
Return the number of jobs in various waiting states
|
entailment
|
def n_failed(self):
"""Return the number of failed jobs"""
return self._counters[JobStatus.failed] + self._counters[JobStatus.partial_failed]
|
Return the number of failed jobs
|
entailment
|
def get_status(self):
"""Return an overall status based
on the number of jobs in various states.
"""
if self.n_total == 0:
return JobStatus.no_job
elif self.n_done == self.n_total:
return JobStatus.done
elif self.n_failed > 0:
        # If more than a quarter of the jobs fail, fail the whole thing
if self.n_failed > self.n_total / 4.:
return JobStatus.failed
return JobStatus.partial_failed
elif self.n_running > 0:
return JobStatus.running
elif self.n_pending > 0:
return JobStatus.pending
return JobStatus.ready
|
Return an overall status based
on the number of jobs in various states.
|
entailment
|
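The aggregation rule in ``get_status`` (fail the whole batch once more than a quarter of its jobs have failed) can be written as a small standalone helper, shown here as a sketch rather than the class method itself:
def overall_status(n_total, n_done, n_failed, n_running=0, n_pending=0):
    # Collapse per-job counters into one coarse state, mirroring get_status.
    if n_total == 0:
        return 'no_job'
    if n_done == n_total:
        return 'done'
    if n_failed > 0:
        return 'failed' if n_failed > n_total / 4. else 'partial_failed'
    if n_running > 0:
        return 'running'
    if n_pending > 0:
        return 'pending'
    return 'ready'

print(overall_status(100, 50, 10))   # partial_failed
print(overall_status(100, 50, 30))   # failed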
def make_tables(job_dict):
"""Build and return an `astropy.table.Table' to store `JobDetails`"""
col_dbkey = Column(name='dbkey', dtype=int)
col_jobname = Column(name='jobname', dtype='S64')
col_jobkey = Column(name='jobkey', dtype='S64')
col_appname = Column(name='appname', dtype='S64')
col_logfile = Column(name='logfile', dtype='S256')
col_job_config = Column(name='job_config', dtype='S1024')
col_timestamp = Column(name='timestamp', dtype=int)
    col_infile_refs = Column(name='infile_refs', dtype=int, shape=(2,))
    col_outfile_refs = Column(name='outfile_refs', dtype=int, shape=(2,))
    col_rmfile_refs = Column(name='rmfile_refs', dtype=int, shape=(2,))
    col_intfile_refs = Column(name='intfile_refs', dtype=int, shape=(2,))
col_status = Column(name='status', dtype=int)
columns = [col_dbkey, col_jobname, col_jobkey, col_appname,
col_logfile, col_job_config, col_timestamp,
col_infile_refs, col_outfile_refs,
col_rmfile_refs, col_intfile_refs,
col_status]
table = Table(data=columns)
col_file_ids = Column(name='file_id', dtype=int)
table_ids = Table(data=[col_file_ids])
for val in job_dict.values():
val.append_to_tables(table, table_ids)
return table, table_ids
|
Build and return an `astropy.table.Table` to store `JobDetails`
|
entailment
|
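The (start, stop) reference columns built by ``make_tables`` and filled by ``append_to_tables`` index into a second table of file ids. A compact standalone illustration of that two-table pattern using astropy (the job names and ids are invented):
import numpy as np
from astropy.table import Table, Column

jobs = Table(data=[Column(name='jobname', dtype='S64'),
                   Column(name='infile_refs', dtype=int, shape=(2,))])
file_ids = Table(data=[Column(name='file_id', dtype=int)])

def add_job(name, ids):
    # Append the ids and store the [start, stop) range they occupy.
    start = len(file_ids)
    for fid in ids:
        file_ids.add_row(dict(file_id=fid))
    jobs.add_row(dict(jobname=name, infile_refs=(start, len(file_ids))))

add_job('gtselect', [3, 5, 8])
add_job('gtmktime', [8, 9])

# Recover the ids of the second job from its reference range.
start, stop = jobs[1]['infile_refs']
print(np.array(file_ids['file_id'][start:stop]))   # [8 9]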
def get_file_ids(self, file_archive, creator=None, status=FileStatus.no_file):
"""Fill the file id arrays from the file lists
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
creator : int
        A unique key for the job that created these files
    status : `FileStatus`
        Enumeration giving the current status of these files
"""
file_dict = copy.deepcopy(self.file_dict)
if self.sub_file_dict is not None:
file_dict.update(self.sub_file_dict.file_dict)
infiles = file_dict.input_files
outfiles = file_dict.output_files
rmfiles = file_dict.temp_files
int_files = file_dict.internal_files
if self.infile_ids is None:
if infiles is not None:
self.infile_ids = np.zeros((len(infiles)), int)
filelist = file_archive.get_file_ids(
infiles, creator, FileStatus.expected, file_dict)
JobDetails._fill_array_from_list(filelist, self.infile_ids)
else:
self.infile_ids = np.zeros((0), int)
if self.outfile_ids is None:
if outfiles is not None:
self.outfile_ids = np.zeros((len(outfiles)), int)
filelist = file_archive.get_file_ids(
outfiles, creator, status, file_dict)
JobDetails._fill_array_from_list(filelist, self.outfile_ids)
else:
self.outfile_ids = np.zeros((0), int)
if self.rmfile_ids is None:
if rmfiles is not None:
self.rmfile_ids = np.zeros((len(rmfiles)), int)
filelist = file_archive.get_file_ids(rmfiles)
JobDetails._fill_array_from_list(filelist, self.rmfile_ids)
else:
self.rmfile_ids = np.zeros((0), int)
if self.intfile_ids is None:
if int_files is not None:
self.intfile_ids = np.zeros((len(int_files)), int)
filelist = file_archive.get_file_ids(
int_files, creator, status)
JobDetails._fill_array_from_list(filelist, self.intfile_ids)
else:
self.intfile_ids = np.zeros((0), int)
|
Fill the file id arrays from the file lists
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
creator : int
    A unique key for the job that created these files
status : `FileStatus`
    Enumeration giving the current status of these files
|
entailment
|
def get_file_paths(self, file_archive, file_id_array):
"""Get the full paths of the files used by this object from the the id arrays
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
file_id_array : `numpy.array`
Array that remaps the file indexes
"""
full_list = []
status_dict = {}
full_list += file_archive.get_file_paths(
file_id_array[self.infile_ids])
full_list += file_archive.get_file_paths(
file_id_array[self.outfile_ids])
full_list += file_archive.get_file_paths(
file_id_array[self.rmfile_ids])
full_list += file_archive.get_file_paths(
file_id_array[self.intfile_ids])
for filepath in full_list:
handle = file_archive.get_handle(filepath)
status_dict[filepath] = handle.status
if self.file_dict is None:
self.file_dict = FileDict()
self.file_dict.update(status_dict)
|
Get the full paths of the files used by this object from the id arrays
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
file_id_array : `numpy.array`
Array that remaps the file indexes
|
entailment
|
def _fill_array_from_list(the_list, the_array):
"""Fill an `array` from a `list`"""
for i, val in enumerate(the_list):
the_array[i] = val
return the_array
|
Fill an `array` from a `list`
|
entailment
|
def make_dict(cls, table):
"""Build a dictionary map int to `JobDetails` from an `astropy.table.Table`"""
ret_dict = {}
for row in table:
job_details = cls.create_from_row(row)
ret_dict[job_details.dbkey] = job_details
return ret_dict
|
Build a dictionary mapping int to `JobDetails` from an `astropy.table.Table`
|
entailment
|
def create_from_row(cls, table_row):
"""Create a `JobDetails` from an `astropy.table.row.Row` """
kwargs = {}
for key in table_row.colnames:
kwargs[key] = table_row[key]
infile_refs = kwargs.pop('infile_refs')
outfile_refs = kwargs.pop('outfile_refs')
rmfile_refs = kwargs.pop('rmfile_refs')
intfile_refs = kwargs.pop('intfile_refs')
kwargs['infile_ids'] = np.arange(infile_refs[0], infile_refs[1])
kwargs['outfile_ids'] = np.arange(outfile_refs[0], outfile_refs[1])
kwargs['rmfile_ids'] = np.arange(rmfile_refs[0], rmfile_refs[1])
kwargs['intfile_ids'] = np.arange(intfile_refs[0], intfile_refs[1])
return cls(**kwargs)
|
Create a `JobDetails` from an `astropy.table.row.Row`
|
entailment
|
def append_to_tables(self, table, table_ids):
"""Add this instance as a row on a `astropy.table.Table` """
infile_refs = np.zeros((2), int)
outfile_refs = np.zeros((2), int)
rmfile_refs = np.zeros((2), int)
intfile_refs = np.zeros((2), int)
f_ptr = len(table_ids['file_id'])
infile_refs[0] = f_ptr
if self.infile_ids is not None:
for fid in self.infile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
infile_refs[1] = f_ptr
outfile_refs[0] = f_ptr
if self.outfile_ids is not None:
for fid in self.outfile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
outfile_refs[1] = f_ptr
rmfile_refs[0] = f_ptr
if self.rmfile_ids is not None:
for fid in self.rmfile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
rmfile_refs[1] = f_ptr
intfile_refs[0] = f_ptr
if self.intfile_ids is not None:
for fid in self.intfile_ids:
table_ids.add_row(dict(file_id=fid))
f_ptr += 1
intfile_refs[1] = f_ptr
table.add_row(dict(dbkey=self.dbkey,
jobname=self.jobname,
jobkey=self.jobkey,
appname=self.appname,
logfile=self.logfile,
job_config=str(self.job_config),
timestamp=self.timestamp,
infile_refs=infile_refs,
outfile_refs=outfile_refs,
rmfile_refs=rmfile_refs,
intfile_refs=intfile_refs,
status=self.status))
|
Append this instance as a row to an `astropy.table.Table`
|
entailment
|
def update_table_row(self, table, row_idx):
"""Add this instance as a row on a `astropy.table.Table` """
try:
table[row_idx]['timestamp'] = self.timestamp
table[row_idx]['status'] = self.status
except IndexError:
print("Index error", len(table), row_idx)
|
Update the row for this instance in an `astropy.table.Table`
|
entailment
|
def check_status_logfile(self, checker_func):
"""Check on the status of this particular job using the logfile"""
self.status = checker_func(self.logfile)
return self.status
|
Check on the status of this particular job using the logfile
|
entailment
|
def _fill_cache(self):
"""Fill the cache from the `astropy.table.Table`"""
for irow in range(len(self._table)):
job_details = self.make_job_details(irow)
self._cache[job_details.fullkey] = job_details
|
Fill the cache from the `astropy.table.Table`
|
entailment
|
def _read_table_file(self, table_file):
"""Read an `astropy.table.Table` from table_file to set up the `JobArchive`"""
self._table_file = table_file
if os.path.exists(self._table_file):
self._table = Table.read(self._table_file, hdu='JOB_ARCHIVE')
self._table_ids = Table.read(self._table_file, hdu='FILE_IDS')
else:
self._table, self._table_ids = JobDetails.make_tables({})
self._table_id_array = self._table_ids['file_id'].data
self._fill_cache()
|
Read an `astropy.table.Table` from table_file to set up the `JobArchive`
|
entailment
|
def make_job_details(self, row_idx):
"""Create a `JobDetails` from an `astropy.table.row.Row` """
row = self._table[row_idx]
job_details = JobDetails.create_from_row(row)
job_details.get_file_paths(self._file_archive, self._table_id_array)
self._cache[job_details.fullkey] = job_details
return job_details
|
Create a `JobDetails` from an `astropy.table.row.Row`
|
entailment
|
def get_details(self, jobname, jobkey):
"""Get the `JobDetails` associated to a particular job instance"""
fullkey = JobDetails.make_fullkey(jobname, jobkey)
return self._cache[fullkey]
|
Get the `JobDetails` associated to a particular job instance
|
entailment
|
def register_job(self, job_details):
"""Register a job in this `JobArchive` """
# check to see if the job already exists
try:
job_details_old = self.get_details(job_details.jobname,
job_details.jobkey)
if job_details_old.status <= JobStatus.running:
job_details_old.status = job_details.status
job_details_old.update_table_row(
self._table, job_details_old.dbkey - 1)
job_details = job_details_old
except KeyError:
job_details.dbkey = len(self._table) + 1
job_details.get_file_ids(
self._file_archive, creator=job_details.dbkey)
job_details.append_to_tables(self._table, self._table_ids)
self._table_id_array = self._table_ids['file_id'].data
self._cache[job_details.fullkey] = job_details
return job_details
|
Register a job in this `JobArchive`
|
entailment
|
def register_jobs(self, job_dict):
"""Register a bunch of jobs in this archive"""
njobs = len(job_dict)
sys.stdout.write("Registering %i total jobs: " % njobs)
for i, job_details in enumerate(job_dict.values()):
if i % 10 == 0:
sys.stdout.write('.')
sys.stdout.flush()
self.register_job(job_details)
sys.stdout.write('!\n')
|
Register a bunch of jobs in this archive
|
entailment
|
def register_job_from_link(self, link, key, **kwargs):
"""Register a job in the `JobArchive` from a `Link` object """
job_config = kwargs.get('job_config', None)
if job_config is None:
job_config = link.args
status = kwargs.get('status', JobStatus.unknown)
job_details = JobDetails(jobname=link.linkname,
jobkey=key,
appname=link.appname,
logfile=kwargs.get('logfile'),
jobconfig=job_config,
timestamp=get_timestamp(),
file_dict=copy.deepcopy(link.files),
sub_file_dict=copy.deepcopy(link.sub_files),
status=status)
self.register_job(job_details)
return job_details
|
Register a job in the `JobArchive` from a `Link` object
|
entailment
|
def update_job(self, job_details):
"""Update a job in the `JobArchive` """
other = self.get_details(job_details.jobname,
job_details.jobkey)
other.timestamp = job_details.timestamp
other.status = job_details.status
other.update_table_row(self._table, other.dbkey - 1)
return other
|
Update a job in the `JobArchive`
|
entailment
|
def remove_jobs(self, mask):
"""Mark all jobs that match a mask as 'removed' """
    jobnames = self.table[mask]['jobname']
    jobkeys = self.table[mask]['jobkey']
    # Assign through the column so the table is modified in place;
    # indexing the table with a boolean mask returns a copy.
    self.table['status'][mask] = JobStatus.removed
    for jobname, jobkey in zip(jobnames, jobkeys):
fullkey = JobDetails.make_fullkey(jobname, jobkey)
self._cache.pop(fullkey).status = JobStatus.removed
self.write_table_file()
|
Mark all jobs that match a mask as 'removed'
|
entailment
|
def build_temp_job_archive(cls):
"""Build and return a `JobArchive` using defualt locations of
persistent files. """
try:
os.unlink('job_archive_temp.fits')
os.unlink('file_archive_temp.fits')
except OSError:
pass
cls._archive = cls(job_archive_table='job_archive_temp.fits',
file_archive_table='file_archive_temp.fits',
base_path=os.path.abspath('.') + '/')
return cls._archive
|
Build and return a `JobArchive` using default locations of
persistent files.
|
entailment
|
def write_table_file(self, job_table_file=None, file_table_file=None):
"""Write the table to self._table_file"""
if self._table is None:
raise RuntimeError("No table to write")
if self._table_ids is None:
raise RuntimeError("No ID table to write")
if job_table_file is not None:
self._table_file = job_table_file
if self._table_file is None:
raise RuntimeError("No output file specified for table")
write_tables_to_fits(self._table_file, [self._table, self._table_ids], clobber=True,
namelist=['JOB_ARCHIVE', 'FILE_IDS'])
self._file_archive.write_table_file(file_table_file)
|
Write the table to self._table_file
|
entailment
|
def update_job_status(self, checker_func):
"""Update the status of all the jobs in the archive"""
njobs = len(self.cache.keys())
status_vect = np.zeros((8), int)
sys.stdout.write("Updating status of %i jobs: " % njobs)
sys.stdout.flush()
for i, key in enumerate(self.cache.keys()):
if i % 200 == 0:
sys.stdout.write('.')
sys.stdout.flush()
job_details = self.cache[key]
if job_details.status in [JobStatus.pending, JobStatus.running]:
if checker_func:
job_details.check_status_logfile(checker_func)
job_details.update_table_row(self._table, job_details.dbkey - 1)
status_vect[job_details.status] += 1
sys.stdout.write("!\n")
sys.stdout.flush()
sys.stdout.write("Summary:\n")
sys.stdout.write(" Unknown: %i\n" % status_vect[JobStatus.unknown])
sys.stdout.write(" Not Ready: %i\n" %
status_vect[JobStatus.not_ready])
sys.stdout.write(" Ready: %i\n" % status_vect[JobStatus.ready])
sys.stdout.write(" Pending: %i\n" % status_vect[JobStatus.pending])
sys.stdout.write(" Running: %i\n" % status_vect[JobStatus.running])
sys.stdout.write(" Done: %i\n" % status_vect[JobStatus.done])
sys.stdout.write(" Failed: %i\n" % status_vect[JobStatus.failed])
sys.stdout.write(" Partial: %i\n" %
status_vect[JobStatus.partial_failed])
|
Update the status of all the jobs in the archive
|
entailment
|
def build_archive(cls, **kwargs):
"""Return the singleton `JobArchive` instance, building it if needed """
if cls._archive is None:
cls._archive = cls(**kwargs)
return cls._archive
|
Return the singleton `JobArchive` instance, building it if needed
|
entailment
|
def elapsed_time(self):
"""Get the elapsed time."""
# Timer is running
if self._t0 is not None:
return self._time + self._get_time()
else:
return self._time
|
Get the elapsed time.
|
entailment
|
def stop(self):
"""Stop the timer."""
if self._t0 is None:
raise RuntimeError('Timer not started.')
self._time += self._get_time()
self._t0 = None
|
Stop the timer.
|
entailment
|
def make_spatialmap_source(name, Spatial_Filename, spectrum):
"""Construct and return a `fermipy.roi_model.Source` object
"""
data = dict(Spatial_Filename=Spatial_Filename,
ra=0.0, dec=0.0,
SpatialType='SpatialMap',
Source_Name=name)
if spectrum is not None:
data.update(spectrum)
return roi_model.Source(name, data)
|
Construct and return a `fermipy.roi_model.Source` object
|
entailment
|
def make_mapcube_source(name, Spatial_Filename, spectrum):
"""Construct and return a `fermipy.roi_model.MapCubeSource` object
"""
data = dict(Spatial_Filename=Spatial_Filename)
if spectrum is not None:
data.update(spectrum)
return roi_model.MapCubeSource(name, data)
|
Construct and return a `fermipy.roi_model.MapCubeSource` object
|
entailment
|
def make_isotropic_source(name, Spectrum_Filename, spectrum):
"""Construct and return a `fermipy.roi_model.IsoSource` object
"""
data = dict(Spectrum_Filename=Spectrum_Filename)
if spectrum is not None:
data.update(spectrum)
return roi_model.IsoSource(name, data)
|
Construct and return a `fermipy.roi_model.IsoSource` object
|
entailment
|
def make_composite_source(name, spectrum):
"""Construct and return a `fermipy.roi_model.CompositeSource` object
"""
data = dict(SpatialType='CompositeSource',
SpatialModel='CompositeSource',
SourceType='CompositeSource')
if spectrum is not None:
data.update(spectrum)
return roi_model.CompositeSource(name, data)
|
Construct and return a `fermipy.roi_model.CompositeSource` object
|
entailment
|
def make_catalog_sources(catalog_roi_model, source_names):
"""Construct and return dictionary of sources that are a subset of sources
in catalog_roi_model.
Parameters
----------
catalog_roi_model : dict or `fermipy.roi_model.ROIModel`
Input set of sources
source_names : list
        Names of sources to extract
Returns dict mapping source_name to `fermipy.roi_model.Source` object
"""
sources = {}
for source_name in source_names:
sources[source_name] = catalog_roi_model[source_name]
return sources
|
Construct and return dictionary of sources that are a subset of sources
in catalog_roi_model.
Parameters
----------
catalog_roi_model : dict or `fermipy.roi_model.ROIModel`
Input set of sources
source_names : list
    Names of sources to extract
Returns dict mapping source_name to `fermipy.roi_model.Source` object
|
entailment
|
def make_sources(comp_key, comp_dict):
"""Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
    Returns `OrderedDict` mapping comp_key to `fermipy.roi_model.Source`
"""
srcdict = OrderedDict()
try:
comp_info = comp_dict.info
except AttributeError:
comp_info = comp_dict
try:
spectrum = comp_dict.spectrum
except AttributeError:
spectrum = None
model_type = comp_info.model_type
if model_type == 'PointSource':
srcdict[comp_key] = make_point_source(comp_info.source_name,
comp_info.src_dict)
elif model_type == 'SpatialMap':
srcdict[comp_key] = make_spatialmap_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'MapCubeSource':
srcdict[comp_key] = make_mapcube_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'IsoSource':
srcdict[comp_key] = make_isotropic_source(comp_info.source_name,
comp_info.Spectral_Filename,
spectrum)
elif model_type == 'CompositeSource':
srcdict[comp_key] = make_composite_source(comp_info.source_name,
spectrum)
elif model_type == 'CatalogSources':
srcdict.update(make_catalog_sources(comp_info.roi_model,
comp_info.source_names))
else:
raise ValueError("Unrecognized model_type %s" % model_type)
return srcdict
|
Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
Returns `OrderedDict` mapping comp_key to `fermipy.roi_model.Source`
|
entailment
|
def add_sources(self, source_info_dict):
"""Add all of the sources in source_info_dict to this factory
"""
self._source_info_dict.update(source_info_dict)
for key, value in source_info_dict.items():
self._sources.update(make_sources(key, value))
|
Add all of the sources in source_info_dict to this factory
|
entailment
|
def build_catalog(**kwargs):
"""Build a `fermipy.catalog.Catalog` object
Parameters
----------
catalog_type : str
        Specifies catalog type, options include 2FHL | 3FGL | 4FGLP | FL8Y
catalog_file : str
FITS file with catalog tables
catalog_extdir : str
Path to directory with extended source templates
"""
catalog_type = kwargs.get('catalog_type')
catalog_file = kwargs.get('catalog_file')
catalog_extdir = kwargs.get('catalog_extdir')
if catalog_type == '2FHL':
return catalog.Catalog2FHL(fitsfile=catalog_file, extdir=catalog_extdir)
elif catalog_type == '3FGL':
return catalog.Catalog3FGL(fitsfile=catalog_file, extdir=catalog_extdir)
elif catalog_type == '4FGLP':
return catalog.Catalog4FGLP(fitsfile=catalog_file, extdir=catalog_extdir)
elif catalog_type == 'FL8Y':
return catalog.CatalogFL8Y(fitsfile=catalog_file, extdir=catalog_extdir)
else:
table = Table.read(catalog_file)
return catalog.Catalog(table, extdir=catalog_extdir)
|
Build a `fermipy.catalog.Catalog` object
Parameters
----------
catalog_type : str
    Specifies catalog type, options include 2FHL | 3FGL | 4FGLP | FL8Y
catalog_file : str
FITS file with catalog tables
catalog_extdir : str
Path to directory with extended source templates
|
entailment
|
def make_fermipy_roi_model_from_catalogs(cataloglist):
"""Build and return a `fermipy.roi_model.ROIModel object from
a list of fermipy.catalog.Catalog` objects
"""
data = dict(catalogs=cataloglist,
src_roiwidth=360.)
return roi_model.ROIModel(data, skydir=SkyCoord(0.0, 0.0, unit='deg'))
|
Build and return a `fermipy.roi_model.ROIModel` object from
a list of `fermipy.catalog.Catalog` objects
|
entailment
|
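A hedged usage sketch tying ``build_catalog`` and ``make_fermipy_roi_model_from_catalogs`` together; the catalog file and extended-source directory below are placeholder paths, not files shipped with this code:
cat = build_catalog(catalog_type='3FGL',
                    catalog_file='gll_psc_v16.fit',           # placeholder path
                    catalog_extdir='Extended_archive_v15')    # placeholder path
roi = make_fermipy_roi_model_from_catalogs([cat])
print(len(roi.sources))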
def make_roi(cls, sources=None):
"""Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources
"""
if sources is None:
sources = {}
src_fact = cls()
src_fact.add_sources(sources)
ret_model = roi_model.ROIModel(
{}, skydir=SkyCoord(0.0, 0.0, unit='deg'))
for source in src_fact.sources.values():
ret_model.load_source(source,
build_index=False, merge_sources=False)
return ret_model
|
Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources
|
entailment
|
def copy_selected_sources(cls, roi, source_names):
"""Build and return a `fermipy.roi_model.ROIModel` object
by copying selected sources from another such object
"""
roi_new = cls.make_roi()
for source_name in source_names:
try:
src_cp = roi.copy_source(source_name)
except Exception:
continue
roi_new.load_source(src_cp, build_index=False)
return roi_new
|
Build and return a `fermipy.roi_model.ROIModel` object
by copying selected sources from another such object
|
entailment
|