| sentence1 (stringlengths 52-3.87M) | sentence2 (stringlengths 1-47.2k) | label (stringclasses: 1 value) |
|---|---|---|
def delete_sources(self, cuts=None, distance=None,
skydir=None, minmax_ts=None, minmax_npred=None,
exclude=None, square=False, names=None):
"""Delete sources in the ROI model satisfying the given
selection criteria.
Parameters
----------
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Select sources that have TS in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
minmax_npred : list
Select sources that have npred in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
names : list
Select sources matching a name in this list.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
srcs = self.roi.get_sources(skydir=skydir, distance=distance, cuts=cuts,
minmax_ts=minmax_ts, minmax_npred=minmax_npred,
exclude=exclude, square=square,
coordsys=self.config[
'binning']['coordsys'],
names=names)
for s in srcs:
self.delete_source(s.name, build_fixed_wts=False)
if self.like is not None:
# Build fixed model weights in one pass
for c in self.components:
c.like.logLike.buildFixedModelWts()
self._update_roi()
return srcs
|
Delete sources in the ROI model satisfying the given
selection criteria.
Parameters
----------
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Select sources that have TS in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
minmax_npred : list
Select sources that have npred in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
names : list
Select sources matching a name in this list.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
|
entailment
|
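A minimal usage sketch for ``delete_sources``, assuming ``gta`` is an already-configured `~fermipy.gtanalysis.GTAnalysis` instance; the coordinates and thresholds below are illustrative, not taken from the source.
from astropy.coordinates import SkyCoord

# `gta` is assumed to be a configured GTAnalysis instance (hypothetical setup).
target = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')  # illustrative position
# Delete weak sources within 3 deg of `target`: TS < 4 and npred < 10.
removed = gta.delete_sources(skydir=target, distance=3.0,
                             minmax_ts=[None, 4.0],
                             minmax_npred=[None, 10.0])
for src in removed:
    print(src.name)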
def free_sources_by_name(self, names, free=True, pars=None,
**kwargs):
"""Free all sources with names matching ``names``.
Parameters
----------
names : list
List of source names.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If None then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
if names is None:
return
names = [names] if not isinstance(names, list) else names
names = [self.roi.get_source_by_name(t).name for t in names]
srcs = [s for s in self.roi.sources if s.name in names]
for s in srcs:
self.free_source(s.name, free=free, pars=pars, **kwargs)
return srcs
|
Free all sources with names matching ``names``.
Parameters
----------
names : list
List of source names.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If None then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
|
entailment
|
def free_sources(self, free=True, pars=None, cuts=None,
distance=None, skydir=None, minmax_ts=None, minmax_npred=None,
exclude=None, square=False, **kwargs):
"""Free or fix sources in the ROI model satisfying the given
selection. When multiple selections are defined, the selected
sources will be those satisfying the logical AND of all
selections (e.g. distance < X && minmax_ts[0] < ts <
minmax_ts[1] && ...).
Parameters
----------
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If None then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Free sources that have TS in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
minmax_npred : list
Free sources that have npred in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
srcs = self.roi.get_sources(skydir=skydir, distance=distance,
cuts=cuts, minmax_ts=minmax_ts,
minmax_npred=minmax_npred, exclude=exclude,
square=square,
coordsys=self.config['binning']['coordsys'])
for s in srcs:
self.free_source(s.name, free=free, pars=pars, **kwargs)
return srcs
|
Free or fix sources in the ROI model satisfying the given
selection. When multiple selections are defined, the selected
sources will be those satisfying the logical AND of all
selections (e.g. distance < X && minmax_ts[0] < ts <
minmax_ts[1] && ...).
Parameters
----------
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If None then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Free sources that have TS in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
minmax_npred : list
Free sources that have npred in the range [min,max]. If
either min or max is None then only a lower (upper) bound
will be applied. If this parameter is None then no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
|
entailment
|
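A short sketch of how the AND-composed selections in ``free_sources`` are typically combined, assuming ``gta`` is a configured GTAnalysis instance; the cut values are illustrative.
# Free only the normalizations of sources within 3 deg of the ROI center
# that also have TS > 25 (selections combine with logical AND).
freed = gta.free_sources(free=True, pars='norm',
                         distance=3.0, minmax_ts=[25.0, None])
# Fix everything else, excluding the sources freed above.
gta.free_sources(free=False, exclude=[s.name for s in freed])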
def set_parameter(self, name, par, value, true_value=True, scale=None,
bounds=None, error=None, update_source=True):
"""
Update the value of a parameter. Parameter bounds will
automatically be adjusted to encompass the new parameter
value.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
value : float
Parameter value. By default this argument is interpreted
as the unscaled (true) parameter value.
true_value : bool
If True (default), interpret ``value`` and ``error`` as
unscaled (true) parameter values; if False, interpret them
as scaled values.
scale : float
Parameter scale (optional). The value argument is interpreted
with respect to the scale parameter if it is provided.
bounds : list
Optional [min,max] bounds on the scaled parameter value. The
bounds will be expanded if necessary to encompass the new
parameter value.
error : float
Parameter error (optional). By default this argument is
interpreted as the unscaled (true) parameter error.
update_source : bool
Update the source dictionary for the object.
"""
name = self.roi.get_source_by_name(name).name
idx = self.like.par_index(name, par)
current_bounds = list(self.like.model[idx].getBounds())
if scale is not None:
self.like[idx].setScale(scale)
else:
scale = self.like.model[idx].getScale()
if true_value:
current_bounds[0] = min(current_bounds[0], value / scale)
current_bounds[1] = max(current_bounds[1], value / scale)
if error is not None:
error = error / scale
else:
current_bounds[0] = min(current_bounds[0], value)
current_bounds[1] = max(current_bounds[1], value)
# update current bounds to encompass new value
self.like[idx].setBounds(*current_bounds)
if true_value:
for p in self.like[idx].pars:
p.setTrueValue(value)
else:
self.like[idx].setValue(value)
if bounds is not None:
if true_value:
bounds[0] = min(bounds[0], value / scale)
bounds[1] = max(bounds[1], value / scale)
else:
bounds[0] = min(bounds[0], value)
bounds[1] = max(bounds[1], value)
# For some reason the numerical accuracy is causing this to throw exceptions.
try:
if bounds is not None:
self.like[idx].setBounds(*bounds)
except RuntimeError:
self.logger.warning(
"Caught failure on setBounds for %s::%s." % (name, par))
pass
if error is not None:
self.like[idx].setError(error)
self._sync_params(name)
if update_source:
self.update_source(name)
|
Update the value of a parameter. Parameter bounds will
automatically be adjusted to encompass the new parameter
value.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
value : float
Parameter value. By default this argument is interpreted
as the unscaled (true) parameter value.
true_value : bool
If True (default), interpret ``value`` and ``error`` as
unscaled (true) parameter values; if False, interpret them
as scaled values.
scale : float
Parameter scale (optional). The value argument is interpreted
with respect to the scale parameter if it is provided.
bounds : list
Optional [min,max] bounds on the scaled parameter value. The
bounds will be expanded if necessary to encompass the new
parameter value.
error : float
Parameter error (optional). By default this argument is
interpreted as the unscaled (true) parameter error.
update_source : bool
Update the source dictionary for the object.
|
entailment
|
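A hypothetical sketch of ``set_parameter``, assuming ``gta`` is a configured GTAnalysis instance; the source name 'sourceA' and the numerical values are illustrative.
# Set the true (unscaled) Prefactor value and its error; bounds are widened
# automatically if the new value falls outside the current bounds.
gta.set_parameter('sourceA', 'Prefactor', 2.3e-11,
                  true_value=True, error=1.5e-12)
# Alternatively, pass a scale together with a scaled value.
gta.set_parameter('sourceA', 'Prefactor', 2.3,
                  scale=1e-11, true_value=False)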
def set_parameter_scale(self, name, par, scale):
"""Update the scale of a parameter while keeping its value constant."""
name = self.roi.get_source_by_name(name).name
idx = self.like.par_index(name, par)
current_bounds = list(self.like.model[idx].getBounds())
current_scale = self.like.model[idx].getScale()
current_value = self.like[idx].getValue()
self.like[idx].setScale(scale)
self.like[idx].setValue(current_value * current_scale / scale)
self.like[idx].setBounds(current_bounds[0] * current_scale / scale,
current_bounds[1] * current_scale / scale)
self._sync_params(name)
|
Update the scale of a parameter while keeping its value constant.
|
entailment
|
def set_parameter_bounds(self, name, par, bounds):
"""Set the bounds on the scaled value of a parameter.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
bounds : list
Upper and lower bound.
"""
idx = self.like.par_index(name, par)
self.like[idx].setBounds(*bounds)
self._sync_params(name)
|
Set the bounds on the scaled value of a parameter.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
bounds : list
Upper and lower bound.
|
entailment
|
def set_parameter_error(self, name, par, error):
"""Set the error on the value of a parameter.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
error : float
The value for the parameter error
"""
idx = self.like.par_index(name, par)
self.like[idx].setError(error)
self._sync_params(name)
|
Set the error on the value of a parameter.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
error : float
The value for the parameter error
|
entailment
|
def lock_parameter(self, name, par, lock=True):
"""Set parameter to locked/unlocked state. A locked parameter
will be ignored when running methods that free/fix sources or
parameters.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
lock : bool
Set parameter to locked (True) or unlocked (False) state.
"""
name = self.roi.get_source_by_name(name).name
lck_params = self._lck_params.setdefault(name, [])
if lock:
self.free_parameter(name, par, False)
if par not in lck_params:
lck_params += [par]
else:
if par in lck_params:
lck_params.remove(par)
|
Set parameter to locked/unlocked state. A locked parameter
will be ignored when running methods that free/fix sources or
parameters.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
lock : bool
Set parameter to locked (True) or unlocked (False) state.
|
entailment
|
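A sketch of the lock/unlock workflow, assuming ``gta`` is a configured GTAnalysis instance; 'galdiff' is an illustrative component name.
# Lock the Index of the Galactic diffuse component: it is fixed and will be
# skipped by subsequent free/fix calls.
gta.lock_parameter('galdiff', 'Index', lock=True)
gta.free_source('galdiff')                           # frees all parameters except the locked Index
gta.lock_parameter('galdiff', 'Index', lock=False)   # unlock it again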
def free_parameter(self, name, par, free=True):
"""Free/Fix a parameter of a source by name.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
"""
name = self.get_source_name(name)
if par in self._lck_params.get(name, []):
return
idx = self.like.par_index(name, par)
self.like[idx].setFree(free)
self._sync_params(name)
|
Free/Fix a parameter of a source by name.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
|
entailment
|
def lock_source(self, name, lock=True):
"""Set all parameters of a source to a locked/unlocked state.
Locked parameters will be ignored when running methods that
free/fix sources or parameters.
Parameters
----------
name : str
Source name.
lock : bool
Set source parameters to locked (True) or unlocked (False)
state.
"""
name = self.get_source_name(name)
if lock:
par_names = self.get_source_params(name)
self.free_source(name, False, pars=par_names)
self._lck_params[name] = par_names
else:
self._lck_params[name] = []
|
Set all parameters of a source to a locked/unlocked state.
Locked parameters will be ignored when running methods that
free/fix sources or parameters.
Parameters
----------
name : str
Source name.
lock : bool
Set source parameters to locked (True) or unlocked (False)
state.
|
entailment
|
def free_source(self, name, free=True, pars=None, **kwargs):
"""Free/Fix parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for this source. If
None then all source parameters will be freed/fixed with the
exception of those defined in the skip_pars list.
"""
free_pars = self.get_free_param_vector()
loglevel = kwargs.pop('loglevel', self.loglevel)
# Find the source
src = self.roi.get_source_by_name(name)
name = src.name
if pars is None or (isinstance(pars, list) and not pars):
pars = []
pars += norm_parameters.get(src['SpectrumType'], [])
pars += shape_parameters.get(src['SpectrumType'], [])
elif pars == 'norm':
pars = []
pars += norm_parameters.get(src['SpectrumType'], [])
elif pars == 'shape':
pars = []
pars += shape_parameters.get(src['SpectrumType'], [])
elif isinstance(pars, list):
pass
else:
raise Exception('Invalid parameter list.')
# Remove locked parameters
lck_params = self._lck_params.get(name, [])
pars = [p for p in pars if p not in lck_params]
# Deduce here the names of all parameters from the spectral type
src_par_names = pyLike.StringVector()
self.like[name].src.spectrum().getParamNames(src_par_names)
par_indices = []
par_names = []
for p in src_par_names:
if pars is not None and p not in pars:
continue
idx = self.like.par_index(name, p)
if free == free_pars[idx]:
continue
par_indices.append(idx)
par_names.append(p)
if len(par_names) == 0:
return
if free:
self.logger.log(loglevel, 'Freeing parameters for %-22s: %s',
name, par_names)
else:
self.logger.log(loglevel, 'Fixing parameters for %-22s: %s',
name, par_names)
for (idx, par_name) in zip(par_indices, par_names):
self.like[idx].setFree(free)
self._sync_params_state(name)
|
Free/Fix parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for this source. If
None then all source parameters will be freed/fixed with the
exception of those defined in the skip_pars list.
|
entailment
|
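The ``pars`` options accepted by ``free_source``, shown as a hypothetical sketch; ``gta`` is assumed to be a configured GTAnalysis instance and 'sourceA' is an illustrative source name.
gta.free_source('sourceA')                   # free all spectral parameters
gta.free_source('sourceA', pars='norm')      # free only the normalization parameter(s)
gta.free_source('sourceA', pars='shape')     # free only the shape parameters
gta.free_source('sourceA', pars=['Index'])   # free an explicit list of parameters
gta.free_source('sourceA', free=False)       # fix everything again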
def free_norm(self, name, free=True, **kwargs):
"""Free/Fix normalization of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
"""
name = self.get_source_name(name)
normPar = self.like.normPar(name).getName()
self.free_source(name, pars=[normPar], free=free, **kwargs)
|
Free/Fix normalization of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
|
entailment
|
def free_index(self, name, free=True, **kwargs):
"""Free/Fix index of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
"""
src = self.roi.get_source_by_name(name)
self.free_source(name, free=free,
pars=index_parameters.get(src['SpectrumType'], []),
**kwargs)
|
Free/Fix index of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
|
entailment
|
def free_shape(self, name, free=True, **kwargs):
"""Free/Fix shape parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
"""
src = self.roi.get_source_by_name(name)
self.free_source(name, free=free,
pars=shape_parameters[src['SpectrumType']],
**kwargs)
|
Free/Fix shape parameters of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
|
entailment
|
def get_source_name(self, name):
"""Return the name of a source as it is defined in the
pyLikelihood model object."""
if name not in self.like.sourceNames():
name = self.roi.get_source_by_name(name).name
return name
|
Return the name of a source as it is defined in the
pyLikelihood model object.
|
entailment
|
def optimize(self, **kwargs):
"""Iteratively optimize the ROI model. The optimization is
performed in three sequential steps:
* Free the normalization of the N largest components (as
determined from NPred) that contain a fraction ``npred_frac``
of the total predicted counts in the model and perform a
simultaneous fit of the normalization parameters of these
components.
* Individually fit the normalizations of all sources that were
not included in the first step in order of their npred
values. Skip any sources that have NPred <
``npred_threshold``.
* Individually fit the shape and normalization parameters of
all sources with TS > ``shape_ts_threshold`` where TS is
determined from the first two steps of the ROI optimization.
To ensure that the model is fully optimized this method can be
run multiple times.
Parameters
----------
npred_frac : float
Threshold on the fractional number of counts in the N
largest components in the ROI. This parameter determines
the set of sources that are fit in the first optimization
step.
npred_threshold : float
Threshold on the minimum number of counts of individual
sources. This parameter determines the sources that are
fit in the second optimization step.
shape_ts_threshold : float
Threshold on source TS used for determining the sources
that will be fit in the third optimization step.
max_free_sources : int
Maximum number of sources that will be fit simultaneously
in the first optimization step.
skip : list
List of str source names to skip while optimizing.
optimizer : dict
Dictionary that overrides the default optimizer settings.
"""
loglevel = kwargs.pop('loglevel', self.loglevel)
timer = Timer.create(start=True)
self.logger.log(loglevel, 'Starting')
loglike0 = -self.like()
self.logger.debug('LogLike: %f' % loglike0)
# Extract options from kwargs
config = copy.deepcopy(self.config['roiopt'])
config['optimizer'] = copy.deepcopy(self.config['optimizer'])
fermipy.config.validate_config(kwargs, config)
config = merge_dict(config, kwargs)
# Extract options from kwargs
npred_frac_threshold = config['npred_frac']
npred_threshold = config['npred_threshold']
shape_ts_threshold = config['shape_ts_threshold']
max_free_sources = config['max_free_sources']
skip = copy.deepcopy(config['skip'])
o = defaults.make_default_dict(defaults.roiopt_output)
o['config'] = config
o['loglike0'] = loglike0
# preserve free parameters
free = self.get_free_param_vector()
# Fix all parameters
self.free_sources(free=False, loglevel=logging.DEBUG)
# Free norms of sources for which the sum of npred is a
# fraction > npred_frac of the total model counts in the ROI
npred_sum = 0
skip_sources = skip if skip is not None else []
joint_norm_fit = []
# EAC, we need this try block because older versions of the ST don't have the has_weights function
try:
if self.like.logLike.has_weights():
npred_str = 'npred_wt'
else:
npred_str = 'npred'
except AttributeError:
npred_str = 'npred'
# FIXME, EAC, use npred_wt here
for s in sorted(self.roi.sources, key=lambda t: t[npred_str],
reverse=True):
npred_sum += s[npred_str]
npred_frac = npred_sum / self._roi_data[npred_str]
if s.name in skip_sources:
continue
self.free_norm(s.name, loglevel=logging.DEBUG)
joint_norm_fit.append(s.name)
if npred_frac > npred_frac_threshold:
break
if s[npred_str] < npred_threshold:
break
if len(joint_norm_fit) >= max_free_sources:
break
print ("Joint fit ", joint_norm_fit)
self.fit(loglevel=logging.DEBUG, **config['optimizer'])
self.free_sources(free=False, loglevel=logging.DEBUG)
# Step through remaining sources and re-fit normalizations
# FIXME, EAC, use npred_wt here
for s in sorted(self.roi.sources, key=lambda t: t[npred_str],
reverse=True):
if s.name in skip_sources or s.name in joint_norm_fit:
continue
if s[npred_str] < npred_threshold:
self.logger.debug(
'Skipping %s with npred %10.3f', s.name, s[npred_str])
continue
self.logger.debug('Fitting %s npred: %10.3f TS: %10.3f',
s.name, s[npred_str], s['ts'])
self.free_norm(s.name, loglevel=logging.DEBUG)
self.fit(loglevel=logging.DEBUG, **config['optimizer'])
self.logger.debug('Post-fit Results npred: %10.3f TS: %10.3f',
s[npred_str], s['ts'])
self.free_norm(s.name, free=False, loglevel=logging.DEBUG)
# Refit spectral shape parameters for sources with TS >
# shape_ts_threshold
for s in sorted(self.roi.sources,
key=lambda t: t['ts'] if np.isfinite(t['ts']) else 0,
reverse=True):
if s.name in skip_sources:
continue
if s['ts'] < shape_ts_threshold or not np.isfinite(s['ts']):
continue
print('Fitting shape %s TS: %10.3f' % (s.name, s['ts']))
self.logger.debug('Fitting shape %s TS: %10.3f', s.name, s['ts'])
self.free_source(s.name, loglevel=logging.DEBUG)
self.fit(loglevel=logging.DEBUG, **config['optimizer'])
self.free_source(s.name, free=False, loglevel=logging.DEBUG)
self.set_free_param_vector(free)
loglike1 = -self.like()
o['loglike1'] = loglike1
o['dloglike'] = loglike1 - loglike0
self.logger.log(loglevel, 'Finished')
self.logger.log(loglevel, 'LogLike: %f Delta-LogLike: %f',
loglike1, loglike1 - loglike0)
self.logger.log(loglevel, 'Execution time: %.2f s', timer.elapsed_time)
return o
|
Iteratively optimize the ROI model. The optimization is
performed in three sequential steps:
* Free the normalization of the N largest components (as
determined from NPred) that contain a fraction ``npred_frac``
of the total predicted counts in the model and perform a
simultaneous fit of the normalization parameters of these
components.
* Individually fit the normalizations of all sources that were
not included in the first step in order of their npred
values. Skip any sources that have NPred <
``npred_threshold``.
* Individually fit the shape and normalization parameters of
all sources with TS > ``shape_ts_threshold`` where TS is
determined from the first two steps of the ROI optimization.
To ensure that the model is fully optimized this method can be
run multiple times.
Parameters
----------
npred_frac : float
Threshold on the fractional number of counts in the N
largest components in the ROI. This parameter determines
the set of sources that are fit in the first optimization
step.
npred_threshold : float
Threshold on the minimum number of counts of individual
sources. This parameter determines the sources that are
fit in the second optimization step.
shape_ts_threshold : float
Threshold on source TS used for determining the sources
that will be fit in the third optimization step.
max_free_sources : int
Maximum number of sources that will be fit simultaneously
in the first optimization step.
skip : list
List of str source names to skip while optimizing.
optimizer : dict
Dictionary that overrides the default optimizer settings.
|
entailment
|
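A sketch of a typical ``optimize`` call, assuming ``gta`` is a configured GTAnalysis instance; the thresholds and skipped component names are illustrative rather than defaults taken from the source.
opt = gta.optimize(npred_frac=0.95, npred_threshold=1.0,
                   shape_ts_threshold=25.0, max_free_sources=5,
                   skip=['galdiff', 'isodiff'])
# The returned dict records the log-likelihood before and after the optimization.
print(opt['loglike0'], opt['loglike1'], opt['dloglike'])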
def profile_norm(self, name, logemin=None, logemax=None, reoptimize=False,
xvals=None, npts=None, fix_shape=True, savestate=True,
**kwargs):
"""Profile the normalization of a source.
Parameters
----------
name : str
Source name.
reoptimize : bool
Re-optimize free parameters in the model at each point
in the profile likelihood scan.
"""
self.logger.debug('Profiling %s', name)
if savestate:
saved_state = LikelihoodState(self.like)
if fix_shape:
self.free_sources(False, pars='shape', loglevel=logging.DEBUG)
if npts is None:
npts = self.config['gtlike']['llscan_npts']
# Find the source
name = self.roi.get_source_by_name(name).name
parName = self.like.normPar(name).getName()
loge_bounds = self.loge_bounds
if logemin is not None or logemax is not None:
self.set_energy_range(logemin, logemax)
# Find a sequence of values for the normalization scan
if xvals is None:
if reoptimize:
xvals = self._find_scan_pts_reopt(name, npts=npts,
**kwargs)
else:
xvals = self._find_scan_pts(name, npts=9)
lnlp = self.profile(name, parName,
reoptimize=False, xvals=xvals)
lims = utils.get_parameter_limits(lnlp['xvals'],
lnlp['dloglike'],
cl_limit=0.99)
if not np.isfinite(lims['ul']):
self.logger.warning('Upper limit not found. '
'Refitting normalization.')
self.like.optimize(0)
xvals = self._find_scan_pts(name, npts=npts)
lnlp = self.profile(name, parName,
reoptimize=False,
xvals=xvals)
lims = utils.get_parameter_limits(lnlp['xvals'],
lnlp['dloglike'],
cl_limit=0.99)
if np.isfinite(lims['ll']):
xhi = np.linspace(lims['x0'], lims['ul'], npts - npts // 2)
xlo = np.linspace(lims['ll'], lims['x0'], npts // 2)
xvals = np.concatenate((xlo[:-1], xhi))
xvals = np.insert(xvals, 0, 0.0)
elif np.abs(lnlp['dloglike'][0] - lims['lnlmax']) > 0.1:
lims['ll'] = 0.0
xhi = np.linspace(lims['x0'], lims['ul'],
(npts + 1) - (npts + 1) // 2)
xlo = np.linspace(lims['ll'], lims['x0'], (npts + 1) // 2)
xvals = np.concatenate((xlo[:-1], xhi))
else:
xvals = np.linspace(0, lims['ul'], npts)
o = self.profile(name, parName,
reoptimize=reoptimize, xvals=xvals,
savestate=savestate, **kwargs)
if savestate:
saved_state.restore()
if logemin is not None or logemax is not None:
self.set_energy_range(*loge_bounds)
self.logger.debug('Finished')
return o
|
Profile the normalization of a source.
Parameters
----------
name : str
Source name.
reoptimize : bool
Re-optimize free parameters in the model at each point
in the profile likelihood scan.
|
entailment
|
def profile(self, name, parName, logemin=None, logemax=None,
reoptimize=False,
xvals=None, npts=None, savestate=True, **kwargs):
"""Profile the likelihood for the given source and parameter.
Parameters
----------
name : str
Source name.
parName : str
Parameter name.
reoptimize : bool
Re-fit nuisance parameters at each step in the scan. Note
that enabling this option will only re-fit parameters that
were free when the method was executed.
Returns
-------
lnlprofile : dict
Dictionary containing results of likelihood scan.
"""
# Find the source
name = self.roi.get_source_by_name(name).name
par = self.like.normPar(name)
parName = self.like.normPar(name).getName()
idx = self.like.par_index(name, parName)
bounds = self.like.model[idx].getBounds()
value = self.like.model[idx].getValue()
loge_bounds = self.loge_bounds
optimizer = kwargs.get('optimizer', self.config['optimizer'])
if savestate:
saved_state = self._latch_state()
# If parameter is fixed temporarily free it
par.setFree(True)
if optimizer['optimizer'] == 'NEWTON':
self._create_fitcache()
if logemin is not None or logemax is not None:
loge_bounds = self.set_energy_range(logemin, logemax)
else:
loge_bounds = self.loge_bounds
loglike0 = -self.like()
if xvals is None:
err = par.error()
val = par.getValue()
if err <= 0 or val <= 3 * err:
xvals = 10 ** np.linspace(-2.0, 2.0, 51)
if val < xvals[0]:
xvals = np.insert(xvals, val, 0)
else:
xvals = np.linspace(0, 1, 25)
xvals = np.concatenate((-1.0 * xvals[1:][::-1], xvals))
xvals = val * 10 ** xvals
if np.isnan(xvals).any():
raise RuntimeError(
"Parameter scan points for %s::%s include infinite value." % (name, parName))
# Update parameter bounds to encompass scan range
try:
self.like[idx].setBounds(min(min(xvals), value, bounds[0]),
max(max(xvals), value, bounds[1]))
except RuntimeError:
self.logger.warning(
"Caught failure on setBounds for %s::%s." % (name, parName))
o = {'xvals': xvals,
'npred': np.zeros(len(xvals)),
'npred_wt': np.zeros(len(xvals)),
'dnde': np.zeros(len(xvals)),
'flux': np.zeros(len(xvals)),
'eflux': np.zeros(len(xvals)),
'dloglike': np.zeros(len(xvals)),
'loglike': np.zeros(len(xvals))
}
if reoptimize and hasattr(self.like.components[0].logLike,
'setUpdateFixedWeights'):
for c in self.components:
c.like.logLike.setUpdateFixedWeights(False)
for i, x in enumerate(xvals):
try:
self.like[idx] = x
except RuntimeError:
self.logger.warning(
"Caught failure on set for %s::%s: %.2f" % (name, parName, x))
if self.like.nFreeParams() > 1 and reoptimize:
# Only reoptimize if not all frozen
self.like.freeze(idx)
fit_output = self._fit(errors=False, **optimizer)
loglike1 = fit_output['loglike']
self.like.thaw(idx)
else:
loglike1 = -self.like()
flux = self.like[name].flux(10 ** loge_bounds[0],
10 ** loge_bounds[1])
eflux = self.like[name].energyFlux(10 ** loge_bounds[0],
10 ** loge_bounds[1])
prefactor = self.like[idx]
o['dloglike'][i] = loglike1 - loglike0
o['loglike'][i] = loglike1
o['dnde'][i] = prefactor.getTrueValue()
o['flux'][i] = flux
o['eflux'][i] = eflux
cs = self.model_counts_spectrum(name,
loge_bounds[0],
loge_bounds[1], summed=True)
o['npred'][i] += np.sum(cs)
cs_wt = self.model_counts_spectrum(name,
loge_bounds[0],
loge_bounds[1], summed=True, weighted=True)
o['npred_wt'][i] += np.sum(cs_wt)
self.like[idx] = value
if reoptimize and hasattr(self.like.components[0].logLike,
'setUpdateFixedWeights'):
for c in self.components:
c.like.logLike.setUpdateFixedWeights(True)
# Restore model parameters to original values
if savestate:
saved_state.restore()
self.like[idx].setBounds(*bounds)
if logemin is not None or logemax is not None:
self.set_energy_range(*loge_bounds)
return o
|
Profile the likelihood for the given source and parameter.
Parameters
----------
name : str
Source name.
parName : str
Parameter name.
reoptimize : bool
Re-fit nuisance parameters at each step in the scan. Note
that enabling this option will only re-fit parameters that
were free when the method was executed.
Returns
-------
lnlprofile : dict
Dictionary containing results of likelihood scan.
|
entailment
|
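A sketch of a likelihood scan with ``profile``, assuming ``gta`` is a configured GTAnalysis instance and 'sourceA' is an illustrative source name.
import numpy as np

prof = gta.profile('sourceA', 'Prefactor', reoptimize=True)
# xvals holds the scanned normalization values; dloglike is the change in
# log-likelihood relative to the input model at each scan point.
imax = np.argmax(prof['dloglike'])
print(prof['xvals'][imax], prof['loglike'][imax])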
def constrain_norms(self, srcNames, cov_scale=1.0):
"""Constrain the normalizations of one or more sources by
adding gaussian priors with sigma equal to the parameter
error times a scaling factor."""
# Get the covariance matrix
for name in srcNames:
par = self.like.normPar(name)
err = par.error()
val = par.getValue()
if par.error() == 0.0 or not par.isFree():
continue
self.add_gauss_prior(name, par.getName(),
val, err * cov_scale)
|
Constrain the normalizations of one or more sources by
adding gaussian priors with sigma equal to the parameter
error times a scaling factor.
|
entailment
|
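A sketch of constraining background normalizations with Gaussian priors, assuming ``gta`` is a configured GTAnalysis instance; the component names are illustrative.
# Priors with sigma = 2 x the current parameter error are added to each free
# normalization parameter in the list.
gta.constrain_norms(['galdiff', 'isodiff'], cov_scale=2.0)
gta.fit()              # constrained fit
gta.remove_priors()    # drop all priors afterwards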
def remove_priors(self):
"""Clear all priors."""
for src in self.roi.sources:
for par in self.like[src.name].funcs["Spectrum"].params.values():
par.removePrior()
|
Clear all priors.
|
entailment
|
def _create_optObject(self, **kwargs):
""" Make MINUIT or NewMinuit type optimizer object """
optimizer = kwargs.get('optimizer',
self.config['optimizer']['optimizer'])
if optimizer.upper() == 'MINUIT':
optObject = pyLike.Minuit(self.like.logLike)
elif optimizer.upper() == 'NEWMINUIT':
optObject = pyLike.NewMinuit(self.like.logLike)
else:
optFactory = pyLike.OptimizerFactory_instance()
optObject = optFactory.create(str(optimizer), self.like.logLike)
return optObject
|
Make MINUIT or NewMinuit type optimizer object
|
entailment
|
def fit(self, update=True, **kwargs):
"""Run the likelihood optimization. This will execute a fit of all
parameters that are currently free in the model and update the
characteristics of the corresponding model components (TS,
npred, etc.). The fit will be repeated N times (set with the
`retries` parameter) until a fit quality greater than or equal
to `min_fit_quality` and a fit status code of 0 is obtained.
If the fit does not succeed after N retries then all parameter
values will be reverted to their state prior to the execution
of the fit.
Parameters
----------
update : bool
Update the model dictionary for all sources with free
parameters.
tol : float
Set the optimizer tolerance.
verbosity : int
Set the optimizer output level.
optimizer : str
Set the likelihood optimizer (e.g. MINUIT or NEWMINUIT).
retries : int
Set the number of times to rerun the fit when the fit quality
is < 3.
min_fit_quality : int
Set the minimum fit quality. If the fit quality is smaller
than this value then all model parameters will be
restored to their values prior to the fit.
reoptimize : bool
Refit background sources when updating source properties
(TS and likelihood profiles).
Returns
-------
fit : dict
Dictionary containing diagnostic information from the fit
(fit quality, parameter covariances, etc.).
"""
loglevel = kwargs.pop('loglevel', self.loglevel)
self.logger.log(loglevel, "Starting fit.")
# Extract options from kwargs
config = copy.deepcopy(self.config['optimizer'])
config.setdefault('covar', True)
config.setdefault('reoptimize', False)
config = utils.merge_dict(config, kwargs)
num_free = self.like.nFreeParams()
loglike0 = -self.like()
# Initialize output dict
o = {'fit_quality': 3,
'fit_status': 0,
'fit_success': True,
'dloglike': 0.0,
'edm': 0.0,
'loglike': loglike0,
'covariance': None,
'correlation': None,
'values': np.ones(num_free) * np.nan,
'errors': np.ones(num_free) * np.nan,
'indices': np.zeros(num_free, dtype=int),
'is_norm': np.empty(num_free, dtype=bool),
'src_names': num_free * [None],
'par_names': num_free * [None],
'config': config
}
if not num_free:
self.logger.log(loglevel, "Skipping fit. No free parameters.")
return o
saved_state = LikelihoodState(self.like)
fit_output = self._fit(**config)
o.update(fit_output)
self.logger.debug("Fit complete.")
o['dloglike'] = o['loglike'] - loglike0
if not o['fit_success']:
self.logger.error('%s failed with status code %i fit quality %i',
config['optimizer'], o['fit_status'],
o['fit_quality'])
saved_state.restore()
return o
if update:
free_params = self.get_params(True)
self._extract_correlation(o, free_params)
for name in self.like.sourceNames():
freePars = self.get_free_source_params(name)
if len(freePars) == 0:
continue
self.update_source(name, reoptimize=config['reoptimize'])
# Update roi model counts
self._update_roi()
self.logger.log(loglevel, "Fit returned successfully. " +
"Quality: %3i Status: %3i",
o['fit_quality'], o['fit_status'])
self.logger.log(loglevel, "LogLike: %12.3f DeltaLogLike: %12.3f ",
o['loglike'], o['dloglike'])
return o
|
Run the likelihood optimization. This will execute a fit of all
parameters that are currently free in the model and update the
characteristics of the corresponding model components (TS,
npred, etc.). The fit will be repeated N times (set with the
`retries` parameter) until a fit quality greater than or equal
to `min_fit_quality` and a fit status code of 0 is obtained.
If the fit does not succeed after N retries then all parameter
values will be reverted to their state prior to the execution
of the fit.
Parameters
----------
update : bool
Update the model dictionary for all sources with free
parameters.
tol : float
Set the optimizer tolerance.
verbosity : int
Set the optimizer output level.
optimizer : str
Set the likelihood optimizer (e.g. MINUIT or NEWMINUIT).
retries : int
Set the number of times to rerun the fit when the fit quality
is < 3.
min_fit_quality : int
Set the minimum fit quality. If the fit quality is smaller
than this value then all model parameters will be
restored to their values prior to the fit.
reoptimize : bool
Refit background sources when updating source properties
(TS and likelihood profiles).
Returns
-------
fit : dict
Dictionary containing diagnostic information from the fit
(fit quality, parameter covariances, etc.).
|
entailment
|
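A sketch of running ``fit`` and inspecting its output dictionary, assuming ``gta`` is a configured GTAnalysis instance with free parameters; the optimizer overrides are illustrative.
fit_res = gta.fit(optimizer='NEWMINUIT', tol=1e-4, reoptimize=False)
if fit_res['fit_success']:
    print('quality:', fit_res['fit_quality'],
          'loglike:', fit_res['loglike'],
          'dloglike:', fit_res['dloglike'])
else:
    # On failure the parameters are restored to their pre-fit values.
    print('fit failed with status', fit_res['fit_status'])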
def _fit_newton(self, fitcache=None, ebin=None, **kwargs):
"""Fast fitting method using newton fitter."""
tol = kwargs.get('tol', self.config['optimizer']['tol'])
max_iter = kwargs.get('max_iter',
self.config['optimizer']['max_iter'])
init_lambda = kwargs.get('init_lambda',
self.config['optimizer']['init_lambda'])
use_reduced = kwargs.get('use_reduced', True)
free_params = self.get_params(True)
free_norm_params = [p for p in free_params if p['is_norm'] is True]
if len(free_params) != len(free_norm_params):
msg = 'Executing Newton fitter with one ' + \
'or more free shape parameters.'
self.logger.error(msg)
raise Exception(msg)
verbosity = kwargs.get('verbosity', 0)
if fitcache is None:
fitcache = self._create_fitcache(**kwargs)
fitcache.update(self.get_params(),
tol, max_iter, init_lambda, use_reduced)
logemin = self.loge_bounds[0]
logemax = self.loge_bounds[1]
imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
imax = int(utils.val_to_edge(self.log_energies, logemax)[0])
if ebin is not None:
fitcache.fitcache.setEnergyBin(ebin)
elif imin == 0 and imax == self.enumbins:
fitcache.fitcache.setEnergyBin(-1)
else:
fitcache.fitcache.setEnergyBins(imin, imax)
num_free = len(free_norm_params)
o = {'fit_status': 0,
'fit_quality': 3,
'fit_success': True,
'edm': 0,
'loglike': None,
'values': np.ones(num_free) * np.nan,
'errors': np.ones(num_free) * np.nan,
'indices': np.zeros(num_free, dtype=int),
'is_norm': np.empty(num_free, dtype=bool),
'src_names': num_free * [None],
'par_names': num_free * [None],
}
if num_free == 0:
return o
ref_vals = np.array(fitcache.fitcache.refValues())
free = np.array(fitcache.fitcache.currentFree())
norm_vals = ref_vals[free]
norm_idxs = []
for i, p in enumerate(free_norm_params):
norm_idxs += [p['idx']]
o['indices'][i] = p['idx']
o['src_names'][i] = p['src_name']
o['par_names'][i] = p['par_name']
o['is_norm'][i] = p['is_norm']
o['fit_status'] = fitcache.fit(verbose=verbosity)
o['edm'] = fitcache.fitcache.currentEDM()
pars, errs, cov = fitcache.get_pars()
pars *= norm_vals
errs *= norm_vals
cov = cov * np.outer(norm_vals, norm_vals)
o['values'] = pars
o['errors'] = errs
o['covariance'] = cov
errinv = np.zeros_like(o['errors'])
m = o['errors'] > 0
errinv[m] = 1. / o['errors'][m]
o['correlation'] = o['covariance'] * np.outer(errinv, errinv)
if o['fit_status'] in [-2, 0]:
for idx, val, err in zip(norm_idxs, pars, errs):
self._set_value_bounded(idx, val)
self.like[idx].setError(err)
self.like.syncSrcParams()
o['fit_success'] = True
else:
o['fit_success'] = False
if o['fit_status']:
self.logger.error('Error in NEWTON fit. Fit Status: %i',
o['fit_status'])
# FIXME: Figure out why currentLogLike gets out of sync
#loglike = fitcache.fitcache.currentLogLike()
#prior_vals, prior_errs, has_prior = gtutils.get_priors(self.like)
#loglike -= np.sum(has_prior) * np.log(np.sqrt(2 * np.pi))
loglike = -self.like()
o['loglike'] = loglike
return o
|
Fast fitting method using newton fitter.
|
entailment
|
def load_xml(self, xmlfile):
"""Load model definition from XML.
Parameters
----------
xmlfile : str
Name of the input XML file.
"""
self.logger.info('Loading XML')
for c in self.components:
c.load_xml(xmlfile)
for name in self.like.sourceNames():
self.update_source(name)
self._fitcache = None
self.logger.info('Finished Loading XML')
|
Load model definition from XML.
Parameters
----------
xmlfile : str
Name of the input XML file.
|
entailment
|
def load_parameters_from_yaml(self, yamlfile, update_sources=False):
"""Load model parameters from yaml
Parameters
----------
yamlfile : str
Name of the input yaml file.
"""
d = utils.load_yaml(yamlfile)
for src, src_pars in d.items():
for par_name, par_dict in src_pars.items():
if par_name in ['SpectrumType']:
continue
par_value = par_dict.get('value', None)
par_error = par_dict.get('error', None)
par_scale = par_dict.get('scale', None)
par_min = par_dict.get('min', None)
par_max = par_dict.get('max', None)
par_free = par_dict.get('free', None)
if par_min is not None and par_max is not None:
par_bounds = [par_min, par_max]
else:
par_bounds = None
try:
self.set_parameter(src, par_name, par_value, true_value=False,
scale=par_scale, bounds=par_bounds, error=par_error,
update_source=update_sources)
except RuntimeError as msg:
self.logger.warning(msg)
self.logger.warning("Did not set parameter %s:%s" % (src, par_name))
continue
except Exception as msg:
self.logger.warning(msg)
continue
if par_free is not None:
self.free_parameter(src, par_name, par_free)
self._sync_params_state()
|
Load model parameters from a YAML file.
Parameters
----------
yamlfile : str
Name of the input YAML file.
|
entailment
|
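A sketch of the YAML layout implied by the loader above, written out from Python for illustration; the source name and parameter values are hypothetical and ``gta`` is assumed to be a configured GTAnalysis instance.
import yaml

# Mapping: source name -> parameter name -> {value, error, scale, min, max, free}.
# A 'SpectrumType' entry is ignored by the loader; values are scaled values,
# since set_parameter is called with true_value=False.
pars = {
    'sourceA': {
        'SpectrumType': 'PowerLaw',
        'Prefactor': {'value': 2.0, 'error': 0.1, 'scale': 1e-11,
                      'min': 1e-5, 'max': 1e3, 'free': True},
        'Index': {'value': 2.1, 'scale': -1.0, 'min': 0.0, 'max': 5.0, 'free': False},
    }
}
with open('params.yaml', 'w') as f:
    yaml.safe_dump(pars, f)

gta.load_parameters_from_yaml('params.yaml', update_sources=True)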
def _restore_counts_maps(self):
"""
Revert counts maps to their state prior to injecting any simulated
components.
"""
for c in self.components:
c.restore_counts_maps()
if hasattr(self.like.components[0].logLike, 'setCountsMap'):
self._init_roi_model()
else:
self.write_xml('tmp')
self._like = SummedLikelihood()
for i, c in enumerate(self._components):
c._create_binned_analysis()
self._like.addComponent(c.like)
self._init_roi_model()
self.load_xml('tmp')
|
Revert counts maps to their state prior to injecting any simulated
components.
|
entailment
|
def simulate_source(self, src_dict=None):
"""
Inject simulated source counts into the data.
Parameters
----------
src_dict : dict
Dictionary defining the spatial and spectral properties of
the source that will be injected.
"""
self._fitcache = None
if src_dict is None:
src_dict = {}
else:
src_dict = copy.deepcopy(src_dict)
skydir = wcs_utils.get_target_skydir(src_dict, self.roi.skydir)
src_dict.setdefault('ra', skydir.ra.deg)
src_dict.setdefault('dec', skydir.dec.deg)
src_dict.setdefault('SpatialModel', 'PointSource')
src_dict.setdefault('SpatialWidth', 0.3)
src_dict.setdefault('Index', 2.0)
src_dict.setdefault('Prefactor', 1E-13)
self.add_source('mcsource', src_dict, free=True,
init_source=False)
for c in self.components:
c.simulate_roi('mcsource', clear=False)
self.delete_source('mcsource')
if hasattr(self.like.components[0].logLike, 'setCountsMap'):
self._init_roi_model()
else:
self.write_xml('tmp')
self._like = SummedLikelihood()
for i, c in enumerate(self._components):
c._create_binned_analysis('tmp.xml')
self._like.addComponent(c.like)
self._init_roi_model()
self.load_xml('tmp')
|
Inject simulated source counts into the data.
Parameters
----------
src_dict : dict
Dictionary defining the spatial and spectral properties of
the source that will be injected.
|
entailment
|
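A sketch of injecting a simulated point source, assuming ``gta`` is a configured GTAnalysis instance; the coordinates and spectral values are illustrative.
# Entries omitted from src_dict fall back to the defaults set in simulate_source.
gta.simulate_source({'ra': 83.63, 'dec': 22.01,
                     'SpatialModel': 'PointSource',
                     'Index': 2.0, 'Prefactor': 1e-13})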
def simulate_roi(self, name=None, randomize=True, restore=False):
"""Generate a simulation of the ROI using the current best-fit model
and replace the data counts cube with this simulation. The
simulation is created by generating an array of Poisson random
numbers with expectation values drawn from the model cube of
the binned analysis instance. This function will update the
counts cube both in memory and in the source map file. The
counts cube can be restored to its original state by calling
this method with ``restore`` = True.
Parameters
----------
name : str
Name of the model component to be simulated. If None then
the whole ROI will be simulated.
restore : bool
Restore the data counts cube to its original state.
"""
self.logger.info('Simulating ROI')
self._fitcache = None
if restore:
self.logger.info('Restoring')
self._restore_counts_maps()
self.logger.info('Finished')
return
for c in self.components:
c.simulate_roi(name=name, clear=True, randomize=randomize)
if hasattr(self.like.components[0].logLike, 'setCountsMap'):
self._init_roi_model()
else:
self.write_xml('tmp')
self._like = SummedLikelihood()
for i, c in enumerate(self._components):
c._create_binned_analysis('tmp.xml')
self._like.addComponent(c.like)
self._init_roi_model()
self.load_xml('tmp')
self.logger.info('Finished')
|
Generate a simulation of the ROI using the current best-fit model
and replace the data counts cube with this simulation. The
simulation is created by generating an array of Poisson random
numbers with expectation values drawn from the model cube of
the binned analysis instance. This function will update the
counts cube both in memory and in the source map file. The
counts cube can be restored to its original state by calling
this method with ``restore`` = True.
Parameters
----------
name : str
Name of the model component to be simulated. If None then
the whole ROI will be simulated.
restore : bool
Restore the data counts cube to its original state.
|
entailment
|
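A sketch of the simulate/restore cycle, assuming ``gta`` holds a best-fit model.
gta.simulate_roi()              # replace the counts cube with a Poisson realization
gta.fit()                       # fit the simulated data
gta.simulate_roi(restore=True)  # restore the original counts cube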
def write_model_map(self, model_name, name=None):
"""Save the counts model map to a FITS file.
Parameters
----------
model_name : str
String that will be appended to the name of the output file.
name : str
Name of the component.
Returns
-------
maps : list
A list of counts model maps; the first element is the map
coadded over all components.
"""
maps = [c.write_model_map(model_name, name) for c in self.components]
outfile = os.path.join(self.workdir,
'mcube_%s.fits' % (model_name))
mmap = Map.from_geom(self.geom)
for m in maps:
mmap.coadd(m)
mmap.write(outfile, overwrite=True, conv='fgst-ccube')
return [mmap] + maps
|
Save the counts model map to a FITS file.
Parameters
----------
model_name : str
String that will be appended to the name of the output file.
name : str
Name of the component.
Returns
-------
maps : list
A list of counts model maps; the first element is the map
coadded over all components.
|
entailment
|
def write_weight_map(self, model_name):
"""Save the counts model map to a FITS file.
Parameters
----------
model_name : str
String that will be append to the name of the output file.
Returns
-------
maps : list
A list of weights maps; the first element is the map coadded
over all components.
"""
maps = [c.write_weight_map(model_name) for c in self.components]
outfile = os.path.join(self.workdir,
'wcube_%s.fits' % (model_name))
wmap = Map.from_geom(self.geom)
# FIXME: Should we average weights maps rather than coadding?
for m in maps:
wmap.coadd(m)
wmap.write(outfile, overwrite=True, conv='fgst-ccube')
return [wmap] + maps
|
Save the weights map to a FITS file.
Parameters
----------
model_name : str
String that will be appended to the name of the output file.
Returns
-------
maps : list
A list of weights maps; the first element is the map coadded
over all components.
|
entailment
|
def print_roi(self, loglevel=logging.INFO):
"""Print information about the spectral and spatial properties
of the ROI (sources, diffuse components)."""
self.logger.log(loglevel, '\n' + str(self.roi))
|
Print information about the spectral and spatial properties
of the ROI (sources, diffuse components).
|
entailment
|
def print_params(self, allpars=False, loglevel=logging.INFO):
"""Print information about the model parameters (values,
errors, bounds, scale)."""
pars = self.get_params()
o = '\n'
o += '%4s %-20s%10s%10s%10s%10s%10s%5s\n' % (
'idx', 'parname', 'value', 'error',
'min', 'max', 'scale', 'free')
o += '-' * 80 + '\n'
src_pars = collections.OrderedDict()
for p in pars:
src_pars.setdefault(p['src_name'], [])
src_pars[p['src_name']] += [p]
free_sources = []
for k, v in src_pars.items():
for p in v:
if not p['free']:
continue
free_sources += [k]
for k, v in src_pars.items():
if not allpars and k not in free_sources:
continue
o += '%s\n' % k
for p in v:
o += '%4i %-20.19s' % (p['idx'], p['par_name'])
o += '%10.3g%10.3g' % (p['value'], p['error'])
o += '%10.3g%10.3g%10.3g' % (p['min'], p['max'],
p['scale'])
if p['free']:
o += ' *'
else:
o += ' '
o += '\n'
self.logger.log(loglevel, o)
|
Print information about the model parameters (values,
errors, bounds, scale).
|
entailment
|
def load_roi(self, infile, reload_sources=False, params=None, mask=None):
"""This function reloads the analysis state from a previously
saved instance generated with
`~fermipy.gtanalysis.GTAnalysis.write_roi`.
Parameters
----------
infile : str
reload_sources : bool
Regenerate source maps for non-diffuse sources.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
"""
infile = utils.resolve_path(infile, workdir=self.workdir)
roi_file, roi_data = utils.load_data(infile, workdir=self.workdir)
self.logger.info('Loading ROI file: %s', roi_file)
key_map = {'dfde': 'dnde',
'dfde100': 'dnde100',
'dfde1000': 'dnde1000',
'dfde10000': 'dnde10000',
'dfde_index': 'dnde_index',
'dfde100_index': 'dnde100_index',
'dfde1000_index': 'dnde1000_index',
'dfde10000_index': 'dnde10000_index',
'e2dfde': 'e2dnde',
'e2dfde100': 'e2dnde100',
'e2dfde1000': 'e2dnde1000',
'e2dfde10000': 'e2dnde10000',
'Npred': 'npred',
'Npred_wt': 'npred_wt',
'logLike': 'loglike',
'dlogLike': 'dloglike',
'emin': 'e_min',
'ectr': 'e_ctr',
'emax': 'e_max',
'logemin': 'loge_min',
'logectr': 'loge_ctr',
'logemax': 'loge_max',
'ref_dfde': 'ref_dnde',
'ref_e2dfde': 'ref_e2dnde',
'ref_dfde_emin': 'ref_dnde_e_min',
'ref_dfde_emax': 'ref_dnde_e_max',
}
self._roi_data = utils.update_keys(roi_data['roi'], key_map)
if 'erange' in self._roi_data:
self._roi_data['loge_bounds'] = self._roi_data.pop('erange')
self._loge_bounds = self._roi_data.setdefault('loge_bounds',
self.loge_bounds)
sources = roi_data.pop('sources')
sources = utils.update_keys(sources, key_map)
for k0, v0 in sources.items():
for k, v in defaults.source_flux_output.items():
if k not in v0:
continue
if v[2] == float and isinstance(v0[k], np.ndarray):
sources[k0][k], sources[k0][k + '_err'] \
= v0[k][0], v0[k][1]
self.roi.load_sources(sources.values())
for i, c in enumerate(self.components):
if 'src_expscale' in self._roi_data['components'][i]:
c._src_expscale = copy.deepcopy(self._roi_data['components']
[i]['src_expscale'])
self._create_likelihood(infile)
self.set_energy_range(self.loge_bounds[0], self.loge_bounds[1])
if params is not None:
self.load_parameters_from_yaml(params)
if mask is not None:
self.set_weights_map(mask, update_roi=False)
if reload_sources:
names = [s.name for s in self.roi.sources if not s.diffuse]
self.reload_sources(names, False)
self.logger.info('Finished Loading ROI')
|
This function reloads the analysis state from a previously
saved instance generated with
`~fermipy.gtanalysis.GTAnalysis.write_roi`.
Parameters
----------
infile : str
reload_sources : bool
Regenerate source maps for non-diffuse sources.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
|
entailment
|
def write_roi(self, outfile=None,
save_model_map=False, **kwargs):
"""Write current state of the analysis to a file. This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file. A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.
Parameters
----------
outfile : str
String prefix of the output files. The extension of this
string will be stripped when generating the XML, YAML and
npy filenames.
make_plots : bool
Generate diagnostic plots.
save_model_map : bool
Save the current counts model to a FITS file.
"""
# extract the results in a convenient format
make_plots = kwargs.get('make_plots', False)
save_weight_map = kwargs.get('save_weight_map', False)
if outfile is None:
pathprefix = os.path.join(self.config['fileio']['workdir'],
'results')
elif not os.path.isabs(outfile):
pathprefix = os.path.join(self.config['fileio']['workdir'],
outfile)
else:
pathprefix = outfile
pathprefix = utils.strip_suffix(pathprefix,
['fits', 'yaml', 'npy'])
# pathprefix, ext = os.path.splitext(pathprefix)
prefix = os.path.basename(pathprefix)
xmlfile = pathprefix + '.xml'
fitsfile = pathprefix + '.fits'
npyfile = pathprefix + '.npy'
self.write_xml(xmlfile)
self.write_fits(fitsfile)
if not self.config['gtlike']['use_external_srcmap']:
for c in self.components:
c.like.logLike.saveSourceMaps(str(c.files['srcmap']))
if save_model_map:
self.write_model_map(prefix)
if save_weight_map:
self.write_weight_map(prefix)
o = {}
o['roi'] = copy.deepcopy(self._roi_data)
o['config'] = copy.deepcopy(self.config)
o['version'] = fermipy.__version__
o['stversion'] = fermipy.get_st_version()
o['sources'] = {}
for s in self.roi.sources:
o['sources'][s.name] = copy.deepcopy(s.data)
for i, c in enumerate(self.components):
o['roi']['components'][i][
'src_expscale'] = copy.deepcopy(c.src_expscale)
self.logger.info('Writing %s...', npyfile)
np.save(npyfile, o)
if make_plots:
self.make_plots(prefix, None,
**kwargs.get('plotting', {}))
|
Write current state of the analysis to a file. This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file. A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.
Parameters
----------
outfile : str
String prefix of the output files. The extension of this
string will be stripped when generating the XML, YAML and
npy filenames.
make_plots : bool
Generate diagnostic plots.
save_model_map : bool
Save the current counts model to a FITS file.
|
entailment
|
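A sketch of the save/restore round trip with ``write_roi`` and ``load_roi``, assuming ``gta`` is a configured GTAnalysis instance; the file prefix is illustrative.
gta.write_roi('fit_model', save_model_map=True)
# ... later, in a session created from the same configuration:
gta.load_roi('fit_model.npy', reload_sources=True)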
def make_plots(self, prefix, mcube_map=None, **kwargs):
"""Make diagnostic plots using the current ROI model."""
#mcube_maps = kwargs.pop('mcube_maps', None)
if mcube_map is None:
mcube_map = self.model_counts_map()
plotter = plotting.AnalysisPlotter(self.config['plotting'],
fileio=self.config['fileio'],
logging=self.config['logging'])
plotter.run(self, mcube_map, prefix=prefix, **kwargs)
|
Make diagnostic plots using the current ROI model.
|
entailment
|
def curvature(self, name, **kwargs):
"""Test whether a source shows spectral curvature by comparing
the likelihood ratio of PowerLaw and LogParabola spectral
models.
Parameters
----------
name : str
Source name.
"""
name = self.roi.get_source_by_name(name).name
saved_state = LikelihoodState(self.like)
source = self.components[0].like.logLike.getSource(str(name))
old_spectrum = source.spectrum()
old_pars = copy.deepcopy(self.roi[name].spectral_pars)
old_type = self.roi[name]['SpectrumType']
if old_type != 'PowerLaw':
dnde = self.like[name].spectrum()(pyLike.dArg(1000.))
value, scale = utils.scale_parameter(dnde)
pars0 = {
'Prefactor':
{'value': value, 'scale': scale,
'min': 1E-5, 'max': 1000., 'free': True},
'Index':
{'value': 2.0, 'scale': -1.0, 'min': 0.0,
'max': 5.0, 'free': False},
'Scale':
{'value': 1E3, 'scale': 1.0, 'min': 1.,
'max': 1E6, 'free': False},
}
self.set_source_spectrum(str(name), 'PowerLaw',
spectrum_pars=pars0,
update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_pl = self._fit(loglevel=logging.DEBUG)
prefactor = self._get_param(name, 'Prefactor')
index = self._get_param(name, 'Index')
scale = self._get_param(name, 'Scale')
pars1 = {
'norm': copy.deepcopy(prefactor),
'alpha': copy.deepcopy(index),
'Eb': copy.deepcopy(scale),
}
pars1['alpha']['scale'] *= -1
pars1['alpha']['min'] = -5.0
pars1['alpha']['max'] = 5.0
self.set_source_spectrum(str(name), 'LogParabola',
spectrum_pars=pars1,
update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_lp = self._fit(loglevel=logging.DEBUG)
self.set_source_spectrum(str(name), old_type,
spectrum_pars=old_pars,
update_source=False)
pars2 = {
'Prefactor': copy.deepcopy(prefactor),
'Index1': copy.deepcopy(index),
'Cutoff': {'value': 1000.0, 'scale': 1E3,
'min': 10.0, 'max': 1E4, 'free': True},
'Index2': {'value': 1.0, 'scale': 1.0,
'min': 1.0, 'max': 1.0, 'free': False},
'Scale': copy.deepcopy(scale)
}
self.set_source_spectrum(str(name), 'PLSuperExpCutoff',
spectrum_pars=pars2,
update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_ple = self._fit(loglevel=logging.DEBUG)
# Revert to initial spectral model
self.set_source_spectrum(str(name), old_type,
spectrum_pars=old_pars,
update_source=False)
saved_state.restore()
lp_ts_curv = 2.0 * (fit_lp['loglike'] - fit_pl['loglike'])
ple_ts_curv = 2.0 * (fit_ple['loglike'] - fit_pl['loglike'])
o = MutableNamedTuple(ts_curv=lp_ts_curv,
lp_ts_curv=lp_ts_curv,
ple_ts_curv=ple_ts_curv,
loglike_pl=fit_pl['loglike'],
loglike_lp=fit_lp['loglike'],
loglike_ple=fit_ple['loglike'])
self.logger.info('LogLike_PL: %12.3f LogLike_LP: %12.3f LogLike_PLE: %12.3f',
o.loglike_pl, o.loglike_lp, o.loglike_ple)
self.logger.info('TS_curv: %.3f (LP)', o.lp_ts_curv)
self.logger.info('TS_curv: %.3f (PLE)', o.ple_ts_curv)
return o
|
Test whether a source shows spectral curvature by comparing
the likelihoods of PowerLaw, LogParabola, and PLSuperExpCutoff
spectral models.
Parameters
----------
name : str
Source name.
|
entailment
|
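The curvature test above reduces to a likelihood-ratio statistic between nested spectral models. A minimal self-contained sketch (the log-likelihood values below are hypothetical, purely for illustration) of how the returned TS values relate to the three fits:

# Hypothetical best-fit log-likelihoods for the three spectral models
loglike_pl = -105432.7   # PowerLaw
loglike_lp = -105427.9   # LogParabola
loglike_ple = -105426.5  # PLSuperExpCutoff
# Curvature test statistics, as formed at the end of curvature()
lp_ts_curv = 2.0 * (loglike_lp - loglike_pl)
ple_ts_curv = 2.0 * (loglike_ple - loglike_pl)
print('TS_curv (LP): %.3f' % lp_ts_curv)    # 9.600
print('TS_curv (PLE): %.3f' % ple_ts_curv)  # 12.400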
def bowtie(self, name, fd=None, loge=None):
"""Generate a spectral uncertainty band (bowtie) for the given
source. This will create an uncertainty band on the
differential flux as a function of energy by propagating the
errors on the global fit parameters. Note that this band only
reflects the uncertainty for parameters that are currently
free in the model.
Parameters
----------
name : str
Source name.
fd : FluxDensity
Flux density object. If this parameter is None then one
will be created.
loge : array-like
Sequence of energies in log10(E/MeV) at which the flux band
will be evaluated.
"""
if loge is None:
logemin = self.log_energies[0]
logemax = self.log_energies[-1]
loge = np.linspace(logemin, logemax, 50)
o = {'energies': 10**loge,
'log_energies': loge,
'dnde': np.zeros(len(loge)) * np.nan,
'dnde_lo': np.zeros(len(loge)) * np.nan,
'dnde_hi': np.zeros(len(loge)) * np.nan,
'dnde_err': np.zeros(len(loge)) * np.nan,
'dnde_ferr': np.zeros(len(loge)) * np.nan,
'pivot_energy': np.nan}
try:
if fd is None:
fd = FluxDensity.FluxDensity(self.like, name)
except RuntimeError:
self.logger.error('Failed to create FluxDensity',
exc_info=True)
return o
dnde = [fd.value(10 ** x) for x in loge]
dnde_err = [fd.error(10 ** x) for x in loge]
dnde = np.array(dnde)
dnde_err = np.array(dnde_err)
m = dnde > 0
fhi = np.zeros_like(dnde)
flo = np.zeros_like(dnde)
ferr = np.zeros_like(dnde)
fhi[m] = dnde[m] * (1.0 + dnde_err[m] / dnde[m])
flo[m] = dnde[m] / (1.0 + dnde_err[m] / dnde[m])
ferr[m] = 0.5 * (fhi[m] - flo[m]) / dnde[m]
fhi[~m] = dnde_err[~m]
o['dnde'] = dnde
o['dnde_lo'] = flo
o['dnde_hi'] = fhi
o['dnde_err'] = dnde_err
o['dnde_ferr'] = ferr
try:
o['pivot_energy'] = 10 ** utils.interpolate_function_min(loge, o[
'dnde_ferr'])
except Exception:
self.logger.error('Failed to compute pivot energy',
exc_info=True)
return o
|
Generate a spectral uncertainty band (bowtie) for the given
source. This will create an uncertainty band on the
differential flux as a function of energy by propagating the
errors on the global fit parameters. Note that this band only
reflects the uncertainty for parameters that are currently
free in the model.
Parameters
----------
name : str
Source name.
fd : FluxDensity
Flux density object. If this parameter is None then one
will be created.
loge : array-like
Sequence of energies in log10(E/MeV) at which the flux band
will be evaluated.
|
entailment
|
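The error-band arithmetic inside bowtie() can be reproduced in isolation. A self-contained sketch with synthetic numbers (the power-law spectrum and the 10% errors below are invented for illustration):

import numpy as np

loge = np.linspace(2.0, 5.0, 5)                  # log10(E/MeV) grid
dnde = 1e-11 * (10 ** loge / 1e3) ** -2.0        # synthetic spectrum
dnde_err = 0.1 * dnde                            # synthetic 10% errors
fhi = dnde * (1.0 + dnde_err / dnde)             # upper edge of the band
flo = dnde / (1.0 + dnde_err / dnde)             # lower edge of the band
ferr = 0.5 * (fhi - flo) / dnde                  # fractional half-width
print(ferr)   # the pivot energy is where this fractional error is smallest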
def update_source(self, name, paramsonly=False, reoptimize=False, **kwargs):
"""Update the dictionary for this source.
Parameters
----------
name : str
paramsonly : bool
reoptimize : bool
Re-fit background parameters in likelihood scan.
"""
npts = self.config['gtlike']['llscan_npts']
optimizer = kwargs.get('optimizer', self.config['optimizer'])
sd = self.get_src_model(name, paramsonly, reoptimize, npts,
optimizer=optimizer)
src = self.roi.get_source_by_name(name)
src.update_data(sd)
|
Update the dictionary for this source.
Parameters
----------
name : str
paramsonly : bool
reoptimize : bool
Re-fit background parameters in likelihood scan.
|
entailment
|
def get_src_model(self, name, paramsonly=False, reoptimize=False,
npts=None, **kwargs):
"""Compose a dictionary for a source with the current best-fit
parameters.
Parameters
----------
name : str
paramsonly : bool
Skip computing TS and likelihood profile.
reoptimize : bool
Re-fit background parameters in likelihood scan.
npts : int
Number of points for likelihood scan.
Returns
-------
src_dict : dict
"""
self.logger.debug('Generating source dict for ' + name)
optimizer = kwargs.get('optimizer', self.config['optimizer'])
if npts is None:
npts = self.config['gtlike']['llscan_npts']
name = self.get_source_name(name)
source = self.like[name].src
spectrum = source.spectrum()
normPar = self.like.normPar(name)
src_dict = defaults.make_default_dict(defaults.source_flux_output)
src_dict.update({'name': name,
'pivot_energy': 1000.,
'ts': np.nan,
'loglike': np.nan,
'npred': 0.0,
'npred_wt': 0.0,
'loglike_scan': np.nan * np.ones(npts),
'dloglike_scan': np.nan * np.ones(npts),
'eflux_scan': np.nan * np.ones(npts),
'flux_scan': np.nan * np.ones(npts),
'norm_scan': np.nan * np.ones(npts),
})
src_dict.update(gtutils.gtlike_spectrum_to_vectors(spectrum))
src_dict['spectral_pars'] = gtutils.get_function_pars_dict(spectrum)
# Get Counts Spectrum
src_dict['model_counts'] = self.model_counts_spectrum(
name, summed=True)
src_dict['model_counts_wt'] = self.model_counts_spectrum(
name, summed=True, weighted=True)
# Get NPred
src_dict['npred'] = self.like.NpredValue(str(name))
        # EAC, we need this b/c older versions of the ST don't have the right signature
try:
src_dict['npred_wt'] = self.like.NpredValue(str(name), True)
except (TypeError, NotImplementedError):
src_dict['npred_wt'] = src_dict['npred']
# Get the Model Fluxes
try:
thesrc = self.like[name]
src_dict['flux'] = self.like.flux(name, self.energies[0],
self.energies[-1])
src_dict['flux100'] = self.like.flux(name, 100., 10 ** 5.5)
src_dict['flux1000'] = self.like.flux(name, 1000., 10 ** 5.5)
src_dict['flux10000'] = self.like.flux(name, 10000., 10 ** 5.5)
src_dict['eflux'] = self.like.energyFlux(name,
self.energies[0],
self.energies[-1])
src_dict['eflux100'] = self.like.energyFlux(name, 100.,
10 ** 5.5)
src_dict['eflux1000'] = self.like.energyFlux(name, 1000.,
10 ** 5.5)
src_dict['eflux10000'] = self.like.energyFlux(name, 10000.,
10 ** 5.5)
src_dict['dnde'] = self.like[name].spectrum()(
pyLike.dArg(src_dict['pivot_energy']))
src_dict['dnde100'] = self.like[name].spectrum()(
pyLike.dArg(100.))
src_dict['dnde1000'] = self.like[name].spectrum()(
pyLike.dArg(1000.))
src_dict['dnde10000'] = self.like[name].spectrum()(
pyLike.dArg(10000.))
if normPar.getValue() == 0:
normPar.setValue(1.0)
dnde_index = -get_spectral_index(self.like[name],
src_dict['pivot_energy'])
dnde100_index = -get_spectral_index(self.like[name],
100.)
dnde1000_index = -get_spectral_index(self.like[name],
1000.)
dnde10000_index = -get_spectral_index(self.like[name],
10000.)
normPar.setValue(0.0)
else:
dnde_index = -get_spectral_index(self.like[name],
src_dict['pivot_energy'])
dnde100_index = -get_spectral_index(self.like[name],
100.)
dnde1000_index = -get_spectral_index(self.like[name],
1000.)
dnde10000_index = -get_spectral_index(self.like[name],
10000.)
src_dict['dnde_index'] = dnde_index
src_dict['dnde100_index'] = dnde100_index
src_dict['dnde1000_index'] = dnde1000_index
src_dict['dnde10000_index'] = dnde10000_index
except Exception:
self.logger.error('Failed to update source parameters.',
exc_info=True)
# Only compute TS, errors, and ULs if the source was free in
# the fit
if not self.get_free_source_params(name) or paramsonly:
return src_dict
emax = 10 ** 5.5
try:
src_dict['flux_err'] = self.like.fluxError(name,
self.energies[0],
self.energies[-1])
src_dict['flux100_err'] = self.like.fluxError(name, 100., emax)
src_dict['flux1000_err'] = self.like.fluxError(name, 1000., emax)
src_dict['flux10000_err'] = self.like.fluxError(name, 10000., emax)
src_dict['eflux_err'] = \
self.like.energyFluxError(name, self.energies[0],
self.energies[-1])
src_dict['eflux100_err'] = self.like.energyFluxError(name, 100.,
emax)
src_dict['eflux1000_err'] = self.like.energyFluxError(name, 1000.,
emax)
src_dict['eflux10000_err'] = self.like.energyFluxError(name, 10000.,
emax)
except Exception:
pass
# self.logger.error('Failed to update source parameters.',
# exc_info=True)
lnlp = self.profile_norm(name, savestate=True,
reoptimize=reoptimize, npts=npts,
optimizer=optimizer)
src_dict['loglike_scan'] = lnlp['loglike']
src_dict['dloglike_scan'] = lnlp['dloglike']
src_dict['eflux_scan'] = lnlp['eflux']
src_dict['flux_scan'] = lnlp['flux']
src_dict['norm_scan'] = lnlp['xvals']
src_dict['loglike'] = np.max(lnlp['loglike'])
flux_ul_data = utils.get_parameter_limits(
lnlp['flux'], lnlp['dloglike'])
eflux_ul_data = utils.get_parameter_limits(
lnlp['eflux'], lnlp['dloglike'])
if normPar.getValue() == 0:
normPar.setValue(1.0)
flux = self.like.flux(name, self.energies[0], self.energies[-1])
flux100 = self.like.flux(name, 100., emax)
flux1000 = self.like.flux(name, 1000., emax)
flux10000 = self.like.flux(name, 10000., emax)
eflux = self.like.energyFlux(name, self.energies[0],
self.energies[-1])
eflux100 = self.like.energyFlux(name, 100., emax)
eflux1000 = self.like.energyFlux(name, 1000., emax)
eflux10000 = self.like.energyFlux(name, 10000., emax)
flux100_ratio = flux100 / flux
flux1000_ratio = flux1000 / flux
flux10000_ratio = flux10000 / flux
eflux100_ratio = eflux100 / eflux
eflux1000_ratio = eflux1000 / eflux
eflux10000_ratio = eflux10000 / eflux
normPar.setValue(0.0)
else:
flux100_ratio = src_dict['flux100'] / src_dict['flux']
flux1000_ratio = src_dict['flux1000'] / src_dict['flux']
flux10000_ratio = src_dict['flux10000'] / src_dict['flux']
eflux100_ratio = src_dict['eflux100'] / src_dict['eflux']
eflux1000_ratio = src_dict['eflux1000'] / src_dict['eflux']
eflux10000_ratio = src_dict['eflux10000'] / src_dict['eflux']
src_dict['flux_ul95'] = flux_ul_data['ul']
src_dict['flux100_ul95'] = flux_ul_data['ul'] * flux100_ratio
src_dict['flux1000_ul95'] = flux_ul_data['ul'] * flux1000_ratio
src_dict['flux10000_ul95'] = flux_ul_data['ul'] * flux10000_ratio
src_dict['eflux_ul95'] = eflux_ul_data['ul']
src_dict['eflux100_ul95'] = eflux_ul_data['ul'] * eflux100_ratio
src_dict['eflux1000_ul95'] = eflux_ul_data['ul'] * eflux1000_ratio
src_dict['eflux10000_ul95'] = eflux_ul_data['ul'] * eflux10000_ratio
# Extract covariance matrix
fd = None
try:
fd = FluxDensity.FluxDensity(self.like, name)
src_dict['covar'] = fd.covar
except RuntimeError:
pass
# if ex.message == 'Covariance matrix has not been
# computed.':
# Extract bowtie
if fd and len(src_dict['covar']) and src_dict['covar'].ndim >= 1:
loge = np.linspace(self.log_energies[0],
self.log_energies[-1], 50)
src_dict['model_flux'] = self.bowtie(name, fd=fd, loge=loge)
src_dict['dnde100_err'] = fd.error(100.)
src_dict['dnde1000_err'] = fd.error(1000.)
src_dict['dnde10000_err'] = fd.error(10000.)
src_dict['pivot_energy'] = src_dict['model_flux']['pivot_energy']
e0 = src_dict['pivot_energy']
src_dict['dnde'] = self.like[name].spectrum()(pyLike.dArg(e0))
src_dict['dnde_err'] = fd.error(e0)
if not reoptimize:
src_dict['ts'] = self.like.Ts2(name, reoptimize=reoptimize)
else:
src_dict['ts'] = -2.0 * lnlp['dloglike'][0]
return src_dict
|
Compose a dictionary for a source with the current best-fit
parameters.
Parameters
----------
name : str
paramsonly : bool
Skip computing TS and likelihood profile.
reoptimize : bool
Re-fit background parameters in likelihood scan.
npts : int
Number of points for likelihood scan.
Returns
-------
src_dict : dict
|
entailment
|
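A usage sketch for the method above, assuming `gta` is an already-configured analysis object and the source name is a placeholder; the printed keys are filled in by get_src_model() when the source was free in the fit:

# `gta` and the source name are assumptions for illustration only.
sd = gta.get_src_model('3FGL J0534.5+2201', reoptimize=False)
print(sd['ts'], sd['npred'], sd['loglike'])
print(sd['flux'], sd['flux_err'], sd['flux_ul95'])
print(sd['pivot_energy'], sd['dnde'], sd['dnde_err'])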
    def compute_srcprob(self, xmlfile=None, overwrite=False):
        """Run the gtsrcprob app with the current model or a user-provided XML file"""
        for c in self.components:
            # compute diffuse response, necessary for srcprob
            c._diffrsp_app(xmlfile=xmlfile)
            # compute srcprob
            c._srcprob_app(xmlfile=xmlfile, overwrite=overwrite)
|
Run the gtsrcprob app with the current model or a user-provided XML file
|
entailment
|
def reload_source(self, name):
"""Recompute the source map for a single source in the model.
"""
src = self.roi.get_source_by_name(name)
if hasattr(self.like.logLike, 'loadSourceMap'):
self.like.logLike.loadSourceMap(str(name), True, False)
srcmap_utils.delete_source_map(self.files['srcmap'], name)
self.like.logLike.saveSourceMaps(str(self.files['srcmap']))
self._scale_srcmap(self._src_expscale, check_header=False,
names=[name])
self.like.logLike.buildFixedModelWts()
else:
self.write_xml('tmp')
src = self.delete_source(name)
self.add_source(name, src, free=True)
self.load_xml('tmp')
|
Recompute the source map for a single source in the model.
|
entailment
|
def reload_sources(self, names):
"""Recompute the source map for a list of sources in the model.
"""
try:
self.like.logLike.loadSourceMaps(names, True, True)
# loadSourceMaps doesn't overwrite the header so we need
# to ignore EXPSCALE by setting check_header=False
self._scale_srcmap(self._src_expscale, check_header=False,
names=names)
        except Exception:
for name in names:
self.reload_source(name)
|
Recompute the source map for a list of sources in the model.
|
entailment
|
def add_source(self, name, src_dict, free=None, save_source_maps=True,
use_pylike=True, use_single_psf=False):
"""Add a new source to the model. Source properties
(spectrum, spatial model) are set with the src_dict argument.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or Source object defining the properties of the
source.
free : bool
Initialize the source with the normalization parameter free.
save_source_maps : bool
Write the source map for this source to the source maps file.
use_pylike : bool
use_single_psf : bool
"""
# if self.roi.has_source(name):
# msg = 'Source %s already exists.' % name
# self.logger.error(msg)
# raise Exception(msg)
srcmap_utils.delete_source_map(self.files['srcmap'], name)
src = self.roi[name]
if self.config['gtlike']['expscale'] is not None and \
name not in self._src_expscale:
self._src_expscale[name] = self.config['gtlike']['expscale']
if self._like is None:
return
if not use_pylike:
self._update_srcmap_file([src], True)
pylike_src = self._create_source(src)
# Initialize source as free/fixed
if free is not None:
pylike_src.spectrum().normPar().setFree(free)
if hasattr(pyLike, 'PsfIntegConfig') and \
hasattr(pyLike.PsfIntegConfig, 'set_use_single_psf'):
config = pyLike.BinnedLikeConfig(self.like.logLike.config())
config.psf_integ_config().set_use_single_psf(use_single_psf)
self.like.addSource(pylike_src, config)
else:
self.like.addSource(pylike_src)
self.like.syncSrcParams(str(name))
self.like.logLike.buildFixedModelWts()
if save_source_maps and \
not self.config['gtlike']['use_external_srcmap']:
self.like.logLike.saveSourceMaps(str(self.files['srcmap']))
self.set_exposure_scale(name)
|
Add a new source to the model. Source properties
(spectrum, spatial model) are set with the src_dict argument.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or Source object defining the properties of the
source.
free : bool
Initialize the source with the normalization parameter free.
save_source_maps : bool
Write the source map for this source to the source maps file.
use_pylike : bool
use_single_psf : bool
|
entailment
|
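A sketch of a typical call to add_source(), assuming `gta` is an already-configured analysis object; the source name, position, and spectral values are placeholders:

# All names and numbers below are placeholders for illustration.
src_dict = {'ra': 83.63, 'dec': 22.01,
            'SpatialModel': 'PointSource',
            'SpectrumType': 'PowerLaw',
            'Index': 2.0,
            'Scale': 1000.0,
            'Prefactor': 1e-11}
gta.add_source('new_point_source', src_dict, free=True)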
def _create_source(self, src):
"""Create a pyLikelihood Source object from a
`~fermipy.roi_model.Model` object."""
if src['SpatialType'] == 'SkyDirFunction':
pylike_src = pyLike.PointSource(self.like.logLike.observation())
pylike_src.setDir(src.skydir.ra.deg, src.skydir.dec.deg, False,
False)
elif src['SpatialType'] == 'SpatialMap':
filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
sm = pyLike.SpatialMap(filepath)
pylike_src = pyLike.DiffuseSource(sm,
self.like.logLike.observation(),
False)
elif src['SpatialType'] == 'RadialProfile':
filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
sm = pyLike.RadialProfile(filepath)
sm.setCenter(src['ra'], src['dec'])
pylike_src = pyLike.DiffuseSource(sm,
self.like.logLike.observation(),
False)
elif src['SpatialType'] == 'RadialGaussian':
sm = pyLike.RadialGaussian(src.skydir.ra.deg, src.skydir.dec.deg,
src.spatial_pars['Sigma']['value'])
pylike_src = pyLike.DiffuseSource(sm,
self.like.logLike.observation(),
False)
elif src['SpatialType'] == 'RadialDisk':
sm = pyLike.RadialDisk(src.skydir.ra.deg, src.skydir.dec.deg,
src.spatial_pars['Radius']['value'])
pylike_src = pyLike.DiffuseSource(sm,
self.like.logLike.observation(),
False)
elif src['SpatialType'] == 'MapCubeFunction':
filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
mcf = pyLike.MapCubeFunction2(filepath)
pylike_src = pyLike.DiffuseSource(mcf,
self.like.logLike.observation(),
False)
else:
            raise Exception('Unrecognized spatial type: %s'
                            % src['SpatialType'])
if src['SpectrumType'] == 'FileFunction':
fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
src.spectral_pars)
file_function = pyLike.FileFunction_cast(fn)
filename = str(os.path.expandvars(src['Spectrum_Filename']))
file_function.readFunction(filename)
elif src['SpectrumType'] == 'DMFitFunction':
fn = pyLike.DMFitFunction()
fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
src.spectral_pars, fn)
filename = str(os.path.expandvars(src['Spectrum_Filename']))
fn.readFunction(filename)
else:
fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
src.spectral_pars)
pylike_src.setSpectrum(fn)
pylike_src.setName(str(src.name))
return pylike_src
|
Create a pyLikelihood Source object from a
`~fermipy.roi_model.Model` object.
|
entailment
|
def set_exposure_scale(self, name, scale=None):
"""Set the exposure correction of a source.
Parameters
----------
name : str
Source name.
        scale : float
Exposure scale factor (1.0 = nominal exposure).
"""
name = self.roi.get_source_by_name(name).name
if scale is None and name not in self._src_expscale:
return
elif scale is None:
scale = self._src_expscale.get(name, 1.0)
else:
self._src_expscale[name] = scale
self._scale_srcmap({name: scale})
|
Set the exposure correction of a source.
Parameters
----------
name : str
Source name.
scale : float
Exposure scale factor (1.0 = nominal exposure).
|
entailment
|
def set_edisp_flag(self, name, flag=True):
"""Enable/Disable the energy dispersion correction for a
source."""
src = self.roi.get_source_by_name(name)
name = src.name
self.like[name].src.set_edisp_flag(flag)
|
Enable/Disable the energy dispersion correction for a
source.
|
entailment
|
def set_energy_range(self, logemin, logemax):
"""Set the energy range of the analysis.
Parameters
----------
        logemin : float
Lower end of energy range in log10(E/MeV).
logemax : float
Upper end of energy range in log10(E/MeV).
"""
if logemin is None:
logemin = self.log_energies[0]
if logemax is None:
logemax = self.log_energies[-1]
imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
imax = int(utils.val_to_edge(self.log_energies, logemax)[0])
if imin - imax == 0:
imin = int(len(self.log_energies) - 1)
imax = int(len(self.log_energies) - 1)
klims = self.like.logLike.klims()
if imin != klims[0] or imax != klims[1]:
self.like.selectEbounds(imin, imax)
return np.array([self.log_energies[imin], self.log_energies[imax]])
|
Set the energy range of the analysis.
Parameters
----------
logemin : float
Lower end of energy range in log10(E/MeV).
logemax : float
Upper end of energy range in log10(E/MeV).
|
entailment
|
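A sketch of a call to set_energy_range(), assuming a configured analysis object `gta`; the requested bounds are snapped to the nearest analysis bin edges and the selected log-energies are returned:

# Restrict the fit to roughly 1 GeV - 100 GeV (values are illustrative).
loge_bounds = gta.set_energy_range(3.0, 5.0)
print(loge_bounds)   # log10(E/MeV) of the bin edges actually selected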
def counts_map(self):
"""Return 3-D counts map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
"""
try:
if isinstance(self.like, gtutils.SummedLikelihood):
cmap = self.like.components[0].logLike.countsMap()
p_method = cmap.projection().method()
else:
cmap = self.like.logLike.countsMap()
p_method = cmap.projection().method()
except Exception:
p_method = 0
if p_method == 0: # WCS
z = cmap.data()
z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self.geom), z)
elif p_method == 1: # HPX
z = cmap.data()
z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix))
return HpxNDMap(copy.deepcopy(self.geom), z)
else:
self.logger.error('Did not recognize CountsMap type %i' % p_method,
exc_info=True)
return None
|
Return 3-D counts map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
|
entailment
|
def weight_map(self):
"""Return 3-D weights map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
"""
# EAC we need the try blocks b/c older versions of the ST don't have some of these functions
if isinstance(self.like, gtutils.SummedLikelihood):
cmap = self.like.components[0].logLike.countsMap()
try:
p_method = cmap.projection().method()
except AttributeError:
p_method = 0
try:
if self.like.components[0].logLike.has_weights():
wmap = self.like.components[0].logLike.weightMap()
else:
wmap = None
except Exception:
wmap = None
else:
cmap = self.like.logLike.countsMap()
try:
p_method = cmap.projection().method()
except AttributeError:
p_method = 0
try:
if self.like.logLike.has_weights():
wmap = self.like.logLike.weightMap()
else:
wmap = None
except Exception:
wmap = None
if p_method == 0: # WCS
if wmap is None:
z = np.ones((self.enumbins, self.npix, self.npix))
else:
z = wmap.model()
z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self._geom), z)
elif p_method == 1: # HPX
nhpix = np.max(self.geom.npix)
if wmap is None:
z = np.ones((self.enumbins, nhpix))
else:
z = wmap.model()
z = np.array(z).reshape(self.enumbins, nhpix)
return HpxNDMap(self.geom, z)
else:
self.logger.error('Did not recognize CountsMap type %i' % p_method,
exc_info=True)
return None
|
Return 3-D weights map for this component as a Map object.
Returns
-------
map : `~fermipy.skymap.MapBase`
|
entailment
|
def model_counts_map(self, name=None, exclude=None, use_mask=False):
"""Return the model expectation map for a single source, a set
of sources, or all sources in the ROI. The map will be
computed using the current model parameters.
Parameters
----------
name : str
Parameter that defines the sources for which the model map
will be calculated. If name=None a model map will be
generated for all sources in the model. If name='diffuse'
a map for all diffuse sources will be generated.
exclude : list
Source name or list of source names that will be excluded
from the model map.
use_mask : bool
            Parameter that specifies whether the model counts map should
            include masked pixels (i.e., ones whose weights are <= 0).
Returns
-------
map : `~fermipy.skymap.Map`
A map object containing the counts and WCS projection.
"""
if self.projtype == "WCS":
v = pyLike.FloatVector(self.npix ** 2 * self.enumbins)
elif self.projtype == "HPX":
v = pyLike.FloatVector(np.max(self.geom.npix) * self.enumbins)
else:
raise Exception("Unknown projection type %s", self.projtype)
exclude = utils.arg_to_list(exclude)
names = utils.arg_to_list(name)
excluded_names = []
for i, t in enumerate(exclude):
srcs = self.roi.get_sources_by_name(t)
excluded_names += [s.name for s in srcs]
if not hasattr(self.like.logLike, 'loadSourceMaps'):
# Update fixed model
self.like.logLike.buildFixedModelWts()
# Populate source map hash
self.like.logLike.buildFixedModelWts(True)
elif (name is None or name == 'all') and not exclude:
self.like.logLike.loadSourceMaps()
src_names = []
if (name is None) or (name == 'all'):
src_names = [src.name for src in self.roi.sources]
elif name == 'diffuse':
src_names = [src.name for src in self.roi.sources if
src.diffuse]
else:
srcs = [self.roi.get_source_by_name(t) for t in names]
src_names = [src.name for src in srcs]
# Remove sources in exclude list
src_names = [str(t) for t in src_names if t not in excluded_names]
# EAC we need the try blocks b/c older versions of the ST don't have some of these functions
if len(src_names) == len(self.roi.sources):
try:
self.like.logLike.computeModelMap(v, use_mask)
except (TypeError, NotImplementedError):
self.like.logLike.computeModelMap(v)
elif not hasattr(self.like.logLike, 'setSourceMapImage'):
for s in src_names:
model = self.like.logLike.sourceMap(str(s))
try:
self.like.logLike.updateModelMap(v, model, use_mask)
except (TypeError, NotImplementedError):
self.like.logLike.updateModelMap(v, model)
else:
try:
if hasattr(self.like.logLike, 'has_weights'):
self.like.logLike.computeModelMap(src_names, v, use_mask)
else:
self.like.logLike.computeModelMap(src_names, v)
            except Exception:
vsum = np.zeros(v.size())
for s in src_names:
vtmp = pyLike.FloatVector(v.size())
if hasattr(self.like.logLike, 'has_weights'):
self.like.logLike.computeModelMap(
str(s), vtmp, use_mask)
else:
self.like.logLike.computeModelMap(str(s), vtmp)
vsum += vtmp
v = pyLike.FloatVector(vsum)
if self.projtype == "WCS":
z = np.array(v).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self._geom), z)
elif self.projtype == "HPX":
z = np.array(v).reshape(self.enumbins, np.max(self._geom.npix))
return HpxNDMap(copy.deepcopy(self._geom), z)
else:
            raise Exception(
                "Did not recognize projection type %s" % self.projtype)
|
Return the model expectation map for a single source, a set
of sources, or all sources in the ROI. The map will be
computed using the current model parameters.
Parameters
----------
name : str
Parameter that defines the sources for which the model map
will be calculated. If name=None a model map will be
generated for all sources in the model. If name='diffuse'
a map for all diffuse sources will be generated.
exclude : list
Source name or list of source names that will be excluded
from the model map.
use_mask : bool
Parameter that specifies whether the model counts map should
include masked pixels (i.e., ones whose weights are <= 0).
Returns
-------
map : `~fermipy.skymap.Map`
A map object containing the counts and WCS projection.
|
entailment
|
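Usage sketch for model_counts_map(), assuming a configured analysis object `gta`; the excluded source name is a placeholder:

m_all = gta.model_counts_map()                      # all sources in the model
m_diff = gta.model_counts_map(name='diffuse')       # diffuse components only
m_excl = gta.model_counts_map(exclude=['srcA'])     # everything except 'srcA'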
def model_counts_spectrum(self, name, logemin, logemax, weighted=False):
"""Return the model counts spectrum of a source.
Parameters
----------
name : str
Source name.
"""
        # EAC, we need this b/c older versions of the ST don't have the right signature
try:
cs = np.array(self.like.logLike.modelCountsSpectrum(
str(name), weighted))
except (TypeError, NotImplementedError):
cs = np.array(self.like.logLike.modelCountsSpectrum(str(name)))
imin = utils.val_to_edge(self.log_energies, logemin)[0]
imax = utils.val_to_edge(self.log_energies, logemax)[0]
if imax <= imin:
raise Exception('Invalid energy range.')
return cs[imin:imax]
|
Return the model counts spectrum of a source.
Parameters
----------
name : str
Source name.
|
entailment
|
def setup(self, overwrite=False, **kwargs):
"""Run pre-processing step for this component. This will
generate all of the auxiliary files needed to instantiate a
likelihood object. By default this function will skip any
steps for which the output file already exists.
Parameters
----------
overwrite : bool
Run all pre-processing steps even if the output file of
that step is present in the working directory.
"""
loglevel = kwargs.get('loglevel', self.loglevel)
self.logger.log(loglevel, 'Running setup for component %s',
self.name)
use_external_srcmap = self.config['gtlike']['use_external_srcmap']
# Run data selection
if not use_external_srcmap:
self._select_data(overwrite=overwrite, **kwargs)
# Create LT Cube
if self._ext_ltcube is not None:
self.logger.log(loglevel, 'Using external LT cube.')
else:
self._create_ltcube(overwrite=overwrite, **kwargs)
self.logger.debug('Loading LT Cube %s', self.files['ltcube'])
self._ltc = LTCube.create(self.files['ltcube'])
# Extract tmin, tmax from LT cube
self._tmin = self._ltc.tstart
self._tmax = self._ltc.tstop
self.logger.debug('Creating PSF model')
self._psf = irfs.PSFModel.create(self.roi.skydir, self._ltc,
self.config['gtlike']['irfs'],
self.config['selection']['evtype'],
self.energies)
# Bin data and create exposure cube
if not use_external_srcmap:
self._bin_data(overwrite=overwrite, **kwargs)
self._create_expcube(overwrite=overwrite, **kwargs)
# This is needed in case the exposure map is in HEALPix
hpxhduname = "HPXEXPOSURES"
try:
self._bexp = Map.read(self.files['bexpmap'], hdu=hpxhduname)
except KeyError:
self._bexp = Map.read(self.files['bexpmap'])
# Write ROI XML
self.roi.write_xml(self.files['srcmdl'], self.config['model'])
# Create source maps file
if not use_external_srcmap:
self._create_srcmaps(overwrite=overwrite)
if not self.config['data']['cacheft1'] and os.path.isfile(self.files['ft1']):
self.logger.debug('Deleting FT1 file.')
os.remove(self.files['ft1'])
self.logger.log(loglevel, 'Finished setup for component %s',
self.name)
|
Run pre-processing step for this component. This will
generate all of the auxiliary files needed to instantiate a
likelihood object. By default this function will skip any
steps for which the output file already exists.
Parameters
----------
overwrite : bool
Run all pre-processing steps even if the output file of
that step is present in the working directory.
|
entailment
|
def _scale_srcmap(self, scale_map, check_header=True, names=None):
"""Apply exposure corrections to the source map file.
Parameters
----------
scale_map : dict
Dictionary of exposure corrections.
check_header : bool
Check EXPSCALE header keyword to see if an exposure
correction has already been applied to this source.
names : list, optional
Names of sources to which the exposure correction will be
applied. If None then all sources will be corrected.
"""
srcmap = fits.open(self.files['srcmap'])
for hdu in srcmap[1:]:
if hdu.name not in scale_map:
continue
if names is not None and hdu.name not in names:
continue
scale = scale_map[hdu.name]
if scale < 1e-20:
self.logger.warning(
"The expscale parameter was zero, setting it to 1e-8")
scale = 1e-8
if 'EXPSCALE' in hdu.header and check_header:
old_scale = hdu.header['EXPSCALE']
else:
old_scale = 1.0
hdu.data *= scale / old_scale
hdu.header['EXPSCALE'] = (scale,
'Exposure correction applied to this map')
srcmap.writeto(self.files['srcmap'], overwrite=True)
srcmap.close()
# Force reloading the map from disk
for name in scale_map.keys():
self.like.logLike.eraseSourceMap(str(name))
self.like.logLike.buildFixedModelWts()
|
Apply exposure corrections to the source map file.
Parameters
----------
scale_map : dict
Dictionary of exposure corrections.
check_header : bool
Check EXPSCALE header keyword to see if an exposure
correction has already been applied to this source.
names : list, optional
Names of sources to which the exposure correction will be
applied. If None then all sources will be corrected.
|
entailment
|
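The per-HDU bookkeeping in _scale_srcmap() can be illustrated in isolation: the EXPSCALE keyword records the correction already applied, so a new correction first divides out the old one. A self-contained sketch with made-up numbers:

import numpy as np

data = np.ones((5, 10, 10))    # stand-in for a source-map HDU data array
old_scale = 1.2                # value previously recorded in EXPSCALE
new_scale = 0.9                # correction we want the map to end up with
data *= new_scale / old_scale
# the header would then be updated accordingly:
# hdu.header['EXPSCALE'] = new_scale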
def _make_scaled_srcmap(self):
"""Make an exposure cube with the same binning as the counts map."""
self.logger.info('Computing scaled source map.')
bexp0 = fits.open(self.files['bexpmap_roi'])
bexp1 = fits.open(self.config['gtlike']['bexpmap'])
srcmap = fits.open(self.config['gtlike']['srcmap'])
if bexp0[0].data.shape != bexp1[0].data.shape:
raise Exception('Wrong shape for input exposure map file.')
bexp_ratio = bexp0[0].data / bexp1[0].data
self.logger.info(
'Min/Med/Max exposure correction: %f %f %f' % (np.min(bexp_ratio),
np.median(
bexp_ratio),
np.max(bexp_ratio)))
for hdu in srcmap[1:]:
if hdu.name == 'GTI':
continue
if hdu.name == 'EBOUNDS':
continue
hdu.data *= bexp_ratio
srcmap.writeto(self.files['srcmap'], overwrite=True)
|
Make an exposure cube with the same binning as the counts map.
|
entailment
|
def simulate_roi(self, name=None, clear=True, randomize=True):
"""Simulate the whole ROI or inject a simulation of one or
more model components into the data.
Parameters
----------
name : str
Name of the model component to be simulated. If None then
the whole ROI will be simulated.
clear : bool
Zero the current counts map before injecting the simulation.
randomize : bool
            Fill each pixel with random values drawn from a Poisson
            distribution. If False then fill each pixel with the counts
            expectation value.
"""
cm = self.counts_map()
data = cm.data
m = self.model_counts_map(name)
if clear:
data.fill(0.0)
if randomize:
            if m.data.min() < 0.:
                self.logger.warning('At least one negative value found in model map.'
                                    ' Setting negative values to 0.')
                m.data[m.data < 0.] = 0.0
data += np.random.poisson(m.data).astype(float)
else:
data += m.data
if hasattr(self.like.logLike, 'setCountsMap'):
self.like.logLike.setCountsMap(np.ravel(data))
srcmap_utils.update_source_maps(self.files['srcmap'],
{'PRIMARY': data},
logger=self.logger)
cm.write(self.files['ccubemc'], overwrite=True, conv='fgst-ccube')
|
Simulate the whole ROI or inject a simulation of one or
more model components into the data.
Parameters
----------
name : str
Name of the model component to be simulated. If None then
the whole ROI will be simulated.
clear : bool
Zero the current counts map before injecting the simulation.
randomize : bool
Fill each pixel with random values drawn from a Poisson
distribution. If False then fill each pixel with the counts
expectation value.
|
entailment
|
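The randomization step in simulate_roi() amounts to clipping negative model values and drawing Poisson counts per pixel. A self-contained sketch with synthetic numbers:

import numpy as np

model = np.array([[0.5, 2.0], [-0.1, 4.0]])   # synthetic model expectation
model[model < 0.] = 0.0                       # guard against negative values
counts = np.random.poisson(model).astype(float)
print(counts)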
def write_model_map(self, model_name=None, name=None):
"""Save counts model map to a FITS file.
"""
if model_name is None:
suffix = self.config['file_suffix']
else:
suffix = '_%s%s' % (model_name, self.config['file_suffix'])
self.logger.info('Generating model map for component %s.', self.name)
outfile = os.path.join(self.config['fileio']['workdir'],
'mcube%s.fits' % (suffix))
cmap = self.model_counts_map(name, use_mask=False)
cmap.write(outfile, overwrite=True, conv='fgst-ccube')
return cmap
|
Save counts model map to a FITS file.
|
entailment
|
def write_weight_map(self, model_name=None):
"""Save counts model map to a FITS file.
"""
if model_name is None:
suffix = self.config['file_suffix']
else:
suffix = '_%s%s' % (model_name, self.config['file_suffix'])
        self.logger.info('Generating weights map for component %s.', self.name)
outfile = os.path.join(self.config['fileio']['workdir'],
'wcube%s.fits' % (suffix))
wmap = self.weight_map()
wmap.write(outfile, overwrite=True, conv='fgst-ccube')
return wmap
|
Save the weights map to a FITS file.
|
entailment
|
def _update_srcmap_file(self, sources, overwrite=True):
"""Check the contents of the source map file and generate
source maps for any components that are not present."""
if not os.path.isfile(self.files['srcmap']):
return
hdulist = fits.open(self.files['srcmap'])
hdunames = [hdu.name.upper() for hdu in hdulist]
srcmaps = {}
for src in sources:
if src.name.upper() in hdunames and not overwrite:
continue
self.logger.debug('Creating source map for %s', src.name)
srcmaps[src.name] = self._create_srcmap(src.name, src)
if srcmaps:
self.logger.debug(
'Updating source map file for component %s.', self.name)
srcmap_utils.update_source_maps(self.files['srcmap'], srcmaps,
logger=self.logger)
hdulist.close()
|
Check the contents of the source map file and generate
source maps for any components that are not present.
|
entailment
|
def _create_srcmap(self, name, src, **kwargs):
"""Generate the source map for a source."""
psf_scale_fn = kwargs.get('psf_scale_fn', None)
skydir = src.skydir
spatial_model = src['SpatialModel']
spatial_width = src['SpatialWidth']
xpix, ypix = self.geom.to_image().coord_to_pix(skydir)
exp = self._bexp.interp_by_coord(
(skydir, self._bexp.geom.axes[0].center))
cache = self._srcmap_cache.get(name, None)
if cache is not None:
k = cache.create_map([ypix, xpix])
else:
k = srcmap_utils.make_srcmap(self._psf, exp, spatial_model,
spatial_width,
npix=self.npix, xpix=xpix, ypix=ypix,
cdelt=self.config['binning']['binsz'],
psf_scale_fn=psf_scale_fn,
sparse=True)
return k
|
Generate the source map for a source.
|
entailment
|
def _update_srcmap(self, name, src, **kwargs):
"""Update the source map for an existing source in memory."""
k = self._create_srcmap(name, src, **kwargs)
scale = self._src_expscale.get(name, 1.0)
k *= scale
# Force the source map to be cached
        # FIXME: No longer necessary to force caching in ST after 11-05-02
self.like.logLike.sourceMap(str(name)).model()
self.like.logLike.setSourceMapImage(str(name), np.ravel(k))
self.like.logLike.sourceMap(str(name)).model()
normPar = self.like.normPar(name)
if not normPar.isFree():
self.like.logLike.buildFixedModelWts()
|
Update the source map for an existing source in memory.
|
entailment
|
def generate_model(self, model_name=None, outfile=None):
"""Generate a counts model map from an XML model file using
gtmodel.
Parameters
----------
model_name : str
Name of the model. If no name is given it will use the
baseline model.
outfile : str
Override the name of the output model file.
"""
if model_name is not None:
model_name = os.path.splitext(model_name)[0]
if model_name is None or model_name == '':
srcmdl = self.files['srcmdl']
else:
srcmdl = self.get_model_path(model_name)
if not os.path.isfile(srcmdl):
raise Exception("Model file does not exist: %s", srcmdl)
if model_name is None:
suffix = self.config['file_suffix']
else:
suffix = '_%s%s' % (model_name, self.config['file_suffix'])
outfile = os.path.join(self.config['fileio']['workdir'],
'mcube%s.fits' % (suffix))
# May consider generating a custom source model file
if not os.path.isfile(outfile):
kw = dict(srcmaps=self.files['srcmap'],
srcmdl=srcmdl,
bexpmap=self.files['bexpmap'],
outfile=outfile,
expcube=self.files['ltcube'],
irfs=self.config['gtlike']['irfs'],
evtype=self.config['selection']['evtype'],
edisp=bool(self.config['gtlike']['edisp']),
outtype='ccube',
chatter=self.config['logging']['chatter'])
run_gtapp('gtmodel', self.logger, kw)
else:
self.logger.info('Skipping gtmodel')
|
Generate a counts model map from an XML model file using
gtmodel.
Parameters
----------
model_name : str
Name of the model. If no name is given it will use the
baseline model.
outfile : str
Override the name of the output model file.
|
entailment
|
def write_xml(self, xmlfile):
"""Write the XML model for this analysis component."""
xmlfile = self.get_model_path(xmlfile)
self.logger.info('Writing %s...', xmlfile)
self.like.writeXml(str(xmlfile))
|
Write the XML model for this analysis component.
|
entailment
|
def get_model_path(self, name):
"""Infer the path to the XML model name."""
name, ext = os.path.splitext(name)
ext = '.xml'
xmlfile = name + self.config['file_suffix'] + ext
xmlfile = utils.resolve_path(xmlfile,
workdir=self.config['fileio']['workdir'])
return xmlfile
|
Infer the path to the XML model name.
|
entailment
|
def _tscube_app(self, xmlfile):
"""Run gttscube as an application."""
xmlfile = self.get_model_path(xmlfile)
outfile = os.path.join(self.config['fileio']['workdir'],
'tscube%s.fits' % (self.config['file_suffix']))
kw = dict(cmap=self.files['ccube'],
expcube=self.files['ltcube'],
bexpmap=self.files['bexpmap'],
irfs=self.config['gtlike']['irfs'],
evtype=self.config['selection']['evtype'],
srcmdl=xmlfile,
nxpix=self.npix, nypix=self.npix,
binsz=self.config['binning']['binsz'],
xref=float(self.roi.skydir.ra.deg),
yref=float(self.roi.skydir.dec.deg),
proj=self.config['binning']['proj'],
stlevel=0,
coordsys=self.config['binning']['coordsys'],
outfile=outfile)
run_gtapp('gttscube', self.logger, kw)
|
Run gttscube as an application.
|
entailment
|
    def _diffrsp_app(self, xmlfile=None, **kwargs):
"""
Compute the diffuse response
"""
loglevel = kwargs.get('loglevel', self.loglevel)
        self.logger.log(loglevel, 'Computing diffuse response for component %s.',
self.name)
# set the srcmdl
srcmdl_file = self.files['srcmdl']
if xmlfile is not None:
srcmdl_file = self.get_model_path(xmlfile)
kw = dict(evfile=self.files['ft1'],
scfile=self.data_files['scfile'],
                  irfs=self.config['gtlike']['irfs'],
                  evtype=self.config['selection']['evtype'],
                  srcmdl=srcmdl_file)
run_gtapp('gtdiffrsp', self.logger, kw, loglevel=loglevel)
return
|
Compute the diffuse response
|
entailment
|
    def _srcprob_app(self, xmlfile=None, overwrite=False, **kwargs):
"""
Run srcprob for an analysis component as an application
"""
loglevel = kwargs.get('loglevel', self.loglevel)
self.logger.log(loglevel, 'Computing src probability for component %s.',
self.name)
# set the srcmdl
srcmdl_file = self.files['srcmdl']
if xmlfile is not None:
srcmdl_file = self.get_model_path(xmlfile)
# set the outfile
# it's defined here and not in self.files dict
# so that it is copied with the stage_output module
# even if savefits is False
outfile = os.path.join(self.workdir,
'ft1_srcprob{0[file_suffix]:s}.fits'.format(self.config))
kw = dict(evfile=self.files['ft1'],
scfile=self.data_files['scfile'],
                  outfile=outfile,
                  irfs=self.config['gtlike']['irfs'],
                  srcmdl=srcmdl_file)
self.logger.debug(kw)
# run gtapp for the srcprob
if os.path.isfile(outfile) and not overwrite:
self.logger.info('Skipping gtsrcprob')
else:
run_gtapp('gtsrcprob', self.logger, kw, loglevel=loglevel)
|
Run srcprob for an analysis component as an application
|
entailment
|
def purge_dict(idict):
"""Remove null items from a dictionary """
odict = {}
for key, val in idict.items():
if is_null(val):
continue
odict[key] = val
return odict
|
Remove null items from a dictionary
|
entailment
|
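A self-contained illustration of the behaviour of purge_dict(); the stand-in is_null() defined here only mimics the package helper used above:

def is_null(val):
    # Stand-in for the package helper; treats None, 'none' and '' as null.
    return val is None or val in ('none', '')

def purge_dict(idict):
    """Remove null items from a dictionary."""
    return {key: val for key, val in idict.items() if not is_null(val)}

print(purge_dict({'a': 1, 'b': None, 'c': 'none', 'd': 'keep'}))
# -> {'a': 1, 'd': 'keep'}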
def main(cls):
"""Hook to run this `Chain` from the command line """
chain = cls.create()
args = chain._run_argparser(sys.argv[1:])
chain._run_chain(sys.stdout, args.dry_run)
chain._finalize(args.dry_run)
|
Hook to run this `Chain` from the command line
|
entailment
|
def _latch_file_info(self):
"""Internal function to update the dictionaries
keeping track of input and output files
"""
self._map_arguments(self.args)
self.files.latch_file_info(self.args)
self.sub_files.file_dict.clear()
self.sub_files.update(self.files.file_dict)
for link in self._links.values():
self.sub_files.update(link.files.file_dict)
self.sub_files.update(link.sub_files.file_dict)
|
Internal function to update the dictionaries
keeping track of input and output files
|
entailment
|
def _set_link(self, linkname, cls, **kwargs):
"""Transfer options kwargs to a `Link` object,
        optionally building the `Link` if needed.
Parameters
----------
linkname : str
Unique name of this particular link
cls : type
Type of `Link` being created or managed
"""
val_copy = purge_dict(kwargs.copy())
sub_link_prefix = val_copy.pop('link_prefix', '')
link_prefix = self.link_prefix + sub_link_prefix
create_args = dict(linkname=linkname,
link_prefix=link_prefix,
job_archive=val_copy.pop('job_archive', None),
file_stage=val_copy.pop('file_stage', None))
job_args = val_copy
if linkname in self._links:
link = self._links[linkname]
link.update_args(job_args)
else:
link = cls.create(**create_args)
self._links[link.linkname] = link
logfile_default = os.path.join('logs', '%s.log' % link.full_linkname)
logfile = kwargs.setdefault('logfile', logfile_default)
link._register_job(JobDetails.topkey, job_args,
logfile, status=JobStatus.unknown)
return link
|
Transfer options kwargs to a `Link` object,
optionally building the `Link` if needed.
Parameters
----------
linkname : str
Unique name of this particular link
cls : type
Type of `Link` being created or managed
|
entailment
|
def _set_links_job_archive(self):
"""Pass self._job_archive along to links"""
for link in self._links.values():
link._job_archive = self._job_archive
|
Pass self._job_archive along to links
|
entailment
|
def _run_chain(self,
stream=sys.stdout,
dry_run=False,
stage_files=True,
force_run=False,
resubmit_failed=False):
"""Run all the links in the chain
Parameters
-----------
stream : `file`
            Stream to print to; must have a 'write' method.
dry_run : bool
Print commands but do not run them
stage_files : bool
Stage files to and from the scratch area
force_run : bool
Run jobs, even if they are marked as done
resubmit_failed : bool
Resubmit failed jobs
"""
self._set_links_job_archive()
failed = False
if self._file_stage is not None:
input_file_mapping, output_file_mapping = self._map_scratch_files(
self.sub_files)
if stage_files:
self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)
self._file_stage.make_scratch_dirs(
output_file_mapping, dry_run)
self._stage_input_files(input_file_mapping, dry_run)
for link in self._links.values():
logfile = os.path.join('logs', "%s.log" % link.full_linkname)
link._archive_self(logfile, status=JobStatus.unknown)
key = JobDetails.make_fullkey(link.full_linkname)
if hasattr(link, 'check_status'):
link.check_status(stream, no_wait=True,
check_once=True, do_print=False)
else:
pass
link_status = link.check_job_status(key)
if link_status in [JobStatus.done]:
if not force_run:
print ("Skipping done link", link.full_linkname)
continue
elif link_status in [JobStatus.running]:
if not force_run and not resubmit_failed:
print ("Skipping running link", link.full_linkname)
continue
elif link_status in [JobStatus.failed,
JobStatus.partial_failed]:
if not resubmit_failed:
print ("Skipping failed link", link.full_linkname)
continue
print ("Running link ", link.full_linkname)
link.run_with_log(dry_run=dry_run, stage_files=False,
resubmit_failed=resubmit_failed)
link_status = link.check_jobs_status()
link._set_status_self(status=link_status)
if link_status in [JobStatus.failed, JobStatus.partial_failed]:
print ("Stoping chain execution at failed link %s" %
link.full_linkname)
failed = True
break
# elif link_status in [JobStatus.partial_failed]:
# print ("Resubmitting partially failed link %s" %
# link.full_linkname)
# link.run_with_log(dry_run=dry_run, stage_files=False,
# resubmit_failed=resubmit_failed)
# link_status = link.check_jobs_status()
# link._set_status_self(status=link_status)
# if link_status in [JobStatus.partial_failed]:
# print ("Stoping chain execution: resubmission failed %s" %
# link.full_linkname)
# failed = True
# break
if self._file_stage is not None and stage_files and not failed:
self._stage_output_files(output_file_mapping, dry_run)
chain_status = self.check_links_status()
print ("Chain status: %s" % (JOB_STATUS_STRINGS[chain_status]))
if chain_status == 5:
job_status = 0
else:
job_status = -1
self._write_status_to_log(job_status, stream)
self._set_status_self(status=chain_status)
if self._job_archive:
self._job_archive.file_archive.update_file_status()
self._job_archive.write_table_file()
|
Run all the links in the chain
Parameters
-----------
stream : `file`
Stream to print to; must have a 'write' method.
dry_run : bool
Print commands but do not run them
stage_files : bool
Stage files to and from the scratch area
force_run : bool
Run jobs, even if they are marked as done
resubmit_failed : bool
Resubmit failed jobs
|
entailment
|
def clear_jobs(self, recursive=True):
"""Clear a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
for link in self._links.values():
link.clear_jobs(recursive)
self.jobs.clear()
|
Clear a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
|
entailment
|
def get_jobs(self, recursive=True):
"""Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
ret_dict = self.jobs.copy()
for link in self._links.values():
ret_dict.update(link.get_jobs(recursive))
return ret_dict
return self.jobs
|
Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
|
entailment
|
def missing_input_files(self):
"""Make and return a dictionary of the missing input files.
This returns a dictionary mapping
filepath to list of `Link` that use the file as input.
"""
ret_dict = OrderedDict()
for link in self._links.values():
link_dict = link.missing_input_files()
for key, value in link_dict.items():
try:
ret_dict[key] += value
except KeyError:
ret_dict[key] = value
return ret_dict
|
Make and return a dictionary of the missing input files.
This returns a dictionary mapping
filepath to list of `Link` that use the file as input.
|
entailment
|
def check_links_status(self,
fail_running=False,
fail_pending=False):
""""Check the status of all the jobs run from the
`Link` objects in this `Chain` and return a status
flag that summarizes that.
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
status : `JobStatus`
            Job status flag that summarizes the status of all the jobs.
"""
status_vector = JobStatusVector()
for link in self._links.values():
key = JobDetails.make_fullkey(link.full_linkname)
link_status = link.check_job_status(key,
fail_running=fail_running,
fail_pending=fail_pending)
status_vector[link_status] += 1
return status_vector.get_status()
|
Check the status of all the jobs run from the
`Link` objects in this `Chain` and return a status
flag that summarizes them.
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
status : `JobStatus`
Job status flag that summarizes the status of all the jobs.
|
entailment
|
def run(self, stream=sys.stdout, dry_run=False,
stage_files=True, resubmit_failed=False):
"""Runs this `Chain`.
Parameters
-----------
stream : `file`
            Stream that this `Link` will print to; must have a 'write' method.
dry_run : bool
Print command but do not run it.
stage_files : bool
Copy files to and from scratch staging area.
resubmit_failed : bool
Flag for sub-classes to resubmit failed jobs.
"""
self._run_chain(stream, dry_run, stage_files,
resubmit_failed=resubmit_failed)
|
Runs this `Chain`.
Parameters
-----------
stream : `file`
Stream that this `Link` will print to,
Must have 'write' function
dry_run : bool
Print command but do not run it.
stage_files : bool
Copy files to and from scratch staging area.
resubmit_failed : bool
Flag for sub-classes to resubmit failed jobs.
|
entailment
|
def update_args(self, override_args):
"""Update the argument used to invoke the application
Note that this will also update the dictionary of input
and output files.
Parameters
-----------
override_args : dict
dictionary passed to the links
"""
self.args = extract_arguments(override_args, self.args)
self._map_arguments(self.args)
scratch_dir = self.args.get('scratch', None)
if is_not_null(scratch_dir):
self._file_stage = FileStageManager(scratch_dir, '.')
for link in self._links.values():
link._set_file_stage(self._file_stage)
self._latch_file_info()
|
Update the argument used to invoke the application
Note that this will also update the dictionary of input
and output files.
Parameters
-----------
override_args : dict
dictionary passed to the links
|
entailment
|
def print_status(self, indent="", recurse=False):
"""Print a summary of the job status for each `Link` in this `Chain`"""
print ("%s%30s : %15s : %20s" %
(indent, "Linkname", "Link Status", "Jobs Status"))
for link in self._links.values():
if hasattr(link, 'check_status'):
status_vect = link.check_status(
stream=sys.stdout, no_wait=True, do_print=False)
else:
status_vect = None
key = JobDetails.make_fullkey(link.full_linkname)
link_status = JOB_STATUS_STRINGS[link.check_job_status(key)]
if status_vect is None:
jobs_status = JOB_STATUS_STRINGS[link.check_jobs_status()]
else:
jobs_status = status_vect
print ("%s%30s : %15s : %20s" %
(indent, link.linkname, link_status, jobs_status))
if hasattr(link, 'print_status') and recurse:
print ("---------- %30s -----------" % link.linkname)
link.print_status(indent + " ", recurse=True)
print ("------------------------------------------------")
|
Print a summary of the job status for each `Link` in this `Chain`
|
entailment
|
def print_summary(self, stream=sys.stdout, indent="", recurse_level=2):
"""Print a summary of the activity done by this `Chain`.
Parameters
-----------
stream : `file`
Stream to print to, must have 'write' method.
indent : str
Indentation at start of line
recurse_level : int
Number of recursion levels to print
"""
Link.print_summary(self, stream, indent, recurse_level)
if recurse_level > 0:
recurse_level -= 1
indent += " "
for link in self._links.values():
stream.write("\n")
link.print_summary(stream, indent, recurse_level)
|
Print a summary of the activity done by this `Chain`.
Parameters
-----------
stream : `file`
Stream to print to, must have 'write' method.
indent : str
Indentation at start of line
recurse_level : int
Number of recursion levels to print
|
entailment
|
def register_classes():
"""Register these classes with the `LinkFactory` """
Gtlink_exphpsun.register_class()
Gtlink_suntemp.register_class()
Gtexphpsun_SG.register_class()
Gtsuntemp_SG.register_class()
SunMoonChain.register_class()
|
Register these classes with the `LinkFactory`
|
entailment
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
mktime = args['mktimefilter']
base_config = dict(nxpix=args['nxpix'],
nypix=args['nypix'],
binsz=args['binsz'])
for comp in components:
zcut = "zmax%i" % comp.zmax
key = comp.make_key('{ebin_name}_{evtype_name}')
name_keys = dict(zcut=zcut,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
irf_ver=NAME_FACTORY.irf_ver(),
coordsys=comp.coordsys,
mktime=mktime,
fullpath=True)
outfile = NAME_FACTORY.bexpcube(**name_keys)
ltcube = NAME_FACTORY.ltcube(**name_keys)
full_config = base_config.copy()
full_config.update(dict(infile=ltcube,
outfile=outfile,
irfs=NAME_FACTORY.irfs(**name_keys),
evtype=comp.evtype,
emin=comp.emin,
emax=comp.emax,
enumbins=comp.enumbins,
logfile=make_nfs_path(outfile.replace('.fits', '.log'))))
job_configs[key] = full_config
return job_configs
|
Hook to build job configurations
|
entailment
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
mktime = args['mktimefilter']
for comp in components:
zcut = "zmax%i" % comp.zmax
key = comp.make_key('{ebin_name}_{evtype_name}')
name_keys = dict(zcut=zcut,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
irf_ver=NAME_FACTORY.irf_ver(),
mktime=mktime,
fullpath=True)
outfile = NAME_FACTORY.bexpcube_sun(**name_keys)
ltcube_sun = NAME_FACTORY.ltcube_sun(**name_keys)
job_configs[key] = dict(infile=NAME_FACTORY.ltcube_sun(**name_keys),
outfile=outfile,
irfs=NAME_FACTORY.irfs(**name_keys),
evtype=comp.evtype,
emin=comp.emin,
emax=comp.emax,
enumbins=comp.enumbins,
logfile=make_nfs_path(outfile.replace('.fits', '.log')))
return job_configs
|
Hook to build job configurations
|
entailment
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
# FIXME
mktime = args['mktimefilter']
for comp in components:
for sourcekey in args['sourcekeys']:
zcut = "zmax%i" % comp.zmax
key = comp.make_key('{ebin_name}_{evtype_name}') + "_%s" % sourcekey
name_keys = dict(zcut=zcut,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
irf_ver=NAME_FACTORY.irf_ver(),
sourcekey=sourcekey,
mktime=mktime,
coordsys=comp.coordsys,
fullpath=True)
outfile = NAME_FACTORY.template_sunmoon(**name_keys)
job_configs[key] = dict(expsun=NAME_FACTORY.bexpcube_sun(**name_keys),
avgexp=NAME_FACTORY.bexpcube(**name_keys),
sunprof=NAME_FACTORY.angprofile(**name_keys),
cmap='none',
outfile=outfile,
irfs=NAME_FACTORY.irfs(**name_keys),
evtype=comp.evtype,
emin=comp.emin,
emax=comp.emax,
enumbins=comp.enumbins,
logfile=outfile.replace('.fits', '.log'))
return job_configs
|
Hook to build job configurations
|
entailment
|
def _map_arguments(self, input_dict):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
config_yaml = input_dict['config']
config_dict = load_yaml(config_yaml)
data = config_dict.get('data')
comp = config_dict.get('comp')
sourcekeys = config_dict.get('sourcekeys')
mktimefilter = config_dict.get('mktimefilter')
self._set_link('expcube2', Gtexpcube2wcs_SG,
comp=comp, data=data,
mktimefilter=mktimefilter)
self._set_link('exphpsun', Gtexphpsun_SG,
comp=comp, data=data,
mktimefilter=mktimefilter)
self._set_link('suntemp', Gtsuntemp_SG,
comp=comp, data=data,
mktimefilter=mktimefilter,
sourcekeys=sourcekeys)
|
Map from the top-level arguments to the arguments provided to
the individual links
|
entailment
|
def get_component_info(self, comp):
"""Return the information about sub-component specific to a particular data selection
Parameters
----------
comp : `binning.Component` object
Specifies the sub-component
Returns `ModelComponentInfo` object
"""
if self.components is None:
raise ValueError(
'Model component %s does not have sub-components' % self.sourcekey)
if self.moving:
comp_key = "zmax%i" % (comp.zmax)
elif self.selection_dependent:
comp_key = comp.make_key('{ebin_name}_{evtype_name}')
else:
raise ValueError(
'Model component %s is not moving or selection dependent' % self.sourcekey)
return self.components[comp_key]
|
Return the information about sub-component specific to a particular data selection
Parameters
----------
comp : `binning.Component` object
Specifies the sub-component
Returns `ModelComponentInfo` object
|
entailment
|
def add_component_info(self, compinfo):
"""Add sub-component specific information to a particular data selection
Parameters
----------
compinfo : `ModelComponentInfo` object
Sub-component being added
"""
if self.components is None:
self.components = {}
self.components[compinfo.comp_key] = compinfo
|
Add sub-component specific information to a particular data selection
Parameters
----------
compinfo : `ModelComponentInfo` object
Sub-component being added
|
entailment
|
def clone_and_merge_sub(self, key):
"""Clones self and merges clone with sub-component specific information
Parameters
----------
key : str
Key specifying which sub-component
Returns `ModelComponentInfo` object
"""
new_comp = copy.deepcopy(self)
#sub_com = self.components[key]
new_comp.components = None
new_comp.comp_key = key
return new_comp
|
Clones self and merges clone with sub-component specific information
Parameters
----------
key : str
Key specifying which sub-component
Returns `ModelComponentInfo` object
|
entailment
|
def add_columns(t0, t1):
"""Add columns of table t1 to table t0."""
for colname in t1.colnames:
col = t1.columns[colname]
if colname in t0.columns:
continue
        new_col = Column(name=col.name, length=len(t0), dtype=col.dtype)  # shape=col.shape not propagated
t0.add_column(new_col)
|
Add columns of table t1 to table t0.
|
entailment
|
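A minimal usage sketch for add_columns, assuming the function defined above is in scope; the table contents are invented.
from astropy.table import Table

t0 = Table({'name': ['src_a', 'src_b'], 'flux': [1.0e-9, 2.5e-9]})
t1 = Table({'flux': [3.0e-9], 'ts': [25.0]})

add_columns(t0, t1)   # adds an empty 'ts' column; 'flux' is skipped, t0 already has it
print(t0.colnames)    # ['name', 'flux', 'ts']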
def join_tables(left, right, key_left, key_right,
cols_right=None):
"""Perform a join of two tables.
Parameters
----------
left : `~astropy.Table`
Left table for join.
right : `~astropy.Table`
Right table for join.
key_left : str
Key used to match elements from ``left`` table.
key_right : str
Key used to match elements from ``right`` table.
cols_right : list
Subset of columns from ``right`` table that will be appended
to joined table.
"""
right = right.copy()
if cols_right is None:
cols_right = right.colnames
else:
cols_right = [c for c in cols_right if c in right.colnames]
if key_left != key_right:
right[key_right].name = key_left
if key_left not in cols_right:
cols_right += [key_left]
out = join(left, right[cols_right], keys=key_left,
join_type='left')
for col in out.colnames:
if out[col].dtype.kind in ['S', 'U']:
out[col].fill_value = ''
elif out[col].dtype.kind in ['i']:
out[col].fill_value = 0
else:
out[col].fill_value = np.nan
return out.filled()
|
Perform a join of two tables.
Parameters
----------
left : `~astropy.Table`
Left table for join.
right : `~astropy.Table`
Right table for join.
key_left : str
Key used to match elements from ``left`` table.
key_right : str
Key used to match elements from ``right`` table.
cols_right : list
Subset of columns from ``right`` table that will be appended
to joined table.
|
entailment
|
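A hedged example of join_tables matching a source list against a small catalog on differently named key columns; all table contents are invented.
from astropy.table import Table

left = Table({'Source_Name': ['src_a', 'src_b'], 'ts': [49.0, 16.0]})
right = Table({'name': ['src_a'], 'CLASS': ['psr'], 'redshift': [0.35]})

out = join_tables(left, right, 'Source_Name', 'name',
                  cols_right=['CLASS', 'redshift'])
# Unmatched rows are filled according to dtype: '' for strings,
# 0 for integers, NaN for floats.
print(out['CLASS'][1], out['redshift'][1])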
def strip_columns(tab):
"""Strip whitespace from string columns."""
for colname in tab.colnames:
if tab[colname].dtype.kind in ['S', 'U']:
tab[colname] = np.core.defchararray.strip(tab[colname])
|
Strip whitespace from string columns.
|
entailment
|
def row_to_dict(row):
"""Convert a table row to a dictionary."""
o = {}
for colname in row.colnames:
if isinstance(row[colname], np.string_) and row[colname].dtype.kind in ['S', 'U']:
o[colname] = str(row[colname])
else:
o[colname] = row[colname]
return o
|
Convert a table row to a dictionary.
|
entailment
|
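A short sketch combining strip_columns and row_to_dict on a table with padded string entries, assuming both helpers above are in scope; the data are illustrative only.
from astropy.table import Table

tab = Table({'name': ['src_a   ', 'src_b   '], 'ts': [49.0, 16.0]})
strip_columns(tab)            # trailing whitespace removed in place
first = row_to_dict(tab[0])   # e.g. {'name': 'src_a', 'ts': 49.0}
print(first)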
def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
obs = BinnedAnalysis.BinnedObs(irfs=args.irfs,
expCube=args.expcube,
srcMaps=args.srcmaps,
binnedExpMap=args.bexpmap)
like = BinnedAnalysis.BinnedAnalysis(obs,
optimizer='MINUIT',
srcModel=GtMergeSrcmaps.NULL_MODEL,
wmap=None)
like.logLike.set_use_single_fixed_map(False)
print("Reading xml model from %s" % args.srcmdl)
source_factory = pyLike.SourceFactory(obs.observation)
source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory, False, True, True)
strv = pyLike.StringVector()
source_factory.fetchSrcNames(strv)
source_names = [strv[i] for i in range(strv.size())]
missing_sources = []
srcs_to_merge = []
for source_name in source_names:
try:
source = source_factory.releaseSource(source_name)
# EAC, add the source directly to the model
like.logLike.addSource(source)
srcs_to_merge.append(source_name)
except KeyError:
missing_sources.append(source_name)
comp = like.mergeSources(args.merged, source_names, 'ConstantValue')
like.logLike.getSourceMap(comp.getName())
print("Merged %i sources into %s" % (len(srcs_to_merge), comp.getName()))
if missing_sources:
print("Missed sources: ", missing_sources)
print("Writing output source map file %s" % args.outfile)
like.logLike.saveSourceMaps(args.outfile, False, False)
if args.gzip:
os.system("gzip -9 %s" % args.outfile)
print("Writing output xml file %s" % args.outxml)
like.writeXml(args.outxml)
|
Run this analysis
|
entailment
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
ret_dict = make_catalog_comp_dict(sources=args['library'], basedir='.')
comp_info_dict = ret_dict['comp_info_dict']
for split_ver, split_dict in comp_info_dict.items():
for source_key, source_dict in split_dict.items():
full_key = "%s_%s" % (split_ver, source_key)
merged_name = "%s_%s" % (source_dict.catalog_info.catalog_name, source_key)
if source_dict.model_type != 'CompositeSource':
continue
for comp in components:
zcut = "zmax%i" % comp.zmax
key = "%s_%s" % (full_key, comp.make_key('{ebin_name}_{evtype_name}'))
name_keys = dict(zcut=zcut,
sourcekey=full_key,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
coordsys=comp.coordsys,
mktime='none',
irf_ver=NAME_FACTORY.irf_ver())
nested_name_keys = dict(zcut=zcut,
sourcekey=source_dict.catalog_info.catalog_name,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
coordsys=comp.coordsys,
mktime='none',
irf_ver=NAME_FACTORY.irf_ver())
outfile = NAME_FACTORY.srcmaps(**name_keys)
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[key] = dict(srcmaps=NAME_FACTORY.srcmaps(**nested_name_keys),
expcube=NAME_FACTORY.ltcube(**name_keys),
irfs=NAME_FACTORY.irfs(**name_keys),
bexpmap=NAME_FACTORY.bexpcube(**name_keys),
srcmdl=NAME_FACTORY.srcmdl_xml(**name_keys),
merged=merged_name,
outfile=outfile,
outxml=NAME_FACTORY.nested_srcmdl_xml(**name_keys),
logfile=logfile)
return job_configs
|
Hook to build job configurations
|
entailment
|
def check_log(logfile, exited='Exited with exit code',
successful='Successfully completed', exists=True):
""" Often logfile doesn't exist because the job hasn't begun
to run. It is unclear what you want to do in that case...
Parameters
----------
logfile : str
String with path to logfile
exists : bool
Is the logfile required to exist
exited : str
String in logfile used to determine if a job exited.
successful : str
String in logfile used to determine if a job succeeded.
"""
if not os.path.exists(logfile):
return not exists
    text = open(logfile).read()
    if exited in text:
        return 'Exited'
    elif successful in text:
        return 'Successful'
    else:
        return 'None'
|
Often logfile doesn't exist because the job hasn't begun
to run. It is unclear what you want to do in that case...
Parameters
----------
logfile : str
String with path to logfile
exists : bool
Is the logfile required to exist
exited : str
String in logfile used to determine if a job exited.
successful : str
String in logfile used to determine if a job succeeded.
|
entailment
|
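A hedged usage sketch for check_log; the log path is a placeholder and the branches mirror the possible return values above (note the file-missing case returns a bool rather than a string).
status = check_log('logs/coadd_E0_PSF3.log', exists=False)
if status == 'Successful':
    print('job completed')
elif status == 'Exited':
    print('job failed; consider resubmitting')
elif status == 'None':
    print('job still running')
else:
    # Boolean return: the log file does not exist yet; with
    # exists=False that is not treated as an error.
    print('no log file found')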
def dispatch_job(jobname, exe, args, opts, batch_opts, dry_run=True):
"""Dispatch an LSF job.
Parameters
----------
exe : str
Execution string.
args : list
Positional arguments.
opts : dict
Dictionary of command-line options.
"""
batch_opts.setdefault('W', 300)
batch_opts.setdefault('R', 'rhel60 && scratch > 10')
cmd_opts = ''
for k, v in opts.items():
if isinstance(v, list):
cmd_opts += ' '.join(['--%s=%s' % (k, t) for t in v])
elif isinstance(v, bool) and v:
cmd_opts += ' --%s ' % (k)
elif isinstance(v, bool):
continue
elif v is not None:
cmd_opts += ' --%s=\"%s\" ' % (k, v)
bash_script = "{exe} {args} {opts}"
scriptexe = jobname + '.sh'
with open(os.path.join(scriptexe), 'wt') as f:
f.write(bash_script.format(exe=exe, args=' '.join(args),
opts=cmd_opts))
batch_optstr = parse_lsf_opts(**batch_opts)
batch_cmd = 'bsub %s ' % (batch_optstr)
batch_cmd += ' bash %s' % scriptexe
print(batch_cmd)
if not dry_run:
os.system(batch_cmd)
|
Dispatch an LSF job.
Parameters
----------
exe : str
Execution string.
args : list
Positional arguments.
opts : dict
Dictionary of command-line options.
|
entailment
|
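An illustrative dry run of dispatch_job; the executable, arguments, and options are invented, and the exact bsub string depends on parse_lsf_opts.
dispatch_job('coadd_E0_PSF3',
             exe='fermipy-coadd',
             args=['@filelist.txt'],
             opts=dict(output='ccube_E0_PSF3.fits', clobber=True),
             batch_opts=dict(W=120),
             dry_run=True)
# Writes coadd_E0_PSF3.sh containing roughly
#   fermipy-coadd @filelist.txt --output="ccube_E0_PSF3.fits" --clobber
# and prints (without submitting) the bsub command that would run it.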
def _make_input_file_list(binnedfile, num_files):
"""Make the list of input files for a particular energy bin X psf type """
outdir_base = os.path.abspath(os.path.dirname(binnedfile))
outbasename = os.path.basename(binnedfile)
filelist = ""
for i in range(num_files):
split_key = "%06i" % i
output_dir = os.path.join(outdir_base, split_key)
filepath = os.path.join(output_dir,
outbasename.replace('.fits', '_%s.fits' % split_key))
filelist += ' %s' % filepath
return filelist
|
Make the list of input files for a particular energy bin X psf type
|
entailment
|
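A quick sketch of the directory layout this helper assumes, using a hypothetical binned file name:
flist = _make_input_file_list('/data/counts_cubes/ccube_E0_PSF3.fits', 3)
print(flist.split())
# ['/data/counts_cubes/000000/ccube_E0_PSF3_000000.fits',
#  '/data/counts_cubes/000001/ccube_E0_PSF3_000001.fits',
#  '/data/counts_cubes/000002/ccube_E0_PSF3_000002.fits']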
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
comp_file = args.get('comp', None)
datafile = args.get('data', None)
do_ltsum = args.get('do_ltsum', False)
NAME_FACTORY.update_base_dict(datafile)
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
num_files = args.get('nfiles', 96)
self.comp_dict = yaml.safe_load(open(comp_file))
coordsys = self.comp_dict.pop('coordsys')
for key_e, comp_e in sorted(self.comp_dict.items()):
if 'mktimefilters' in comp_e:
mktimelist = comp_e['mktimefilters']
else:
mktimelist = ['none']
if 'evtclasses' in comp_e:
evtclasslist_vals = comp_e['evtclasses']
else:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
for mktimekey in mktimelist:
zcut = "zmax%i" % comp_e['zmax']
kwargs_mktime = dict(zcut=zcut,
ebin=key_e,
psftype='ALL',
coordsys=coordsys,
mktime=mktimekey)
if do_ltsum:
ltsum_listfile = 'ltsumlist_%s_%s' % (key_e, mktimekey)
ltsum_outfile = 'ltsum_%s_%s' % (key_e, mktimekey)
linkname = 'ltsum_%s_%s' % (key_e, mktimekey)
                self._set_link(linkname, Gtlink_ltsum,
infile1=ltsum_listfile,
infile2=None,
outfile=ltsum_outfile,
logfile=os.path.join(outdir_base, "%s.log" % linkname))
for evtclassval in evtclasslist_vals:
for psf_type in sorted(comp_e['psf_types'].keys()):
fullkey = "%s_%s_%s_%s"%(key_e, mktimekey, evtclassval, psf_type)
linkname = 'coadd_%s' % (fullkey)
kwargs_bin = kwargs_mktime.copy()
kwargs_bin['psftype'] = psf_type
kwargs_bin['evclass'] = evtclassval
ccube_name =\
os.path.basename(NAME_FACTORY.ccube(**kwargs_bin))
outputfile = os.path.join(outdir_base, ccube_name)
args = _make_input_file_list(outputfile, num_files)
self._set_link(linkname,
Link_FermipyCoadd,
args=args,
output=outputfile,
logfile=os.path.join(outdir_base, "%s.log" % linkname))
|
Map from the top-level arguments to the arguments provided to
the individual links
|
entailment
|
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
datafile = args['data']
if datafile is None or datafile == 'None':
return job_configs
NAME_FACTORY.update_base_dict(args['data'])
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
inputfiles = create_inputlist(args['ft1file'])
num_files = len(inputfiles)
for comp in components:
zcut = "zmax%i" % comp.zmax
mktimelist = copy.copy(comp.mktimefilters)
if not mktimelist:
mktimelist.append('none')
evtclasslist_keys = copy.copy(comp.evtclasses)
if not evtclasslist_keys:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
else:
evtclasslist_vals = copy.copy(evtclasslist_keys)
for mktimekey in mktimelist:
for evtclassval in evtclasslist_vals:
fullkey = comp.make_key(
'%s_%s_{ebin_name}_%s_{evtype_name}' %
(evtclassval, zcut, mktimekey))
name_keys = dict(zcut=zcut,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
coordsys=comp.coordsys,
irf_ver=NAME_FACTORY.irf_ver(),
mktime=mktimekey,
evclass=evtclassval,
fullpath=True)
ccube_name = os.path.basename(NAME_FACTORY.ccube(**name_keys))
outfile = os.path.join(outdir_base, ccube_name)
infiles = _make_input_file_list(outfile, num_files)
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[fullkey] = dict(args=infiles,
output=outfile,
logfile=logfile)
return job_configs
|
Hook to build job configurations
|
entailment
|
def create_from_flux(cls, params, emin, emax, flux, scale=1.0):
"""Create a spectral function instance given its flux."""
params = params.copy()
params[0] = 1.0
params[0] = flux / cls.eval_flux(emin, emax, params, scale=scale)
return cls(params, scale)
|
Create a spectral function instance given its flux.
|
entailment
|
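create_from_flux works because the integral flux is linear in the prefactor: evaluate the flux with the prefactor set to 1, then rescale it to match the requested flux. A self-contained sketch with a made-up PowerLaw class (not the library's actual spectral classes):
import numpy as np

class PowerLaw(object):
    """Toy spectral function dN/dE = N0 * (E / scale)**gamma."""

    def __init__(self, params, scale=1.0):
        self.params = np.array(params, dtype=float)
        self.scale = scale

    @staticmethod
    def eval_flux(emin, emax, params, scale=1.0):
        # Analytic integral over [emin, emax]; assumes gamma != -1.
        n0, gamma = params
        return (n0 / scale ** gamma *
                (emax ** (gamma + 1) - emin ** (gamma + 1)) / (gamma + 1))

    @classmethod
    def create_from_flux(cls, params, emin, emax, flux, scale=1.0):
        params = np.array(params, dtype=float)
        params[0] = 1.0
        params[0] = flux / cls.eval_flux(emin, emax, params, scale=scale)
        return cls(params, scale)

fn = PowerLaw.create_from_flux([1.0, -2.0], 1000., 1e4, 1e-8, scale=1000.)
print(fn.params[0])  # prefactor tuned so the 1-10 GeV flux equals 1e-8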