Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64). Each record below gives the function body (code), the signature, the docstring, and the three metrics, in that order.
self.logger.log(loglevel, '\n' + str(self.roi))
def print_roi(self, loglevel=logging.INFO)
Print information about the spectral and spatial properties of the ROI (sources, diffuse components).
4.63894
4.158492
1.115534
pars = self.get_params()

o = '\n'
o += '%4s %-20s%10s%10s%10s%10s%10s%5s\n' % (
    'idx', 'parname', 'value', 'error', 'min', 'max', 'scale', 'free')
o += '-' * 80 + '\n'

src_pars = collections.OrderedDict()
for p in pars:
    src_pars.setdefault(p['src_name'], [])
    src_pars[p['src_name']] += [p]

free_sources = []
for k, v in src_pars.items():
    for p in v:
        if not p['free']:
            continue
        free_sources += [k]

for k, v in src_pars.items():
    if not allpars and k not in free_sources:
        continue
    o += '%s\n' % k
    for p in v:
        o += '%4i %-20.19s' % (p['idx'], p['par_name'])
        o += '%10.3g%10.3g' % (p['value'], p['error'])
        o += '%10.3g%10.3g%10.3g' % (p['min'], p['max'], p['scale'])
        if p['free']:
            o += ' *'
        else:
            o += ' '
        o += '\n'

self.logger.log(loglevel, o)
def print_params(self, allpars=False, loglevel=logging.INFO)
Print information about the model parameters (values, errors, bounds, scale).
2.310238
2.263674
1.02057
infile = utils.resolve_path(infile, workdir=self.workdir)
roi_file, roi_data = utils.load_data(infile, workdir=self.workdir)

self.logger.info('Loading ROI file: %s', roi_file)

key_map = {'dfde': 'dnde',
           'dfde100': 'dnde100',
           'dfde1000': 'dnde1000',
           'dfde10000': 'dnde10000',
           'dfde_index': 'dnde_index',
           'dfde100_index': 'dnde100_index',
           'dfde1000_index': 'dnde1000_index',
           'dfde10000_index': 'dnde10000_index',
           'e2dfde': 'e2dnde',
           'e2dfde100': 'e2dnde100',
           'e2dfde1000': 'e2dnde1000',
           'e2dfde10000': 'e2dnde10000',
           'Npred': 'npred',
           'Npred_wt': 'npred_wt',
           'logLike': 'loglike',
           'dlogLike': 'dloglike',
           'emin': 'e_min',
           'ectr': 'e_ctr',
           'emax': 'e_max',
           'logemin': 'loge_min',
           'logectr': 'loge_ctr',
           'logemax': 'loge_max',
           'ref_dfde': 'ref_dnde',
           'ref_e2dfde': 'ref_e2dnde',
           'ref_dfde_emin': 'ref_dnde_e_min',
           'ref_dfde_emax': 'ref_dnde_e_max'}

self._roi_data = utils.update_keys(roi_data['roi'], key_map)

if 'erange' in self._roi_data:
    self._roi_data['loge_bounds'] = self._roi_data.pop('erange')

self._loge_bounds = self._roi_data.setdefault('loge_bounds',
                                              self.loge_bounds)

sources = roi_data.pop('sources')
sources = utils.update_keys(sources, key_map)

for k0, v0 in sources.items():
    for k, v in defaults.source_flux_output.items():
        if k not in v0:
            continue
        if v[2] == float and isinstance(v0[k], np.ndarray):
            sources[k0][k], sources[k0][k + '_err'] = v0[k][0], v0[k][1]

self.roi.load_sources(sources.values())
for i, c in enumerate(self.components):
    if 'src_expscale' in self._roi_data['components'][i]:
        c._src_expscale = copy.deepcopy(
            self._roi_data['components'][i]['src_expscale'])

self._create_likelihood(infile)
self.set_energy_range(self.loge_bounds[0], self.loge_bounds[1])

if params is not None:
    self.load_parameters_from_yaml(params)

if mask is not None:
    self.set_weights_map(mask, update_roi=False)

if reload_sources:
    names = [s.name for s in self.roi.sources if not s.diffuse]
    self.reload_sources(names, False)

self.logger.info('Finished Loading ROI')
def load_roi(self, infile, reload_sources=False, params=None, mask=None)
This function reloads the analysis state from a previously saved
instance generated with `~fermipy.gtanalysis.GTAnalysis.write_roi`.

Parameters
----------
infile : str

reload_sources : bool
    Regenerate source maps for non-diffuse sources.

params : str
    Path to a yaml file with updated parameter values.

mask : str
    Path to a fits file with an updated mask.
2.446193
2.425833
1.008393
# extract the results in a convenient format
make_plots = kwargs.get('make_plots', False)
save_weight_map = kwargs.get('save_weight_map', False)

if outfile is None:
    pathprefix = os.path.join(self.config['fileio']['workdir'],
                              'results')
elif not os.path.isabs(outfile):
    pathprefix = os.path.join(self.config['fileio']['workdir'],
                              outfile)
else:
    pathprefix = outfile

pathprefix = utils.strip_suffix(pathprefix, ['fits', 'yaml', 'npy'])
# pathprefix, ext = os.path.splitext(pathprefix)
prefix = os.path.basename(pathprefix)

xmlfile = pathprefix + '.xml'
fitsfile = pathprefix + '.fits'
npyfile = pathprefix + '.npy'

self.write_xml(xmlfile)
self.write_fits(fitsfile)

if not self.config['gtlike']['use_external_srcmap']:
    for c in self.components:
        c.like.logLike.saveSourceMaps(str(c.files['srcmap']))

if save_model_map:
    self.write_model_map(prefix)

if save_weight_map:
    self.write_weight_map(prefix)

o = {}
o['roi'] = copy.deepcopy(self._roi_data)
o['config'] = copy.deepcopy(self.config)
o['version'] = fermipy.__version__
o['stversion'] = fermipy.get_st_version()
o['sources'] = {}

for s in self.roi.sources:
    o['sources'][s.name] = copy.deepcopy(s.data)

for i, c in enumerate(self.components):
    o['roi']['components'][i]['src_expscale'] = \
        copy.deepcopy(c.src_expscale)

self.logger.info('Writing %s...', npyfile)
np.save(npyfile, o)

if make_plots:
    self.make_plots(prefix, None, **kwargs.get('plotting', {}))
def write_roi(self, outfile=None, save_model_map=False, **kwargs)
Write current state of the analysis to a file.  This method
writes an XML model definition, a ROI dictionary, and a FITS
source catalog file.  A previously saved analysis state can be
reloaded from the ROI dictionary file with the
`~fermipy.gtanalysis.GTAnalysis.load_roi` method.

Parameters
----------
outfile : str
    String prefix of the output files.  The extension of this
    string will be stripped when generating the XML, YAML and
    npy filenames.

make_plots : bool
    Generate diagnostic plots.

save_model_map : bool
    Save the current counts model to a FITS file.
3.339094
3.218287
1.037538
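Together with load_roi above, this supports a simple save/restore workflow. A minimal usage sketch, assuming a configured `GTAnalysis` instance; the configuration filename and output prefix are placeholders:

from fermipy.gtanalysis import GTAnalysis

gta = GTAnalysis('config.yaml')   # hypothetical configuration file
gta.setup()
gta.fit()

# Write the XML model, FITS catalog, and ROI dictionary (fit0.npy, ...).
gta.write_roi('fit0', make_plots=False)

# ... later, restore the saved analysis state and rebuild source maps.
gta.load_roi('fit0', reload_sources=True)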
# mcube_maps = kwargs.pop('mcube_maps', None)
if mcube_map is None:
    mcube_map = self.model_counts_map()

plotter = plotting.AnalysisPlotter(self.config['plotting'],
                                   fileio=self.config['fileio'],
                                   logging=self.config['logging'])
plotter.run(self, mcube_map, prefix=prefix, **kwargs)
def make_plots(self, prefix, mcube_map=None, **kwargs)
Make diagnostic plots using the current ROI model.
3.825089
3.878519
0.986224
if loge is None:
    logemin = self.log_energies[0]
    logemax = self.log_energies[-1]
    loge = np.linspace(logemin, logemax, 50)

o = {'energies': 10**loge,
     'log_energies': loge,
     'dnde': np.zeros(len(loge)) * np.nan,
     'dnde_lo': np.zeros(len(loge)) * np.nan,
     'dnde_hi': np.zeros(len(loge)) * np.nan,
     'dnde_err': np.zeros(len(loge)) * np.nan,
     'dnde_ferr': np.zeros(len(loge)) * np.nan,
     'pivot_energy': np.nan}

try:
    if fd is None:
        fd = FluxDensity.FluxDensity(self.like, name)
except RuntimeError:
    self.logger.error('Failed to create FluxDensity', exc_info=True)
    return o

dnde = [fd.value(10 ** x) for x in loge]
dnde_err = [fd.error(10 ** x) for x in loge]
dnde = np.array(dnde)
dnde_err = np.array(dnde_err)

m = dnde > 0
fhi = np.zeros_like(dnde)
flo = np.zeros_like(dnde)
ferr = np.zeros_like(dnde)
fhi[m] = dnde[m] * (1.0 + dnde_err[m] / dnde[m])
flo[m] = dnde[m] / (1.0 + dnde_err[m] / dnde[m])
ferr[m] = 0.5 * (fhi[m] - flo[m]) / dnde[m]
fhi[~m] = dnde_err[~m]

o['dnde'] = dnde
o['dnde_lo'] = flo
o['dnde_hi'] = fhi
o['dnde_err'] = dnde_err
o['dnde_ferr'] = ferr

try:
    o['pivot_energy'] = 10 ** utils.interpolate_function_min(
        loge, o['dnde_ferr'])
except Exception:
    self.logger.error('Failed to compute pivot energy', exc_info=True)

return o
def bowtie(self, name, fd=None, loge=None)
Generate a spectral uncertainty band (bowtie) for the given
source.  This will create an uncertainty band on the
differential flux as a function of energy by propagating the
errors on the global fit parameters.  Note that this band only
reflects the uncertainty for parameters that are currently free
in the model.

Parameters
----------
name : str
    Source name.

fd : FluxDensity
    Flux density object.  If this parameter is None then one
    will be created.

loge : array-like
    Sequence of energies in log10(E/MeV) at which the flux band
    will be evaluated.
2.065023
2.030905
1.016799
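As a usage sketch, the band can be evaluated on a custom energy grid; this assumes a fitted `GTAnalysis` instance `gta` and a placeholder source name:

import numpy as np

# Evaluate the bowtie at 25 points between 100 MeV and 100 GeV.
loge = np.linspace(2.0, 5.0, 25)
band = gta.bowtie('hypothetical_src', loge=loge)

# dnde_lo/dnde_hi bracket the one-sigma band on the differential flux.
for e, lo, hi in zip(band['energies'], band['dnde_lo'], band['dnde_hi']):
    print('%10.3g MeV: %10.3g -- %10.3g' % (e, lo, hi))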
npts = self.config['gtlike']['llscan_npts']
optimizer = kwargs.get('optimizer', self.config['optimizer'])
sd = self.get_src_model(name, paramsonly, reoptimize, npts,
                        optimizer=optimizer)
src = self.roi.get_source_by_name(name)
src.update_data(sd)
def update_source(self, name, paramsonly=False, reoptimize=False, **kwargs)
Update the dictionary for this source.

Parameters
----------
name : str

paramsonly : bool

reoptimize : bool
    Re-fit background parameters in likelihood scan.
7.13528
6.425087
1.110534
for i, c in enumerate(self.components):
    # Compute the diffuse response, which is needed by gtsrcprob.
    c._diffrsp_app(xmlfile=xmlfile)
    # Compute the source probabilities.
    c._srcprob_app(xmlfile=xmlfile, overwrite=overwrite)
def compute_srcprob(self, xmlfile=None, overwrite=False)
Run the gtsrcprob application with the current model or a user-provided XML file.
6.900464
6.909337
0.998716
src = self.roi.get_source_by_name(name)

if hasattr(self.like.logLike, 'loadSourceMap'):
    self.like.logLike.loadSourceMap(str(name), True, False)
    srcmap_utils.delete_source_map(self.files['srcmap'], name)
    self.like.logLike.saveSourceMaps(str(self.files['srcmap']))
    self._scale_srcmap(self._src_expscale, check_header=False,
                       names=[name])
    self.like.logLike.buildFixedModelWts()
else:
    self.write_xml('tmp')
    src = self.delete_source(name)
    self.add_source(name, src, free=True)
    self.load_xml('tmp')
def reload_source(self, name)
Recompute the source map for a single source in the model.
7.647137
7.100763
1.076946
try:
    self.like.logLike.loadSourceMaps(names, True, True)
    # loadSourceMaps doesn't overwrite the header so we need
    # to ignore EXPSCALE by setting check_header=False
    self._scale_srcmap(self._src_expscale, check_header=False,
                       names=names)
except Exception:
    for name in names:
        self.reload_source(name)
def reload_sources(self, names)
Recompute the source map for a list of sources in the model.
13.600182
12.655519
1.074644
# if self.roi.has_source(name):
#     msg = 'Source %s already exists.' % name
#     self.logger.error(msg)
#     raise Exception(msg)

srcmap_utils.delete_source_map(self.files['srcmap'], name)

src = self.roi[name]

if self.config['gtlike']['expscale'] is not None and \
        name not in self._src_expscale:
    self._src_expscale[name] = self.config['gtlike']['expscale']

if self._like is None:
    return

if not use_pylike:
    self._update_srcmap_file([src], True)

pylike_src = self._create_source(src)

# Initialize source as free/fixed
if free is not None:
    pylike_src.spectrum().normPar().setFree(free)

if hasattr(pyLike, 'PsfIntegConfig') and \
        hasattr(pyLike.PsfIntegConfig, 'set_use_single_psf'):
    config = pyLike.BinnedLikeConfig(self.like.logLike.config())
    config.psf_integ_config().set_use_single_psf(use_single_psf)
    self.like.addSource(pylike_src, config)
else:
    self.like.addSource(pylike_src)

self.like.syncSrcParams(str(name))
self.like.logLike.buildFixedModelWts()

if save_source_maps and \
        not self.config['gtlike']['use_external_srcmap']:
    self.like.logLike.saveSourceMaps(str(self.files['srcmap']))

self.set_exposure_scale(name)
def add_source(self, name, src_dict, free=None, save_source_maps=True, use_pylike=True, use_single_psf=False)
Add a new source to the model.  Source properties (spectrum,
spatial model) are set with the src_dict argument.

Parameters
----------
name : str
    Source name.

src_dict : dict or `~fermipy.roi_model.Source` object
    Dictionary or Source object defining the properties of the
    source.

free : bool
    Initialize the source with the normalization parameter free.

save_source_maps : bool
    Write the source map for this source to the source maps file.

use_pylike : bool

use_single_psf : bool
4.392702
4.28223
1.025798
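For illustration, a point source with a power-law spectrum can be added from a dictionary; this sketch assumes a configured `GTAnalysis` instance `gta`, and the source name and coordinates are placeholders:

gta.add_source('hypothetical_src',
               {'ra': 120.0, 'dec': -30.0,
                'SpectrumType': 'PowerLaw',
                'Index': 2.0,
                'Scale': 1000.0,      # pivot energy in MeV
                'Prefactor': 1e-11,
                'SpatialModel': 'PointSource'},
               free=True)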
if src['SpatialType'] == 'SkyDirFunction':
    pylike_src = pyLike.PointSource(self.like.logLike.observation())
    pylike_src.setDir(src.skydir.ra.deg, src.skydir.dec.deg,
                      False, False)
elif src['SpatialType'] == 'SpatialMap':
    filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
    sm = pyLike.SpatialMap(filepath)
    pylike_src = pyLike.DiffuseSource(sm,
                                      self.like.logLike.observation(),
                                      False)
elif src['SpatialType'] == 'RadialProfile':
    filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
    sm = pyLike.RadialProfile(filepath)
    sm.setCenter(src['ra'], src['dec'])
    pylike_src = pyLike.DiffuseSource(sm,
                                      self.like.logLike.observation(),
                                      False)
elif src['SpatialType'] == 'RadialGaussian':
    sm = pyLike.RadialGaussian(src.skydir.ra.deg, src.skydir.dec.deg,
                               src.spatial_pars['Sigma']['value'])
    pylike_src = pyLike.DiffuseSource(sm,
                                      self.like.logLike.observation(),
                                      False)
elif src['SpatialType'] == 'RadialDisk':
    sm = pyLike.RadialDisk(src.skydir.ra.deg, src.skydir.dec.deg,
                           src.spatial_pars['Radius']['value'])
    pylike_src = pyLike.DiffuseSource(sm,
                                      self.like.logLike.observation(),
                                      False)
elif src['SpatialType'] == 'MapCubeFunction':
    filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
    mcf = pyLike.MapCubeFunction2(filepath)
    pylike_src = pyLike.DiffuseSource(mcf,
                                      self.like.logLike.observation(),
                                      False)
else:
    raise Exception('Unrecognized spatial type: %s'
                    % src['SpatialType'])

if src['SpectrumType'] == 'FileFunction':
    fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
                                           src.spectral_pars)
    file_function = pyLike.FileFunction_cast(fn)
    filename = str(os.path.expandvars(src['Spectrum_Filename']))
    file_function.readFunction(filename)
elif src['SpectrumType'] == 'DMFitFunction':
    fn = pyLike.DMFitFunction()
    fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
                                           src.spectral_pars, fn)
    filename = str(os.path.expandvars(src['Spectrum_Filename']))
    fn.readFunction(filename)
else:
    fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
                                           src.spectral_pars)

pylike_src.setSpectrum(fn)
pylike_src.setName(str(src.name))
return pylike_src
def _create_source(self, src)
Create a pyLikelihood Source object from a `~fermipy.roi_model.Model` object.
2.360204
2.235133
1.055957
name = self.roi.get_source_by_name(name).name

if scale is None and name not in self._src_expscale:
    return
elif scale is None:
    scale = self._src_expscale.get(name, 1.0)
else:
    self._src_expscale[name] = scale

self._scale_srcmap({name: scale})
def set_exposure_scale(self, name, scale=None)
Set the exposure correction of a source.

Parameters
----------
name : str
    Source name.

scale : float
    Exposure scale factor (1.0 = nominal exposure).
3.898864
4.137551
0.942312
src = self.roi.get_source_by_name(name)
name = src.name
self.like[name].src.set_edisp_flag(flag)
def set_edisp_flag(self, name, flag=True)
Enable/Disable the energy dispersion correction for a source.
6.676288
6.367172
1.048548
if logemin is None:
    logemin = self.log_energies[0]

if logemax is None:
    logemax = self.log_energies[-1]

imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
imax = int(utils.val_to_edge(self.log_energies, logemax)[0])

if imin - imax == 0:
    imin = int(len(self.log_energies) - 1)
    imax = int(len(self.log_energies) - 1)

klims = self.like.logLike.klims()
if imin != klims[0] or imax != klims[1]:
    self.like.selectEbounds(imin, imax)

return np.array([self.log_energies[imin], self.log_energies[imax]])
def set_energy_range(self, logemin, logemax)
Set the energy range of the analysis.

Parameters
----------
logemin : float
    Lower end of energy range in log10(E/MeV).

logemax : float
    Upper end of energy range in log10(E/MeV).
2.458932
2.506639
0.980968
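A short usage sketch, assuming a configured `GTAnalysis` instance `gta`:

# Restrict the analysis to 1-10 GeV (log10(E/MeV) = 3..4).
loge_bounds = gta.set_energy_range(3.0, 4.0)
print('Analysis now spans log10(E/MeV):', loge_bounds)

# Restore the full energy range of the binned data.
gta.set_energy_range(gta.log_energies[0], gta.log_energies[-1])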
try:
    if isinstance(self.like, gtutils.SummedLikelihood):
        cmap = self.like.components[0].logLike.countsMap()
        p_method = cmap.projection().method()
    else:
        cmap = self.like.logLike.countsMap()
        p_method = cmap.projection().method()
except Exception:
    p_method = 0

if p_method == 0:  # WCS
    z = cmap.data()
    z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
    return WcsNDMap(copy.deepcopy(self.geom), z)
elif p_method == 1:  # HPX
    z = cmap.data()
    z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix))
    return HpxNDMap(copy.deepcopy(self.geom), z)
else:
    self.logger.error('Did not recognize CountsMap type %i'
                      % p_method, exc_info=True)
    return None
def counts_map(self)
Return 3-D counts map for this component as a Map object.

Returns
-------
map : `~fermipy.skymap.MapBase`
3.949512
3.722921
1.060864
# EAC: we need the try blocks b/c older versions of the ST don't
# have some of these functions
if isinstance(self.like, gtutils.SummedLikelihood):
    cmap = self.like.components[0].logLike.countsMap()
    try:
        p_method = cmap.projection().method()
    except AttributeError:
        p_method = 0
    try:
        if self.like.components[0].logLike.has_weights():
            wmap = self.like.components[0].logLike.weightMap()
        else:
            wmap = None
    except Exception:
        wmap = None
else:
    cmap = self.like.logLike.countsMap()
    try:
        p_method = cmap.projection().method()
    except AttributeError:
        p_method = 0
    try:
        if self.like.logLike.has_weights():
            wmap = self.like.logLike.weightMap()
        else:
            wmap = None
    except Exception:
        wmap = None

if p_method == 0:  # WCS
    if wmap is None:
        z = np.ones((self.enumbins, self.npix, self.npix))
    else:
        z = wmap.model()
    z = np.array(z).reshape(self.enumbins, self.npix, self.npix)
    return WcsNDMap(copy.deepcopy(self._geom), z)
elif p_method == 1:  # HPX
    nhpix = np.max(self.geom.npix)
    if wmap is None:
        z = np.ones((self.enumbins, nhpix))
    else:
        z = wmap.model()
    z = np.array(z).reshape(self.enumbins, nhpix)
    return HpxNDMap(self.geom, z)
else:
    self.logger.error('Did not recognize CountsMap type %i'
                      % p_method, exc_info=True)
    return None
def weight_map(self)
Return 3-D weights map for this component as a Map object.

Returns
-------
map : `~fermipy.skymap.MapBase`
3.129799
2.991108
1.046368
# EAC, we need this b/c older versions of the ST don't have the
# right signature
try:
    cs = np.array(self.like.logLike.modelCountsSpectrum(
        str(name), weighted))
except (TypeError, NotImplementedError):
    cs = np.array(self.like.logLike.modelCountsSpectrum(str(name)))

imin = utils.val_to_edge(self.log_energies, logemin)[0]
imax = utils.val_to_edge(self.log_energies, logemax)[0]

if imax <= imin:
    raise Exception('Invalid energy range.')

return cs[imin:imax]
def model_counts_spectrum(self, name, logemin, logemax, weighted=False)
Return the model counts spectrum of a source.

Parameters
----------
name : str
    Source name.
5.441438
5.600766
0.971552
loglevel = kwargs.get('loglevel', self.loglevel)
self.logger.log(loglevel, 'Running setup for component %s', self.name)

use_external_srcmap = self.config['gtlike']['use_external_srcmap']

# Run data selection
if not use_external_srcmap:
    self._select_data(overwrite=overwrite, **kwargs)

# Create LT Cube
if self._ext_ltcube is not None:
    self.logger.log(loglevel, 'Using external LT cube.')
else:
    self._create_ltcube(overwrite=overwrite, **kwargs)

self.logger.debug('Loading LT Cube %s', self.files['ltcube'])
self._ltc = LTCube.create(self.files['ltcube'])

# Extract tmin, tmax from LT cube
self._tmin = self._ltc.tstart
self._tmax = self._ltc.tstop

self.logger.debug('Creating PSF model')
self._psf = irfs.PSFModel.create(self.roi.skydir, self._ltc,
                                 self.config['gtlike']['irfs'],
                                 self.config['selection']['evtype'],
                                 self.energies)

# Bin data and create exposure cube
if not use_external_srcmap:
    self._bin_data(overwrite=overwrite, **kwargs)
    self._create_expcube(overwrite=overwrite, **kwargs)
    # This is needed in case the exposure map is in HEALPix
    hpxhduname = "HPXEXPOSURES"
    try:
        self._bexp = Map.read(self.files['bexpmap'], hdu=hpxhduname)
    except KeyError:
        self._bexp = Map.read(self.files['bexpmap'])

# Write ROI XML
self.roi.write_xml(self.files['srcmdl'], self.config['model'])

# Create source maps file
if not use_external_srcmap:
    self._create_srcmaps(overwrite=overwrite)

if not self.config['data']['cacheft1'] and \
        os.path.isfile(self.files['ft1']):
    self.logger.debug('Deleting FT1 file.')
    os.remove(self.files['ft1'])

self.logger.log(loglevel, 'Finished setup for component %s', self.name)
def setup(self, overwrite=False, **kwargs)
Run pre-processing step for this component.  This will generate
all of the auxiliary files needed to instantiate a likelihood
object.  By default this function will skip any steps for which
the output file already exists.

Parameters
----------
overwrite : bool
    Run all pre-processing steps even if the output file of that
    step is present in the working directory.
3.639829
3.649161
0.997443
srcmap = fits.open(self.files['srcmap'])

for hdu in srcmap[1:]:
    if hdu.name not in scale_map:
        continue
    if names is not None and hdu.name not in names:
        continue
    scale = scale_map[hdu.name]
    if scale < 1e-20:
        self.logger.warning(
            "The expscale parameter was zero, setting it to 1e-8")
        scale = 1e-8
    if 'EXPSCALE' in hdu.header and check_header:
        old_scale = hdu.header['EXPSCALE']
    else:
        old_scale = 1.0
    hdu.data *= scale / old_scale
    hdu.header['EXPSCALE'] = (scale,
                              'Exposure correction applied to this map')

srcmap.writeto(self.files['srcmap'], overwrite=True)
srcmap.close()

# Force reloading the map from disk
for name in scale_map.keys():
    self.like.logLike.eraseSourceMap(str(name))
self.like.logLike.buildFixedModelWts()
def _scale_srcmap(self, scale_map, check_header=True, names=None)
Apply exposure corrections to the source map file.

Parameters
----------
scale_map : dict
    Dictionary of exposure corrections.

check_header : bool
    Check the EXPSCALE header keyword to see if an exposure
    correction has already been applied to this source.

names : list, optional
    Names of sources to which the exposure correction will be
    applied.  If None then all sources will be corrected.
3.526294
3.374887
1.044863
self.logger.info('Computing scaled source map.')

bexp0 = fits.open(self.files['bexpmap_roi'])
bexp1 = fits.open(self.config['gtlike']['bexpmap'])
srcmap = fits.open(self.config['gtlike']['srcmap'])

if bexp0[0].data.shape != bexp1[0].data.shape:
    raise Exception('Wrong shape for input exposure map file.')

bexp_ratio = bexp0[0].data / bexp1[0].data

self.logger.info('Min/Med/Max exposure correction: %f %f %f' %
                 (np.min(bexp_ratio), np.median(bexp_ratio),
                  np.max(bexp_ratio)))

for hdu in srcmap[1:]:
    if hdu.name == 'GTI':
        continue
    if hdu.name == 'EBOUNDS':
        continue
    hdu.data *= bexp_ratio

srcmap.writeto(self.files['srcmap'], overwrite=True)
def _make_scaled_srcmap(self)
Make a source map rescaled by the ratio of the ROI exposure map to the input exposure map.
3.188476
2.938056
1.085233
cm = self.counts_map()
data = cm.data
m = self.model_counts_map(name)

if clear:
    data.fill(0.0)

if randomize:
    if m.data.min() < 0.:
        self.logger.warning('At least one negative value found in the '
                            'model map. Setting negative values to 0.')
        indexcond = np.where(m.data < 0.)
        m.data[indexcond] = np.zeros(len(m.data[indexcond]))
    data += np.random.poisson(m.data).astype(float)
else:
    data += m.data

if hasattr(self.like.logLike, 'setCountsMap'):
    self.like.logLike.setCountsMap(np.ravel(data))

srcmap_utils.update_source_maps(self.files['srcmap'],
                                {'PRIMARY': data},
                                logger=self.logger)
cm.write(self.files['ccubemc'], overwrite=True, conv='fgst-ccube')
def simulate_roi(self, name=None, clear=True, randomize=True)
Simulate the whole ROI or inject a simulation of one or more
model components into the data.

Parameters
----------
name : str
    Name of the model component to be simulated.  If None then
    the whole ROI will be simulated.

clear : bool
    Zero the current counts map before injecting the simulation.

randomize : bool
    Fill each pixel with random values drawn from a Poisson
    distribution.  If False then fill each pixel with its
    expected number of counts.
6.363642
6.577056
0.967552
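A typical use is to replace the data with a Poisson realization of the current model before refitting; this sketch assumes a configured `GTAnalysis` instance `gta` and a placeholder component name:

# Replace the counts map with a random realization of the full model.
gta.simulate_roi(clear=True, randomize=True)
gta.fit()

# Alternatively, inject a single model component on top of the data.
gta.simulate_roi(name='hypothetical_src', clear=False)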
if model_name is None:
    suffix = self.config['file_suffix']
else:
    suffix = '_%s%s' % (model_name, self.config['file_suffix'])

self.logger.info('Generating model map for component %s.', self.name)

outfile = os.path.join(self.config['fileio']['workdir'],
                       'mcube%s.fits' % (suffix))

cmap = self.model_counts_map(name, use_mask=False)
cmap.write(outfile, overwrite=True, conv='fgst-ccube')
return cmap
def write_model_map(self, model_name=None, name=None)
Save counts model map to a FITS file.
5.037896
4.549333
1.107392
if model_name is None:
    suffix = self.config['file_suffix']
else:
    suffix = '_%s%s' % (model_name, self.config['file_suffix'])

self.logger.info('Generating model map for component %s.', self.name)

outfile = os.path.join(self.config['fileio']['workdir'],
                       'wcube%s.fits' % (suffix))

wmap = self.weight_map()
wmap.write(outfile, overwrite=True, conv='fgst-ccube')
return wmap
def write_weight_map(self, model_name=None)
Save the weights map to a FITS file.
4.839941
4.281009
1.130561
if not os.path.isfile(self.files['srcmap']):
    return

hdulist = fits.open(self.files['srcmap'])
hdunames = [hdu.name.upper() for hdu in hdulist]

srcmaps = {}
for src in sources:
    if src.name.upper() in hdunames and not overwrite:
        continue
    self.logger.debug('Creating source map for %s', src.name)
    srcmaps[src.name] = self._create_srcmap(src.name, src)

if srcmaps:
    self.logger.debug('Updating source map file for component %s.',
                      self.name)
    srcmap_utils.update_source_maps(self.files['srcmap'], srcmaps,
                                    logger=self.logger)

hdulist.close()
def _update_srcmap_file(self, sources, overwrite=True)
Check the contents of the source map file and generate source maps for any components that are not present.
2.73895
2.626925
1.042645
psf_scale_fn = kwargs.get('psf_scale_fn', None)

skydir = src.skydir
spatial_model = src['SpatialModel']
spatial_width = src['SpatialWidth']

xpix, ypix = self.geom.to_image().coord_to_pix(skydir)
exp = self._bexp.interp_by_coord(
    (skydir, self._bexp.geom.axes[0].center))

cache = self._srcmap_cache.get(name, None)
if cache is not None:
    k = cache.create_map([ypix, xpix])
else:
    k = srcmap_utils.make_srcmap(self._psf, exp, spatial_model,
                                 spatial_width,
                                 npix=self.npix,
                                 xpix=xpix, ypix=ypix,
                                 cdelt=self.config['binning']['binsz'],
                                 psf_scale_fn=psf_scale_fn,
                                 sparse=True)
return k
def _create_srcmap(self, name, src, **kwargs)
Generate the source map for a source.
4.912871
5.104256
0.962505
k = self._create_srcmap(name, src, **kwargs)

scale = self._src_expscale.get(name, 1.0)
k *= scale

# Force the source map to be cached
# FIXME: No longer necessary to force caching in ST after 11-05-02
self.like.logLike.sourceMap(str(name)).model()
self.like.logLike.setSourceMapImage(str(name), np.ravel(k))
self.like.logLike.sourceMap(str(name)).model()

normPar = self.like.normPar(name)
if not normPar.isFree():
    self.like.logLike.buildFixedModelWts()
def _update_srcmap(self, name, src, **kwargs)
Update the source map for an existing source in memory.
9.1695
9.349719
0.980725
if model_name is not None:
    model_name = os.path.splitext(model_name)[0]

if model_name is None or model_name == '':
    srcmdl = self.files['srcmdl']
else:
    srcmdl = self.get_model_path(model_name)

if not os.path.isfile(srcmdl):
    raise Exception("Model file does not exist: %s" % srcmdl)

if model_name is None:
    suffix = self.config['file_suffix']
else:
    suffix = '_%s%s' % (model_name, self.config['file_suffix'])

outfile = os.path.join(self.config['fileio']['workdir'],
                       'mcube%s.fits' % (suffix))

# May consider generating a custom source model file
if not os.path.isfile(outfile):
    kw = dict(srcmaps=self.files['srcmap'],
              srcmdl=srcmdl,
              bexpmap=self.files['bexpmap'],
              outfile=outfile,
              expcube=self.files['ltcube'],
              irfs=self.config['gtlike']['irfs'],
              evtype=self.config['selection']['evtype'],
              edisp=bool(self.config['gtlike']['edisp']),
              outtype='ccube',
              chatter=self.config['logging']['chatter'])
    run_gtapp('gtmodel', self.logger, kw)
else:
    self.logger.info('Skipping gtmodel')
def generate_model(self, model_name=None, outfile=None)
Generate a counts model map from an XML model file using gtmodel.

Parameters
----------
model_name : str
    Name of the model.  If no name is given it will use the
    baseline model.

outfile : str
    Override the name of the output model file.
3.723623
3.745809
0.994077
xmlfile = self.get_model_path(xmlfile)
self.logger.info('Writing %s...', xmlfile)
self.like.writeXml(str(xmlfile))
def write_xml(self, xmlfile)
Write the XML model for this analysis component.
6.133198
5.042827
1.216222
name, ext = os.path.splitext(name)
ext = '.xml'
xmlfile = name + self.config['file_suffix'] + ext
xmlfile = utils.resolve_path(xmlfile,
                             workdir=self.config['fileio']['workdir'])
return xmlfile
def get_model_path(self, name)
Infer the path to the XML model file from its name.
5.452937
4.953116
1.10091
xmlfile = self.get_model_path(xmlfile)

outfile = os.path.join(self.config['fileio']['workdir'],
                       'tscube%s.fits' % (self.config['file_suffix']))

kw = dict(cmap=self.files['ccube'],
          expcube=self.files['ltcube'],
          bexpmap=self.files['bexpmap'],
          irfs=self.config['gtlike']['irfs'],
          evtype=self.config['selection']['evtype'],
          srcmdl=xmlfile,
          nxpix=self.npix, nypix=self.npix,
          binsz=self.config['binning']['binsz'],
          xref=float(self.roi.skydir.ra.deg),
          yref=float(self.roi.skydir.dec.deg),
          proj=self.config['binning']['proj'],
          stlevel=0,
          coordsys=self.config['binning']['coordsys'],
          outfile=outfile)

run_gtapp('gttscube', self.logger, kw)
def _tscube_app(self, xmlfile)
Run gttscube as an application.
4.743749
4.462643
1.062991
loglevel = kwargs.get('loglevel', self.loglevel)
self.logger.log(loglevel, 'Computing diffuse response for component %s.',
                self.name)

# Set the srcmdl
srcmdl_file = self.files['srcmdl']
if xmlfile is not None:
    srcmdl_file = self.get_model_path(xmlfile)

kw = dict(evfile=self.files['ft1'],
          scfile=self.data_files['scfile'],
          irfs=self.config['gtlike']['irfs'],
          evtype=self.config['selection']['evtype'],
          srcmdl=srcmdl_file)

run_gtapp('gtdiffrsp', self.logger, kw, loglevel=loglevel)
def _diffrsp_app(self, xmlfile=None, **kwargs)
Compute the diffuse response with gtdiffrsp.
6.108247
5.79961
1.053217
loglevel = kwargs.get('loglevel', self.loglevel)
self.logger.log(loglevel, 'Computing src probability for component %s.',
                self.name)

# Set the srcmdl
srcmdl_file = self.files['srcmdl']
if xmlfile is not None:
    srcmdl_file = self.get_model_path(xmlfile)

# Set the outfile.  It's defined here and not in the self.files dict
# so that it is copied with the stage_output module even if savefits
# is False.
outfile = os.path.join(self.workdir,
                       'ft1_srcprob{0[file_suffix]:s}.fits'.format(
                           self.config))

kw = dict(evfile=self.files['ft1'],
          scfile=self.data_files['scfile'],
          outfile=outfile,
          irfs=self.config['gtlike']['irfs'],
          srcmdl=srcmdl_file)

self.logger.debug(kw)

# Run the gtsrcprob application.
if os.path.isfile(outfile) and not overwrite:
    self.logger.info('Skipping gtsrcprob')
else:
    run_gtapp('gtsrcprob', self.logger, kw, loglevel=loglevel)
def _srcprob_app(self, xmlfile=None, overwrite=False, **kwargs)
Run gtsrcprob for an analysis component as an application.
5.63652
5.565384
1.012782
odict = {}
for key, val in idict.items():
    if is_null(val):
        continue
    odict[key] = val
return odict
def purge_dict(idict)
Remove null items from a dictionary
2.875653
2.57683
1.115965
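A quick illustration of the intended behavior, assuming `is_null` flags `None` (and, in fermipy's conventions, null-like strings such as 'none') as null:

config = {'comp': 'components.yaml', 'data': None, 'mktimefilter': 'none'}

# Null-valued entries are dropped; everything else passes through.
cleaned = purge_dict(config)
print(cleaned)  # under the assumption above: {'comp': 'components.yaml'}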
chain = cls.create()
args = chain._run_argparser(sys.argv[1:])
chain._run_chain(sys.stdout, args.dry_run)
chain._finalize(args.dry_run)
def main(cls)
Hook to run this `Chain` from the command line
7.317206
6.071602
1.205152
self._map_arguments(self.args)

self.files.latch_file_info(self.args)
self.sub_files.file_dict.clear()
self.sub_files.update(self.files.file_dict)
for link in self._links.values():
    self.sub_files.update(link.files.file_dict)
    self.sub_files.update(link.sub_files.file_dict)
def _latch_file_info(self)
Internal function to update the dictionaries keeping track of input and output files
3.965502
3.469166
1.143071
val_copy = purge_dict(kwargs.copy())
sub_link_prefix = val_copy.pop('link_prefix', '')
link_prefix = self.link_prefix + sub_link_prefix
create_args = dict(linkname=linkname,
                   link_prefix=link_prefix,
                   job_archive=val_copy.pop('job_archive', None),
                   file_stage=val_copy.pop('file_stage', None))
job_args = val_copy

if linkname in self._links:
    link = self._links[linkname]
    link.update_args(job_args)
else:
    link = cls.create(**create_args)
    self._links[link.linkname] = link
    logfile_default = os.path.join('logs',
                                   '%s.log' % link.full_linkname)
    logfile = kwargs.setdefault('logfile', logfile_default)
    link._register_job(JobDetails.topkey, job_args, logfile,
                       status=JobStatus.unknown)
return link
def _set_link(self, linkname, cls, **kwargs)
Transfer options kwargs to a `Link` object, optionally building
the `Link` if needed.

Parameters
----------
linkname : str
    Unique name of this particular link.

cls : type
    Type of `Link` being created or managed.
4.115702
4.47203
0.920321
for link in self._links.values():
    link._job_archive = self._job_archive
def _set_links_job_archive(self)
Pass self._job_archive along to links
5.04184
2.718594
1.854576
self._set_links_job_archive()
failed = False

if self._file_stage is not None:
    input_file_mapping, output_file_mapping = \
        self._map_scratch_files(self.sub_files)
    if stage_files:
        self._file_stage.make_scratch_dirs(input_file_mapping, dry_run)
        self._file_stage.make_scratch_dirs(output_file_mapping, dry_run)
        self._stage_input_files(input_file_mapping, dry_run)

for link in self._links.values():
    logfile = os.path.join('logs', "%s.log" % link.full_linkname)
    link._archive_self(logfile, status=JobStatus.unknown)
    key = JobDetails.make_fullkey(link.full_linkname)
    if hasattr(link, 'check_status'):
        link.check_status(stream, no_wait=True,
                          check_once=True, do_print=False)

    link_status = link.check_job_status(key)
    if link_status in [JobStatus.done]:
        if not force_run:
            print("Skipping done link", link.full_linkname)
            continue
    elif link_status in [JobStatus.running]:
        if not force_run and not resubmit_failed:
            print("Skipping running link", link.full_linkname)
            continue
    elif link_status in [JobStatus.failed, JobStatus.partial_failed]:
        if not resubmit_failed:
            print("Skipping failed link", link.full_linkname)
            continue

    print("Running link ", link.full_linkname)
    link.run_with_log(dry_run=dry_run, stage_files=False,
                      resubmit_failed=resubmit_failed)
    link_status = link.check_jobs_status()
    link._set_status_self(status=link_status)
    if link_status in [JobStatus.failed, JobStatus.partial_failed]:
        print("Stopping chain execution at failed link %s" %
              link.full_linkname)
        failed = True
        break
    # elif link_status in [JobStatus.partial_failed]:
    #     print("Resubmitting partially failed link %s" %
    #           link.full_linkname)
    #     link.run_with_log(dry_run=dry_run, stage_files=False,
    #                       resubmit_failed=resubmit_failed)
    #     link_status = link.check_jobs_status()
    #     link._set_status_self(status=link_status)
    #     if link_status in [JobStatus.partial_failed]:
    #         print("Stopping chain execution: resubmission failed %s" %
    #               link.full_linkname)
    #         failed = True
    #         break

if self._file_stage is not None and stage_files and not failed:
    self._stage_output_files(output_file_mapping, dry_run)

chain_status = self.check_links_status()
print("Chain status: %s" % (JOB_STATUS_STRINGS[chain_status]))
if chain_status == 5:
    job_status = 0
else:
    job_status = -1
self._write_status_to_log(job_status, stream)
self._set_status_self(status=chain_status)

if self._job_archive:
    self._job_archive.file_archive.update_file_status()
    self._job_archive.write_table_file()
def _run_chain(self, stream=sys.stdout, dry_run=False, stage_files=True, force_run=False, resubmit_failed=False)
Run all the links in the chain.

Parameters
----------
stream : file
    Stream to print to.  Must have a 'write' function.

dry_run : bool
    Print commands but do not run them.

stage_files : bool
    Stage files to and from the scratch area.

force_run : bool
    Run jobs, even if they are marked as done.

resubmit_failed : bool
    Resubmit failed jobs.
2.761474
2.73516
1.009621
if recursive:
    for link in self._links.values():
        link.clear_jobs(recursive)
self.jobs.clear()
def clear_jobs(self, recursive=True)
Clear the dictionary of all the jobs.  If recursive is True this will include jobs from all internal `Link` objects.
4.070715
4.499475
0.904709
if recursive:
    ret_dict = self.jobs.copy()
    for link in self._links.values():
        ret_dict.update(link.get_jobs(recursive))
    return ret_dict
return self.jobs
def get_jobs(self, recursive=True)
Return a dictionary with all the jobs.  If recursive is True this will include jobs from all internal `Link` objects.
3.28142
2.965339
1.106592
ret_dict = OrderedDict()
for link in self._links.values():
    link_dict = link.missing_input_files()
    for key, value in link_dict.items():
        try:
            ret_dict[key] += value
        except KeyError:
            ret_dict[key] = value
return ret_dict
def missing_input_files(self)
Make and return a dictionary of the missing input files.  This returns a dictionary mapping each filepath to the list of `Link` objects that use the file as input.
2.664376
2.465096
1.080841
status_vector = JobStatusVector()
for link in self._links.values():
    key = JobDetails.make_fullkey(link.full_linkname)
    link_status = link.check_job_status(key,
                                        fail_running=fail_running,
                                        fail_pending=fail_pending)
    status_vector[link_status] += 1

return status_vector.get_status()
def check_links_status(self, fail_running=False, fail_pending=False)
Check the status of all the jobs run from the `Link` objects in
this `Chain` and return a status flag that summarizes that.

Parameters
----------
fail_running : bool
    If True, consider running jobs as failed.

fail_pending : bool
    If True, consider pending jobs as failed.

Returns
-------
status : `JobStatus`
    Job status flag that summarizes the status of all the jobs.
4.4107
4.907584
0.898752
self._run_chain(stream, dry_run, stage_files, resubmit_failed=resubmit_failed)
def run(self, stream=sys.stdout, dry_run=False, stage_files=True, resubmit_failed=False)
Runs this `Chain`.

Parameters
----------
stream : file
    Stream that this `Link` will print to.  Must have a 'write'
    function.

dry_run : bool
    Print command but do not run it.

stage_files : bool
    Copy files to and from the scratch staging area.

resubmit_failed : bool
    Flag for sub-classes to resubmit failed jobs.
3.482951
3.571304
0.97526
self.args = extract_arguments(override_args, self.args)
self._map_arguments(self.args)

scratch_dir = self.args.get('scratch', None)
if is_not_null(scratch_dir):
    self._file_stage = FileStageManager(scratch_dir, '.')
for link in self._links.values():
    link._set_file_stage(self._file_stage)
self._latch_file_info()
def update_args(self, override_args)
Update the arguments used to invoke the application.  Note that
this will also update the dictionary of input and output files.

Parameters
----------
override_args : dict
    Dictionary of arguments passed to the links.
5.739028
5.790628
0.991089
print ("%s%30s : %15s : %20s" % (indent, "Linkname", "Link Status", "Jobs Status")) for link in self._links.values(): if hasattr(link, 'check_status'): status_vect = link.check_status( stream=sys.stdout, no_wait=True, do_print=False) else: status_vect = None key = JobDetails.make_fullkey(link.full_linkname) link_status = JOB_STATUS_STRINGS[link.check_job_status(key)] if status_vect is None: jobs_status = JOB_STATUS_STRINGS[link.check_jobs_status()] else: jobs_status = status_vect print ("%s%30s : %15s : %20s" % (indent, link.linkname, link_status, jobs_status)) if hasattr(link, 'print_status') and recurse: print ("---------- %30s -----------" % link.linkname) link.print_status(indent + " ", recurse=True) print ("------------------------------------------------")
def print_status(self, indent="", recurse=False)
Print a summary of the job status for each `Link` in this `Chain`
3.581372
3.357779
1.06659
Link.print_summary(self, stream, indent, recurse_level)
if recurse_level > 0:
    recurse_level -= 1
    indent += "  "
    for link in self._links.values():
        stream.write("\n")
        link.print_summary(stream, indent, recurse_level)
def print_summary(self, stream=sys.stdout, indent="", recurse_level=2)
Print a summary of the activity done by this `Chain`.

Parameters
----------
stream : file
    Stream to print to.  Must have a 'write' method.

indent : str
    Indentation at the start of each line.

recurse_level : int
    Number of recursion levels to print.
2.650452
2.946751
0.899449
Gtlink_exphpsun.register_class()
Gtlink_suntemp.register_class()
Gtexphpsun_SG.register_class()
Gtsuntemp_SG.register_class()
SunMoonChain.register_class()
def register_classes()
Register these classes with the `LinkFactory`
13.101266
12.877335
1.01739
job_configs = {}

components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])

mktime = args['mktimefilter']

base_config = dict(nxpix=args['nxpix'],
                   nypix=args['nypix'],
                   binsz=args['binsz'])

for comp in components:
    zcut = "zmax%i" % comp.zmax
    key = comp.make_key('{ebin_name}_{evtype_name}')
    name_keys = dict(zcut=zcut,
                     ebin=comp.ebin_name,
                     psftype=comp.evtype_name,
                     irf_ver=NAME_FACTORY.irf_ver(),
                     coordsys=comp.coordsys,
                     mktime=mktime,
                     fullpath=True)
    outfile = NAME_FACTORY.bexpcube(**name_keys)
    ltcube = NAME_FACTORY.ltcube(**name_keys)
    full_config = base_config.copy()
    full_config.update(dict(infile=ltcube,
                            outfile=outfile,
                            irfs=NAME_FACTORY.irfs(**name_keys),
                            evtype=comp.evtype,
                            emin=comp.emin,
                            emax=comp.emax,
                            enumbins=comp.enumbins,
                            logfile=make_nfs_path(
                                outfile.replace('.fits', '.log'))))
    job_configs[key] = full_config

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
4.741343
4.741525
0.999962
job_configs = {}

components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])

mktime = args['mktimefilter']

for comp in components:
    zcut = "zmax%i" % comp.zmax
    key = comp.make_key('{ebin_name}_{evtype_name}')
    name_keys = dict(zcut=zcut,
                     ebin=comp.ebin_name,
                     psftype=comp.evtype_name,
                     irf_ver=NAME_FACTORY.irf_ver(),
                     mktime=mktime,
                     fullpath=True)
    outfile = NAME_FACTORY.bexpcube_sun(**name_keys)
    ltcube_sun = NAME_FACTORY.ltcube_sun(**name_keys)
    job_configs[key] = dict(infile=ltcube_sun,
                            outfile=outfile,
                            irfs=NAME_FACTORY.irfs(**name_keys),
                            evtype=comp.evtype,
                            emin=comp.emin,
                            emax=comp.emax,
                            enumbins=comp.enumbins,
                            logfile=make_nfs_path(
                                outfile.replace('.fits', '.log')))

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
5.533751
5.529974
1.000683
job_configs = {}

components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])

# FIXME
mktime = args['mktimefilter']

for comp in components:
    for sourcekey in args['sourcekeys']:
        zcut = "zmax%i" % comp.zmax
        key = comp.make_key('{ebin_name}_{evtype_name}') + \
            "_%s" % sourcekey
        name_keys = dict(zcut=zcut,
                         ebin=comp.ebin_name,
                         psftype=comp.evtype_name,
                         irf_ver=NAME_FACTORY.irf_ver(),
                         sourcekey=sourcekey,
                         mktime=mktime,
                         coordsys=comp.coordsys,
                         fullpath=True)
        outfile = NAME_FACTORY.template_sunmoon(**name_keys)
        job_configs[key] = dict(
            expsun=NAME_FACTORY.bexpcube_sun(**name_keys),
            avgexp=NAME_FACTORY.bexpcube(**name_keys),
            sunprof=NAME_FACTORY.angprofile(**name_keys),
            cmap='none',
            outfile=outfile,
            irfs=NAME_FACTORY.irfs(**name_keys),
            evtype=comp.evtype,
            emin=comp.emin,
            emax=comp.emax,
            enumbins=comp.enumbins,
            logfile=outfile.replace('.fits', '.log'))

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
6.177313
6.181164
0.999377
config_yaml = input_dict['config']
config_dict = load_yaml(config_yaml)

data = config_dict.get('data')
comp = config_dict.get('comp')
sourcekeys = config_dict.get('sourcekeys')
mktimefilter = config_dict.get('mktimefilter')

self._set_link('expcube2', Gtexpcube2wcs_SG,
               comp=comp, data=data,
               mktimefilter=mktimefilter)

self._set_link('exphpsun', Gtexphpsun_SG,
               comp=comp, data=data,
               mktimefilter=mktimefilter)

self._set_link('suntemp', Gtsuntemp_SG,
               comp=comp, data=data,
               mktimefilter=mktimefilter,
               sourcekeys=sourcekeys)
def _map_arguments(self, input_dict)
Map from the top-level arguments to the arguments provided to the individual links.
4.578999
4.553231
1.005659
if self.components is None:
    raise ValueError('Model component %s does not have '
                     'sub-components' % self.sourcekey)

if self.moving:
    comp_key = "zmax%i" % (comp.zmax)
elif self.selection_dependent:
    comp_key = comp.make_key('{ebin_name}_{evtype_name}')
else:
    raise ValueError('Model component %s is not moving or '
                     'selection dependent' % self.sourcekey)

return self.components[comp_key]
def get_component_info(self, comp)
Return the information about the sub-component specific to a
particular data selection.

Parameters
----------
comp : `binning.Component` object
    Specifies the sub-component.

Returns
-------
`ModelComponentInfo` object
7.609865
6.553121
1.161258
if self.components is None:
    self.components = {}
self.components[compinfo.comp_key] = compinfo
def add_component_info(self, compinfo)
Add sub-component specific information for a particular data
selection.

Parameters
----------
compinfo : `ModelComponentInfo` object
    Sub-component being added.
3.390644
4.371345
0.775652
new_comp = copy.deepcopy(self)
# sub_com = self.components[key]
new_comp.components = None
new_comp.comp_key = key
return new_comp
def clone_and_merge_sub(self, key)
Clone self and merge the clone with sub-component specific
information.

Parameters
----------
key : str
    Key specifying which sub-component.

Returns
-------
`ModelComponentInfo` object
5.409286
5.925289
0.912915
for colname in t1.colnames:
    col = t1.columns[colname]
    if colname in t0.columns:
        continue
    new_col = Column(name=col.name, length=len(t0), dtype=col.dtype)
    t0.add_column(new_col)
def add_columns(t0, t1)
Add columns of table t1 to table t0.
3.036202
2.93865
1.033196
right = right.copy()
if cols_right is None:
    cols_right = right.colnames
else:
    cols_right = [c for c in cols_right if c in right.colnames]

if key_left != key_right:
    right[key_right].name = key_left

if key_left not in cols_right:
    cols_right += [key_left]

out = join(left, right[cols_right], keys=key_left, join_type='left')

for col in out.colnames:
    if out[col].dtype.kind in ['S', 'U']:
        out[col].fill_value = ''
    elif out[col].dtype.kind in ['i']:
        out[col].fill_value = 0
    else:
        out[col].fill_value = np.nan

return out.filled()
def join_tables(left, right, key_left, key_right, cols_right=None)
Perform a join of two tables.

Parameters
----------
left : `~astropy.table.Table`
    Left table for the join.

right : `~astropy.table.Table`
    Right table for the join.

key_left : str
    Key used to match elements from the ``left`` table.

key_right : str
    Key used to match elements from the ``right`` table.

cols_right : list
    Subset of columns from the ``right`` table that will be
    appended to the joined table.
2.077047
2.237758
0.928182
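A small self-contained demonstration of the left join and the fill behavior; the table contents are invented for illustration:

from astropy.table import Table

left = Table({'name': ['srcA', 'srcB', 'srcC'],
              'ts': [25.0, 81.0, 9.0]})
right = Table({'Source_Name': ['srcA', 'srcB'],
               'glon': [120.1, 240.7],
               'assoc': ['PSR J0000+0000', '']})

# Every row of `left` is kept; unmatched string cells are filled with ''
# and unmatched floating-point cells with NaN.
out = join_tables(left, right, 'name', 'Source_Name',
                  cols_right=['glon', 'assoc'])
print(out)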
for colname in tab.colnames:
    if tab[colname].dtype.kind in ['S', 'U']:
        tab[colname] = np.core.defchararray.strip(tab[colname])
def strip_columns(tab)
Strip whitespace from string columns.
2.281694
2.191125
1.041335
o = {}
for colname in row.colnames:
    if isinstance(row[colname], np.string_) and \
            row[colname].dtype.kind in ['S', 'U']:
        o[colname] = str(row[colname])
    else:
        o[colname] = row[colname]
return o
def row_to_dict(row)
Convert a table row to a dictionary.
2.719582
2.696579
1.008531
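For example, converting the first row of a small table; the columns are invented:

from astropy.table import Table

tab = Table({'name': ['srcA'], 'ts': [25.0]})
d = row_to_dict(tab[0])
print(d['name'], d['ts'])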
args = self._parser.parse_args(argv)

obs = BinnedAnalysis.BinnedObs(irfs=args.irfs,
                               expCube=args.expcube,
                               srcMaps=args.srcmaps,
                               binnedExpMap=args.bexpmap)

like = BinnedAnalysis.BinnedAnalysis(obs,
                                     optimizer='MINUIT',
                                     srcModel=GtMergeSrcmaps.NULL_MODEL,
                                     wmap=None)

like.logLike.set_use_single_fixed_map(False)

print("Reading xml model from %s" % args.srcmdl)
source_factory = pyLike.SourceFactory(obs.observation)
source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory,
                       False, True, True)

strv = pyLike.StringVector()
source_factory.fetchSrcNames(strv)
source_names = [strv[i] for i in range(strv.size())]

missing_sources = []
srcs_to_merge = []
for source_name in source_names:
    try:
        source = source_factory.releaseSource(source_name)
        # EAC, add the source directly to the model
        like.logLike.addSource(source)
        srcs_to_merge.append(source_name)
    except KeyError:
        missing_sources.append(source_name)

comp = like.mergeSources(args.merged, source_names, 'ConstantValue')
like.logLike.getSourceMap(comp.getName())

print("Merged %i sources into %s" % (len(srcs_to_merge),
                                     comp.getName()))
if missing_sources:
    print("Missed sources: ", missing_sources)

print("Writing output source map file %s" % args.outfile)
like.logLike.saveSourceMaps(args.outfile, False, False)
if args.gzip:
    os.system("gzip -9 %s" % args.outfile)

print("Writing output xml file %s" % args.outxml)
like.writeXml(args.outxml)
def run_analysis(self, argv)
Run this analysis
5.027795
5.02489
1.000578
job_configs = {}

components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])

ret_dict = make_catalog_comp_dict(sources=args['library'], basedir='.')
comp_info_dict = ret_dict['comp_info_dict']

for split_ver, split_dict in comp_info_dict.items():
    for source_key, source_dict in split_dict.items():
        full_key = "%s_%s" % (split_ver, source_key)
        merged_name = "%s_%s" % (source_dict.catalog_info.catalog_name,
                                 source_key)
        if source_dict.model_type != 'CompositeSource':
            continue

        for comp in components:
            zcut = "zmax%i" % comp.zmax
            key = "%s_%s" % (full_key,
                             comp.make_key('{ebin_name}_{evtype_name}'))
            name_keys = dict(zcut=zcut,
                             sourcekey=full_key,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             coordsys=comp.coordsys,
                             mktime='none',
                             irf_ver=NAME_FACTORY.irf_ver())
            nested_name_keys = dict(
                zcut=zcut,
                sourcekey=source_dict.catalog_info.catalog_name,
                ebin=comp.ebin_name,
                psftype=comp.evtype_name,
                coordsys=comp.coordsys,
                mktime='none',
                irf_ver=NAME_FACTORY.irf_ver())
            outfile = NAME_FACTORY.srcmaps(**name_keys)
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))

            job_configs[key] = dict(
                srcmaps=NAME_FACTORY.srcmaps(**nested_name_keys),
                expcube=NAME_FACTORY.ltcube(**name_keys),
                irfs=NAME_FACTORY.irfs(**name_keys),
                bexpmap=NAME_FACTORY.bexpcube(**name_keys),
                srcmdl=NAME_FACTORY.srcmdl_xml(**name_keys),
                merged=merged_name,
                outfile=outfile,
                outxml=NAME_FACTORY.nested_srcmdl_xml(**name_keys),
                logfile=logfile)

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
4.252571
4.257729
0.998789
if not os.path.exists(logfile):
    return not exists

log_text = open(logfile).read()
if exited in log_text:
    return 'Exited'
elif successful in log_text:
    return 'Successful'
else:
    return 'None'
def check_log(logfile, exited='Exited with exit code', successful='Successfully completed', exists=True)
Often the logfile doesn't exist because the job hasn't begun to
run.  It is unclear what you want to do in that case...

Parameters
----------
logfile : str
    String with path to logfile.

exists : bool
    Is the logfile required to exist.

exited : str
    String in logfile used to determine if a job exited.

successful : str
    String in logfile used to determine if a job succeeded.
2.626949
3.129052
0.839535
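A sketch of how the return values map onto logfile states; the path is a placeholder:

status = check_log('logs/job_000001.log')
if status == 'Exited':
    print('Job failed; consider resubmitting.')
elif status == 'Successful':
    print('Job completed.')
else:
    # Either 'None' (log exists but contains neither marker) or
    # `not exists` (False by default) when the logfile is missing.
    print('Job pending or still running:', status)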
batch_opts.setdefault('W', 300)
batch_opts.setdefault('R', 'rhel60 && scratch > 10')

cmd_opts = ''
for k, v in opts.items():
    if isinstance(v, list):
        cmd_opts += ' '.join(['--%s=%s' % (k, t) for t in v])
    elif isinstance(v, bool) and v:
        cmd_opts += ' --%s ' % (k)
    elif isinstance(v, bool):
        continue
    elif v is not None:
        cmd_opts += ' --%s=\"%s\" ' % (k, v)

bash_script = "{exe} {args} {opts}"

scriptexe = jobname + '.sh'
with open(os.path.join(scriptexe), 'wt') as f:
    f.write(bash_script.format(exe=exe, args=' '.join(args),
                               opts=cmd_opts))

batch_optstr = parse_lsf_opts(**batch_opts)
batch_cmd = 'bsub %s ' % (batch_optstr)
batch_cmd += ' bash %s' % scriptexe

print(batch_cmd)
if not dry_run:
    os.system(batch_cmd)
def dispatch_job(jobname, exe, args, opts, batch_opts, dry_run=True)
Dispatch an LSF job.

Parameters
----------
jobname : str
    Name of the job; also used for the generated shell script.

exe : str
    Execution string.

args : list
    Positional arguments.

opts : dict
    Dictionary of command-line options.

batch_opts : dict
    Dictionary of LSF batch options.

dry_run : bool
    Print the batch command but do not submit it.
3.01331
2.980302
1.011075
outdir_base = os.path.abspath(os.path.dirname(binnedfile))
outbasename = os.path.basename(binnedfile)

filelist = ""
for i in range(num_files):
    split_key = "%06i" % i
    output_dir = os.path.join(outdir_base, split_key)
    filepath = os.path.join(output_dir,
                            outbasename.replace('.fits',
                                                '_%s.fits' % split_key))
    filelist += ' %s' % filepath

return filelist
def _make_input_file_list(binnedfile, num_files)
Make the list of input files for a particular energy bin X psf type
2.500659
2.506118
0.997822
comp_file = args.get('comp', None)
datafile = args.get('data', None)
do_ltsum = args.get('do_ltsum', False)

NAME_FACTORY.update_base_dict(datafile)
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'],
                           'counts_cubes')
num_files = args.get('nfiles', 96)

self.comp_dict = yaml.safe_load(open(comp_file))
coordsys = self.comp_dict.pop('coordsys')

for key_e, comp_e in sorted(self.comp_dict.items()):
    if 'mktimefilters' in comp_e:
        mktimelist = comp_e['mktimefilters']
    else:
        mktimelist = ['none']

    if 'evtclasses' in comp_e:
        evtclasslist_vals = comp_e['evtclasses']
    else:
        evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]

    for mktimekey in mktimelist:
        zcut = "zmax%i" % comp_e['zmax']
        kwargs_mktime = dict(zcut=zcut,
                             ebin=key_e,
                             psftype='ALL',
                             coordsys=coordsys,
                             mktime=mktimekey)

        if do_ltsum:
            ltsum_listfile = 'ltsumlist_%s_%s' % (key_e, mktimekey)
            ltsum_outfile = 'ltsum_%s_%s' % (key_e, mktimekey)
            linkname = 'ltsum_%s_%s' % (key_e, mktimekey)
            self._set_link(linkname, Gtlink_ltsum,
                           infile1=ltsum_listfile,
                           infile2=None,
                           outfile=ltsum_outfile,
                           logfile=os.path.join(
                               outdir_base, "%s.log" % linkname))

        for evtclassval in evtclasslist_vals:
            for psf_type in sorted(comp_e['psf_types'].keys()):
                fullkey = "%s_%s_%s_%s" % (key_e, mktimekey,
                                           evtclassval, psf_type)
                linkname = 'coadd_%s' % (fullkey)
                kwargs_bin = kwargs_mktime.copy()
                kwargs_bin['psftype'] = psf_type
                kwargs_bin['evclass'] = evtclassval
                ccube_name = \
                    os.path.basename(NAME_FACTORY.ccube(**kwargs_bin))
                outputfile = os.path.join(outdir_base, ccube_name)
                args = _make_input_file_list(outputfile, num_files)
                self._set_link(linkname, Link_FermipyCoadd,
                               args=args,
                               output=outputfile,
                               logfile=os.path.join(
                                   outdir_base, "%s.log" % linkname))
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links.
3.741738
3.678395
1.01722
job_configs = {}

components = Component.build_from_yamlfile(args['comp'])

datafile = args['data']
if datafile is None or datafile == 'None':
    return job_configs
NAME_FACTORY.update_base_dict(args['data'])
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'],
                           'counts_cubes')

inputfiles = create_inputlist(args['ft1file'])
num_files = len(inputfiles)

for comp in components:
    zcut = "zmax%i" % comp.zmax

    mktimelist = copy.copy(comp.mktimefilters)
    if not mktimelist:
        mktimelist.append('none')
    evtclasslist_keys = copy.copy(comp.evtclasses)
    if not evtclasslist_keys:
        evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
    else:
        evtclasslist_vals = copy.copy(evtclasslist_keys)

    for mktimekey in mktimelist:
        for evtclassval in evtclasslist_vals:
            fullkey = comp.make_key(
                '%s_%s_{ebin_name}_%s_{evtype_name}' %
                (evtclassval, zcut, mktimekey))

            name_keys = dict(zcut=zcut,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             coordsys=comp.coordsys,
                             irf_ver=NAME_FACTORY.irf_ver(),
                             mktime=mktimekey,
                             evclass=evtclassval,
                             fullpath=True)

            ccube_name = os.path.basename(
                NAME_FACTORY.ccube(**name_keys))
            outfile = os.path.join(outdir_base, ccube_name)
            infiles = _make_input_file_list(outfile, num_files)
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            job_configs[fullkey] = dict(args=infiles,
                                        output=outfile,
                                        logfile=logfile)

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
4.677965
4.687281
0.998013
params = params.copy()
params[0] = 1.0
params[0] = flux / cls.eval_flux(emin, emax, params, scale=scale)
return cls(params, scale)
def create_from_flux(cls, params, emin, emax, flux, scale=1.0)
Create a spectral function instance given its flux.
3.753379
3.693936
1.016092
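As a usage sketch, a power law can be normalized so that its integral flux over a band matches a target value. This assumes fermipy's `PowerLaw` spectral class, parameterized as [prefactor, index] with `scale` as the pivot energy in MeV:

from fermipy.spectrum import PowerLaw

# Normalize to an integral flux of 1e-9 ph cm^-2 s^-1 in 1-100 GeV.
fn = PowerLaw.create_from_flux([1e-13, -2.0], 1e3, 1e5, 1e-9, scale=1e3)

print(fn.params[0])        # rescaled prefactor
print(fn.flux(1e3, 1e5))   # ~1e-9 by construction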
params = params.copy()
params[0] = 1.0
params[0] = eflux / cls.eval_eflux(emin, emax, params, scale=scale)
return cls(params, scale)
def create_from_eflux(cls, params, emin, emax, eflux, scale=1.0)
Create a spectral function instance given its energy flux.
3.609648
3.604789
1.001348
emin = np.expand_dims(emin, -1)
emax = np.expand_dims(emax, -1)

params = copy.deepcopy(params)
for i, p in enumerate(params):
    params[i] = np.expand_dims(params[i], -1)

xedges = np.linspace(0.0, 1.0, npt + 1)
logx_edge = np.log(emin) + xedges * (np.log(emax) - np.log(emin))
logx = 0.5 * (logx_edge[..., 1:] + logx_edge[..., :-1])
xw = np.exp(logx_edge[..., 1:]) - np.exp(logx_edge[..., :-1])

dnde = fn(np.exp(logx), params, scale, extra_params)
return np.sum(dnde * xw, axis=-1)
def _integrate(cls, fn, emin, emax, params, scale=1.0, extra_params=None, npt=20)
Fast numerical integration using the midpoint rule on a logarithmic energy grid.
2.194089
2.235432
0.981506
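The method evaluates the integrand at the log-space midpoint of each sub-interval and weights it by the linear bin width. A self-contained check of this scheme (a re-implementation for illustration, not the class method itself) against the analytic integral of a power law:

import numpy as np

def integrate_log_midpoint(fn, emin, emax, npt=20):
    # Midpoint rule on a logarithmic grid, mirroring _integrate above.
    xedges = np.linspace(0.0, 1.0, npt + 1)
    logx_edge = np.log(emin) + xedges * (np.log(emax) - np.log(emin))
    logx = 0.5 * (logx_edge[1:] + logx_edge[:-1])
    xw = np.exp(logx_edge[1:]) - np.exp(logx_edge[:-1])
    return np.sum(fn(np.exp(logx)) * xw)

dnde = lambda e: 1e-13 * (e / 1e3) ** -2.0
approx = integrate_log_midpoint(dnde, 1e3, 1e5)
exact = 1e-13 * 1e3 * (1.0 - 1e-2)   # analytic E^-2 integral
print(approx, exact)                 # the two closely agree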
params = self.params if params is None else params
return np.squeeze(self.eval_dnde(x, params, self.scale,
                                 self.extra_params))
def dnde(self, x, params=None)
Evaluate differential flux.
5.052118
5.057128
0.999009
params = self.params if params is None else params
return np.squeeze(self.eval_ednde(x, params, self.scale,
                                  self.extra_params))
def ednde(self, x, params=None)
Evaluate E times differential flux.
5.672923
5.374223
1.05558
params = self.params if params is None else params
return np.squeeze(self.eval_e2dnde(x, params, self.scale,
                                   self.extra_params))
def e2dnde(self, x, params=None)
Evaluate E^2 times differential flux.
4.995207
4.706222
1.061405
params = self.params if params is None else params
return np.squeeze(self.eval_dnde_deriv(x, params, self.scale,
                                       self.extra_params))
def dnde_deriv(self, x, params=None)
Evaluate derivative of the differential flux with respect to E.
5.025597
5.243203
0.958498
params = self.params if params is None else params
return np.squeeze(self.eval_ednde_deriv(x, params, self.scale,
                                        self.extra_params))
def ednde_deriv(self, x, params=None)
Evaluate derivative of E times differential flux with respect to E.
5.200324
6.066468
0.857224
params = self.params if params is None else params
return np.squeeze(self.eval_e2dnde_deriv(x, params, self.scale,
                                         self.extra_params))
def e2dnde_deriv(self, x, params=None)
Evaluate derivative of E^2 times differential flux with respect to E.
4.87439
5.471406
0.890884
params = self.params if params is None else params
return np.squeeze(self.eval_flux(emin, emax, params, self.scale,
                                 self.extra_params))
def flux(self, emin, emax, params=None)
Evaluate the integral flux.
4.845782
4.667356
1.038229
params = self.params if params is None else params
return np.squeeze(self.eval_eflux(emin, emax, params, self.scale,
                                  self.extra_params))
def eflux(self, emin, emax, params=None)
Evaluate the integral energy flux.
4.858835
4.891016
0.99342
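The eight evaluator wrappers above share one pattern: default to the stored parameters, delegate to a class-level eval_* method, and squeeze singleton dimensions out of the result. A minimal, self-contained sketch of that pattern, with a hypothetical power-law evaluator standing in for the real eval_dnde:

import numpy as np

class SpectralFn(object):
    # Sketch of the wrapper pattern used above; eval_dnde is a
    # placeholder for the real class-level evaluator.
    def __init__(self, params, scale=1.0, extra_params=None):
        self.params = params
        self.scale = scale
        self.extra_params = extra_params

    @staticmethod
    def eval_dnde(x, params, scale=1.0, extra_params=None):
        # hypothetical power-law evaluator
        return params[0] * (np.asarray(x) / scale) ** params[1]

    def dnde(self, x, params=None):
        # Default to the stored parameters, then squeeze out any
        # singleton dimensions introduced by broadcasting.
        params = self.params if params is None else params
        return np.squeeze(self.eval_dnde(x, params, self.scale,
                                         self.extra_params))

fn = SpectralFn([1e-12, -2.0], scale=1e3)
print(fn.dnde(1e3))  # 1e-12 at the pivot energy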
timer = Timer.create(start=True)
name = self.roi.get_source_by_name(name).name

schema = ConfigSchema(self.defaults['extension'],
                      optimizer=self.defaults['optimizer'])
schema.add_option('prefix', '')
schema.add_option('outfile', None, '', str)
config = utils.create_dict(self.config['extension'],
                           optimizer=self.config['optimizer'])
config = schema.create_config(config, **kwargs)

self.logger.info('Running extension fit for %s', name)

free_state = FreeParameterState(self)
ext = self._extension(name, **config)
free_state.restore()

self.logger.info('Finished extension fit.')

if config['make_plots']:
    self._plotter.make_extension_plots(ext, self.roi,
                                       prefix=config['prefix'])

outfile = config.get('outfile', None)
if outfile is None:
    outfile = utils.format_filename(self.workdir, 'ext',
                                    prefix=[config['prefix'],
                                            name.lower().replace(' ', '_')])
else:
    outfile = os.path.join(self.workdir,
                           os.path.splitext(outfile)[0])

if config['write_fits']:
    self._make_extension_fits(ext, outfile + '.fits')

if config['write_npy']:
    np.save(outfile + '.npy', dict(ext))

self.logger.info('Execution time: %.2f s', timer.elapsed_time)
return ext
def extension(self, name, **kwargs)
Test this source for spatial extension with the likelihood ratio
method (TS_ext). This method will substitute an extended spatial
model for the given source and perform a one-dimensional scan of
the spatial extension parameter over the range specified with the
width parameters. The 1-D profile likelihood is then used to
compute the best-fit value, upper limit, and TS for extension.
The nuisance parameters that will be simultaneously fit when
performing the spatial scan can be controlled with the
``fix_shape``, ``free_background``, and ``free_radius`` options.
By default the position of the source will be fixed to its
current position. A simultaneous fit to position and extension
can be performed by setting ``fit_position`` to True.

Parameters
----------
name : str
    Source name.

{options}

optimizer : dict
    Dictionary that overrides the default optimizer settings.

Returns
-------
extension : dict
    Dictionary containing results of the extension analysis. The same
    dictionary is also saved to the dictionary of this source under
    'extension'.
4.411203
4.130324
1.068004
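A typical call, sketched under the assumption of an already-configured analysis instance ``gta``; the source name is a placeholder, and the options shown (``make_plots``, ``write_fits``, ``write_npy``, ``prefix``) are the ones read by the code above.

# Hypothetical usage; 'gta' is an already-configured analysis
# instance and the source name is a placeholder.
ext = gta.extension('sourceA',
                    write_npy=True,
                    write_fits=True,
                    make_plots=False,
                    prefix='ext_test')
# The same results are also stored under the source's 'extension' entry.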
import matplotlib.pyplot as plt

if xlims is None:
    xmin = nll.interp.xmin
    xmax = nll.interp.xmax
else:
    xmin = xlims[0]
    xmax = xlims[1]

y1 = nll.interp(xmin)
y2 = nll.interp(xmax)

ymin = min(y1, y2, 0.0)
ymax = max(y1, y2, 0.5)

xvals = np.linspace(xmin, xmax, nstep)
yvals = nll.interp(xvals)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))

ax.set_xlabel(NORM_LABEL[fluxType])
ax.set_ylabel(r'$-\Delta \log\mathcal{L}$')
ax.plot(xvals, yvals)
return fig, ax
def plotNLL_v_Flux(nll, fluxType, nstep=25, xlims=None)
Plot the (negative) log-likelihood as a function of normalization.

nll : a LnLFn object
nstep : Number of steps to plot
xlims : x-axis limits; if None, take them from the nll object

returns fig, ax, which are matplotlib figure and axes objects
2.094213
2.027917
1.032692
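plotNLL_v_Flux only needs an object whose ``interp`` attribute is callable and carries ``xmin``/``xmax``, so a stand-in is easy to build for testing. Everything below is illustrative: the stub classes and the parabolic profile are invented, and 'norm' is assumed to be a valid NORM_LABEL key.

import numpy as np

class StubInterp(object):
    # Callable stand-in for the interpolator held by a LnLFn object.
    xmin, xmax = 0.0, 2.0
    def __call__(self, x):
        # Parabolic -delta(logL) profile with a minimum at x = 1.
        return (np.asarray(x) - 1.0) ** 2

class StubNLL(object):
    interp = StubInterp()

fig, ax = plotNLL_v_Flux(StubNLL(), 'norm', nstep=50)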
import matplotlib.pyplot as plt

ymin = ylims[0]
ymax = ylims[1]
if zlims is None:
    zmin = -10
    zmax = 0.
else:
    zmin = zlims[0]
    zmax = zlims[1]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylim((ymin, ymax))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)

normVals = np.logspace(np.log10(ymin), np.log10(ymax), nstep)
ztmp = []
for i in range(castroData.nx):
    ztmp.append(castroData[i].interp(normVals))

ztmp = np.asarray(ztmp).T
ztmp *= -1.
ztmp = np.where(ztmp < zmin, np.nan, ztmp)

if global_min:
    global_offset = castroData.nll_offsets.min()
    offsets = global_offset - castroData.nll_offsets
    ztmp += offsets

cmap = plt.get_cmap('jet_r')
xedge = castroData.x_edges()
ax.set_xlim((xedge[0], xedge[-1]))

im = ax.pcolormesh(xedge, normVals, ztmp,
                   vmin=zmin, vmax=zmax,
                   cmap=cmap, linewidth=0)
cax, cbar = make_colorbar(fig, ax, im, (zmin, zmax))

return fig, ax, im, ztmp, cax, cbar
def plotCastro_base(castroData, ylims, xlabel, ylabel, nstep=25, zlims=None, global_min=False)
Make a color plot (castro plot) of the log-likelihood as a function of energy and flux normalization.

castroData : A CastroData_Base object, with the log-likelihood v. normalization for each energy bin
ylims : y-axis limits
xlabel : x-axis title
ylabel : y-axis title
nstep : Number of y-axis steps to plot for each energy bin
zlims : z-axis limits
global_min : Plot the log-likelihood w.r.t. the global min.

returns fig, ax, im, ztmp, cax, cbar: the matplotlib figure, axes and image objects, the plotted array, and the colorbar axes and colorbar objects
2.515322
2.459067
1.022877
xlabel = "Energy [MeV]" ylabel = NORM_LABEL[castroData.norm_type] return plotCastro_base(castroData, ylims, xlabel, ylabel, nstep, zlims)
def plotCastro(castroData, ylims, nstep=25, zlims=None)
Make a color plot (castro plot) of the delta log-likelihood as a function of energy and flux normalization.

castroData : A CastroData object, with the log-likelihood v. normalization for each energy bin
ylims : y-axis limits
nstep : Number of y-axis steps to plot for each energy bin
zlims : z-axis limits

returns fig, ax, im, ztmp, cax, cbar, as from `plotCastro_base`
5.060329
5.870055
0.862058
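plotCastro is a thin wrapper over plotCastro_base, so usage reduces to passing a CastroData object and axis limits. A hedged sketch; ``castro_data`` stands for an existing CastroData instance and the limits are made up.

# Hypothetical usage; castro_data is an existing CastroData instance.
fig, ax, im, ztmp, cax, cbar = plotCastro(castro_data,
                                          ylims=(1e-13, 1e-9),
                                          nstep=100,
                                          zlims=(-16., 0.))
fig.savefig('castro.png')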
import matplotlib.pyplot as plt

xmin = castroData.refSpec.ebins[0]
xmax = castroData.refSpec.ebins[-1]
ymin = ylims[0]
ymax = ylims[1]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
ax.set_xlabel("Energy [GeV]")
ax.set_ylabel(NORM_LABEL[castroData.norm_type])

plotSED_OnAxis(ax, castroData, TS_thresh, errSigma)

for spec in specVals:
    ax.loglog(castroData.refSpec.eref, spec)

return fig, ax
def plotSED(castroData, ylims, TS_thresh=4.0, errSigma=1.0, specVals=[])
Plot the SED, i.e., the best-fit normalization (or upper limit) in each energy bin.

castroData : A CastroData object, with the log-likelihood v. normalization for each energy bin
ylims : y-axis limits
TS_thresh : TS value above which to plot a point, rather than an upper limit
errSigma : Number of sigma to use for error bars
specVals : List of spectra to add to plot

returns fig, ax, which are matplotlib figure and axes objects
2.453071
2.444484
1.003513
import matplotlib.pyplot as plt

xmin = min(castroData1.refSpec.ebins[0], castroData2.refSpec.ebins[0])
xmax = max(castroData1.refSpec.ebins[-1], castroData2.refSpec.ebins[-1])
ymin = ylims[0]
ymax = ylims[1]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
ax.set_xlabel("Energy [GeV]")
ax.set_ylabel(NORM_LABEL[castroData1.norm_type])

plotSED_OnAxis(ax, castroData1, TS_thresh, errSigma,
               colorLim='blue', colorPoint='blue')
plotSED_OnAxis(ax, castroData2, TS_thresh, errSigma,
               colorLim='red', colorPoint='red')

for spec in specVals:
    ax.loglog(castroData1.refSpec.eref, spec)

return fig, ax
def compare_SED(castroData1, castroData2, ylims, TS_thresh=4.0, errSigma=1.0, specVals=[])
Compare two SEDs.

castroData1 : A CastroData object, with the log-likelihood v. normalization for each energy bin
castroData2 : A second CastroData object of the same form
ylims : y-axis limits
TS_thresh : TS value above which to plot a point, rather than an upper limit
errSigma : Number of sigma to use for error bars
specVals : List of spectra to add to plot

returns fig, ax, which are matplotlib figure and axes objects
2.238965
2.185586
1.024423
library_yamlfile = kwargs.get('library', 'models/library.yaml')
gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))
if library_yamlfile is None or library_yamlfile == 'None':
    return gmm
diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(
    library_yamlfile)
for diffuse_value in diffuse_comps.values():
    if diffuse_value is None:
        continue
    if diffuse_value['model_type'] != 'galprop_rings':
        continue
    versions = diffuse_value['versions']
    for version in versions:
        gmm.make_ring_dict(version)
return gmm
def make_ring_dicts(**kwargs)
Build and return the information about the Galprop rings
5.023585
4.327066
1.160968
library_yamlfile = kwargs.pop('library', 'models/library.yaml')
components = kwargs.pop('components', None)
if components is None:
    comp_yamlfile = kwargs.pop('comp', 'config/binning.yaml')
    components = Component.build_from_yamlfile(comp_yamlfile)

gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))
dmm = kwargs.get('DiffuseModelManager', DiffuseModelManager(**kwargs))

if library_yamlfile is None or library_yamlfile == 'None':
    diffuse_comps = {}
else:
    diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(
        library_yamlfile)

diffuse_comp_info_dict = dmm.make_diffuse_comp_info_dict(
    diffuse_comps, components)

for diffuse_value in diffuse_comps.values():
    if diffuse_value is None:
        continue
    if diffuse_value['model_type'] != 'galprop_rings':
        continue
    versions = diffuse_value['versions']
    for version in versions:
        galprop_dict = gmm.make_diffuse_comp_info_dict(version)
        diffuse_comp_info_dict.update(galprop_dict)

return dict(comp_info_dict=diffuse_comp_info_dict,
            GalpropMapManager=gmm,
            DiffuseModelManager=dmm)
def make_diffuse_comp_info_dict(**kwargs)
Build and return the information about the diffuse components
3.276216
3.249221
1.008308
galprop_rings_yaml = self._name_factory.galprop_rings_yaml(galkey=galkey,
                                                           fullpath=True)
# Use a context manager so the file handle is closed after reading.
with open(galprop_rings_yaml) as f:
    galprop_rings = yaml.safe_load(f)
return galprop_rings
def read_galprop_rings_yaml(self, galkey)
Read the yaml file for a particular galprop key
2.971288
2.95527
1.00542
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = self._name_factory.galprop_ringkey(
    source_name=source_name, ringkey="ring_%i" % ring)
format_dict['galprop_run'] = galprop_run
return self._name_factory.galprop_gasmap(**format_dict)
def make_ring_filename(self, source_name, ring, galprop_run)
Make the name of a gasmap file for a single ring.

Parameters
----------
source_name : str
    The galprop component, used to define path to gasmap files
ring : int
    The ring index
galprop_run : str
    String identifying the galprop parameters
5.798524
6.066635
0.955806
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(
    source_name=source_name, galpropkey=galkey)
format_dict['fullpath'] = fullpath
return self._name_factory.merged_gasmap(**format_dict)
def make_merged_name(self, source_name, galkey, fullpath)
Make the name of a gasmap file for a set of merged rings.

Parameters
----------
source_name : str
    The galprop component, used to define path to gasmap files
galkey : str
    A short key identifying the galprop parameters
fullpath : bool
    Return the full path name
5.241634
5.458386
0.96029
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(
    source_name=source_name, galpropkey=galkey)
format_dict['fullpath'] = fullpath
return self._name_factory.srcmdl_xml(**format_dict)
def make_xml_name(self, source_name, galkey, fullpath)
Make the name of an xml file for a model definition for a set of merged rings.

Parameters
----------
source_name : str
    The galprop component, used to define path to gasmap files
galkey : str
    A short key identifying the galprop parameters
fullpath : bool
    Return the full path name
5.307508
6.430643
0.825346
flist = []
for sourcekey in sourcekeys:
    for ring in rings:
        flist += [self.make_ring_filename(sourcekey, ring, galprop_run)]
return flist
def make_ring_filelist(self, sourcekeys, rings, galprop_run)
Make a list of all the template files for a merged component.

Parameters
----------
sourcekeys : list-like of str
    The names of the components to merge
rings : list-like of int
    The indices of the rings to merge
galprop_run : str
    String identifying the galprop parameters
3.079977
4.08829
0.753366
galprop_rings = self.read_galprop_rings_yaml(galkey)
galprop_run = galprop_rings['galprop_run']
ring_limits = galprop_rings['ring_limits']
comp_dict = galprop_rings['diffuse_comp_dict']
remove_rings = galprop_rings.get('remove_rings', [])

ring_dict = {}
nring = len(ring_limits) - 1
for source_name, source_value in comp_dict.items():
    base_dict = dict(source_name=source_name,
                     galkey=galkey,
                     galprop_run=galprop_run)
    for iring in range(nring):
        sourcekey = "%s_%i" % (source_name, iring)
        if sourcekey in remove_rings:
            continue
        full_key = "%s_%s" % (sourcekey, galkey)
        rings = range(ring_limits[iring], ring_limits[iring + 1])
        base_dict.update(dict(ring=iring,
                              sourcekey=sourcekey,
                              files=self.make_ring_filelist(source_value,
                                                            rings,
                                                            galprop_run),
                              merged_gasmap=self.make_merged_name(
                                  sourcekey, galkey, False)))
        ring_dict[full_key] = GalpropMergedRingInfo(**base_dict)

self._ring_dicts[galkey] = ring_dict
return ring_dict
def make_ring_dict(self, galkey)
Make a dictionary mapping the merged component names to lists of template files.

Parameters
----------
galkey : str
    Unique key for this ring dictionary

Returns a dictionary mapping each merged component key to a `model_component.GalpropMergedRingInfo` object.
3.161854
2.872315
1.100803
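make_ring_dict reads four keys from the galprop rings yaml file: galprop_run, ring_limits, diffuse_comp_dict, and remove_rings. Parsed with yaml.safe_load, the structure would look like the following sketch; every name and number here is invented for illustration.

# Illustrative structure only; all values are made up.
galprop_rings = dict(
    galprop_run='example_run',
    # 4 merged rings built from rings 1-4, 5-8, 9-12, 13-16
    ring_limits=[1, 5, 9, 13, 17],
    # Each entry maps a merged component to the galprop components
    # whose ring templates get merged into it.
    diffuse_comp_dict=dict(merged_CO=['CO_ring'],
                           merged_HI=['HI_ring']),
    # Merged rings to drop, keyed as '<source_name>_<iring>'.
    remove_rings=['merged_CO_3'],
)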
kwargs = dict(source_name=merged_name,
              source_ver=galkey,
              model_type='MapCubeSource',
              Spatial_Filename=self.make_merged_name(merged_name,
                                                     galkey,
                                                     fullpath=True),
              srcmdl_name=self.make_xml_name(merged_name,
                                             galkey,
                                             fullpath=True))
return MapCubeComponentInfo(**kwargs)
def make_diffuse_comp_info(self, merged_name, galkey)
Make the information about a single merged component.

Parameters
----------
merged_name : str
    The name of the merged component
galkey : str
    A short key identifying the galprop parameters

Returns `model_component.ModelComponentInfo`
7.076873
8.690914
0.814284
galprop_rings = self.read_galprop_rings_yaml(galkey)
ring_limits = galprop_rings.get('ring_limits')
comp_dict = galprop_rings.get('diffuse_comp_dict')
remove_rings = galprop_rings.get('remove_rings', [])

diffuse_comp_info_dict = {}
nring = len(ring_limits) - 1
for source_key in sorted(comp_dict.keys()):
    for iring in range(nring):
        source_name = "%s_%i" % (source_key, iring)
        if source_name in remove_rings:
            continue
        full_key = "%s_%s" % (source_name, galkey)
        diffuse_comp_info_dict[full_key] =\
            self.make_diffuse_comp_info(source_name, galkey)

self._diffuse_comp_info_dicts[galkey] = diffuse_comp_info_dict
return diffuse_comp_info_dict
def make_diffuse_comp_info_dict(self, galkey)
Make a dictionary mapping from merged component to information about that component.

Parameters
----------
galkey : str
    A short key identifying the galprop parameters
2.542178
2.627842
0.967401
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = sourcekey
if model_type == 'IsoSource':
    return self._name_factory.spectral_template(**format_dict)
elif model_type in ['MapCubeSource', 'SpatialMap']:
    return self._name_factory.diffuse_template(**format_dict)
else:
    raise ValueError("Unexpected model_type %s" % model_type)
def make_template_name(self, model_type, sourcekey)
Make the name of a template file for a particular component.

Parameters
----------
model_type : str
    Type of model to use for this component
sourcekey : str
    Key to identify this component

Returns the filename, or None if the component does not require a template file
4.240011
5.164139
0.821049
format_dict = self.__dict__.copy()
format_dict['sourcekey'] = sourcekey
return self._name_factory.srcmdl_xml(**format_dict)
def make_xml_name(self, sourcekey)
Make the name of an xml file for a model definition of a single component.

Parameters
----------
sourcekey : str
    Key to identify this component
7.717375
9.953256
0.775362
model_type = diffuse_dict['model_type']
sourcekey = '%s_%s' % (source_name, source_ver)

if comp_key is None:
    template_name = self.make_template_name(model_type, sourcekey)
    srcmdl_name = self.make_xml_name(sourcekey)
else:
    template_name = self.make_template_name(model_type,
                                            "%s_%s" % (sourcekey, comp_key))
    srcmdl_name = self.make_xml_name("%s_%s" % (sourcekey, comp_key))

template_name = self._name_factory.fullpath(localpath=template_name)
srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name)

kwargs = dict(source_name=source_name,
              source_ver=source_ver,
              model_type=model_type,
              srcmdl_name=srcmdl_name,
              components=components,
              comp_key=comp_key)
kwargs.update(diffuse_dict)

if model_type == 'IsoSource':
    kwargs['Spectral_Filename'] = template_name
    return IsoComponentInfo(**kwargs)
elif model_type == 'MapCubeSource':
    kwargs['Spatial_Filename'] = template_name
    return MapCubeComponentInfo(**kwargs)
elif model_type == 'SpatialMap':
    kwargs['Spatial_Filename'] = template_name
    return SpatialMapComponentInfo(**kwargs)
else:
    raise ValueError("Unexpected model type %s" % model_type)
def make_diffuse_comp_info(self, source_name, source_ver, diffuse_dict, components=None, comp_key=None)
Make the information about a single diffuse component.

Parameters
----------
source_name : str
    Name of the source
source_ver : str
    Key identifying the version of the source
diffuse_dict : dict
    Information about this component
comp_key : str
    Used when we need to keep track of sub-components, i.e., for moving and selection-dependent sources.

Returns `model_component.ModelComponentInfo` or `model_component.IsoComponentInfo`
2.347715
2.273707
1.032549
ret_dict = {}
for key, value in diffuse_sources.items():
    if value is None:
        continue
    model_type = value.get('model_type', 'MapCubeSource')
    if model_type in ['galprop_rings', 'catalog']:
        continue
    selection_dependent = value.get('selection_dependent', False)
    moving = value.get('moving', False)
    versions = value.get('versions', [])
    for version in versions:
        comp_dict = None
        if selection_dependent:
            # For selection-dependent diffuse sources we need to split
            # by binning component
            comp_dict = {}
            for comp in components:
                comp_key = comp.make_key('{ebin_name}_{evtype_name}')
                comp_dict[comp_key] = self.make_diffuse_comp_info(
                    key, version, value, None, comp_key)
        elif moving:
            # For moving diffuse sources we need to split by zmax cut
            comp_dict = {}
            zmax_dict = {}
            for comp in components:
                zmax_dict[int(comp.zmax)] = True
            zmax_list = sorted(zmax_dict.keys())
            for zmax in zmax_list:
                comp_key = "zmax%i" % (zmax)
                comp_dict[comp_key] = self.make_diffuse_comp_info(
                    key, version, value, None, comp_key)

        comp_info = self.make_diffuse_comp_info(
            key, version, value, comp_dict)
        ret_dict[comp_info.sourcekey] = comp_info

self._diffuse_comp_info_dict.update(ret_dict)
return ret_dict
def make_diffuse_comp_info_dict(self, diffuse_sources, components)
Make a dictionary mapping from diffuse component to information about that component.

Parameters
----------
diffuse_sources : dict
    Dictionary with diffuse source definitions
components : dict
    Dictionary with event selection definitions, needed for selection-dependent diffuse components

Returns
-------
ret_dict : dict
    Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
3.366265
3.240682
1.038752
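The diffuse_sources argument is a dictionary of per-source dictionaries; the keys read by the code above are model_type, selection_dependent, moving, and versions. A made-up example of a valid input:

# Illustrative input only; source names and versions are made up.
diffuse_sources = dict(
    iso_source=dict(model_type='IsoSource',
                    versions=['v1']),
    moving_source=dict(model_type='MapCubeSource',
                       moving=True,               # split by zmax cut
                       versions=['v2']),
    residual_cr=dict(model_type='MapCubeSource',
                     selection_dependent=True,    # split by component
                     versions=['v1']),
)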
# FIXME: this is here for python 3.5, where astropy is now returning
# bytes instead of str
if table[colname].dtype.kind in ['S', 'U']:
    mask = table[colname].astype(str) == value
else:
    mask = table[colname] == value

if mask.sum() != 1:
    raise KeyError("%i rows in column %s match value %s" %
                   (mask.sum(), colname, value))
return np.argmax(mask)
def get_unique_match(table, colname, value)
Get the row matching value for a particular column. If exactly one row matches, return the index of that row; otherwise raise KeyError.
4.346708
3.894549
1.116101
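A quick usage sketch with an in-memory astropy Table; the column names and values are made up for illustration.

from astropy.table import Table

table = Table(dict(name=['srcA', 'srcB', 'srcC'],
                   ts=[25.0, 9.0, 100.0]))
idx = get_unique_match(table, 'name', 'srcB')
print(idx, table['ts'][idx])   # 1 9.0

# A value matching zero rows (or several) raises KeyError:
# get_unique_match(table, 'name', 'srcD')  # -> KeyError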
import argparse

parser = argparse.ArgumentParser(usage="file_archive.py [options]",
                                 description="Browse a job archive")

parser.add_argument('--files', action='store', dest='file_archive_table',
                    type=str, default='file_archive_temp.fits',
                    help="File archive file")
parser.add_argument('--base', action='store', dest='base_path',
                    type=str, default=os.path.abspath('.'),
                    help="File archive base path")

args = parser.parse_args(sys.argv[1:])
FileArchive.build_archive(**args.__dict__)
def main_browse()
Entry point for command line use for browsing a FileArchive
4.031202
3.594333
1.121544
self.file_dict.clear()
for key, val in self.file_args.items():
    try:
        file_path = args[key]
        if file_path is None:
            continue
        # 'args' is special
        if key[0:4] == 'args':
            if isinstance(file_path, list):
                tokens = file_path
            elif isinstance(file_path, str):
                tokens = file_path.split()
            else:
                raise TypeError("Args has type %s, expect list or str" %
                                type(file_path))
            for token in tokens:
                self.file_dict[token.replace('.gz', '')] = val
        else:
            self.file_dict[file_path.replace('.gz', '')] = val
    except KeyError:
        pass
def latch_file_info(self, args)
Extract the file paths from a set of arguments
3.031425
2.88023
1.052494
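The bookkeeping above reduces to a small loop, reproduced here standalone as a worked example; file_args maps argument names to file-status flags, and all paths and flag values are made up.

# Worked example of the latching logic (paths and flags made up).
file_args = dict(infile=1, outfile=2, args=3)
args = dict(infile='data/in.fits.gz',
            outfile='data/out.fits',
            args='extra1.fits extra2.fits')

file_dict = {}
for key, val in file_args.items():
    file_path = args[key]
    if key[0:4] == 'args':
        # 'args' values are tokenized into individual paths
        for token in file_path.split():
            file_dict[token.replace('.gz', '')] = val
    else:
        # '.gz' suffixes are stripped before latching
        file_dict[file_path.replace('.gz', '')] = val

print(file_dict)
# {'data/in.fits': 1, 'data/out.fits': 2,
#  'extra1.fits': 3, 'extra2.fits': 3}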