sentence1 : string, lengths 52 to 3.87M
sentence2 : string, lengths 1 to 47.2k
label     : string, 1 class (entailment)
def create_from_eflux(cls, params, emin, emax, eflux, scale=1.0):
    """Create a spectral function instance given its energy flux."""
    params = params.copy()
    # Evaluate the energy flux with unit normalization, then rescale the
    # normalization parameter to match the requested energy flux.
    params[0] = 1.0
    params[0] = eflux / cls.eval_eflux(emin, emax, params, scale=scale)
    return cls(params, scale)
Create a spectral function instance given its energy flux.
entailment
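The back-to-back assignments to `params[0]` are the whole trick: the energy flux is linear in the normalization parameter, so one evaluation at unit norm yields the rescaling factor. A standalone sketch with an analytic E**-2 power law (the function and bounds are illustrative, not the library's API):

import numpy as np

def eflux(n0, emin, emax):
    # energy flux of dN/dE = n0 * E**-2: integral of E * n0 * E**-2 dE
    return n0 * np.log(emax / emin)

target = 5.0
n0 = target / eflux(1.0, 1.0, 10.0)  # evaluate at unit norm, then rescale
print(np.isclose(eflux(n0, 1.0, 10.0), target))  # True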
def _integrate(cls, fn, emin, emax, params, scale=1.0, extra_params=None, npt=20):
    """Fast numerical integration method using the mid-point rule."""
    emin = np.expand_dims(emin, -1)
    emax = np.expand_dims(emax, -1)
    params = copy.deepcopy(params)
    for i, par in enumerate(params):
        params[i] = np.expand_dims(par, -1)
    # Mid-point rule on a grid that is uniform in log(E).
    xedges = np.linspace(0.0, 1.0, npt + 1)
    logx_edge = np.log(emin) + xedges * (np.log(emax) - np.log(emin))
    logx = 0.5 * (logx_edge[..., 1:] + logx_edge[..., :-1])
    xw = np.exp(logx_edge[..., 1:]) - np.exp(logx_edge[..., :-1])
    dnde = fn(np.exp(logx), params, scale, extra_params)
    return np.sum(dnde * xw, axis=-1)
Fast numerical integration method using the mid-point rule.
entailment
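The same quadrature, stripped of the broadcasting machinery: the mid-point rule on a grid uniform in log(E). A minimal sketch (the names are illustrative), checked against an analytic integral:

import numpy as np

def midpoint_logspace(fn, emin, emax, npt=20):
    # integrate fn(E) dE over [emin, emax] on a log-uniform grid
    logx_edge = np.linspace(np.log(emin), np.log(emax), npt + 1)
    logx = 0.5 * (logx_edge[1:] + logx_edge[:-1])        # mid-points in log(E)
    xw = np.exp(logx_edge[1:]) - np.exp(logx_edge[:-1])  # linear bin widths
    return np.sum(fn(np.exp(logx)) * xw)

# dN/dE = E**-2 on [1, 10]: exact integral is 1 - 1/10 = 0.9
print(midpoint_logspace(lambda e: e ** -2, 1.0, 10.0))  # ~0.9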
def dnde(self, x, params=None):
    """Evaluate differential flux."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_dnde(x, params, self.scale, self.extra_params))
Evaluate differential flux.
entailment
def ednde(self, x, params=None):
    """Evaluate E times differential flux."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_ednde(x, params, self.scale, self.extra_params))
Evaluate E times differential flux.
entailment
def e2dnde(self, x, params=None):
    """Evaluate E^2 times differential flux."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_e2dnde(x, params, self.scale, self.extra_params))
Evaluate E^2 times differential flux.
entailment
def dnde_deriv(self, x, params=None):
    """Evaluate derivative of the differential flux with respect to E."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_dnde_deriv(x, params, self.scale, self.extra_params))
Evaluate derivative of the differential flux with respect to E.
entailment
def ednde_deriv(self, x, params=None):
    """Evaluate derivative of E times differential flux with respect to E."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_ednde_deriv(x, params, self.scale, self.extra_params))
Evaluate derivative of E times differential flux with respect to E.
entailment
def e2dnde_deriv(self, x, params=None):
    """Evaluate derivative of E^2 times differential flux with respect to E."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_e2dnde_deriv(x, params, self.scale, self.extra_params))
Evaluate derivative of E^2 times differential flux with respect to E.
entailment
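The `*_deriv` wrappers encode nothing more than the product rule. A quick numerical check on a toy power law standing in for `eval_dnde` (illustrative only, not the library implementation):

import numpy as np

E = np.logspace(2, 5, 200)
dnde = E ** -2.0
dnde_deriv = -2.0 * E ** -3.0
# product rule: d(E * dnde)/dE = dnde + E * d(dnde)/dE
ednde_deriv = dnde + E * dnde_deriv
numeric = np.gradient(E * dnde, E)
print(np.allclose(numeric[1:-1], ednde_deriv[1:-1], rtol=1e-2))  # True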
def flux(self, emin, emax, params=None):
    """Evaluate the integral flux."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_flux(emin, emax, params, self.scale, self.extra_params))
Evaluate the integral flux.
entailment
def eflux(self, emin, emax, params=None):
    """Evaluate the integral energy flux."""
    params = self.params if params is None else params
    return np.squeeze(self.eval_eflux(emin, emax, params, self.scale, self.extra_params))
Evaluate the integral energy flux.
entailment
def extension(self, name, **kwargs):
    """Test this source for spatial extension with the likelihood ratio
    method (TS_ext).

    This method will substitute an extended spatial model for the given
    source and perform a one-dimensional scan of the spatial extension
    parameter over the range specified with the width parameters. The 1-D
    profile likelihood is then used to compute the best-fit value, upper
    limit, and TS for extension. The nuisance parameters that will be
    simultaneously fit when performing the spatial scan can be controlled
    with the ``fix_shape``, ``free_background``, and ``free_radius``
    options. By default the position of the source will be fixed to its
    current position. A simultaneous fit to position and extension can be
    performed by setting ``fit_position`` to True.

    Parameters
    ----------
    name : str
        Source name.
    {options}
    optimizer : dict
        Dictionary that overrides the default optimizer settings.

    Returns
    -------
    extension : dict
        Dictionary containing results of the extension analysis. The same
        dictionary is also saved to the dictionary of this source under
        'extension'.
    """
    timer = Timer.create(start=True)
    name = self.roi.get_source_by_name(name).name
    schema = ConfigSchema(self.defaults['extension'],
                          optimizer=self.defaults['optimizer'])
    schema.add_option('prefix', '')
    schema.add_option('outfile', None, '', str)
    config = utils.create_dict(self.config['extension'],
                               optimizer=self.config['optimizer'])
    config = schema.create_config(config, **kwargs)

    self.logger.info('Running extension fit for %s', name)
    free_state = FreeParameterState(self)
    ext = self._extension(name, **config)
    free_state.restore()
    self.logger.info('Finished extension fit.')

    if config['make_plots']:
        self._plotter.make_extension_plots(ext, self.roi,
                                           prefix=config['prefix'])

    outfile = config.get('outfile', None)
    if outfile is None:
        outfile = utils.format_filename(self.workdir, 'ext',
                                        prefix=[config['prefix'],
                                                name.lower().replace(' ', '_')])
    else:
        outfile = os.path.join(self.workdir, os.path.splitext(outfile)[0])

    if config['write_fits']:
        self._make_extension_fits(ext, outfile + '.fits')
    if config['write_npy']:
        np.save(outfile + '.npy', dict(ext))

    self.logger.info('Execution time: %.2f s', timer.elapsed_time)
    return ext
Test this source for spatial extension with the likelihood ratio method (TS_ext). This method will substitute an extended spatial model for the given source and perform a one-dimensional scan of the spatial extension parameter over the range specified with the width parameters. The 1-D profile likelihood is then used to compute the best-fit value, upper limit, and TS for extension. The nuisance parameters that will be simultaneously fit when performing the spatial scan can be controlled with the ``fix_shape``, ``free_background``, and ``free_radius`` options. By default the position of the source will be fixed to its current position. A simultaneous fit to position and extension can be performed by setting ``fit_position`` to True. Parameters ---------- name : str Source name. {options} optimizer : dict Dictionary that overrides the default optimizer settings. Returns ------- extension : dict Dictionary containing results of the extension analysis. The same dictionary is also saved to the dictionary of this source under 'extension'.
entailment
def plotNLL_v_Flux(nll, fluxType, nstep=25, xlims=None):
    """Plot the (negative) log-likelihood as a function of normalization

    nll : a LnLFN object
    nstep : Number of steps to plot
    xlims : x-axis limits; if None, take them from the nll object

    returns fig, ax, which are matplotlib figure and axes objects
    """
    import matplotlib.pyplot as plt
    if xlims is None:
        xmin = nll.interp.xmin
        xmax = nll.interp.xmax
    else:
        xmin = xlims[0]
        xmax = xlims[1]
    y1 = nll.interp(xmin)
    y2 = nll.interp(xmax)
    ymin = min(y1, y2, 0.0)
    ymax = max(y1, y2, 0.5)
    xvals = np.linspace(xmin, xmax, nstep)
    yvals = nll.interp(xvals)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    ax.set_xlabel(NORM_LABEL[fluxType])
    ax.set_ylabel(r'$-\Delta \log\mathcal{L}$')
    ax.plot(xvals, yvals)
    return fig, ax
Plot the (negative) log-likelihood as a function of normalization nll : a LnLFN object nstep : Number of steps to plot xlims : x-axis limits; if None, take them from the nll object returns fig,ax, which are matplotlib figure and axes objects
entailment
def plotCastro_base(castroData, ylims, xlabel, ylabel, nstep=25,
                    zlims=None, global_min=False):
    """Make a color plot (castro plot) of the log-likelihood as a function
    of energy and flux normalization

    castroData : A CastroData_Base object, with the log-likelihood v.
                 normalization for each energy bin
    ylims      : y-axis limits
    xlabel     : x-axis title
    ylabel     : y-axis title
    nstep      : Number of y-axis steps to plot for each energy bin
    zlims      : z-axis limits
    global_min : Plot the log-likelihood w.r.t. the global min.

    returns fig, ax, im, ztmp, cax, cbar, which are matplotlib figure,
    axes, image, and colorbar objects
    """
    import matplotlib.pyplot as plt
    ymin = ylims[0]
    ymax = ylims[1]
    if zlims is None:
        zmin = -10
        zmax = 0.
    else:
        zmin = zlims[0]
        zmax = zlims[1]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_ylim((ymin, ymax))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    normVals = np.logspace(np.log10(ymin), np.log10(ymax), nstep)
    ztmp = []
    for i in range(castroData.nx):
        ztmp.append(castroData[i].interp(normVals))
    ztmp = np.asarray(ztmp).T
    ztmp *= -1.
    ztmp = np.where(ztmp < zmin, np.nan, ztmp)
    if global_min:
        global_offset = castroData.nll_offsets.min()
        offsets = global_offset - castroData.nll_offsets
        ztmp += offsets
    cmap = plt.get_cmap('jet_r')
    xedge = castroData.x_edges()
    ax.set_xlim((xedge[0], xedge[-1]))
    im = ax.pcolormesh(xedge, normVals, ztmp,
                       vmin=zmin, vmax=zmax, cmap=cmap, linewidth=0)
    cax, cbar = make_colorbar(fig, ax, im, (zmin, zmax))
    return fig, ax, im, ztmp, cax, cbar
Make a color plot (castro plot) of the log-likelihood as a function of energy and flux normalization castroData : A CastroData_Base object, with the log-likelihood v. normalization for each energy bin ylims : y-axis limits xlabel : x-axis title ylabel : y-axis title nstep : Number of y-axis steps to plot for each energy bin zlims : z-axis limits global_min : Plot the log-likelihood w.r.t. the global min. returns fig,ax,im,ztmp,cax,cbar which are matplotlib figure, axes, image, and colorbar objects
entailment
def plotCastro(castroData, ylims, nstep=25, zlims=None):
    """Make a color plot (castro plot) of the delta log-likelihood as a
    function of energy and flux normalization

    castroData : A CastroData object, with the log-likelihood v.
                 normalization for each energy bin
    ylims : y-axis limits
    nstep : Number of y-axis steps to plot for each energy bin
    zlims : z-axis limits

    returns fig, ax, im, ztmp, cax, cbar, which are matplotlib figure,
    axes, image, and colorbar objects
    """
    xlabel = "Energy [MeV]"
    ylabel = NORM_LABEL[castroData.norm_type]
    return plotCastro_base(castroData, ylims, xlabel, ylabel, nstep, zlims)
Make a color plot (castro plot) of the delta log-likelihood as a function of energy and flux normalization castroData : A CastroData object, with the log-likelihood v. normalization for each energy bin ylims : y-axis limits nstep : Number of y-axis steps to plot for each energy bin zlims : z-axis limits returns fig,ax,im,ztmp,cax,cbar which are matplotlib figure, axes, image, and colorbar objects
entailment
def plotSED(castroData, ylims, TS_thresh=4.0, errSigma=1.0, specVals=[]):
    """Make a color plot (castro plot) of the (negative) log-likelihood
    as a function of energy and flux normalization

    castroData : A CastroData object, with the log-likelihood v.
                 normalization for each energy bin
    ylims      : y-axis limits
    TS_thresh  : TS value above which to plot a point, rather than an
                 upper limit
    errSigma   : Number of sigma to use for error bars
    specVals   : List of spectra to add to plot

    returns fig, ax, which are matplotlib figure and axes objects
    """
    import matplotlib.pyplot as plt
    xmin = castroData.refSpec.ebins[0]
    xmax = castroData.refSpec.ebins[-1]
    ymin = ylims[0]
    ymax = ylims[1]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    ax.set_xlabel("Energy [GeV]")
    ax.set_ylabel(NORM_LABEL[castroData.norm_type])
    plotSED_OnAxis(ax, castroData, TS_thresh, errSigma)
    for spec in specVals:
        ax.loglog(castroData.refSpec.eref, spec)
    return fig, ax
Make a color plot (castro plot) of the (negative) log-likelihood as a function of energy and flux normalization castroData : A CastroData object, with the log-likelihood v. normalization for each energy bin ylims : y-axis limits TS_thresh : TS value above which to plot a point, rather than an upper limit errSigma : Number of sigma to use for error bars specVals : List of spectra to add to plot returns fig,ax which are matplotlib figure and axes objects
entailment
def compare_SED(castroData1, castroData2, ylims, TS_thresh=4.0,
                errSigma=1.0, specVals=[]):
    """Compare two SEDs

    castroData1 : A CastroData object, with the log-likelihood v.
                  normalization for each energy bin
    castroData2 : A CastroData object, with the log-likelihood v.
                  normalization for each energy bin
    ylims       : y-axis limits
    TS_thresh   : TS value above which to plot a point, rather than an
                  upper limit
    errSigma    : Number of sigma to use for error bars
    specVals    : List of spectra to add to plot

    returns fig, ax, which are matplotlib figure and axes objects
    """
    import matplotlib.pyplot as plt
    xmin = min(castroData1.refSpec.ebins[0], castroData2.refSpec.ebins[0])
    xmax = max(castroData1.refSpec.ebins[-1], castroData2.refSpec.ebins[-1])
    ymin = ylims[0]
    ymax = ylims[1]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    ax.set_xlabel("Energy [GeV]")
    ax.set_ylabel(NORM_LABEL[castroData1.norm_type])
    plotSED_OnAxis(ax, castroData1, TS_thresh, errSigma,
                   colorLim='blue', colorPoint='blue')
    plotSED_OnAxis(ax, castroData2, TS_thresh, errSigma,
                   colorLim='red', colorPoint='red')
    for spec in specVals:
        ax.loglog(castroData1.refSpec.eref, spec)
    return fig, ax
Compare two SEDs castroData1: A CastroData object, with the log-likelihood v. normalization for each energy bin castroData2: A CastroData object, with the log-likelihood v. normalization for each energy bin ylims : y-axis limits TS_thresh : TS value above which to plot a point, rather than an upper limit errSigma : Number of sigma to use for error bars specVals : List of spectra to add to plot returns fig,ax which are matplotlib figure and axes objects
entailment
def make_ring_dicts(**kwargs):
    """Build and return the information about the Galprop rings"""
    library_yamlfile = kwargs.get('library', 'models/library.yaml')
    gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))
    if library_yamlfile is None or library_yamlfile == 'None':
        return gmm
    diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(library_yamlfile)
    for diffuse_value in diffuse_comps.values():
        if diffuse_value is None:
            continue
        if diffuse_value['model_type'] != 'galprop_rings':
            continue
        versions = diffuse_value['versions']
        for version in versions:
            gmm.make_ring_dict(version)
    return gmm
Build and return the information about the Galprop rings
entailment
def make_diffuse_comp_info_dict(**kwargs):
    """Build and return the information about the diffuse components"""
    library_yamlfile = kwargs.pop('library', 'models/library.yaml')
    components = kwargs.pop('components', None)
    if components is None:
        comp_yamlfile = kwargs.pop('comp', 'config/binning.yaml')
        components = Component.build_from_yamlfile(comp_yamlfile)
    gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))
    dmm = kwargs.get('DiffuseModelManager', DiffuseModelManager(**kwargs))
    if library_yamlfile is None or library_yamlfile == 'None':
        diffuse_comps = {}
    else:
        diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(
            library_yamlfile)
    diffuse_comp_info_dict = dmm.make_diffuse_comp_info_dict(
        diffuse_comps, components)
    for diffuse_value in diffuse_comps.values():
        if diffuse_value is None:
            continue
        if diffuse_value['model_type'] != 'galprop_rings':
            continue
        versions = diffuse_value['versions']
        for version in versions:
            galprop_dict = gmm.make_diffuse_comp_info_dict(version)
            diffuse_comp_info_dict.update(galprop_dict)
    return dict(comp_info_dict=diffuse_comp_info_dict,
                GalpropMapManager=gmm,
                DiffuseModelManager=dmm)
Build and return the information about the diffuse components
entailment
def read_galprop_rings_yaml(self, galkey):
    """Read the yaml file for a particular galprop key"""
    galprop_rings_yaml = self._name_factory.galprop_rings_yaml(
        galkey=galkey, fullpath=True)
    galprop_rings = yaml.safe_load(open(galprop_rings_yaml))
    return galprop_rings
Read the yaml file for a particular galprop key
entailment
def make_ring_filename(self, source_name, ring, galprop_run):
    """Make the name of a gasmap file for a single ring

    Parameters
    ----------
    source_name : str
        The galprop component, used to define path to gasmap files
    ring : int
        The ring index
    galprop_run : str
        String identifying the galprop parameters
    """
    format_dict = self.__dict__.copy()
    format_dict['sourcekey'] = self._name_factory.galprop_ringkey(
        source_name=source_name, ringkey="ring_%i" % ring)
    format_dict['galprop_run'] = galprop_run
    return self._name_factory.galprop_gasmap(**format_dict)
Make the name of a gasmap file for a single ring Parameters ---------- source_name : str The galprop component, used to define path to gasmap files ring : int The ring index galprop_run : str String identifying the galprop parameters
entailment
def make_merged_name(self, source_name, galkey, fullpath):
    """Make the name of a gasmap file for a set of merged rings

    Parameters
    ----------
    source_name : str
        The galprop component, used to define path to gasmap files
    galkey : str
        A short key identifying the galprop parameters
    fullpath : bool
        Return the full path name
    """
    format_dict = self.__dict__.copy()
    format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(
        source_name=source_name, galpropkey=galkey)
    format_dict['fullpath'] = fullpath
    return self._name_factory.merged_gasmap(**format_dict)
Make the name of a gasmap file for a set of merged rings Parameters ---------- source_name : str The galprop component, used to define path to gasmap files galkey : str A short key identifying the galprop parameters fullpath : bool Return the full path name
entailment
def make_xml_name(self, source_name, galkey, fullpath):
    """Make the name of an xml file for a model definition for a set of merged rings

    Parameters
    ----------
    source_name : str
        The galprop component, used to define path to gasmap files
    galkey : str
        A short key identifying the galprop parameters
    fullpath : bool
        Return the full path name
    """
    format_dict = self.__dict__.copy()
    format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(
        source_name=source_name, galpropkey=galkey)
    format_dict['fullpath'] = fullpath
    return self._name_factory.srcmdl_xml(**format_dict)
Make the name of an xml file for a model definition for a set of merged rings Parameters ---------- source_name : str The galprop component, used to define path to gasmap files galkey : str A short key identifying the galprop parameters fullpath : bool Return the full path name
entailment
def make_ring_filelist(self, sourcekeys, rings, galprop_run):
    """Make a list of all the template files for a merged component

    Parameters
    ----------
    sourcekeys : list-like of str
        The names of the components to merge
    rings : list-like of int
        The indices of the rings to merge
    galprop_run : str
        String identifying the galprop parameters
    """
    flist = []
    for sourcekey in sourcekeys:
        for ring in rings:
            flist += [self.make_ring_filename(sourcekey, ring, galprop_run)]
    return flist
Make a list of all the template files for a merged component Parameters ---------- sourcekeys : list-like of str The names of the components to merge rings : list-like of int The indices of the rings to merge galprop_run : str String identifying the galprop parameters
entailment
def make_ring_dict(self, galkey):
    """Make a dictionary mapping the merged component names to list of template files

    Parameters
    ----------
    galkey : str
        Unique key for this ring dictionary

    Returns `model_component.GalpropMergedRingInfo`
    """
    galprop_rings = self.read_galprop_rings_yaml(galkey)
    galprop_run = galprop_rings['galprop_run']
    ring_limits = galprop_rings['ring_limits']
    comp_dict = galprop_rings['diffuse_comp_dict']
    remove_rings = galprop_rings.get('remove_rings', [])
    ring_dict = {}
    nring = len(ring_limits) - 1
    for source_name, source_value in comp_dict.items():
        base_dict = dict(source_name=source_name,
                         galkey=galkey,
                         galprop_run=galprop_run)
        for iring in range(nring):
            sourcekey = "%s_%i" % (source_name, iring)
            if sourcekey in remove_rings:
                continue
            full_key = "%s_%s" % (sourcekey, galkey)
            rings = range(ring_limits[iring], ring_limits[iring + 1])
            base_dict.update(dict(ring=iring,
                                  sourcekey=sourcekey,
                                  files=self.make_ring_filelist(
                                      source_value, rings, galprop_run),
                                  merged_gasmap=self.make_merged_name(
                                      sourcekey, galkey, False)))
            ring_dict[full_key] = GalpropMergedRingInfo(**base_dict)
    self._ring_dicts[galkey] = ring_dict
    return ring_dict
Make a dictionary mapping the merged component names to list of template files Parameters ---------- galkey : str Unique key for this ring dictionary Returns `model_component.GalpropMergedRingInfo`
entailment
def make_diffuse_comp_info(self, merged_name, galkey):
    """Make the information about a single merged component

    Parameters
    ----------
    merged_name : str
        The name of the merged component
    galkey : str
        A short key identifying the galprop parameters

    Returns `model_component.ModelComponentInfo`
    """
    kwargs = dict(source_name=merged_name,
                  source_ver=galkey,
                  model_type='MapCubeSource',
                  Spatial_Filename=self.make_merged_name(
                      merged_name, galkey, fullpath=True),
                  srcmdl_name=self.make_xml_name(merged_name, galkey,
                                                 fullpath=True))
    return MapCubeComponentInfo(**kwargs)
Make the information about a single merged component Parameters ---------- merged_name : str The name of the merged component galkey : str A short key identifying the galprop parameters Returns `model_component.ModelComponentInfo`
entailment
def make_diffuse_comp_info_dict(self, galkey):
    """Make a dictionary mapping from merged component to information about that component

    Parameters
    ----------
    galkey : str
        A short key identifying the galprop parameters
    """
    galprop_rings = self.read_galprop_rings_yaml(galkey)
    ring_limits = galprop_rings.get('ring_limits')
    comp_dict = galprop_rings.get('diffuse_comp_dict')
    remove_rings = galprop_rings.get('remove_rings', [])
    diffuse_comp_info_dict = {}
    nring = len(ring_limits) - 1
    for source_key in sorted(comp_dict.keys()):
        for iring in range(nring):
            source_name = "%s_%i" % (source_key, iring)
            if source_name in remove_rings:
                continue
            full_key = "%s_%s" % (source_name, galkey)
            diffuse_comp_info_dict[full_key] = \
                self.make_diffuse_comp_info(source_name, galkey)
    self._diffuse_comp_info_dicts[galkey] = diffuse_comp_info_dict
    return diffuse_comp_info_dict
Make a dictionary mapping from merged component to information about that component Parameters ---------- galkey : str A short key identifying the galprop parameters
entailment
def make_template_name(self, model_type, sourcekey):
    """Make the name of a template file for a particular component

    Parameters
    ----------
    model_type : str
        Type of model to use for this component
    sourcekey : str
        Key to identify this component

    Returns filename or None if component does not require a template file
    """
    format_dict = self.__dict__.copy()
    format_dict['sourcekey'] = sourcekey
    if model_type == 'IsoSource':
        return self._name_factory.spectral_template(**format_dict)
    elif model_type in ['MapCubeSource', 'SpatialMap']:
        return self._name_factory.diffuse_template(**format_dict)
    else:
        raise ValueError("Unexpected model_type %s" % model_type)
Make the name of a template file for a particular component Parameters ---------- model_type : str Type of model to use for this component sourcekey : str Key to identify this component Returns filename or None if component does not require a template file
entailment
def make_xml_name(self, sourcekey):
    """Make the name of an xml file for a model definition of a single component

    Parameters
    ----------
    sourcekey : str
        Key to identify this component
    """
    format_dict = self.__dict__.copy()
    format_dict['sourcekey'] = sourcekey
    return self._name_factory.srcmdl_xml(**format_dict)
Make the name of an xml file for a model definition of a single component Parameters ---------- sourcekey : str Key to identify this component
entailment
def make_diffuse_comp_info(self, source_name, source_ver, diffuse_dict,
                           components=None, comp_key=None):
    """Make the information about a single diffuse component

    Parameters
    ----------
    source_name : str
        Name of the source
    source_ver : str
        Key identifying the version of the source
    diffuse_dict : dict
        Information about this component
    comp_key : str
        Used when we need to keep track of sub-components, i.e.,
        for moving and selection dependent sources.

    Returns `model_component.ModelComponentInfo` or
    `model_component.IsoComponentInfo`
    """
    model_type = diffuse_dict['model_type']
    sourcekey = '%s_%s' % (source_name, source_ver)
    if comp_key is None:
        template_name = self.make_template_name(model_type, sourcekey)
        srcmdl_name = self.make_xml_name(sourcekey)
    else:
        template_name = self.make_template_name(
            model_type, "%s_%s" % (sourcekey, comp_key))
        srcmdl_name = self.make_xml_name("%s_%s" % (sourcekey, comp_key))
    template_name = self._name_factory.fullpath(localpath=template_name)
    srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name)
    kwargs = dict(source_name=source_name,
                  source_ver=source_ver,
                  model_type=model_type,
                  srcmdl_name=srcmdl_name,
                  components=components,
                  comp_key=comp_key)
    kwargs.update(diffuse_dict)
    if model_type == 'IsoSource':
        kwargs['Spectral_Filename'] = template_name
        return IsoComponentInfo(**kwargs)
    elif model_type == 'MapCubeSource':
        kwargs['Spatial_Filename'] = template_name
        return MapCubeComponentInfo(**kwargs)
    elif model_type == 'SpatialMap':
        kwargs['Spatial_Filename'] = template_name
        return SpatialMapComponentInfo(**kwargs)
    else:
        raise ValueError("Unexpected model type %s" % model_type)
Make the information about a single diffuse component Parameters ---------- source_name : str Name of the source source_ver : str Key identifying the version of the source diffuse_dict : dict Information about this component comp_key : str Used when we need to keep track of sub-components, i.e., for moving and selection dependent sources. Returns `model_component.ModelComponentInfo` or `model_component.IsoComponentInfo`
entailment
def make_diffuse_comp_info_dict(self, diffuse_sources, components):
    """Make a dictionary mapping from diffuse component to information about that component

    Parameters
    ----------
    diffuse_sources : dict
        Dictionary with diffuse source definitions
    components : dict
        Dictionary with event selection definitions, needed for
        selection dependent diffuse components

    Returns
    -------
    ret_dict : dict
        Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
    """
    ret_dict = {}
    for key, value in diffuse_sources.items():
        if value is None:
            continue
        model_type = value.get('model_type', 'MapCubeSource')
        if model_type in ['galprop_rings', 'catalog']:
            continue
        selection_dependent = value.get('selection_dependent', False)
        moving = value.get('moving', False)
        versions = value.get('versions', [])
        for version in versions:
            comp_dict = None
            if selection_dependent:
                # For selection dependent diffuse sources we need to
                # split by binning component
                comp_dict = {}
                for comp in components:
                    comp_key = comp.make_key('{ebin_name}_{evtype_name}')
                    comp_dict[comp_key] = self.make_diffuse_comp_info(
                        key, version, value, None, comp_key)
            elif moving:
                # For moving diffuse sources we need to split by zmax cut
                comp_dict = {}
                zmax_dict = {}
                for comp in components:
                    zmax_dict[int(comp.zmax)] = True
                zmax_list = sorted(zmax_dict.keys())
                for zmax in zmax_list:
                    comp_key = "zmax%i" % (zmax)
                    comp_dict[comp_key] = self.make_diffuse_comp_info(
                        key, version, value, None, comp_key)
            comp_info = self.make_diffuse_comp_info(
                key, version, value, comp_dict)
            ret_dict[comp_info.sourcekey] = comp_info
    self._diffuse_comp_info_dict.update(ret_dict)
    return ret_dict
Make a dictionary mapping from diffuse component to information about that component Parameters ---------- diffuse_sources : dict Dictionary with diffuse source definitions components : dict Dictionary with event selection definitions, needed for selection dependent diffuse components Returns ------- ret_dict : dict Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
entailment
def get_unique_match(table, colname, value):
    """Get the row matching value for a particular column.

    If exactly one row matches, return the index of that row;
    otherwise raise KeyError.
    """
    # FIXME: this is here for python 3.5, where astropy is now
    # returning bytes instead of str
    if table[colname].dtype.kind in ['S', 'U']:
        mask = table[colname].astype(str) == value
    else:
        mask = table[colname] == value
    if mask.sum() != 1:
        raise KeyError("%i rows in column %s match value %s" %
                       (mask.sum(), colname, value))
    return np.argmax(mask)
Get the row matching value for a particular column. If exactly one row matches, return the index of that row; otherwise raise KeyError.
entailment
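A minimal usage sketch for `get_unique_match`, assuming it is importable alongside astropy:

import numpy as np
from astropy.table import Table

t = Table({'name': ['a', 'b', 'c'], 'val': [1, 2, 3]})
print(get_unique_match(t, 'name', 'b'))  # 1, the index of the matching row
# get_unique_match(t, 'name', 'z') would raise KeyError (0 rows match)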
def main_browse():
    """Entry point for command line use for browsing a FileArchive"""
    import argparse
    parser = argparse.ArgumentParser(usage="file_archive.py [options]",
                                     description="Browse a file archive")
    parser.add_argument('--files', action='store', dest='file_archive_table',
                        type=str, default='file_archive_temp.fits',
                        help="File archive file")
    parser.add_argument('--base', action='store', dest='base_path',
                        type=str, default=os.path.abspath('.'),
                        help="File archive base path")
    args = parser.parse_args(sys.argv[1:])
    FileArchive.build_archive(**args.__dict__)
Entry point for command line use for browsing a FileArchive
entailment
def latch_file_info(self, args):
    """Extract the file paths from a set of arguments"""
    self.file_dict.clear()
    for key, val in self.file_args.items():
        try:
            file_path = args[key]
            if file_path is None:
                continue
            # 'args' is special
            if key[0:4] == 'args':
                if isinstance(file_path, list):
                    tokens = file_path
                elif isinstance(file_path, str):
                    tokens = file_path.split()
                else:
                    raise TypeError("Args has type %s, expect list or str" %
                                    type(file_path))
                for token in tokens:
                    self.file_dict[token.replace('.gz', '')] = val
            else:
                self.file_dict[file_path.replace('.gz', '')] = val
        except KeyError:
            pass
Extract the file paths from a set of arguments
entailment
def update(self, file_dict):
    """Update self with values from a dictionary mapping file path [str] to `FileFlags` enum"""
    for key, val in file_dict.items():
        if key in self.file_dict:
            self.file_dict[key] |= val
        else:
            self.file_dict[key] = val
Update self with values from a dictionary mapping file path [str] to `FileFlags` enum
entailment
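The `|=` in `update` merges flag bits rather than overwriting them. The same pattern with plain ints standing in for the `FileFlags` enum (the bit values are made up for illustration):

INPUT, OUTPUT = 1, 2  # stand-ins for FileFlags bits

file_dict = {'a.fits': INPUT}
incoming = {'a.fits': OUTPUT, 'b.fits': INPUT}
for key, val in incoming.items():
    file_dict[key] = file_dict.get(key, 0) | val
print(file_dict)  # {'a.fits': 3, 'b.fits': 1} -- 'a.fits' is now input|output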
def input_files(self):
    """Return a list of the input files needed by this link.

    For `Link` sub-classes this will return the union of all the input
    files of each internal `Link`. That is to say this will include
    files produced by one `Link` in a `Chain` and used as input to
    another `Link` in the `Chain`
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For input files we only want files that were marked as input
        if val & FileFlags.input_mask:
            ret_list.append(key)
    return ret_list
Return a list of the input files needed by this link. For `Link` sub-classes this will return the union of all the input files of each internal `Link`. That is to say this will include files produced by one `Link` in a `Chain` and used as input to another `Link` in the `Chain`
entailment
def output_files(self):
    """Return a list of the output files produced by this link.

    For `Link` sub-classes this will return the union of all the output
    files of each internal `Link`. That is to say this will include
    files produced by one `Link` in a `Chain` and used as input to
    another `Link` in the `Chain`
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For output files we only want files that were marked as output
        if val & FileFlags.output_mask:
            ret_list.append(key)
    return ret_list
Return a list of the output files produced by this link. For `Link` sub-classes this will return the union of all the output files of each internal `Link`. That is to say this will include files produced by one `Link` in a `Chain` and used as input to another `Link` in the `Chain`
entailment
def chain_input_files(self):
    """Return a list of the input files needed by this chain.

    For `Link` sub-classes this will return only those files
    that were not created by any internal `Link`
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For chain input files we only want files that were not marked
        # as output (i.e., not produced by some other step in the chain)
        if val & FileFlags.in_ch_mask == FileFlags.input_mask:
            ret_list.append(key)
    return ret_list
Return a list of the input files needed by this chain. For `Link` sub-classes this will return only those files that were not created by any internal `Link`
entailment
def chain_output_files(self):
    """Return a list of all the output files produced by this link.

    For `Link` sub-classes this will return only those files that were
    not marked as internal files or marked for removal.
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For chain output files we only want output files that were
        # not marked as internal or temp
        if val & FileFlags.out_ch_mask == FileFlags.output_mask:
            ret_list.append(key)
    return ret_list
Return a list of all the output files produced by this link. For `Link` sub-classes this will return only those files that were not marked as internal files or marked for removal.
entailment
def input_files_to_stage(self):
    """Return a list of the input files needed by this link.

    For `Link` sub-classes this will return the union of all the input
    files of each internal `Link`. That is to say this will include
    files produced by one `Link` in a `Chain` and used as input to
    another `Link` in the `Chain`
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For staged input files we only want files marked for staging
        if val & FileFlags.in_stage_mask == FileFlags.in_stage_mask:
            ret_list.append(key)
    return ret_list
Return a list of the input files needed by this link. For `Link` sub-classes this will return the union of all the input files of each internal `Link`. That is to say this will include files produced by one `Link` in a `Chain` and used as input to another `Link` in the `Chain`
entailment
def output_files_to_stage(self):
    """Return a list of the output files produced by this link.

    For `Link` sub-classes this will return the union of all the output
    files of each internal `Link`. That is to say this will include
    files produced by one `Link` in a `Chain` and used as input to
    another `Link` in the `Chain`
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For staged output files we only want files marked for staging
        if val & FileFlags.out_stage_mask == FileFlags.out_stage_mask:
            ret_list.append(key)
    return ret_list
Return a list of the output files produced by this link. For `Link` sub-classes this will return the union of all the output files of each internal `Link`. That is to say this will include files produced by one `Link` in a `Chain` and used as input to another `Link` in the `Chain`
entailment
def internal_files(self):
    """Return a list of the intermediate files produced by this link.

    This returns all files that were explicitly marked as internal files.
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For internal files we only want files that were marked as internal
        if val & FileFlags.internal_mask:
            ret_list.append(key)
    return ret_list
Return a list of the intermediate files produced by this link. This returns all files that were explicitly marked as internal files.
entailment
def temp_files(self):
    """Return a list of the temporary files produced by this link.

    This returns all files that were explicitly marked for removal.
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For temp files we only want files that were marked for removal
        if val & FileFlags.rm_mask:
            ret_list.append(key)
    return ret_list
Return a list of the temporary files produced by this link. This returns all files that were explicitly marked for removal.
entailment
def gzip_files(self):
    """Return a list of the files compressed by this link.

    This returns all files that were explicitly marked for compression.
    """
    ret_list = []
    for key, val in self.file_dict.items():
        # For gzip files we only want files that were marked for compression
        if val & FileFlags.gz_mask:
            ret_list.append(key)
    return ret_list
Return a list of the files compressed by this link. This returns all files that were explicitly marked for compression.
entailment
def print_summary(self, stream=sys.stdout, indent=""):
    """Print a summary of the files in this file dict.

    This version explicitly counts the union of all input and output files.
    """
    stream.write("%sTotal files : %i\n" % (indent, len(self.file_dict)))
    stream.write("%s Input files : %i\n" % (indent, len(self.input_files)))
    stream.write("%s Output files : %i\n" % (indent, len(self.output_files)))
    stream.write("%s Internal files : %i\n" % (indent, len(self.internal_files)))
    stream.write("%s Temp files : %i\n" % (indent, len(self.temp_files)))
Print a summary of the files in this file dict. This version explicitly counts the union of all input and output files.
entailment
def print_chain_summary(self, stream=sys.stdout, indent=""):
    """Print a summary of the files in this file dict.

    This version uses chain_input_files and chain_output_files to count
    the input and output files.
    """
    stream.write("%sTotal files : %i\n" % (indent, len(self.file_dict)))
    stream.write("%s Input files : %i\n" % (indent, len(self.chain_input_files)))
    stream.write("%s Output files : %i\n" % (indent, len(self.chain_output_files)))
    stream.write("%s Internal files : %i\n" % (indent, len(self.internal_files)))
    stream.write("%s Temp files : %i\n" % (indent, len(self.temp_files)))
Print a summary of the files in this file dict. This version uses chain_input_files and chain_output_files to count the input and output files.
entailment
def split_local_path(self, local_file):
    """Split the local path into a directory name and a file name

    If local_file is in self.workdir or a subdirectory of it,
    the directory will consist of the relative path from workdir.
    If local_file is not in self.workdir, directory will be empty.

    Returns (dirname, basename)
    """
    abspath = os.path.abspath(local_file)
    if abspath.find(self.workdir) >= 0:
        relpath = abspath.replace(self.workdir, '')[1:]
        basename = os.path.basename(relpath)
        dirname = os.path.dirname(relpath)
    else:
        basename = os.path.basename(local_file)
        dirname = ''
    return (dirname, basename)
Split the local path into a directory name and a file name If local_file is in self.workdir or a subdirectory of it, the directory will consist of the relative path from workdir. If local_file is not in self.workdir, directory will be empty. Returns (dirname, basename)
entailment
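The path arithmetic in `split_local_path`, demonstrated standalone (the workdir value is hypothetical):

import os

workdir = '/work/area'
abspath = os.path.abspath('/work/area/sub/file.fits')
relpath = abspath.replace(workdir, '')[1:]  # 'sub/file.fits'
print((os.path.dirname(relpath), os.path.basename(relpath)))
# ('sub', 'file.fits')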
def construct_scratch_path(self, dirname, basename):
    """Construct and return a path in the scratch area.

    This will be <self.scratchdir>/<dirname>/<basename>
    """
    return os.path.join(self.scratchdir, dirname, basename)
Construct and return a path in the scratch area. This will be <self.scratchdir>/<dirname>/<basename>
entailment
def get_scratch_path(self, local_file):
    """Construct and return a path in the scratch area from a local file."""
    (local_dirname, local_basename) = self.split_local_path(local_file)
    return self.construct_scratch_path(local_dirname, local_basename)
Construct and return a path in the scratch area from a local file.
entailment
def map_files(self, local_files):
    """Build a dictionary mapping local paths to scratch paths.

    Parameters
    ----------
    local_files : list
        List of filenames to be mapped to scratch area

    Returns dict
        Mapping local_file : fullpath of scratch file
    """
    ret_dict = {}
    for local_file in local_files:
        ret_dict[local_file] = self.get_scratch_path(local_file)
    return ret_dict
Build a dictionary mapping local paths to scratch paths. Parameters ---------- local_files : list List of filenames to be mapped to scratch area Returns dict Mapping local_file : fullpath of scratch file
entailment
def make_scratch_dirs(file_mapping, dry_run=True):
    """Make any directories needed in the scratch area"""
    scratch_dirs = {}
    for value in file_mapping.values():
        scratch_dirname = os.path.dirname(value)
        scratch_dirs[scratch_dirname] = True
    for scratch_dirname in scratch_dirs:
        if dry_run:
            print("mkdir -p %s" % (scratch_dirname))
        else:
            try:
                os.makedirs(scratch_dirname)
            except OSError:
                pass
Make any directories needed in the scratch area
entailment
def copy_to_scratch(file_mapping, dry_run=True):
    """Copy input files to scratch area"""
    for key, value in file_mapping.items():
        if not os.path.exists(key):
            continue
        print("copy %s %s" % (key, value))
        if not dry_run:
            copyfile(key, value)
    return file_mapping
Copy input files to scratch area
entailment
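With `dry_run=True` the function only prints the copies it would make, and inputs that do not exist are silently skipped, so it is safe to preview a staging plan. A runnable sketch (the scratch destination is hypothetical):

import os
import tempfile

src = tempfile.NamedTemporaryFile(suffix='.fits', delete=False).name
dst = os.path.join(tempfile.gettempdir(), 'scratch', os.path.basename(src))
copy_to_scratch({src: dst}, dry_run=True)  # prints the copy it would make
os.remove(src)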
def copy_from_scratch(file_mapping, dry_run=True):
    """Copy output files from scratch area"""
    for key, value in file_mapping.items():
        print("copy %s %s" % (value, key))
        if not dry_run:
            try:
                outdir = os.path.dirname(key)
                os.makedirs(outdir)
            except OSError:
                pass
            copyfile(value, key)
    return file_mapping
Copy output files from scratch area
entailment
def make_table(file_dict):
    """Build and return an `astropy.table.Table` to store `FileHandle`"""
    col_key = Column(name='key', dtype=int)
    col_path = Column(name='path', dtype='S256')
    col_creator = Column(name='creator', dtype=int)
    col_timestamp = Column(name='timestamp', dtype=int)
    col_status = Column(name='status', dtype=int)
    col_flags = Column(name='flags', dtype=int)
    columns = [col_key, col_path, col_creator,
               col_timestamp, col_status, col_flags]
    table = Table(data=columns)
    for val in file_dict.values():
        val.append_to_table(table)
    return table
Build and return an `astropy.table.Table` to store `FileHandle`
entailment
def make_dict(cls, table):
    """Build and return a dict of `FileHandle` from an `astropy.table.Table`

    The dictionary is keyed by FileHandle.key, which is a unique integer
    for each file
    """
    ret_dict = {}
    for row in table:
        file_handle = cls.create_from_row(row)
        ret_dict[file_handle.key] = file_handle
    return ret_dict
Build and return a dict of `FileHandle` from an `astropy.table.Table` The dictionary is keyed by FileHandle.key, which is a unique integer for each file
entailment
def create_from_row(cls, table_row):
    """Build and return a `FileHandle` from an `astropy.table.row.Row`"""
    kwargs = {}
    for key in table_row.colnames:
        kwargs[key] = table_row[key]
    try:
        return cls(**kwargs)
    except KeyError:
        print(kwargs)
Build and return a `FileHandle` from an `astropy.table.row.Row`
entailment
def check_status(self, basepath=None):
    """Check on the status of this particular file"""
    if basepath is None:
        fullpath = self.path
    else:
        fullpath = os.path.join(basepath, self.path)
    exists = os.path.exists(fullpath)
    if not exists:
        if self.flags & FileFlags.gz_mask != 0:
            fullpath += '.gz'
            exists = os.path.exists(fullpath)
    if exists:
        if self.status == FileStatus.superseded:
            pass
        else:
            self.status = FileStatus.exists
    else:
        if self.status in [FileStatus.no_file, FileStatus.expected,
                           FileStatus.missing, FileStatus.temp_removed]:
            if self.flags & FileFlags.rmint_mask != 0:
                self.status = FileStatus.temp_removed
        elif self.status == FileStatus.exists:
            self.status = FileStatus.missing
        elif self.status == FileStatus.exists:
            self.status = FileStatus.temp_removed
    return self.status
Check on the status of this particular file
entailment
def append_to_table(self, table):
    """Add this instance as a row on a `astropy.table.Table`"""
    table.add_row(dict(path=self.path,
                       key=self.key,
                       creator=self.creator,
                       timestamp=self.timestamp,
                       status=self.status,
                       flags=self.flags))
Add this instance as a row on a `astropy.table.Table`
entailment
def update_table_row(self, table, row_idx):
    """Update the values in an `astropy.table.Table` for this instance"""
    table[row_idx]['path'] = self.path
    table[row_idx]['key'] = self.key
    table[row_idx]['creator'] = self.creator
    table[row_idx]['timestamp'] = self.timestamp
    table[row_idx]['status'] = self.status
    table[row_idx]['flags'] = self.flags
Update the values in an `astropy.table.Table` for this instance
entailment
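`append_to_table` and `update_table_row` are thin wrappers over the astropy Table API; the round trip they rely on looks like this (a sketch using only two of the columns):

from astropy.table import Column, Table

table = Table(data=[Column(name='key', dtype=int),
                    Column(name='path', dtype='S256')])
table.add_row(dict(key=1, path='a.fits'))  # what append_to_table does
table[0]['path'] = 'b.fits'                # what update_table_row does
print(table['path'][0])                    # b'b.fits' (bytes, given 'S256')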
def _get_fullpath(self, filepath):
    """Return filepath with the base_path prefixed"""
    if filepath[0] == '/':
        return filepath
    return os.path.join(self._base_path, filepath)
Return filepath with the base_path prefixed
entailment
def _fill_cache(self):
    """Fill the cache from the `astropy.table.Table`"""
    for irow in range(len(self._table)):
        file_handle = self._make_file_handle(irow)
        self._cache[file_handle.path] = file_handle
Fill the cache from the `astropy.table.Table`
entailment
def _read_table_file(self, table_file):
    """Read an `astropy.table.Table` to set up the archive"""
    self._table_file = table_file
    if os.path.exists(self._table_file):
        self._table = Table.read(self._table_file)
    else:
        self._table = FileHandle.make_table({})
    self._fill_cache()
Read an `astropy.table.Table` to set up the archive
entailment
def _make_file_handle(self, row_idx):
    """Build and return a `FileHandle` object from an `astropy.table.row.Row`"""
    row = self._table[row_idx]
    return FileHandle.create_from_row(row)
Build and return a `FileHandle` object from an `astropy.table.row.Row`
entailment
def get_handle(self, filepath):
    """Get the `FileHandle` object associated to a particular file"""
    localpath = self._get_localpath(filepath)
    return self._cache[localpath]
Get the `FileHandle` object associated to a particular file
entailment
def register_file(self, filepath, creator, status=FileStatus.no_file,
                  flags=FileFlags.no_flags):
    """Register a file in the archive.

    If the file already exists, this raises a `KeyError`

    Parameters
    ----------
    filepath : str
        The path to the file
    creator : int
        A unique key for the job that created this file
    status : `FileStatus`
        Enumeration giving current status of file
    flags : `FileFlags`
        Enumeration giving flags set on this file

    Returns `FileHandle`
    """
    # Check whether the file already exists; the raise has to happen
    # outside the try block so it is not swallowed by the except clause.
    try:
        file_handle = self.get_handle(filepath)
    except KeyError:
        file_handle = None
    if file_handle is not None:
        raise KeyError("File %s already exists in archive" % filepath)
    localpath = self._get_localpath(filepath)
    if status == FileStatus.exists:
        # Make sure the file really exists
        fullpath = self._get_fullpath(filepath)
        if not os.path.exists(fullpath):
            print("register_file called on missing file %s" % fullpath)
            status = FileStatus.missing
            timestamp = 0
        else:
            timestamp = int(os.stat(fullpath).st_mtime)
    else:
        timestamp = 0
    key = len(self._table) + 1
    file_handle = FileHandle(path=localpath,
                             key=key,
                             creator=creator,
                             timestamp=timestamp,
                             status=status,
                             flags=flags)
    file_handle.append_to_table(self._table)
    self._cache[localpath] = file_handle
    return file_handle
Register a file in the archive. If the file already exists, this raises a `KeyError` Parameters ---------- filepath : str The path to the file creator : int A unique key for the job that created this file status : `FileStatus` Enumeration giving current status of file flags : `FileFlags` Enumeration giving flags set on this file Returns `FileHandle`
entailment
def update_file(self, filepath, creator, status):
    """Update a file in the archive

    If the file does not exist, this raises a `KeyError`

    Parameters
    ----------
    filepath : str
        The path to the file
    creator : int
        A unique key for the job that created this file
    status : `FileStatus`
        Enumeration giving current status of file

    Returns `FileHandle`
    """
    file_handle = self.get_handle(filepath)
    if status in [FileStatus.exists, FileStatus.superseded]:
        # Make sure the file really exists
        fullpath = file_handle.fullpath
        if not os.path.exists(fullpath):
            raise ValueError("File %s does not exist" % fullpath)
        timestamp = int(os.stat(fullpath).st_mtime)
    else:
        timestamp = 0
    file_handle.creator = creator
    file_handle.timestamp = timestamp
    file_handle.status = status
    file_handle.update_table_row(self._table, file_handle.key - 1)
    return file_handle
Update a file in the archive If the file does not exist, this raises a `KeyError` Parameters ---------- filepath : str The path to the file creator : int A unique key for the job that created this file status : `FileStatus` Enumeration giving current status of file Returns `FileHandle`
entailment
def get_file_ids(self, file_list, creator=None,
                 status=FileStatus.no_file, file_dict=None):
    """Get or create a list of file ids based on file names

    Parameters
    ----------
    file_list : list
        The paths to the files
    creator : int
        A unique key for the job that created these files
    status : `FileStatus`
        Enumeration giving current status of files
    file_dict : `FileDict`
        Mask giving flags set on this file

    Returns list of integers
    """
    ret_list = []
    for fname in file_list:
        if file_dict is None:
            flags = FileFlags.no_flags
        else:
            flags = file_dict.file_dict[fname]
        try:
            fhandle = self.get_handle(fname)
        except KeyError:
            if creator is None:
                creator = -1
            fhandle = self.register_file(fname, creator, status, flags)
        ret_list.append(fhandle.key)
    return ret_list
Get or create a list of file ids based on file names Parameters ---------- file_list : list The paths to the files creator : int A unique key for the job that created these files status : `FileStatus` Enumeration giving current status of files file_dict : `FileDict` Mask giving flags set on this file Returns list of integers
entailment
def get_file_paths(self, id_list):
    """Get a list of file paths based on a set of ids

    Parameters
    ----------
    id_list : list
        List of integer file keys

    Returns list of file paths
    """
    if id_list is None:
        return []
    try:
        # Convert to an array so the key -> row-index offset can be
        # applied elementwise (a plain list would not support `- 1`).
        path_array = self._table[np.array(id_list) - 1]['path']
    except IndexError:
        print("IndexError ", len(self._table), id_list)
        path_array = []
    return [path for path in path_array]
Get a list of file paths based on a set of ids Parameters ---------- id_list : list List of integer file keys Returns list of file paths
entailment
def write_table_file(self, table_file=None):
    """Write the table to self._table_file"""
    if self._table is None:
        raise RuntimeError("No table to write")
    if table_file is not None:
        self._table_file = table_file
    if self._table_file is None:
        raise RuntimeError("No output file specified for table")
    write_tables_to_fits(self._table_file, [self._table], clobber=True,
                         namelist=['FILE_ARCHIVE'])
Write the table to self._table_file
entailment
def update_file_status(self):
    """Update the status of all the files in the archive"""
    nfiles = len(self.cache.keys())
    status_vect = np.zeros(6, int)
    sys.stdout.write("Updating status of %i files: " % nfiles)
    sys.stdout.flush()
    for i, key in enumerate(self.cache.keys()):
        if i % 200 == 0:
            sys.stdout.write('.')
            sys.stdout.flush()
        fhandle = self.cache[key]
        fhandle.check_status(self._base_path)
        fhandle.update_table_row(self._table, fhandle.key - 1)
        status_vect[fhandle.status] += 1
    sys.stdout.write("!\n")
    sys.stdout.flush()
    sys.stdout.write("Summary:\n")
    sys.stdout.write(" no_file: %i\n" % status_vect[0])
    sys.stdout.write(" expected: %i\n" % status_vect[1])
    sys.stdout.write(" exists: %i\n" % status_vect[2])
    sys.stdout.write(" missing: %i\n" % status_vect[3])
    sys.stdout.write(" superseded: %i\n" % status_vect[4])
    sys.stdout.write(" temp_removed: %i\n" % status_vect[5])
Update the status of all the files in the archive
entailment
def _make_ltcube_file_list(ltsumfile, num_files):
    """Make the list of input files for a particular energy bin X psf type"""
    outbasename = os.path.basename(ltsumfile)
    lt_list_file = ltsumfile.replace('fits', 'lst')
    outfile = open(lt_list_file, 'w')
    for i in range(num_files):
        split_key = "%06i" % i
        output_dir = os.path.join(NAME_FACTORY.base_dict['basedir'],
                                  'counts_cubes', split_key)
        filepath = os.path.join(
            output_dir, outbasename.replace('.fits', '_%s.fits' % split_key))
        outfile.write(filepath)
        outfile.write("\n")
    outfile.close()
    return '@' + lt_list_file
Make the list of input files for a particular energy bin X psf type
entailment
def register_classes():
    """Register these classes with the `LinkFactory`"""
    Gtlink_select.register_class()
    Gtlink_bin.register_class()
    Gtlink_expcube2.register_class()
    Gtlink_scrmaps.register_class()
    Gtlink_mktime.register_class()
    Gtlink_ltcube.register_class()
    Link_FermipyCoadd.register_class()
    Link_FermipyGatherSrcmaps.register_class()
    Link_FermipyVstack.register_class()
    Link_FermipyHealview.register_class()
    Gtexpcube2_SG.register_class()
    Gtltsum_SG.register_class()
    SumRings_SG.register_class()
    Vstack_SG.register_class()
    GatherSrcmaps_SG.register_class()
    Healview_SG.register_class()
Register these classes with the `LinkFactory`
entailment
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}
    components = Component.build_from_yamlfile(args['comp'])
    datafile = args['data']
    if datafile is None or datafile == 'None':
        return job_configs
    NAME_FACTORY.update_base_dict(args['data'])
    for comp in components:
        zcut = "zmax%i" % comp.zmax
        mktimelist = copy.copy(comp.mktimefilters)
        if not mktimelist:
            mktimelist.append('none')
        evtclasslist_keys = copy.copy(comp.evtclasses)
        if not evtclasslist_keys:
            evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
        else:
            evtclasslist_vals = copy.copy(evtclasslist_keys)
        for mktimekey in mktimelist:
            for evtclassval in evtclasslist_vals:
                fullkey = comp.make_key(
                    '%s_%s_{ebin_name}_%s_{evtype_name}' %
                    (evtclassval, zcut, mktimekey))
                name_keys = dict(zcut=zcut,
                                 ebin=comp.ebin_name,
                                 psftype=comp.evtype_name,
                                 coordsys=comp.coordsys,
                                 irf_ver=NAME_FACTORY.irf_ver(),
                                 mktime=mktimekey,
                                 evclass=evtclassval,
                                 fullpath=True)
                outfile = NAME_FACTORY.bexpcube(**name_keys)
                cmap = NAME_FACTORY.ccube(**name_keys)
                infile = NAME_FACTORY.ltcube(**name_keys)
                logfile = make_nfs_path(outfile.replace('.fits', '.log'))
                job_configs[fullkey] = dict(
                    cmap=cmap,
                    infile=infile,
                    outfile=outfile,
                    irfs=NAME_FACTORY.irfs(**name_keys),
                    hpx_order=min(comp.hpx_order, args['hpx_order_max']),
                    evtype=comp.evtype,
                    logfile=logfile)
    return job_configs
Hook to build job configurations
entailment
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}
    gmm = make_ring_dicts(library=args['library'], basedir='.')
    for galkey in gmm.galkeys():
        ring_dict = gmm.ring_dict(galkey)
        for ring_key, ring_info in ring_dict.items():
            output_file = ring_info.merged_gasmap
            file_string = ""
            for fname in ring_info.files:
                file_string += " %s" % fname
            logfile = make_nfs_path(output_file.replace('.fits', '.log'))
            job_configs[ring_key] = dict(output=output_file,
                                         args=file_string,
                                         logfile=logfile)
    return job_configs
Hook to build job configurations
entailment
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}
    components = Component.build_from_yamlfile(args['comp'])
    NAME_FACTORY.update_base_dict(args['data'])
    ret_dict = make_diffuse_comp_info_dict(
        components=components,
        library=args['library'],
        basedir=NAME_FACTORY.base_dict['basedir'])
    diffuse_comp_info_dict = ret_dict['comp_info_dict']
    for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()):
        diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key]
        for comp in components:
            zcut = "zmax%i" % comp.zmax
            key = comp.make_key('{ebin_name}_{evtype_name}')
            if diffuse_comp_info_value.components is None:
                sub_comp_info = diffuse_comp_info_value
            else:
                sub_comp_info = diffuse_comp_info_value.get_component_info(comp)
            name_keys = dict(zcut=zcut,
                             sourcekey=sub_comp_info.sourcekey,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             mktime='none',
                             coordsys=comp.coordsys,
                             irf_ver=NAME_FACTORY.irf_ver(),
                             fullpath=True)
            outfile = NAME_FACTORY.srcmaps(**name_keys)
            outfile_tokens = os.path.splitext(outfile)
            infile_regexp = "%s_*.fits*" % outfile_tokens[0]
            full_key = "%s_%s" % (sub_comp_info.sourcekey, key)
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            job_configs[full_key] = dict(output=outfile,
                                         args=infile_regexp,
                                         hdu=sub_comp_info.source_name,
                                         logfile=logfile)
    return job_configs
Hook to build job configurations
entailment
def build_job_configs(self, args):
    """Hook to build job configurations"""
    job_configs = {}
    components = Component.build_from_yamlfile(args['comp'])
    NAME_FACTORY.update_base_dict(args['data'])
    ret_dict = make_catalog_comp_dict(
        library=args['library'],
        basedir=NAME_FACTORY.base_dict['basedir'])
    catalog_info_dict = ret_dict['catalog_info_dict']
    for catalog_name in catalog_info_dict:
        for comp in components:
            zcut = "zmax%i" % comp.zmax
            key = comp.make_key('{ebin_name}_{evtype_name}')
            name_keys = dict(zcut=zcut,
                             sourcekey=catalog_name,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             coordsys=comp.coordsys,
                             irf_ver=NAME_FACTORY.irf_ver(),
                             mktime='none',
                             fullpath=True)
            outfile = NAME_FACTORY.srcmaps(**name_keys)
            outfile_tokens = os.path.splitext(outfile)
            infile_regexp = "%s_*.fits" % outfile_tokens[0]
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            job_configs[key] = dict(output=outfile,
                                    args=infile_regexp,
                                    logfile=logfile)
    return job_configs
Hook to build job configurations
entailment
def norm(x, mu, sigma=1.0): """ Scipy norm function """ return stats.norm(loc=mu, scale=sigma).pdf(x)
Scipy norm function
entailment
def ln_norm(x, mu, sigma=1.0):
    """ Natural log of the scipy norm function """
    return np.log(stats.norm(loc=mu, scale=sigma).pdf(x))
Natural log of the scipy norm function
entailment
def lognorm(x, mu, sigma=1.0): """ Log-normal function from scipy """ return stats.lognorm(sigma, scale=mu).pdf(x)
Log-normal function from scipy
entailment
def log10norm(x, mu, sigma=1.0):
    """ Scale scipy lognorm from natural log to base 10

    x : input parameter
    mu : mean of the underlying log10 gaussian
    sigma : standard deviation of the underlying log10 gaussian
    """
    return stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)
Scale scipy lognorm from natural log to base 10 x : input parameter mu : mean of the underlying log10 gaussian sigma : standard deviation of the underlying log10 gaussian
entailment
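A quick numerical sanity check (mu and sigma are illustrative): the base-10 rescaling of the shape parameter should leave the distribution normalized over the positive axis.

import numpy as np
from scipy import stats
from scipy.integrate import quad

mu, sigma = 1.0, 0.25  # illustrative values
total, _ = quad(lambda x: stats.lognorm(sigma * np.log(10), scale=mu).pdf(x),
                0., np.inf)
# total should be ~1.0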
def lgauss(x, mu, sigma=1.0, logpdf=False):
    """ Log10 normal distribution.

    x : Parameter of interest for scanning the pdf
    mu : Peak of the lognormal distribution (the mean of the underlying
         normal distribution is log10(mu))
    sigma : Standard deviation of the underlying normal distribution
    logpdf : If True, return the density in log10(x) rather than in x,
             i.e., omit the 1/(x ln 10) Jacobian factor
    """
    x = np.array(x, ndmin=1)
    lmu = np.log10(mu)
    s2 = sigma * sigma
    lx = np.zeros(x.shape)
    lx[x > 0] = np.log10(x[x > 0])
    v = 1. / np.sqrt(2 * s2 * np.pi) * np.exp(-(lx - lmu)**2 / (2 * s2))
    if not logpdf:
        v /= (x * np.log(10.))
        v[x <= 0] = 0.  # the pdf vanishes off the positive support
    else:
        v[x <= 0] = -np.inf
    return v
Log10 normal distribution. x : Parameter of interest for scanning the pdf mu : Peak of the lognormal distribution (the mean of the underlying normal distribution is log10(mu)) sigma : Standard deviation of the underlying normal distribution logpdf : If True, return the density in log10(x) rather than in x
entailment
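A minimal consistency check, assuming the two parameterizations are meant to agree: lgauss with logpdf=False is algebraically the same lognormal density that log10norm builds from scipy.

import numpy as np
from scipy import stats

x = np.logspace(-1., 1., 5)
mu, sigma = 1.0, 0.25  # illustrative values
a = lgauss(x, mu, sigma)                                # closed form above
b = stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)  # as in log10norm
assert np.allclose(a, b)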
def create_prior_functor(d):
    """Build a prior from a dictionary.

    Parameters
    ----------
    d : dict
        Must contain d['functype'] (a recognized function type) and all
        of the required parameters for the prior_functor of the desired
        type.

    Returns
    -------
    A sub-class of '~fermipy.stats_utils.prior_functor'

    Recognized types are:

    'lognorm' : Scipy lognormal distribution
    'norm' : Scipy normal distribution
    'gauss' : Gaussian truncated at zero
    'lgauss' : Gaussian in log-space
    'lgauss_like' : Gaussian in log-space, with arguments reversed
    'lgauss_log' : Gaussian in log-space, using the density in log10(x)
    """
    functype = d.get('functype', 'lgauss_like')
    j_ref = d.get('j_ref', 1.0)
    if functype == 'norm':
        return norm_prior(d['mu'], d['sigma'], j_ref)
    elif functype == 'lognorm':
        return lognorm_prior(d['mu'], d['sigma'], j_ref)
    elif functype == 'gauss':
        return function_prior(functype, d['mu'], d['sigma'],
                              gauss, lngauss, j_ref)
    elif functype == 'lgauss':
        return function_prior(functype, d['mu'], d['sigma'],
                              lgauss, lnlgauss, j_ref)
    elif functype in ['lgauss_like', 'lgauss_lik']:
        def fn(x, y, s):
            return lgauss(y, x, s)

        def lnfn(x, y, s):
            return lnlgauss(y, x, s)
        return function_prior(functype, d['mu'], d['sigma'], fn, lnfn, j_ref)
    elif functype == 'lgauss_log':
        def fn(x, y, s):
            return lgauss(x, y, s, logpdf=True)

        def lnfn(x, y, s):
            return lnlgauss(x, y, s, logpdf=True)
        return function_prior(functype, d['mu'], d['sigma'], fn, lnfn, j_ref)
    else:
        raise KeyError("Unrecognized prior_functor type %s" % functype)
Build a prior from a dictionary. Parameters ---------- d : dict Must contain d['functype'] (a recognized function type) and all of the required parameters for the prior_functor of the desired type. Returns ------- A sub-class of '~fermipy.stats_utils.prior_functor' Recognized types are: 'lognorm' : Scipy lognormal distribution 'norm' : Scipy normal distribution 'gauss' : Gaussian truncated at zero 'lgauss' : Gaussian in log-space 'lgauss_like' : Gaussian in log-space, with arguments reversed 'lgauss_log' : Gaussian in log-space, using the density in log10(x)
entailment
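A short usage sketch, assuming the returned functor is callable on arrays (the normalization method below relies on exactly that); mu and sigma are illustrative:

import numpy as np

d = dict(functype='lgauss', mu=1.0, sigma=0.25)  # j_ref defaults to 1.0
prior = create_prior_functor(d)
vals = prior(np.logspace(-1., 1., 5))  # evaluate the prior pdf on a grid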
def marginalization_bins(self): """Binning to use to do the marginalization integrals """ log_mean = np.log10(self.mean()) # Default is to marginalize over two decades, # centered on mean, using 1000 bins return np.logspace(-1. + log_mean, 1. + log_mean, 1001)/self._j_ref
Binning to use to do the marginalization integrals
entailment
def profile_bins(self):
    """ The binning to use to do the profile fitting """
    log_mean = np.log10(self.mean())
    log_half_width = max(5. * self.sigma(), 3.)
    # Default is to profile over +/- max(5 sigma, 3 decades),
    # centered on the mean, using 100 bins
    return np.logspace(log_mean - log_half_width,
                       log_mean + log_half_width, 101) / self._j_ref
The binning to use to do the profile fitting
entailment
def normalization(self):
    """ The normalization, i.e., the integral of the function
    over the normalization_range """
    norm_r = self.normalization_range()
    return quad(self, norm_r[0] * self._j_ref, norm_r[1] * self._j_ref)[0]
The normalization, i.e., the integral of the function over the normalization_range
entailment
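Here quad is scipy.integrate.quad, so the pattern in isolation looks like this (the integrand and range are illustrative):

import numpy as np
from scipy import stats
from scipy.integrate import quad

pdf = stats.lognorm(0.25 * np.log(10), scale=1.0).pdf
norm_val, err = quad(pdf, 0.01, 100.)  # integral over the normalization range
# norm_val should be ~1.0, since this range covers essentially all the support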
def init_return(self, ret_type):
    """Specify the return type.

    Note that this will also construct the
    '~fermipy.castro.Interpolator' object
    for the requested return type.
    """
    if self._ret_type == ret_type:
        return
    if ret_type == "straight":
        self._interp = self._lnlfn.interp
    elif ret_type == "profile":
        self._profile_loglike_spline(self._lnlfn.interp.x)
        self._interp = self._prof_interp
    elif ret_type == "marginal":
        self._marginal_loglike(self._lnlfn.interp.x)
        self._interp = self._marg_interp
    elif ret_type == "posterior":
        self._posterior(self._lnlfn.interp.x)
        self._interp = self._post_interp
    else:
        raise ValueError("Did not recognize return type %s" % ret_type)
    self._ret_type = ret_type
Specify the return type. Note that this will also construct the '~fermipy.castro.Interpolator' object for the requested return type.
entailment
def clear_cached_values(self): """Removes all of the cached values and interpolators """ self._prof_interp = None self._prof_y = None self._prof_z = None self._marg_interp = None self._marg_z = None self._post = None self._post_interp = None self._interp = None self._ret_type = None
Removes all of the cached values and interpolators
entailment
def like(self, x, y):
    """Evaluate the 2-D likelihood in the x/y parameter space.

    The shapes of the two input arrays should match,
    or broadcast against each other.

    Parameters
    ----------
    x : array_like
        Array of coordinates in the `x` parameter.

    y : array_like
        Array of coordinates in the `y` nuisance parameter.
    """
    # The interpolator returns the negative log-likelihood
    z = self._lnlfn.interp(x * y)
    return np.exp(-z) * self._nuis_pdf(y) / self._nuis_norm
Evaluate the 2-D likelihood in the x/y parameter space. The shapes of the two input arrays should match, or broadcast against each other. Parameters ---------- x : array_like Array of coordinates in the `x` parameter. y : array_like Array of coordinates in the `y` nuisance parameter.
entailment
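A sketch of evaluating the likelihood on a 2-D grid via numpy broadcasting, assuming lnl is an instance of this class:

import numpy as np

x = np.linspace(0.1, 10., 50)   # parameter of interest
y = np.linspace(0.5, 2.0, 40)   # nuisance parameter
grid = lnl.like(x[:, np.newaxis], y[np.newaxis, :])  # shape (50, 40)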
def loglike(self, x, y):
    """Evaluate the 2-D log-likelihood in the x/y parameter space.

    The shapes of the two input arrays should match,
    or broadcast against each other.

    Parameters
    ----------
    x : array_like
        Array of coordinates in the `x` parameter.

    y : array_like
        Array of coordinates in the `y` nuisance parameter.
    """
    nuis = self._nuis_pdf(y)
    # Floor the log of the nuisance pdf where it vanishes; suppress the
    # log-of-zero warnings since those entries are replaced anyway
    with np.errstate(divide='ignore', invalid='ignore'):
        log_nuis = np.where(nuis > 0., np.log(nuis), -1e2)
    vals = -self._lnlfn.interp(x * y) + log_nuis - self._nuis_log_norm
    return vals
Evaluate the 2-D log-likelihood in the x/y parameter space. The shapes of the two input arrays should match, or broadcast against each other. Parameters ---------- x : array_like Array of coordinates in the `x` parameter. y : array_like Array of coordinates in the `y` nuisance parameter.
entailment
def profile_loglike(self, x):
    """Profile log-likelihood.

    Returns ``L_prof(x, y=y_min | z')``, where ``y_min`` is the value
    of ``y`` that minimizes ``L`` for a given ``x``.

    This will use the cached '~fermipy.castro.Interpolator'
    object if possible, and construct it if needed.
    """
    if self._prof_interp is None:
        # This calculates values and caches the spline
        return self._profile_loglike(x)[1]
    x = np.array(x, ndmin=1)
    return self._prof_interp(x)
Profile log-likelihood. Returns ``L_prof(x, y=y_min | z')``, where ``y_min`` is the value of ``y`` that minimizes ``L`` for a given ``x``. This will use the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
entailment
def marginal_loglike(self, x):
    r"""Marginal log-likelihood.

    Returns ``L_marg(x) = \int L(x,y|z') L(y) dy``

    This will use the cached '~fermipy.castro.Interpolator'
    object if possible, and construct it if needed.
    """
    if self._marg_interp is None:
        # This calculates values and caches the spline
        return self._marginal_loglike(x)
    x = np.array(x, ndmin=1)
    return self._marg_interp(x)
Marginal log-likelihood. Returns ``L_marg(x) = \int L(x,y|z') L(y) dy`` This will use the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
entailment
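The two reductions can then be compared on the same grid (again assuming lnl is an instance of this class):

import numpy as np

xvals = np.linspace(0.1, 10., 100)
prof = lnl.profile_loglike(xvals)   # minimize over the nuisance parameter
marg = lnl.marginal_loglike(xvals)  # integrate over the nuisance parameter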
def posterior(self, x):
    r"""Posterior function.

    Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy``

    This will use the cached '~fermipy.castro.Interpolator'
    object if possible, and construct it if needed.
    """
    if self._post is None:
        return self._posterior(x)
    x = np.array(x, ndmin=1)
    return self._post_interp(x)
Posterior function. Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy`` This will use the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
entailment
def _profile_loglike(self, x): """Internal function to calculate and cache the profile likelihood """ x = np.array(x, ndmin=1) z = [] y = [] for xtmp in x: def fn(t): return -self.loglike(xtmp, t) ytmp = opt.fmin(fn, 1.0, disp=False)[0] ztmp = self.loglike(xtmp, ytmp) z.append(ztmp) y.append(ytmp) self._prof_y = np.array(y) self._prof_z = np.array(z) self._prof_z = self._prof_z.max() - self._prof_z self._prof_interp = castro.Interpolator(x, self._prof_z) return self._prof_y, self._prof_z
Internal function to calculate and cache the profile likelihood
entailment
def _profile_loglike_spline(self, x):
    """Internal function to calculate and cache the profile likelihood
    """
    z = []
    y = []
    yv = self._nuis_pdf.profile_bins()
    nuis_vals = self._nuis_pdf.log_value(yv) - self._nuis_log_norm
    for xtmp in x:
        zv = -1. * self._lnlfn.interp(xtmp * yv) + nuis_vals
        sp = splrep(yv, zv, k=2, s=0)

        def rf(t):
            return splev(t, sp, der=1)
        # Bracket the peak of the spline and solve for the zero of its
        # derivative; fall back to the grid maximum if the bracket fails
        ix = np.argmax(splev(yv, sp))
        imin, imax = max(0, ix - 3), min(len(yv) - 1, ix + 3)
        try:
            y0 = opt.brentq(rf, yv[imin], yv[imax], xtol=1e-10)
        except ValueError:
            y0 = yv[ix]
        z0 = self.loglike(xtmp, y0)
        z.append(z0)
        y.append(y0)
    self._prof_y = np.array(y)
    self._prof_z = np.array(z)
    self._prof_z = self._prof_z.max() - self._prof_z
    self._prof_interp = castro.Interpolator(x, self._prof_z)
    return self._prof_y, self._prof_z
Internal function to calculate and cache the profile likelihood
entailment
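The bracket-and-solve step in isolation, with a toy concave curve peaking at y = 1.2:

import numpy as np
from scipy.interpolate import splrep, splev
from scipy.optimize import brentq

yv = np.linspace(0.5, 2.0, 41)
zv = -(yv - 1.2)**2                   # toy log-likelihood, maximum at 1.2
sp = splrep(yv, zv, k=2, s=0)
y0 = brentq(lambda t: splev(t, sp, der=1), yv[0], yv[-1])
# y0 is ~1.2: the zero of the spline derivative marks the peak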
def _marginal_loglike(self, x):
    """Internal function to calculate and cache the marginal likelihood
    """
    yedge = self._nuis_pdf.marginalization_bins()
    yw = yedge[1:] - yedge[:-1]
    yc = 0.5 * (yedge[1:] + yedge[:-1])

    s = self.like(x[:, np.newaxis], yc[np.newaxis, :])

    # This does the marginalization integral (midpoint rule)
    z = np.sum(s * yw, axis=1)

    self._marg_z = np.zeros(z.shape)
    msk = z > 0
    self._marg_z[msk] = -1 * np.log(z[msk])

    # Linearly extrapolate log(z) past the last point where the integral
    # is positive; this is needed when the likelihood underflows to zero
    dlogzdx = (np.log(z[msk][-1]) - np.log(z[msk][-2])
               ) / (x[msk][-1] - x[msk][-2])
    self._marg_z[~msk] = self._marg_z[msk][-1] - \
        dlogzdx * (x[~msk] - x[msk][-1])

    self._marg_interp = castro.Interpolator(x, self._marg_z)
    return self._marg_z
Internal function to calculate and cache the marginal likelihood
entailment
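The midpoint-rule integral in isolation, checked against a distribution that should integrate to one:

import numpy as np
from scipy import stats

yedge = np.logspace(-2., 2., 1001)
yc = 0.5 * (yedge[1:] + yedge[:-1])
yw = yedge[1:] - yedge[:-1]
total = np.sum(stats.lognorm(0.5, scale=1.0).pdf(yc) * yw)
# total should be ~1.0 (midpoint rule over +-2 decades)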
def _posterior(self, x): """Internal function to calculate and cache the posterior """ yedge = self._nuis_pdf.marginalization_bins() yc = 0.5 * (yedge[1:] + yedge[:-1]) yw = yedge[1:] - yedge[:-1] like_array = self.like(x[:, np.newaxis], yc[np.newaxis, :]) * yw like_array /= like_array.sum() self._post = like_array.sum(1) self._post_interp = castro.Interpolator(x, self._post) return self._post
Internal function to calculate and cache the posterior
entailment
def _compute_mle(self):
    """Maximum likelihood estimator.
    """
    xmax = self._lnlfn.interp.xmax
    x0 = max(self._lnlfn.mle(), xmax * 1e-5)
    # Penalize points outside (0, xmax) so the minimizer stays in bounds;
    # note the elementwise mask rather than a chained comparison, which is
    # not well defined for numpy arrays
    ret = opt.fmin(lambda x: np.where((x > 0) & (x < xmax),
                                      -self(x), np.inf),
                   x0, disp=False)
    mle = float(ret[0])
    return mle
Maximum likelihood estimator.
entailment
def build_from_energy_dict(cls, ebin_name, input_dict): """ Build a list of components from a dictionary for a single energy range """ psf_types = input_dict.pop('psf_types') output_list = [] for psf_type, val_dict in sorted(psf_types.items()): fulldict = input_dict.copy() fulldict.update(val_dict) fulldict['evtype_name'] = psf_type fulldict['ebin_name'] = ebin_name component = cls(**fulldict) output_list += [component] return output_list
Build a list of components from a dictionary for a single energy range
entailment
def build_from_yamlstr(cls, yamlstr):
    """ Build a list of components from a yaml string """
    top_dict = yaml.safe_load(yamlstr)
    # 'coordsys' is popped here, so the remaining keys are all energy bins
    coordsys = top_dict.pop('coordsys')
    output_list = []
    for e_key, e_dict in sorted(top_dict.items()):
        e_dict['coordsys'] = coordsys
        output_list += cls.build_from_energy_dict(e_key, e_dict)
    return output_list
Build a list of components from a yaml string
entailment
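A sketch of the YAML layout these two builders expect: a top-level coordsys key plus one block per energy bin, each with a psf_types mapping. The inner keys (hpx_order here) are placeholders for whatever keyword arguments the Component constructor actually accepts.

yamlstr = """
coordsys : GAL
E0 :
  psf_types :
    PSF3 :
      hpx_order : 5
E1 :
  psf_types :
    PSF23 :
      hpx_order : 6
"""
components = Component.build_from_yamlstr(yamlstr)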
def _match_cubes(ccube_clean, ccube_dirty, bexpcube_clean, bexpcube_dirty, hpx_order):
    """ Match the HEALPIX scheme and order of all the input cubes.

    Returns a dictionary of cubes with the same HEALPIX scheme and order.
    """
    cubes = dict(ccube_clean=ccube_clean,
                 ccube_dirty=ccube_dirty,
                 bexpcube_clean=bexpcube_clean,
                 bexpcube_dirty=bexpcube_dirty)
    ret_dict = {}
    for key, cube in cubes.items():
        # Regrid to the requested order if needed
        if hpx_order != cube.hpx.order:
            cube = cube.ud_grade(hpx_order, preserve_counts=True)
        # Match the NESTED/RING scheme of the clean counts cube
        if cube.hpx.nest != ccube_clean.hpx.nest:
            cube = cube.swap_scheme()
        ret_dict[key] = cube
    return ret_dict
Match the HEALPIX scheme and order of all the input cubes. Returns a dictionary of cubes with the same HEALPIX scheme and order.
entailment
def _compute_intensity(ccube, bexpcube):
    """ Compute the intensity map """
    # Take the geometric mean of adjacent energy planes of the exposure
    # cube (edge values -> per-bin values) before dividing the counts
    bexp_data = np.sqrt(bexpcube.data[0:-1, 0:] * bexpcube.data[1:, 0:])
    intensity_data = ccube.data / bexp_data
    intensity_map = HpxMap(intensity_data, ccube.hpx)
    return intensity_map
Compute the intensity map
entailment
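The edge-to-bin reduction in isolation: the per-bin value is the geometric mean of adjacent edge values (illustrative numbers):

import numpy as np

edges = np.array([1., 2., 4., 8.])     # values at energy-bin edges
geo = np.sqrt(edges[:-1] * edges[1:])  # per-bin values: [1.41, 2.83, 5.66]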