code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
profile_path = os.path.join(basedir, "profile_%s.yaml" % profile_name)
profile_config = load_yaml(profile_path)
src_name = profile_config['name']
profile_dict = profile_config['source_model']
return profile_name, src_name, profile_dict
def build_profile_dict(basedir, profile_name)
Get the name and source dictionary for the test source.

Parameters
----------
basedir : str
    Path to the analysis directory

profile_name : str
    Key for the spatial form of the target

Returns
-------
profile_name : str
    Name for this particular profile

src_name : str
    Name of the source for this particular profile

profile_dict : dict
    Dictionary with the source parameters
3.044314
2.874646
1.059022
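A brief usage sketch for build_profile_dict; the directory and profile key below are hypothetical, and the call assumes a profile_point.yaml with 'name' and 'source_model' entries exists under the analysis directory:

# Hypothetical example: read back the 'point' spatial profile for a target.
profile, src_name, src_dict = build_profile_dict('targets/draco', 'point')
print(src_name, src_dict)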
if DEFAULT_JOB_TYPE == 'slac':
    from fermipy.jobs.slac_impl import get_slac_default_args
    return get_slac_default_args(job_time)
elif DEFAULT_JOB_TYPE == 'native':
    from fermipy.jobs.native_impl import get_native_default_args
    return get_native_default_args()
return None
def get_batch_job_args(job_time=1500)
Get the correct set of batch job arguments.

Parameters
----------
job_time : int
    Expected max length of the job, in seconds.  This is used to
    select the batch queue and set the job_check_sleep parameter that
    sets how often we check for job completion.

Returns
-------
job_args : dict
    Dictionary of arguments used to submit a batch job
2.993553
3.465114
0.863912
batch_job_args = get_batch_job_args(job_time)

if DEFAULT_JOB_TYPE == 'slac':
    from fermipy.jobs.slac_impl import SlacInterface
    return SlacInterface(**batch_job_args)
elif DEFAULT_JOB_TYPE == 'native':
    from fermipy.jobs.native_impl import NativeInterface
    return NativeInterface(**batch_job_args)
return None
def get_batch_job_interface(job_time=1500)
Create a batch job interface object.

Parameters
----------
job_time : int
    Expected max length of the job, in seconds.  This is used to
    select the batch queue and set the job_check_sleep parameter that
    sets how often we check for job completion.

Returns
-------
job_interface : `SysInterface`
    Object that manages interactions with the batch farm
2.987807
3.669775
0.814166
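A usage sketch for the two helpers above, assuming they are in scope and DEFAULT_JOB_TYPE has been configured ('slac' or 'native'); the job time is illustrative:

# Pick queue arguments and build the matching interface for a ~2 hour job.
job_args = get_batch_job_args(job_time=7200)
interface = get_batch_job_interface(job_time=7200)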
import sys
import argparse

# Argument definition
usage = "usage: %(prog)s [options]"
description = "Collect all the new source"
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument("-i", "--input", type=argparse.FileType('r'),
                    required=True, help="Input file")
parser.add_argument("-e", "--extension", type=str, default="SKYMAP",
                    help="FITS HDU with HEALPix map")
parser.add_argument("--ebin", type=str, default=None,
                    help="Energy bin, integer or 'ALL'")
parser.add_argument("--zscale", type=str, default='log',
                    help="Scaling for color scale")
parser.add_argument("--zmin", type=float, default=None,
                    help="Minimum z-axis value")
parser.add_argument("--zmax", type=float, default=None,
                    help="Maximum z-axis value")
parser.add_argument("--cbar", action='store_true', default=False,
                    help="Draw color bar")
parser.add_argument("-o", "--output", type=argparse.FileType('w'),
                    help="Output file. Leave blank for interactive.")

# Parse the command line
args = parser.parse_args(sys.argv[1:])

hpxmap = Map.read(args.input.name, hdu=args.extension)
outdata = []

if args.zscale == 'sqrt':
    the_norm = PowerNorm(gamma=0.5)
elif args.zscale == 'log':
    the_norm = LogNorm()
else:
    the_norm = Normalize()

fig, ax, im = hpxmap.plot(norm=the_norm, vmin=args.zmin, vmax=args.zmax)
outdata.append(fig)

if args.cbar:
    cbar = plt.colorbar(im, orientation='horizontal',
                        shrink=0.7, pad=0.15, fraction=0.05)

if args.output is None:
    plt.show()
else:
    if len(outdata) == 1:
        plt.savefig(args.output.name)
    else:
        base, ext = os.path.splitext(args.output.name)
        for i, fig in enumerate(outdata):
            fig.savefig("%s_%02i%s" % (base, i, ext))
def main()
if args.ebin == "ALL": wcsproj = hpxmap.geom.make_wcs( naxis=2, proj='MOL', energies=None, oversample=2) mapping = HpxToWcsMapping(hpxmap.hpx, wcsproj) for i, data in enumerate(hpxmap.counts): ip = ImagePlotter(data=data, proj=hpxmap.hpx, mapping=mapping) fig = plt.figure(i) im, ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax) outdata.append(fig) elif args.ebin is None: ip = ImagePlotter(data=hpxmap.counts, proj=hpxmap.hpx) im, ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax) outdata.append((im, ax)) else: try: ibin = int(args.ebin) ip = ImagePlotter(data=hpxmap.counts[ibin], proj=hpxmap.hpx) im, ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax) outdata.append((im, ax)) except: raise ValueError("--ebin argument must be an integer or 'ALL'")
2.602846
2.308156
1.127673
CopyBaseROI.register_class()
CopyBaseROI_SG.register_class()
SimulateROI.register_class()
SimulateROI_SG.register_class()
RandomDirGen.register_class()
RandomDirGen_SG.register_class()
def register_classes()
Register these classes with the `LinkFactory`
4.99781
5.197032
0.961666
for pattern in copyfiles:
    glob_path = os.path.join(orig_dir, pattern)
    files = glob.glob(glob_path)
    for ff in files:
        f = os.path.basename(ff)
        orig_path = os.path.join(orig_dir, f)
        dest_path = os.path.join(dest_dir, f)
        try:
            copyfile(orig_path, dest_path)
        except IOError:
            sys.stderr.write("WARNING: failed to copy %s\n" % orig_path)
def copy_analysis_files(cls, orig_dir, dest_dir, copyfiles)
Copy a list of files from orig_dir to dest_dir
1.903398
1.924474
0.989049
try:
    os.makedirs(dest_dir)
except OSError:
    pass
copyfiles = ['%s.fits' % roi_baseline,
             '%s.npy' % roi_baseline,
             '%s_*.xml' % roi_baseline] + cls.copyfiles
if isinstance(extracopy, list):
    copyfiles += extracopy
cls.copy_analysis_files(orig_dir, dest_dir, copyfiles)
def copy_target_dir(cls, orig_dir, dest_dir, roi_baseline, extracopy)
Create and populate directories for target analysis
3.188967
3.308626
0.963834
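A usage sketch with hypothetical paths, assuming cls is a class that provides the two copy helpers above and a copyfiles class attribute (e.g. CopyBaseROI from the register_classes record):

# Stage the baseline fit products of one target into a simulation directory.
cls.copy_target_dir('analysis/target_a', 'sims/target_a',
                    roi_baseline='fit_baseline',
                    extracopy=['profile_point.yaml'])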
args = self._parser.parse_args(argv)
name_keys = dict(target_type=args.ttype,
                 target_name=args.target,
                 sim_name=args.sim,
                 fullpath=True)
orig_dir = NAME_FACTORY.targetdir(**name_keys)
dest_dir = NAME_FACTORY.sim_targetdir(**name_keys)
self.copy_target_dir(orig_dir, dest_dir,
                     args.roi_baseline, args.extracopy)
def run_analysis(self, argv)
Run this analysis
6.321289
6.328536
0.998855
job_configs = {}
ttype = args['ttype']
(sim_targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
targets = load_yaml(sim_targets_yaml)

base_config = dict(ttype=ttype,
                   roi_baseline=args['roi_baseline'],
                   extracopy=args['extracopy'],
                   sim=sim)

for target_name in targets.keys():
    targetdir = NAME_FACTORY.sim_targetdir(target_type=ttype,
                                           target_name=target_name,
                                           sim_name=sim)
    logfile = os.path.join(targetdir, 'copy_base_dir.log')
    job_config = base_config.copy()
    job_config.update(dict(target=target_name, logfile=logfile))
    job_configs[target_name] = job_config

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
4.278306
4.268262
1.002353
binning = config['binning']
binsz = binning['binsz']
coordsys = binning.get('coordsys', 'GAL')
roiwidth = binning['roiwidth']
proj = binning.get('proj', 'AIT')
ra = config['selection']['ra']
dec = config['selection']['dec']
npix = int(np.round(roiwidth / binsz))
skydir = SkyCoord(ra * u.deg, dec * u.deg)
wcsgeom = WcsGeom.create(npix=npix, binsz=binsz, proj=proj,
                         coordsys=coordsys, skydir=skydir)
return wcsgeom
def _make_wcsgeom_from_config(config)
Build a `WcsGeom` object from a fermipy configuration file
2.728108
2.67766
1.01884
step_x = rand_config['step_x']
step_y = rand_config['step_y']
max_x = rand_config['max_x']
max_y = rand_config['max_y']
seed = rand_config['seed']
nsims = rand_config['nsims']

cdelt = wcsgeom.wcs.wcs.cdelt
pixstep_x = step_x / cdelt[0]
pixstep_y = -1. * step_y / cdelt[1]
pixmax_x = max_x / cdelt[0]
pixmax_y = max_y / cdelt[1]
nstep_x = int(np.ceil(2. * pixmax_x / pixstep_x)) + 1
nstep_y = int(np.ceil(2. * pixmax_y / pixstep_y)) + 1

center = np.array(wcsgeom._center_pix)
grid = np.meshgrid(np.linspace(-1 * pixmax_x, pixmax_x, nstep_x),
                   np.linspace(-1 * pixmax_y, pixmax_y, nstep_y))
grid[0] += center[0]
grid[1] += center[1]

test_grid = wcsgeom.pix_to_coord(grid)
glat_vals = test_grid[0].flat
glon_vals = test_grid[1].flat
conv_vals = SkyCoord(glat_vals * u.deg, glon_vals * u.deg,
                     frame=Galactic).transform_to(ICRS)
ra_vals = conv_vals.ra.deg[seed:nsims]
dec_vals = conv_vals.dec.deg[seed:nsims]

o_dict = {}
for i, (ra, dec) in enumerate(zip(ra_vals, dec_vals)):
    key = i + seed
    o_dict[key] = dict(ra=ra, dec=dec)
return o_dict
def _build_skydir_dict(wcsgeom, rand_config)
Build a dictionary of random directions
2.149304
2.112373
1.017483
args = self._parser.parse_args(argv)
if is_null(args.config):
    raise ValueError("Config yaml file must be specified")
if is_null(args.rand_config):
    raise ValueError("Random direction config yaml file must be specified")
config = load_yaml(args.config)
rand_config = load_yaml(args.rand_config)
wcsgeom = self._make_wcsgeom_from_config(config)
dir_dict = self._build_skydir_dict(wcsgeom, rand_config)
if is_not_null(args.outfile):
    write_yaml(dir_dict, args.outfile)
def run_analysis(self, argv)
Run this analysis
3.752805
3.740568
1.003272
workdir = os.path.dirname(config_path)
new_config_path = config_path.replace('.yaml', '_%06i.yaml' % seed)
config = load_yaml(config_path)
comps = config.get('components', [config])
for i, comp in enumerate(comps):
    comp_name = "%02i" % i
    if 'gtlike' not in comp:
        comp['gtlike'] = {}
    orig_srcmap = os.path.abspath(
        os.path.join(workdir, 'srcmap_%s.fits' % comp_name))
    new_srcmap = os.path.abspath(
        os.path.join(workdir, 'srcmap_%06i_%s.fits' % (seed, comp_name)))
    comp['gtlike']['srcmap'] = new_srcmap
    comp['gtlike']['use_external_srcmap'] = True
    copyfile(orig_srcmap, new_srcmap)
write_yaml(config, new_config_path)
return new_config_path
def _clone_config_and_srcmaps(config_path, seed)
Clone the configuration and the associated source maps
2.200399
2.20903
0.996093
gta.load_roi('sim_baseline_%06i.npy' % current_seed)
gta.set_random_seed(seed)
gta.simulate_roi()
if injected_name:
    gta.zero_source(injected_name)

gta.optimize()
gta.find_sources(sqrt_ts_threshold=5.0,
                 search_skydir=gta.roi.skydir,
                 search_minmax_radius=[1.0, np.nan])
gta.optimize()
gta.free_sources(skydir=gta.roi.skydir, distance=1.0, pars='norm')
gta.fit(covar=True)
gta.write_roi('sim_refit_%06i' % current_seed)

for test_source in test_sources:
    test_source_name = test_source['name']
    sedfile = "sed_%s_%06i.fits" % (test_source_name, seed)
    correl_dict, test_src_name = add_source_get_correlated(
        gta, test_source_name, test_source['source_model'],
        correl_thresh=0.25, non_null_src=non_null_src)

    # Write the list of correlated sources
    correl_yaml = os.path.join(gta.workdir,
                               "correl_%s_%06i.yaml" % (test_source_name,
                                                        seed))
    write_yaml(correl_dict, correl_yaml)

    gta.free_sources(False)
    for src_name in correl_dict.keys():
        gta.free_source(src_name, pars='norm')

    gta.sed(test_source_name, prefix=pkey, outfile=sedfile)

    # Set things back to how they were
    gta.delete_source(test_source_name)
    gta.load_xml('sim_refit_%06i' % current_seed)
def _run_simulation(gta, roi_baseline, injected_name, test_sources, current_seed, seed, non_null_src)
Simulate a realization of this analysis
4.171017
4.195541
0.994155
args = self._parser.parse_args(argv)

if not HAVE_ST:
    raise RuntimeError("Trying to run fermipy analysis, but don't have ST")

workdir = os.path.dirname(args.config)
_config_file = self._clone_config_and_srcmaps(args.config, args.seed)

gta = GTAnalysis(_config_file, logging={'verbosity': 3},
                 fileio={'workdir_regex': r'\.xml$|\.npy$'})
gta.load_roi(args.roi_baseline)

simfile = os.path.join(workdir,
                       'sim_%s_%s.yaml' % (args.sim, args.sim_profile))
mcube_file = "%s_%s_%06i" % (args.sim, args.sim_profile, args.seed)
sim_config = utils.load_yaml(simfile)

injected_source = sim_config.get('injected_source', None)
if injected_source is not None:
    src_dict = injected_source['source_model']
    src_dict['ra'] = gta.config['selection']['ra']
    src_dict['dec'] = gta.config['selection']['dec']
    injected_name = injected_source['name']
    gta.add_source(injected_name, src_dict)
    gta.write_model_map(mcube_file)
    mc_spec_dict = dict(true_counts=gta.model_counts_spectrum(injected_name),
                        energies=gta.energies,
                        model=src_dict)
    mcspec_file = os.path.join(workdir,
                               "mcspec_%s_%06i.yaml" % (mcube_file,
                                                        args.seed))
    utils.write_yaml(mc_spec_dict, mcspec_file)
else:
    injected_name = None

gta.write_roi('sim_baseline_%06i' % args.seed)

test_sources = []
for profile in args.profiles:
    profile_path = os.path.join(workdir, 'profile_%s.yaml' % profile)
    test_source = load_yaml(profile_path)
    test_sources.append(test_source)

first = args.seed
last = first + args.nsims
for seed in range(first, last):
    self._run_simulation(gta, args.roi_baseline,
                         injected_name, test_sources,
                         first, seed, non_null_src=args.non_null_src)
def run_analysis(self, argv)
Run this analysis
4.035884
4.036464
0.999856
job_configs = {}

ttype = args['ttype']
(targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
if targets_yaml is None:
    return job_configs

config_yaml = 'config.yaml'
config_override = args.get('config')
if is_not_null(config_override):
    config_yaml = config_override

rand_yaml = NAME_FACTORY.resolve_randconfig(args)

targets = load_yaml(targets_yaml)

base_config = dict(rand_config=rand_yaml)

for target_name in targets.keys():
    name_keys = dict(target_type=ttype,
                     target_name=target_name,
                     sim_name=sim,
                     fullpath=True)
    simdir = NAME_FACTORY.sim_targetdir(**name_keys)
    config_path = os.path.join(simdir, config_yaml)
    outfile = os.path.join(simdir, 'skydirs.yaml')
    logfile = make_nfs_path(outfile.replace('yaml', 'log'))
    job_config = base_config.copy()
    job_config.update(dict(config=config_path,
                           outfile=outfile,
                           logfile=logfile))
    job_configs[target_name] = job_config

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
3.756018
3.742922
1.003499
job_configs = {}

ttype = args['ttype']
(targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
if targets_yaml is None:
    return job_configs

config_yaml = 'config.yaml'
config_override = args.get('config')
if is_not_null(config_override):
    config_yaml = config_override

targets = load_yaml(targets_yaml)
nsims_job = args['nsims_job']
first_seed = args['seed']
nsims = args['nsims']
last_seed = first_seed + nsims

base_config = dict(sim_profile=args['sim_profile'],
                   roi_baseline=args['roi_baseline'],
                   non_null_src=args['non_null_src'],
                   sim=sim)

for target_name, target_list in targets.items():
    name_keys = dict(target_type=ttype,
                     target_name=target_name,
                     sim_name=sim,
                     fullpath=True)
    simdir = NAME_FACTORY.sim_targetdir(**name_keys)
    config_path = os.path.join(simdir, config_yaml)
    job_config = base_config.copy()
    job_config.update(dict(config=config_path, profiles=target_list))

    current_seed = first_seed
    while current_seed < last_seed:
        fullkey = "%s_%06i" % (target_name, current_seed)
        logfile = make_nfs_path(
            os.path.join(simdir, "%s_%s_%06i.log" %
                         (self.linkname, target_name, current_seed)))
        if nsims_job <= 0 or current_seed + nsims_job >= last_seed:
            nsims_current = last_seed - current_seed
        else:
            nsims_current = nsims_job
        job_config.update(dict(seed=current_seed,
                               nsims=nsims_current,
                               logfile=logfile))
        job_configs[fullkey] = job_config.copy()
        current_seed += nsims_current

return job_configs
def build_job_configs(self, args)
Hook to build job configurations
3.120001
3.123372
0.998921
ignore = ['pow', 'log10', 'sqrt', 'max']
branches = []
for k, v in aliases.items():
    tokens = re.sub(r'[()+*/,=<>&!\-|]', ' ', v).split()
    for t in tokens:
        if bool(re.search(r'^\d', t)) or len(t) <= 3:
            continue
        if bool(re.search(r'[a-zA-Z]', t)) and t not in ignore:
            branches += [t]
return list(set(branches))
def get_branches(aliases)
Get unique branch names from an alias dictionary.
3.976238
3.7925
1.048448
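A minimal, self-contained example of get_branches; the alias expression is illustrative, and the call assumes the function above (and its module-level re import) is in scope:

import re

aliases = {'good_evt': '(Tkr1FirstLayer > 5.0) && (CalEnergyRaw > 30.0)'}
print(sorted(get_branches(aliases)))
# ['CalEnergyRaw', 'Tkr1FirstLayer'] -- numbers and short tokens are dropped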
# Input is a single ROOT file
if re.search(r'\.root', txt) is not None:
    c = ROOT.TChain(chain.GetName())
    c.SetDirectory(0)
    c.Add(txt)
    friend_chains.append(c)
    chain.AddFriend(c, rand_str())
    return

# Input is a text file listing ROOT files
files = np.loadtxt(txt, unpack=True, dtype='str')
if files.ndim == 0:
    files = np.array([files])
if nfiles is not None:
    files = files[:nfiles]

print("Loading %i files..." % len(files))

c = ROOT.TChain(chain.GetName())
c.SetDirectory(0)
for f in files:
    c.Add(f)

friend_chains.append(c)
chain.AddFriend(c, rand_str())
return
def load_friend_chains(chain, friend_chains, txt, nfiles=None)
Load a list of trees from a file and add them as friends to the chain.
2.626627
2.574609
1.020204
from fermipy import utils
ebins = None
if 'ENERGIES' in hdulist:
    hdu = hdulist['ENERGIES']
    ectr = hdu.data.field(hdu.columns[0].name)
    ebins = np.exp(utils.center_to_edge(np.log(ectr)))
elif 'EBOUNDS' in hdulist:
    hdu = hdulist['EBOUNDS']
    emin = hdu.data.field('E_MIN') / 1E3
    emax = hdu.data.field('E_MAX') / 1E3
    ebins = np.append(emin, emax[-1])
return ebins
def find_and_read_ebins(hdulist)
Reads and returns the energy bin edges.

This works both for the case where the energies are in the ENERGIES
HDU and the case where they are in the EBOUNDS HDU.
2.408362
2.411111
0.99886
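A toy example exercising the EBOUNDS branch with an in-memory HDU list; the values are illustrative, and the function converts the edges from MeV to GeV:

import numpy as np
from astropy.io import fits

emin = np.array([100., 300., 1000.])    # MeV
emax = np.array([300., 1000., 3000.])   # MeV
cols = [fits.Column('E_MIN', 'D', array=emin),
        fits.Column('E_MAX', 'D', array=emax)]
ebounds = fits.BinTableHDU.from_columns(cols, name='EBOUNDS')
hdulist = fits.HDUList([fits.PrimaryHDU(), ebounds])
print(find_and_read_ebins(hdulist))     # [0.1 0.3 1.  3. ]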
nebins = len(hdu.data)
ebin_edges = np.ndarray((nebins + 1))
try:
    ebin_edges[0:-1] = np.log10(hdu.data.field("E_MIN")) - 3.
    ebin_edges[-1] = np.log10(hdu.data.field("E_MAX")[-1]) - 3.
except KeyError:
    ebin_edges[0:-1] = np.log10(hdu.data.field("energy_MIN"))
    ebin_edges[-1] = np.log10(hdu.data.field("energy_MAX")[-1])
return ebin_edges
def read_energy_bounds(hdu)
Reads and returns the energy bin edges from a FITS HDU
2.071126
2.022384
1.024101
ebins = read_energy_bounds(hdu)
fluxes = np.ndarray((len(ebins)))
try:
    fluxes[0:-1] = hdu.data.field("E_MIN_FL")
    fluxes[-1] = hdu.data.field("E_MAX_FL")[-1]
    npreds = hdu.data.field("NPRED")
except KeyError:
    fluxes = np.ones((len(ebins)))
    npreds = np.ones((len(ebins)))
return ebins, fluxes, npreds
def read_spectral_data(hdu)
Reads and returns the energy bin edges, fluxes, and npreds from a FITS HDU
2.929882
2.494006
1.174769
cols = [fits.Column("Energy", "D", unit='MeV', array=energy_vals)]
hdu = fits.BinTableHDU.from_columns(cols, name=extname)
return hdu
def make_energies_hdu(energy_vals, extname="ENERGIES")
Builds and returns a FITS HDU with the energy values.

Parameters
----------
extname : str
    The HDU extension name
2.598012
3.251746
0.798959
f = fits.open(fitsfile)
nhdu = len(f)

# Try and get the energy bounds
try:
    ebins = find_and_read_ebins(f)
except Exception:
    ebins = None

if extname is None:
    # If there is an image in the Primary HDU we can return a WCS-based
    # projection
    if f[0].header['NAXIS'] != 0:
        proj = WCS(f[0].header)
        return proj, f, f[0]
else:
    if f[extname].header['XTENSION'] == 'IMAGE':
        proj = WCS(f[extname].header)
        return proj, f, f[extname]
    elif extname in ['SKYMAP', 'SKYMAP2']:
        proj = HPX.create_from_hdu(f[extname], ebins)
        return proj, f, f[extname]
    elif f[extname].header['XTENSION'] == 'BINTABLE':
        try:
            if f[extname].header['PIXTYPE'] == 'HEALPIX':
                proj = HPX.create_from_hdu(f[extname], ebins)
                return proj, f, f[extname]
        except KeyError:
            pass
    return None, f, None

# Loop on HDUs and look for either an image or a table with HEALPix data
for i in range(1, nhdu):
    # If there is an image we can return a WCS-based projection
    if f[i].header['XTENSION'] == 'IMAGE':
        proj = WCS(f[i].header)
        return proj, f, f[i]
    elif f[i].header['XTENSION'] == 'BINTABLE':
        if f[i].name in ['SKYMAP', 'SKYMAP2']:
            proj = HPX.create_from_hdu(f[i], ebins)
            return proj, f, f[i]
        try:
            if f[i].header['PIXTYPE'] == 'HEALPIX':
                proj = HPX.create_from_hdu(f[i], ebins)
                return proj, f, f[i]
        except KeyError:
            pass

return None, f, None
def read_projection_from_fits(fitsfile, extname=None)
Load a WCS or HPX projection.
2.204365
2.12091
1.039348
outhdulist = [fits.PrimaryHDU()]
rmlist = []
for i, table in enumerate(tablelist):
    ft_name = "%s._%i" % (filepath, i)
    rmlist.append(ft_name)
    try:
        os.unlink(ft_name)
    except OSError:
        pass
    table.write(ft_name, format="fits")
    ft_in = fits.open(ft_name)
    if namelist:
        ft_in[1].name = namelist[i]
    if cardslist:
        for k, v in cardslist[i].items():
            ft_in[1].header[k] = v
    ft_in[1].update()
    outhdulist += [ft_in[1]]

if hdu_list is not None:
    for h in hdu_list:
        outhdulist.append(h)

fits.HDUList(outhdulist).writeto(filepath, overwrite=clobber)
for rm in rmlist:
    os.unlink(rm)
def write_tables_to_fits(filepath, tablelist, clobber=False, namelist=None, cardslist=None, hdu_list=None)
Write a list of astropy.table.Table objects to a single FITS file
2.163943
2.189069
0.988522
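A short usage sketch with toy tables; the file name and HDU names are illustrative:

from astropy.table import Table

t1 = Table({'energy': [100., 300.]})
t2 = Table({'flux': [1e-9, 2e-9]})
write_tables_to_fits('bundle.fits', [t1, t2], clobber=True,
                     namelist=['ENERGIES', 'FLUXES'])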
options_str = []

for i, (k, v) in enumerate(sorted(options_dict.items())):
    option_str = ''
    if i == 0:
        option_str += '%s : %s\n' % (k, v[2].__name__)
    else:
        option_str += ' ' * 8 + '%s : %s\n' % (k, v[2].__name__)

    option_doc = v[1]
    option_doc += ' (default : %s)' % v[0]
    option_doc = textwrap.wrap(option_doc, 72 - 12)
    option_str += ' ' * 12 + ('\n' + ' ' * 12).join(option_doc)
    options_str += [option_str]

options_str = '\n\n'.join(options_str)
return docstring.format(options=options_str)
def update_docstring(docstring, options_dict)
Update a method docstring by inserting option docstrings defined in
the options dictionary.  The input docstring should define `{options}`
at the location where the options docstring block should be inserted.

Parameters
----------
docstring : str
    Existing method docstring.

options_dict : dict
    Dictionary defining the options that will be appended to the
    method docstring.  Dictionary keys are mapped to option names and
    each element of the dictionary should have the format
    (default value, docstring, type).

Returns
-------
docstring : str
    Updated method docstring.
2.240639
2.236698
1.001762
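A minimal example of the expected inputs, assuming update_docstring (and its module-level textwrap import) is in scope; the option entries below are illustrative and follow the (default value, docstring, type) convention described above:

options_dict = {
    'binsz': (0.1, 'Image pixel size in degrees.', float),
    'roiwidth': (10.0, 'Width of the region of interest.', float),
}
template = '''Make a map.

    Parameters
    ----------
    {options}
    '''
print(update_docstring(template, options_dict))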
islice = slice(imin, imax)
o = np.zeros(m[islice, ...].shape)
ix = int(cpix[0])
iy = int(cpix[1])

# Loop over energy
for i in range(m[islice, ...].shape[0]):
    ks = k[islice, ...][i, ...]
    ms = m[islice, ...][i, ...]
    mx = ks[ix, :] > ks[ix, iy] * threshold
    my = ks[:, iy] > ks[ix, iy] * threshold
    nx = int(max(3, np.round(np.sum(mx) / 2.)))
    ny = int(max(3, np.round(np.sum(my) / 2.)))

    # Ensure that there is an odd number of pixels in the kernel array
    if ix + nx + 1 >= ms.shape[0] or ix - nx < 0:
        nx -= 1
        ny -= 1

    sx = slice(ix - nx, ix + nx + 1)
    sy = slice(iy - ny, iy + ny + 1)
    ks = ks[sx, sy]

    o[i, ...] = scipy.signal.fftconvolve(ms, ks, mode='same')

    if wmap is not None:
        o[i, ...] *= wmap[islice, ...][i, ...]

return o
def convolve_map(m, k, cpix, threshold=0.001, imin=0, imax=None, wmap=None)
Perform an energy-dependent convolution on a sequence of 2-D spatial maps.

Parameters
----------
m : `~numpy.ndarray`
    3-D map containing a sequence of 2-D spatial maps.  First
    dimension should be energy.

k : `~numpy.ndarray`
    3-D map containing a sequence of convolution kernels (PSF) for
    each slice in m.  This map should have the same dimension as m.

cpix : list
    Indices of kernel reference pixel in the two spatial dimensions.

threshold : float
    Kernel amplitude threshold, relative to the peak, below which the
    kernel is cropped.

imin : int
    Minimum index in energy dimension.

imax : int
    Maximum index in energy dimension.

wmap : `~numpy.ndarray`
    3-D map containing a sequence of 2-D spatial maps of weights.
    First dimension should be energy.  This map should have the same
    dimension as m.
2.457592
2.389348
1.028562
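A quick self-check of convolve_map with a delta-function kernel, which should reproduce the input map; the shapes are toy values, and the call assumes the function above (and its numpy/scipy imports) is in scope:

import numpy as np
import scipy.signal

m = np.random.rand(2, 65, 65)      # 2 energy planes
k = np.zeros_like(m)
k[:, 32, 32] = 1.0                 # delta-function PSF at the reference pixel
out = convolve_map(m, k, cpix=[32, 32])
assert np.allclose(out, m, atol=1e-9)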
islice = slice(imin, imax)
o = np.zeros(m.data.shape)
nside = m.geom.nside
nest = m.geom.nest

# Loop over energy
for i, ms in enumerate(m.data[islice, ...]):
    sigma = sigmas[islice][i]
    # Need to be in RING scheme
    if nest:
        ms = hp.pixelfunc.reorder(ms, n2r=True)
    o[islice, ...][i] = hp.sphtfunc.smoothing(ms, sigma=sigma)
    if nest:
        o[islice, ...][i] = hp.pixelfunc.reorder(o[islice, ...][i], r2n=True)
    if wmap is not None:
        o[islice, ...][i] *= wmap.data[islice, ...][i]

return HpxNDMap(m.geom, o)
def convolve_map_hpx_gauss(m, sigmas, imin=0, imax=None, wmap=None)
Perform an energy-dependent convolution on a sequence of 2-D spatial maps.

Parameters
----------
m : `HpxMap`
    2-D map containing a sequence of 1-D HEALPix maps.  First
    dimension should be energy.

sigmas : `~numpy.ndarray`
    1-D array containing a sequence of Gaussian widths for smoothing.

imin : int
    Minimum index in energy dimension.

imax : int
    Maximum index in energy dimension.

wmap : `HpxMap`
    2-D map containing a sequence of 1-D HEALPix maps of weights.
    First dimension should be energy.  This map should have the same
    dimension as m.
2.889936
2.938101
0.983607
sm = []
zs = 0
for c in gta.components:
    z = c.model_counts_map(name).data.astype('float')
    if kernel is not None:
        shape = (z.shape[0],) + kernel.shape
        z = np.apply_over_axes(np.sum, z, axes=[1, 2]) * np.ones(shape) * \
            kernel[np.newaxis, :, :]
        zs += np.sum(z)
    else:
        zs += np.sum(z)
    sm.append(z)

sm2 = 0
for i, m in enumerate(sm):
    sm[i] /= zs
    sm2 += np.sum(sm[i] ** 2)

for i, m in enumerate(sm):
    sm[i] /= sm2

return sm
def get_source_kernel(gta, name, kernel=None)
Get the PDF for the given source.
3.271206
3.233515
1.011656
timer = Timer.create(start=True)
self.logger.info('Generating residual maps')

schema = ConfigSchema(self.defaults['residmap'])
config = schema.create_config(self.config['residmap'], **kwargs)

# Defining default properties of test source model
config['model'].setdefault('Index', 2.0)
config['model'].setdefault('SpectrumType', 'PowerLaw')
config['model'].setdefault('SpatialModel', 'PointSource')
config['model'].setdefault('Prefactor', 1E-13)

o = self._make_residual_map(prefix, **config)

if config['make_plots']:
    plotter = plotting.AnalysisPlotter(self.config['plotting'],
                                       fileio=self.config['fileio'],
                                       logging=self.config['logging'])
    plotter.make_residmap_plots(o, self.roi)

self.logger.info('Finished residual maps')

outfile = utils.format_filename(self.workdir, 'residmap',
                                prefix=[o['name']])

if config['write_fits']:
    o['file'] = os.path.basename(outfile) + '.fits'
    self._make_residmap_fits(o, outfile + '.fits')

if config['write_npy']:
    np.save(outfile + '.npy', o)

self.logger.info('Execution time: %.2f s', timer.elapsed_time)
return o
def residmap(self, prefix='', **kwargs)
Generate 2-D spatial residual maps using the current ROI model and
the convolution kernel defined with the `model` argument.

Parameters
----------
prefix : str
    String that will be prefixed to the output residual map files.

{options}

Returns
-------
maps : dict
    A dictionary containing the `~fermipy.utils.Map` objects for the
    residual significance and amplitude.
4.726853
4.637395
1.019291
if appname in LinkFactory._class_dict:
    return LinkFactory._class_dict[appname].create(**kwargs)
else:
    raise KeyError(
        "Could not create object associated to app %s" % appname)
def create(appname, **kwargs)
Create a `Link` of a particular class, using the kwargs as options
5.646931
4.785776
1.179941
comp_file = args.get('comp', None)
datafile = args.get('data', None)
if is_null(comp_file):
    return
if is_null(datafile):
    return
NAME_FACTORY.update_base_dict(datafile)

outdir = args.get('outdir', None)
outkey = args.get('outkey', None)
ft1file = args['ft1file']
if is_null(outdir) or is_null(outkey):
    return
pfiles = os.path.join(outdir, outkey)

self.comp_dict = yaml.safe_load(open(comp_file))
coordsys = self.comp_dict.pop('coordsys')

full_out_dir = make_nfs_path(os.path.join(outdir, outkey))

for key_e, comp_e in sorted(self.comp_dict.items()):
    emin = math.pow(10., comp_e['log_emin'])
    emax = math.pow(10., comp_e['log_emax'])
    enumbins = comp_e['enumbins']
    zmax = comp_e['zmax']
    zcut = "zmax%i" % comp_e['zmax']
    evclassstr = NAME_FACTORY.base_dict['evclass']

    kwargs_select = dict(zcut=zcut,
                         ebin=key_e,
                         psftype='ALL',
                         coordsys=coordsys,
                         mktime='none')
    selectfile_energy = make_full_path(outdir, outkey,
                                       NAME_FACTORY.select(**kwargs_select))
    linkname = 'select-energy-%s-%s' % (key_e, zcut)
    self._set_link(linkname, Gtlink_select,
                   infile=ft1file,
                   outfile=selectfile_energy,
                   zmax=zmax,
                   emin=emin,
                   emax=emax,
                   evclass=NAME_FACTORY.evclassmask(evclassstr),
                   pfiles=pfiles,
                   logfile=os.path.join(full_out_dir, "%s.log" % linkname))

    if 'evtclasses' in comp_e:
        evtclasslist_vals = comp_e['evtclasses']
    else:
        evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]

    for evtclassval in evtclasslist_vals:
        for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
            linkname_select = 'select-type-%s-%s-%s-%s' % (
                key_e, zcut, evtclassval, psf_type)
            linkname_bin = 'bin-%s-%s-%s-%s' % (
                key_e, zcut, evtclassval, psf_type)
            hpx_order = psf_dict['hpx_order']
            kwargs_bin = kwargs_select.copy()
            kwargs_bin['psftype'] = psf_type
            selectfile_psf = make_full_path(
                outdir, outkey, NAME_FACTORY.select(**kwargs_bin))
            binfile = make_full_path(outdir, outkey,
                                     NAME_FACTORY.ccube(**kwargs_bin))
            self._set_link(linkname_select, Gtlink_select,
                           infile=selectfile_energy,
                           outfile=selectfile_psf,
                           zmax=zmax,
                           emin=emin,
                           emax=emax,
                           evtype=EVT_TYPE_DICT[psf_type],
                           evclass=NAME_FACTORY.evclassmask(evtclassval),
                           pfiles=pfiles,
                           logfile=os.path.join(full_out_dir,
                                                "%s.log" % linkname_select))
            self._set_link(linkname_bin, Gtlink_bin,
                           coordsys=coordsys,
                           hpx_order=hpx_order,
                           evfile=selectfile_psf,
                           outfile=binfile,
                           emin=emin,
                           emax=emax,
                           enumbins=enumbins,
                           pfiles=pfiles,
                           logfile=os.path.join(full_out_dir,
                                                "%s.log" % linkname_bin))
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links
2.785696
2.73806
1.017398
data = args.get('data')
comp = args.get('comp')
ft1file = args.get('ft1file')
scratch = args.get('scratch', None)
dry_run = args.get('dry_run', None)

self._set_link('split-and-bin', SplitAndBin_SG,
               comp=comp, data=data,
               hpx_order_max=args.get('hpx_order_ccube', 9),
               ft1file=ft1file,
               scratch=scratch,
               dry_run=dry_run)
self._set_link('coadd-split', CoaddSplit_SG,
               comp=comp, data=data,
               ft1file=ft1file)
self._set_link('expcube2', Gtexpcube2_SG,
               comp=comp, data=data,
               hpx_order_max=args.get('hpx_order_expcube', 5),
               dry_run=dry_run)
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links
4.044626
3.979566
1.016348
for k, v in aDict.items():
    if v is None:
        aDict[k] = 'none'
def _replace_none(self, aDict)
Replace all None values in a dict with 'none'
2.684974
2.081424
1.28997
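The behavior is easiest to see on a small dict; a sketch of the in-place replacement, assuming nf is a NameFactory instance:

d = {'sourcekey': 'ptsrc', 'irf_ver': None}
nf._replace_none(d)
assert d == {'sourcekey': 'ptsrc', 'irf_ver': 'none'}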
dsval = kwargs.get('dataset', self.dataset(**kwargs))
tokens = dsval.split('_')
irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0],
                                                       tokens[1])],
                         EVCLASS_NAME_DICTIONARY[tokens[3]],
                         kwargs.get('irf_ver'))
return irf_name
def irfs(self, **kwargs)
Get the name of the IRFs associated with a particular dataset
6.592806
6.014093
1.096226
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
    return NameFactory.dataset_format.format(**kwargs_copy)
except KeyError:
    return None
def dataset(self, **kwargs)
Return a key that specifies the data selection
5.83026
5.452371
1.069307
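Nearly all of the NameFactory accessors in the records that follow share one pattern: merge base_dict with per-call overrides, map None to 'none', and fill a class-level format string. A standalone sketch of that pattern (the template and keys here are illustrative, not the real fermipy templates):

class MiniFactory(object):
    dataset_format = '{data_pass}_{data_time}_{evclass}_{zcut}'

    def __init__(self, **base):
        self.base_dict = base

    def dataset(self, **kwargs):
        # Per-call overrides win over the stored defaults
        kwargs_copy = self.base_dict.copy()
        kwargs_copy.update(**kwargs)
        try:
            return MiniFactory.dataset_format.format(**kwargs_copy)
        except KeyError:
            return None

mf = MiniFactory(data_pass='P8', data_time='8years',
                 evclass='source', zcut='zmax105')
print(mf.dataset(zcut='zmax100'))   # P8_8years_source_zmax100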
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
    return NameFactory.component_format.format(**kwargs_copy)
except KeyError:
    return None
def component(self, **kwargs)
Return a key that specifies the data sub-selection
6.21059
6.074118
1.022468
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
    return NameFactory.sourcekey_format.format(**kwargs_copy)
except KeyError:
    return None
def sourcekey(self, **kwargs)
Return a key that specifies the name and version of a source or component
5.680584
5.583528
1.017383
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
    return NameFactory.galprop_ringkey_format.format(**kwargs_copy)
except KeyError:
    return None
def galprop_ringkey(self, **kwargs)
return the sourcekey for galprop input maps : specifies the component and ring
5.410459
5.295857
1.02164
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
    return NameFactory.galprop_sourcekey_format.format(**kwargs_copy)
except KeyError:
    return None
def galprop_sourcekey(self, **kwargs)
return the sourcekey for merged galprop maps : specifies the merged component and merging scheme
5.273317
5.309855
0.993119
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
    return NameFactory.merged_sourcekey_format.format(**kwargs_copy)
except KeyError:
    return None
def merged_sourcekey(self, **kwargs)
return the sourcekey for merged sets of point sources : specifies the catalog and merging rule
5.319647
5.487777
0.969363
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.galprop_gasmap_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def galprop_gasmap(self, **kwargs)
return the file name for Galprop input gasmaps
5.438329
5.165104
1.052898
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.merged_gasmap_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def merged_gasmap(self, **kwargs)
return the file name for Galprop merged gasmaps
5.56413
5.225203
1.064864
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.diffuse_template_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def diffuse_template(self, **kwargs)
return the file name for other diffuse map templates
5.58671
5.334046
1.047368
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = NameFactory.spectral_template_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def spectral_template(self, **kwargs)
return the file name for spectral templates
5.428024
4.853129
1.118459
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = NameFactory.srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def srcmdl_xml(self, **kwargs)
return the file name for source model xml files
5.015269
4.749602
1.055935
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.nested_srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def nested_srcmdl_xml(self, **kwargs)
return the file name for source model xml files of nested sources
5.12751
4.868155
1.053276
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ft1file_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def ft1file(self, **kwargs)
return the name of the input ft1 file list
4.99646
4.904219
1.018809
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['data_time'] = kwargs.get('data_time', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ft2file_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def ft2file(self, **kwargs)
return the name of the input ft2 file list
5.649732
5.558366
1.016437
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
localpath = NameFactory.ltcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def ltcube(self, **kwargs)
return the name of a livetime cube file
5.217741
4.778446
1.091932
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.select_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def select(self, **kwargs)
return the name of a selected events ft1file
4.629521
4.539134
1.019913
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.mktime_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def mktime(self, **kwargs)
return the name of a mktime-filtered events ft1file
4.643551
4.502839
1.03125
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
localpath = NameFactory.ccube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def ccube(self, **kwargs)
return the name of a counts cube file
4.166788
3.936048
1.058622
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.bexpcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def bexpcube(self, **kwargs)
return the name of a binned exposure cube file
4.479274
4.284448
1.045473
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.srcmaps_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def srcmaps(self, **kwargs)
return the name of a source map file
4.433913
4.416672
1.003904
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.mcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def mcube(self, **kwargs)
return the name of a model cube file
4.437266
4.305578
1.030585
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ltcubesun_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def ltcube_sun(self, **kwargs)
return the name of a livetime cube file for the Sun
6.012344
5.574208
1.078601
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ltcubemoon_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def ltcube_moon(self, **kwargs)
return the name of a livetime cube file for the Moon
6.091589
5.725101
1.064014
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.bexpcubesun_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def bexpcube_sun(self, **kwargs)
return the name of a binned exposure cube file for the Sun
4.884633
4.645716
1.051427
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.bexpcubemoon_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def bexpcube_moon(self, **kwargs)
return the name of a binned exposure cube file for the Moon
5.045954
4.878186
1.034391
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.angprofile_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def angprofile(self, **kwargs)
return the file name for sun or moon angular profiles
5.778082
5.373333
1.075326
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def template_sunmoon(self, **kwargs)
return the file name for sun or moon template files
4.807616
4.735126
1.015309
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
localpath = NameFactory.residual_cr_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def residual_cr(self, **kwargs)
Return the name of the residual CR analysis output files
4.449659
4.090839
1.087713
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.galprop_rings_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def galprop_rings_yaml(self, **kwargs)
return the name of a galprop rings merging yaml file
5.171557
5.029419
1.028261
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.catalog_split_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def catalog_split_yaml(self, **kwargs)
return the name of a catalog split yaml file
5.189786
4.87978
1.063529
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.model_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def model_yaml(self, **kwargs)
return the name of a model yaml file
5.518291
5.156744
1.070111
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.merged_srcmaps_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def merged_srcmaps(self, **kwargs)
return the name of a source map file
4.501946
4.446039
1.012575
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.master_srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def master_srcmdl_xml(self, **kwargs)
return the name of a source model file
5.091492
5.12796
0.992888
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.comp_srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
    return self.fullpath(localpath=localpath)
return localpath
def comp_srcmdl_xml(self, **kwargs)
return the name of a source model file
4.257224
4.194346
1.014991
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
return NameFactory.fullpath_format.format(**kwargs_copy)
def fullpath(self, **kwargs)
Return a full path name for a given file
6.481748
6.190708
1.047012
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get('component', self.component(**kwargs))
self._replace_none(kwargs_copy)
return input_string.format(**kwargs_copy)
def generic(self, input_string, **kwargs)
return a generic filename for a given dataset and component
3.833001
3.298757
1.161953
out_dict = dict(ft1file=self.ft1file(**kwargs),
                ltcube=self.ltcube(**kwargs),
                ccube=self.ccube(**kwargs),
                bexpcube=self.bexpcube(**kwargs),
                srcmaps=self.srcmaps(**kwargs),
                mcube=self.mcube(**kwargs))
return out_dict
def make_filenames(self, **kwargs)
Make a dictionary of filenames for various types
4.153132
4.191762
0.990784
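A hedged usage sketch, assuming NAME_FACTORY is a fully configured NameFactory; the keyword arguments a real call needs depend on the format strings behind the accessors above:

files = NAME_FACTORY.make_filenames(fullpath=True)
# files['ft1file'], files['ltcube'], files['ccube'], files['bexpcube'],
# files['srcmaps'] and files['mcube'] hold the derived file names.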
data = args.get('data')
comp = args.get('comp')
library = args.get('library')
dry_run = args.get('dry_run', False)

self._set_link('sum-rings', SumRings_SG,
               library=library,
               outdir=args['outdir'],
               dry_run=dry_run)
self._set_link('srcmaps-diffuse', SrcmapsDiffuse_SG,
               comp=comp, data=data,
               library=library,
               make_xml=args['make_xml'],
               dry_run=dry_run)
self._set_link('vstack-diffuse', Vstack_SG,
               comp=comp, data=data,
               library=library,
               dry_run=dry_run)
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links
4.146354
3.976834
1.042627
data = args.get('data')
comp = args.get('comp')
library = args.get('library')
dry_run = args.get('dry_run', False)

self._set_link('srcmaps-catalog', SrcmapsCatalog_SG,
               comp=comp, data=data,
               library=library,
               nsrc=args.get('nsrc', 500),
               dry_run=dry_run)
self._set_link('gather-srcmaps', GatherSrcmaps_SG,
               comp=comp, data=data,
               library=library,
               dry_run=dry_run)
self._set_link('merge-srcmaps', MergeSrcmaps_SG,
               comp=comp, data=data,
               library=library,
               dry_run=dry_run)
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links
3.333812
3.259785
1.022709
config_yaml = args['config']
config_dict = load_yaml(config_yaml)

dry_run = args.get('dry_run', False)
data = config_dict.get('data')
comp = config_dict.get('comp')
library = config_dict.get('library')
models = config_dict.get('models')
scratch = config_dict.get('scratch')

self._set_link('prepare', SplitAndBinChain,
               comp=comp, data=data,
               ft1file=config_dict.get('ft1file'),
               hpx_order_ccube=config_dict.get('hpx_order_ccube'),
               hpx_order_expcube=config_dict.get('hpx_order_expcube'),
               scratch=scratch,
               dry_run=dry_run)
self._set_link('diffuse-comp', DiffuseCompChain,
               comp=comp, data=data,
               library=library,
               make_xml=config_dict.get('make_diffuse_comp_xml', False),
               outdir=config_dict.get('merged_gasmap_dir', 'merged_gasmap'),
               dry_run=dry_run)
self._set_link('catalog-comp', CatalogCompChain,
               comp=comp, data=data,
               library=library,
               make_xml=config_dict.get('make_catalog_comp_xml', False),
               nsrc=config_dict.get('catalog_nsrc', 500),
               dry_run=dry_run)
self._set_link('assemble-model', AssembleModelChain,
               comp=comp, data=data,
               library=library,
               models=models,
               hpx_order=config_dict.get('hpx_order_fitting'),
               dry_run=dry_run)
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links
3.064277
3.05764
1.002171
if fn is None:
    fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type))

if spectrum_type == 'PiecewisePowerLaw':
    build_piecewise_powerlaw(fn, spectral_pars)

for k, v in spectral_pars.items():
    v.setdefault('scale', 1.0)
    v.setdefault('min', v['value'] * 1E-3)
    v.setdefault('max', v['value'] * 1E3)

    par = fn.getParam(str(k))
    vmin = min(float(v['value']), float(v['min']))
    vmax = max(float(v['value']), float(v['max']))

    par.setValue(float(v['value']))
    par.setBounds(vmin, vmax)
    par.setScale(float(v['scale']))

    if 'free' in v and int(v['free']) != 0:
        par.setFree(True)
    else:
        par.setFree(False)
    fn.setParam(par)

return fn
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None)
Create a Function object from a parameter dictionary.

Parameters
----------
spectrum_type : str
    String identifying the spectrum type (e.g. PowerLaw).

spectral_pars : dict
    Dictionary of spectral parameters.
3.191875
3.170016
1.006895
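An illustrative parameter dictionary for a PowerLaw; the values are arbitrary, and building the actual Function object requires the Fermi ScienceTools pyLikelihood runtime:

spectral_pars = {
    'Prefactor': {'value': 1e-13, 'scale': 1.0, 'free': 1},
    'Index': {'value': 2.0, 'min': 0.0, 'max': 5.0, 'free': 1},
    'Scale': {'value': 1000.0, 'free': 0},
}
fn = create_spectrum_from_dict('PowerLaw', spectral_pars)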
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
d = dict(spectrum_type=spectrum.genericName())
for p in parameters:
    pname = p.getName()
    pval = p.getTrueValue()
    perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
    d[pname] = np.array([pval, perr])

if d['spectrum_type'] == 'FileFunction':
    ff = pyLike.FileFunction_cast(spectrum)
    d['file'] = ff.filename()

return d
def gtlike_spectrum_to_dict(spectrum)
Convert a pyLikelihood object to a python dictionary which can be easily saved to a file.
5.139487
5.065882
1.01453
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
npar = max(parameters.size(), 10)
o = {'param_names': np.zeros(npar, dtype='S32'),
     'param_values': np.empty(npar, dtype=float) * np.nan,
     'param_errors': np.empty(npar, dtype=float) * np.nan}

for i, p in enumerate(parameters):
    o['param_names'][i] = p.getName()
    o['param_values'][i] = p.getTrueValue()
    perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
    o['param_errors'][i] = perr

return o
def gtlike_spectrum_to_vectors(spectrum)
Convert a pyLikelihood object to a dictionary of fixed-size parameter name, value, and error vectors which can be easily saved to a file.
2.981294
2.837317
1.050744
pars = []
par_names = pyLike.StringVector()
fn.getParamNames(par_names)

for pname in par_names:
    par = fn.getParam(pname)
    bounds = par.getBounds()
    perr = par.error() if par.isFree() else np.nan
    pars += [dict(name=pname,
                  value=par.getValue(),
                  error=perr,
                  min=bounds[0],
                  max=bounds[1],
                  free=par.isFree(),
                  scale=par.getScale())]

return pars
def get_function_pars(fn)
Extract the parameters of a pyLikelihood function object
(value, scale, bounds).

Parameters
----------
fn : pyLikelihood.Function

Returns
-------
pars : list
3.319301
3.377923
0.982646
npar = len(like.params())
vals = np.ones(npar)
errs = np.ones(npar)
has_prior = np.array([False] * npar)

for i, p in enumerate(like.params()):
    prior = like[i].log_prior()
    if prior is None:
        continue

    par_names = pyLike.StringVector()
    prior.getParamNames(par_names)

    if 'Mean' not in par_names:
        raise Exception('Failed to find Mean in prior parameters.')
    if 'Sigma' not in par_names:
        raise Exception('Failed to find Sigma in prior parameters.')

    for t in par_names:
        if t == 'Mean':
            vals[i] = prior.parameter(t).getValue()
        if t == 'Sigma':
            errs[i] = prior.parameter(t).getValue()

    has_prior[i] = True

return vals, errs, has_prior
def get_priors(like)
Extract priors from a likelihood object.
2.907206
2.841166
1.023244
fnmap = src.getSrcFuncs()
keys = fnmap.keys()

if 'Position' in keys:
    ppars = get_function_pars(src.getSrcFuncs()[str('Position')])
elif 'SpatialDist' in keys:
    ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')])
else:
    raise Exception('Failed to extract spatial parameters.')

fn = src.getSrcFuncs()[str('Spectrum')]
spars = get_function_pars(fn)

for i, p in enumerate(ppars):
    ppars[i]['is_norm'] = False

for i, p in enumerate(spars):
    if fn.normPar().getName() == p['name']:
        spars[i]['is_norm'] = True
    else:
        spars[i]['is_norm'] = False

return spars, ppars
def get_source_pars(src)
Extract the parameters associated with a pyLikelihood Source object.
3.393913
3.211129
1.056922
nF = 0
pars = self.params()
for par in pars:
    if par.isFree():
        nF += 1
return nF
def nFreeParams(self)
Count the number of free parameters in the active model.
3.343074
3.015517
1.108624
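The loop above is just a count of free parameters; a compact equivalent (sketch):

def nFreeParams(self):
    """Count the number of free parameters in the active model."""
    return sum(p.isFree() for p in self.params())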
saved_state = LikelihoodState(self)

if verbosity > 0:
    print("*** Start Ts_dl ***")

source_attributes = self.getExtraSourceAttributes()
self.logLike.syncParams()
src = self.logLike.getSource(srcName)
self._ts_src = src
freeParams = pyLike.DoubleVector()
self.logLike.getFreeParamValues(freeParams)
logLike1 = self.logLike.value()
self.scaleSource(srcName, 1E-10)
logLike0 = self.logLike.value()

if tol is None:
    tol = self.tol

if reoptimize:
    if verbosity > 0:
        print("** Do reoptimize")
    optFactory = pyLike.OptimizerFactory_instance()
    myOpt = optFactory.create(self.optimizer, self.logLike)
    Niter = 1
    while Niter <= MaxIterations:
        try:
            myOpt.find_min(0, tol)
            break
        except RuntimeError as e:
            print(e)
        if verbosity > 0:
            print("** Iteration :", Niter)
        Niter += 1
else:
    if approx:
        try:
            self._renorm()
        except ZeroDivisionError:
            pass

self.logLike.syncParams()
logLike0 = max(self.logLike.value(), logLike0)
Ts_value = 2 * (logLike1 - logLike0)

self.scaleSource(srcName, 1E10)
self.logLike.setFreeParamValues(freeParams)
self.model = SourceModel(self.logLike)

for src in source_attributes:
    self.model[src].__dict__.update(source_attributes[src])

saved_state.restore()
self.logLike.value()
return Ts_value
def Ts2(self, srcName, reoptimize=False, approx=True, tol=None, MaxIterations=10, verbosity=0)
Computes the TS value for a source indicated by "srcName."

If "reoptimize=True" is selected this function will reoptimize the
model up to "MaxIterations" given the tolerance "tol" (the default is
the tolerance selected for the overall fit).  If "approx=True" is
selected (the default) it will renormalize the model (see _renorm).
4.009394
3.96013
1.01244
logfile = job_config.get('logfile',
                         "%s_%s_%s.log" % (cls.default_prefix_logfile,
                                           linkname, key))
job_config['logfile'] = logfile
def _make_scatter_logfile_name(cls, key, linkname, job_config)
Hook to insert the name of a logfile into the input config
4.577169
3.998481
1.144727
linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default)

# Don't use setdefault because we don't want to build a JobArchive
# unless it is needed
job_archive = kwargs.get('job_archive', None)
if job_archive is None:
    job_archive = JobArchive.build_temp_job_archive()
kwargs.setdefault('job_archive', job_archive)

kwargs_client = dict(linkname=linkname,
                     link_prefix=kwargs.get('link_prefix', ''),
                     file_stage=kwargs.get('file_stage', None),
                     job_archive=job_archive)
link = cls.clientclass.create(**kwargs_client)
sg = cls(link, **kwargs)
return sg
def create(cls, **kwargs)
Build and return a `ScatterGather` object
4.899405
4.768774
1.027393
self.files.file_dict.clear()
self.sub_files.file_dict.clear()
self.files.latch_file_info(self.args)
self._scatter_link._update_sub_file_dict(self.sub_files)
def _latch_file_info(self)
Internal function to update the dictionaries keeping track of input and output files
7.666488
6.252975
1.226054
status_vect = JobStatusVector()
for job_key, job_details in link.jobs.items():
    if job_key.find(JobDetails.topkey) >= 0:
        continue
    job_details.status = self._interface.check_job(job_details)
    if job_details.status == JobStatus.pending:
        if fail_pending:
            job_details.status = JobStatus.failed
    elif job_details.status == JobStatus.running:
        if fail_running:
            job_details.status = JobStatus.failed
    status_vect[job_details.status] += 1
    link.jobs[job_key] = job_details
    link._set_status_self(job_details.jobkey, job_details.status)

return status_vect
def _check_link_completion(self, link, fail_pending=False, fail_running=False)
Internal function to check the completion of all the dispatched jobs

Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states.
3.027832
2.704639
1.119496
if self.args['dry_run']:
    status = JobStatus.unknown
else:
    status = JobStatus.not_ready

base_config = self.scatter_link.args
for jobkey, job_config in sorted(self._job_configs.items()):
    full_job_config = base_config.copy()
    full_job_config.update(job_config)
    ScatterGather._make_scatter_logfile_name(jobkey, self.linkname,
                                             full_job_config)
    logfile = full_job_config.get('logfile')
    self._scatter_link._register_job(key=jobkey,
                                     job_config=full_job_config,
                                     logfile=logfile,
                                     status=status)
def _build_job_dict(self)
Build a dictionary of `JobDetails` objects for the internal `Link`
5.328337
5.134775
1.037696
if resubmit_failed:
    self.args['action'] = 'resubmit'
argv = self._make_argv()
if dry_run:
    argv.append('--dry_run')
self._invoke(argv, stream, resubmit_failed=resubmit_failed)
def _run_link(self, stream=sys.stdout, dry_run=False, stage_files=True, resubmit_failed=False)
Internal function that actually runs this link.

This checks if input and output files are present.  If input files
are missing and dry_run is False this will raise `OSError`.  If all
output files are present this will skip execution.

Parameters
----------
stream : `file`
    Stream that this `Link` will print to, must have a 'write'
    function.

dry_run : bool
    Print command but do not run it.

stage_files : bool
    Stage files to and from the scratch area.

resubmit_failed : bool
    Resubmit failed jobs.
3.653907
4.546087
0.803748
args = self._run_argparser(argv)

if args.action not in ACTIONS:
    sys.stderr.write(
        "Unrecognized action %s, options are %s\n" % (args.action, ACTIONS))
    # Bail out here so status_vect is never referenced unbound below
    return JobStatus.no_job

if args.action == 'skip':
    return JobStatus.no_job
elif args.action in ['run', 'resubmit', 'check_status', 'config']:
    self._job_configs = self.build_job_configs(args.__dict__)

self._interface._dry_run = args.dry_run

if args.action == 'run':
    status_vect = self.run_jobs(stream, resubmit_failed=resubmit_failed)
elif args.action == 'resubmit':
    status_vect = self.resubmit(stream, resubmit_failed=resubmit_failed)
elif args.action == 'check_status':
    self._build_job_dict()
    status_vect = self.check_status(stream)
elif args.action == 'config':
    self._build_job_dict()
    status_vect = JobStatusVector()
    status_vect[JobStatus.done] += 1

return status_vect
def _invoke(self, argv, stream=sys.stdout, resubmit_failed=False)
Invoke this object to perform a particular action

Parameters
----------
argv : list
    List of command line arguments, passed to helper classes.

stream : `file`
    Stream that this function will print to, must have a 'write' function.

resubmit_failed : bool
    Resubmit failed jobs.

Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states.
3.162926
2.92626
1.080876
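The branching above is driven entirely by the parsed `--action` flag. A minimal self-contained sketch of the same argv-driven dispatch (the parser, handlers, and defaults are stand-ins, not the fermipy implementation):

# Minimal argv-driven dispatch; ACTIONS and the handlers are stand-ins.
import argparse

ACTIONS = ['run', 'resubmit', 'check_status', 'config', 'skip']

parser = argparse.ArgumentParser()
parser.add_argument('--action', default='run', choices=ACTIONS)
parser.add_argument('--dry_run', action='store_true')
args = parser.parse_args(['--action', 'check_status'])

if args.action == 'skip':
    print('nothing to do')
else:
    print('dispatching %s, dry_run=%s' % (args.action, args.dry_run))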
self.args = extract_arguments(override_args, self.args)
self._job_configs = self.build_job_configs(self.args)
if not self._scatter_link.jobs:
    self._build_job_dict()
self._latch_file_info()
def update_args(self, override_args)
Update the arguments used to invoke the application

Note that this will also update the dictionary of input and output files.

Parameters
----------
override_args : dict
    Dictionary of arguments to override the current values
9.182618
10.194831
0.900713
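A hedged usage sketch: `sg` stands for a `ScatterGather` instance built elsewhere, and the override keys are illustrative, since the valid keys depend on the concrete link.

# Illustrative only: rebuild job configs with overridden arguments.
sg.update_args(dict(dry_run=True, job_check_sleep=60))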
if recursive:
    self._scatter_link.clear_jobs(recursive)
self.jobs.clear()
def clear_jobs(self, recursive=True)
Clear the self.jobs dictionary that contains information about jobs associated with this `ScatterGather`

If recursive is True this will also clear the jobs of all internal `Link` objects.
11.184148
9.5645
1.16934
if recursive:
    ret_dict = self.jobs.copy()
    ret_dict.update(self._scatter_link.get_jobs(recursive))
    return ret_dict
return self.jobs
def get_jobs(self, recursive=True)
Return a dictionary with all the jobs

If recursive is True this will include jobs from all internal `Link` objects.
5.430427
5.18699
1.046932
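An illustrative walk over the combined job dictionary (again assuming an existing `sg` instance):

# Illustrative: include the internal scatter link's jobs in the listing.
for job_key, job_details in sorted(sg.get_jobs(recursive=True).items()):
    print(job_key, job_details.status)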
running = True
first = True

if not check_once:
    if stream != sys.stdout:
        sys.stdout.write('Checking status (%is): ' % self.args['job_check_sleep'])
        sys.stdout.flush()

status_vect = JobStatusVector()
while running:
    if first:
        first = False
    elif self.args['dry_run']:
        break
    elif no_wait:
        pass
    else:
        stream.write("Sleeping %.0f seconds between status checks\n" %
                     self.args['job_check_sleep'])
        if stream != sys.stdout:
            sys.stdout.write('.')
            sys.stdout.flush()
        time.sleep(self.args['job_check_sleep'])

    status_vect = self._check_link_completion(self._scatter_link,
                                              fail_pending, fail_running)

    if self.args['check_status_once'] or check_once or no_wait:
        if do_print:
            self.print_update(stream, status_vect)
        break

    if self.args['print_update']:
        if do_print:
            self.print_update(stream, status_vect)

    if self._job_archive is not None:
        self._job_archive.write_table_file()

    n_total = status_vect.n_total
    n_done = status_vect.n_done
    n_failed = status_vect.n_failed
    if n_done + n_failed == n_total:
        running = False

status = status_vect.get_status()
if status in [JobStatus.failed, JobStatus.partial_failed]:
    if do_print:
        self.print_update(stream, status_vect)
        self.print_failed(stream)
    if write_status:
        self._write_status_to_log(status, stream)
else:
    if write_status:
        self._write_status_to_log(0, stream)

self._set_status_self(status=status)

if not check_once:
    if stream != sys.stdout:
        sys.stdout.write("! %s\n" % (JOB_STATUS_STRINGS[status]))

if self._job_archive is not None:
    self._job_archive.write_table_file()

return status_vect
def check_status(self, stream=sys.stdout, check_once=False, fail_pending=False, fail_running=False, no_wait=False, do_print=True, write_status=False)
Loop to check on the status of all the jobs in the job dict.

Parameters
----------
stream : `file`
    Stream that this function will print to, must have a 'write' function.

check_once : bool
    Check status once and exit loop.

fail_pending : `bool`
    If True, consider pending jobs as failed

fail_running : `bool`
    If True, consider running jobs as failed

no_wait : bool
    Do not sleep before checking jobs.

do_print : bool
    Print summary stats.

write_status : bool
    Write the status to the log file.

Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states.
2.901278
2.835777
1.023098
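An illustrative one-shot poll using the flags documented above (assumes an existing `sg` instance):

# Check once, treating jobs still pending on the batch system as failures.
status_vect = sg.check_status(check_once=True, fail_pending=True)
print(status_vect.get_status())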
self._build_job_dict()

self._interface._dry_run = self.args['dry_run']
scatter_status = self._interface.submit_jobs(self.scatter_link,
                                             job_archive=self._job_archive,
                                             stream=stream)
if scatter_status == JobStatus.failed:
    return JobStatus.failed

status_vect = self.check_status(stream, write_status=True)
status = status_vect.get_status()
if status == JobStatus.partial_failed:
    if resubmit_failed:
        sys.stdout.write("Resubmitting partially failed link %s\n" %
                         self.full_linkname)
        status_vect = self.resubmit(stream=stream, fail_running=False,
                                    resubmit_failed=True)
    else:
        sys.stdout.write("NOT resubmitting partially failed link %s\n" %
                         self.full_linkname)

return status_vect
def run_jobs(self, stream=sys.stdout, resubmit_failed=False)
Function to dispatch jobs and collect results

Parameters
----------
stream : `file`
    Stream that this function will print to, must have a 'write' function.

resubmit_failed : bool
    Resubmit failed jobs.

Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states.
4.76973
4.660319
1.023477
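An illustrative dispatch call (assumes an existing `sg` instance); with `resubmit_failed=True` a partially failed pass triggers one recovery round, per the code above:

# Submit all jobs, poll to completion, and retry partial failures once.
status_vect = sg.run_jobs(resubmit_failed=True)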
self._build_job_dict()
status_vect = self.check_status(stream, check_once=True,
                                fail_pending=True, fail_running=fail_running)
status = status_vect.get_status()
if status == JobStatus.done:
    return status

failed_jobs = self._scatter_link.get_failed_jobs(True, True)
if failed_jobs:
    scatter_status = self._interface.submit_jobs(self._scatter_link,
                                                 failed_jobs,
                                                 job_archive=self._job_archive,
                                                 stream=stream)
    if scatter_status == JobStatus.failed:
        return JobStatus.failed

status_vect = self.check_status(stream, write_status=True)
status = status_vect.get_status()
if status == JobStatus.partial_failed:
    if resubmit_failed:
        sys.stdout.write("Resubmitting partially failed link %s\n" %
                         self.full_linkname)
        status_vect = self.resubmit(stream=stream, fail_running=False,
                                    resubmit_failed=False)
    else:
        sys.stdout.write("NOT resubmitting partially failed link %s\n" %
                         self.full_linkname)

if self.args['dry_run']:
    return JobStatus.unknown
return status_vect
def resubmit(self, stream=sys.stdout, fail_running=False, resubmit_failed=False)
Function to resubmit failed jobs and collect results

Parameters
----------
stream : `file`
    Stream that this function will print to, must have a 'write' function.

fail_running : `bool`
    If True, consider running jobs as failed

resubmit_failed : bool
    Resubmit failed jobs.

Returns
-------
status_vect : `JobStatusVector`
    Vector that summarizes the number of jobs in various states.
3.536144
3.559484
0.993443
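An illustrative recovery pass (assumes an existing `sg` instance); note the code above returns early with `JobStatus.done` when nothing actually failed:

# Resubmit only the failed jobs, leaving running jobs untouched.
status_vect = sg.resubmit(fail_running=False)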
self._interface.clean_jobs(self.scatter_link, clean_all=recursive)
def clean_jobs(self, recursive=False)
Clean up all the jobs associated with this object.

If recursive is True this also cleans jobs dispatched by this object.
28.784554
28.237968
1.019356
Link.print_summary(self, stream, indent, recurse_level)
if recurse_level > 0:
    recurse_level -= 1
    indent += "  "
    stream.write("\n")
    self._scatter_link.print_summary(stream, indent, recurse_level)
def print_summary(self, stream=sys.stdout, indent="", recurse_level=2)
Print a summary of the activity done by this `Link`.

Parameters
----------
stream : `file`
    Stream to print to

indent : str
    Indentation at start of line

recurse_level : int
    Number of recursion levels to print
3.39113
3.460793
0.979871
if job_stats is None:
    job_stats = JobStatusVector()
    job_det_list = []
    job_det_list += self._scatter_link.jobs.values()
    for job_dets in job_det_list:
        if job_dets.status == JobStatus.no_job:
            continue
        job_stats[job_dets.status] += 1

stream.write("Status :\n Total : %i\n Unknown: %i\n" %
             (job_stats.n_total, job_stats[JobStatus.unknown]))
stream.write(" Not Ready: %i\n Ready: %i\n" %
             (job_stats[JobStatus.not_ready], job_stats[JobStatus.ready]))
stream.write(" Pending: %i\n Running: %i\n" %
             (job_stats[JobStatus.pending], job_stats[JobStatus.running]))
stream.write(" Done: %i\n Failed: %i\n" %
             (job_stats[JobStatus.done], job_stats[JobStatus.failed]))
def print_update(self, stream=sys.stdout, job_stats=None)
Print an update about the current number of jobs running
2.506565
2.480028
1.0107
for job_key, job_details in sorted(self.scatter_link.jobs.items()):
    if job_details.status == JobStatus.failed:
        stream.write("Failed job %s\n log = %s\n" %
                     (job_key, job_details.logfile))
def print_failed(self, stream=sys.stderr)
Print a list of the failed jobs
5.89813
5.21576
1.130829
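Putting the public pieces together, an illustrative end-to-end pass (all names hypothetical; `sg` is a previously created `ScatterGather`):

# Hypothetical end-to-end sequence: dispatch, summarize, report failures, clean up.
import sys

status_vect = sg.run_jobs(stream=sys.stdout)
sg.print_update(sys.stdout, status_vect)
if status_vect.n_failed:
    sg.print_failed(sys.stderr)
sg.clean_jobs(recursive=False)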