code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
for key, val in file_dict.items():
    if key in self.file_dict:
        self.file_dict[key] |= val
    else:
        self.file_dict[key] = val
def update(self, file_dict)
Update self with values from a dictionary mapping file path [str] to `FileFlags` enum
2.287871
2.221599
1.029831
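The merge is a bitwise OR of flag values, so a file seen by several links accumulates all of their flags. A minimal sketch of that behavior, using a hypothetical `Flags` IntFlag as a stand-in for the real `FileFlags` enum:

from enum import IntFlag

class Flags(IntFlag):  # hypothetical stand-in for FileFlags
    input_mask = 1
    output_mask = 2

d1 = {'a.fits': Flags.input_mask}
d2 = {'a.fits': Flags.output_mask, 'b.fits': Flags.input_mask}
for key, val in d2.items():
    if key in d1:
        d1[key] |= val  # accumulate flags for a file we already track
    else:
        d1[key] = val   # first time we see this file
assert d1['a.fits'] == Flags.input_mask | Flags.output_mask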
ret_list = []
for key, val in self.file_dict.items():
    # For input files we only want files that were marked as input
    if val & FileFlags.input_mask:
        ret_list.append(key)
return ret_list
def input_files(self)
Return a list of the input files needed by this link. For `Link` sub-classes this will return the union of all the input files of each internal `Link`. That is to say this will include files produced by one `Link` in a `Chain` and used as input to another `Link` in the `Chain`
5.542544
6.060819
0.914488
ret_list = []
for key, val in self.file_dict.items():
    # For output files we only want files that were marked as output
    if val & FileFlags.output_mask:
        ret_list.append(key)
return ret_list
def output_files(self)
Return a list of the output files produced by this link. For `Link` sub-classes this will return the union of all the output files of each internal `Link`. That is to say this will include files produced by one `Link` in a `Chain` and used as input to another `Link` in the `Chain`
5.725613
6.277117
0.912141
ret_list = []
for key, val in self.file_dict.items():
    # For chain input files we only want files that were not marked as output
    # (I.e., not produced by some other step in the chain)
    if val & FileFlags.in_ch_mask == FileFlags.input_mask:
        ret_list.append(key)
return ret_list
def chain_input_files(self)
Return a list of the input files needed by this chain. For `Link` sub-classes this will return only those files that were not created by any internal `Link`
7.219718
7.013705
1.029373
ret_list = []
for key, val in self.file_dict.items():
    # For chain output files we only want output files that were not
    # marked as internal or temp
    if val & FileFlags.out_ch_mask == FileFlags.output_mask:
        ret_list.append(key)
return ret_list
def chain_output_files(self)
Return a list of all the output files produced by this link. For `Link` sub-classes this will return only those files that were not marked as internal files or marked for removal.
8.585858
7.283727
1.178773
ret_list = []
for key, val in self.file_dict.items():
    # We only want files that were marked for staging as input
    if val & FileFlags.in_stage_mask == FileFlags.in_stage_mask:
        ret_list.append(key)
return ret_list
def input_files_to_stage(self)
Return a list of the input files that need to be staged to the scratch area before this link runs. For `Link` sub-classes this will return the union of the staged input files of each internal `Link`.
5.13884
5.647217
0.909977
ret_list = []
for key, val in self.file_dict.items():
    # We only want files that were marked for staging as output
    if val & FileFlags.out_stage_mask == FileFlags.out_stage_mask:
        ret_list.append(key)
return ret_list
def output_files_to_stage(self)
Return a list of the output files that need to be copied back from the scratch area after this link runs. For `Link` sub-classes this will return the union of the staged output files of each internal `Link`.
6.060903
6.544929
0.926046
ret_list = []
for key, val in self.file_dict.items():
    # For internal files we only want files that were marked as
    # internal
    if val & FileFlags.internal_mask:
        ret_list.append(key)
return ret_list
def internal_files(self)
Return a list of the intermediate files produced by this link. This returns all files that were explicitly marked as internal files.
5.284928
4.484901
1.178382
ret_list = []
for key, val in self.file_dict.items():
    # For temp files we only want files that were marked for removal
    if val & FileFlags.rm_mask:
        ret_list.append(key)
return ret_list
def temp_files(self)
Return a list of the temporary files produced by this link. This returns all files that were explicitly marked for removal.
6.284399
4.864232
1.291961
ret_list = []
for key, val in self.file_dict.items():
    # We only want files that were marked for compression
    if val & FileFlags.gz_mask:
        ret_list.append(key)
return ret_list
def gzip_files(self)
Return a list of the files compressed by this link. This returns all files that were explicitly marked for compression.
7.888177
6.687105
1.17961
stream.write("%sTotal files : %i\n" % (indent, len(self.file_dict)))
stream.write("%s Input files : %i\n" % (indent, len(self.input_files)))
stream.write("%s Output files : %i\n" % (indent, len(self.output_files)))
stream.write("%s Internal files : %i\n" % (indent, len(self.internal_files)))
stream.write("%s Temp files : %i\n" % (indent, len(self.temp_files)))
def print_summary(self, stream=sys.stdout, indent="")
Print a summary of the files in this file dict. This version explicitly counts the union of all input and output files.
1.79424
1.640523
1.0937
stream.write("%sTotal files : %i\n" % (indent, len(self.file_dict)))
stream.write("%s Input files : %i\n" % (indent, len(self.chain_input_files)))
stream.write("%s Output files : %i\n" % (indent, len(self.chain_output_files)))
stream.write("%s Internal files : %i\n" % (indent, len(self.internal_files)))
stream.write("%s Temp files : %i\n" % (indent, len(self.temp_files)))
def print_chain_summary(self, stream=sys.stdout, indent="")
Print a summary of the files in this file dict. This version uses chain_input_files and chain_output_files to count the input and output files.
1.907935
1.650689
1.155842
abspath = os.path.abspath(local_file)
if abspath.find(self.workdir) >= 0:
    relpath = abspath.replace(self.workdir, '')[1:]
    basename = os.path.basename(relpath)
    dirname = os.path.dirname(relpath)
else:
    basename = os.path.basename(local_file)
    dirname = ''
return (dirname, basename)
def split_local_path(self, local_file)
Split the local path into a directory name and a file name. If local_file is in self.workdir or a subdirectory of it, the directory will consist of the relative path from workdir. If local_file is not in self.workdir, directory will be empty. Returns (dirname, basename)
2.175374
1.997328
1.089142
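To make the workdir-relative behavior concrete, here is a standalone sketch of the same logic with `workdir` passed explicitly (the method above reads it from `self`):

import os

def split_local_path(workdir, local_file):
    # standalone version of the method above, for illustration only
    abspath = os.path.abspath(local_file)
    if abspath.find(workdir) >= 0:
        relpath = abspath.replace(workdir, '')[1:]
        return os.path.dirname(relpath), os.path.basename(relpath)
    return '', os.path.basename(local_file)

print(split_local_path('/data/work', '/data/work/sub/file.fits'))  # ('sub', 'file.fits')
print(split_local_path('/data/work', '/other/file.fits'))          # ('', 'file.fits')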
return os.path.join(self.scratchdir, dirname, basename)
def construct_scratch_path(self, dirname, basename)
Construct and return a path in the scratch area. This will be <self.scratchdir>/<dirname>/<basename>
3.531302
3.41404
1.034347
(local_dirname, local_basename) = self.split_local_path(local_file)
return self.construct_scratch_path(local_dirname, local_basename)
def get_scratch_path(self, local_file)
Construct and return a path in the scratch area from a local file.
3.621287
3.151309
1.149137
ret_dict = {}
for local_file in local_files:
    ret_dict[local_file] = self.get_scratch_path(local_file)
return ret_dict
def map_files(self, local_files)
Build a dictionary mapping local paths to scratch paths.

Parameters
----------
local_files : list
    List of filenames to be mapped to scratch area

Returns dict
    Mapping local_file : fullpath of scratch file
3.25471
2.62166
1.241469
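Taken together with `make_scratch_dirs`, `copy_to_scratch` and `copy_from_scratch` below, this supports a simple staging workflow. A hypothetical sketch, assuming `stager` is an instance providing the methods above:

mapping = stager.map_files(['ebin_000/ccube.fits', 'ebin_000/ltcube.fits'])
make_scratch_dirs(mapping, dry_run=True)   # create the scratch directories
copy_to_scratch(mapping, dry_run=True)     # stage the input files
# ... run the job against the scratch copies ...
copy_from_scratch(mapping, dry_run=True)   # copy results back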
scratch_dirs = {}
for value in file_mapping.values():
    scratch_dirname = os.path.dirname(value)
    scratch_dirs[scratch_dirname] = True
for scratch_dirname in scratch_dirs:
    if dry_run:
        print("mkdir -p %s" % (scratch_dirname))
    else:
        try:
            os.makedirs(scratch_dirname)
        except OSError:
            pass
def make_scratch_dirs(file_mapping, dry_run=True)
Make any directories needed in the scratch area
2.548171
2.516438
1.01261
for key, value in file_mapping.items():
    if not os.path.exists(key):
        continue
    print("copy %s %s" % (key, value))
    if not dry_run:
        copyfile(key, value)
return file_mapping
def copy_to_scratch(file_mapping, dry_run=True)
Copy input files to scratch area
2.354026
2.325827
1.012125
for key, value in file_mapping.items():
    if dry_run:
        print("copy %s %s" % (value, key))
    else:
        try:
            outdir = os.path.dirname(key)
            os.makedirs(outdir)
        except OSError:
            pass
        print("copy %s %s" % (value, key))
        copyfile(value, key)
return file_mapping
def copy_from_scratch(file_mapping, dry_run=True)
Copy output files from scratch area
2.346585
2.318753
1.012003
col_key = Column(name='key', dtype=int)
col_path = Column(name='path', dtype='S256')
col_creator = Column(name='creator', dtype=int)
col_timestamp = Column(name='timestamp', dtype=int)
col_status = Column(name='status', dtype=int)
col_flags = Column(name='flags', dtype=int)
columns = [col_key, col_path, col_creator,
           col_timestamp, col_status, col_flags]
table = Table(data=columns)
for val in file_dict.values():
    val.append_to_table(table)
return table
def make_table(file_dict)
Build and return an `astropy.table.Table` to store `FileHandle`
2.508519
2.257251
1.111316
ret_dict = {}
for row in table:
    file_handle = cls.create_from_row(row)
    ret_dict[file_handle.key] = file_handle
return ret_dict
def make_dict(cls, table)
Build and return a dict of `FileHandle` from an `astropy.table.Table`. The dictionary is keyed by FileHandle.key, which is a unique integer for each file
4.84357
3.015046
1.606467
kwargs = {}
for key in table_row.colnames:
    kwargs[key] = table_row[key]
try:
    return cls(**kwargs)
except KeyError:
    print(kwargs)
def create_from_row(cls, table_row)
Build and return a `FileHandle` from an `astropy.table.row.Row`
3.34714
2.6179
1.278559
if basepath is None:
    fullpath = self.path
else:
    fullpath = os.path.join(basepath, self.path)
exists = os.path.exists(fullpath)
if not exists:
    if self.flags & FileFlags.gz_mask != 0:
        fullpath += '.gz'
        exists = os.path.exists(fullpath)
if exists:
    if self.status == FileStatus.superseded:
        pass
    else:
        self.status = FileStatus.exists
else:
    if self.status in [FileStatus.no_file, FileStatus.expected,
                       FileStatus.missing, FileStatus.temp_removed]:
        if self.flags & FileFlags.rmint_mask != 0:
            self.status = FileStatus.temp_removed
        elif self.status == FileStatus.exists:
            self.status = FileStatus.missing
    elif self.status == FileStatus.exists:
        self.status = FileStatus.temp_removed
return self.status
def check_status(self, basepath=None)
Check on the status of this particular file
2.679395
2.648147
1.0118
table.add_row(dict(path=self.path, key=self.key, creator=self.creator, timestamp=self.timestamp, status=self.status, flags=self.flags))
def append_to_table(self, table)
Add this instance as a row in an `astropy.table.Table`
4.088768
3.477239
1.175866
table[row_idx]['path'] = self.path
table[row_idx]['key'] = self.key
table[row_idx]['creator'] = self.creator
table[row_idx]['timestamp'] = self.timestamp
table[row_idx]['status'] = self.status
table[row_idx]['flags'] = self.flags
def update_table_row(self, table, row_idx)
Update the values in an `astropy.table.Table` for this instance
2.346408
2.228775
1.05278
if filepath[0] == '/':
    return filepath
return os.path.join(self._base_path, filepath)
def _get_fullpath(self, filepath)
Return filepath with the base_path prefixed, unless filepath is already absolute
3.455369
2.499727
1.382299
for irow in range(len(self._table)):
    file_handle = self._make_file_handle(irow)
    self._cache[file_handle.path] = file_handle
def _fill_cache(self)
Fill the cache from the `astropy.table.Table`
5.162245
4.336084
1.190532
self._table_file = table_file
if os.path.exists(self._table_file):
    self._table = Table.read(self._table_file)
else:
    self._table = FileHandle.make_table({})
self._fill_cache()
def _read_table_file(self, table_file)
Read an `astropy.table.Table` to set up the archive
4.028646
3.595021
1.120618
row = self._table[row_idx]
return FileHandle.create_from_row(row)
def _make_file_handle(self, row_idx)
Build and return a `FileHandle` object from an `astropy.table.row.Row`
6.280225
4.263481
1.473027
localpath = self._get_localpath(filepath)
return self._cache[localpath]
def get_handle(self, filepath)
Get the `FileHandle` object associated to a particular file
7.463325
6.880888
1.084646
# Check to see if the file already exists
try:
    self.get_handle(filepath)
except KeyError:
    pass
else:
    raise KeyError("File %s already exists in archive" % filepath)
localpath = self._get_localpath(filepath)
if status == FileStatus.exists:
    # Make sure the file really exists
    fullpath = self._get_fullpath(filepath)
    if not os.path.exists(fullpath):
        print("register_file called on missing file %s" % fullpath)
        status = FileStatus.missing
        timestamp = 0
    else:
        timestamp = int(os.stat(fullpath).st_mtime)
else:
    timestamp = 0
key = len(self._table) + 1
file_handle = FileHandle(path=localpath, key=key,
                         creator=creator, timestamp=timestamp,
                         status=status, flags=flags)
file_handle.append_to_table(self._table)
self._cache[localpath] = file_handle
return file_handle
def register_file(self, filepath, creator, status=FileStatus.no_file, flags=FileFlags.no_flags)
Register a file in the archive.

If the file already exists, this raises a `KeyError`

Parameters
----------
filepath : str
    The path to the file
creator : int
    A unique key for the job that created this file
status : `FileStatus`
    Enumeration giving current status of file
flags : `FileFlags`
    Enumeration giving flags set on this file

Returns `FileHandle`
3.049794
2.905989
1.049486
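A hypothetical usage sketch of the register/update cycle, assuming `archive` is an instance of the archive class these methods belong to:

handle = archive.register_file('maps/ccube.fits', creator=1,
                               status=FileStatus.expected,
                               flags=FileFlags.output_mask)
# ... the job runs and writes the file ...
handle = archive.update_file('maps/ccube.fits', creator=1,
                             status=FileStatus.exists)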
file_handle = self.get_handle(filepath)
if status in [FileStatus.exists, FileStatus.superseded]:
    # Make sure the file really exists
    fullpath = file_handle.fullpath
    if not os.path.exists(fullpath):
        raise ValueError("File %s does not exist" % fullpath)
    timestamp = int(os.stat(fullpath).st_mtime)
else:
    timestamp = 0
file_handle.creator = creator
file_handle.timestamp = timestamp
file_handle.status = status
file_handle.update_table_row(self._table, file_handle.key - 1)
return file_handle
def update_file(self, filepath, creator, status)
Update a file in the archive.

If the file does not exist, this raises a `KeyError`

Parameters
----------
filepath : str
    The path to the file
creator : int
    A unique key for the job that created this file
status : `FileStatus`
    Enumeration giving current status of file

Returns `FileHandle`
3.220896
3.09507
1.040654
ret_list = []
for fname in file_list:
    if file_dict is None:
        flags = FileFlags.no_flags
    else:
        flags = file_dict.file_dict[fname]
    try:
        fhandle = self.get_handle(fname)
    except KeyError:
        if creator is None:
            creator = -1
            # raise KeyError("Can not register a file %s without a creator"%fname)
        fhandle = self.register_file(fname, creator, status, flags)
    ret_list.append(fhandle.key)
return ret_list
def get_file_ids(self, file_list, creator=None, status=FileStatus.no_file, file_dict=None)
Get or create a list of file ids based on file names

Parameters
----------
file_list : list
    The paths to the files
creator : int
    A unique key for the job that created these files
status : `FileStatus`
    Enumeration giving current status of files
file_dict : `FileDict`
    Mask giving flags set on these files

Returns list of integers
3.539365
3.609151
0.980664
if id_list is None:
    return []
try:
    path_array = self._table[id_list - 1]['path']
except IndexError:
    print("IndexError ", len(self._table), id_list)
    path_array = []
return [path for path in path_array]
def get_file_paths(self, id_list)
Get a list of file paths based on a set of ids

Parameters
----------
id_list : list
    List of integer file keys

Returns list of file paths
4.100926
4.413408
0.929197
if self._table is None:
    raise RuntimeError("No table to write")
if table_file is not None:
    self._table_file = table_file
if self._table_file is None:
    raise RuntimeError("No output file specified for table")
write_tables_to_fits(self._table_file, [self._table], clobber=True,
                     namelist=['FILE_ARCHIVE'])
def write_table_file(self, table_file=None)
Write the table to self._table_file
3.712861
3.335899
1.113002
nfiles = len(self.cache.keys())
status_vect = np.zeros((6), int)
sys.stdout.write("Updating status of %i files: " % nfiles)
sys.stdout.flush()
for i, key in enumerate(self.cache.keys()):
    if i % 200 == 0:
        sys.stdout.write('.')
        sys.stdout.flush()
    fhandle = self.cache[key]
    fhandle.check_status(self._base_path)
    fhandle.update_table_row(self._table, fhandle.key - 1)
    status_vect[fhandle.status] += 1
sys.stdout.write("!\n")
sys.stdout.flush()
sys.stdout.write("Summary:\n")
sys.stdout.write(" no_file: %i\n" % status_vect[0])
sys.stdout.write(" expected: %i\n" % status_vect[1])
sys.stdout.write(" exists: %i\n" % status_vect[2])
sys.stdout.write(" missing: %i\n" % status_vect[3])
sys.stdout.write(" superseded: %i\n" % status_vect[4])
sys.stdout.write(" temp_removed: %i\n" % status_vect[5])
def update_file_status(self)
Update the status of all the files in the archive
2.45863
2.384562
1.031061
outbasename = os.path.basename(ltsumfile)
lt_list_file = ltsumfile.replace('fits', 'lst')
outfile = open(lt_list_file, 'w')
for i in range(num_files):
    split_key = "%06i" % i
    output_dir = os.path.join(NAME_FACTORY.base_dict['basedir'],
                              'counts_cubes', split_key)
    filepath = os.path.join(output_dir,
                            outbasename.replace('.fits', '_%s.fits' % split_key))
    outfile.write(filepath)
    outfile.write("\n")
outfile.close()
return '@' + lt_list_file
def _make_ltcube_file_list(ltsumfile, num_files)
Make the list of input files for a particular energy bin X psf type
3.679837
3.599376
1.022354
Gtlink_select.register_class()
Gtlink_bin.register_class()
Gtlink_expcube2.register_class()
Gtlink_scrmaps.register_class()
Gtlink_mktime.register_class()
Gtlink_ltcube.register_class()
Link_FermipyCoadd.register_class()
Link_FermipyGatherSrcmaps.register_class()
Link_FermipyVstack.register_class()
Link_FermipyHealview.register_class()
Gtexpcube2_SG.register_class()
Gtltsum_SG.register_class()
SumRings_SG.register_class()
Vstack_SG.register_class()
GatherSrcmaps_SG.register_class()
Healview_SG.register_class()
def register_classes()
Register these classes with the `LinkFactory`
5.373685
5.276036
1.018508
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
datafile = args['data']
if datafile is None or datafile == 'None':
    return job_configs
NAME_FACTORY.update_base_dict(args['data'])
for comp in components:
    zcut = "zmax%i" % comp.zmax
    mktimelist = copy.copy(comp.mktimefilters)
    if not mktimelist:
        mktimelist.append('none')
    evtclasslist_keys = copy.copy(comp.evtclasses)
    if not evtclasslist_keys:
        evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
    else:
        evtclasslist_vals = copy.copy(evtclasslist_keys)
    for mktimekey in mktimelist:
        for evtclassval in evtclasslist_vals:
            fullkey = comp.make_key(
                '%s_%s_{ebin_name}_%s_{evtype_name}' %
                (evtclassval, zcut, mktimekey))
            name_keys = dict(zcut=zcut,
                             ebin=comp.ebin_name,
                             psftype=comp.evtype_name,
                             coordsys=comp.coordsys,
                             irf_ver=NAME_FACTORY.irf_ver(),
                             mktime=mktimekey,
                             evclass=evtclassval,
                             fullpath=True)
            outfile = NAME_FACTORY.bexpcube(**name_keys)
            cmap = NAME_FACTORY.ccube(**name_keys)
            infile = NAME_FACTORY.ltcube(**name_keys)
            logfile = make_nfs_path(outfile.replace('.fits', '.log'))
            job_configs[fullkey] = dict(cmap=cmap,
                                        infile=infile,
                                        outfile=outfile,
                                        irfs=NAME_FACTORY.irfs(**name_keys),
                                        hpx_order=min(comp.hpx_order,
                                                      args['hpx_order_max']),
                                        evtype=comp.evtype,
                                        logfile=logfile)
return job_configs
def build_job_configs(self, args)
Hook to build job configurations
4.656212
4.65241
1.000817
job_configs = {}
gmm = make_ring_dicts(library=args['library'], basedir='.')
for galkey in gmm.galkeys():
    ring_dict = gmm.ring_dict(galkey)
    for ring_key, ring_info in ring_dict.items():
        output_file = ring_info.merged_gasmap
        file_string = ""
        for fname in ring_info.files:
            file_string += " %s" % fname
        logfile = make_nfs_path(output_file.replace('.fits', '.log'))
        job_configs[ring_key] = dict(output=output_file,
                                     args=file_string,
                                     logfile=logfile)
return job_configs
def build_job_configs(self, args)
Hook to build job configurations
5.865457
5.801739
1.010983
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
ret_dict = make_diffuse_comp_info_dict(components=components,
                                       library=args['library'],
                                       basedir=NAME_FACTORY.base_dict['basedir'])
diffuse_comp_info_dict = ret_dict['comp_info_dict']
for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()):
    diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key]
    for comp in components:
        zcut = "zmax%i" % comp.zmax
        key = comp.make_key('{ebin_name}_{evtype_name}')
        if diffuse_comp_info_value.components is None:
            sub_comp_info = diffuse_comp_info_value
        else:
            sub_comp_info = diffuse_comp_info_value.get_component_info(comp)
        name_keys = dict(zcut=zcut,
                         sourcekey=sub_comp_info.sourcekey,
                         ebin=comp.ebin_name,
                         psftype=comp.evtype_name,
                         mktime='none',
                         coordsys=comp.coordsys,
                         irf_ver=NAME_FACTORY.irf_ver(),
                         fullpath=True)
        outfile = NAME_FACTORY.srcmaps(**name_keys)
        outfile_tokens = os.path.splitext(outfile)
        infile_regexp = "%s_*.fits*" % outfile_tokens[0]
        full_key = "%s_%s" % (sub_comp_info.sourcekey, key)
        logfile = make_nfs_path(outfile.replace('.fits', '.log'))
        job_configs[full_key] = dict(output=outfile,
                                     args=infile_regexp,
                                     hdu=sub_comp_info.source_name,
                                     logfile=logfile)
return job_configs
def build_job_configs(self, args)
Hook to build job configurations
4.82669
4.811705
1.003114
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
ret_dict = make_catalog_comp_dict(library=args['library'],
                                  basedir=NAME_FACTORY.base_dict['basedir'])
catalog_info_dict = ret_dict['catalog_info_dict']
for catalog_name in catalog_info_dict:
    for comp in components:
        zcut = "zmax%i" % comp.zmax
        key = comp.make_key('{ebin_name}_{evtype_name}')
        name_keys = dict(zcut=zcut,
                         sourcekey=catalog_name,
                         ebin=comp.ebin_name,
                         psftype=comp.evtype_name,
                         coordsys=comp.coordsys,
                         irf_ver=NAME_FACTORY.irf_ver(),
                         mktime='none',
                         fullpath=True)
        outfile = NAME_FACTORY.srcmaps(**name_keys)
        outfile_tokens = os.path.splitext(outfile)
        infile_regexp = "%s_*.fits" % outfile_tokens[0]
        logfile = make_nfs_path(outfile.replace('.fits', '.log'))
        job_configs[key] = dict(output=outfile,
                                args=infile_regexp,
                                logfile=logfile)
return job_configs
def build_job_configs(self, args)
Hook to build job configurations
6.741336
6.757424
0.997619
return stats.norm(loc=mu, scale=sigma).pdf(x)
def norm(x, mu, sigma=1.0)
Scipy norm function
3.916854
4.023846
0.97341
return np.log(stats.norm(loc=mu, scale=sigma).pdf(x))
def ln_norm(x, mu, sigma=1.0)
Natural log of scipy norm function truncated at zero
3.548887
3.306873
1.073185
return stats.lognorm(sigma, scale=mu).pdf(x)
def lognorm(x, mu, sigma=1.0)
Log-normal function from scipy
4.743704
5.284869
0.897601
return stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)
def log10norm(x, mu, sigma=1.0)
Scale scipy lognorm from natural log to base 10

x : input parameter
mu : mean of the underlying log10 gaussian
sigma : standard deviation of the underlying log10 gaussian
5.009929
6.041735
0.82922
x = np.array(x, ndmin=1)
lmu = np.log10(mu)
s2 = sigma * sigma
lx = np.zeros(x.shape)
v = np.zeros(x.shape)
lx[x > 0] = np.log10(x[x > 0])
v = 1. / np.sqrt(2 * s2 * np.pi) * np.exp(-(lx - lmu)**2 / (2 * s2))
if not logpdf:
    v /= (x * np.log(10.))
v[x <= 0] = -np.inf
return v
def lgauss(x, mu, sigma=1.0, logpdf=False)
Log10 normal distribution...

x : Parameter of interest for scanning the pdf
mu : Peak of the lognormal distribution (mean of the underlying
    normal distribution is log10(mu))
sigma : Standard deviation of the underlying normal distribution
2.414454
2.753753
0.876787
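A small numeric check of the base-10 scaling used by `log10norm` and `lgauss` above; this assumes only scipy and the identity relating a scipy lognorm with shape sigma*ln(10) to a gaussian in log10(x):

import numpy as np
import scipy.stats as stats

x, mu, sigma = 2.0, 1.0, 0.5
a = stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)  # log10norm above
lx, lmu = np.log10(x), np.log10(mu)
b = (np.exp(-(lx - lmu) ** 2 / (2 * sigma ** 2)) /
     np.sqrt(2 * np.pi * sigma ** 2) / (x * np.log(10.)))  # lgauss above
assert np.allclose(a, b)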
functype = d.get('functype', 'lgauss_like')
j_ref = d.get('j_ref', 1.0)
if functype == 'norm':
    return norm_prior(d['mu'], d['sigma'], j_ref)
elif functype == 'lognorm':
    return lognorm_prior(d['mu'], d['sigma'], j_ref)
elif functype == 'gauss':
    return function_prior(functype, d['mu'], d['sigma'],
                          gauss, lngauss, j_ref)
elif functype == 'lgauss':
    return function_prior(functype, d['mu'], d['sigma'],
                          lgauss, lnlgauss, j_ref)
elif functype in ['lgauss_like', 'lgauss_lik']:
    def fn(x, y, s):
        return lgauss(y, x, s)

    def lnfn(x, y, s):
        return lnlgauss(y, x, s)
    return function_prior(functype, d['mu'], d['sigma'], fn, lnfn, j_ref)
elif functype == 'lgauss_log':
    def fn(x, y, s):
        return lgauss(x, y, s, logpdf=True)

    def lnfn(x, y, s):
        return lnlgauss(x, y, s, logpdf=True)
    return function_prior(functype, d['mu'], d['sigma'], fn, lnfn, j_ref)
else:
    raise KeyError("Unrecognized prior_functor type %s" % functype)
def create_prior_functor(d)
Build a prior from a dictionary.

Parameters
----------
d : A dictionary, it must contain:
    d['functype'] : a recognized function type
    and all of the required parameters for the
    prior_functor of the desired type

Returns
----------
A sub-class of '~fermipy.stats_utils.prior_functor'

Recognized types are:
'lognorm' : Scipy lognormal distribution
'norm' : Scipy normal distribution
'gauss' : Gaussian truncated at zero
'lgauss' : Gaussian in log-space
'lgauss_like' : Gaussian in log-space, with arguments reversed.
'lgauss_log' : ???
2.246398
2.059471
1.090765
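A hypothetical usage sketch; the dictionary keys follow the convention documented above, and `create_prior_functor` is the function shown:

prior_dict = dict(functype='lgauss', mu=1.0, sigma=0.2, j_ref=1.0)
prior = create_prior_functor(prior_dict)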
log_mean = np.log10(self.mean())
# Default is to marginalize over two decades,
# centered on mean, using 1000 bins
return np.logspace(-1. + log_mean, 1. + log_mean, 1001) / self._j_ref
def marginalization_bins(self)
Binning to use to do the marginalization integrals
8.192111
7.962529
1.028833
log_mean = np.log10(self.mean())
log_half_width = max(5. * self.sigma(), 3.)
# Default is to profile over +-5 sigma,
# centered on mean, using 100 bins
return np.logspace(log_mean - log_half_width,
                   log_mean + log_half_width, 101) / self._j_ref
def profile_bins(self)
The binning to use to do the profile fitting
6.594467
6.483536
1.01711
norm_r = self.normalization_range()
return quad(self, norm_r[0] * self._j_ref, norm_r[1] * self._j_ref)[0]
def normalization(self)
The normalization, i.e., the integral of the function over the normalization_range
7.308774
5.451859
1.340602
if self._ret_type == ret_type:
    return
if ret_type == "straight":
    self._interp = self._lnlfn.interp
elif ret_type == "profile":
    self._profile_loglike_spline(self._lnlfn.interp.x)
    # self._profile_loglike(self._lnlfn.interp.x)
    self._interp = self._prof_interp
elif ret_type == "marginal":
    self._marginal_loglike(self._lnlfn.interp.x)
    self._interp = self._marg_interp
elif ret_type == "posterior":
    self._posterior(self._lnlfn.interp.x)
    self._interp = self._post_interp
else:
    raise ValueError("Did not recognize return type %s" % ret_type)
self._ret_type = ret_type
def init_return(self, ret_type)
Specify the return type. Note that this will also construct the '~fermipy.castro.Interpolator' object for the requested return type.
2.814172
2.694622
1.044366
self._prof_interp = None
self._prof_y = None
self._prof_z = None
self._marg_interp = None
self._marg_z = None
self._post = None
self._post_interp = None
self._interp = None
self._ret_type = None
def clear_cached_values(self)
Removes all of the cached values and interpolators
4.831866
4.132241
1.169309
# This is the negative log-likelihood
z = self._lnlfn.interp(x * y)
return np.exp(-z) * self._nuis_pdf(y) / self._nuis_norm
def like(self, x, y)
Evaluate the 2-D likelihood in the x/y parameter space.

The dimension of the two input arrays should be the same.

Parameters
----------
x : array_like
    Array of coordinates in the `x` parameter.
y : array_like
    Array of coordinates in the `y` nuisance parameter.
15.765522
17.360508
0.908126
nuis = self._nuis_pdf(y)
log_nuis = np.where(nuis > 0., np.log(nuis), -1e2)
vals = -self._lnlfn.interp(x * y) + log_nuis - self._nuis_log_norm
return vals
def loglike(self, x, y)
Evaluate the 2-D log-likelihood in the x/y parameter space.

The dimension of the two input arrays should be the same.

Parameters
----------
x : array_like
    Array of coordinates in the `x` parameter.
y : array_like
    Array of coordinates in the `y` nuisance parameter.
8.632744
9.749701
0.885437
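Read directly from the two bodies above, with z the cached negative log-likelihood interpolator, pi(y) the nuisance pdf and N its normalization (the -1e2 floor only guards the log against zero pdf values), the two methods evaluate:

\mathcal{L}(x, y) = e^{-z(x y)} \, \frac{\pi(y)}{N},
\qquad
\log \mathcal{L}(x, y) = -z(x y) + \log \pi(y) - \log N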
if self._prof_interp is None:
    # This calculates values and caches the spline
    return self._profile_loglike(x)[1]
x = np.array(x, ndmin=1)
return self._prof_interp(x)
def profile_loglike(self, x)
Profile log-likelihood. Returns ``L_prof(x,y=y_min|z')``, where y_min is the value of y that minimizes L for a given x. This will use the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
6.507493
5.68169
1.145345
if self._marg_interp is None:
    # This calculates values and caches the spline
    return self._marginal_loglike(x)
x = np.array(x, ndmin=1)
return self._marg_interp(x)
def marginal_loglike(self, x)
Marginal log-likelihood. Returns ``L_marg(x) = \int L(x,y|z') L(y) dy`` This will use the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
5.438962
4.791958
1.135019
if self._post is None:
    return self._posterior(x)
x = np.array(x, ndmin=1)
return self._post_interp(x)
def posterior(self, x)
Posterior function. Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy`` This will use the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
4.151948
3.965302
1.04707
x = np.array(x, ndmin=1)
z = []
y = []
for xtmp in x:
    def fn(t):
        return -self.loglike(xtmp, t)
    ytmp = opt.fmin(fn, 1.0, disp=False)[0]
    ztmp = self.loglike(xtmp, ytmp)
    z.append(ztmp)
    y.append(ytmp)
self._prof_y = np.array(y)
self._prof_z = np.array(z)
self._prof_z = self._prof_z.max() - self._prof_z
self._prof_interp = castro.Interpolator(x, self._prof_z)
return self._prof_y, self._prof_z
def _profile_loglike(self, x)
Internal function to calculate and cache the profile likelihood
2.674588
2.651051
1.008878
z = []
y = []
yv = self._nuis_pdf.profile_bins()
nuis_vals = self._nuis_pdf.log_value(yv) - self._nuis_log_norm
for xtmp in x:
    zv = -1. * self._lnlfn.interp(xtmp * yv) + nuis_vals
    sp = splrep(yv, zv, k=2, s=0)

    def rf(t):
        return splev(t, sp, der=1)
    ix = np.argmax(splev(yv, sp))
    imin, imax = max(0, ix - 3), min(len(yv) - 1, ix + 3)
    try:
        y0 = opt.brentq(rf, yv[imin], yv[imax], xtol=1e-10)
    except Exception:
        # fall back to the grid maximum if the root-finding fails
        y0 = yv[ix]
    z0 = self.loglike(xtmp, y0)
    z.append(z0)
    y.append(y0)
self._prof_y = np.array(y)
self._prof_z = np.array(z)
self._prof_z = self._prof_z.max() - self._prof_z
self._prof_interp = castro.Interpolator(x, self._prof_z)
return self._prof_y, self._prof_z
def _profile_loglike_spline(self, x)
Internal function to calculate and cache the profile likelihood
3.907866
3.879865
1.007217
yedge = self._nuis_pdf.marginalization_bins()
yw = yedge[1:] - yedge[:-1]
yc = 0.5 * (yedge[1:] + yedge[:-1])
s = self.like(x[:, np.newaxis], yc[np.newaxis, :])
# This does the marginalization integral
z = 1. * np.sum(s * yw, axis=1)
self._marg_z = np.zeros(z.shape)
msk = z > 0
self._marg_z[msk] = -1 * np.log(z[msk])
# Extrapolate to unphysical values
# FIXME, why is this needed
dlogzdx = (np.log(z[msk][-1]) - np.log(z[msk][-2])) / \
    (x[msk][-1] - x[msk][-2])
self._marg_z[~msk] = self._marg_z[msk][-1] + \
    (self._marg_z[~msk] - self._marg_z[msk][-1]) * dlogzdx
self._marg_interp = castro.Interpolator(x, self._marg_z)
return self._marg_z
def _marginal_loglike(self, x)
Internal function to calculate and cache the marginal likelihood
3.687531
3.624444
1.017406
yedge = self._nuis_pdf.marginalization_bins()
yc = 0.5 * (yedge[1:] + yedge[:-1])
yw = yedge[1:] - yedge[:-1]
like_array = self.like(x[:, np.newaxis], yc[np.newaxis, :]) * yw
like_array /= like_array.sum()
self._post = like_array.sum(1)
self._post_interp = castro.Interpolator(x, self._post)
return self._post
def _posterior(self, x)
Internal function to calculate and cache the posterior
5.379884
5.185154
1.037555
xmax = self._lnlfn.interp.xmax
x0 = max(self._lnlfn.mle(), xmax * 1e-5)
ret = opt.fmin(lambda x: np.where(xmax > x > 0, -self(x), np.inf),
               x0, disp=False)
mle = float(ret[0])
return mle
def _compute_mle(self)
Maximum likelihood estimator.
6.955828
6.500678
1.070016
psf_types = input_dict.pop('psf_types')
output_list = []
for psf_type, val_dict in sorted(psf_types.items()):
    fulldict = input_dict.copy()
    fulldict.update(val_dict)
    fulldict['evtype_name'] = psf_type
    fulldict['ebin_name'] = ebin_name
    component = cls(**fulldict)
    output_list += [component]
return output_list
def build_from_energy_dict(cls, ebin_name, input_dict)
Build a list of components from a dictionary for a single energy range
3.073391
2.934098
1.047474
top_dict = yaml.safe_load(yamlstr)
coordsys = top_dict.pop('coordsys')
output_list = []
for e_key, e_dict in sorted(top_dict.items()):
    if e_key == 'coordsys':
        continue
    e_dict = top_dict[e_key]
    e_dict['coordsys'] = coordsys
    output_list += cls.build_from_energy_dict(e_key, e_dict)
return output_list
def build_from_yamlstr(cls, yamlstr)
Build a list of components from a yaml string
3.085365
3.011479
1.024535
if hpx_order == ccube_clean.hpx.order:
    ccube_clean_at_order = ccube_clean
else:
    ccube_clean_at_order = ccube_clean.ud_grade(hpx_order,
                                                preserve_counts=True)
if hpx_order == ccube_dirty.hpx.order:
    ccube_dirty_at_order = ccube_dirty
else:
    ccube_dirty_at_order = ccube_dirty.ud_grade(hpx_order,
                                                preserve_counts=True)
if hpx_order == bexpcube_clean.hpx.order:
    bexpcube_clean_at_order = bexpcube_clean
else:
    bexpcube_clean_at_order = bexpcube_clean.ud_grade(hpx_order,
                                                      preserve_counts=True)
if hpx_order == bexpcube_dirty.hpx.order:
    bexpcube_dirty_at_order = bexpcube_dirty
else:
    bexpcube_dirty_at_order = bexpcube_dirty.ud_grade(hpx_order,
                                                      preserve_counts=True)
if ccube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
    ccube_dirty_at_order = ccube_dirty_at_order.swap_scheme()
if bexpcube_clean_at_order.hpx.nest != ccube_clean.hpx.nest:
    bexpcube_clean_at_order = bexpcube_clean_at_order.swap_scheme()
if bexpcube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
    bexpcube_dirty_at_order = bexpcube_dirty_at_order.swap_scheme()
ret_dict = dict(ccube_clean=ccube_clean_at_order,
                ccube_dirty=ccube_dirty_at_order,
                bexpcube_clean=bexpcube_clean_at_order,
                bexpcube_dirty=bexpcube_dirty_at_order)
return ret_dict
def _match_cubes(ccube_clean, ccube_dirty, bexpcube_clean, bexpcube_dirty, hpx_order)
Match the HEALPix scheme and order of all the input cubes and return a dictionary of cubes with the same HEALPix scheme and order
1.368891
1.353379
1.011461
bexp_data = np.sqrt(bexpcube.data[0:-1, 0:] * bexpcube.data[1:, 0:])
intensity_data = ccube.data / bexp_data
intensity_map = HpxMap(intensity_data, ccube.hpx)
return intensity_map
def _compute_intensity(ccube, bexpcube)
Compute the intensity map
3.705616
3.451556
1.073607
data = (map1.data + map2.data) / 2.
return HpxMap(data, map1.hpx)
def _compute_mean(map1, map2)
Make a map that is the mean of two maps
6.058
5.068368
1.195256
data = np.where(bot.data > 0, top.data / bot.data, 0.)
return HpxMap(data, top.hpx)
def _compute_ratio(top, bot)
Make a map that is the ratio of two maps
6.933516
5.892824
1.176603
data = map1.data - map2.data
return HpxMap(data, map1.hpx)
def _compute_diff(map1, map2)
Make a map that is the difference of two maps
7.372679
6.040754
1.22049
data = map1.data * map2.data
return HpxMap(data, map1.hpx)
def _compute_product(map1, map2)
Make a map that is the product of two maps
7.370507
6.37268
1.156579
data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])
return HpxMap(data, intensity.hpx)
def _compute_counts_from_intensity(intensity, bexpcube)
Make the counts map from the intensity
6.189412
5.616095
1.102085
data = model.data * bexpcube.data
ebins = model.hpx.ebins
ratio = ebins[1:] / ebins[0:-1]
half_log_ratio = np.log(ratio) / 2.
int_map = ((data[0:-1].T * ebins[0:-1]) +
           (data[1:].T * ebins[1:])) * half_log_ratio
return HpxMap(int_map.T, model.hpx)
def _compute_counts_from_model(model, bexpcube)
Make the counts maps from the model
4.182277
3.882067
1.077332
mask = np.zeros((intensity_mean.data.shape), bool)
nebins = len(intensity_mean.data)
sum_intensity = intensity_mean.data.sum(0)
mean_intensity = sum_intensity.mean()
for i in range(nebins):
    mask[i, 0:] = sum_intensity > (mask_factor * mean_intensity)
return HpxMap(mask, intensity_mean.hpx)
def _make_bright_pixel_mask(intensity_mean, mask_factor=5.0)
Make a mask of all the brightest pixels
3.729282
3.537107
1.054331
nebins = len(intensity_ratio.data)
aeff_corrections = np.zeros((nebins))
for i in range(nebins):
    bright_pixels_intensity = intensity_ratio.data[i][mask.data[i]]
    mean_bright_pixel = bright_pixels_intensity.mean()
    aeff_corrections[i] = 1. / mean_bright_pixel
print("Aeff correction: ", aeff_corrections)
return aeff_corrections
def _get_aeff_corrections(intensity_ratio, mask)
Compute a correction for the effective area from the brightest pixels
2.935584
2.680301
1.095244
data = aeff_corrections * intensity_map.data.T
return HpxMap(data.T, intensity_map.hpx)
def _apply_aeff_corrections(intensity_map, aeff_corrections)
Multiply a map by the effective area correction
6.892462
6.313676
1.091672
filled_intensity = np.zeros((intensity_resid.data.shape))
nebins = len(intensity_resid.data)
for i in range(nebins):
    masked = bright_pixel_mask.data[i]
    unmasked = np.invert(masked)
    mean_intensity = intensity_resid.data[i][unmasked].mean()
    filled_intensity[i] = np.where(masked, mean_intensity,
                                   intensity_resid.data[i])
return HpxMap(filled_intensity, intensity_resid.hpx)
def _fill_masked_intensity_resid(intensity_resid, bright_pixel_mask)
Fill the pixels used to compute the effective area correction with the mean intensity
2.945011
2.816151
1.045758
if hpx_map.hpx.ordering == "NESTED":
    ring_map = hpx_map.swap_scheme()
else:
    ring_map = hpx_map
ring_data = ring_map.data.copy()
nebins = len(hpx_map.data)
smoothed_data = np.zeros((hpx_map.data.shape))
for i in range(nebins):
    smoothed_data[i] = healpy.sphtfunc.smoothing(ring_data[i],
                                                 sigma=np.radians(sigma),
                                                 verbose=False)
# ndarray.clip returns a new array, so the result must be assigned
smoothed_data = smoothed_data.clip(0., 1e99)
smoothed_ring_map = HpxMap(smoothed_data, ring_map.hpx)
if hpx_map.hpx.ordering == "NESTED":
    return smoothed_ring_map.swap_scheme()
return smoothed_ring_map
def _smooth_hpx_map(hpx_map, sigma)
Smooth a healpix map using a Gaussian
2.629034
2.613182
1.006066
nebins = len(hpx_map.data)
diff_map = np.zeros((nebins + 1, hpx_map.hpx.npix))
ebins = hpx_map.hpx.ebins
ratio = ebins[1:] / ebins[0:-1]
half_log_ratio = np.log(ratio) / 2.
ratio_gamma = np.power(ratio, gamma)
# ratio_inv_gamma = np.power(ratio, -1. * gamma)
diff_map[0] = hpx_map.data[0] / ((ebins[0] + ratio_gamma[0] * ebins[1]) *
                                 half_log_ratio[0])
for i in range(nebins):
    diff_map[i + 1] = (hpx_map.data[i] /
                       (ebins[i + 1] * half_log_ratio[i])) - \
        (diff_map[i] / ratio[i])
return HpxMap(diff_map, hpx_map.hpx)
def _intergral_to_differential(hpx_map, gamma=-2.0)
Convert integral quantity to differential quantity Here we are assuming the spectrum is a powerlaw with index gamma and we are using log-log-quadrature to compute the integral quantities.
2.645869
2.545089
1.039598
ebins = hpx_map.hpx.ebins
ratio = ebins[1:] / ebins[0:-1]
half_log_ratio = np.log(ratio) / 2.
int_map = ((hpx_map.data[0:-1].T * ebins[0:-1]) +
           (hpx_map.data[1:].T * ebins[1:])) * half_log_ratio
return HpxMap(int_map.T, hpx_map.hpx)
def _differential_to_integral(hpx_map)
Convert a differential map to an integral map Here we are using log-log-quadrature to compute the integral quantities.
2.85219
2.676317
1.065714
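A quick numeric check of the log-log quadrature rule used by both conversions above; no fermipy code is assumed, just the rule integral ~ (F(e1)*e1 + F(e2)*e2) * ln(e2/e1) / 2 applied to a power-law spectrum:

import numpy as np

e1, e2, gamma = 1.0, 2.0, -2.0
flux = lambda e: e ** gamma
approx = (flux(e1) * e1 + flux(e2) * e2) * np.log(e2 / e1) / 2.
exact = (e2 ** (gamma + 1) - e1 ** (gamma + 1)) / (gamma + 1)
print(approx, exact)  # ~0.520 vs 0.500; the rule is exact only for gamma = -1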
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
NAME_FACTORY_CLEAN.update_base_dict(args['data'])
NAME_FACTORY_DIRTY.update_base_dict(args['data'])
NAME_FACTORY_CLEAN.base_dict['evclass'] = args['clean']
NAME_FACTORY_DIRTY.base_dict['evclass'] = args['dirty']
for comp in components:
    zcut = "zmax%i" % comp.zmax
    key = comp.make_key('{ebin_name}_{evtype_name}')
    name_keys = dict(zcut=zcut,
                     ebin=comp.ebin_name,
                     psftype=comp.evtype_name,
                     coordsys=comp.coordsys,
                     irf_ver=NAME_FACTORY.irf_ver(),
                     mktime=args['mktimefilter'],
                     fullpath=True)
    outfile = NAME_FACTORY.residual_cr(**name_keys)
    if args['hpx_order']:
        hpx_order = min(comp.hpx_order, args['hpx_order'])
    else:
        hpx_order = comp.hpx_order
    job_configs[key] = dict(
        bexpcube_dirty=NAME_FACTORY_DIRTY.bexpcube(**name_keys),
        ccube_dirty=NAME_FACTORY_DIRTY.ccube(**name_keys),
        bexpcube_clean=NAME_FACTORY_CLEAN.bexpcube(**name_keys),
        ccube_clean=NAME_FACTORY_CLEAN.ccube(**name_keys),
        outfile=outfile,
        hpx_order=hpx_order,
        full_output=args['full_output'],
        logfile=make_nfs_path(outfile.replace('.fits', '.log')))
return job_configs
def build_job_configs(self, args)
Hook to build job configurations
3.90703
3.908022
0.999746
config_yaml = args['config']
config_dict = load_yaml(config_yaml)
data = config_dict.get('data')
comp = config_dict.get('comp')
dry_run = args.get('dry_run', False)
self._set_link('prepare', SplitAndMktimeChain,
               comp=comp, data=data,
               ft1file=config_dict['ft1file'],
               ft2file=config_dict['ft2file'],
               hpx_order_ccube=config_dict.get('hpx_order_ccube', 7),
               hpx_order_expcube=config_dict.get('hpx_order_expcube', 7),
               mktime=config_dict.get('mktimefitler', None),
               do_ltsum=config_dict.get('do_ltsum', False),
               scratch=config_dict.get('scratch', None),
               dry_run=dry_run)
self._set_link('residual-cr', ResidualCR_SG,
               comp=comp, data=data,
               mktimefilter=config_dict.get('mktimefitler', None),
               hpx_order=config_dict.get('hpx_order_fitting', 4),
               clean=config_dict.get('clean_class', None),
               dirty=config_dict.get('dirty_class', None),
               select_factor=config_dict.get('select_factor', None),
               mask_factor=config_dict.get('mask_factor', None),
               sigma=config_dict.get('sigma', None),
               full_output=config_dict.get('full_output', False),
               dry_run=dry_run)
def _map_arguments(self, args)
Map from the top-level arguments to the arguments provided to the individual links
3.579997
3.509638
1.020047
# FIXME: This functionality should be built into the Map.coadd method
map_out = gammapy.maps.Map.from_geom(geom)
for m in maps:
    m_tmp = m
    if isinstance(m, gammapy.maps.HpxNDMap):
        if m.geom.order < map_out.geom.order:
            factor = map_out.geom.nside // m.geom.nside
            m_tmp = m.upsample(factor, preserve_counts=preserve_counts)
    map_out.coadd(m_tmp)
return map_out
def coadd_maps(geom, maps, preserve_counts=True)
Coadd a sequence of `~gammapy.maps.Map` objects.
3.29491
3.179897
1.036169
# Note that the array is using the opposite convention from WCS
# so we sum over axis 0 in the array, but drop axis 2 in the WCS object
return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))
def sum_over_energy(self)
Reduce a 3D counts cube to a 2D counts map
11.501997
9.280105
1.239425
return np.ravel_multi_index(xypix, self.npix, order='F' if colwise else 'C', mode='raise')
def xypix_to_ipix(self, xypix, colwise=False)
Return the flattened pixel indices from an array of multi-dimensional
pixel indices.

Parameters
----------
xypix : list
    List of pixel indices in the order (LON, LAT, ENERGY).
colwise : bool
    Use column-wise pixel indexing.
3.374502
5.021088
0.672066
return np.unravel_index(ipix, self.npix, order='F' if colwise else 'C')
def ipix_to_xypix(self, ipix, colwise=False)
Return an array of multi-dimensional pixel indices from a flattened index.

Parameters
----------
colwise : bool
    Use column-wise pixel indexing.
3.439286
4.827886
0.712379
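The two methods above are thin wrappers around numpy's flat-index helpers; a self-contained round trip with an assumed map shape:

import numpy as np

npix = (10, 20, 5)   # (LON, LAT, ENERGY) shape, per the docstring above
xypix = (3, 7, 2)
ipix = np.ravel_multi_index(xypix, npix, order='C', mode='raise')
assert np.unravel_index(ipix, npix, order='C') == xypix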
xy = self.ipix_to_xypix(ipix, colwise)
return self.xypix_to_ipix(xy, not colwise)
def ipix_swap_axes(self, ipix, colwise=False)
Return the transposed pixel index from the pixel xy coordinates. If colwise is True (False) this assumes the original index was in the column-wise (row-wise) scheme.
3.78109
3.583905
1.05502
xpix = np.linspace(0, self.npix[0] - 1., self.npix[0])
ypix = np.linspace(0, self.npix[1] - 1., self.npix[1])
xypix = np.meshgrid(xpix, ypix, indexing='ij')
return SkyCoord.from_pixel(np.ravel(xypix[0]),
                           np.ravel(xypix[1]), self.wcs)
def get_pixel_skydirs(self)
Get a list of sky coordinates for the centers of every pixel.
2.259227
2.106361
1.072573
lons = np.array(lons, ndmin=1)
lats = np.array(lats, ndmin=1)
if len(lats) != len(lons):
    raise RuntimeError('Map.get_pixel_indices, input lengths '
                       'do not match %i %i' % (len(lons), len(lats)))
if len(self._npix) == 2:
    pix_x, pix_y = self._wcs.wcs_world2pix(lons, lats, 0)
    pixcrd = [np.floor(pix_x).astype(int),
              np.floor(pix_y).astype(int)]
elif len(self._npix) == 3:
    all_lons = np.expand_dims(lons, -1)
    all_lats = np.expand_dims(lats, -1)
    if ibin is None:
        all_bins = (np.expand_dims(np.arange(self.npix[2]), -1) *
                    np.ones(lons.shape)).T
    else:
        all_bins = ibin
    l = self.wcs.wcs_world2pix(all_lons, all_lats, all_bins, 0)
    pix_x = l[0]
    pix_y = l[1]
    pixcrd = [np.floor(l[0]).astype(int),
              np.floor(l[1]).astype(int),
              all_bins.astype(int)]
return pixcrd
def get_pixel_indices(self, lons, lats, ibin=None)
Return the indices in the flat array corresponding to a set of coordinates

Parameters
----------
lons : array-like
    'Longitudes' (RA or GLON)
lats : array-like
    'Latitudes' (DEC or GLAT)
ibin : int or array-like
    Extract data only for a given energy bin. None -> extract data
    for all energy bins.

Returns
----------
pixcrd : list
    Pixel indices along each dimension of the map.
2.283515
2.298501
0.99348
pix_idxs = self.get_pixel_indices(lons, lats, ibin)
idxs = copy.copy(pix_idxs)
m = np.empty_like(idxs[0], dtype=bool)
m.fill(True)
for i, p in enumerate(pix_idxs):
    m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])
    idxs[i][~m] = 0
vals = self.counts.T[idxs]
vals[~m] = np.nan
return vals
def get_map_values(self, lons, lats, ibin=None)
Return the map values corresponding to a set of coordinates.

Parameters
----------
lons : array-like
    'Longitudes' (RA or GLON)
lats : array-like
    'Latitudes' (DEC or GLAT)
ibin : int or array-like
    Extract data only for a given energy bin. None -> extract data
    for all bins

Returns
----------
vals : numpy.ndarray((n))
    Values of pixels in the flattened map, np.nan used to flag
    coords outside of map
3.170547
3.46299
0.915552
hpx = HPX.create_from_hdu(hdu, ebins)
colnames = hdu.columns.names
cnames = []
if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':
    pixs = hdu.data.field('PIX')
    chans = hdu.data.field('CHANNEL')
    keys = chans * hpx.npix + pixs
    vals = hdu.data.field('VALUE')
    nebin = len(ebins)
    data = np.zeros((nebin, hpx.npix))
    data.flat[keys] = vals
else:
    for c in colnames:
        if c.find(hpx.conv.colstring) == 0:
            cnames.append(c)
    nebin = len(cnames)
    data = np.ndarray((nebin, hpx.npix))
    for i, cname in enumerate(cnames):
        data[i, 0:] = hdu.data.field(cname)
return cls(data, hpx)
def create_from_hdu(cls, hdu, ebins)
Creates and returns an HpxMap object from a FITS HDU.

hdu : The FITS HDU
ebins : Energy bin edges [optional]
3.537077
3.528461
1.002442
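A minimal sketch of the FGST_SRCMAP_SPARSE decoding above: each (channel, pixel) pair indexes the flattened (nebin, npix) array, so the flat key is channel * npix + pixel. The shapes used here are illustrative only:

import numpy as np

nebin, npix = 3, 12
pixs = np.array([0, 5, 11])
chans = np.array([0, 1, 2])
vals = np.array([1.0, 2.0, 3.0])
data = np.zeros((nebin, npix))
data.flat[chans * npix + pixs] = vals
assert data[1, 5] == 2.0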
extname = kwargs.get('hdu', hdulist[1].name)
ebins = fits_utils.find_and_read_ebins(hdulist)
return cls.create_from_hdu(hdulist[extname], ebins)
def create_from_hdulist(cls, hdulist, **kwargs)
Creates and returns an HpxMap object from a FITS HDUList

extname : The name of the HDU with the map data
ebounds : The name of the HDU with the energy bin data
5.188955
4.46091
1.163206
self._wcs_proj = proj
self._wcs_oversample = oversample
self._wcs_2d = self.hpx.make_wcs(2, proj=proj, oversample=oversample)
self._hpx2wcs = HpxToWcsMapping(self.hpx, self._wcs_2d)
wcs, wcs_data = self.convert_to_cached_wcs(self.counts, sum_ebins,
                                           normalize)
return wcs, wcs_data
def make_wcs_from_hpx(self, sum_ebins=False, proj='CAR', oversample=2, normalize=True)
Make a WCS object and convert HEALPix data into WCS projection

NOTE: this re-calculates the mapping; if you have already calculated
the mapping it is much faster to use convert_to_cached_wcs() instead

Parameters
----------
sum_ebins : bool
    sum over energy bins before reprojecting
proj : str
    WCS-projection
oversample : int
    Oversampling factor for WCS map
normalize : bool
    True -> preserve integral by splitting HEALPix values between bins

returns (WCS object, np.ndarray() with reprojected data)
3.648408
3.352963
1.088115
if self._hpx2wcs is None:
    raise Exception('HpxMap.convert_to_cached_wcs() called '
                    'before make_wcs_from_hpx()')
if len(hpx_in.shape) == 1:
    wcs_data = np.ndarray(self._hpx2wcs.npix)
    loop_ebins = False
    hpx_data = hpx_in
elif len(hpx_in.shape) == 2:
    if sum_ebins:
        wcs_data = np.ndarray(self._hpx2wcs.npix)
        hpx_data = hpx_in.sum(0)
        loop_ebins = False
    else:
        wcs_data = np.ndarray((self.counts.shape[0],
                               self._hpx2wcs.npix[0],
                               self._hpx2wcs.npix[1]))
        hpx_data = hpx_in
        loop_ebins = True
else:
    raise Exception('Wrong dimension for HpxMap %i' % len(hpx_in.shape))
if loop_ebins:
    for i in range(hpx_data.shape[0]):
        self._hpx2wcs.fill_wcs_map_from_hpx_data(hpx_data[i],
                                                 wcs_data[i], normalize)
    wcs_data.reshape((self.counts.shape[0],
                      self._hpx2wcs.npix[0],
                      self._hpx2wcs.npix[1]))
    # replace the WCS with a 3D one
    wcs = self.hpx.make_wcs(3, proj=self._wcs_proj,
                            energies=np.log10(self.hpx.ebins),
                            oversample=self._wcs_oversample)
else:
    self._hpx2wcs.fill_wcs_map_from_hpx_data(hpx_data, wcs_data,
                                             normalize)
    wcs_data.reshape(self._hpx2wcs.npix)
    wcs = self._wcs_2d
return wcs, wcs_data
def convert_to_cached_wcs(self, hpx_in, sum_ebins=False, normalize=True)
Make a WCS object and convert HEALPix data into WCS projection

Parameters
----------
hpx_in : `~numpy.ndarray`
    HEALPix input data
sum_ebins : bool
    sum over energy bins before reprojecting
normalize : bool
    True -> preserve integral by splitting HEALPix values between bins

returns (WCS object, np.ndarray() with reprojected data)
2.396157
2.347276
1.020824
sky_coords = self._hpx.get_sky_coords()
if self.hpx.coordsys == 'GAL':
    return SkyCoord(l=sky_coords.T[0], b=sky_coords.T[1],
                    unit='deg', frame='galactic')
else:
    return SkyCoord(ra=sky_coords.T[0], dec=sky_coords.T[1],
                    unit='deg', frame='icrs')
def get_pixel_skydirs(self)
Get a list of sky coordinates for the centers of every pixel.
2.427856
2.241246
1.083261
# We sum over axis 0 in the array, and drop the energy binning in the
# hpx object
return HpxMap(np.sum(self.counts, axis=0), self.hpx.copy_and_drop_energy())
def sum_over_energy(self)
Reduce a counts cube to a counts map
12.535979
10.082846
1.243298
theta = np.pi / 2. - np.radians(lats)
phi = np.radians(lons)
pix = hp.ang2pix(self.hpx.nside, theta, phi, nest=self.hpx.nest)
if self.data.ndim == 2:
    return self.data[:, pix] if ibin is None else self.data[ibin, pix]
else:
    return self.data[pix]
def get_map_values(self, lons, lats, ibin=None)
Return the map values corresponding to a set of coordinates.

Parameters
----------
lons : array-like
    'Longitudes' (RA or GLON)
lats : array-like
    'Latitudes' (DEC or GLAT)
ibin : int or array-like
    Extract data only for a given energy bin. None -> extract data
    for all bins

Returns
----------
vals : numpy.ndarray((n))
    Values of pixels in the flattened map, np.nan used to flag
    coords outside of map
2.570382
3.052686
0.842007
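The lon/lat to (theta, phi) conversion used above is the standard healpy convention (theta is the colatitude); a self-contained check with an assumed nside of 64:

import numpy as np
import healpy as hp

lons, lats = np.array([45.0]), np.array([30.0])
theta = np.pi / 2. - np.radians(lats)  # colatitude in radians
phi = np.radians(lons)
pix = hp.ang2pix(64, theta, phi, nest=False)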
if self.data.ndim == 1:
    theta = np.pi / 2. - np.radians(lat)
    phi = np.radians(lon)
    return hp.pixelfunc.get_interp_val(self.counts, theta, phi,
                                       nest=self.hpx.nest)
else:
    return self._interpolate_cube(lon, lat, egy, interp_log)
def interpolate(self, lon, lat, egy=None, interp_log=True)
Interpolate map values.

Parameters
----------
interp_log : bool
    Interpolate the z-coordinate in logspace.
3.927876
4.318623
0.90952
shape = np.broadcast(lon, lat, egy).shape
lon = lon * np.ones(shape)
lat = lat * np.ones(shape)
theta = np.pi / 2. - np.radians(lat)
phi = np.radians(lon)
vals = []
for i, _ in enumerate(self.hpx.evals):
    v = hp.pixelfunc.get_interp_val(self.counts[i], theta, phi,
                                    nest=self.hpx.nest)
    vals += [np.expand_dims(np.array(v, ndmin=1), -1)]
vals = np.concatenate(vals, axis=-1)
if egy is None:
    return vals.T
egy = egy * np.ones(shape)
if interp_log:
    xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))
else:
    xvals = utils.val_to_pix(self.hpx.evals, egy)
vals = vals.reshape((-1, vals.shape[-1]))
xvals = np.ravel(xvals)
v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals], order=1)
return v.reshape(shape)
def _interpolate_cube(self, lon, lat, egy=None, interp_log=True)
Perform interpolation on a healpix cube. If egy is None then interpolation will be performed on the existing energy planes.
2.901323
2.884204
1.005935
if self.hpx._ipix is None:
    return self.counts
output = np.zeros((self.counts.shape[0], self.hpx._maxpix),
                  self.counts.dtype)
for i in range(self.counts.shape[0]):
    output[i][self.hpx._ipix] = self.counts[i]
return output
def expanded_counts_map(self)
Return the full counts map.
3.618086
3.412813
1.060148