code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
# No pixel index, so build one
if self.hpx._ipix is None:
    if self.data.ndim == 2:
        summed = self.counts.sum(0)
        if pixels is None:
            nz = summed.nonzero()[0]
        else:
            nz = pixels
        data_out = np.vstack([self.data[i].flat[nz]
                              for i in range(self.data.shape[0])])
    else:
        if pixels is None:
            nz = self.data.nonzero()[0]
        else:
            nz = pixels
        data_out = self.data[nz]
    return (nz, data_out)
else:
    if pixels is None:
        return (self.hpx._ipix, self.data)
    # FIXME, can we catch this
    raise RuntimeError('HPX.explicit_counts_map called with pixels '
                       'for a map that already has pixels')
def explicit_counts_map(self, pixels=None)
return a counts map with explicit index scheme Parameters ---------- pixels : `np.ndarray` or None If set, grab only those pixels. If None, grab only non-zero pixels
4.186845
4.282212
0.97773
if self.hpx._ipix is None: flatarray = self.data.flatten() else: flatarray = self.expanded_counts_map() nz = flatarray.nonzero()[0] data_out = flatarray[nz] return (nz, data_out)
def sparse_counts_map(self)
return a counts map with sparse index scheme
7.8626
7.454524
1.054742
if ebins is None:
    ebins = self.ebins
    ectr = self.ectr
else:
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

skydir_cel = skydir.transform_to('icrs')
skydir_gal = skydir.transform_to('galactic')

sig = []
bkg = []
bkg_fit = None
if self._gdiff_fit is not None:
    bkg_fit = []

for psf, exp in zip(self._psf, self._exp):
    coords0 = np.meshgrid(*[skydir_cel.ra.deg, ectr], indexing='ij')
    coords1 = np.meshgrid(*[skydir_cel.dec.deg, ectr], indexing='ij')
    # expv = exp.interpolate(skydir_cel.icrs.ra.deg,
    #                        skydir_cel.icrs.dec.deg,
    #                        ectr)
    expv = exp.interpolate(coords0[0], coords1[0], coords0[1])
    coords0 = np.meshgrid(*[skydir_gal.l.deg, ectr], indexing='ij')
    coords1 = np.meshgrid(*[skydir_gal.b.deg, ectr], indexing='ij')
    bkgv = self._gdiff.interpolate(np.ravel(coords0[0]),
                                   np.ravel(coords1[0]),
                                   np.ravel(coords0[1]))
    bkgv = bkgv.reshape(expv.shape)
    # bkgv = self._gdiff.interpolate(
    #     skydir_gal.l.deg, skydir_gal.b.deg, ectr)
    isov = np.exp(np.interp(np.log(ectr), np.log(self._iso[0]),
                            np.log(self._iso[1])))
    bkgv += isov
    s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn,
                                    egy_dim=1,
                                    spatial_model=self.spatial_model,
                                    spatial_size=self.spatial_size)
    sig += [s0]
    bkg += [b0]

    if self._iso_fit is not None:
        isov_fit = np.exp(np.interp(np.log(ectr),
                                    np.log(self._iso_fit[0]),
                                    np.log(self._iso_fit[1])))
    else:
        isov_fit = isov

    if self._gdiff_fit is not None:
        bkgv_fit = self._gdiff_fit.interpolate(np.ravel(coords0[0]),
                                               np.ravel(coords1[0]),
                                               np.ravel(coords0[1]))
        bkgv_fit = bkgv_fit.reshape(expv.shape)
        bkgv_fit += isov_fit
        s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv_fit, fn,
                                        egy_dim=1,
                                        spatial_model=self.spatial_model,
                                        spatial_size=self.spatial_size)
        bkg_fit += [b0]

sig = np.concatenate([np.expand_dims(t, -1) for t in sig])
bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])
if self._gdiff_fit is not None:
    bkg_fit = np.concatenate([np.expand_dims(t, -1) for t in bkg_fit])
return sig, bkg, bkg_fit
def compute_counts(self, skydir, fn, ebins=None)
Compute signal and background counts for a point source at position ``skydir`` with spectral parameterization ``fn``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` ebins : `~numpy.ndarray` Returns ------- sig : `~numpy.ndarray` Signal counts array. Dimensions are energy, angular separation, and event type. bkg : `~numpy.ndarray` Background counts array. Dimensions are energy, angular separation, and event type.
2.158823
2.131874
1.012641
sig, bkg, bkg_fit = self.compute_counts(skydir, fn) norms = irfs.compute_norm(sig, bkg, ts_thresh, min_counts, sum_axes=[2, 3], rebin_axes=[10, 1], bkg_fit=bkg_fit) npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [2, 3])) norms = np.squeeze(norms) flux = norms * fn.flux(self.ebins[:-1], self.ebins[1:]) eflux = norms * fn.eflux(self.ebins[:-1], self.ebins[1:]) dnde = norms * fn.dnde(self.ectr) e2dnde = self.ectr**2 * dnde return dict(e_min=self.ebins[:-1], e_max=self.ebins[1:], e_ref=self.ectr, npred=npred, flux=flux, eflux=eflux, dnde=dnde, e2dnde=e2dnde)
def diff_flux_threshold(self, skydir, fn, ts_thresh, min_counts)
Compute the differential flux threshold for a point source at position ``skydir`` with spectral parameterization ``fn``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Sky coordinates at which the sensitivity will be evaluated. fn : `~fermipy.spectrum.SpectralFunction` ts_thresh : float Threshold on the detection test statistic (TS). min_counts : float Threshold on the minimum number of counts.
3.353403
3.326029
1.00823
ebins = 10**np.linspace(np.log10(self.ebins[0]), np.log10(self.ebins[-1]), 33) ectr = np.sqrt(ebins[0] * ebins[-1]) sig, bkg, bkg_fit = self.compute_counts(skydir, fn, ebins) norms = irfs.compute_norm(sig, bkg, ts_thresh, min_counts, sum_axes=[1, 2, 3], bkg_fit=bkg_fit, rebin_axes=[4, 10, 1]) npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [1, 2, 3])) npred = np.array(npred, ndmin=1) flux = np.squeeze(norms) * fn.flux(ebins[0], ebins[-1]) eflux = np.squeeze(norms) * fn.eflux(ebins[0], ebins[-1]) dnde = np.squeeze(norms) * fn.dnde(ectr) e2dnde = ectr**2 * dnde o = dict(e_min=self.ebins[0], e_max=self.ebins[-1], e_ref=ectr, npred=npred, flux=flux, eflux=eflux, dnde=dnde, e2dnde=e2dnde) sig, bkg, bkg_fit = self.compute_counts(skydir, fn) npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [2, 3])) flux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] * fn.flux(self.ebins[:-1], self.ebins[1:])) eflux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] * fn.eflux(self.ebins[:-1], self.ebins[1:])) dnde = np.squeeze(np.squeeze(norms, axis=(1, 2, 3)) [:, None] * fn.dnde(self.ectr)) e2dnde = ectr**2 * dnde o['bins'] = dict(npred=npred, flux=flux, eflux=eflux, dnde=dnde, e2dnde=e2dnde, e_min=self.ebins[:-1], e_max=self.ebins[1:], e_ref=self.ectr) return o
def int_flux_threshold(self, skydir, fn, ts_thresh, min_counts)
Compute the integral flux threshold for a point source at position ``skydir`` with spectral parameterization ``fn``.
2.246155
2.27228
0.988503
phi = np.radians(lon)
theta = (np.pi / 2) - np.radians(lat)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
xVals = sin_t * np.cos(phi)
yVals = sin_t * np.sin(phi)
zVals = cos_t
# Stack them into the output array
out = np.vstack((xVals, yVals, zVals)).swapaxes(0, 1)
return out
def coords_to_vec(lon, lat)
Converts longitude and latitude coordinates to a unit 3-vector. Returns array(n,3) with v_x[i], v_y[i], v_z[i] = directional cosines
2.423054
2.482932
0.975884
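A quick standalone check of the conversion above (plain NumPy, nothing fermipy-specific): the returned rows should be unit vectors.

import numpy as np

def coords_to_vec(lon, lat):
    # Same math as above: phi = longitude, theta = colatitude.
    phi = np.radians(lon)
    theta = (np.pi / 2) - np.radians(lat)
    sin_t = np.sin(theta)
    out = np.vstack((sin_t * np.cos(phi),
                     sin_t * np.sin(phi),
                     np.cos(theta))).swapaxes(0, 1)
    return out

vec = coords_to_vec(np.array([0., 90.]), np.array([0., 45.]))
print(vec.shape)                     # (2, 3)
print(np.linalg.norm(vec, axis=1))   # [1. 1.]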
order = int(np.log2(nside)) if order < 0 or order > 13: raise ValueError('HEALPix order must be between 0 and 13, got %i' % order) return HPX_ORDER_TO_PIXSIZE[order]
def get_pixel_size_from_nside(nside)
Returns an estimate of the pixel size from the HEALPix nside coordinate This just uses a lookup table to provide a nice round number for each HEALPix order.
4.356793
3.688331
1.181237
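HPX_ORDER_TO_PIXSIZE is a module-level lookup table that is not shown in this snippet. As a rough cross-check of the values it would contain, healpy can compute the mean pixel spacing directly; a minimal sketch, assuming healpy is available:

import numpy as np
import healpy as hp

for order in range(5):
    nside = 2 ** order
    # hp.nside2resol returns the mean pixel spacing in radians.
    print(order, nside, round(np.degrees(hp.nside2resol(nside)), 2))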
x = h.ebins z = np.arange(npix[-1] + 1) return x, z
def hpx_to_axes(h, npix)
Generate a sequence of bin edge vectors corresponding to the axes of a HPX object.
8.942138
8.883152
1.00664
x, z = hpx_to_axes(h, shape) x = np.sqrt(x[0:-1] * x[1:]) z = z[:-1] + 0.5 x = np.ravel(np.ones(shape) * x[:, np.newaxis]) z = np.ravel(np.ones(shape) * z[np.newaxis, :]) return np.vstack((x, z))
def hpx_to_coords(h, shape)
Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.
2.911006
3.100132
0.938994
npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2)) mult_val = np.ones(npix).T.flatten() sky_crds = hpx.get_sky_coords() pix_crds = wcs.wcs_world2pix(sky_crds, 0).astype(int) ipixs = -1 * np.ones(npix, int).T.flatten() pix_index = npix[1] * pix_crds[0:, 0] + pix_crds[0:, 1] if hpx._ipix is None: for ipix, pix_crd in enumerate(pix_index): ipixs[pix_crd] = ipix else: for pix_crd, ipix in zip(pix_index, hpx._ipix): ipixs[pix_crd] = ipix ipixs = ipixs.reshape(npix).T.flatten() return ipixs, mult_val, npix
def make_hpx_to_wcs_mapping_centers(hpx, wcs)
Make the mapping data needed to go from HPX pixelization to a WCS-based array Parameters ---------- hpx : `~fermipy.hpx_utils.HPX` The healpix mapping (an HPX object) wcs : `~astropy.wcs.WCS` The wcs mapping (a pywcs.wcs object) Returns ------- ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel -1 indicates the wcs pixel does not contain the center of a HEALPix pixel mult_val : array(nx,ny) of 1. npix : tuple(nx,ny) with the shape of the wcs grid
2.654225
2.483519
1.068736
npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))
pix_crds = np.dstack(np.meshgrid(np.arange(npix[0]),
                                 np.arange(npix[1]))).swapaxes(0, 1).reshape((npix[0] * npix[1], 2))
if wcs.wcs.naxis == 2:
    sky_crds = wcs.wcs_pix2world(pix_crds, 0)
else:
    use_wcs = wcs.dropaxis(2)
    sky_crds = use_wcs.wcs_pix2world(pix_crds, 0)
sky_crds *= np.radians(1.)
sky_crds[0:, 1] = (np.pi / 2) - sky_crds[0:, 1]
fullmask = np.isnan(sky_crds)
mask = (fullmask[0:, 0] + fullmask[0:, 1]) == 0
ipixs = -1 * np.ones(npix, int).T.flatten()
ipixs[mask] = hp.pixelfunc.ang2pix(hpx.nside, sky_crds[0:, 1][mask],
                                   sky_crds[0:, 0][mask], hpx.nest)
# Here we are counting the number of HEALPix pixels each WCS pixel points to;
# this could probably be vectorized by filling a histogram.
d_count = {}
for ipix in ipixs:
    if ipix in d_count:
        d_count[ipix] += 1
    else:
        d_count[ipix] = 1
# Here we are getting a multiplicative factor that tells us how to split up
# the counts in each HEALPix pixel (by dividing the corresponding WCS pixels
# by the number of associated HEALPix pixels).
# This could also likely be vectorized.
mult_val = np.ones(ipixs.shape)
for i, ipix in enumerate(ipixs):
    mult_val[i] /= d_count[ipix]
ipixs = ipixs.reshape(npix).flatten()
mult_val = mult_val.reshape(npix).flatten()
return ipixs, mult_val, npix
def make_hpx_to_wcs_mapping(hpx, wcs)
Make the mapping data needed to go from HPX pixelization to a WCS-based array Parameters ---------- hpx : `~fermipy.hpx_utils.HPX` The healpix mapping (an HPX object) wcs : `~astropy.wcs.WCS` The wcs mapping (a pywcs.wcs object) Returns ------- ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel mult_val : array(nx,ny) of 1./number of wcs pixels pointing at each HEALPix pixel npix : tuple(nx,ny) with the shape of the wcs grid
2.940483
2.763163
1.064173
m = re.match(r'([A-Za-z\_]*?)\((.*?)\)', region) if m is None: raise Exception('Failed to parse hpx region string.') if not m.group(1): return re.split(',', m.group(2)) else: return [m.group(1)] + re.split(',', m.group(2))
def parse_hpxregion(region)
Parse the HPX_REG header keyword into a list of tokens.
3.048781
2.906848
1.048827
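A standalone check of the tokenizer above (pure re, no fermipy imports needed):

import re

def parse_hpxregion(region):
    # Split e.g. 'DISK(110.,75.,10.)' into ['DISK', '110.', '75.', '10.'].
    m = re.match(r'([A-Za-z\_]*?)\((.*?)\)', region)
    if m is None:
        raise ValueError('Failed to parse hpx region string.')
    if not m.group(1):
        return re.split(',', m.group(2))
    return [m.group(1)] + re.split(',', m.group(2))

print(parse_hpxregion('DISK(110.,75.,10.)'))
# ['DISK', '110.', '75.', '10.']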
nside = np.power(2, np.floor(np.log2(upix / 4) / 2)).astype(int) pix = upix - 4 * np.power(nside, 2) return pix, nside
def upix_to_pix(upix)
Get the pixel index and nside from a unique pixel number.
3.822077
3.229403
1.183524
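A round-trip sketch of the unique-pixel encoding that upix_to_pix inverts, i.e. upix = pix + 4 * nside**2; the pix_to_upix helper here is hypothetical, named only for this example:

import numpy as np

def pix_to_upix(pix, nside):
    # Hypothetical inverse of upix_to_pix under upix = pix + 4 * nside**2.
    return pix + 4 * np.power(nside, 2)

nside = 8
pix = np.arange(12 * nside ** 2)
upix = pix_to_upix(pix, nside)
nside_out = np.power(2, np.floor(np.log2(upix / 4) / 2)).astype(int)
pix_out = upix - 4 * np.power(nside_out, 2)
print(np.all(pix_out == pix) and np.all(nside_out == nside))  # True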
return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=pixels)
def create_hpx(cls, nside, nest, coordsys='CEL', order=-1, ebins=None, region=None, conv=HPX_Conv('FGST_CCUBE'), pixels=None)
Create a HPX object. Parameters ---------- nside : int HEALPix nside parameter nest : bool True for HEALPix "NESTED" indexing scheme, False for "RING" scheme. coordsys : str "CEL" or "GAL" order : int nside = 2**order ebins : `~numpy.ndarray` Energy bin edges region : str Allows for partial-sky mappings conv : `HPX_Conv` Object defining the convention for column names and the like pixels : `np.array` or `None` For use with 'EXPLICIT' region string
2.090409
4.704554
0.444337
# Hopefully the file contains the HPX_CONV keyword specifying
# the convention used
try:
    return header['HPX_CONV']
except KeyError:
    pass

indxschm = header.get('INDXSCHM', None)

# Try based on the EXTNAME keyword
extname = header.get('EXTNAME', None)
if extname == 'HPXEXPOSURES':
    return 'FGST_BEXPCUBE'
elif extname == 'SKYMAP2':
    if 'COORDTYPE' in header.keys():
        return 'GALPROP'
    else:
        return 'GALPROP2'

# Check for the INDXSCHM keyword
if indxschm == 'SPARSE':
    return 'FGST_SRCMAP_SPARSE'

# Check the name of the first column
colname = header['TTYPE1']
if colname == 'PIX':
    colname = header['TTYPE2']

if colname == 'KEY':
    return 'FGST_SRCMAP_SPARSE'
elif colname == 'ENERGY1':
    return 'FGST_TEMPLATE'
elif colname == 'COSBINS':
    return 'FGST_LTCUBE'
elif colname == 'Bin0':
    return 'GALPROP'
elif colname in ['CHANNEL1', 'Bin 0']:
    if extname == 'SKYMAP':
        return 'FGST_CCUBE'
    else:
        return 'FGST_SRCMAP'
else:
    raise ValueError("Could not identify HEALPix convention")
def identify_HPX_convention(header)
Identify the convention used to write this file
4.800047
4.787224
1.002679
convname = HPX.identify_HPX_convention(header) conv = HPX_FITS_CONVENTIONS[convname] if conv.convname not in ['GALPROP', 'GALPROP2']: if header["PIXTYPE"] != "HEALPIX": raise Exception("PIXTYPE != HEALPIX") if header["ORDERING"] == "RING": nest = False elif header["ORDERING"] == "NESTED": nest = True else: raise Exception("ORDERING != RING | NESTED") try: order = header["ORDER"] except KeyError: order = -1 if order < 0: nside = header["NSIDE"] else: nside = -1 try: coordsys = header[conv.coordsys] except KeyError: coordsys = header['COORDSYS'] try: region = header["HPX_REG"] except KeyError: try: region = header["HPXREGION"] except KeyError: region = None try: if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']: use_pixels = pixels else: use_pixels = None except KeyError: use_pixels = None return cls(nside, nest, coordsys, order, ebins, region=region, conv=conv, pixels=use_pixels)
def create_from_header(cls, header, ebins=None, pixels=None)
Creates an HPX object from a FITS header. header : The FITS header ebins : Energy bin edges [optional]
3.084068
3.138178
0.982757
convname = HPX.identify_HPX_convention(hdu.header) conv = HPX_FITS_CONVENTIONS[convname] try: pixels = hdu.data[conv.idxstring] except KeyError: pixels = None return cls.create_from_header(hdu.header, ebins, pixels)
def create_from_hdu(cls, hdu, ebins=None)
Creates an HPX object from a FITS HDU. hdu : The FITS hdu ebins : Energy bin edges [optional]
6.560545
7.508748
0.87372
cards = [fits.Card("TELESCOP", "GLAST"), fits.Card("INSTRUME", "LAT"), fits.Card(self._conv.coordsys, self._coordsys), fits.Card("PIXTYPE", "HEALPIX"), fits.Card("ORDERING", self.ordering), fits.Card("ORDER", self._order), fits.Card("NSIDE", self._nside), fits.Card("FIRSTPIX", 0), fits.Card("LASTPIX", self._maxpix - 1), fits.Card("HPX_CONV", self._conv.convname)] if self._coordsys == "CEL": cards.append(fits.Card("EQUINOX", 2000.0, "Equinox of RA & DEC specifications")) if self._region is not None: cards.append(fits.Card("HPX_REG", self._region)) cards.append(fits.Card("INDXSCHM", "PARTIAL")) elif self._ipix is not None: cards.append(fits.Card("INDXSCHM", "EXPLICIT")) else: if self._conv.convname in ['FGST_SRCMAP_SPARSE']: cards.append(fits.Card("INDXSCHM", "SPARSE")) else: cards.append(fits.Card("INDXSCHM", "IMPLICIT")) header = fits.Header(cards) return header
def make_header(self)
Builds and returns FITS header for this HEALPix map
3.299965
3.060696
1.078175
shape = data.shape extname = kwargs.get('extname', self.conv.extname) if shape[-1] != self._npix: raise Exception( "Size of data array does not match number of pixels") cols = [] if self._ipix is not None: cols.append(fits.Column(self.conv.idxstring, "J", array=self._ipix)) if self.conv.convname == 'FGST_SRCMAP_SPARSE': nonzero = data.nonzero() nfilled = len(nonzero[0]) if len(shape) == 1: cols.append(fits.Column("PIX", "J", array=nonzero[0].astype(int))) cols.append(fits.Column("VALUE", "E", array=data.flat[nonzero].astype(float).reshape(nfilled))) elif len(shape) == 2: keys = self._npix * nonzero[0] + nonzero[1] cols.append(fits.Column("PIX", "J", array=nonzero[1].reshape(nfilled))) cols.append(fits.Column("CHANNEL", "I", array=nonzero[0].reshape(nfilled))) cols.append(fits.Column("VALUE", "E", array=data.flat[keys].astype(float).reshape(nfilled))) else: raise Exception("HPX.write_fits only handles 1D and 2D maps") else: if len(shape) == 1: cols.append(fits.Column(self.conv.colname( indx=self.conv.firstcol), "E", array=data.astype(float))) elif len(shape) == 2: for i in range(shape[0]): cols.append(fits.Column(self.conv.colname( indx=i + self.conv.firstcol), "E", array=data[i].astype(float))) else: raise Exception("HPX.write_fits only handles 1D and 2D maps") header = self.make_header() hdu = fits.BinTableHDU.from_columns(cols, header=header, name=extname) return hdu
def make_hdu(self, data, **kwargs)
Builds and returns a FITS HDU with input data data : The data being stored Keyword arguments ----------------- extname : The HDU extension name colbase : The prefix for column names
2.610653
2.659608
0.981593
if self._ebins is None: return None cols = [fits.Column("CHANNEL", "I", array=np.arange(1, len(self._ebins))), fits.Column("E_MIN", "1E", unit='keV', array=1000 * self._ebins[0:-1]), fits.Column("E_MAX", "1E", unit='keV', array=1000 * self._ebins[1:])] hdu = fits.BinTableHDU.from_columns( cols, self.make_header(), name=extname) return hdu
def make_energy_bounds_hdu(self, extname="EBOUNDS")
Builds and returns a FITS HDU with the energy bin boundaries extname : The HDU extension name
2.838362
2.956691
0.959979
if self._evals is None: return None cols = [fits.Column("ENERGY", "1E", unit='MeV', array=self._evals)] hdu = fits.BinTableHDU.from_columns( cols, self.make_header(), name=extname) return hdu
def make_energies_hdu(self, extname="ENERGIES")
Builds and returns a FITS HDU with the energy values extname : The HDU extension name
3.93822
4.48019
0.87903
hdu_prim = fits.PrimaryHDU() hdu_hpx = self.make_hdu(data, extname=extname) hl = [hdu_prim, hdu_hpx] if self.conv.energy_hdu == 'EBOUNDS': hdu_energy = self.make_energy_bounds_hdu() elif self.conv.energy_hdu == 'ENERGIES': hdu_energy = self.make_energies_hdu() else: hdu_energy = None if hdu_energy is not None: hl.append(hdu_energy) hdulist = fits.HDUList(hl) hdulist.writeto(outfile, overwrite=clobber)
def write_fits(self, data, outfile, extname="SKYMAP", clobber=True)
Write input data to a FITS file data : The data being stored outfile : The name of the output file extname : The HDU extension name clobber : True -> overwrite existing files
2.487215
2.662815
0.934055
tokens = parse_hpxregion(region) if tokens[0] == 'DISK': vec = coords_to_vec(float(tokens[1]), float(tokens[2])) ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])), inclusive=False, nest=nest) elif tokens[0] == 'DISK_INC': vec = coords_to_vec(float(tokens[1]), float(tokens[2])) ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])), inclusive=True, fact=int(tokens[4]), nest=nest) elif tokens[0] == 'HPX_PIXEL': nside_pix = int(tokens[2]) if tokens[1] == 'NESTED': ipix_ring = hp.nest2ring(nside_pix, int(tokens[3])) elif tokens[1] == 'RING': ipix_ring = int(tokens[3]) else: raise Exception( "Did not recognize ordering scheme %s" % tokens[1]) ilist = match_hpx_pixel(nside, nest, nside_pix, ipix_ring) else: raise Exception( "HPX.get_index_list did not recognize region type %s" % tokens[0]) return ilist
def get_index_list(nside, nest, region)
Returns the list of pixels indices for all the pixels in a region nside : HEALPix nside parameter nest : True for 'NESTED', False = 'RING' region : HEALPix region string
2.653024
2.530991
1.048216
if region is None: if coordsys == "GAL": c = SkyCoord(0., 0., frame=Galactic, unit="deg") elif coordsys == "CEL": c = SkyCoord(0., 0., frame=ICRS, unit="deg") return c tokens = parse_hpxregion(region) if tokens[0] in ['DISK', 'DISK_INC']: if coordsys == "GAL": c = SkyCoord(float(tokens[1]), float( tokens[2]), frame=Galactic, unit="deg") elif coordsys == "CEL": c = SkyCoord(float(tokens[1]), float( tokens[2]), frame=ICRS, unit="deg") return c elif tokens[0] == 'HPX_PIXEL': nside_pix = int(tokens[2]) ipix_pix = int(tokens[3]) if tokens[1] == 'NESTED': nest_pix = True elif tokens[1] == 'RING': nest_pix = False else: raise Exception( "Did not recognize ordering scheme %s" % tokens[1]) theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix) lat = np.degrees((np.pi / 2) - theta) lon = np.degrees(phi) if coordsys == "GAL": c = SkyCoord(lon, lat, frame=Galactic, unit="deg") elif coordsys == "CEL": c = SkyCoord(lon, lat, frame=ICRS, unit="deg") return c else: raise Exception( "HPX.get_ref_dir did not recognize region type %s" % tokens[0]) return None
def get_ref_dir(region, coordsys)
Finds and returns the reference direction for a given HEALPix region string. region : a string describing a HEALPix region coordsys : coordinate system, GAL | CEL
2.167297
2.111082
1.026628
if region is None: return 180. tokens = parse_hpxregion(region) if tokens[0] in ['DISK', 'DISK_INC']: return float(tokens[3]) elif tokens[0] == 'HPX_PIXEL': pixel_size = get_pixel_size_from_nside(int(tokens[2])) return 2. * pixel_size else: raise Exception( "HPX.get_region_size did not recognize region type %s" % tokens[0]) return None
def get_region_size(region)
Finds and returns the approximate size of region (in degrees) from a HEALPix region string.
5.333633
3.932266
1.356377
w = WCS(naxis=naxis) skydir = self.get_ref_dir(self._region, self.coordsys) if self.coordsys == 'CEL': w.wcs.ctype[0] = 'RA---%s' % (proj) w.wcs.ctype[1] = 'DEC--%s' % (proj) w.wcs.crval[0] = skydir.ra.deg w.wcs.crval[1] = skydir.dec.deg elif self.coordsys == 'GAL': w.wcs.ctype[0] = 'GLON-%s' % (proj) w.wcs.ctype[1] = 'GLAT-%s' % (proj) w.wcs.crval[0] = skydir.galactic.l.deg w.wcs.crval[1] = skydir.galactic.b.deg else: raise Exception('Unrecognized coordinate system.') pixsize = get_pixel_size_from_nside(self.nside) roisize = self.get_region_size(self._region) allsky = False if roisize > 45: roisize = 90 allsky = True npixels = int(2. * roisize / pixsize) * oversample crpix = npixels / 2. if allsky: w.wcs.crpix[0] = 2 * crpix npix = (2 * npixels, npixels) else: w.wcs.crpix[0] = crpix npix = (npixels, npixels) w.wcs.crpix[1] = crpix w.wcs.cdelt[0] = -pixsize / oversample w.wcs.cdelt[1] = pixsize / oversample if naxis == 3: w.wcs.crpix[2] = 1 w.wcs.ctype[2] = 'Energy' if energies is not None: w.wcs.crval[2] = 10 ** energies[0] w.wcs.cdelt[2] = 10 ** energies[1] - 10 ** energies[0] w = WCS(w.to_header()) wcs_proj = WCSProj(w, npix) return wcs_proj
def make_wcs(self, naxis=2, proj='CAR', energies=None, oversample=2)
Make a WCS projection appropriate for this HPX pixelization
2.002733
1.985182
1.008841
if self._ipix is None: theta, phi = hp.pix2ang( self._nside, list(range(self._npix)), self._nest) else: theta, phi = hp.pix2ang(self._nside, self._ipix, self._nest) lat = np.degrees((np.pi / 2) - theta) lon = np.degrees(phi) return np.vstack([lon, lat]).T
def get_sky_coords(self)
Get the sky coordinates of all the pixels in this pixelization
3.011593
3.070489
0.980819
theta = np.radians(90. - lats) phi = np.radians(lons) return hp.ang2pix(self.nside, theta, phi, self.nest)
def get_pixel_indices(self, lats, lons)
"Return the indices in the flat array corresponding to a set of coordinates
3.215683
3.213001
1.000835
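A standalone equivalent of get_pixel_indices needs only healpy and the colatitude convention above; a minimal sketch, assuming healpy is available:

import numpy as np
import healpy as hp

lats = np.array([0., 30., -45.])
lons = np.array([0., 90., 180.])
theta = np.radians(90. - lats)   # colatitude
phi = np.radians(lons)
print(hp.ang2pix(16, theta, phi, nest=False))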
if self.coordsys in ['CEL', 'EQU']: skydir = skydir.transform_to('icrs') lon = skydir.ra.deg lat = skydir.dec.deg else: skydir = skydir.transform_to('galactic') lon = skydir.l.deg lat = skydir.b.deg return self.get_pixel_indices(lat, lon)
def skydir_to_pixel(self, skydir)
Return the pixel index of a SkyCoord object.
2.47321
2.356677
1.049448
from fermipy.skymap import Map hpx_header = self._hpx.make_header() index_map = Map(self.ipixs, self.wcs) mult_map = Map(self.mult_val, self.wcs) prim_hdu = index_map.create_primary_hdu() mult_hdu = mult_map.create_image_hdu() for key in ['COORDSYS', 'ORDERING', 'PIXTYPE', 'ORDER', 'NSIDE', 'FIRSTPIX', 'LASTPIX']: prim_hdu.header[key] = hpx_header[key] mult_hdu.header[key] = hpx_header[key] hdulist = fits.HDUList([prim_hdu, mult_hdu]) hdulist.writeto(fitsfile, overwrite=clobber)
def write_to_fitsfile(self, fitsfile, clobber=True)
Write this mapping to a FITS file, to avoid having to recompute it
3.265838
3.287287
0.993475
from fermipy.skymap import Map index_map = Map.create_from_fits(fitsfile) mult_map = Map.create_from_fits(fitsfile, hdu=1) ff = fits.open(fitsfile) hpx = HPX.create_from_hdu(ff[0]) mapping_data = dict(ipixs=index_map.counts, mult_val=mult_map.counts, npix=mult_map.counts.shape) return cls(hpx, index_map.wcs, mapping_data)
def create_from_fitsfile(cls, fitsfile)
Read a fits file and use it to make a mapping
4.414606
4.578504
0.964203
# FIXME, there really ought to be a better way to do this
hpx_naxis = len(hpx_data.shape)
wcs_naxis = len(wcs_data.shape)
if hpx_naxis + 1 != wcs_naxis:
    raise ValueError("HPX.fill_wcs_map_from_hpx_data: HPX naxis should be "
                     "1 less than WCS naxis: %i, %i" % (hpx_naxis, wcs_naxis))
if hpx_naxis == 2:
    if hpx_data.shape[1] != wcs_data.shape[2]:
        raise ValueError("HPX.fill_wcs_map_from_hpx_data: size of energy "
                         "axes don't match: %i, %i" % (hpx_data.shape[1],
                                                       wcs_data.shape[2]))
lmap_valid = self._lmap[self._valid]
wcs_layer_shape = wcs_data.shape[0] * wcs_data.shape[1]
if hpx_naxis == 2:
    for i in range(hpx_data.shape[1]):
        wcs_data_layer = np.zeros(wcs_layer_shape)
        wcs_data_layer[self._valid] = hpx_data[:, i][lmap_valid]
        orig_value = wcs_data_layer.sum()
        if normalize:
            wcs_data_layer *= self._mult_val
        wcs_data[:, :, i].flat = wcs_data_layer
else:
    wcs_data_flat = np.zeros(wcs_layer_shape)
    wcs_data_flat[self._valid] = hpx_data[lmap_valid]
    if normalize:
        wcs_data_flat *= self._mult_val
    wcs_data.flat = wcs_data_flat
def fill_wcs_map_from_hpx_data(self, hpx_data, wcs_data, normalize=True)
Fills the wcs map from the hpx data using the pre-calculated mappings hpx_data : the input HEALPix data wcs_data : the data array being filled normalize : True -> preserve integral by splitting HEALPix values between bins
2.474333
2.513815
0.984294
wcs_data = np.zeros(wcs.npix) self.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize) return wcs_data
def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True)
Creates and fills a wcs map from the hpx data using the pre-calculated mappings hpx_data : the input HEALPix data wcs : the WCS object normalize : True -> preserve integral by splitting HEALPix values between bins
2.941787
3.314463
0.887561
config = yaml.safe_load(open(configfile)) emin = config['selection']['emin'] emax = config['selection']['emax'] log_emin = np.log10(emin) log_emax = np.log10(emax) ndec = log_emax - log_emin binsperdec = config['binning']['binsperdec'] nebins = int(np.round(binsperdec * ndec)) return nebins
def _get_enum_bins(configfile)
Get the number of energy bins in the SED Parameters ---------- configfile : str Fermipy configuration file. Returns ------- nbins : int The number of energy bins
2.57969
2.745755
0.93952
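The arithmetic in _get_enum_bins is just the number of decades spanned times the bins per decade, rounded; a standalone check with made-up example values (emin=100 MeV, emax=100 GeV, binsperdec=8):

import numpy as np

emin, emax, binsperdec = 100., 100000., 8
ndec = np.log10(emax) - np.log10(emin)   # 3 decades
print(int(np.round(binsperdec * ndec)))  # 24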
nfiles = len(filelist) shape = (nbins, nfiles) outdict = {} for c in collist: outdict[c['name']] = np.ndarray(shape) sys.stdout.write('Working on %i files: ' % nfiles) sys.stdout.flush() for i, f in enumerate(filelist): sys.stdout.write('.') sys.stdout.flush() tab = Table.read(f, hdu) for c in collist: cname = c['name'] outdict[cname][:, i] = tab[cname].data sys.stdout.write('!\n') outcols = [] for c in collist: cname = c['name'] if 'unit' in c: col = Column(data=outdict[cname], name=cname, dtype=float, shape=nfiles, unit=c['unit']) else: col = Column(data=outdict[cname], name=cname, dtype=float, shape=nfiles) outcols.append(col) tab = Table(data=outcols) return tab
def fill_output_table(filelist, hdu, collist, nbins)
Fill the arrays from the files in filelist Parameters ---------- filelist : list List of the files to get data from. hdu : str Name of the HDU containing the table with the input data. collist : list List of the column names nbins : int Number of bins in the input data arrays Returns ------- table : astropy.table.Table A table with all the requested data extracted.
2.016013
2.177315
0.925917
nfiles = len(filelist) out_tables = [] out_names = [] for hdu in hdus: sys.stdout.write('Working on %i files for %s: ' % (nfiles, hdu)) sys.stdout.flush() tlist = [] for f in filelist: try: tab = Table.read(f, hdu) tlist.append(tab) sys.stdout.write('.') except KeyError: sys.stdout.write('x') sys.stdout.flush() sys.stdout.write('!\n') if tlist: out_table = vstack(tlist) out_tables.append(out_table) out_names.append(hdu) return (out_tables, out_names)
def vstack_tables(filelist, hdus)
vstack a set of HDUs from a set of files Parameters ---------- filelist : list List of the files to get data from. hdus : list Names of the HDU containing the table with the input data. Returns ------- out_tables : list A list with the table with all the requested data extracted. out_names : list A list with the names of the tables.
2.196976
2.298064
0.956012
mean = np.mean(data, axis=0) std = np.std(data, axis=0) median = np.median(data, axis=0) q02, q16, q84, q97 = np.percentile(data, [2.5, 16, 84, 97.5], axis=0) o = dict(mean=mean, std=std, median=median, q02=q02, q16=q16, q84=q84, q97=q97) return o
def collect_summary_stats(data)
Collect summary statistics from an array This creates a dictionary of output arrays of summary statistics, with the input array dimension reduced by one. Parameters ---------- data : `numpy.ndarray` Array with the collected input data Returns ------- output : dict Dictionary of `np.ndarray` with the summary data. These include mean, std, median, and 4 quantiles (0.025, 0.16, 0.84, 0.975).
1.789443
1.668093
1.072748
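A self-contained demonstration of collect_summary_stats on toy data; note that the leading axis of the input is the one reduced:

import numpy as np

# Toy input: 1000 realizations of a 5-element quantity.
data = np.random.normal(loc=2.0, scale=0.5, size=(1000, 5))
q02, q16, q84, q97 = np.percentile(data, [2.5, 16, 84, 97.5], axis=0)
stats = dict(mean=np.mean(data, axis=0), std=np.std(data, axis=0),
             median=np.median(data, axis=0),
             q02=q02, q16=q16, q84=q84, q97=q97)
print(stats['mean'].shape)  # (5,) -- one dimension fewer than the input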
for col in colnames: col_in = table_in[col] stats = collect_summary_stats(col_in.data) for k, v in stats.items(): out_name = "%s_%s" % (col, k) col_out = Column(data=np.vstack( [v]), name=out_name, dtype=col_in.dtype, shape=v.shape, unit=col_in.unit) table_out.add_column(col_out)
def add_summary_stats_to_table(table_in, table_out, colnames)
Collect summary statistics from an input table and add them to an output table Parameters ---------- table_in : `astropy.table.Table` Table with the input data. table_out : `astropy.table.Table` Table with the output data. colnames : list List of the column names to get summary statistics for.
2.758394
2.716614
1.01538
del_cols = ['dnde', 'dnde_err', 'dnde_errp', 'dnde_errn', 'dnde_ul', 'e2dnde', 'e2dnde_err', 'e2dnde_errp', 'e2dnde_errn', 'e2dnde_ul', 'norm', 'norm_err', 'norm_errp', 'norm_errn', 'norm_ul', 'ts'] stats_cols = ['dnde', 'dnde_ul', 'e2dnde', 'e2dnde_ul', 'norm', 'norm_ul'] table_out = Table(sed_table[0]) table_out.remove_columns(del_cols) add_summary_stats_to_table(sed_table, table_out, stats_cols) return table_out
def summarize_sed_results(sed_table)
Build a stats summary table for a table that has all the SED results
2.235179
2.170545
1.029778
args = self._parser.parse_args(argv) sedfile = args.sed_file if is_not_null(args.config): configfile = os.path.join(os.path.dirname(sedfile), args.config) else: configfile = os.path.join(os.path.dirname(sedfile), 'config.yaml') nbins = _get_enum_bins(configfile) first = args.seed last = first + args.nsims flist = [sedfile.replace("_SEED.fits", "_%06i.fits" % seed) for seed in range(first, last)] outfile = args.outfile summaryfile = args.summaryfile outtable = fill_output_table( flist, "SED", CollectSED.collist, nbins=nbins) if is_not_null(outfile): outtable.write(outfile) if is_not_null(summaryfile): summary = summarize_sed_results(outtable) summary.write(summaryfile)
def run_analysis(self, argv)
Run this analysis
4.033673
4.05137
0.995632
job_configs = {} ttype = args['ttype'] (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile( args, require_sim_name=True) if targets_yaml is None: return job_configs write_full = args['write_full'] targets = load_yaml(targets_yaml) base_config = dict(config=args['config'], nsims=args['nsims'], seed=args['seed']) first = args['seed'] last = first + args['nsims'] - 1 for target_name, profile_list in targets.items(): for profile in profile_list: full_key = "%s:%s:%s" % (target_name, profile, sim) name_keys = dict(target_type=ttype, target_name=target_name, sim_name=sim, profile=profile, fullpath=True) sed_file = NAME_FACTORY.sim_sedfile(**name_keys) outfile = sed_file.replace( '_SEED.fits', '_collected_%06i_%06i.fits' % (first, last)) logfile = make_nfs_path(outfile.replace('.fits', '.log')) if not write_full: outfile = None summaryfile = sed_file.replace( '_SEED.fits', '_summary_%06i_%06i.fits' % (first, last)) job_config = base_config.copy() job_config.update(dict(sed_file=sed_file, outfile=outfile, summaryfile=summaryfile, logfile=logfile)) job_configs[full_key] = job_config return job_configs
def build_job_configs(self, args)
Hook to build job configurations
3.558013
3.548303
1.002737
self.base_dict.update(**yaml.safe_load(open(yamlfile)))
def update_base_dict(self, yamlfile)
Update the values in baseline dictionary used to resolve names
4.415
4.50611
0.979781
kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) localpath = format_string.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
def _format_from_dict(self, format_string, **kwargs)
Return a file name formatted from dictionary components
4.146026
3.879538
1.068691
if 'seed' not in kwargs: kwargs['seed'] = 'SEED' return self._format_from_dict(NameFactory.sim_sedfile_format, **kwargs)
def sim_sedfile(self, **kwargs)
Return the name for the simulated SED file for a particular target
7.543884
7.478336
1.008765
kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) return NameFactory.stamp_format.format(**kwargs_copy)
def stamp(self, **kwargs)
Return the path for a stamp file for a scatter gather job
6.878784
5.910426
1.163839
ttype = args.get('ttype') if is_null(ttype): sys.stderr.write('Target type must be specified') return (None, None) sim = args.get('sim') if is_null(sim): if require_sim_name: sys.stderr.write('Simulation scenario must be specified') return (None, None) else: sim = None name_keys = dict(target_type=ttype, targetlist='target_list.yaml', sim_name=sim, fullpath=True) if sim is None: targetfile = self.targetfile(**name_keys) else: targetfile = self.sim_targetfile(**name_keys) targets_override = args.get('targetfile') if is_not_null(targets_override): targetfile = targets_override return (targetfile, sim)
def resolve_targetfile(self, args, require_sim_name=False)
Get the name of the targetfile based on the job arguments
3.391123
3.41327
0.993512
ttype = args.get('ttype') if is_null(ttype): sys.stderr.write('Target type must be specified') return None name_keys = dict(target_type=ttype, fullpath=True) randconfig = self.randconfig(**name_keys) rand_override = args.get('rand_config') if is_not_null(rand_override): randconfig = rand_override return randconfig
def resolve_randconfig(self, args)
Get the name of the random config file based on the job arguments
4.601144
4.384315
1.049455
usage = "usage: %(prog)s [options] " description = "Run gtselect and gtmktime on one or more FT1 files. " "Note that gtmktime will be skipped if no FT2 file is provided." parser = argparse.ArgumentParser(usage=usage, description=description) add_lsf_args(parser) parser.add_argument('--zmax', default=100., type=float, help='') parser.add_argument('--dcostheta', default=0.025, type=float, help='') parser.add_argument('--binsz', default=1.0, type=float, help='') parser.add_argument('--outdir', default=None, type=str, help='Path to output directory used when merge=False.') parser.add_argument('--outfile', default=None, type=str, help='Path to output file used when merge=True.') parser.add_argument('--scfile', default=None, type=str, help='', required=True) parser.add_argument('--dry_run', default=False, action='store_true') parser.add_argument('--overwrite', default=False, action='store_true') parser.add_argument('--merge', default=False, action='store_true', help='Merge input FT1 files into a single file.') parser.add_argument('files', nargs='+', default=None, help='List of directories in which the analysis will ' 'be run.') args = parser.parse_args() args.outdir = os.path.abspath(args.outdir) args.scfile = os.path.abspath(args.scfile) mkdir(args.outdir) input_files = [[os.path.abspath(x)] for x in args.files] output_files = [os.path.join(args.outdir, os.path.basename(x)) for x in args.files] if args.batch: opts = copy.deepcopy(args.__dict__) opts.pop('files') opts.pop('batch') submit_jobs('python ' + os.path.abspath(__file__.rstrip('cd')), input_files, output_files, {k: v for k, v in opts.items()}) sys.exit(0) logger = Logger.get(os.path.basename(__file__), None, logging.INFO) logger.info('Starting.') cwd = os.getcwd() user = os.environ['USER'] tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch') os.chdir(tmpdir) logger.info('tmpdir %s', tmpdir) logger.info('outdir %s', args.outdir) logger.info('outfile %s', args.outfile) for infiles, outfile in zip(input_files, output_files): logger.info('infiles %s', pprint.pformat(infiles)) logger.info('outfile %s', outfile) kw = dict(evfile='list.txt', scfile=args.scfile, outfile='ltcube.fits', binsz=args.binsz, dcostheta=args.dcostheta, zmax=args.zmax) create_filelist(infiles, 'list.txt') staged_outfile = kw['outfile'] run_gtapp('gtltcube', logger, kw) logger.info('cp %s %s', staged_outfile, outfile) shutil.copy(staged_outfile, outfile) os.chdir(cwd) logger.info('Deleting %s', tmpdir) shutil.rmtree(tmpdir) logger.info('Done.')
def main()
Note that gtmktime will be skipped if no FT2 file is provided.
2.800202
2.667516
1.049741
# Update Column names
for colname in list(tab.columns.keys()):
    newname = colname.lower()
    newname = newname.replace('dfde', 'dnde')
    if tab.columns[colname].name == newname:
        continue
    tab.columns[colname].name = newname
return tab
def convert_sed_cols(tab)
Cast SED column names to lowercase.
4.831482
4.733324
1.020738
from scipy.interpolate import splev return splev(x, self._sp, der=der)
def derivative(self, x, der=1)
Return the derivative at an array of input values x : the inputs der : the order of derivative
6.397968
5.979826
1.069925
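self._sp is a spline representation built elsewhere and not shown here; a minimal standalone sketch of the same splev pattern, with the spline fit via scipy.interpolate.splrep:

import numpy as np
from scipy.interpolate import splrep, splev

x = np.linspace(0., 2. * np.pi, 50)
sp = splrep(x, np.sin(x))        # cubic B-spline representation
xs = np.array([0., np.pi / 2, np.pi])
print(splev(xs, sp, der=1))      # first derivative, ~[1, 0, -1]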
min_y = np.min(self._interp.y) if self._interp.y[0] == min_y: self._mle = self._interp.x[0] elif self._interp.y[-1] == min_y: self._mle = self._interp.x[-1] else: argmin_y = np.argmin(self._interp.y) ix0 = max(argmin_y - 4, 0) ix1 = min(argmin_y + 4, len(self._interp.x) - 1) while np.sign(self._interp.derivative(self._interp.x[ix0])) == \ np.sign(self._interp.derivative(self._interp.x[ix1])): ix0 += 1 self._mle = scipy.optimize.brentq(self._interp.derivative, self._interp.x[ix0], self._interp.x[ix1], xtol=1e-10 * np.median(self._interp.x))
def _compute_mle(self)
Compute the maximum likelihood estimate. Calls `scipy.optimize.brentq` to find the roots of the derivative.
2.063796
1.962
1.051884
mle_val = self.mle()
# A little bit of paranoia to avoid zeros
if mle_val <= 0.:
    mle_val = self._interp.xmin
if mle_val <= 0.:
    mle_val = self._interp.x[1]
log_mle = np.log10(mle_val)
lnl_max = self.fn_mle()

# This ultra-safe code to find an absolute maximum
# fmax = self.fn_mle()
# m = (fmax-self.interp.y > 0.1+dlnl) & (self.interp.x>self._mle)
# if sum(m) == 0:
#     xmax = self.interp.x[-1]*10
# else:
#     xmax = self.interp.x[m][0]

# Matt has found that it is faster to use an interpolator
# than an actual root-finder to find the root,
# probably b/c of python overhead.
# That would be something like this:
# rf = lambda x: self._interp(x)+dlnl-lnl_max
# return opt.brentq(rf,self._mle,self._interp.xmax,
#                   xtol=1e-10*np.abs(self._mle))
if upper:
    x = np.logspace(log_mle, np.log10(self._interp.xmax), 100)
    retVal = np.interp(dlnl, self.interp(x) - lnl_max, x)
else:
    x = np.linspace(self._interp.xmin, self._mle, 100)
    retVal = np.interp(dlnl, self.interp(x)[::-1] - lnl_max, x[::-1])
return retVal
def getDeltaLogLike(self, dlnl, upper=True)
Find the point at which the log-likelihood changes by a given value with respect to its value at the MLE.
4.234731
4.129528
1.025476
dlnl = onesided_cl_to_dlnl(1.0 - alpha) return self.getDeltaLogLike(dlnl, upper=upper)
def getLimit(self, alpha, upper=True)
Evaluate the limits corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : limit confidence level. upper : upper or lower limits.
12.17295
14.491697
0.839995
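onesided_cl_to_dlnl (and the two-sided counterpart used by getInterval below) are fermipy.utils helpers not shown here. Under the usual Gaussian/Wilks approximation they reduce to normal quantiles; a sketch of that conversion (the exact fermipy implementation is an assumption):

import numpy as np
from scipy.stats import norm

def onesided_cl_to_dlnl(cl):
    # Gaussian approximation: delta lnL = z(cl)**2 / 2.
    return 0.5 * norm.ppf(cl) ** 2

def twosided_cl_to_dlnl(cl):
    return 0.5 * norm.ppf(0.5 * (1.0 + cl)) ** 2

print(round(onesided_cl_to_dlnl(0.95), 3))  # 1.353
print(round(twosided_cl_to_dlnl(0.95), 3))  # 1.921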
dlnl = twosided_cl_to_dlnl(1.0 - alpha) lo_lim = self.getDeltaLogLike(dlnl, upper=False) hi_lim = self.getDeltaLogLike(dlnl, upper=True) return (lo_lim, hi_lim)
def getInterval(self, alpha)
Evaluate the interval corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : limit confidence level.
5.411427
5.611829
0.964289
convert_sed_cols(tab_e) try: emin = np.array(tab_e['e_min'].to(u.MeV)) emax = np.array(tab_e['e_max'].to(u.MeV)) except: emin = np.array(tab_e['e_min']) emax = np.array(tab_e['e_max']) ne = len(emin) try: ref_dnde = np.array(tab_e['ref_dnde']) except: ref_dnde = np.ones((ne)) try: ref_flux = np.array(tab_e['ref_flux']) except: ref_flux = np.ones((ne)) try: ref_eflux = np.array(tab_e['ref_eflux']) except: ref_eflux = np.ones((ne)) try: ref_npred = np.array(tab_e['ref_npred']) except: ref_npred = np.ones((ne)) return cls(emin, emax, ref_dnde, ref_flux, ref_eflux, ref_npred)
def create_from_table(cls, tab_e)
Parameters ---------- tab_e : `~astropy.table.Table` EBOUNDS table.
1.855549
1.835969
1.010665
cols = [ Column(name="E_MIN", dtype=float, data=self._emin, unit='MeV'), Column(name="E_MAX", dtype=float, data=self._emax, unit='MeV'), Column(name="E_REF", dtype=float, data=self._eref, unit='MeV'), Column(name="REF_DNDE", dtype=float, data=self._ref_dnde, unit='ph / (MeV cm2 s)'), Column(name="REF_FLUX", dtype=float, data=self._ref_flux, unit='ph / (cm2 s)'), Column(name="REF_EFLUX", dtype=float, data=self._ref_eflux, unit='MeV / (cm2 s)'), Column(name="REF_NPRED", dtype=float, data=self._ref_npred, unit='ph') ] tab = Table(data=cols) return tab
def build_ebound_table(self)
Build and return an EBOUNDS table with the encapsulated data.
2.146639
2.090705
1.026754
if len(x.shape) == 1: der_val = np.zeros((1)) else: der_val = np.zeros((x.shape[1:])) for i, xv in enumerate(x): der_val += self._loglikes[i].interp.derivative(xv, der=der) return der_val
def derivative(self, x, der=1)
Return the derivative of the log-like summed over the energy bins Parameters ---------- x : `~numpy.ndarray` Array of N x M values der : int Order of the derivative Returns ------- der_val : `~numpy.ndarray` Array of derivative values.
3.956387
3.582422
1.104389
mle_vals = np.ndarray((self._nx)) for i in range(self._nx): mle_vals[i] = self._loglikes[i].mle() return mle_vals
def mles(self)
return the maximum likelihood estimates for each of the energy bins
3.92603
3.584162
1.095383
ts_vals = np.ndarray((self._nx)) for i in range(self._nx): ts_vals[i] = self._loglikes[i].TS() return ts_vals
def ts_vals(self)
returns test statistic values for each energy bin
5.372554
4.294322
1.251083
chi2_vals = np.ndarray((self._nx)) for i in range(self._nx): mle = self._loglikes[i].mle() nll0 = self._loglikes[i].interp(mle) nll1 = self._loglikes[i].interp(x[i]) chi2_vals[i] = 2.0 * np.abs(nll0 - nll1) return chi2_vals
def chi2_vals(self, x)
Compute the difference in the log-likelihood between the MLE in each energy bin and the normalization predicted by a global best-fit model. This array can be summed to get a goodness-of-fit chi2 for the model. Parameters ---------- x : `~numpy.ndarray` An array of normalizations derived from a global fit to all energy bins. Returns ------- chi2_vals : `~numpy.ndarray` An array of chi2 values for each energy bin.
3.378073
3.628624
0.930952
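For a parabolic log-likelihood the per-bin value computed by chi2_vals reduces to ((x - mle) / sigma)**2; a toy check with made-up numbers:

import numpy as np

mle = np.array([1.0, 2.0])
sigma = np.array([0.1, 0.4])

def nll(v):
    # Parabolic negative log-likelihood per bin.
    return 0.5 * ((v - mle) / sigma) ** 2

x = np.array([1.2, 1.6])
print(2.0 * np.abs(nll(mle) - nll(x)))  # [4. 1.]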
limit_vals = np.ndarray((self._nx)) for i in range(self._nx): limit_vals[i] = self._loglikes[i].getLimit(alpha, upper) return limit_vals
def getLimits(self, alpha, upper=True)
Evaluate the limits corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : float limit confidence level. upper : bool upper or lower limits. returns an array of values, one for each energy bin
4.580461
5.249269
0.87259
limit_vals_lo = np.ndarray((self._nx)) limit_vals_hi = np.ndarray((self._nx)) for i in range(self._nx): lo_lim, hi_lim = self._loglikes[i].getInterval(alpha) limit_vals_lo[i] = lo_lim limit_vals_hi[i] = hi_lim return limit_vals_lo, limit_vals_hi
def getIntervals(self, alpha)
Evaluate the two-sided intervals corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : float limit confidence level. Returns ------- limit_vals_lo : `~numpy.ndarray` An array of lower limit values. limit_vals_hi : `~numpy.ndarray` An array of upper limit values.
2.920476
2.768911
1.054738
from scipy.optimize import brentq def fDeriv(x): return self.norm_derivative(specVals, x) try: result = brentq(fDeriv, xlims[0], xlims[1]) except: check_underflow = self.__call__(specVals * xlims[0]) < \ self.__call__(specVals * xlims[1]) if check_underflow.any(): return xlims[0] else: return xlims[1] return result
def fitNormalization(self, specVals, xlims)
Fit the normalization given a set of spectral values that define a spectral shape This version is faster, and solves for the root of the derivative Parameters ---------- specVals : an array of (nebins) values that define a spectral shape xlims : fit limits returns the best-fit normalization value
3.579072
3.626995
0.986787
from scipy.optimize import fmin def fToMin(x): return self.__call__(specVals * x) result = fmin(fToMin, 0., disp=False, xtol=1e-6) return result
def fitNorm_v2(self, specVals)
Fit the normalization given a set of spectral values that define a spectral shape. This version uses `scipy.optimize.fmin`. Parameters ---------- specVals : an array of (nebins) values that define a spectral shape Returns ------- norm : float Best-fit normalization value
4.855884
5.358172
0.906258
if not isinstance(specFunc, SEDFunctor): specFunc = self.create_functor(specFunc, initPars, scale=specFunc.scale) if freePars is None: freePars = np.empty(len(initPars), dtype=bool) freePars.fill(True) initPars = np.array(initPars) freePars = np.array(freePars) def fToMin(x): xp = np.array(specFunc.params) xp[freePars] = x return self.__call__(specFunc(xp)) result = fmin(fToMin, initPars[freePars], disp=False, xtol=1e-6) out_pars = specFunc.params out_pars[freePars] = np.array(result) spec_vals = specFunc(out_pars) spec_npred = np.zeros(len(spec_vals)) if isinstance(specFunc, spectrum.SEDFluxFunctor): spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_flux elif isinstance(specFunc, spectrum.SEDEFluxFunctor): spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_eflux ts_spec = self.TS_spectrum(spec_vals) chi2_vals = self.chi2_vals(spec_vals) chi2_spec = np.sum(chi2_vals) pval_spec = stats.distributions.chi2.sf(chi2_spec, len(spec_vals)) return dict(params=out_pars, spec_vals=spec_vals, spec_npred=spec_npred, ts_spec=ts_spec, chi2_spec=chi2_spec, chi2_vals=chi2_vals, pval_spec=pval_spec)
def fit_spectrum(self, specFunc, initPars, freePars=None)
Fit for the free parameters of a spectral function Parameters ---------- specFunc : `~fermipy.spectrum.SpectralFunction` The Spectral Function initPars : `~numpy.ndarray` The initial values of the parameters freePars : `~numpy.ndarray` Boolean array indicating which parameters should be free in the fit. Returns ------- params : `~numpy.ndarray` Best-fit parameters. spec_vals : `~numpy.ndarray` The values of the best-fit spectral model in each energy bin. ts_spec : float The TS of the best-fit spectrum chi2_vals : `~numpy.ndarray` Array of chi-squared values for each energy bin. chi2_spec : float Global chi-squared value for the sum of all energy bins. pval_spec : float p-value of chi-squared for the best-fit spectrum.
2.713484
2.412549
1.124737
shape = self._norm_vals.shape col_norm = Column(name="norm", dtype=float) col_normv = Column(name="norm_scan", dtype=float, shape=shape) col_dll = Column(name="dloglike_scan", dtype=float, shape=shape) tab = Table(data=[col_norm, col_normv, col_dll]) tab.add_row({"norm": 1., "norm_scan": self._norm_vals, "dloglike_scan": -1 * self._nll_vals}) return tab
def build_scandata_table(self)
Build an `astropy.table.Table` object from these data.
4.001551
3.64466
1.097922
n_bins = shape[0]
n_vals = shape[1]
if weights is None:
    weights = np.ones((len(components)))
norm_vals = np.zeros(shape)
nll_vals = np.zeros(shape)
nll_offsets = np.zeros((n_bins))
for i in range(n_bins):
    log_min = np.log10(ylims[0])
    log_max = np.log10(ylims[1])
    norm_vals[i, 1:] = np.logspace(log_min, log_max, n_vals - 1)
    for c, w in zip(components, weights):
        nll_vals[i] += w * c[i].interp(norm_vals[i]) - c.nll_offsets[i]
    # Reset the offsets
    nll_obj = LnLFn(norm_vals[i], nll_vals[i])
    ll_offset = nll_obj.fn_mle()
    nll_vals[i] -= ll_offset
    nll_offsets[i] = -ll_offset
return norm_vals, nll_vals, nll_offsets
def stack_nll(shape, components, ylims, weights=None)
Combine the log-likelihoods from a number of components. Parameters ---------- shape : tuple The shape of the return array components : `~fermipy.castro.CastroData_Base` The components to be stacked ylims : tuple Lower and upper bounds of the normalization scan weights : array-like Returns ------- norm_vals : `numpy.ndarray` N X M array of Normalization values nll_vals : `numpy.ndarray` N X M array of log-likelihood values nll_offsets : `numpy.ndarray` N array of maximum log-likelihood values in each bin
2.822684
2.723472
1.036429
data = load_yaml(yamlfile) nebins = len(data) emin = np.array([data[i]['emin'] for i in range(nebins)]) emax = np.array([data[i]['emax'] for i in range(nebins)]) ref_flux = np.array([data[i]['flux'][1] for i in range(nebins)]) ref_eflux = np.array([data[i]['eflux'][1] for i in range(nebins)]) conv = np.array([data[i]['eflux2npred'] for i in range(nebins)]) ref_npred = conv*ref_eflux ones = np.ones(ref_flux.shape) ref_spec = ReferenceSpec(emin, emax, ones, ref_flux, ref_eflux, ref_npred) norm_data = np.array([data[i]['eflux'] for i in range(nebins)]) ll_data = np.array([data[i]['logLike'] for i in range(nebins)]) max_ll = ll_data.max(1) nll_data = (max_ll - ll_data.T).T return cls(norm_data, nll_data, ref_spec, 'eflux')
def create_from_yamlfile(cls, yamlfile)
Create a Castro data object from a yaml file that contains the likelihood data.
2.621757
2.570505
1.019938
tab = Table.read(txtfile, format='ascii.ecsv') dnde_unit = u.ph / (u.MeV * u.cm ** 2 * u.s) loge = np.log10(np.array(tab['e_ref'].to(u.MeV))) norm = np.array(tab['norm'].to(dnde_unit)) norm_errp = np.array(tab['norm_errp'].to(dnde_unit)) norm_errn = np.array(tab['norm_errn'].to(dnde_unit)) norm_err = 0.5 * (norm_errp + norm_errn) dloge = loge[1:] - loge[:-1] dloge = np.insert(dloge, 0, dloge[0]) emin = 10 ** (loge - dloge * 0.5) emax = 10 ** (loge + dloge * 0.5) ectr = 10 ** loge deltae = emax - emin flux = norm * deltae eflux = norm * deltae * ectr ones = np.ones(flux.shape) ref_spec = ReferenceSpec(emin, emax, ones, ones, ones, ones) spec_data = SpecData(ref_spec, norm, flux, eflux, norm_err) stephi = np.linspace(0, 1, 11) steplo = -np.linspace(0, 1, 11)[1:][::-1] loscale = 3 * norm_err hiscale = 3 * norm_err loscale[loscale > norm] = norm[loscale > norm] norm_vals_hi = norm[:, np.newaxis] + \ stephi[np.newaxis, :] * hiscale[:, np.newaxis] norm_vals_lo = norm[:, np.newaxis] + \ steplo[np.newaxis, :] * loscale[:, np.newaxis] norm_vals = np.hstack((norm_vals_lo, norm_vals_hi)) nll_vals = 0.5 * \ (norm_vals - norm[:, np.newaxis]) ** 2 / \ norm_err[:, np.newaxis] ** 2 norm_vals *= flux[:, np.newaxis] / norm[:, np.newaxis] return cls(norm_vals, nll_vals, spec_data, 'flux')
def create_from_flux_points(cls, txtfile)
Create a Castro data object from a text file containing a sequence of differential flux points.
2.651246
2.634306
1.00643
if norm_type in ['flux', 'eflux', 'dnde']: norm_vals = np.array(tab_s['norm_scan'] * tab_e['ref_%s' % norm_type][:, np.newaxis]) elif norm_type == "norm": norm_vals = np.array(tab_s['norm_scan']) else: raise Exception('Unrecognized normalization type: %s' % norm_type) nll_vals = -np.array(tab_s['dloglike_scan']) rs = ReferenceSpec.create_from_table(tab_e) return cls(norm_vals, nll_vals, rs, norm_type)
def create_from_tables(cls, norm_type='eflux', tab_s="SCANDATA", tab_e="EBOUNDS")
Create a CastroData object from two tables Parameters ---------- norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) tab_s : str table scan data tab_e : str table energy binning and normalization data Returns ------- castro : `~fermipy.castro.CastroData`
3.626628
3.649611
0.993703
if irow is not None: tab_s = Table.read(fitsfile, hdu=hdu_scan)[irow] else: tab_s = Table.read(fitsfile, hdu=hdu_scan) tab_e = Table.read(fitsfile, hdu=hdu_energies) tab_s = convert_sed_cols(tab_s) tab_e = convert_sed_cols(tab_e) return cls.create_from_tables(norm_type, tab_s, tab_e)
def create_from_fits(cls, fitsfile, norm_type='eflux', hdu_scan="SCANDATA", hdu_energies="EBOUNDS", irow=None)
Create a CastroData object from a tscube FITS file. Parameters ---------- fitsfile : str Name of the fits file norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) hdu_scan : str Name of the FITS HDU with the scan data hdu_energies : str Name of the FITS HDU with the energy binning and normalization data irow : int or None If none, then this assumes that there is a single row in the scan data table Otherwise, this specifies which row of the table to use Returns ------- castro : `~fermipy.castro.CastroData`
2.086322
2.314486
0.901419
tab_s = Table.read(fitsfile, hdu=1) tab_s = convert_sed_cols(tab_s) if norm_type in ['flux', 'eflux', 'dnde']: ref_colname = 'ref_%s' % norm_type norm_vals = np.array(tab_s['norm_scan'] * tab_s[ref_colname][:, np.newaxis]) elif norm_type == "norm": norm_vals = np.array(tab_s['norm_scan']) else: raise ValueError('Unrecognized normalization type: %s' % norm_type) nll_vals = -np.array(tab_s['dloglike_scan']) ref_spec = ReferenceSpec.create_from_table(tab_s) spec_data = SpecData(ref_spec, tab_s['norm'], tab_s['norm_err']) return cls(norm_vals, nll_vals, spec_data, norm_type)
def create_from_sedfile(cls, fitsfile, norm_type='eflux')
Create a CastroData object from an SED fits file Parameters ---------- fitsfile : str Name of the fits file norm_type : str Type of normalization to use, options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) Returns ------- castro : `~fermipy.castro.CastroData`
3.34353
3.437164
0.972758
if len(components) == 0: return None norm_vals, nll_vals, nll_offsets = CastroData_Base.stack_nll(shape, components, ylims, weights) return cls(norm_vals, nll_vals, components[0].refSpec, components[0].norm_type)
def create_from_stack(cls, shape, components, ylims, weights=None)
Combine the log-likelihoods from a number of components. Parameters ---------- shape : tuple The shape of the return array components : [~fermipy.castro.CastroData_Base] The components to be stacked ylims : tuple Lower and upper bounds of the normalization scan weights : array-like Returns ------- castro : `~fermipy.castro.CastroData`
7.457897
6.68621
1.115415
sfn = self.create_functor(specType, scale)[0] return self.__call__(sfn(params))
def spectrum_loglike(self, specType, params, scale=1E3)
return the log-likelihood for a particular spectrum Parameters ---------- specType : str The type of spectrum to try params : array-like The spectral parameters scale : float The energy scale or 'pivot' energy
12.533042
18.329472
0.683764
emin = self._refSpec.emin emax = self._refSpec.emax fn = SpectralFunction.create_functor(specType, self.norm_type, emin, emax, scale=scale) if initPars is None: if specType == 'PowerLaw': initPars = np.array([5e-13, -2.0]) elif specType == 'LogParabola': initPars = np.array([5e-13, -2.0, 0.0]) elif specType == 'PLExpCutoff': initPars = np.array([5e-13, -1.0, 1E4]) fn.params = initPars return fn
def create_functor(self, specType, initPars=None, scale=1E3)
Create a functor object that computes normalizations in a sequence of energy bins for a given spectral model. Parameters ---------- specType : str The type of spectrum to use. This can be a string corresponding to the spectral model class name or a `~fermipy.spectrum.SpectralFunction` object. initPars : `~numpy.ndarray` Arrays of parameter values with which the spectral function will be initialized. scale : float The 'pivot energy' or energy scale to use for the spectrum Returns ------- fn : `~fermipy.spectrum.SEDFunctor` A functor object.
2.862354
2.72143
1.051783
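A sketch of create_functor usage under the same assumptions; the seed parameters are illustrative:

# Hypothetical example: build a LogParabola functor and evaluate the
# per-bin normalizations at its seed parameters
import numpy as np

fn = castro.create_functor('LogParabola',
                           initPars=np.array([5e-13, -2.0, 0.0]),
                           scale=1E3)
norms = fn(fn.params)  # normalization in each energy bin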
tsmap = WcsNDMap.read(fitsfile)
tab_e = Table.read(fitsfile, 'EBOUNDS')
tab_s = Table.read(fitsfile, 'SCANDATA')
tab_f = Table.read(fitsfile, 'FITDATA')

tab_e = convert_sed_cols(tab_e)
tab_s = convert_sed_cols(tab_s)
tab_f = convert_sed_cols(tab_f)

emin = np.array(tab_e['e_min'])
emax = np.array(tab_e['e_max'])
# Convert the energy bounds to MeV if they are stored in keV
try:
    if str(tab_e['e_min'].unit) == 'keV':
        emin /= 1000.
except Exception:
    pass
try:
    if str(tab_e['e_max'].unit) == 'keV':
        emax /= 1000.
except Exception:
    pass

nebins = len(tab_e)
npred = tab_e['ref_npred']
ndim = len(tsmap.data.shape)
if ndim == 2:
    cube_shape = (tsmap.data.shape[0],
                  tsmap.data.shape[1], nebins)
elif ndim == 1:
    cube_shape = (tsmap.data.shape[0], nebins)
else:
    raise RuntimeError("Counts map has dimension %i" % (ndim))

refSpec = ReferenceSpec.create_from_table(tab_e)
nll_vals = -np.array(tab_s["dloglike_scan"])
norm_vals = np.array(tab_s["norm_scan"])

axis = MapAxis.from_edges(np.concatenate((emin, emax[-1:])),
                          interp='log')
geom_3d = tsmap.geom.to_cube([axis])
tscube = WcsNDMap(geom_3d,
                  np.rollaxis(tab_s["ts"].reshape(cube_shape), 2, 0))
ncube = WcsNDMap(geom_3d,
                 np.rollaxis(tab_s["norm"].reshape(cube_shape), 2, 0))
nmap = WcsNDMap(tsmap.geom,
                tab_f['fit_norm'].reshape(tsmap.data.shape))

ref_colname = 'ref_%s' % norm_type
norm_vals *= tab_e[ref_colname][np.newaxis, :, np.newaxis]

return cls(tsmap, nmap, tscube, ncube,
           norm_vals, nll_vals, refSpec,
           norm_type)
def create_from_fits(cls, fitsfile, norm_type='flux')
Build a TSCube object from a fits file created by gttscube Parameters ---------- fitsfile : str Path to the tscube FITS file. norm_type : str String specifying the quantity used for the normalization
2.884163
2.959054
0.974691
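A usage sketch, assuming a gttscube output file ('tscube.fits' is an illustrative name):

# Hypothetical example: load a TS cube and its likelihood scans
from fermipy.castro import TSCube

tscube = TSCube.create_from_fits('tscube.fits', norm_type='flux')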
if colwise:
    ipix = self._tsmap.ipix_swap_axes(ipix, colwise)
norm_d = self._norm_vals[ipix]
nll_d = self._nll_vals[ipix]
return CastroData(norm_d, nll_d, self._refSpec, self._norm_type)
def castroData_from_ipix(self, ipix, colwise=False)
Build a CastroData object for a particular pixel
7.881957
7.486078
1.052882
ipix = self._tsmap.xy_pix_to_ipix(xy, colwise)
return self.castroData_from_ipix(ipix)
def castroData_from_pix_xy(self, xy, colwise=False)
Build a CastroData object for a particular pixel, specified by its x,y pixel coordinates
5.469325
5.47583
0.998812
if use_cumul:
    theMap = self._ts_cumul
else:
    theMap = self._tsmap

peaks = find_peaks(theMap, threshold, min_separation)
for peak in peaks:
    o, skydir = fit_error_ellipse(theMap,
                                  (peak['ix'], peak['iy']),
                                  dpix=2)
    peak['fit_loc'] = o
    peak['fit_skydir'] = skydir
    # Fall back to the peak pixel position if the ellipse fit failed
    if o['fit_success']:
        skydir = peak['fit_skydir']
    else:
        skydir = peak['skydir']
return peaks
def find_and_refine_peaks(self, threshold, min_separation=1.0, use_cumul=False)
Run a simple peak-finding algorithm, and fit the peaks to paraboloids to extract their positions and error ellipses. Parameters ---------- threshold : float Peak threshold in TS. min_separation : float Radius of region size in degrees. Sets the minimum allowable separation between peaks. use_cumul : bool If True, use the cumulative TS map (i.e., the TS summed over the energy bins) instead of the TS map from the fit to an index=2 power law. Returns ------- peaks : list List of dictionaries containing the location and amplitude of each peak. Output of `~fermipy.sourcefind.find_peaks`
5.124745
4.46765
1.147078
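A sketch of the peak-finding call, assuming `tscube` was loaded as above; the threshold and separation values are illustrative:

# Hypothetical example: peaks above TS=25, at least 0.5 deg apart,
# using the cumulative TS map
peaks = tscube.find_and_refine_peaks(25.0, min_separation=0.5, use_cumul=True)
for peak in peaks:
    print(peak['fit_skydir'], peak['fit_loc']['fit_success'])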
lats = np.degrees(np.arcsin(cvects[2]))
lons = np.degrees(np.arctan2(cvects[0], cvects[1]))
# Stack into the (2, nsrc) array described in the docstring;
# np.vstack (rather than hstack) gives the documented shape
return np.vstack([lats, lons])
def make_lat_lons(cvects)
Convert from directional cosines to latitude and longitude Parameters ---------- cvects : directional cosine (i.e., x,y,z component) values returns (np.ndarray(2,nsrc)) with the latitude and longitude values (in degrees)
1.95396
2.248373
0.869055
lon_rad = np.radians(lon_vect)
lat_rad = np.radians(lat_vect)
cvals = np.cos(lat_rad)
xvals = cvals * np.sin(lon_rad)
yvals = cvals * np.cos(lon_rad)
zvals = np.sin(lat_rad)
cvects = np.vstack([xvals, yvals, zvals])
return cvects
def make_cos_vects(lon_vect, lat_vect)
Convert from longitude (RA or GLON) and latitude (DEC or GLAT) values to directional cosines Parameters ---------- lon_vect,lat_vect : np.ndarray(nsrc) Input values returns (np.ndarray(3,nsrc)) with the directional cosine (i.e., x,y,z component) values
1.926178
1.958291
0.983601
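A runnable round-trip check of the two coordinate helpers above; the input positions are illustrative:

# Convert lon/lat to directional cosines and back
import numpy as np

lons = np.array([10.0, 250.0, 83.6])
lats = np.array([-45.0, 30.0, 22.0])
cvects = make_cos_vects(lons, lats)   # shape (3, nsrc)
latlons = make_lat_lons(cvects)       # shape (2, nsrc): latitudes, then longitudes
assert np.allclose(latlons[0], lats)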
dist_rad = np.radians(cut_dist)
cos_t_cut = np.cos(dist_rad)
nsrc = cos_vects.shape[1]
match_dict = {}
for i, v1 in enumerate(cos_vects.T):
    # Cosine of the angular separation between source i and all sources
    cos_t_vect = (v1 * cos_vects.T).sum(1)
    # Clip round-off error outside the valid domain of arccos
    cos_t_vect[cos_t_vect < -1.0] = -1.0
    cos_t_vect[cos_t_vect > 1.0] = 1.0
    mask = cos_t_vect > cos_t_cut
    acos_t_vect = np.ndarray(nsrc)
    # The 1e-6 is here b/c we use 0.0 for sources that failed the cut elsewhere.
    # We should maybe do this better, but it works for now.
    acos_t_vect[mask] = np.degrees(np.arccos(cos_t_vect[mask])) + 1e-6
    # Only keep each pair once, with the lower index first
    for j in np.where(mask[:i])[0]:
        match_dict[(j, i)] = acos_t_vect[j]
return match_dict
def find_matches_by_distance(cos_vects, cut_dist)
Find all the pairs of sources within a given distance of each other. Parameters ---------- cos_vects : np.ndarray(3,nsrc) Directional cosines (i.e., x,y,z component) values of all the sources cut_dist : float Angular cut in degrees that will be used to select pairs by their separation. Returns ------- match_dict : dict((int,int):float) Each entry gives a pair of source indices, and the corresponding distance
3.059635
2.935464
1.0423
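A small runnable example of the distance matching, reusing make_cos_vects; the positions are illustrative:

# Three sources; the first two are 0.1 deg apart, the third is far away
import numpy as np

lons = np.array([0.0, 0.1, 120.0])
lats = np.array([0.0, 0.0, 45.0])
cvects = make_cos_vects(lons, lats)
matches = find_matches_by_distance(cvects, 1.0)
# matches -> {(0, 1): ~0.1}; only the close pair survives the 1 deg cut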
match_dict = {}
sig_2_vect = unc_vect * unc_vect
for i, v1 in enumerate(cos_vects.T):
    cos_t_vect = (v1 * cos_vects.T).sum(1)
    cos_t_vect[cos_t_vect < -1.0] = -1.0
    cos_t_vect[cos_t_vect > 1.0] = 1.0
    sig_2_i = sig_2_vect[i]
    acos_t_vect = np.degrees(np.arccos(cos_t_vect))
    # Combine the positional uncertainties of each pair in quadrature
    total_unc = np.sqrt(sig_2_i + sig_2_vect)
    sigma_vect = acos_t_vect / total_unc
    mask = sigma_vect < cut_sigma
    for j in np.where(mask[:i])[0]:
        match_dict[(j, i)] = sigma_vect[j]
return match_dict
def find_matches_by_sigma(cos_vects, unc_vect, cut_sigma)
Find all the pairs of sources within a given significance of each other. Parameters ---------- cos_vects : np.ndarray(3,nsrc) Directional cosines (i.e., x,y,z component) values of all the sources unc_vect : np.ndarray(nsrc) Uncertainties on the source positions cut_sigma : float Angular cut in units of the combined positional uncertainty (standard deviations) that will be used to select pairs by their separation. Returns ------- match_dict : dict((int,int):float) Each entry gives a pair of source indices, and the corresponding sigma
2.592814
2.784271
0.931236
e_matrix = np.zeros((nsrcs, nsrcs))
for k, v in match_dict.items():
    e_matrix[k[0], k[1]] = v
return e_matrix
def fill_edge_matrix(nsrcs, match_dict)
Create and fill a matrix with the graph 'edges' between sources. Parameters ---------- nsrcs : int number of sources (used to allocate the size of the matrix) match_dict : dict((int,int):float) Each entry gives a pair of source indices, and the corresponding measure (either distance or sigma) Returns ------- e_matrix : `~numpy.ndarray` numpy.ndarray((nsrcs,nsrcs)) filled with zeros except for the matches, which are filled with the edge measures (either distances or sigmas)
1.954373
2.13327
0.916139
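The edge matrix is the natural input for a minimum spanning tree, which make_clusters below consumes. A sketch using scipy; the match_dict values are illustrative:

# Build an edge matrix from matches and compute its spanning tree
from scipy.sparse.csgraph import minimum_spanning_tree

match_dict = {(0, 1): 0.1, (1, 2): 0.3}
e_matrix = fill_edge_matrix(3, match_dict)
span_tree = minimum_spanning_tree(e_matrix)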
rev_dict = {}
for k, v in cdict.items():
    if k in rev_dict:
        rev_dict[k][k] = True
    else:
        rev_dict[k] = {k: True}
    for vv in v.keys():
        if vv in rev_dict:
            rev_dict[vv][k] = True
        else:
            rev_dict[vv] = {k: True}
return rev_dict
def make_rev_dict_unique(cdict)
Make a reverse dictionary Parameters ---------- cdict : dict(int:dict(int:True)) A dictionary of clusters. Each cluster is a source index and the dictionary of other sources in the cluster. Returns ------- rev_dict : dict(int:dict(int:True)) A dictionary pointing from source index to the clusters it is included in.
1.737533
1.800366
0.9651
iv0, iv1 = span_tree.nonzero()
# This is the dictionary of all the pairings for each source
match_dict = {}
for i0, i1 in zip(iv0, iv1):
    d = span_tree[i0, i1]
    # Cut on the link distance
    if d > cut_value:
        continue
    imin = int(min(i0, i1))
    imax = int(max(i0, i1))
    if imin in match_dict:
        match_dict[imin][imax] = True
    else:
        match_dict[imin] = {imax: True}

working = True
while working:
    working = False
    rev_dict = make_rev_dict_unique(match_dict)
    for k in sorted(rev_dict.keys()):
        v = rev_dict[k]
        # Multiple mappings
        if len(v) > 1:
            working = True
            v_sort = sorted(v.keys())
            cluster_idx = v_sort[0]
            for vv in v_sort[1:]:
                try:
                    to_merge = match_dict.pop(vv)
                except KeyError:
                    continue
                try:
                    match_dict[cluster_idx].update(to_merge)
                    match_dict[cluster_idx][vv] = True
                except KeyError:
                    continue
            # Remove self references
            try:
                match_dict[cluster_idx].pop(cluster_idx)
            except KeyError:
                pass

# Convert to an int:list dictionary
cdict = {}
for k, v in match_dict.items():
    cdict[k] = list(v.keys())

# Make the reverse dictionary
rdict = make_reverse_dict(cdict)
return cdict, rdict
def make_clusters(span_tree, cut_value)
Find clusters from the spanning tree Parameters ---------- span_tree : a sparse nsrcs x nsrcs array Filled with zeros except for the active edges, which are filled with the edge measures (either distances or sigmas) cut_value : float Value used to cut links when forming clusters. All links with measures above this value will be cut. returns (cdict, rdict): cdict is dict(int:[int,...]), a dictionary of clusters keyed by source index with the list of other sources in the cluster; rdict is the corresponding reverse dictionary.
2.982648
2.902023
1.027782
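Continuing the sketch above: cutting the spanning tree at 0.2 deg keeps only the (0, 1) link, so sources 0 and 1 form a cluster and source 2 stays alone:

cdict, rdict = make_clusters(span_tree, 0.2)
# cdict -> {0: [1]}   cluster keyed by source 0, containing source 1
# rdict -> {1: 0}     source 1 points back to its cluster key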
best_idx = idx_key
best_measure = measure_vect[idx_key]
out_list = [idx_key] + idx_list
for idx, measure in zip(idx_list, measure_vect[idx_list]):
    if measure < best_measure:
        best_idx = idx
        best_measure = measure
out_list.remove(best_idx)
return best_idx, out_list
def select_from_cluster(idx_key, idx_list, measure_vect)
Select a single source from a cluster and make it the new cluster key Parameters ---------- idx_key : int index of the current key for a cluster idx_list : [int,...] list of the other source indices in the cluster measure_vect : np.ndarray((nsrc),float) vector of the measure used to select the best source in the cluster returns best_idx:out_list where best_idx is the index of the best source in the cluster and out_list is the list of all the other indices
2.234154
2.067178
1.080775
if weights is None:
    weighted = cvects.T[idx_list].sum(0)
    sum_weights = float(len(idx_list))
else:
    weighted = ((cvects * weights).T[idx_list]).sum(0)
    sum_weights = weights.sum(0)
weighted /= sum_weights

# make sure it is normalized
norm = np.sqrt((weighted * weighted).sum())
weighted /= norm
return weighted
def find_centroid(cvects, idx_list, weights=None)
Find the centroid for a set of vectors Parameters ---------- cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values idx_list : [int,...] list of the source indices in the cluster weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting returns (np.ndarray(3)) with the directional cosine (i.e., x,y,z component) values of the centroid
3.049527
3.091002
0.986582
ret_val = np.zeros((n_src), int)
for i in range(n_src):
    try:
        key = rev_dict[i]
    except KeyError:
        key = i
    try:
        n = len(cdict[key])
    except KeyError:
        n = 0
    ret_val[i] = n
return ret_val
def count_sources_in_cluster(n_src, cdict, rev_dict)
Make a vector with the number of sources in each cluster Parameters ---------- n_src : number of sources cdict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. rev_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. Note that the key does not point to itself. Returns ---------- `np.ndarray((n_src),int)' with the number of sources in the cluster a given source belongs to.
2.390785
2.293396
1.042465
centroid = find_centroid(cvects, idx_list, weights)
dist_vals = np.degrees(
    np.arccos((centroid * cvects.T[idx_list]).sum(1)))
return dist_vals, centroid
def find_dist_to_centroid(cvects, idx_list, weights=None)
Find the distances to the centroid for a set of vectors Parameters ---------- cvects : ~numpy.ndarray(3,nsrc) with directional cosine (i.e., x,y,z component) values idx_list : [int,...] list of the source indices in the cluster weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting returns dist_vals:centroid where dist_vals (np.ndarray(len(idx_list))) are the distances of the cluster members to the centroid (in degrees) and centroid (np.ndarray(3)) is the centroid itself
3.57546
4.248931
0.841496
distances = np.zeros((cvects.shape[1]))
cent_dict = {}
for k, v in cluster_dict.items():
    idx_list = [k] + v
    distances[idx_list], centroid = find_dist_to_centroid(cvects,
                                                          idx_list,
                                                          weights)
    cent_dict[k] = make_lat_lons(centroid)
return distances, cent_dict
def find_dist_to_centroids(cluster_dict, cvects, weights=None)
Find the centroids and the distances to the centroid for all sources in a set of clusters Parameters ---------- cluster_dict : dict(int:[int,...]) Each cluster is a source index and the list of other sources in the cluster. cvects : np.ndarray(3,nsrc) Directional cosines (i.e., x,y,z component) values of all the sources weights : ~numpy.ndarray(nsrc) with the weights to use. None for equal weighting Returns ---------- distances : ~numpy.ndarray(nsrc) with the distances to the centroid of the cluster. 0 for unclustered sources cent_dict : dict(int:numpy.ndarray(2)), dictionary for the centroid locations
3.611482
3.510965
1.02863
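A sketch of the centroid distances for the cluster from the earlier matching example (cvects as defined there):

distances, cent_dict = find_dist_to_centroids({0: [1]}, cvects)
# distances[0] and distances[1] are ~0.05 deg each; cent_dict[0] holds
# the latitude and longitude of the centroid of sources 0 and 1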
out_dict = {}
for idx_key, idx_list in cluster_dict.items():
    out_idx, out_list = select_from_cluster(idx_key,
                                            idx_list,
                                            measure_vect)
    out_dict[out_idx] = out_list
return out_dict
def select_from_clusters(cluster_dict, measure_vect)
Select a single source from each cluster and make it the new cluster key Parameters ---------- cluster_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. measure_vect : np.ndarray((nsrc),float) vector of the measure used to select the best source in the cluster returns dict(int:[int,...]) New dictionary of clusters keyed by the best source in each cluster
2.926822
2.895704
1.010746
out_dict = {}
for k, v in in_dict.items():
    for vv in v:
        if vv in out_dict:
            if warn:
                print("Dictionary collision %i" % vv)
        out_dict[vv] = k
return out_dict
def make_reverse_dict(in_dict, warn=True)
Build a reverse dictionary from a cluster dictionary Parameters ---------- in_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. warn : bool If True, print a warning when a source appears in more than one cluster. Returns ------- out_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. Note that the key does not point to itself.
2.652851
3.815572
0.69527
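A small runnable example of the reverse mapping; the cluster dictionary is illustrative:

cdict = {0: [1, 4], 7: [2]}
rdict = make_reverse_dict(cdict)
# rdict -> {1: 0, 4: 0, 2: 7}; note that the keys 0 and 7 do not point
# to themselves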
out_array = -1 * np.ones((n_src), int)
for k, v in rev_dict.items():
    out_array[k] = v
    # We need this to make sure the seed source points at itself
    out_array[v] = v
return out_array
def make_cluster_vector(rev_dict, n_src)
Converts the cluster membership dictionary to an array Parameters ---------- rev_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. n_src : int Number of sources in the array Returns ------- out_array : `numpy.ndarray' An array filled with the index of the seed of a cluster if a source belongs to a cluster, and with -1 if it does not.
5.85651
6.257675
0.935892
out_array = np.where(cluster_vect >= 0,
                     src_names[cluster_vect], "")
return out_array
def make_cluster_name_vector(cluster_vect, src_names)
Converts the cluster membership vector to an array of seed-source names Parameters ---------- cluster_vect : `numpy.ndarray' An array filled with the index of the seed of a cluster if a source belongs to a cluster, and with -1 if it does not. src_names : An array with the source names Returns ------- out_array : `numpy.ndarray' An array filled with the name of the seed of a cluster if a source belongs to a cluster, and with an empty string if it does not.
3.931229
3.555522
1.105668
out_dict = {}
for i, k in enumerate(in_array):
    if k < 0:
        continue
    try:
        out_dict[k].append(i)
    except KeyError:
        out_dict[k] = [i]
return out_dict
def make_dict_from_vector(in_array)
Converts the cluster membership array stored in a fits file back to a dictionary Parameters ---------- in_array : `np.ndarray' An array filled with the index of the seed of a cluster if a source belongs to a cluster, and with -1 if it does not. Returns ------- returns dict(int:[int,...]) Dictionary of clusters keyed by the best source in each cluster
2.122726
2.368407
0.896268
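A runnable round trip between the vector and dictionary representations, using illustrative cluster assignments:

rev_dict = {1: 0, 4: 0, 2: 7}
vect = make_cluster_vector(rev_dict, 8)
# vect -> [ 0  0  7 -1  0 -1 -1  7]
cdict = make_dict_from_vector(vect)
# cdict -> {0: [0, 1, 4], 7: [2, 7]}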
nsrcs = len(tab)
mask = np.zeros((nsrcs), '?')
mask[to_remove] = True
inv_mask = np.invert(mask)
out_tab = tab[inv_mask]
return out_tab
def filter_and_copy_table(tab, to_remove)
Filter and copy a FITS table. Parameters ---------- tab : FITS Table object to_remove : [int,...] list of indices to remove from the table returns FITS Table object
4.003784
4.275804
0.936381
gta.free_sources(False)
gta.write_roi('base_roi', make_plots=make_plots)

# Free the brightest sources (by predicted counts) and optimize
gta.free_sources(True, minmax_npred=minmax_npred)
gta.optimize()
gta.free_sources(False)
gta.print_roi()
def baseline_roi_fit(gta, make_plots=False, minmax_npred=[1e3, np.inf])
Do baseline fitting for a target Region of Interest Parameters ---------- gta : `fermipy.gtaanalysis.GTAnalysis` The analysis object make_plots : bool Flag to make standard analysis plots minmax_npred : tuple or list Range of numbers of predicted counts for which to free sources in the initial fitting.
4.302209
4.490058
0.958163
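A usage sketch, assuming a configured fermipy analysis ('config.yaml' is an illustrative file name):

# Hypothetical example: set up an analysis and run the baseline fit
import numpy as np
from fermipy.gtanalysis import GTAnalysis

gta = GTAnalysis('config.yaml')
gta.setup()
baseline_roi_fit(gta, make_plots=True, minmax_npred=[1e3, np.inf])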
# Localize all point sources, sorted by decreasing TS
for src in sorted(gta.roi.sources, key=lambda t: t['ts'], reverse=True):
    # Only relocalize point sources that are well inside the ROI
    if not src['SpatialModel'] == 'PointSource':
        continue
    if src['offset_roi_edge'] > -0.1:
        continue
    gta.localize(src.name, **kwargs)
gta.optimize()
gta.print_roi()
def localize_sources(gta, **kwargs)
Relocalize sources in the region of interest Parameters ---------- gta : `fermipy.gtaanalysis.GTAnalysis` The analysis object kwargs : These are passed to the gta.localize function
7.450588
8.825889
0.844174
if gta.roi.has_source(name):
    gta.zero_source(name)
    gta.update_source(name)
    test_src_name = "%s_test" % name
else:
    test_src_name = name

gta.add_source(test_src_name, src_dict)
gta.free_norm(test_src_name)
gta.free_shape(test_src_name, free=False)
fit_result = gta.fit(covar=True)

# Extract the correlations between the test source and the other
# free normalizations
mask = fit_result['is_norm']
src_names = np.array(fit_result['src_names'])[mask]
idx = (src_names == test_src_name).argmax()
correl_vals = fit_result['correlation'][idx][mask]

cdict = {}
for src_name, correl_val in zip(src_names, correl_vals):
    if src_name == name:
        continue
    if np.fabs(correl_val) > correl_thresh:
        cdict[src_name] = correl_val

if not non_null_src:
    gta.zero_source(test_src_name)
    gta.fit(covar=True)
return cdict, test_src_name
def add_source_get_correlated(gta, name, src_dict, correl_thresh=0.25, non_null_src=False)
Add a source and get the set of correlated sources Parameters ---------- gta : `fermipy.gtaanalysis.GTAnalysis` The analysis object name : str Name of the source we are adding src_dict : dict Dictionary of the source parameters correl_thresh : float Threshold for considering a source to be correlated non_null_src : bool If True, don't zero the source Returns ------- cdict : dict Dictionary with names and correlation factors of correlated sources test_src_name : str Name of the test source
3.133033
2.768089
1.131839
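A usage sketch, assuming `gta` was set up as in the previous example; the source name and dictionary values are illustrative:

# Hypothetical example: add a test point source and list the sources
# whose fitted normalizations are correlated with it
src_dict = dict(ra=266.4, dec=-28.9,
                SpectrumType='PowerLaw', Index=2.0,
                SpatialModel='PointSource')
cdict, test_name = add_source_get_correlated(gta, 'test_src', src_dict,
                                             correl_thresh=0.25)
for src_name, correl in cdict.items():
    print(src_name, correl)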