sentence1 : string, lengths 52 to 3.87M
sentence2 : string, lengths 1 to 47.2k
label : stringclasses, 1 value
def _compute_mean(map1, map2):
    """ Make a map that is the mean of two maps """
    data = (map1.data + map2.data) / 2.
    return HpxMap(data, map1.hpx)
Make a map that is the mean of two maps
entailment
def _compute_ratio(top, bot):
    """ Make a map that is the ratio of two maps """
    data = np.where(bot.data > 0, top.data / bot.data, 0.)
    return HpxMap(data, top.hpx)
Make a map that is the ratio of two maps
entailment
def _compute_diff(map1, map2):
    """ Make a map that is the difference of two maps """
    data = map1.data - map2.data
    return HpxMap(data, map1.hpx)
Make a map that is the difference of two maps
entailment
def _compute_product(map1, map2):
    """ Make a map that is the product of two maps """
    data = map1.data * map2.data
    return HpxMap(data, map1.hpx)
Make a map that is the product of two maps
entailment
def _compute_counts_from_intensity(intensity, bexpcube):
    """ Make the counts map from the intensity """
    data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])
    return HpxMap(data, intensity.hpx)
Make the counts map from the intensity
entailment
def _compute_counts_from_model(model, bexpcube):
    """ Make the counts maps from the model """
    data = model.data * bexpcube.data
    ebins = model.hpx.ebins
    ratio = ebins[1:] / ebins[0:-1]
    half_log_ratio = np.log(ratio) / 2.
    int_map = ((data[0:-1].T * ebins[0:-1]) +
               (data[1:].T * ebins[1:])) * half_log_ratio
    return HpxMap(int_map.T, model.hpx)
Make the counts maps from the model
entailment
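The per-bin integral in _compute_counts_from_model is a log-log quadrature: the differential values at the two bin edges are weighted by the edge energies and half the log of the edge ratio. A minimal standalone check with plain numpy (array names here are illustrative, not from the module); for a falling power law the quadrature slightly overestimates the exact integral, and the agreement improves with finer bins:

import numpy as np

# Differential model F(E) = (E / E0)**gamma sampled at bin edges
ebins = np.array([100., 300., 1000.])        # MeV, bin edges
gamma = -2.0
flux = (ebins / ebins[0]) ** gamma           # arbitrary normalization

# Log-log quadrature used above:
# counts_i ~ (F_i * E_i + F_{i+1} * E_{i+1}) * ln(E_{i+1} / E_i) / 2
half_log_ratio = np.log(ebins[1:] / ebins[:-1]) / 2.
quad = (flux[:-1] * ebins[:-1] + flux[1:] * ebins[1:]) * half_log_ratio

# Exact integral of (E0 / E)**2 from E_i to E_{i+1}
exact = ebins[0] ** 2 * (1. / ebins[:-1] - 1. / ebins[1:])
print(quad)    # [73.24, 26.09]
print(exact)   # [66.67, 23.33]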
def _make_bright_pixel_mask(intensity_mean, mask_factor=5.0):
    """ Make a mask of all the brightest pixels """
    mask = np.zeros((intensity_mean.data.shape), bool)
    nebins = len(intensity_mean.data)
    sum_intensity = intensity_mean.data.sum(0)
    mean_intensity = sum_intensity.mean()
    for i in range(nebins):
        mask[i, 0:] = sum_intensity > (mask_factor * mean_intensity)
    return HpxMap(mask, intensity_mean.hpx)
Make a mask of all the brightest pixels
entailment
def _get_aeff_corrections(intensity_ratio, mask):
    """ Compute a correction for the effective area from the brightest pixels """
    nebins = len(intensity_ratio.data)
    aeff_corrections = np.zeros((nebins))
    for i in range(nebins):
        bright_pixels_intensity = intensity_ratio.data[i][mask.data[i]]
        mean_bright_pixel = bright_pixels_intensity.mean()
        aeff_corrections[i] = 1. / mean_bright_pixel
    print("Aeff correction: ", aeff_corrections)
    return aeff_corrections
Compute a correction for the effective area from the brightest pixels
entailment
def _apply_aeff_corrections(intensity_map, aeff_corrections):
    """ Multiply a map by the effective area correction """
    data = aeff_corrections * intensity_map.data.T
    return HpxMap(data.T, intensity_map.hpx)
Multiply a map by the effective area correction
entailment
def _fill_masked_intensity_resid(intensity_resid, bright_pixel_mask):
    """ Fill the pixels used to compute the effective area correction
    with the mean intensity
    """
    filled_intensity = np.zeros((intensity_resid.data.shape))
    nebins = len(intensity_resid.data)
    for i in range(nebins):
        masked = bright_pixel_mask.data[i]
        unmasked = np.invert(masked)
        mean_intensity = intensity_resid.data[i][unmasked].mean()
        filled_intensity[i] = np.where(masked, mean_intensity,
                                       intensity_resid.data[i])
    return HpxMap(filled_intensity, intensity_resid.hpx)
Fill the pixels used to compute the effective area correction with the mean intensity
entailment
def _smooth_hpx_map(hpx_map, sigma):
    """ Smooth a healpix map using a Gaussian """
    if hpx_map.hpx.ordering == "NESTED":
        ring_map = hpx_map.swap_scheme()
    else:
        ring_map = hpx_map
    ring_data = ring_map.data.copy()
    nebins = len(hpx_map.data)
    smoothed_data = np.zeros((hpx_map.data.shape))
    for i in range(nebins):
        smoothed_data[i] = healpy.sphtfunc.smoothing(
            ring_data[i], sigma=np.radians(sigma), verbose=False)
    # ndarray.clip returns a new array, so the result must be reassigned
    smoothed_data = smoothed_data.clip(0., 1e99)
    smoothed_ring_map = HpxMap(smoothed_data, ring_map.hpx)
    if hpx_map.hpx.ordering == "NESTED":
        return smoothed_ring_map.swap_scheme()
    return smoothed_ring_map
Smooth a healpix map using a Gaussian
entailment
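The smoothing is done in spherical-harmonic space, which is why the map is converted to RING ordering first and swapped back afterwards. A minimal standalone sketch of the same operation on a synthetic map, assuming healpy is installed (the map and sigma are toy values):

import healpy as hp
import numpy as np

nside = 64
npix = hp.nside2npix(nside)

# Toy RING-ordered map: one bright pixel on an otherwise flat sky
data = np.ones(npix)
data[hp.ang2pix(nside, np.pi / 2., 0.)] += 100.

# Gaussian smoothing with a 2-degree sigma; harmonic-space smoothing
# expects RING ordering, matching the swap_scheme() dance above
smoothed = hp.smoothing(data, sigma=np.radians(2.))
print(data.sum(), smoothed.sum())  # the total is (nearly) preserved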
def _intergral_to_differential(hpx_map, gamma=-2.0):
    """ Convert integral quantity to differential quantity.

    Here we are assuming the spectrum is a power law with index gamma and
    we are using log-log quadrature to compute the integral quantities.
    """
    nebins = len(hpx_map.data)
    diff_map = np.zeros((nebins + 1, hpx_map.hpx.npix))
    ebins = hpx_map.hpx.ebins
    ratio = ebins[1:] / ebins[0:-1]
    half_log_ratio = np.log(ratio) / 2.
    ratio_gamma = np.power(ratio, gamma)
    diff_map[0] = hpx_map.data[0] / ((ebins[0] + ratio_gamma[0] * ebins[1]) *
                                     half_log_ratio[0])
    for i in range(nebins):
        diff_map[i + 1] = (hpx_map.data[i] /
                           (ebins[i + 1] * half_log_ratio[i])) - \
            (diff_map[i] / ratio[i])
    return HpxMap(diff_map, hpx_map.hpx)
Convert integral quantity to differential quantity. Here we are assuming the spectrum is a power law with index gamma and we are using log-log quadrature to compute the integral quantities.
entailment
def _differential_to_integral(hpx_map):
    """ Convert a differential map to an integral map.

    Here we are using log-log quadrature to compute the integral quantities.
    """
    ebins = hpx_map.hpx.ebins
    ratio = ebins[1:] / ebins[0:-1]
    half_log_ratio = np.log(ratio) / 2.
    int_map = ((hpx_map.data[0:-1].T * ebins[0:-1]) +
               (hpx_map.data[1:].T * ebins[1:])) * half_log_ratio
    return HpxMap(int_map.T, hpx_map.hpx)
Convert a differential map to an integral map. Here we are using log-log quadrature to compute the integral quantities.
entailment
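Both conversions rest on the same bin integral. With F_i the differential value at edge E_i, the quadrature is

$$\int_{E_i}^{E_{i+1}} F(E)\,dE \;\approx\; \bigl(F_i E_i + F_{i+1} E_{i+1}\bigr)\,\frac{1}{2}\ln\frac{E_{i+1}}{E_i},$$

which is exact for F(E) proportional to 1/E. _differential_to_integral evaluates this directly; _intergral_to_differential inverts it edge by edge. Writing C_i for the integral in bin i, r_i = E_{i+1}/E_i and h_i = (1/2) ln r_i, the recurrence in the loop above is

$$F_{i+1} \;=\; \frac{C_i}{E_{i+1}\,h_i} \;-\; \frac{F_i}{r_i},$$

seeded at the first edge by assuming F follows the power law with index gamma across bin 0.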
def run_analysis(self, argv):
    """Run this analysis"""
    args = self._parser.parse_args(argv)

    # Read the input maps
    ccube_dirty = HpxMap.create_from_fits(args.ccube_dirty, hdu='SKYMAP')
    bexpcube_dirty = HpxMap.create_from_fits(args.bexpcube_dirty,
                                             hdu='HPXEXPOSURES')
    ccube_clean = HpxMap.create_from_fits(args.ccube_clean, hdu='SKYMAP')
    bexpcube_clean = HpxMap.create_from_fits(args.bexpcube_clean,
                                             hdu='HPXEXPOSURES')

    # Decide what HEALPix order to work at
    if args.hpx_order:
        hpx_order = args.hpx_order
    else:
        hpx_order = ccube_dirty.hpx.order

    # Cast all the input maps to match ccube_clean
    cube_dict = ResidualCR._match_cubes(ccube_clean, ccube_dirty,
                                        bexpcube_clean, bexpcube_dirty,
                                        hpx_order)

    # Intensity maps
    intensity_clean = ResidualCR._compute_intensity(cube_dict['ccube_clean'],
                                                    cube_dict['bexpcube_clean'])
    intensity_dirty = ResidualCR._compute_intensity(cube_dict['ccube_dirty'],
                                                    cube_dict['bexpcube_dirty'])

    # Mean & ratio of intensity maps
    intensity_mean = ResidualCR._compute_mean(intensity_dirty,
                                              intensity_clean)
    intensity_ratio = ResidualCR._compute_ratio(intensity_dirty,
                                                intensity_clean)

    # Select the bright pixels for the Aeff correction and to mask
    # when filling the output map
    bright_pixel_select = ResidualCR._make_bright_pixel_mask(intensity_mean,
                                                             args.select_factor)
    bright_pixel_mask = ResidualCR._make_bright_pixel_mask(intensity_mean,
                                                           args.mask_factor)

    # Compute the Aeff corrections using the brightest pixels
    aeff_corrections = ResidualCR._get_aeff_corrections(intensity_ratio,
                                                        bright_pixel_select)

    # Apply the Aeff corrections and get the intensity residual
    corrected_dirty = ResidualCR._apply_aeff_corrections(intensity_dirty,
                                                         aeff_corrections)
    corrected_ratio = ResidualCR._compute_ratio(corrected_dirty,
                                                intensity_clean)
    intensity_resid = ResidualCR._compute_diff(corrected_dirty,
                                               intensity_clean)

    # Replace the masked pixels with the map mean to avoid features
    # associated with sources
    filled_resid = ResidualCR._fill_masked_intensity_resid(intensity_resid,
                                                           bright_pixel_mask)

    # Smooth the map
    smooth_resid = ResidualCR._smooth_hpx_map(filled_resid, args.sigma)

    # Convert to a differential map
    out_model = ResidualCR._intergral_to_differential(smooth_resid)

    # Make the ENERGIES HDU
    out_energies = ccube_dirty.hpx.make_energies_hdu()

    # Write the maps
    cubes = dict(SKYMAP=out_model)
    fits_utils.write_maps(None, cubes, args.outfile,
                          energy_hdu=out_energies)

    if args.full_output:
        # Some diagnostics
        check = ResidualCR._differential_to_integral(out_model)
        check_resid = ResidualCR._compute_diff(smooth_resid, check)
        counts_resid = ResidualCR._compute_counts_from_intensity(
            intensity_resid, cube_dict['bexpcube_dirty'])
        pred_counts = ResidualCR._compute_counts_from_model(
            out_model, cube_dict['bexpcube_dirty'])
        pred_resid = ResidualCR._compute_diff(pred_counts, counts_resid)

        out_ebounds = ccube_dirty.hpx.make_energy_bounds_hdu()
        cubes = dict(INTENSITY_CLEAN=intensity_clean,
                     INTENSITY_DIRTY=intensity_dirty,
                     INTENSITY_RATIO=intensity_ratio,
                     CORRECTED_DIRTY=corrected_dirty,
                     CORRECTED_RATIO=corrected_ratio,
                     INTENSITY_RESID=intensity_resid,
                     PIXEL_SELECT=bright_pixel_select,
                     PIXEL_MASK=bright_pixel_mask,
                     FILLED_RESID=filled_resid,
                     SMOOTH_RESID=smooth_resid,
                     CHECK=check,
                     CHECK_RESID=check_resid,
                     COUNTS_RESID=counts_resid,
                     PRED_COUNTS=pred_counts,
                     PRED_RESID=pred_resid)
        fits_utils.write_maps(None, cubes,
                              args.outfile.replace('.fits', '_full.fits'),
                              energy_hdu=out_ebounds)
Run this analysis
entailment
def build_job_configs(self, args):
    """Hook to build job configurations
    """
    job_configs = {}

    components = Component.build_from_yamlfile(args['comp'])
    NAME_FACTORY.update_base_dict(args['data'])
    NAME_FACTORY_CLEAN.update_base_dict(args['data'])
    NAME_FACTORY_DIRTY.update_base_dict(args['data'])
    NAME_FACTORY_CLEAN.base_dict['evclass'] = args['clean']
    NAME_FACTORY_DIRTY.base_dict['evclass'] = args['dirty']

    for comp in components:
        zcut = "zmax%i" % comp.zmax
        key = comp.make_key('{ebin_name}_{evtype_name}')
        name_keys = dict(zcut=zcut,
                         ebin=comp.ebin_name,
                         psftype=comp.evtype_name,
                         coordsys=comp.coordsys,
                         irf_ver=NAME_FACTORY.irf_ver(),
                         mktime=args['mktimefilter'],
                         fullpath=True)
        outfile = NAME_FACTORY.residual_cr(**name_keys)
        if args['hpx_order']:
            hpx_order = min(comp.hpx_order, args['hpx_order'])
        else:
            hpx_order = comp.hpx_order
        job_configs[key] = dict(bexpcube_dirty=NAME_FACTORY_DIRTY.bexpcube(**name_keys),
                                ccube_dirty=NAME_FACTORY_DIRTY.ccube(**name_keys),
                                bexpcube_clean=NAME_FACTORY_CLEAN.bexpcube(**name_keys),
                                ccube_clean=NAME_FACTORY_CLEAN.ccube(**name_keys),
                                outfile=outfile,
                                hpx_order=hpx_order,
                                full_output=args['full_output'],
                                logfile=make_nfs_path(outfile.replace('.fits', '.log')))

    return job_configs
Hook to build job configurations
entailment
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided
    to the individual links
    """
    config_yaml = args['config']
    config_dict = load_yaml(config_yaml)

    data = config_dict.get('data')
    comp = config_dict.get('comp')
    dry_run = args.get('dry_run', False)

    self._set_link('prepare', SplitAndMktimeChain,
                   comp=comp, data=data,
                   ft1file=config_dict['ft1file'],
                   ft2file=config_dict['ft2file'],
                   hpx_order_ccube=config_dict.get('hpx_order_ccube', 7),
                   hpx_order_expcube=config_dict.get('hpx_order_expcube', 7),
                   mktime=config_dict.get('mktimefilter', None),
                   do_ltsum=config_dict.get('do_ltsum', False),
                   scratch=config_dict.get('scratch', None),
                   dry_run=dry_run)

    self._set_link('residual-cr', ResidualCR_SG,
                   comp=comp, data=data,
                   mktimefilter=config_dict.get('mktimefilter', None),
                   hpx_order=config_dict.get('hpx_order_fitting', 4),
                   clean=config_dict.get('clean_class', None),
                   dirty=config_dict.get('dirty_class', None),
                   select_factor=config_dict.get('select_factor', None),
                   mask_factor=config_dict.get('mask_factor', None),
                   sigma=config_dict.get('sigma', None),
                   full_output=config_dict.get('full_output', False),
                   dry_run=dry_run)
Map from the top-level arguments to the arguments provided to the individual links
entailment
def coadd_maps(geom, maps, preserve_counts=True):
    """Coadd a sequence of `~gammapy.maps.Map` objects."""
    # FIXME: This functionality should be built into the Map.coadd method
    map_out = gammapy.maps.Map.from_geom(geom)
    for m in maps:
        m_tmp = m
        if isinstance(m, gammapy.maps.HpxNDMap):
            if m.geom.order < map_out.geom.order:
                factor = map_out.geom.nside // m.geom.nside
                m_tmp = m.upsample(factor, preserve_counts=preserve_counts)
        map_out.coadd(m_tmp)
    return map_out
Coadd a sequence of `~gammapy.maps.Map` objects.
entailment
def sum_over_energy(self):
    """ Reduce a 3D counts cube to a 2D counts map """
    # Note that the array is using the opposite convention from WCS,
    # so we sum over axis 0 in the array, but drop axis 2 in the WCS object
    return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))
Reduce a 3D counts cube to a 2D counts map
entailment
def xypix_to_ipix(self, xypix, colwise=False):
    """Return the flattened pixel indices from an array of
    multi-dimensional pixel indices.

    Parameters
    ----------
    xypix : list
        List of pixel indices in the order (LON, LAT, ENERGY).

    colwise : bool
        Use column-wise pixel indexing.
    """
    return np.ravel_multi_index(xypix, self.npix,
                                order='F' if colwise else 'C',
                                mode='raise')
Return the flattened pixel indices from an array of multi-dimensional pixel indices. Parameters ---------- xypix : list List of pixel indices in the order (LON, LAT, ENERGY). colwise : bool Use column-wise pixel indexing.
entailment
def ipix_to_xypix(self, ipix, colwise=False):
    """Return an array of multi-dimensional pixel indices from a
    flattened index.

    Parameters
    ----------
    colwise : bool
        Use column-wise pixel indexing.
    """
    return np.unravel_index(ipix, self.npix,
                            order='F' if colwise else 'C')
Return an array of multi-dimensional pixel indices from a flattened index. Parameters ---------- colwise : bool Use column-wise pixel indexing.
entailment
def ipix_swap_axes(self, ipix, colwise=False):
    """ Return the transposed pixel index from the pixel xy coordinates.

    If colwise is True (False) this assumes the original index was
    in the column-wise (row-wise) scheme.
    """
    xy = self.ipix_to_xypix(ipix, colwise)
    return self.xypix_to_ipix(xy, not colwise)
Return the transposed pixel index from the pixel xy coordinates. If colwise is True (False) this assumes the original index was in the column-wise (row-wise) scheme.
entailment
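These three methods are thin wrappers around numpy's flat-index routines. A quick round trip with a plain (nx, ny) grid standing in for self.npix (names illustrative):

import numpy as np

npix = (4, 3)                                       # stand-in for self.npix

# Row-major ('C') flattening of pixel (x=2, y=1)
ipix = np.ravel_multi_index((2, 1), npix, order='C', mode='raise')
print(ipix)                                         # 7 == 2 * 3 + 1

# ...and back again
print(np.unravel_index(ipix, npix, order='C'))      # (2, 1)

# Swapping schemes: unravel with one order, ravel with the other
xy = np.unravel_index(ipix, npix, order='C')
print(np.ravel_multi_index(xy, npix, order='F'))    # 6 == 2 + 1 * 4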
def get_pixel_skydirs(self):
    """Get a list of sky coordinates for the centers of every pixel.
    """
    xpix = np.linspace(0, self.npix[0] - 1., self.npix[0])
    ypix = np.linspace(0, self.npix[1] - 1., self.npix[1])
    xypix = np.meshgrid(xpix, ypix, indexing='ij')
    return SkyCoord.from_pixel(np.ravel(xypix[0]),
                               np.ravel(xypix[1]), self.wcs)
Get a list of sky coordinates for the centers of every pixel.
entailment
def get_pixel_indices(self, lons, lats, ibin=None):
    """Return the indices in the flat array corresponding to a set
    of coordinates

    Parameters
    ----------
    lons : array-like
        'Longitudes' (RA or GLON)

    lats : array-like
        'Latitudes' (DEC or GLAT)

    ibin : int or array-like
        Extract data only for a given energy bin.
        None -> extract data for all energy bins.

    Returns
    -------
    pixcrd : list
        Pixel indices along each dimension of the map.
    """
    lons = np.array(lons, ndmin=1)
    lats = np.array(lats, ndmin=1)
    if len(lats) != len(lons):
        raise RuntimeError('Map.get_pixel_indices, input lengths '
                           'do not match %i %i' % (len(lons), len(lats)))
    if len(self._npix) == 2:
        pix_x, pix_y = self._wcs.wcs_world2pix(lons, lats, 0)
        pixcrd = [np.floor(pix_x).astype(int), np.floor(pix_y).astype(int)]
    elif len(self._npix) == 3:
        all_lons = np.expand_dims(lons, -1)
        all_lats = np.expand_dims(lats, -1)
        if ibin is None:
            all_bins = (np.expand_dims(np.arange(self.npix[2]), -1) *
                        np.ones(lons.shape)).T
        else:
            all_bins = ibin
        pix = self.wcs.wcs_world2pix(all_lons, all_lats, all_bins, 0)
        pixcrd = [np.floor(pix[0]).astype(int),
                  np.floor(pix[1]).astype(int),
                  all_bins.astype(int)]
    return pixcrd
Return the indices in the flat array corresponding to a set of coordinates Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitudes' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all energy bins. Returns ------- pixcrd : list Pixel indices along each dimension of the map.
entailment
def get_map_values(self, lons, lats, ibin=None):
    """Return the map values corresponding to a set of coordinates.

    Parameters
    ----------
    lons : array-like
        'Longitudes' (RA or GLON)

    lats : array-like
        'Latitudes' (DEC or GLAT)

    ibin : int or array-like
        Extract data only for a given energy bin.
        None -> extract data for all bins

    Returns
    -------
    vals : numpy.ndarray((n))
        Values of pixels in the flattened map,
        np.nan used to flag coords outside of map
    """
    pix_idxs = self.get_pixel_indices(lons, lats, ibin)
    idxs = copy.copy(pix_idxs)
    m = np.empty_like(idxs[0], dtype=bool)
    m.fill(True)
    for i, p in enumerate(pix_idxs):
        m &= (p >= 0) & (p < self._npix[i])
        idxs[i][~m] = 0

    vals = self.counts.T[idxs]
    vals[~m] = np.nan
    return vals
Return the map values corresponding to a set of coordinates. Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitudes' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all bins Returns ------- vals : numpy.ndarray((n)) Values of pixels in the flattened map, np.nan used to flag coords outside of map
entailment
def create_from_hdu(cls, hdu, ebins):
    """ Creates and returns an HpxMap object from a FITS HDU.

    hdu   : The FITS HDU
    ebins : Energy bin edges [optional]
    """
    hpx = HPX.create_from_hdu(hdu, ebins)
    colnames = hdu.columns.names
    cnames = []
    if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':
        pixs = hdu.data.field('PIX')
        chans = hdu.data.field('CHANNEL')
        keys = chans * hpx.npix + pixs
        vals = hdu.data.field('VALUE')
        nebin = len(ebins)
        data = np.zeros((nebin, hpx.npix))
        data.flat[keys] = vals
    else:
        for c in colnames:
            if c.find(hpx.conv.colstring) == 0:
                cnames.append(c)
        nebin = len(cnames)
        data = np.ndarray((nebin, hpx.npix))
        for i, cname in enumerate(cnames):
            data[i, 0:] = hdu.data.field(cname)
    return cls(data, hpx)
Creates and returns an HpxMap object from a FITS HDU. hdu : The FITS HDU ebins : Energy bin edges [optional]
entailment
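The FGST_SRCMAP_SPARSE branch stores only non-zero pixels, encoding each as a flat key = channel * npix + pix. A minimal sketch of that encode/decode round trip on plain arrays (all names here are illustrative):

import numpy as np

nebin, npix = 3, 12

# Dense cube with a few non-zero entries
cube = np.zeros((nebin, npix))
cube[0, 2] = 5.
cube[2, 7] = 1.5

# Encode: one flat key per non-zero pixel, key = channel * npix + pix
chans, pixs = cube.nonzero()
keys = chans * npix + pixs
vals = cube[chans, pixs]

# Decode, exactly as in the sparse branch above
restored = np.zeros((nebin, npix))
restored.flat[keys] = vals
assert np.array_equal(cube, restored)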
def create_from_hdulist(cls, hdulist, **kwargs):
    """ Creates and returns an HpxMap object from a FITS HDUList

    hdu : The name of the HDU with the map data
    """
    extname = kwargs.get('hdu', hdulist[1].name)
    ebins = fits_utils.find_and_read_ebins(hdulist)
    return cls.create_from_hdu(hdulist[extname], ebins)
Creates and returns an HpxMap object from a FITS HDUList hdu : The name of the HDU with the map data
entailment
def make_wcs_from_hpx(self, sum_ebins=False, proj='CAR', oversample=2,
                      normalize=True):
    """Make a WCS object and convert HEALPix data into WCS projection

    NOTE: this re-calculates the mapping; if you have already
    calculated the mapping it is much faster to use
    convert_to_cached_wcs() instead

    Parameters
    ----------
    sum_ebins : bool
        sum over energy bins before reprojecting

    proj : str
        WCS projection

    oversample : int
        Oversampling factor for WCS map

    normalize : bool
        True -> preserve integral by splitting HEALPix values between bins

    returns (WCS object, np.ndarray() with reprojected data)
    """
    self._wcs_proj = proj
    self._wcs_oversample = oversample
    self._wcs_2d = self.hpx.make_wcs(2, proj=proj, oversample=oversample)
    self._hpx2wcs = HpxToWcsMapping(self.hpx, self._wcs_2d)
    wcs, wcs_data = self.convert_to_cached_wcs(self.counts, sum_ebins,
                                               normalize)
    return wcs, wcs_data
Make a WCS object and convert HEALPix data into WCS projection NOTE: this re-calculates the mapping; if you have already calculated the mapping it is much faster to use convert_to_cached_wcs() instead Parameters ---------- sum_ebins : bool sum over energy bins before reprojecting proj : str WCS projection oversample : int Oversampling factor for WCS map normalize : bool True -> preserve integral by splitting HEALPix values between bins returns (WCS object, np.ndarray() with reprojected data)
entailment
def convert_to_cached_wcs(self, hpx_in, sum_ebins=False, normalize=True):
    """ Make a WCS object and convert HEALPix data into WCS projection

    Parameters
    ----------
    hpx_in : `~numpy.ndarray`
        HEALPix input data

    sum_ebins : bool
        sum over energy bins before reprojecting

    normalize : bool
        True -> preserve integral by splitting HEALPix values between bins

    returns (WCS object, np.ndarray() with reprojected data)
    """
    if self._hpx2wcs is None:
        raise Exception('HpxMap.convert_to_cached_wcs() called '
                        'before make_wcs_from_hpx()')
    if len(hpx_in.shape) == 1:
        wcs_data = np.ndarray(self._hpx2wcs.npix)
        loop_ebins = False
        hpx_data = hpx_in
    elif len(hpx_in.shape) == 2:
        if sum_ebins:
            wcs_data = np.ndarray(self._hpx2wcs.npix)
            hpx_data = hpx_in.sum(0)
            loop_ebins = False
        else:
            wcs_data = np.ndarray((self.counts.shape[0],
                                   self._hpx2wcs.npix[0],
                                   self._hpx2wcs.npix[1]))
            hpx_data = hpx_in
            loop_ebins = True
    else:
        raise Exception('Wrong dimension for HpxMap %i' %
                        len(hpx_in.shape))

    if loop_ebins:
        for i in range(hpx_data.shape[0]):
            self._hpx2wcs.fill_wcs_map_from_hpx_data(
                hpx_data[i], wcs_data[i], normalize)
        # ndarray.reshape returns a new array, so reassign the result
        wcs_data = wcs_data.reshape((self.counts.shape[0],
                                     self._hpx2wcs.npix[0],
                                     self._hpx2wcs.npix[1]))
        # replace the WCS with a 3D one
        wcs = self.hpx.make_wcs(3, proj=self._wcs_proj,
                                energies=np.log10(self.hpx.ebins),
                                oversample=self._wcs_oversample)
    else:
        self._hpx2wcs.fill_wcs_map_from_hpx_data(
            hpx_data, wcs_data, normalize)
        wcs_data = wcs_data.reshape(self._hpx2wcs.npix)
        wcs = self._wcs_2d
    return wcs, wcs_data
Make a WCS object and convert HEALPix data into WCS projection Parameters ---------- hpx_in : `~numpy.ndarray` HEALPix input data sum_ebins : bool sum over energy bins before reprojecting normalize : bool True -> preserve integral by splitting HEALPix values between bins returns (WCS object, np.ndarray() with reprojected data)
entailment
def get_pixel_skydirs(self):
    """Get a list of sky coordinates for the centers of every pixel.
    """
    sky_coords = self._hpx.get_sky_coords()
    if self.hpx.coordsys == 'GAL':
        return SkyCoord(l=sky_coords.T[0], b=sky_coords.T[1],
                        unit='deg', frame='galactic')
    else:
        return SkyCoord(ra=sky_coords.T[0], dec=sky_coords.T[1],
                        unit='deg', frame='icrs')
Get a list of sky coordinates for the centers of every pixel.
entailment
def sum_over_energy(self):
    """ Reduce a counts cube to a counts map """
    # We sum over axis 0 in the array, and drop the energy binning in
    # the hpx object
    return HpxMap(np.sum(self.counts, axis=0),
                  self.hpx.copy_and_drop_energy())
Reduce a counts cube to a counts map
entailment
def get_map_values(self, lons, lats, ibin=None):
    """Return the map values corresponding to a set of coordinates.

    Parameters
    ----------
    lons : array-like
        'Longitudes' (RA or GLON)

    lats : array-like
        'Latitudes' (DEC or GLAT)

    ibin : int or array-like
        Extract data only for a given energy bin.
        None -> extract data for all bins

    Returns
    -------
    vals : numpy.ndarray((n))
        Values of pixels in the flattened map,
        np.nan used to flag coords outside of map
    """
    theta = np.pi / 2. - np.radians(lats)
    phi = np.radians(lons)
    pix = hp.ang2pix(self.hpx.nside, theta, phi, nest=self.hpx.nest)
    if self.data.ndim == 2:
        return self.data[:, pix] if ibin is None else self.data[ibin, pix]
    else:
        return self.data[pix]
Return the map values corresponding to a set of coordinates. Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitudes' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all bins Returns ------- vals : numpy.ndarray((n)) Values of pixels in the flattened map, np.nan used to flag coords outside of map
entailment
def interpolate(self, lon, lat, egy=None, interp_log=True):
    """Interpolate map values.

    Parameters
    ----------
    interp_log : bool
        Interpolate the z-coordinate in logspace.
    """
    if self.data.ndim == 1:
        theta = np.pi / 2. - np.radians(lat)
        phi = np.radians(lon)
        return hp.pixelfunc.get_interp_val(self.counts, theta, phi,
                                           nest=self.hpx.nest)
    else:
        return self._interpolate_cube(lon, lat, egy, interp_log)
Interpolate map values. Parameters ---------- interp_log : bool Interpolate the z-coordinate in logspace.
entailment
def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):
    """Perform interpolation on a healpix cube.  If egy is None
    then interpolation will be performed on the existing energy
    planes.
    """
    shape = np.broadcast(lon, lat, egy).shape
    lon = lon * np.ones(shape)
    lat = lat * np.ones(shape)
    theta = np.pi / 2. - np.radians(lat)
    phi = np.radians(lon)
    vals = []
    for i, _ in enumerate(self.hpx.evals):
        v = hp.pixelfunc.get_interp_val(self.counts[i], theta, phi,
                                        nest=self.hpx.nest)
        vals += [np.expand_dims(np.array(v, ndmin=1), -1)]

    vals = np.concatenate(vals, axis=-1)

    if egy is None:
        return vals.T

    egy = egy * np.ones(shape)

    if interp_log:
        xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))
    else:
        xvals = utils.val_to_pix(self.hpx.evals, egy)

    vals = vals.reshape((-1, vals.shape[-1]))
    xvals = np.ravel(xvals)
    v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals],
                        order=1)
    return v.reshape(shape)
Perform interpolation on a healpix cube. If egy is None then interpolation will be performed on the existing energy planes.
entailment
def expanded_counts_map(self):
    """ return the full counts map """
    if self.hpx._ipix is None:
        return self.counts

    output = np.zeros(
        (self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)
    for i in range(self.counts.shape[0]):
        output[i][self.hpx._ipix] = self.counts[i]
    return output
return the full counts map
entailment
def explicit_counts_map(self, pixels=None):
    """ return a counts map with explicit index scheme

    Parameters
    ----------
    pixels : `np.ndarray` or None
        If set, grab only those pixels.
        If None, grab only non-zero pixels
    """
    # No pixel index, so build one
    if self.hpx._ipix is None:
        if self.data.ndim == 2:
            summed = self.counts.sum(0)
            if pixels is None:
                nz = summed.nonzero()[0]
            else:
                nz = pixels
            data_out = np.vstack([self.data[i].flat[nz]
                                  for i in range(self.data.shape[0])])
        else:
            if pixels is None:
                nz = self.data.nonzero()[0]
            else:
                nz = pixels
            data_out = self.data[nz]
        return (nz, data_out)
    else:
        if pixels is None:
            return (self.hpx._ipix, self.data)
    # FIXME, can we catch this
    raise RuntimeError(
        'HPX.explicit_counts_map called with pixels for a map '
        'that already has pixels')
return a counts map with explicit index scheme Parameters ---------- pixels : `np.ndarray` or None If set, grab only those pixels. If None, grab only non-zero pixels
entailment
def sparse_counts_map(self):
    """ return a counts map with sparse index scheme """
    if self.hpx._ipix is None:
        flatarray = self.data.flatten()
    else:
        flatarray = self.expanded_counts_map().flatten()
    nz = flatarray.nonzero()[0]
    data_out = flatarray[nz]
    return (nz, data_out)
return a counts map with sparse index scheme
entailment
def compute_counts(self, skydir, fn, ebins=None):
    """Compute signal and background counts for a point source at
    position ``skydir`` with spectral parameterization ``fn``.

    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`

    ebins : `~numpy.ndarray`

    Returns
    -------
    sig : `~numpy.ndarray`
        Signal counts array.  Dimensions are energy, angular
        separation, and event type.

    bkg : `~numpy.ndarray`
        Background counts array.  Dimensions are energy, angular
        separation, and event type.
    """
    if ebins is None:
        ebins = self.ebins
        ectr = self.ectr
    else:
        ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    skydir_cel = skydir.transform_to('icrs')
    skydir_gal = skydir.transform_to('galactic')

    sig = []
    bkg = []
    bkg_fit = None
    if self._gdiff_fit is not None:
        bkg_fit = []

    for psf, exp in zip(self._psf, self._exp):
        coords0 = np.meshgrid(*[skydir_cel.ra.deg, ectr], indexing='ij')
        coords1 = np.meshgrid(*[skydir_cel.dec.deg, ectr], indexing='ij')
        expv = exp.interpolate(coords0[0], coords1[0], coords0[1])

        coords0 = np.meshgrid(*[skydir_gal.l.deg, ectr], indexing='ij')
        coords1 = np.meshgrid(*[skydir_gal.b.deg, ectr], indexing='ij')
        bkgv = self._gdiff.interpolate(np.ravel(coords0[0]),
                                       np.ravel(coords1[0]),
                                       np.ravel(coords0[1]))
        bkgv = bkgv.reshape(expv.shape)
        isov = np.exp(np.interp(np.log(ectr), np.log(self._iso[0]),
                                np.log(self._iso[1])))
        bkgv += isov
        s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn,
                                        egy_dim=1,
                                        spatial_model=self.spatial_model,
                                        spatial_size=self.spatial_size)
        sig += [s0]
        bkg += [b0]

        if self._iso_fit is not None:
            isov_fit = np.exp(np.interp(np.log(ectr),
                                        np.log(self._iso_fit[0]),
                                        np.log(self._iso_fit[1])))
        else:
            isov_fit = isov

        if self._gdiff_fit is not None:
            bkgv_fit = self._gdiff_fit.interpolate(np.ravel(coords0[0]),
                                                   np.ravel(coords1[0]),
                                                   np.ravel(coords0[1]))
            bkgv_fit = bkgv_fit.reshape(expv.shape)
            bkgv_fit += isov_fit
            s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv_fit, fn,
                                            egy_dim=1,
                                            spatial_model=self.spatial_model,
                                            spatial_size=self.spatial_size)
            bkg_fit += [b0]

    sig = np.concatenate([np.expand_dims(t, -1) for t in sig])
    bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])
    if self._gdiff_fit is not None:
        bkg_fit = np.concatenate([np.expand_dims(t, -1) for t in bkg_fit])

    return sig, bkg, bkg_fit
Compute signal and background counts for a point source at position ``skydir`` with spectral parameterization ``fn``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` ebins : `~numpy.ndarray` Returns ------- sig : `~numpy.ndarray` Signal counts array. Dimensions are energy, angular separation, and event type. bkg : `~numpy.ndarray` Background counts array. Dimensions are energy, angular separation, and event type.
entailment
def diff_flux_threshold(self, skydir, fn, ts_thresh, min_counts):
    """Compute the differential flux threshold for a point source at
    position ``skydir`` with spectral parameterization ``fn``.

    Parameters
    ----------
    skydir : `~astropy.coordinates.SkyCoord`
        Sky coordinates at which the sensitivity will be evaluated.

    fn : `~fermipy.spectrum.SpectralFunction`

    ts_thresh : float
        Threshold on the detection test statistic (TS).

    min_counts : float
        Threshold on the minimum number of counts.
    """
    sig, bkg, bkg_fit = self.compute_counts(skydir, fn)

    norms = irfs.compute_norm(sig, bkg, ts_thresh, min_counts,
                              sum_axes=[2, 3], rebin_axes=[10, 1],
                              bkg_fit=bkg_fit)
    npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [2, 3]))
    norms = np.squeeze(norms)
    flux = norms * fn.flux(self.ebins[:-1], self.ebins[1:])
    eflux = norms * fn.eflux(self.ebins[:-1], self.ebins[1:])
    dnde = norms * fn.dnde(self.ectr)
    e2dnde = self.ectr ** 2 * dnde

    return dict(e_min=self.ebins[:-1], e_max=self.ebins[1:],
                e_ref=self.ectr, npred=npred, flux=flux, eflux=eflux,
                dnde=dnde, e2dnde=e2dnde)
Compute the differential flux threshold for a point source at position ``skydir`` with spectral parameterization ``fn``. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Sky coordinates at which the sensitivity will be evaluated. fn : `~fermipy.spectrum.SpectralFunction` ts_thresh : float Threshold on the detection test statistic (TS). min_counts : float Threshold on the minimum number of counts.
entailment
def int_flux_threshold(self, skydir, fn, ts_thresh, min_counts):
    """Compute the integral flux threshold for a point source at
    position ``skydir`` with spectral parameterization ``fn``.
    """
    ebins = 10 ** np.linspace(np.log10(self.ebins[0]),
                              np.log10(self.ebins[-1]), 33)
    ectr = np.sqrt(ebins[0] * ebins[-1])

    sig, bkg, bkg_fit = self.compute_counts(skydir, fn, ebins)

    norms = irfs.compute_norm(sig, bkg, ts_thresh, min_counts,
                              sum_axes=[1, 2, 3], bkg_fit=bkg_fit,
                              rebin_axes=[4, 10, 1])

    npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [1, 2, 3]))
    npred = np.array(npred, ndmin=1)
    flux = np.squeeze(norms) * fn.flux(ebins[0], ebins[-1])
    eflux = np.squeeze(norms) * fn.eflux(ebins[0], ebins[-1])
    dnde = np.squeeze(norms) * fn.dnde(ectr)
    e2dnde = ectr ** 2 * dnde

    o = dict(e_min=self.ebins[0], e_max=self.ebins[-1], e_ref=ectr,
             npred=npred, flux=flux, eflux=eflux, dnde=dnde,
             e2dnde=e2dnde)

    sig, bkg, bkg_fit = self.compute_counts(skydir, fn)
    npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [2, 3]))
    flux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *
                      fn.flux(self.ebins[:-1], self.ebins[1:]))
    eflux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *
                       fn.eflux(self.ebins[:-1], self.ebins[1:]))
    dnde = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *
                      fn.dnde(self.ectr))
    e2dnde = ectr ** 2 * dnde

    o['bins'] = dict(npred=npred, flux=flux, eflux=eflux,
                     dnde=dnde, e2dnde=e2dnde,
                     e_min=self.ebins[:-1], e_max=self.ebins[1:],
                     e_ref=self.ectr)
    return o
Compute the integral flux threshold for a point source at position ``skydir`` with spectral parameterization ``fn``.
entailment
def coords_to_vec(lon, lat):
    """ Converts longitude and latitude coordinates to a unit 3-vector

    returns array(n,3) with v_x[i], v_y[i], v_z[i] = directional cosines
    """
    phi = np.radians(lon)
    theta = (np.pi / 2) - np.radians(lat)
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)

    x_vals = sin_t * np.cos(phi)
    y_vals = sin_t * np.sin(phi)
    z_vals = cos_t

    # Stack them into the output array
    out = np.vstack((x_vals, y_vals, z_vals)).swapaxes(0, 1)
    return out
Converts longitude and latitude coordinates to a unit 3-vector returns array(n,3) with v_x[i], v_y[i], v_z[i] = directional cosines
entailment
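The convention here is colatitude theta = 90 deg - lat, so the north pole maps to +z and (lon=0, lat=0) to +x. A quick self-check of that math with plain numpy (inlining the same trigonometry):

import numpy as np

for lon, lat, expect in [(0., 90., (0., 0., 1.)),
                         (0., 0., (1., 0., 0.)),
                         (90., 0., (0., 1., 0.))]:
    phi, theta = np.radians(lon), np.pi / 2 - np.radians(lat)
    vec = np.array([np.sin(theta) * np.cos(phi),
                    np.sin(theta) * np.sin(phi),
                    np.cos(theta)])
    assert np.allclose(vec, expect, atol=1e-12)   # expected direction
    assert np.isclose(np.linalg.norm(vec), 1.)    # unit length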
def get_pixel_size_from_nside(nside):
    """ Returns an estimate of the pixel size from the HEALPix nside
    parameter

    This just uses a lookup table to provide a nice round number
    for each HEALPix order.
    """
    order = int(np.log2(nside))
    if order < 0 or order > 13:
        raise ValueError('HEALPix order must be between 0 and 13: %i' % order)
    return HPX_ORDER_TO_PIXSIZE[order]
Returns an estimate of the pixel size from the HEALPix nside coordinate This just uses a lookup table to provide a nice round number for each HEALPix order.
entailment
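The table presumably stores a rounded value per order; the exact mean pixel size follows from the equal-area property of HEALPix pixels (12 * nside**2 pixels tiling the sphere). A sketch of that standard formula, independent of the module's table:

import numpy as np

def exact_mean_pixel_size(nside):
    """Mean pixel size in degrees: sqrt of (sphere area / npix)."""
    npix = 12 * nside ** 2
    return np.degrees(np.sqrt(4. * np.pi / npix))

print(exact_mean_pixel_size(64))   # ~0.916 deg for nside=64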
def hpx_to_axes(h, npix):
    """ Generate a sequence of bin edge vectors corresponding to the
    axes of a HPX object."""
    x = h.ebins
    z = np.arange(npix[-1] + 1)
    return x, z
Generate a sequence of bin edge vectors corresponding to the axes of a HPX object.
entailment
def hpx_to_coords(h, shape):
    """ Generate an N x D list of pixel center coordinates where N is
    the number of pixels and D is the dimensionality of the map."""
    x, z = hpx_to_axes(h, shape)

    x = np.sqrt(x[0:-1] * x[1:])
    z = z[:-1] + 0.5

    x = np.ravel(np.ones(shape) * x[:, np.newaxis])
    z = np.ravel(np.ones(shape) * z[np.newaxis, :])

    return np.vstack((x, z))
Generate an N x D list of pixel center coordinates where N is the number of pixels and D is the dimensionality of the map.
entailment
def make_hpx_to_wcs_mapping_centers(hpx, wcs):
    """ Make the mapping data needed to go from HPX pixelization to a
    WCS-based array

    Parameters
    ----------
    hpx : `~fermipy.hpx_utils.HPX`
        The healpix mapping (an HPX object)

    wcs : `~astropy.wcs.WCS`
        The wcs mapping (a pywcs.wcs object)

    Returns
    -------
    ipixs : array(nx,ny) of HEALPix pixel indices for each WCS pixel;
        -1 indicates the WCS pixel does not contain the center of a
        HEALPix pixel

    mult_val : array(nx,ny) of 1.

    npix : tuple(nx,ny) with the shape of the WCS grid
    """
    npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))
    mult_val = np.ones(npix).T.flatten()
    sky_crds = hpx.get_sky_coords()
    pix_crds = wcs.wcs_world2pix(sky_crds, 0).astype(int)
    ipixs = -1 * np.ones(npix, int).T.flatten()
    pix_index = npix[1] * pix_crds[0:, 0] + pix_crds[0:, 1]

    if hpx._ipix is None:
        for ipix, pix_crd in enumerate(pix_index):
            ipixs[pix_crd] = ipix
    else:
        for pix_crd, ipix in zip(pix_index, hpx._ipix):
            ipixs[pix_crd] = ipix

    ipixs = ipixs.reshape(npix).T.flatten()
    return ipixs, mult_val, npix
Make the mapping data needed to go from HPX pixelization to a WCS-based array Parameters ---------- hpx : `~fermipy.hpx_utils.HPX` The healpix mapping (an HPX object) wcs : `~astropy.wcs.WCS` The wcs mapping (a pywcs.wcs object) Returns ------- ipixs : array(nx,ny) of HEALPix pixel indices for each WCS pixel; -1 indicates the WCS pixel does not contain the center of a HEALPix pixel mult_val : array(nx,ny) of 1. npix : tuple(nx,ny) with the shape of the WCS grid
entailment
def make_hpx_to_wcs_mapping(hpx, wcs):
    """Make the mapping data needed to go from HPX pixelization to a
    WCS-based array

    Parameters
    ----------
    hpx : `~fermipy.hpx_utils.HPX`
        The healpix mapping (an HPX object)

    wcs : `~astropy.wcs.WCS`
        The wcs mapping (a pywcs.wcs object)

    Returns
    -------
    ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel

    mult_val : array(nx,ny) of 1./number of wcs pixels pointing at
        each HEALPix pixel

    npix : tuple(nx,ny) with the shape of the wcs grid
    """
    npix = (int(wcs.wcs.crpix[0] * 2), int(wcs.wcs.crpix[1] * 2))

    pix_crds = np.dstack(np.meshgrid(np.arange(npix[0]),
                                     np.arange(npix[1]))).swapaxes(0, 1).reshape((npix[0] * npix[1], 2))

    if wcs.wcs.naxis == 2:
        sky_crds = wcs.wcs_pix2world(pix_crds, 0)
    else:
        use_wcs = wcs.dropaxis(2)
        sky_crds = use_wcs.wcs_pix2world(pix_crds, 0)

    sky_crds *= np.radians(1.)
    sky_crds[0:, 1] = (np.pi / 2) - sky_crds[0:, 1]

    fullmask = np.isnan(sky_crds)
    mask = (fullmask[0:, 0] + fullmask[0:, 1]) == 0
    ipixs = -1 * np.ones(npix, int).T.flatten()
    ipixs[mask] = hp.pixelfunc.ang2pix(hpx.nside, sky_crds[0:, 1][mask],
                                       sky_crds[0:, 0][mask], hpx.nest)

    # Here we are counting the number of HEALPix pixels each WCS pixel
    # points to; this could probably be vectorized by filling a histogram.
    d_count = {}
    for ipix in ipixs:
        if ipix in d_count:
            d_count[ipix] += 1
        else:
            d_count[ipix] = 1

    # Here we are getting a multiplicative factor that tells us how to
    # split up the counts in each HEALPix pixel (by dividing the
    # corresponding WCS pixels by the number of associated HEALPix
    # pixels).  This could also likely be vectorized.
    mult_val = np.ones(ipixs.shape)
    for i, ipix in enumerate(ipixs):
        mult_val[i] /= d_count[ipix]

    ipixs = ipixs.reshape(npix).flatten()
    mult_val = mult_val.reshape(npix).flatten()
    return ipixs, mult_val, npix
Make the mapping data needed to go from HPX pixelization to a WCS-based array Parameters ---------- hpx : `~fermipy.hpx_utils.HPX` The healpix mapping (an HPX object) wcs : `~astropy.wcs.WCS` The wcs mapping (a pywcs.wcs object) Returns ------- ipixs : array(nx,ny) of HEALPix pixel indices for each wcs pixel mult_val : array(nx,ny) of 1./number of wcs pixels pointing at each HEALPix pixel npix : tuple(nx,ny) with the shape of the wcs grid
entailment
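The two Python loops flagged in the comments can indeed be vectorized with np.unique, as in this sketch on a toy index array (the behavior matches the dict-based loops above, including the -1 "unmapped" entries):

import numpy as np

ipixs = np.array([3, 3, 3, 7, -1, 7, 2])

# Count how many WCS pixels point at each HEALPix pixel...
uniq, inverse, counts = np.unique(ipixs, return_inverse=True,
                                  return_counts=True)

# ...and split each HEALPix value evenly across those WCS pixels
mult_val = 1. / counts[inverse]
print(mult_val)   # [0.333.. 0.333.. 0.333.. 0.5 1. 0.5 1.]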
def parse_hpxregion(region):
    """Parse the HPX_REG header keyword into a list of tokens."""
    m = re.match(r'([A-Za-z\_]*?)\((.*?)\)', region)
    if m is None:
        raise Exception('Failed to parse hpx region string.')
    if not m.group(1):
        return re.split(',', m.group(2))
    else:
        return [m.group(1)] + re.split(',', m.group(2))
Parse the HPX_REG header keyword into a list of tokens.
entailment
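Region strings look like DISK(lon,lat,radius): the regex splits the leading tag from the comma-separated arguments. A standalone demonstration, repeating the function body so the snippet runs on its own (the example region values are illustrative):

import re

def parse_hpxregion(region):
    m = re.match(r'([A-Za-z\_]*?)\((.*?)\)', region)
    if m is None:
        raise ValueError('Failed to parse hpx region string.')
    if not m.group(1):
        return re.split(',', m.group(2))
    return [m.group(1)] + re.split(',', m.group(2))

print(parse_hpxregion('DISK(83.63,22.01,10.)'))
# ['DISK', '83.63', '22.01', '10.']
print(parse_hpxregion('HPX_PIXEL(NESTED,8,42)'))
# ['HPX_PIXEL', 'NESTED', '8', '42']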
def upix_to_pix(upix):
    """Get the pixel index and nside from a unique pixel number."""
    # uniq = pix + 4 * nside**2, so nside = 2**floor(log2(uniq / 4) / 2)
    nside = np.power(2, np.floor(np.log2(upix / 4) / 2)).astype(int)
    pix = upix - 4 * np.power(nside, 2)
    return pix, nside
Get the pixel index and nside from a unique pixel number.
entailment
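In the HEALPix "unique" scheme the index packs nside and pixel into one integer, uniq = pix + 4 * nside**2. A round-trip sketch with plain numpy (the encoder here is the obvious inverse and is not part of the module):

import numpy as np

def pix_to_upix(pix, nside):
    # Encode: offset the pixel index by 4 * nside**2
    return pix + 4 * nside ** 2

def upix_to_pix(upix):
    # Decode: recover nside, then subtract its offset
    nside = np.power(2, np.floor(np.log2(upix / 4) / 2)).astype(int)
    return upix - 4 * nside ** 2, nside

for nside in [1, 4, 64]:
    pix = np.arange(12 * nside ** 2)
    back, ns = upix_to_pix(pix_to_upix(pix, nside))
    assert np.all(back == pix) and np.all(ns == nside)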
def create_hpx(cls, nside, nest, coordsys='CEL', order=-1, ebins=None,
               region=None, conv=HPX_Conv('FGST_CCUBE'), pixels=None):
    """Create a HPX object.

    Parameters
    ----------
    nside : int
        HEALPix nside parameter

    nest : bool
        True for HEALPix "NESTED" indexing scheme, False for "RING" scheme.

    coordsys : str
        "CEL" or "GAL"

    order : int
        nside = 2**order

    ebins : `~numpy.ndarray`
        Energy bin edges

    region : str
        Allows for partial-sky mappings

    conv : `HPX_Conv`
        Object defining the convention for column names and the like

    pixels : `np.array` or `None`
        For use with 'EXPLICIT' region string
    """
    return cls(nside, nest, coordsys, order, ebins,
               region=region, conv=conv, pixels=pixels)
Create a HPX object. Parameters ---------- nside : int HEALPix nside parameter nest : bool True for HEALPix "NESTED" indexing scheme, False for "RING" scheme. coordsys : str "CEL" or "GAL" order : int nside = 2**order ebins : `~numpy.ndarray` Energy bin edges region : str Allows for partial-sky mappings conv : `HPX_Conv` Object defining the convention for column names and the like pixels : `np.array` or `None` For use with 'EXPLICIT' region string
entailment
def identify_HPX_convention(header):
    """ Identify the convention used to write this file """
    # Hopefully the file contains the HPX_CONV keyword specifying
    # the convention used
    try:
        return header['HPX_CONV']
    except KeyError:
        pass

    indxschm = header.get('INDXSCHM', None)

    # Try based on the EXTNAME keyword
    extname = header.get('EXTNAME', None)
    if extname == 'HPXEXPOSURES':
        return 'FGST_BEXPCUBE'
    elif extname == 'SKYMAP2':
        if 'COORDTYPE' in header.keys():
            return 'GALPROP'
        else:
            return 'GALPROP2'

    # Check for the INDXSCHM keyword
    if indxschm == 'SPARSE':
        return 'FGST_SRCMAP_SPARSE'

    # Check the name of the first column
    colname = header['TTYPE1']
    if colname == 'PIX':
        colname = header['TTYPE2']

    if colname == 'KEY':
        return 'FGST_SRCMAP_SPARSE'
    elif colname == 'ENERGY1':
        return 'FGST_TEMPLATE'
    elif colname == 'COSBINS':
        return 'FGST_LTCUBE'
    elif colname == 'Bin0':
        return 'GALPROP'
    elif colname in ['CHANNEL1', 'Bin 0']:
        if extname == 'SKYMAP':
            return 'FGST_CCUBE'
        else:
            return 'FGST_SRCMAP'
    else:
        raise ValueError("Could not identify HEALPix convention")
Identify the convention used to write this file
entailment
def create_from_header(cls, header, ebins=None, pixels=None):
    """ Creates an HPX object from a FITS header.

    header : The FITS header
    ebins  : Energy bin edges [optional]
    """
    convname = HPX.identify_HPX_convention(header)
    conv = HPX_FITS_CONVENTIONS[convname]

    if conv.convname not in ['GALPROP', 'GALPROP2']:
        if header["PIXTYPE"] != "HEALPIX":
            raise Exception("PIXTYPE != HEALPIX")

    if header["ORDERING"] == "RING":
        nest = False
    elif header["ORDERING"] == "NESTED":
        nest = True
    else:
        raise Exception("ORDERING != RING | NESTED")

    try:
        order = header["ORDER"]
    except KeyError:
        order = -1

    if order < 0:
        nside = header["NSIDE"]
    else:
        nside = -1

    try:
        coordsys = header[conv.coordsys]
    except KeyError:
        coordsys = header['COORDSYS']

    try:
        region = header["HPX_REG"]
    except KeyError:
        try:
            region = header["HPXREGION"]
        except KeyError:
            region = None

    try:
        if header['INDXSCHM'] in ['EXPLICIT', 'PARTIAL']:
            use_pixels = pixels
        else:
            use_pixels = None
    except KeyError:
        use_pixels = None

    return cls(nside, nest, coordsys, order, ebins,
               region=region, conv=conv, pixels=use_pixels)
Creates an HPX object from a FITS header. header : The FITS header ebins : Energy bin edges [optional]
entailment
def create_from_hdu(cls, hdu, ebins=None):
    """ Creates an HPX object from a FITS HDU.

    hdu   : The FITS HDU
    ebins : Energy bin edges [optional]
    """
    convname = HPX.identify_HPX_convention(hdu.header)
    conv = HPX_FITS_CONVENTIONS[convname]
    try:
        pixels = hdu.data[conv.idxstring]
    except KeyError:
        pixels = None
    return cls.create_from_header(hdu.header, ebins, pixels)
Creates an HPX object from a FITS HDU. hdu : The FITS HDU ebins : Energy bin edges [optional]
entailment
def make_header(self):
    """ Builds and returns FITS header for this HEALPix map """
    cards = [fits.Card("TELESCOP", "GLAST"),
             fits.Card("INSTRUME", "LAT"),
             fits.Card(self._conv.coordsys, self._coordsys),
             fits.Card("PIXTYPE", "HEALPIX"),
             fits.Card("ORDERING", self.ordering),
             fits.Card("ORDER", self._order),
             fits.Card("NSIDE", self._nside),
             fits.Card("FIRSTPIX", 0),
             fits.Card("LASTPIX", self._maxpix - 1),
             fits.Card("HPX_CONV", self._conv.convname)]

    if self._coordsys == "CEL":
        cards.append(fits.Card("EQUINOX", 2000.0,
                               "Equinox of RA & DEC specifications"))

    if self._region is not None:
        cards.append(fits.Card("HPX_REG", self._region))
        cards.append(fits.Card("INDXSCHM", "PARTIAL"))
    elif self._ipix is not None:
        cards.append(fits.Card("INDXSCHM", "EXPLICIT"))
    else:
        if self._conv.convname in ['FGST_SRCMAP_SPARSE']:
            cards.append(fits.Card("INDXSCHM", "SPARSE"))
        else:
            cards.append(fits.Card("INDXSCHM", "IMPLICIT"))

    header = fits.Header(cards)
    return header
Builds and returns FITS header for this HEALPix map
entailment
def make_hdu(self, data, **kwargs):
    """ Builds and returns a FITS HDU with input data

    data : The data being stored

    Keyword arguments
    -----------------
    extname : The HDU extension name
    colbase : The prefix for column names
    """
    shape = data.shape
    extname = kwargs.get('extname', self.conv.extname)
    if shape[-1] != self._npix:
        raise Exception(
            "Size of data array does not match number of pixels")
    cols = []
    if self._ipix is not None:
        cols.append(fits.Column(self.conv.idxstring, "J", array=self._ipix))

    if self.conv.convname == 'FGST_SRCMAP_SPARSE':
        nonzero = data.nonzero()
        nfilled = len(nonzero[0])
        if len(shape) == 1:
            cols.append(fits.Column("PIX", "J",
                                    array=nonzero[0].astype(int)))
            cols.append(fits.Column("VALUE", "E",
                                    array=data.flat[nonzero].astype(float).reshape(nfilled)))
        elif len(shape) == 2:
            keys = self._npix * nonzero[0] + nonzero[1]
            cols.append(fits.Column("PIX", "J",
                                    array=nonzero[1].reshape(nfilled)))
            cols.append(fits.Column("CHANNEL", "I",
                                    array=nonzero[0].reshape(nfilled)))
            cols.append(fits.Column("VALUE", "E",
                                    array=data.flat[keys].astype(float).reshape(nfilled)))
        else:
            raise Exception("HPX.write_fits only handles 1D and 2D maps")
    else:
        if len(shape) == 1:
            cols.append(fits.Column(self.conv.colname(
                indx=self.conv.firstcol), "E", array=data.astype(float)))
        elif len(shape) == 2:
            for i in range(shape[0]):
                cols.append(fits.Column(self.conv.colname(
                    indx=i + self.conv.firstcol), "E",
                    array=data[i].astype(float)))
        else:
            raise Exception("HPX.write_fits only handles 1D and 2D maps")

    header = self.make_header()
    hdu = fits.BinTableHDU.from_columns(cols, header=header, name=extname)
    return hdu
Builds and returns a FITS HDU with input data data : The data being stored Keyword arguments ----------------- extname : The HDU extension name colbase : The prefix for column names
entailment
def make_energy_bounds_hdu(self, extname="EBOUNDS"):
    """ Builds and returns a FITS HDU with the energy bin boundaries

    extname : The HDU extension name
    """
    if self._ebins is None:
        return None
    # One channel per energy bin, i.e., one fewer than the number of edges
    cols = [fits.Column("CHANNEL", "I",
                        array=np.arange(1, len(self._ebins))),
            fits.Column("E_MIN", "1E", unit='keV',
                        array=1000 * self._ebins[0:-1]),
            fits.Column("E_MAX", "1E", unit='keV',
                        array=1000 * self._ebins[1:])]
    hdu = fits.BinTableHDU.from_columns(
        cols, self.make_header(), name=extname)
    return hdu
Builds and returns a FITS HDU with the energy bin boundaries extname : The HDU extension name
entailment
def make_energies_hdu(self, extname="ENERGIES"):
    """ Builds and returns a FITS HDU with the energy values

    extname : The HDU extension name
    """
    if self._evals is None:
        return None
    cols = [fits.Column("ENERGY", "1E", unit='MeV',
                        array=self._evals)]
    hdu = fits.BinTableHDU.from_columns(
        cols, self.make_header(), name=extname)
    return hdu
Builds and returns a FITS HDU with the energy values extname : The HDU extension name
entailment
def write_fits(self, data, outfile, extname="SKYMAP", clobber=True):
    """ Write input data to a FITS file

    data    : The data being stored
    outfile : The name of the output file
    extname : The HDU extension name
    clobber : True -> overwrite existing files
    """
    hdu_prim = fits.PrimaryHDU()
    hdu_hpx = self.make_hdu(data, extname=extname)
    hl = [hdu_prim, hdu_hpx]
    if self.conv.energy_hdu == 'EBOUNDS':
        hdu_energy = self.make_energy_bounds_hdu()
    elif self.conv.energy_hdu == 'ENERGIES':
        hdu_energy = self.make_energies_hdu()
    else:
        hdu_energy = None
    if hdu_energy is not None:
        hl.append(hdu_energy)
    hdulist = fits.HDUList(hl)
    hdulist.writeto(outfile, overwrite=clobber)
Write input data to a FITS file data : The data being stored outfile : The name of the output file extname : The HDU extension name clobber : True -> overwrite existing files
entailment
def get_index_list(nside, nest, region):
    """ Returns the list of pixel indices for all the pixels in a region

    nside  : HEALPix nside parameter
    nest   : True for 'NESTED', False = 'RING'
    region : HEALPix region string
    """
    tokens = parse_hpxregion(region)
    if tokens[0] == 'DISK':
        vec = coords_to_vec(float(tokens[1]), float(tokens[2]))
        ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])),
                              inclusive=False, nest=nest)
    elif tokens[0] == 'DISK_INC':
        vec = coords_to_vec(float(tokens[1]), float(tokens[2]))
        ilist = hp.query_disc(nside, vec[0], np.radians(float(tokens[3])),
                              inclusive=True, fact=int(tokens[4]),
                              nest=nest)
    elif tokens[0] == 'HPX_PIXEL':
        nside_pix = int(tokens[2])
        if tokens[1] == 'NESTED':
            ipix_ring = hp.nest2ring(nside_pix, int(tokens[3]))
        elif tokens[1] == 'RING':
            ipix_ring = int(tokens[3])
        else:
            raise Exception(
                "Did not recognize ordering scheme %s" % tokens[1])
        ilist = match_hpx_pixel(nside, nest, nside_pix, ipix_ring)
    else:
        raise Exception(
            "HPX.get_index_list did not recognize region type %s" % tokens[0])
    return ilist
Returns the list of pixel indices for all the pixels in a region nside : HEALPix nside parameter nest : True for 'NESTED', False = 'RING' region : HEALPix region string
entailment
def get_ref_dir(region, coordsys):
    """ Finds and returns the reference direction for a given
    HEALPix region string.

    region   : a string describing a HEALPix region
    coordsys : coordinate system, GAL | CEL
    """
    if region is None:
        if coordsys == "GAL":
            c = SkyCoord(0., 0., frame=Galactic, unit="deg")
        elif coordsys == "CEL":
            c = SkyCoord(0., 0., frame=ICRS, unit="deg")
        return c

    tokens = parse_hpxregion(region)
    if tokens[0] in ['DISK', 'DISK_INC']:
        if coordsys == "GAL":
            c = SkyCoord(float(tokens[1]), float(tokens[2]),
                         frame=Galactic, unit="deg")
        elif coordsys == "CEL":
            c = SkyCoord(float(tokens[1]), float(tokens[2]),
                         frame=ICRS, unit="deg")
        return c
    elif tokens[0] == 'HPX_PIXEL':
        nside_pix = int(tokens[2])
        ipix_pix = int(tokens[3])
        if tokens[1] == 'NESTED':
            nest_pix = True
        elif tokens[1] == 'RING':
            nest_pix = False
        else:
            raise Exception(
                "Did not recognize ordering scheme %s" % tokens[1])
        theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix)
        lat = np.degrees((np.pi / 2) - theta)
        lon = np.degrees(phi)
        if coordsys == "GAL":
            c = SkyCoord(lon, lat, frame=Galactic, unit="deg")
        elif coordsys == "CEL":
            c = SkyCoord(lon, lat, frame=ICRS, unit="deg")
        return c
    else:
        raise Exception(
            "HPX.get_ref_dir did not recognize region type %s" % tokens[0])
Finds and returns the reference direction for a given HEALPix region string. region : a string describing a HEALPix region coordsys : coordinate system, GAL | CEL
entailment
def get_region_size(region):
    """ Finds and returns the approximate size of a region (in degrees)
    from a HEALPix region string.
    """
    if region is None:
        return 180.
    tokens = parse_hpxregion(region)
    if tokens[0] in ['DISK', 'DISK_INC']:
        return float(tokens[3])
    elif tokens[0] == 'HPX_PIXEL':
        pixel_size = get_pixel_size_from_nside(int(tokens[2]))
        return 2. * pixel_size
    else:
        raise Exception(
            "HPX.get_region_size did not recognize region type %s" % tokens[0])
Finds and returns the approximate size of a region (in degrees) from a HEALPix region string.
entailment
def make_wcs(self, naxis=2, proj='CAR', energies=None, oversample=2):
    """ Make a WCS projection appropriate for this HPX pixelization """
    w = WCS(naxis=naxis)
    skydir = self.get_ref_dir(self._region, self.coordsys)

    if self.coordsys == 'CEL':
        w.wcs.ctype[0] = 'RA---%s' % (proj)
        w.wcs.ctype[1] = 'DEC--%s' % (proj)
        w.wcs.crval[0] = skydir.ra.deg
        w.wcs.crval[1] = skydir.dec.deg
    elif self.coordsys == 'GAL':
        w.wcs.ctype[0] = 'GLON-%s' % (proj)
        w.wcs.ctype[1] = 'GLAT-%s' % (proj)
        w.wcs.crval[0] = skydir.galactic.l.deg
        w.wcs.crval[1] = skydir.galactic.b.deg
    else:
        raise Exception('Unrecognized coordinate system.')

    pixsize = get_pixel_size_from_nside(self.nside)
    roisize = self.get_region_size(self._region)
    allsky = False
    if roisize > 45:
        roisize = 90
        allsky = True

    npixels = int(2. * roisize / pixsize) * oversample
    crpix = npixels / 2.

    if allsky:
        w.wcs.crpix[0] = 2 * crpix
        npix = (2 * npixels, npixels)
    else:
        w.wcs.crpix[0] = crpix
        npix = (npixels, npixels)

    w.wcs.crpix[1] = crpix
    w.wcs.cdelt[0] = -pixsize / oversample
    w.wcs.cdelt[1] = pixsize / oversample

    if naxis == 3:
        w.wcs.crpix[2] = 1
        w.wcs.ctype[2] = 'Energy'
        if energies is not None:
            w.wcs.crval[2] = 10 ** energies[0]
            w.wcs.cdelt[2] = 10 ** energies[1] - 10 ** energies[0]

    w = WCS(w.to_header())
    wcs_proj = WCSProj(w, npix)
    return wcs_proj
Make a WCS projection appropriate for this HPX pixelization
entailment
def get_sky_coords(self):
    """ Get the sky coordinates of all the pixels in this pixelization """
    if self._ipix is None:
        theta, phi = hp.pix2ang(
            self._nside, list(range(self._npix)), self._nest)
    else:
        theta, phi = hp.pix2ang(self._nside, self._ipix, self._nest)

    lat = np.degrees((np.pi / 2) - theta)
    lon = np.degrees(phi)
    return np.vstack([lon, lat]).T
Get the sky coordinates of all the pixels in this pixelization
entailment
def get_pixel_indices(self, lats, lons):
    """ Return the indices in the flat array corresponding to a set
    of coordinates """
    theta = np.radians(90. - lats)
    phi = np.radians(lons)
    return hp.ang2pix(self.nside, theta, phi, self.nest)
"Return the indices in the flat array corresponding to a set of coordinates
entailment
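healpy's ang2pix takes the colatitude theta in radians, measured from the north pole, which is why the method converts with 90 deg - lat. A standalone check, assuming healpy is installed (nside is a toy value):

import healpy as hp
import numpy as np

nside = 16
lats = np.array([90., 0., -90.])       # degrees
lons = np.array([0., 45., 0.])

theta = np.radians(90. - lats)         # colatitude: 0 at north pole, pi at south
phi = np.radians(lons)
pix = hp.ang2pix(nside, theta, phi, nest=False)
print(pix)
assert np.all((pix >= 0) & (pix < 12 * nside ** 2))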
def skydir_to_pixel(self, skydir):
    """Return the pixel index of a SkyCoord object."""
    if self.coordsys in ['CEL', 'EQU']:
        skydir = skydir.transform_to('icrs')
        lon = skydir.ra.deg
        lat = skydir.dec.deg
    else:
        skydir = skydir.transform_to('galactic')
        lon = skydir.l.deg
        lat = skydir.b.deg
    return self.get_pixel_indices(lat, lon)
Return the pixel index of a SkyCoord object.
entailment
def write_to_fitsfile(self, fitsfile, clobber=True):
    """Write this mapping to a FITS file, to avoid having to recompute it
    """
    from fermipy.skymap import Map
    hpx_header = self._hpx.make_header()
    index_map = Map(self.ipixs, self.wcs)
    mult_map = Map(self.mult_val, self.wcs)
    prim_hdu = index_map.create_primary_hdu()
    mult_hdu = mult_map.create_image_hdu()
    for key in ['COORDSYS', 'ORDERING', 'PIXTYPE',
                'ORDER', 'NSIDE', 'FIRSTPIX', 'LASTPIX']:
        prim_hdu.header[key] = hpx_header[key]
        mult_hdu.header[key] = hpx_header[key]

    hdulist = fits.HDUList([prim_hdu, mult_hdu])
    hdulist.writeto(fitsfile, overwrite=clobber)
Write this mapping to a FITS file, to avoid having to recompute it
entailment
def create_from_fitsfile(cls, fitsfile):
    """ Read a FITS file and use it to make a mapping """
    from fermipy.skymap import Map
    index_map = Map.create_from_fits(fitsfile)
    mult_map = Map.create_from_fits(fitsfile, hdu=1)
    ff = fits.open(fitsfile)
    hpx = HPX.create_from_hdu(ff[0])
    mapping_data = dict(ipixs=index_map.counts,
                        mult_val=mult_map.counts,
                        npix=mult_map.counts.shape)
    return cls(hpx, index_map.wcs, mapping_data)
Read a fits file and use it to make a mapping
entailment
def fill_wcs_map_from_hpx_data(self, hpx_data, wcs_data, normalize=True):
    """Fills the WCS map from the HPX data using the pre-calculated
    mappings

    hpx_data  : the input HEALPix data
    wcs_data  : the data array being filled
    normalize : True -> preserve integral by splitting HEALPix values
                between bins
    """
    # FIXME, there really ought to be a better way to do this
    hpx_naxis = len(hpx_data.shape)
    wcs_naxis = len(wcs_data.shape)
    if hpx_naxis + 1 != wcs_naxis:
        raise ValueError("HPX.fill_wcs_map_from_hpx_data: HPX naxis "
                         "should be 1 less than WCS naxis: %i, %i" %
                         (hpx_naxis, wcs_naxis))
    if hpx_naxis == 2:
        if hpx_data.shape[1] != wcs_data.shape[2]:
            raise ValueError("HPX.fill_wcs_map_from_hpx_data: sizes of "
                             "energy axes don't match: %i, %i" %
                             (hpx_data.shape[1], wcs_data.shape[2]))

    lmap_valid = self._lmap[self._valid]
    wcs_layer_shape = wcs_data.shape[0] * wcs_data.shape[1]
    if hpx_naxis == 2:
        for i in range(hpx_data.shape[1]):
            wcs_data_layer = np.zeros(wcs_layer_shape)
            wcs_data_layer[self._valid] = hpx_data[:, i][lmap_valid]
            if normalize:
                wcs_data_layer *= self._mult_val
            wcs_data[:, :, i].flat = wcs_data_layer
    else:
        wcs_data_flat = np.zeros(wcs_layer_shape)
        wcs_data_flat[self._valid] = hpx_data[lmap_valid]
        if normalize:
            wcs_data_flat *= self._mult_val
        wcs_data.flat = wcs_data_flat
Fills the WCS map from the HPX data using the pre-calculated mappings hpx_data : the input HEALPix data wcs_data : the data array being filled normalize : True -> preserve integral by splitting HEALPix values between bins
entailment
def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True):
    """ Creates and fills a WCS map from the HPX data using the
    pre-calculated mappings

    hpx_data  : the input HEALPix data
    wcs       : the WCS object
    normalize : True -> preserve integral by splitting HEALPix values
                between bins
    """
    wcs_data = np.zeros(wcs.npix)
    self.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize)
    return wcs_data
Creates and fills a WCS map from the HPX data using the pre-calculated mappings hpx_data : the input HEALPix data wcs : the WCS object normalize : True -> preserve integral by splitting HEALPix values between bins
entailment
def _get_enum_bins(configfile):
    """Get the number of energy bins in the SED

    Parameters
    ----------
    configfile : str
        Fermipy configuration file.

    Returns
    -------
    nbins : int
        The number of energy bins
    """
    config = yaml.safe_load(open(configfile))
    emin = config['selection']['emin']
    emax = config['selection']['emax']
    log_emin = np.log10(emin)
    log_emax = np.log10(emax)
    ndec = log_emax - log_emin
    binsperdec = config['binning']['binsperdec']
    nebins = int(np.round(binsperdec * ndec))
    return nebins
Get the number of energy bin in the SED Parameters ---------- configfile : str Fermipy configuration file. Returns ------- nbins : int The number of energy bins
entailment
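A worked instance of the bin count computed above, with a made-up in-memory config instead of a file:

import numpy as np

config = {'selection': {'emin': 100., 'emax': 100000.},
          'binning': {'binsperdec': 8}}
ndec = (np.log10(config['selection']['emax']) -
        np.log10(config['selection']['emin']))
nebins = int(np.round(config['binning']['binsperdec'] * ndec))
print(nebins)  # 24: three energy decades at 8 bins per decade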
def fill_output_table(filelist, hdu, collist, nbins):
    """Fill the arrays from the files in filelist

    Parameters
    ----------
    filelist : list
        List of the files to get data from.

    hdu : str
        Name of the HDU containing the table with the input data.

    collist : list
        List of dictionaries specifying the output columns;
        each has a 'name' key and an optional 'unit' key.

    nbins : int
        Number of bins in the input data arrays

    Returns
    -------
    table : astropy.table.Table
        A table with all the requested data extracted.
    """
    nfiles = len(filelist)
    shape = (nbins, nfiles)
    outdict = {}
    for c in collist:
        outdict[c['name']] = np.ndarray(shape)
    sys.stdout.write('Working on %i files: ' % nfiles)
    sys.stdout.flush()
    for i, f in enumerate(filelist):
        sys.stdout.write('.')
        sys.stdout.flush()
        tab = Table.read(f, hdu)
        for c in collist:
            cname = c['name']
            outdict[cname][:, i] = tab[cname].data
    sys.stdout.write('!\n')
    outcols = []
    for c in collist:
        cname = c['name']
        if 'unit' in c:
            col = Column(data=outdict[cname], name=cname,
                         dtype=float, shape=nfiles, unit=c['unit'])
        else:
            col = Column(data=outdict[cname], name=cname,
                         dtype=float, shape=nfiles)
        outcols.append(col)
    tab = Table(data=outcols)
    return tab
Fill the arrays from the files in filelist Parameters ---------- filelist : list List of the files to get data from. hdu : str Name of the HDU containing the table with the input data. collist : list List of dictionaries specifying the output columns; each has a 'name' key and an optional 'unit' key. nbins : int Number of bins in the input data arrays Returns ------- table : astropy.table.Table A table with all the requested data extracted.
entailment
def vstack_tables(filelist, hdus):
    """vstack a set of HDUs from a set of files

    Parameters
    ----------
    filelist : list
        List of the files to get data from.

    hdus : list
        Names of the HDUs containing the tables with the input data.

    Returns
    -------
    out_tables : list
        A list with the table with all the requested data extracted.

    out_names : list
        A list with the names of the tables.
    """
    nfiles = len(filelist)
    out_tables = []
    out_names = []
    for hdu in hdus:
        sys.stdout.write('Working on %i files for %s: ' % (nfiles, hdu))
        sys.stdout.flush()
        tlist = []
        for f in filelist:
            try:
                tab = Table.read(f, hdu)
                tlist.append(tab)
                sys.stdout.write('.')
            except KeyError:
                sys.stdout.write('x')
            sys.stdout.flush()
        sys.stdout.write('!\n')
        if tlist:
            out_table = vstack(tlist)
            out_tables.append(out_table)
            out_names.append(hdu)
    return (out_tables, out_names)
vstack a set of HDUs from a set of files Parameters ---------- filelist : list List of the files to get data from. hdus : list Names of the HDUs containing the tables with the input data. Returns ------- out_tables : list A list with the table with all the requested data extracted. out_names : list A list with the names of the tables.
entailment
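A standalone sketch of the vstack step above, built from in-memory tables rather than FITS files so it runs as-is:

from astropy.table import Table, vstack

t1 = Table({'norm': [1.0, 2.0]})
t2 = Table({'norm': [3.0]})
stacked = vstack([t1, t2])
print(len(stacked))  # 3 rows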
def collect_summary_stats(data):
    """Collect summary statistics from an array

    This creates a dictionary of output arrays of summary statistics,
    with the input array dimension reduced by one.

    Parameters
    ----------
    data : `numpy.ndarray`
        Array with the collected input data

    Returns
    -------
    output : dict
        Dictionary of `np.ndarray` with the summary data.
        These include mean, std, median, and 4 quantiles
        (0.025, 0.16, 0.84, 0.975).
    """
    mean = np.mean(data, axis=0)
    std = np.std(data, axis=0)
    median = np.median(data, axis=0)
    q02, q16, q84, q97 = np.percentile(data, [2.5, 16, 84, 97.5], axis=0)
    o = dict(mean=mean,
             std=std,
             median=median,
             q02=q02,
             q16=q16,
             q84=q84,
             q97=q97)
    return o
Collect summary statistics from an array This creates a dictionary of output arrays of summary statistics, with the input array dimension reduced by one. Parameters ---------- data : `numpy.ndarray` Array with the collected input data Returns ------- output : dict Dictionary of `np.ndarray` with the summary data. These include mean, std, median, and 4 quantiles (0.025, 0.16, 0.84, 0.975).
entailment
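A quick usage sketch, assuming collect_summary_stats from the entry above is in scope; the input is toy data with 1000 realizations of a 5-bin quantity, so axis 0 is the one reduced away.

import numpy as np

data = np.random.normal(loc=1.0, scale=0.1, size=(1000, 5))
stats = collect_summary_stats(data)
print(stats['median'].shape)        # (5,)
print(stats['q16'] < stats['q84'])  # elementwise True for non-degenerate data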
def add_summary_stats_to_table(table_in, table_out, colnames):
    """Collect summary statistics from an input table and add them to an output table

    Parameters
    ----------
    table_in : `astropy.table.Table`
        Table with the input data.

    table_out : `astropy.table.Table`
        Table with the output data.

    colnames : list
        List of the column names to get summary statistics for.
    """
    for col in colnames:
        col_in = table_in[col]
        stats = collect_summary_stats(col_in.data)
        for k, v in stats.items():
            out_name = "%s_%s" % (col, k)
            col_out = Column(data=np.vstack([v]),
                             name=out_name,
                             dtype=col_in.dtype,
                             shape=v.shape,
                             unit=col_in.unit)
            table_out.add_column(col_out)
Collect summary statistics from an input table and add them to an output table Parameters ---------- table_in : `astropy.table.Table` Table with the input data. table_out : `astropy.table.Table` Table with the output data. colnames : list List of the column names to get summary statistics for.
entailment
def summarize_sed_results(sed_table): """Build a stats summary table for a table that has all the SED results """ del_cols = ['dnde', 'dnde_err', 'dnde_errp', 'dnde_errn', 'dnde_ul', 'e2dnde', 'e2dnde_err', 'e2dnde_errp', 'e2dnde_errn', 'e2dnde_ul', 'norm', 'norm_err', 'norm_errp', 'norm_errn', 'norm_ul', 'ts'] stats_cols = ['dnde', 'dnde_ul', 'e2dnde', 'e2dnde_ul', 'norm', 'norm_ul'] table_out = Table(sed_table[0]) table_out.remove_columns(del_cols) add_summary_stats_to_table(sed_table, table_out, stats_cols) return table_out
Build a stats summary table for a table that has all the SED results
entailment
def run_analysis(self, argv): """Run this analysis""" args = self._parser.parse_args(argv) sedfile = args.sed_file if is_not_null(args.config): configfile = os.path.join(os.path.dirname(sedfile), args.config) else: configfile = os.path.join(os.path.dirname(sedfile), 'config.yaml') nbins = _get_enum_bins(configfile) first = args.seed last = first + args.nsims flist = [sedfile.replace("_SEED.fits", "_%06i.fits" % seed) for seed in range(first, last)] outfile = args.outfile summaryfile = args.summaryfile outtable = fill_output_table( flist, "SED", CollectSED.collist, nbins=nbins) if is_not_null(outfile): outtable.write(outfile) if is_not_null(summaryfile): summary = summarize_sed_results(outtable) summary.write(summaryfile)
Run this analysis
entailment
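A small illustration of the seed substitution used above to enumerate the per-simulation SED files; the template name is made up.

sedfile = 'draco_sed_SEED.fits'  # hypothetical template file name
flist = [sedfile.replace('_SEED.fits', '_%06i.fits' % seed)
         for seed in range(0, 3)]
print(flist)  # ['draco_sed_000000.fits', ..., 'draco_sed_000002.fits']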
def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} ttype = args['ttype'] (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile( args, require_sim_name=True) if targets_yaml is None: return job_configs write_full = args['write_full'] targets = load_yaml(targets_yaml) base_config = dict(config=args['config'], nsims=args['nsims'], seed=args['seed']) first = args['seed'] last = first + args['nsims'] - 1 for target_name, profile_list in targets.items(): for profile in profile_list: full_key = "%s:%s:%s" % (target_name, profile, sim) name_keys = dict(target_type=ttype, target_name=target_name, sim_name=sim, profile=profile, fullpath=True) sed_file = NAME_FACTORY.sim_sedfile(**name_keys) outfile = sed_file.replace( '_SEED.fits', '_collected_%06i_%06i.fits' % (first, last)) logfile = make_nfs_path(outfile.replace('.fits', '.log')) if not write_full: outfile = None summaryfile = sed_file.replace( '_SEED.fits', '_summary_%06i_%06i.fits' % (first, last)) job_config = base_config.copy() job_config.update(dict(sed_file=sed_file, outfile=outfile, summaryfile=summaryfile, logfile=logfile)) job_configs[full_key] = job_config return job_configs
Hook to build job configurations
entailment
def update_base_dict(self, yamlfile): """Update the values in baseline dictionary used to resolve names """ self.base_dict.update(**yaml.safe_load(open(yamlfile)))
Update the values in baseline dictionary used to resolve names
entailment
def _format_from_dict(self, format_string, **kwargs):
    """ Return a file name formatted from dictionary components """
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    localpath = format_string.format(**kwargs_copy)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
Return a file name formatted from dictionary components
entailment
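A minimal illustration of the dictionary-driven formatting above; the format string and keys are invented for the example.

base_dict = {'basedir': 'analysis', 'target_name': 'draco'}
fmt = '{basedir}/{target_name}/sed_{seed}.fits'  # hypothetical format string
kwargs = dict(base_dict, seed='SEED')
print(fmt.format(**kwargs))  # analysis/draco/sed_SEED.fits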
def sim_sedfile(self, **kwargs): """Return the name for the simulated SED file for a particular target """ if 'seed' not in kwargs: kwargs['seed'] = 'SEED' return self._format_from_dict(NameFactory.sim_sedfile_format, **kwargs)
Return the name for the simulated SED file for a particular target
entailment
def stamp(self, **kwargs): """Return the path for a stamp file for a scatter gather job""" kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) return NameFactory.stamp_format.format(**kwargs_copy)
Return the path for a stamp file for a scatter gather job
entailment
def resolve_targetfile(self, args, require_sim_name=False):
    """Get the name of the targetfile based on the job arguments"""
    ttype = args.get('ttype')
    if is_null(ttype):
        sys.stderr.write('Target type must be specified')
        return (None, None)
    sim = args.get('sim')
    if is_null(sim):
        if require_sim_name:
            sys.stderr.write('Simulation scenario must be specified')
            return (None, None)
        else:
            sim = None
    name_keys = dict(target_type=ttype,
                     targetlist='target_list.yaml',
                     sim_name=sim,
                     fullpath=True)
    if sim is None:
        targetfile = self.targetfile(**name_keys)
    else:
        targetfile = self.sim_targetfile(**name_keys)
    targets_override = args.get('targetfile')
    if is_not_null(targets_override):
        targetfile = targets_override
    return (targetfile, sim)
Get the name of the targetfile based on the job arguments
entailment
def resolve_randconfig(self, args):
    """Get the name of the random config file based on the job arguments"""
    ttype = args.get('ttype')
    if is_null(ttype):
        sys.stderr.write('Target type must be specified')
        return None
    name_keys = dict(target_type=ttype,
                     fullpath=True)
    randconfig = self.randconfig(**name_keys)
    rand_override = args.get('rand_config')
    if is_not_null(rand_override):
        randconfig = rand_override
    return randconfig
Get the name of the random config file based on the job arguments
entailment
def main():
    usage = "usage: %(prog)s [options] "
    description = "Run gtltcube on one or more FT1 files to produce livetime cubes."

    parser = argparse.ArgumentParser(usage=usage, description=description)

    add_lsf_args(parser)
    parser.add_argument('--zmax', default=100., type=float, help='')
    parser.add_argument('--dcostheta', default=0.025, type=float, help='')
    parser.add_argument('--binsz', default=1.0, type=float, help='')
    parser.add_argument('--outdir', default=None, type=str,
                        help='Path to output directory used when merge=False.')
    parser.add_argument('--outfile', default=None, type=str,
                        help='Path to output file used when merge=True.')
    parser.add_argument('--scfile', default=None, type=str, help='',
                        required=True)
    parser.add_argument('--dry_run', default=False, action='store_true')
    parser.add_argument('--overwrite', default=False, action='store_true')
    parser.add_argument('--merge', default=False, action='store_true',
                        help='Merge input FT1 files into a single file.')
    parser.add_argument('files', nargs='+', default=None,
                        help='List of FT1 files to process.')

    args = parser.parse_args()

    args.outdir = os.path.abspath(args.outdir)
    args.scfile = os.path.abspath(args.scfile)
    mkdir(args.outdir)

    input_files = [[os.path.abspath(x)] for x in args.files]
    output_files = [os.path.join(args.outdir, os.path.basename(x))
                    for x in args.files]

    if args.batch:
        opts = copy.deepcopy(args.__dict__)
        opts.pop('files')
        opts.pop('batch')
        submit_jobs('python ' + os.path.abspath(__file__.rstrip('cd')),
                    input_files, output_files, opts)
        sys.exit(0)

    logger = Logger.get(os.path.basename(__file__), None, logging.INFO)
    logger.info('Starting.')

    cwd = os.getcwd()
    user = os.environ['USER']
    tmpdir = tempfile.mkdtemp(prefix=user + '.', dir='/scratch')
    os.chdir(tmpdir)

    logger.info('tmpdir %s', tmpdir)
    logger.info('outdir %s', args.outdir)
    logger.info('outfile %s', args.outfile)

    for infiles, outfile in zip(input_files, output_files):
        logger.info('infiles %s', pprint.pformat(infiles))
        logger.info('outfile %s', outfile)

        kw = dict(evfile='list.txt',
                  scfile=args.scfile,
                  outfile='ltcube.fits',
                  binsz=args.binsz,
                  dcostheta=args.dcostheta,
                  zmax=args.zmax)

        create_filelist(infiles, 'list.txt')
        staged_outfile = kw['outfile']
        run_gtapp('gtltcube', logger, kw)
        logger.info('cp %s %s', staged_outfile, outfile)
        shutil.copy(staged_outfile, outfile)

    os.chdir(cwd)
    logger.info('Deleting %s', tmpdir)
    shutil.rmtree(tmpdir)
    logger.info('Done.')
Run gtltcube on one or more FT1 files to produce livetime cubes.
entailment
def convert_sed_cols(tab):
    """Cast SED column names to lowercase and rename 'dfde' columns to 'dnde'."""
    # Update Column names
    for colname in list(tab.columns.keys()):
        newname = colname.lower()
        newname = newname.replace('dfde', 'dnde')
        if tab.columns[colname].name == newname:
            continue
        tab.columns[colname].name = newname
    return tab
Cast SED column names to lowercase and rename 'dfde' columns to 'dnde'.
entailment
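A quick round trip through the renaming above, assuming convert_sed_cols from the entry above is in scope:

import numpy as np
from astropy.table import Column, Table

tab = Table([Column(name='E_MIN', data=np.ones(3)),
             Column(name='DFDE', data=np.ones(3))])
tab = convert_sed_cols(tab)
print(tab.colnames)  # ['e_min', 'dnde']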
def derivative(self, x, der=1):
    """ Return the derivative at an array of input values

    x   : the inputs
    der : the order of derivative
    """
    from scipy.interpolate import splev
    return splev(x, self._sp, der=der)
Return the derivative at an array of input values x : the inputs der : the order of derivative
entailment
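A standalone sketch of the same scipy spline machinery: build a spline representation with splrep and evaluate its first derivative with splev.

import numpy as np
from scipy.interpolate import splev, splrep

x = np.linspace(0., 2. * np.pi, 50)
sp = splrep(x, np.sin(x))
print(splev(np.pi, sp, der=1))  # close to cos(pi) = -1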
def _compute_mle(self): """Compute the maximum likelihood estimate. Calls `scipy.optimize.brentq` to find the roots of the derivative. """ min_y = np.min(self._interp.y) if self._interp.y[0] == min_y: self._mle = self._interp.x[0] elif self._interp.y[-1] == min_y: self._mle = self._interp.x[-1] else: argmin_y = np.argmin(self._interp.y) ix0 = max(argmin_y - 4, 0) ix1 = min(argmin_y + 4, len(self._interp.x) - 1) while np.sign(self._interp.derivative(self._interp.x[ix0])) == \ np.sign(self._interp.derivative(self._interp.x[ix1])): ix0 += 1 self._mle = scipy.optimize.brentq(self._interp.derivative, self._interp.x[ix0], self._interp.x[ix1], xtol=1e-10 * np.median(self._interp.x))
Compute the maximum likelihood estimate. Calls `scipy.optimize.brentq` to find the roots of the derivative.
entailment
def getDeltaLogLike(self, dlnl, upper=True): """Find the point at which the log-likelihood changes by a given value with respect to its value at the MLE.""" mle_val = self.mle() # A little bit of paranoia to avoid zeros if mle_val <= 0.: mle_val = self._interp.xmin if mle_val <= 0.: mle_val = self._interp.x[1] log_mle = np.log10(mle_val) lnl_max = self.fn_mle() # This ultra-safe code to find an absolute maximum # fmax = self.fn_mle() # m = (fmax-self.interp.y > 0.1+dlnl) & (self.interp.x>self._mle) # if sum(m) == 0: # xmax = self.interp.x[-1]*10 # else: # xmax = self.interp.x[m][0] # Matt has found that it is faster to use an interpolator # than an actual root-finder to find the root, # probably b/c of python overhead. # That would be something like this: # rf = lambda x: self._interp(x)+dlnl-lnl_max # return opt.brentq(rf,self._mle,self._interp.xmax, # xtol=1e-10*np.abs(self._mle)) if upper: x = np.logspace(log_mle, np.log10(self._interp.xmax), 100) retVal = np.interp(dlnl, self.interp(x) - lnl_max, x) else: x = np.linspace(self._interp.xmin, self._mle, 100) retVal = np.interp(dlnl, self.interp(x)[::-1] - lnl_max, x[::-1]) return retVal
Find the point at which the log-likelihood changes by a given value with respect to its value at the MLE.
entailment
def getLimit(self, alpha, upper=True): """ Evaluate the limits corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : limit confidence level. upper : upper or lower limits. """ dlnl = onesided_cl_to_dlnl(1.0 - alpha) return self.getDeltaLogLike(dlnl, upper=upper)
Evaluate the limits corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : limit confidence level. upper : upper or lower limits.
entailment
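A worked sketch of the confidence-level conversion this method relies on, under the standard Gaussian assumption (Wilks' theorem); this is an assumption about what onesided_cl_to_dlnl computes, not code taken from this module.

import numpy as np
from scipy.stats import norm

def onesided_dlnl(cl):
    # One-sided Z-score for the CL; the delta log-likelihood is Z**2 / 2
    # under the Gaussian assumption (hypothetical reimplementation).
    return norm.ppf(cl) ** 2 / 2.0

print(onesided_dlnl(0.95))  # ~1.35, the familiar 95% one-sided value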
def getInterval(self, alpha): """ Evaluate the interval corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : limit confidence level. """ dlnl = twosided_cl_to_dlnl(1.0 - alpha) lo_lim = self.getDeltaLogLike(dlnl, upper=False) hi_lim = self.getDeltaLogLike(dlnl, upper=True) return (lo_lim, hi_lim)
Evaluate the interval corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : limit confidence level.
entailment
def create_from_table(cls, tab_e):
    """
    Parameters
    ----------
    tab_e : `~astropy.table.Table`
        EBOUNDS table.
    """
    convert_sed_cols(tab_e)
    try:
        emin = np.array(tab_e['e_min'].to(u.MeV))
        emax = np.array(tab_e['e_max'].to(u.MeV))
    except Exception:
        emin = np.array(tab_e['e_min'])
        emax = np.array(tab_e['e_max'])

    ne = len(emin)
    try:
        ref_dnde = np.array(tab_e['ref_dnde'])
    except KeyError:
        ref_dnde = np.ones((ne))

    try:
        ref_flux = np.array(tab_e['ref_flux'])
    except KeyError:
        ref_flux = np.ones((ne))

    try:
        ref_eflux = np.array(tab_e['ref_eflux'])
    except KeyError:
        ref_eflux = np.ones((ne))

    try:
        ref_npred = np.array(tab_e['ref_npred'])
    except KeyError:
        ref_npred = np.ones((ne))

    return cls(emin, emax, ref_dnde, ref_flux, ref_eflux, ref_npred)
Parameters ---------- tab_e : `~astropy.table.Table` EBOUNDS table.
entailment
def build_ebound_table(self): """ Build and return an EBOUNDS table with the encapsulated data. """ cols = [ Column(name="E_MIN", dtype=float, data=self._emin, unit='MeV'), Column(name="E_MAX", dtype=float, data=self._emax, unit='MeV'), Column(name="E_REF", dtype=float, data=self._eref, unit='MeV'), Column(name="REF_DNDE", dtype=float, data=self._ref_dnde, unit='ph / (MeV cm2 s)'), Column(name="REF_FLUX", dtype=float, data=self._ref_flux, unit='ph / (cm2 s)'), Column(name="REF_EFLUX", dtype=float, data=self._ref_eflux, unit='MeV / (cm2 s)'), Column(name="REF_NPRED", dtype=float, data=self._ref_npred, unit='ph') ] tab = Table(data=cols) return tab
Build and return an EBOUNDS table with the encapsulated data.
entailment
def derivative(self, x, der=1):
    """Return the derivative of the log-like summed over the energy bins

    Parameters
    ----------
    x : `~numpy.ndarray`
        Array of N x M values

    der : int
        Order of the derivative

    Returns
    -------
    der_val : `~numpy.ndarray`
        Array of negative log-likelihood values.
    """
    if len(x.shape) == 1:
        der_val = np.zeros((1))
    else:
        der_val = np.zeros((x.shape[1:]))
    for i, xv in enumerate(x):
        der_val += self._loglikes[i].interp.derivative(xv, der=der)
    return der_val
Return the derivative of the log-like summed over the energy bins Parameters ---------- x : `~numpy.ndarray` Array of N x M values der : int Order of the derivative Returns ------- der_val : `~numpy.ndarray` Array of negative log-likelihood values.
entailment
def mles(self): """ return the maximum likelihood estimates for each of the energy bins """ mle_vals = np.ndarray((self._nx)) for i in range(self._nx): mle_vals[i] = self._loglikes[i].mle() return mle_vals
return the maximum likelihood estimates for each of the energy bins
entailment
def ts_vals(self): """ returns test statistic values for each energy bin """ ts_vals = np.ndarray((self._nx)) for i in range(self._nx): ts_vals[i] = self._loglikes[i].TS() return ts_vals
returns test statistic values for each energy bin
entailment
def chi2_vals(self, x): """Compute the difference in the log-likelihood between the MLE in each energy bin and the normalization predicted by a global best-fit model. This array can be summed to get a goodness-of-fit chi2 for the model. Parameters ---------- x : `~numpy.ndarray` An array of normalizations derived from a global fit to all energy bins. Returns ------- chi2_vals : `~numpy.ndarray` An array of chi2 values for each energy bin. """ chi2_vals = np.ndarray((self._nx)) for i in range(self._nx): mle = self._loglikes[i].mle() nll0 = self._loglikes[i].interp(mle) nll1 = self._loglikes[i].interp(x[i]) chi2_vals[i] = 2.0 * np.abs(nll0 - nll1) return chi2_vals
Compute the difference in the log-likelihood between the MLE in each energy bin and the normalization predicted by a global best-fit model. This array can be summed to get a goodness-of-fit chi2 for the model. Parameters ---------- x : `~numpy.ndarray` An array of normalizations derived from a global fit to all energy bins. Returns ------- chi2_vals : `~numpy.ndarray` An array of chi2 values for each energy bin.
entailment
def getLimits(self, alpha, upper=True):
    """ Evaluate the limits corresponding to a C.L. of (1-alpha)%.

    Parameters
    ----------
    alpha : float
        limit confidence level.
    upper : bool
        upper or lower limits.

    Returns
    -------
    limit_vals : `~numpy.ndarray`
        An array of limit values, one for each energy bin.
    """
    limit_vals = np.ndarray((self._nx))
    for i in range(self._nx):
        limit_vals[i] = self._loglikes[i].getLimit(alpha, upper)
    return limit_vals
Evaluate the limits corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : float limit confidence level. upper : bool upper or lower limits. Returns ------- limit_vals : `~numpy.ndarray` An array of limit values, one for each energy bin.
entailment
def getIntervals(self, alpha):
    """ Evaluate the two-sided intervals corresponding to a C.L. of (1-alpha)%.

    Parameters
    ----------
    alpha : float
        limit confidence level.

    Returns
    -------
    limit_vals_lo : `~numpy.ndarray`
        An array of lower limit values.

    limit_vals_hi : `~numpy.ndarray`
        An array of upper limit values.
    """
    limit_vals_lo = np.ndarray((self._nx))
    limit_vals_hi = np.ndarray((self._nx))
    for i in range(self._nx):
        lo_lim, hi_lim = self._loglikes[i].getInterval(alpha)
        limit_vals_lo[i] = lo_lim
        limit_vals_hi[i] = hi_lim
    return limit_vals_lo, limit_vals_hi
Evaluate the two-sided intervals corresponding to a C.L. of (1-alpha)%. Parameters ---------- alpha : float limit confidence level. Returns ------- limit_vals_lo : `~numpy.ndarray` An array of lower limit values. limit_vals_hi : `~numpy.ndarray` An array of upper limit values.
entailment
def fitNormalization(self, specVals, xlims):
    """Fit the normalization given a set of spectral values
    that define a spectral shape

    This version is faster, and solves for the root of the derivative

    Parameters
    ----------
    specVals : an array of nebins values that define a spectral shape
    xlims    : fit limits

    returns the best-fit normalization value
    """
    from scipy.optimize import brentq

    def fDeriv(x):
        return self.norm_derivative(specVals, x)
    try:
        result = brentq(fDeriv, xlims[0], xlims[1])
    except ValueError:
        # brentq failed to bracket a root; fall back to whichever limit
        # gives the smaller negative log-likelihood.
        check_underflow = self.__call__(specVals * xlims[0]) < \
            self.__call__(specVals * xlims[1])
        if check_underflow.any():
            return xlims[0]
        else:
            return xlims[1]
    return result
Fit the normalization given a set of spectral values that define a spectral shape This version is faster, and solves for the root of the derivative Parameters ---------- specVals : an array of nebins values that define a spectral shape xlims : fit limits returns the best-fit normalization value
entailment
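A toy version of the root-finding pattern above, with a quadratic stand-in for the summed negative log-likelihood; every name and value here is illustrative.

import numpy as np
from scipy.optimize import brentq

spec_vals = np.array([1.0, 0.5, 0.25])  # toy spectral shape
mles = np.array([2.0, 1.1, 0.4])        # toy per-bin best-fit normalizations

def nll_deriv(norm):
    # Derivative of sum_i (norm * spec_i - mle_i)**2 with respect to norm.
    return np.sum(2.0 * spec_vals * (norm * spec_vals - mles))

best_norm = brentq(nll_deriv, 1e-3, 1e3)
print(best_norm)  # ~2.019, the least-squares normalization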
def fitNorm_v2(self, specVals):
    """Fit the normalization given a set of spectral values
    that define a spectral shape.

    This version uses `scipy.optimize.fmin`.

    Parameters
    ----------
    specVals : an array of nebins values that define a spectral shape

    Returns
    -------
    norm : float
        Best-fit normalization value
    """
    from scipy.optimize import fmin

    def fToMin(x):
        return self.__call__(specVals * x)
    result = fmin(fToMin, 0., disp=False, xtol=1e-6)
    return result
Fit the normalization given a set of spectral values that define a spectral shape. This version uses `scipy.optimize.fmin`. Parameters ---------- specVals : an array of nebins values that define a spectral shape Returns ------- norm : float Best-fit normalization value
entailment
def fit_spectrum(self, specFunc, initPars, freePars=None):
    """ Fit for the free parameters of a spectral function

    Parameters
    ----------
    specFunc : `~fermipy.spectrum.SpectralFunction`
        The Spectral Function

    initPars : `~numpy.ndarray`
        The initial values of the parameters

    freePars : `~numpy.ndarray`
        Boolean array indicating which parameters should be free in
        the fit.

    Returns
    -------
    params : `~numpy.ndarray`
        Best-fit parameters.

    spec_vals : `~numpy.ndarray`
        The values of the best-fit spectral model in each energy bin.

    ts_spec : float
        The TS of the best-fit spectrum.

    chi2_vals : `~numpy.ndarray`
        Array of chi-squared values for each energy bin.

    chi2_spec : float
        Global chi-squared value for the sum of all energy bins.

    pval_spec : float
        p-value of chi-squared for the best-fit spectrum.
    """
    from scipy.optimize import fmin

    if not isinstance(specFunc, SEDFunctor):
        specFunc = self.create_functor(specFunc, initPars,
                                       scale=specFunc.scale)

    if freePars is None:
        freePars = np.empty(len(initPars), dtype=bool)
        freePars.fill(True)

    initPars = np.array(initPars)
    freePars = np.array(freePars)

    def fToMin(x):
        xp = np.array(specFunc.params)
        xp[freePars] = x
        return self.__call__(specFunc(xp))

    result = fmin(fToMin, initPars[freePars], disp=False, xtol=1e-6)

    out_pars = specFunc.params
    out_pars[freePars] = np.array(result)

    spec_vals = specFunc(out_pars)
    spec_npred = np.zeros(len(spec_vals))

    if isinstance(specFunc, spectrum.SEDFluxFunctor):
        spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_flux
    elif isinstance(specFunc, spectrum.SEDEFluxFunctor):
        spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_eflux

    ts_spec = self.TS_spectrum(spec_vals)
    chi2_vals = self.chi2_vals(spec_vals)
    chi2_spec = np.sum(chi2_vals)
    pval_spec = stats.distributions.chi2.sf(chi2_spec, len(spec_vals))
    return dict(params=out_pars, spec_vals=spec_vals,
                spec_npred=spec_npred,
                ts_spec=ts_spec, chi2_spec=chi2_spec,
                chi2_vals=chi2_vals, pval_spec=pval_spec)
Fit for the free parameters of a spectral function Parameters ---------- specFunc : `~fermipy.spectrum.SpectralFunction` The Spectral Function initPars : `~numpy.ndarray` The initial values of the parameters freePars : `~numpy.ndarray` Boolean array indicating which parameters should be free in the fit. Returns ------- params : `~numpy.ndarray` Best-fit parameters. spec_vals : `~numpy.ndarray` The values of the best-fit spectral model in each energy bin. ts_spec : float The TS of the best-fit spectrum. chi2_vals : `~numpy.ndarray` Array of chi-squared values for each energy bin. chi2_spec : float Global chi-squared value for the sum of all energy bins. pval_spec : float p-value of chi-squared for the best-fit spectrum.
entailment
def build_scandata_table(self): """Build an `astropy.table.Table` object from these data. """ shape = self._norm_vals.shape col_norm = Column(name="norm", dtype=float) col_normv = Column(name="norm_scan", dtype=float, shape=shape) col_dll = Column(name="dloglike_scan", dtype=float, shape=shape) tab = Table(data=[col_norm, col_normv, col_dll]) tab.add_row({"norm": 1., "norm_scan": self._norm_vals, "dloglike_scan": -1 * self._nll_vals}) return tab
Build an `astropy.table.Table` object from these data.
entailment
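A standalone sketch of the single-row scan-table layout produced above, with toy shapes in place of the real scan arrays:

import numpy as np
from astropy.table import Column, Table

shape = (3, 5)  # (energy bins, scan points), toy values
norm_vals = np.ones(shape)
nll_vals = np.zeros(shape)

tab = Table(data=[Column(name='norm', dtype=float),
                  Column(name='norm_scan', dtype=float, shape=shape),
                  Column(name='dloglike_scan', dtype=float, shape=shape)])
tab.add_row({'norm': 1., 'norm_scan': norm_vals,
             'dloglike_scan': -1 * nll_vals})
print(tab['norm_scan'].shape)  # (1, 3, 5)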