code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def create_model_name(src):
    """Generate a name for a source object given its spatial/spectral
    properties.

    Parameters
    ----------
    src : `~fermipy.roi_model.Source`
        A source object.

    Returns
    -------
    name : str
        A source name.
    """
    parts = [src['SpatialModel'].lower()]
    if parts[0] == 'gaussian':
        parts.append('_s%04.2f' % src['SpatialWidth'])
    if src['SpectrumType'] == 'PowerLaw':
        parts.append('_powerlaw_%04.2f' %
                     float(src.spectral_pars['Index']['value']))
    else:
        parts.append('_%s' % src['SpectrumType'].lower())
    return ''.join(parts)
4.809521
4.23781
1.134907
def cov_to_correlation(cov):
    """Compute the correlation matrix given the covariance matrix.

    Parameters
    ----------
    cov : `~numpy.ndarray`
        N x N matrix of covariances among N parameters.

    Returns
    -------
    corr : `~numpy.ndarray`
        N x N matrix of correlations among N parameters.
    """
    sigma = np.sqrt(np.diag(cov))
    # Invert only the finite, non-zero std. deviations; everything else
    # maps to NaN in the output.
    inv_sigma = np.ones_like(sigma) * np.nan
    valid = np.isfinite(sigma) & (sigma != 0)
    inv_sigma[valid] = 1.0 / sigma[valid]
    return np.array(cov) * np.outer(inv_sigma, inv_sigma)
3.18985
3.601751
0.885639
def ellipse_to_cov(sigma_maj, sigma_min, theta):
    """Compute the covariance matrix in two variables x and y given
    the std. deviation along the semi-major and semi-minor axes and
    the rotation angle of the error ellipse.

    Parameters
    ----------
    sigma_maj : float
        Std. deviation along major axis of error ellipse.

    sigma_min : float
        Std. deviation along minor axis of error ellipse.

    theta : float
        Rotation angle in radians from x-axis to ellipse major axis.
    """
    c = np.cos(theta)
    s = np.sin(theta)
    var_maj = sigma_maj ** 2
    var_min = sigma_min ** 2
    covxx = c * c * var_maj + s * s * var_min
    covyy = s * s * var_maj + c * c * var_min
    covxy = c * s * var_maj - c * s * var_min
    return np.array([[covxx, covxy], [covxy, covyy]])
1.718092
1.856347
0.925523
def onesided_cl_to_dlnl(cl):
    """Compute the delta-loglikehood values that corresponds to an
    upper limit of the given confidence level.

    Parameters
    ----------
    cl : float
        Confidence level.

    Returns
    -------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.
    """
    # The one-sided z-value is sqrt(2)*erfinv(2*cl - 1); the
    # delta-loglikelihood is z**2 / 2.
    zval = np.sqrt(2.) * special.erfinv(2.0 * cl - 1.0)
    return 0.5 * zval ** 2
5.400274
6.598449
0.818416
def find_function_root(fn, x0, xb, delta=0.0, bounds=None):
    """Find the root of a function: f(x)+delta in the interval
    encompassed by x0 and xb.

    Parameters
    ----------
    fn : function
        Python function.

    x0 : float
        Fixed bound for the root search.  This will either be used as
        the lower or upper bound depending on the relative value of xb.

    xb : float
        Upper or lower bound for the root search.  If a root is not
        found in the interval [x0,xb]/[xb,x0] this value will be
        increased/decreased until a change in sign is found.
    """
    if x0 == xb:
        return np.nan

    def shifted(t):
        return fn(t) + delta

    # Expand xb (at most 10 times) until the shifted function changes
    # sign between x0 and xb or xb leaves the allowed bounds.
    for _ in range(10):
        if np.sign(shifted(xb)) != np.sign(shifted(x0)):
            break
        if bounds is not None and not (bounds[0] <= xb <= bounds[1]):
            break
        xb = xb * 0.5 if xb < x0 else xb * 2.0

    # Failed to find a root
    if np.sign(shifted(xb)) == np.sign(shifted(x0)):
        return np.nan

    xtol = 1e-10 * (np.abs(xb) if x0 == 0 else np.abs(xb + x0))
    return brentq(shifted, x0, xb, xtol=xtol)
2.216246
2.301679
0.962882
def parabola(xy, amplitude, x0, y0, sx, sy, theta):
    """Evaluate a 2D parabola given by:

    f(x,y) = f_0 - (1/2) * \\delta^T * R * \\Sigma * R^T * \\delta

    where \\delta = [(x - x_0), (y - y_0)], R is the matrix for a 2D
    rotation by angle \\theta, and \\Sigma is the covariance matrix:

    \\Sigma = [[1/\\sigma_x^2, 0           ],
              [0            , 1/\\sigma_y^2]]

    Parameters
    ----------
    xy : tuple
        Tuple containing x and y arrays for the values at which the
        parabola will be evaluated.

    amplitude : float
        Constant offset value.

    x0 : float
        Centroid in x coordinate.

    y0 : float
        Centroid in y coordinate.

    sx : float
        Standard deviation along first axis (x-axis when theta=0).

    sy : float
        Standard deviation along second axis (y-axis when theta=0).

    theta : float
        Rotation angle in radians.

    Returns
    -------
    vals : `~numpy.ndarray`
        Values of the parabola evaluated at the points defined in the
        `xy` input tuple.
    """
    xv, yv = xy[0], xy[1]
    cth = np.cos(theta)
    sth = np.sin(theta)
    s2th = np.sin(2 * theta)

    # Quadratic-form coefficients of the rotated ellipse.
    a = (cth ** 2) / (2 * sx ** 2) + (sth ** 2) / (2 * sy ** 2)
    b = -s2th / (4 * sx ** 2) + s2th / (4 * sy ** 2)
    c = (sth ** 2) / (2 * sx ** 2) + (cth ** 2) / (2 * sy ** 2)

    dx = xv - x0
    dy = yv - y0
    return amplitude - (a * dx ** 2 + 2 * b * dx * dy + c * dy ** 2)
1.797893
1.818805
0.988503
def get_region_mask(z, delta, xy=None):
    """Get mask of connected region within delta of max(z)."""
    # Seed pixel: either the supplied (ix, iy) or the global maximum.
    if xy is None:
        ix, iy = np.unravel_index(np.argmax(z), z.shape)
    else:
        ix, iy = xy
    # Threshold at (seed value - delta), then keep only the connected
    # component containing the seed pixel.
    above = (z > z[ix, iy] - delta)
    labeled = label(above)[0]
    return above & (labeled == labeled[ix, iy])
3.320457
3.000436
1.106658
def fit_parabola(z, ix, iy, dpix=3, zmin=None):
    """Fit a parabola to a 2D numpy array.

    This function will fit a parabola with the functional form
    described in `~fermipy.utils.parabola` to a 2D slice of the
    input array `z`.  The fit region encompasses pixels that are
    within `dpix` of the pixel coordinate (ix,iy) OR that lie in the
    connected region within `zmin` of the value at that pixel.

    Parameters
    ----------
    z : `~numpy.ndarray`

    ix : int
        X index of center pixel of fit region in array `z`.

    iy : int
        Y index of center pixel of fit region in array `z`.

    dpix : int
        Max distance from center pixel of fit region.

    zmin : float
        Threshold used to extend the fit mask via `get_region_mask`.

    Returns
    -------
    o : dict
        Dictionary with the best-fit parameters (centroid, widths,
        rotation angle, amplitude), the fit mask, the reduced
        chi-square and a ``fit_success`` flag.
    """
    # Pixel distance from the seed pixel.  Note the (iy, ix) argument
    # order expected by make_pixel_distance.
    offset = make_pixel_distance(z.shape, iy, ix)
    x, y = np.meshgrid(np.arange(z.shape[0]),
                       np.arange(z.shape[1]), indexing='ij')

    # Fit mask: pixels within dpix of the seed pixel.  If that selects
    # fewer than 9 pixels (close to the 6 free parameters of the fit),
    # pad the radius by half a pixel.
    m = (offset <= dpix)
    if np.sum(m) < 9:
        m = (offset <= dpix + 0.5)

    # Optionally extend the mask with the connected region within
    # |zmin| of the seed-pixel value.
    if zmin is not None:
        m |= get_region_mask(z, np.abs(zmin), (ix, iy))

    # Initial width/amplitude guesses from 1D quadratic fits along each
    # axis through the seed pixel.
    sx = get_bounded_slice(ix, dpix, z.shape[0])
    sy = get_bounded_slice(iy, dpix, z.shape[1])
    coeffx = poly_to_parabola(np.polyfit(x[sx, iy], z[sx, iy], 2))
    coeffy = poly_to_parabola(np.polyfit(y[ix, sy], z[ix, sy], 2))
    #p0 = [coeffx[2], coeffx[0], coeffy[0], coeffx[1], coeffy[1], 0.0]
    # Seed the centroid at the seed pixel rather than at the 1D fit
    # centroids.
    p0 = [coeffx[2], float(ix), float(iy), coeffx[1], coeffy[1], 0.0]

    o = {'fit_success': True, 'p0': p0}

    def curve_fit_fn(*args):
        # curve_fit expects a flat array of model values.
        return np.ravel(parabola(*args))

    try:
        # Bound the centroid parameters (indices 1 and 2 of p0) to lie
        # inside the image.
        bounds = (-np.inf * np.ones(6), np.inf * np.ones(6))
        bounds[0][1] = -0.5
        bounds[0][2] = -0.5
        bounds[1][1] = z.shape[0] - 0.5
        bounds[1][2] = z.shape[1] - 0.5
        popt, pcov = scipy.optimize.curve_fit(curve_fit_fn,
                                              (np.ravel(x[m]),
                                               np.ravel(y[m])),
                                              np.ravel(z[m]), p0,
                                              bounds=bounds)
    except Exception:
        # Fall back to the initial guess if the fit fails.
        popt = copy.deepcopy(p0)
        o['fit_success'] = False

    fm = parabola((x[m], y[m]), *popt)
    df = fm - z[m]
    rchi2 = np.sum(df ** 2) / len(fm)

    o['rchi2'] = rchi2
    o['x0'] = popt[1]
    o['y0'] = popt[2]
    o['sigmax'] = np.abs(popt[3])
    o['sigmay'] = np.abs(popt[4])
    o['sigma'] = np.sqrt(o['sigmax'] ** 2 + o['sigmay'] ** 2)
    o['z0'] = popt[0]
    o['theta'] = popt[5]
    o['popt'] = popt
    o['mask'] = m

    # Eccentricity of the best-fit ellipse in both conventions
    # (relative to the major and to the minor axis).
    a = max(o['sigmax'], o['sigmay'])
    b = min(o['sigmax'], o['sigmay'])
    o['eccentricity'] = np.sqrt(1 - b ** 2 / a ** 2)
    o['eccentricity2'] = np.sqrt(a ** 2 / b ** 2 - 1)

    return o
2.380927
2.418287
0.984551
def split_bin_edges(edges, npts=2):
    """Subdivide an array of bins by splitting each bin into ``npts``
    subintervals.

    Parameters
    ----------
    edges : `~numpy.ndarray`
        Bin edge array.

    npts : int
        Number of intervals into which each bin will be subdivided.

    Returns
    -------
    edges : `~numpy.ndarray`
        Subdivided bin edge array.
    """
    if npts < 2:
        return edges
    lo = edges[:-1, None]
    hi = edges[1:, None]
    frac = np.linspace(0.0, 1.0, npts + 1)[None, :]
    # One row of npts+1 sub-edges per input bin; interior edges appear
    # twice (end of one bin, start of the next) and are collapsed by
    # np.unique.
    grid = lo + (hi - lo) * frac
    return np.unique(np.ravel(grid))
2.957887
3.324103
0.88983
def val_to_bin(edges, x):
    """Convert axis coordinate to bin index.

    Values below the first edge map to -1; ``x`` may be a scalar or an
    array and the result is always at least 1-dimensional.
    """
    return np.digitize(np.array(x, ndmin=1), edges) - 1
4.254906
3.926342
1.083682
def val_to_edge(edges, x):
    """Convert axis coordinate to the index of the closest bin edge.

    Unlike `val_to_bin`, which returns the index of the containing
    bin, this maps each value in ``x`` to the index of the nearest
    element of ``edges`` (exact for uniformly spaced edges), clipped
    to 0 on the low side.
    """
    edges = np.array(edges)
    # Bin widths; the first width is duplicated so the half-width shift
    # below is defined for the first edge as well.
    w = edges[1:] - edges[:-1]
    w = np.insert(w, 0, w[0])
    # Shifting the edges down by half a bin width turns digitize into a
    # nearest-edge lookup.
    ibin = np.digitize(np.array(x, ndmin=1), edges - 0.5 * w) - 1
    ibin[ibin < 0] = 0
    return ibin
3.06163
2.48573
1.231682
def val_to_bin_bounded(edges, x):
    """Convert axis coordinate to bin index, clipping the result to
    the valid range [0, nbins-1]."""
    last_bin = len(edges) - 2
    idx = val_to_bin(edges, x)
    return np.clip(idx, 0, last_bin)
2.245377
2.146888
1.045875
def extend_array(edges, binsz, lo, hi):
    """Extend an array to encompass lo and hi values."""
    # Number of bins of width binsz needed below/above the current span.
    nbelow = int(np.ceil((edges[0] - lo) / binsz))
    nabove = int(np.ceil((hi - edges[-1]) / binsz))
    out = copy.deepcopy(edges)
    if nbelow > 0:
        prepend = np.linspace(out[0] - nbelow * binsz, out[0], nbelow + 1)
        out = np.concatenate((prepend[:-1], out))
    if nabove > 0:
        append = np.linspace(out[-1], out[-1] + nabove * binsz, nabove + 1)
        out = np.concatenate((out, append[1:]))
    return out
1.756665
1.710618
1.026918
def fits_recarray_to_dict(table):
    """Convert a FITS recarray to a python dictionary.

    Parameters
    ----------
    table : FITS table object
        Object exposing ``columns.names`` and ``data`` (e.g. a
        BinTableHDU from `astropy.io.fits`).

    Returns
    -------
    cols : dict
        Dictionary mapping column name to a numpy array of values.

    Raises
    ------
    Exception
        If a column's element type is not recognized.
    """
    cols = {}
    for col in table.columns.names:
        col_data = table.data[col]
        first = col_data[0]
        # Use isinstance() rather than exact type() comparison so that
        # any numpy float/int width is handled (the original only
        # recognized float32/float64/int16) and both str and numpy
        # byte-strings are covered (np.string_ was removed from numpy).
        if isinstance(first, (float, np.floating)):
            cols[col] = np.array(col_data, dtype=float)
        elif isinstance(first, (str, np.str_, bytes, np.bytes_)):
            cols[col] = np.array(col_data, dtype=str)
        elif isinstance(first, (int, np.integer)):
            cols[col] = np.array(col_data, dtype=int)
        elif isinstance(first, np.ndarray):
            cols[col] = np.array(col_data)
        else:
            raise Exception(
                'Unrecognized column type: %s %s' % (col,
                                                     str(type(col_data))))
    return cols
1.672449
1.649201
1.014097
def prettify_xml(elem):
    """Return a pretty-printed XML string for the Element."""
    from xml.dom import minidom
    # BUG FIX: xml.etree.cElementTree was deprecated in Python 3.3 and
    # removed in Python 3.9; the plain ElementTree module picks up the
    # C accelerator automatically when it is available.
    import xml.etree.ElementTree as et

    rough_string = et.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent=" ")
1.771091
1.56248
1.133513
def merge_dict(d0, d1, add_new_keys=False, append_arrays=False):
    """Recursively merge the contents of python dictionary d0 with
    the contents of another python dictionary, d1.

    Parameters
    ----------
    d0 : dict
        The input dictionary.

    d1 : dict
        Dictionary to be merged with the input dictionary.

    add_new_keys : bool
        Do not skip keys that only exist in d1.

    append_arrays : bool
        If an element is a numpy array set the value of that element
        by concatenating the two arrays.

    Returns
    -------
    od : dict
        The merged dictionary (d0 or d1 unchanged when the other is
        None; {} when both are None).
    """
    # BUG FIX: the both-None test must come first -- previously it was
    # unreachable because the `d1 is None` branch returned d0 (None)
    # before it could be evaluated.
    if d0 is None and d1 is None:
        return {}
    elif d1 is None:
        return d0
    elif d0 is None:
        return d1

    od = {}

    for k, v in d0.items():
        t0 = type(d0[k])
        t1 = type(d1[k]) if k in d1 else None

        if k not in d1:
            od[k] = copy.deepcopy(d0[k])
        elif isinstance(v, dict) and isinstance(d1[k], dict):
            # Recurse into nested dictionaries.
            od[k] = merge_dict(d0[k], d1[k], add_new_keys, append_arrays)
        elif isinstance(v, list) and isstr(d1[k]):
            # A comma-separated string overrides a list.
            od[k] = d1[k].split(',')
        elif isinstance(v, dict) and d1[k] is None:
            od[k] = copy.deepcopy(d0[k])
        elif isinstance(v, np.ndarray) and append_arrays:
            od[k] = np.concatenate((v, d1[k]))
        elif (d0[k] is not None and d1[k] is not None) and t0 != t1:
            if t0 == dict or t0 == list:
                raise Exception('Conflicting types in dictionary merge for '
                                'key %s %s %s' % (k, t0, t1))
            # Coerce the d1 value to the type found in d0.
            od[k] = t0(d1[k])
        else:
            od[k] = copy.copy(d1[k])

    if add_new_keys:
        for k, v in d1.items():
            if k not in d0:
                od[k] = copy.deepcopy(d1[k])

    return od
1.891718
1.915677
0.987493
def tolist(x):
    """Convenience function that takes in a nested structure of lists
    and dictionaries and converts everything to its base objects.
    This is useful for dumping a file to yaml.

    (a) numpy arrays into python lists

        >>> type(tolist(np.asarray(123))) == int
        True
        >>> tolist(np.asarray([1,2,3])) == [1,2,3]
        True

    (b) numpy strings into python strings.

        >>> tolist([np.asarray('cat')])==['cat']
        True

    (c) an ordered dict to a dict

        >>> ordered=OrderedDict(a=1, b=2)
        >>> type(tolist(ordered)) == dict
        True

    (d) converts unicode to regular strings

        >>> type(tolist(u'a')) == str
        True

    (e) converts numbers & bools in strings to real representation,
        (i.e. '123' -> 123)

        >>> type(tolist(np.asarray('123'))) == int
        True
        >>> type(tolist('123')) == int
        True
        >>> tolist('False') == False
        True
    """
    if isinstance(x, list):
        # BUG FIX: map() returns a lazy iterator on Python 3; build a
        # real list so the result contains only base python objects.
        return [tolist(v) for v in x]
    elif isinstance(x, dict):
        # Also converts OrderedDict (a dict subclass) to a plain dict,
        # which is why a separate OrderedDict branch is not needed.
        return dict((tolist(k), tolist(v)) for k, v in x.items())
    elif isinstance(x, np.ndarray) or isinstance(x, np.number):
        # note, call tolist again to convert strings of numbers to numbers
        return tolist(x.tolist())
    elif isinstance(x, np.bool_):
        return bool(x)
    elif isstr(x) or isinstance(x, np.str_):
        # BUG FIX: np.str was an alias of the builtin str and was
        # removed in numpy 1.24; np.str_ is the numpy string scalar.
        x = str(x)  # convert unicode & numpy strings
        try:
            return int(x)
        except ValueError:
            try:
                return float(x)
            except ValueError:
                if x == 'True':
                    return True
                elif x == 'False':
                    return False
                else:
                    return x
    else:
        return x
2.505594
2.432299
1.030134
def convolve2d_disk(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    azimuthally symmetric function in two dimensions and g is a
    step function given by:

    g(r) = H(1-r/s)

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.

    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.

    sig : float
        Radius parameter of the step function.

    nstep : int
        Number of sampling point for numeric integration.

    Returns
    -------
    s : `~numpy.ndarray`
        Convolved function evaluated at ``r``.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)

    # Radial integration grid spanning [max(r - sig, 0), r + sig].
    rlo = r - sig
    rhi = r + sig
    rlo[rlo < 0] = 0
    step = (rhi - rlo) / nstep
    redge = rlo[..., np.newaxis] + \
        step[..., np.newaxis] * np.linspace(0, nstep, nstep + 1)
    rp = 0.5 * (redge[..., 1:] + redge[..., :-1])  # bin centers
    dr = redge[..., 1:] - redge[..., :-1]          # bin widths
    fnv = fn(rp)

    r = r.reshape(r.shape + (1,))

    # Azimuthal extent of the overlap between the disk and the ring at
    # radius rp; cos(phi) = -1 (full circle) where the ring lies
    # entirely inside the disk.
    cphi = -np.ones(dr.shape)
    inside = ((rp + r) / sig < 1) | (r == 0)
    rrp = r * rp
    sx = r ** 2 + rp ** 2 - sig ** 2
    cphi[~inside] = sx[~inside] / (2 * rrp[~inside])
    dphi = 2 * np.arccos(cphi)

    v = rp * fnv * dphi * dr / (np.pi * sig * sig)
    return np.sum(v, axis=-1)
3.424618
3.508934
0.975971
def convolve2d_gauss(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is
    azimuthally symmetric function in two dimensions and g is a
    2D gaussian with standard deviation s given by:

    g(r) = 1/(2*pi*s^2) Exp[-r^2/(2*s^2)]

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.

    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.

    sig : float
        Width parameter of the gaussian.

    nstep : int
        Number of sampling point for numeric integration.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)

    # Radial integration grid covering +/- 10 sigma around each r.
    rmin = r - 10 * sig
    rmax = r + 10 * sig
    rmin[rmin < 0] = 0
    delta = (rmax - rmin) / nstep
    redge = (rmin[..., np.newaxis] +
             delta[..., np.newaxis] * np.linspace(0, nstep, nstep + 1))
    rp = 0.5 * (redge[..., 1:] + redge[..., :-1])  # bin centers
    dr = redge[..., 1:] - redge[..., :-1]          # bin widths
    fnv = fn(rp)

    r = r.reshape(r.shape + (1,))
    sig2 = sig * sig
    x = r * rp / (sig2)

    # The azimuthal part of the integral is the modified Bessel
    # function I0; the exponentially scaled form ive(0, x) =
    # I0(x)*exp(-x) is used for numerical stability.  A spline lookup
    # table is cached on the function object so it is built only once
    # per process.
    if 'je_fn' not in convolve2d_gauss.__dict__:
        t = 10 ** np.linspace(-8, 8, 1000)
        t = np.insert(t, 0, [0])
        je = special.ive(0, t)
        convolve2d_gauss.je_fn = UnivariateSpline(t, je, k=2, s=0)

    je = convolve2d_gauss.je_fn(x.flat).reshape(x.shape)
    #je2 = special.ive(0,x)
    # The exp(x - ...) factor undoes the exponential scaling of ive.
    v = (rp * fnv / (sig2) * je *
         np.exp(x - (r * r + rp * rp) / (2 * sig2)) * dr)

    s = np.sum(v, axis=-1)
    return s
3.370394
3.480503
0.968364
def make_pixel_distance(shape, xpix=None, ypix=None):
    """Fill a 2D array with dimensions `shape` with the distance of
    each pixel from a reference direction (xpix,ypix) in pixel
    coordinates.  Pixel coordinates are defined such that (0,0) is
    located at the center of the corner pixel.
    """
    if np.isscalar(shape):
        shape = [shape, shape]
    ny, nx = shape[0], shape[1]

    # Default reference position: the image center.
    if xpix is None:
        xpix = (nx - 1.0) / 2.
    if ypix is None:
        ypix = (ny - 1.0) / 2.

    dx = np.linspace(0, nx - 1, nx) - xpix
    dy = np.linspace(0, ny - 1, ny) - ypix
    dist = np.zeros((ny, nx))
    dist += np.sqrt(dx[np.newaxis, :] ** 2 + dy[:, np.newaxis] ** 2)
    return dist
1.73662
1.798236
0.965736
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=None, ypix=None):
    """Make kernel for a 2D gaussian.

    Parameters
    ----------
    sigma : float
        Standard deviation in degrees.

    npix : int
        Number of pixels per side of the (square) kernel image.

    cdelt : float
        Pixel size in degrees.

    xpix : float, optional
        X pixel coordinate of the gaussian center (defaults to the
        image center).

    ypix : float, optional
        Y pixel coordinate of the gaussian center (defaults to the
        image center).
    """
    # Convert sigma from degrees to pixel units.
    sigma_pix = sigma / cdelt
    dxy = make_pixel_distance(npix, xpix, ypix)
    kern = (1. / (2 * np.pi * sigma_pix ** 2) *
            np.exp(-dxy ** 2 / (sigma_pix ** 2 * 2.0)))
    # Normalize so that sum(kernel) * pixel solid angle = 1.
    kern /= (np.sum(kern) * np.radians(cdelt) ** 2)
    return kern
4.180968
4.584194
0.91204
def make_disk_kernel(radius, npix=501, cdelt=0.01, xpix=None, ypix=None):
    """Make kernel for a 2D disk.

    Parameters
    ----------
    radius : float
        Disk radius in deg.

    npix : int
        Number of pixels per side of the (square) kernel image.

    cdelt : float
        Pixel size in degrees.

    xpix : float, optional
        X pixel coordinate of the disk center (defaults to the image
        center).

    ypix : float, optional
        Y pixel coordinate of the disk center (defaults to the image
        center).
    """
    # Convert the radius from degrees to pixel units.
    radius_pix = radius / cdelt
    dxy = make_pixel_distance(npix, xpix, ypix)
    # Top-hat profile: 1 inside the disk, 0 outside (0.5 on the edge).
    kern = 0.5 * (np.sign(radius_pix - dxy) + 1.0)
    # Normalize so that sum(kernel) * pixel solid angle = 1.
    kern /= (np.sum(kern) * np.radians(cdelt) ** 2)
    return kern
5.004377
5.860939
0.853852
def make_cdisk_kernel(psf, sigma, npix, cdelt, xpix, ypix,
                      psf_scale_fn=None, normalize=False):
    """Make a kernel for a PSF-convolved 2D disk.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    sigma : float
        68% containment radius in degrees.

    npix : int
        Number of pixels per side of the output maps.

    cdelt : float
        Pixel size in degrees.

    psf_scale_fn : callable, optional
        Scaling function applied to the PSF evaluation.

    normalize : bool
        Normalize the sum over energy planes of the kernel to unity.
    """
    # Convert the 68% containment radius to the disk radius: for a
    # uniform disk the 68% containment radius is sqrt(0.68) ~ 0.82462
    # times the disk radius.
    disk_radius = sigma / 0.8246211251235321
    dtheta = psf.dtheta
    energies = psf.energies

    # Angular distance of each pixel from the kernel center in degrees.
    ang = make_pixel_distance(npix, xpix, ypix) * cdelt

    kern = np.zeros((len(energies), npix, npix))
    for i, _ in enumerate(energies):
        # Convolve the PSF at this energy plane with the disk profile,
        # then interpolate onto the pixel grid.
        psfc = convolve2d_disk(
            lambda t: psf.eval(i, t, scale_fn=psf_scale_fn),
            dtheta, disk_radius)
        kern[i] = np.interp(np.ravel(ang), dtheta, psfc).reshape(ang.shape)

    if normalize:
        kern /= (np.sum(kern, axis=0)[np.newaxis, ...] *
                 np.radians(cdelt) ** 2)

    return kern
4.657747
4.826658
0.965005
def make_radial_kernel(psf, fn, sigma, npix, cdelt, xpix, ypix,
                       psf_scale_fn=None, normalize=False,
                       klims=None, sparse=False):
    """Make a kernel for a general radially symmetric 2D function.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    fn : callable
        Function that evaluates the kernel at a radial coordinate r.

    sigma : float
        68% containment radius in degrees.

    npix : int
        Number of pixels per side of the output maps.

    cdelt : float
        Pixel size in degrees.

    psf_scale_fn : callable, optional
        Energy-dependent scaling applied to the PSF.

    normalize : bool
        Normalize the sum over energy planes to unity.

    klims : tuple, optional
        Indices (first, last) of the energy planes to evaluate.

    sparse : bool
        Only evaluate pixels within the containment radius, leaving
        the rest at zero.
    """
    if klims is None:
        egy = psf.energies
    else:
        egy = psf.energies[klims[0]:klims[1] + 1]

    # Angular distance of each pixel from the kernel center in degrees.
    ang_dist = make_pixel_distance(npix, xpix, ypix) * cdelt
    max_ang_dist = np.max(ang_dist) + cdelt
    #dtheta = np.linspace(0.0, (np.max(ang_dist) * 1.05)**0.5, 200)**2.0
    # z = create_kernel_function_lookup(psf, fn, sigma, egy,
    #                                  dtheta, psf_scale_fn)
    shape = (len(egy), npix, npix)
    k = np.zeros(shape)

    # Containment radii used to choose the radial sampling range and
    # the per-plane oversampling factor.
    r99 = psf.containment_angle(energies=egy, fraction=0.997)
    r34 = psf.containment_angle(energies=egy, fraction=0.34)
    rmin = np.maximum(r34 / 4., 0.01)
    rmax = np.maximum(r99, 0.1)
    if sigma is not None:
        rmin = np.maximum(rmin, 0.5 * sigma)
        rmax = np.maximum(rmax, 2.0 * r34 + 3.0 * sigma)
    rmax = np.minimum(rmax, max_ang_dist)

    for i in range(len(egy)):
        # Oversample pixels when the kernel core is narrower than a
        # pixel (at most 8x rebinning).
        rebin = min(int(np.ceil(cdelt / rmin[i])), 8)

        # Radial lookup grid with quadratic spacing (finer near r=0).
        if sparse:
            dtheta = np.linspace(0.0, rmax[i]**0.5, 100)**2.0
        else:
            dtheta = np.linspace(0.0, max_ang_dist**0.5, 200)**2.0
        z = eval_radial_kernel(psf, fn, sigma, i, dtheta, psf_scale_fn)

        # Pixel distances on the oversampled grid; the (rebin-1)/2
        # offset presumably keeps the oversampled image aligned with
        # the original pixelization.
        xdist = make_pixel_distance(npix * rebin,
                                    xpix * rebin + (rebin - 1.0) / 2.,
                                    ypix * rebin + (rebin - 1.0) / 2.)
        xdist *= cdelt / float(rebin)
        #x = val_to_pix(dtheta, np.ravel(xdist))
        if sparse:
            # Only interpolate pixels within the containment radius.
            m = np.ravel(xdist) < rmax[i]
            kk = np.zeros(xdist.size)
            #kk[m] = map_coordinates(z, [x[m]], order=2, prefilter=False)
            kk[m] = np.interp(np.ravel(xdist)[m], dtheta, z)
            kk = kk.reshape(xdist.shape)
        else:
            kk = np.interp(np.ravel(xdist), dtheta, z).reshape(xdist.shape)
            # kk = map_coordinates(z, [x], order=2,
            #                     prefilter=False).reshape(xdist.shape)

        # Sum the oversampled image back down to the output
        # pixelization and renormalize by the oversampling factor.
        if rebin > 1:
            kk = sum_bins(kk, 0, rebin)
            kk = sum_bins(kk, 1, rebin)

        k[i] = kk / float(rebin)**2

    k = k.reshape((len(egy),) + ang_dist.shape)
    if normalize:
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)

    return k
2.819422
2.89229
0.974806
def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                    normalize=False):
    """Generate a kernel for a point-source.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    npix : int
        Number of pixels in X and Y dimensions.

    cdelt : float
        Pixel size in degrees.

    psf_scale_fn : callable, optional
        Scaling function applied to the PSF evaluation.

    normalize : bool
        Normalize the sum over energy planes of the kernel to unity.
    """
    energies = psf.energies
    # Angular distance of each pixel from the kernel center in degrees.
    ang = make_pixel_distance(npix, xpix, ypix) * cdelt
    kern = np.zeros((len(energies), npix, npix))
    for i in range(len(energies)):
        kern[i] = psf.eval(i, ang, scale_fn=psf_scale_fn)
    if normalize:
        kern /= (np.sum(kern, axis=0)[np.newaxis, ...] *
                 np.radians(cdelt) ** 2)
    return kern
3.386749
3.681175
0.920019
def overlap_slices(large_array_shape, small_array_shape, position):
    """Modified version of `~astropy.nddata.utils.overlap_slices`.

    Get slices for the overlapping part of a small and a large array.
    Given a certain position of the center of the small array, with
    respect to the large array, tuples of slices are returned which can
    be used to extract, add or subtract the small array at the given
    position.  This function takes care of the correct behavior at the
    boundaries, where the small array is cut of appropriately.

    Parameters
    ----------
    large_array_shape : tuple
        Shape of the large array.
    small_array_shape : tuple
        Shape of the small array.
    position : tuple
        Position of the small array's center, with respect to the
        large array.  Coordinates should be in the same order as the
        array shape.

    Returns
    -------
    slices_large : tuple of slices
        Slices in all directions for the large array, such that
        ``large_array[slices_large]`` extracts the region of the large
        array that overlaps with the small array.
    slices_small : tuple of slices
        Slices in all directions for the small array, such that
        ``small_array[slices_small]`` extracts the region that is
        inside the large array.
    """
    slices_large = []
    slices_small = []
    for pos, small_len, large_len in zip(position, small_array_shape,
                                         large_array_shape):
        # Edge coordinates of the small array in the large array frame.
        lo = int(pos - small_len // 2)
        hi = int(pos + (small_len - small_len // 2))
        # Clip both slices to the region where the arrays overlap.
        slices_large.append(slice(max(0, lo), min(large_len, hi)))
        slices_small.append(slice(max(0, -lo),
                                  min(large_len - lo, hi - lo)))
    return tuple(slices_large), tuple(slices_small)
1.904689
1.958423
0.972563
def make_library(**kwargs):
    """Build and return a ModelManager object and fill the associated
    model library.

    Keyword Arguments
    -----------------
    library : str
        Path to the model library yaml file.
    comp : str
        Path to the binning components yaml file.
    basedir : str
        Base directory used when constructing a new ModelManager.
    ModelManager : `ModelManager`, optional
        Pre-built manager to reuse instead of creating one.
    """
    library_yaml = kwargs.pop('library', 'models/library.yaml')
    comp_yaml = kwargs.pop('comp', 'config/binning.yaml')
    basedir = kwargs.pop('basedir', os.path.abspath('.'))

    # NOTE(review): the default ModelManager is constructed eagerly
    # even when one is supplied via kwargs -- confirm that building an
    # unused manager has no side effects.
    manager = kwargs.get('ModelManager', ModelManager(basedir=basedir))
    comp_dict = manager.make_library(library_yaml, library_yaml, comp_yaml)

    return {'model_comp_dict': comp_dict, 'ModelManager': manager}
4.601835
4.151198
1.108556
def edisp_disable_list(self):
    """Return the list of sources for which energy dispersion should
    be turned off."""
    return [component.info.source_name
            for component in self.model_components.values()
            if component.edisp_disable]
5.416791
4.847607
1.117416
def make_srcmap_manifest(self, components, name_factory):
    """Build a yaml file that specfies how to make the srcmap files
    for a particular model.

    Parameters
    ----------
    components : list
        The binning components used in this analysis
    name_factory : `NameFactory`
        Object that handles naming conventions

    Returns
    -------
    ret_dict : dict
        Dictionary, keyed by binning component, that contains
        information about where to find the source maps for each
        component of the model.
    """
    ret_dict = {}
    for comp in components:
        compkey = comp.make_key('{ebin_name}_{evtype_name}')
        zcut = "zmax%i" % comp.zmax
        name_keys = dict(modelkey=self.model_name,
                         zcut=zcut,
                         ebin=comp.ebin_name,
                         mktime='none',
                         psftype=comp.evtype_name,
                         coordsys=comp.coordsys)
        outsrcmap = name_factory.merged_srcmaps(**name_keys)
        ccube = name_factory.ccube(**name_keys)
        src_dict = {}
        for comp_name, model_comp in self.model_components.items():
            comp_info = model_comp.info
            model_type = comp_info.model_type
            name_keys['sourcekey'] = comp_name
            # For catalog and composite sources the sourcekey used for
            # srcmap file naming differs from the component name.
            if model_type in ['CatalogSources']:
                #sourcekey = comp_info.comp_key
                sources = comp_info.source_names
                name_keys['sourcekey'] = comp_info.catalog_info.catalog_name
            elif model_type in ['CompositeSource']:
                #sourcekey = comp_info.sourcekey
                name_keys['sourcekey'] = comp_info.sourcekey
                sources = [comp_info.source_name]
            else:
                #sourcekey = comp_name
                sources = [comp_info.source_name]
            src_dict[comp_name] = dict(
                sourcekey=comp_name,
                srcmap_file=name_factory.srcmaps(**name_keys),
                source_names=sources)
        comp_dict = dict(outsrcmap=outsrcmap,
                         ccube=ccube,
                         source_dict=src_dict)
        ret_dict[compkey] = comp_dict
    return ret_dict
3.908972
3.696867
1.057374
def make_model_rois(self, components, name_factory):
    """Make the fermipy roi_model objects for each of a set of
    binning components.

    Parameters
    ----------
    components : list
        The binning components used in this analysis.
    name_factory : `NameFactory`
        Object that handles naming conventions.

    Returns
    -------
    ret_dict : dict
        ROI objects keyed by binning component, plus a 'master' entry
        for the sources shared by all components.
    """
    ret_dict = {}

    # Figure out which sources need to be split by components
    master_roi_source_info = {}
    sub_comp_sources = {}
    for comp_name, model_comp in self.model_components.items():
        comp_info = model_comp.info
        if comp_info.components is None:
            master_roi_source_info[comp_name] = model_comp
        else:
            sub_comp_sources[comp_name] = model_comp

    # Build the xml for the master
    master_roi = SourceFactory.make_roi(master_roi_source_info)
    master_xml_mdl = name_factory.master_srcmdl_xml(
        modelkey=self.model_name)
    print("Writing master ROI model to %s" % master_xml_mdl)
    master_roi.write_xml(master_xml_mdl)
    ret_dict['master'] = master_roi

    # Now deal with the components
    for comp in components:
        zcut = "zmax%i" % comp.zmax
        compkey = "%s_%s" % (zcut, comp.make_key(
            '{ebin_name}_{evtype_name}'))
        # name_keys = dict(zcut=zcut,
        #                 modelkey=self.model_name,
        #                 component=compkey)
        comp_roi_source_info = {}
        for comp_name, model_comp in sub_comp_sources.items():
            comp_info = model_comp.info
            # Pick the sub-component key for this selection.
            # NOTE(review): if a source is neither selection_dependent
            # nor moving, `key` keeps its value from the previous loop
            # iteration (or is undefined on the first one) -- confirm
            # that split sources always have one of these flags set.
            if comp_info.selection_dependent:
                key = comp.make_key('{ebin_name}_{evtype_name}')
            elif comp_info.moving:
                key = zcut
            info_clone = comp_info.components[key].clone_and_merge_sub(key)
            comp_roi_source_info[comp_name] =\
                ModelComponent(info=info_clone,
                               spectrum=model_comp.spectrum)

        # Build the xml for the component
        comp_roi = SourceFactory.make_roi(comp_roi_source_info)
        comp_xml_mdl = name_factory.comp_srcmdl_xml(modelkey=self.model_name,
                                                    component=compkey)
        print("Writing component ROI model to %s" % comp_xml_mdl)
        comp_roi.write_xml(comp_xml_mdl)
        ret_dict[compkey] = comp_roi
    return ret_dict
3.225348
3.136672
1.028271
def read_model_yaml(self, modelkey):
    """Read the yaml file for the diffuse components.

    Parameters
    ----------
    modelkey : str
        Key identifying the model.

    Returns
    -------
    model : dict
        Parsed contents of the model yaml file.
    """
    model_yaml = self._name_factory.model_yaml(modelkey=modelkey,
                                               fullpath=True)
    # BUG FIX: use a context manager so the file handle is closed
    # promptly instead of leaking until garbage collection.
    with open(model_yaml) as f:
        return yaml.safe_load(f)
5.054893
4.918532
1.027724
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):
    """Build up the library of all the components.

    Parameters
    ----------
    diffuse_yaml : str
        Name of the yaml file with the library of diffuse component
        definitions.
    catalog_yaml : str
        Name of the yaml file with the library of catalog split
        definitions.
    binning_yaml : str
        Name of the yaml file with the binning definitions.
    """
    #catalog_dict = yaml.safe_load(open(catalog_yaml))
    components_dict = Component.build_from_yamlfile(binning_yaml)

    # Collect the diffuse and catalog component info separately, then
    # merge them into a single library dictionary.
    diffuse_info = make_diffuse_comp_info_dict(
        GalpropMapManager=self._gmm,
        DiffuseModelManager=self._dmm,
        library=diffuse_yaml,
        components=components_dict)
    catalog_info = make_catalog_comp_dict(
        library=catalog_yaml,
        CatalogSourceManager=self._csm)

    ret_dict = {}
    ret_dict.update(diffuse_info['comp_info_dict'])
    ret_dict.update(catalog_info['comp_info_dict'])
    self._library.update(ret_dict)
    return ret_dict
4.197453
4.33255
0.968818
def make_model_info(self, modelkey):
    """Build a dictionary with the information for a particular model.

    Parameters
    ----------
    modelkey : str
        Key used to identify this particular model

    Returns
    -------
    ret_val : `ModelInfo`
        Object describing the model components and spectra.
    """
    model = self.read_model_yaml(modelkey)
    sources = model['sources']
    components = OrderedDict()
    # Merge the model's spectral definitions into the shared library.
    spec_model_yaml = self._name_factory.fullpath(
        localpath=model['spectral_models'])
    self._spec_lib.update(yaml.safe_load(open(spec_model_yaml)))
    for source, source_info in sources.items():
        model_type = source_info.get('model_type', None)
        # NOTE(review): 'par_overides' (sic) appears to be a typo for
        # 'par_overrides', but it is the key actually read from the
        # model yaml files -- confirm against the config schema before
        # renaming it anywhere.
        par_overrides = source_info.get('par_overides', None)
        version = source_info['version']
        spec_type = source_info['SpectrumType']
        edisp_disable = source_info.get('edisp_disable', False)
        sourcekey = "%s_%s" % (source, version)
        if model_type == 'galprop_rings':
            # One component per galprop ring; SpectrumType is a dict
            # keyed by component with a 'default' fallback.
            comp_info_dict = self.gmm.diffuse_comp_info_dicts(version)
            def_spec_type = spec_type['default']
            for comp_key, comp_info in comp_info_dict.items():
                model_comp = ModelComponent(
                    info=comp_info,
                    spectrum=self._spec_lib[spec_type.get(comp_key,
                                                          def_spec_type)],
                    par_overrides=par_overrides,
                    edisp_disable=edisp_disable)
                components[comp_key] = model_comp
        elif model_type == 'Catalog':
            # One component per catalog split.
            comp_info_dict = self.csm.split_comp_info_dict(source, version)
            def_spec_type = spec_type['default']
            for comp_key, comp_info in comp_info_dict.items():
                model_comp = ModelComponent(
                    info=comp_info,
                    spectrum=self._spec_lib[spec_type.get(comp_key,
                                                          def_spec_type)],
                    par_overrides=par_overrides,
                    edisp_disable=edisp_disable)
                components[comp_key] = model_comp
        else:
            # Plain diffuse source: a single component.
            comp_info = self.dmm.diffuse_comp_info(sourcekey)
            model_comp = ModelComponent(
                info=comp_info,
                spectrum=self._spec_lib[spec_type],
                par_overrides=par_overrides,
                edisp_disable=edisp_disable)
            components[sourcekey] = model_comp
    ret_val = ModelInfo(model_name=modelkey,
                        model_components=components)
    # Cache the result for subsequent lookups.
    self._models[modelkey] = ret_val
    return ret_val
2.680213
2.728831
0.982184
def make_srcmap_manifest(self, modelkey, components, data):
    """Build a yaml file that specfies how to make the srcmap files
    for a particular model.

    Parameters
    ----------
    modelkey : str
        Key used to identify this particular model
    components : list
        The binning components used in this analysis
    data : str
        Path to file containing dataset definition
    """
    # Reuse a cached model description when available.
    if modelkey in self._models:
        model_info = self._models[modelkey]
    else:
        model_info = self.make_model_info(modelkey)

    self._name_factory.update_base_dict(data)

    outfile = os.path.join('analysis',
                           'model_%s' % modelkey,
                           'srcmap_manifest_%s.yaml' % modelkey)
    manifest = model_info.make_srcmap_manifest(components,
                                               self._name_factory)

    outdir = os.path.dirname(outfile)
    try:
        os.makedirs(outdir)
    except OSError:
        # Directory presumably exists already; any real failure will
        # surface when writing the yaml file below.
        pass
    utils.write_yaml(manifest, outfile)
3.522955
3.447175
1.021983
def get_sub_comp_info(source_info, comp):
    """Build and return information about a sub-component for a
    particular selection.

    Parameters
    ----------
    source_info : dict
        Source description; may contain a 'components' sub-dictionary
        keyed by selection, plus the 'moving' and
        'selection_dependent' flags.
    comp : binning component
        Object providing ``make_key`` and ``zmax``.

    Returns
    -------
    ret_dict : dict
        Copy of ``source_info`` updated with the matching
        sub-component entries.

    Raises
    ------
    ValueError
        If the source has sub-components but is neither
        selection-dependent nor moving, so no selection key applies.
    """
    sub_comps = source_info.get('components', None)
    if sub_comps is None:
        return source_info.copy()

    if source_info.get('selection_dependent', False):
        key = comp.make_key('{ebin_name}_{evtype_name}')
    elif source_info.get('moving', False):
        key = "zmax%i" % comp.zmax
    else:
        # BUG FIX: previously `key` was left undefined on this path,
        # producing an opaque NameError at the lookup below.
        raise ValueError('Source with sub-components must be either '
                         'selection_dependent or moving: %s' % source_info)

    ret_dict = source_info.copy()
    ret_dict.update(sub_comps[key])
    return ret_dict
4.623013
4.511195
1.024787
def replace_aliases(cut_dict, aliases):
    """Substitute aliases in a cut dictionary (modified in place).

    Each occurrence of an alias name in a cut string is replaced by
    its parenthesized expansion.
    """
    for key in cut_dict:
        for alias, expansion in aliases.items():
            cut_dict[key] = cut_dict[key].replace(alias,
                                                  '(%s)' % expansion)
2.595843
2.496626
1.03974
def get_files(files, extnames=None):
    """Extract a list of file paths from a list containing both paths
    and file lists with one path per line.

    Parameters
    ----------
    files : list
        List of file paths and/or plain-text file lists.
    extnames : list, optional
        File extensions that mark an entry as a direct path.
        Defaults to ['.root'].

    Returns
    -------
    files_out : list
        Expanded list of file paths.

    Raises
    ------
    Exception
        If an entry is neither a recognized extension nor a
        plain-text file list.
    """
    # Avoid a mutable default argument; None is the sentinel for the
    # historical default of ['.root'].
    if extnames is None:
        extnames = ['.root']

    files_out = []
    for path in files:
        mime = mimetypes.guess_type(path)
        if os.path.splitext(path)[1] in extnames:
            files_out += [path]
        elif mime[0] == 'text/plain':
            # Treat the entry as a file list with one path per line.
            files_out += list(np.loadtxt(path, unpack=True, dtype='str'))
        else:
            raise Exception('Unrecognized input type.')
    return files_out
2.836427
3.070277
0.923834
def get_cuts_from_xml(xmlfile):
    """Extract event selection strings from the XML file.

    Parameters
    ----------
    xmlfile : str
        Path to the XML file defining event classes, event types and
        aliases.

    Returns
    -------
    event_selections : dict
        Dictionary mapping selection name to its expanded cut string.
    """
    root = ElementTree.ElementTree(file=xmlfile).getroot()
    event_maps = root.findall('EventMap')
    alias_maps = root.findall('AliasDict')[0]

    # Collect the raw cut strings for event classes, event types and
    # aliases.  `strip` is a helper defined elsewhere in this module.
    event_classes = {}
    event_types = {}
    event_aliases = {}
    for m in event_maps:
        if m.attrib['altName'] == 'EVENT_CLASS':
            for c in m.findall('EventCategory'):
                event_classes[c.attrib['name']] = strip(
                    c.find('ShortCut').text)
        elif m.attrib['altName'] == 'EVENT_TYPE':
            for c in m.findall('EventCategory'):
                event_types[c.attrib['name']] = strip(c.find('ShortCut').text)

    for m in alias_maps.findall('Alias'):
        event_aliases[m.attrib['name']] = strip(m.text)

    # Expand aliases within the alias dictionary itself; two passes are
    # used, which presumably handles up to two levels of alias nesting
    # -- TODO confirm against the XML schema.
    replace_aliases(event_aliases, event_aliases.copy())
    replace_aliases(event_aliases, event_aliases.copy())
    # Then substitute the expanded aliases into the class/type cuts.
    replace_aliases(event_classes, event_aliases)
    replace_aliases(event_types, event_aliases)

    # Merge everything into a single lookup table.
    event_selections = {}
    event_selections.update(event_classes)
    event_selections.update(event_types)
    event_selections.update(event_aliases)

    return event_selections
2.337386
2.243411
1.041889
def set_event_list(tree, selection=None, fraction=None, start_fraction=None):
    """Set the event list for a tree or chain.

    Parameters
    ----------
    tree : `ROOT.TTree`
        Input tree/chain.

    selection : str
        Cut string defining the event list.

    fraction : float
        Fraction of the total file to include in the event list
        starting from the *end* of the file.

    start_fraction : float
        When set, the event list starts at this fraction of the file
        and spans ``fraction`` of the total entries.

    Returns
    -------
    n : int
        Return value of ``TTree.Draw`` (number of selected events).
    """
    import ROOT
    # Name the event list with rand_str() (defined elsewhere in this
    # module) -- presumably a random string so concurrent calls do not
    # clobber each other in ROOT's global directory.
    elist = rand_str()
    if selection is None:
        cuts = ''
    else:
        cuts = selection
    if fraction is None or fraction >= 1.0:
        # Use the entire file.
        n = tree.Draw(">>%s" % elist, cuts, "goff")
        tree.SetEventList(ROOT.gDirectory.Get(elist))
    elif start_fraction is None:
        # Take the trailing `fraction` of the file.
        nentries = int(tree.GetEntries())
        first_entry = min(int((1.0 - fraction) * nentries), nentries)
        n = tree.Draw(">>%s" % elist, cuts, "goff", nentries, first_entry)
        tree.SetEventList(ROOT.gDirectory.Get(elist))
    else:
        # Take `fraction` of the file starting at `start_fraction`.
        nentries = int(tree.GetEntries())
        first_entry = min(int(start_fraction * nentries), nentries)
        n = first_entry + int(nentries * fraction)
        n = tree.Draw(">>%s" % elist, cuts, "goff", n - first_entry,
                      first_entry)
        tree.SetEventList(ROOT.gDirectory.Get(elist))
    return n
2.639175
2.646841
0.997104
def find_sources(self, prefix='', **kwargs):
    """An iterative source-finding algorithm that uses likelihood
    ratio (TS) maps of the region of interest to find new sources.
    After each iteration a new TS map is generated incorporating
    sources found in the previous iteration.  The method stops when
    the number of iterations exceeds ``max_iter`` or no sources
    exceeding ``sqrt_ts_threshold`` are found.

    Parameters
    ----------
    prefix : str
        Prefix applied to output file names.
    tsmap : dict
        Keyword arguments dictionary for tsmap method.
    tscube : dict
        Keyword arguments dictionary for tscube method.

    Returns
    -------
    dict with keys:
    peaks : list
        List of peak objects.
    sources : list
        List of source objects.
    """
    timer = Timer.create(start=True)
    self.logger.info('Starting.')

    # Build the option schema from the 'sourcefind' defaults plus the
    # tsmap/tscube sub-schemas and merge in user-supplied kwargs.
    schema = ConfigSchema(self.defaults['sourcefind'],
                          tsmap=self.defaults['tsmap'],
                          tscube=self.defaults['tscube'])
    schema.add_option('search_skydir', None, '', SkyCoord)
    schema.add_option('search_minmax_radius', [None, 1.0], '', list)
    config = utils.create_dict(self.config['sourcefind'],
                               tsmap=self.config['tsmap'],
                               tscube=self.config['tscube'])
    config = schema.create_config(config, **kwargs)

    # Defining default properties of test source model
    config['model'].setdefault('Index', 2.0)
    config['model'].setdefault('SpectrumType', 'PowerLaw')
    config['model'].setdefault('SpatialModel', 'PointSource')
    config['model'].setdefault('Prefactor', 1E-13)

    o = {'sources': [], 'peaks': []}

    # Iterate the map-based search; stop early once an iteration
    # yields no new sources.
    for i in range(config['max_iter']):
        srcs, peaks = self._find_sources_iterate(prefix, i, **config)
        self.logger.info('Found %i sources in iteration %i.' %
                         (len(srcs), i))
        o['sources'] += srcs
        o['peaks'] += peaks
        if len(srcs) == 0:
            break

    self.logger.info('Done.')
    self.logger.info('Execution time: %.2f s', timer.elapsed_time)
    return o
def localize(self, name, **kwargs):
    """Find the best-fit position of a source.

    Localization is performed in two steps.  First a TS map is
    computed centered on the source with half-width set by
    ``dtheta_max``.  A fit is then performed to the maximum TS peak
    in this map.  The source position is then further refined by
    scanning the likelihood in the vicinity of the peak found in the
    first step.  The size of the scan region is set to encompass the
    99% positional uncertainty contour as determined from the peak
    fit.

    Parameters
    ----------
    name : str
        Source name.
    optimizer : dict
        Dictionary that overrides the default optimizer settings.

    Returns
    -------
    localize : dict
        Dictionary containing results of the localization analysis.
    """
    timer = Timer.create(start=True)
    name = self.roi.get_source_by_name(name).name

    # Merge the 'localize' defaults, optimizer settings and kwargs.
    schema = ConfigSchema(self.defaults['localize'],
                          optimizer=self.defaults['optimizer'])
    schema.add_option('use_cache', True)
    schema.add_option('prefix', '')
    config = utils.create_dict(self.config['localize'],
                               optimizer=self.config['optimizer'])
    config = schema.create_config(config, **kwargs)

    self.logger.info('Running localization for %s' % name)

    # Snapshot the free-parameter state so the fit performed inside
    # _localize does not permanently change it.
    free_state = FreeParameterState(self)
    loc = self._localize(name, **config)
    free_state.restore()

    self.logger.info('Finished localization.')

    if config['make_plots']:
        self._plotter.make_localization_plots(loc, self.roi,
                                              prefix=config['prefix'])

    outfile = \
        utils.format_filename(self.workdir, 'loc',
                              prefix=[config['prefix'],
                                      name.lower().replace(' ', '_')])

    if config['write_fits']:
        loc['file'] = os.path.basename(outfile) + '.fits'
        self._make_localize_fits(loc, outfile + '.fits',
                                 **config)

    if config['write_npy']:
        np.save(outfile + '.npy', dict(loc))

    self.logger.info('Execution time: %.2f s', timer.elapsed_time)
    return loc
def _fit_position_tsmap(self, name, **kwargs):
    """Localize a source from its TS map.

    Generates a TS map centered on ``skydir`` (default: the current
    source position) with the source itself excluded from the model,
    then fits an error ellipse to the strongest acceptable peak.

    Returns a dict containing the ellipse-fit results plus the fitted
    pixel/sky position, the offset from the current source position,
    the peak delta-loglikelihood and the TS map itself.
    """
    prefix = kwargs.get('prefix', '')
    dtheta_max = kwargs.get('dtheta_max', 0.5)
    zmin = kwargs.get('zmin', -3.0)

    kw = {
        'map_size': 2.0 * dtheta_max,
        'write_fits': kwargs.get('write_fits', False),
        'write_npy': kwargs.get('write_npy', False),
        'use_pylike': kwargs.get('use_pylike', True),
        'max_kernel_radius': self.config['tsmap']['max_kernel_radius'],
        'loglevel': logging.DEBUG
    }

    src = self.roi.copy_source(name)

    # Enlarge the convolution kernel for extended sources.
    if src['SpatialModel'] in ['RadialDisk', 'RadialGaussian']:
        kw['max_kernel_radius'] = max(kw['max_kernel_radius'],
                                      2.0 * src['SpatialWidth'])

    skydir = kwargs.get('skydir', src.skydir)
    tsmap = self.tsmap(utils.join_strings([prefix, name.lower().
                                           replace(' ', '_')]),
                       model=src.data,
                       map_skydir=skydir,
                       exclude=[name],
                       make_plots=False, **kw)

    # Find peaks with TS > 4
    peaks = find_peaks(tsmap['ts'], 4.0, 0.2)
    peak_best = None
    o = {}
    # Walk peaks from strongest to weakest; accept the first one for
    # which the ellipse fit converges inside the map boundary.
    for p in sorted(peaks, key=lambda t: t['amp'], reverse=True):
        xy = p['ix'], p['iy']
        ts_value = tsmap['ts'].data[xy[1], xy[0]]
        posfit = fit_error_ellipse(tsmap['ts'], xy=xy, dpix=2,
                                   zmin=max(zmin, -ts_value * 0.5))
        offset = posfit['skydir'].separation(self.roi[name].skydir).deg
        if posfit['fit_success'] and posfit['fit_inbounds']:
            peak_best = p
            break

    # Fall back on fitting around the global maximum of the TS map.
    if peak_best is None:
        ts_value = np.max(tsmap['ts'].data)
        posfit = fit_error_ellipse(tsmap['ts'], dpix=2,
                                   zmin=max(zmin, -ts_value * 0.5))

    o.update(posfit)
    pix = posfit['skydir'].to_pixel(self.geom.wcs)
    o['xpix'] = float(pix[0])
    o['ypix'] = float(pix[1])
    o['skydir'] = posfit['skydir'].transform_to('icrs')
    o['pos_offset'] = posfit['skydir'].separation(
        self.roi[name].skydir).deg
    # zoffset is twice the delta-loglikelihood at the peak.
    o['loglike'] = 0.5 * posfit['zoffset']
    o['tsmap'] = tsmap['ts']
    return o
def make_nfs_path(path):
    """Make a nfs version of a file path.

    This just puts /nfs at the beginning instead of /gpfs.
    """
    fullpath = path if os.path.isabs(path) else os.path.abspath(path)
    if len(fullpath) < 6:
        return fullpath
    if fullpath.startswith('/gpfs/'):
        fullpath = fullpath.replace('/gpfs/', '/nfs/')
    return fullpath
def make_gpfs_path(path):
    """Make a gpfs version of a file path.

    This just puts /gpfs at the beginning instead of /nfs.
    """
    # BUGFIX: both branches previously called os.path.abspath(path),
    # which was a copy-paste slip -- the sibling make_nfs_path leaves
    # already-absolute paths untouched, so do the same here.
    if os.path.isabs(path):
        fullpath = path
    else:
        fullpath = os.path.abspath(path)
    if len(fullpath) < 5:
        return fullpath
    if fullpath[0:5] == '/nfs/':
        fullpath = fullpath.replace('/nfs/', '/gpfs/')
    return fullpath
def get_lsf_status():
    """Count the number of jobs in various LSF states.

    Returns
    -------
    status_count : dict
        Mapping of LSF state name ('RUN', 'PEND', ...) plus the total
        'NJOB' to the number of jobs in that state.  All zeros when
        ``bjobs`` is not available.
    """
    status_count = {'RUN': 0,
                    'PEND': 0,
                    'SUSP': 0,
                    'USUSP': 0,
                    'NJOB': 0,
                    'UNKNWN': 0}

    try:
        # BUGFIX: request text output; under Python 3 the pipe yields
        # bytes, so the str comparison below never matched.
        subproc = subprocess.Popen(['bjobs'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   universal_newlines=True)
        subproc.stderr.close()
        output = subproc.stdout.readlines()
    except OSError:
        # bjobs is not installed; report an empty queue.
        return status_count

    for line in output[1:]:
        line = line.strip().split()

        # Protect against format of multiproc jobs
        if len(line) < 5:
            continue

        status_count['NJOB'] += 1

        for k in status_count:
            if line[2] == k:
                status_count[k] += 1

    return status_count
def build_bsub_command(command_template, lsf_args):
    """Build and return a lsf batch command template.

    The structure will be 'bsub -s <key> <value> <command_template>'
    where <key> and <value> refer to items in lsf_args.
    """
    if command_template is None:
        return ""
    tokens = ['bsub -o {logfile}']
    for key, value in lsf_args.items():
        tokens.append('-%s' % key)
        # Flag-style options carry no value.
        if value is not None:
            tokens.append('%s' % value)
    tokens.append('%s' % command_template)
    return ' '.join(tokens)
def get_slac_default_args(job_time=1500):
    """Build the default batch-interface argument dictionary for SLAC.

    Parameters
    ----------
    job_time : int
        Expected max length of the job, in seconds.  This is used to
        select the batch queue and set the job_check_sleep parameter
        that sets how often we check for job completion.
    """
    return dict(lsf_args={'W': job_time,
                          'R': '\"select[rhel60&&!fell]\"'},
                max_jobs=500,
                time_per_cycle=15,
                jobs_per_cycle=20,
                max_job_age=90,
                no_batch=False)
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
    """Send a single job to the LSF batch.

    Parameters
    ----------
    link : `fermipy.jobs.chain.Link`
        The link used to invoke the command we are running.
    key : str
        A string that identifies this particular instance of the job.
    job_config : dict
        A dictionary with the arguments for the job, used with the
        command template.
    logfile : str
        The logfile for this job, may be used to check for
        success/failure.
    stream : file
        Stream that receives the command (dry run) or process output.
    """
    sub_dict = job_config.copy()
    if self._no_batch:
        # Run locally, redirecting output to the logfile.
        full_command = "%s >& %s" % (
            link.command_template().format(**sub_dict), logfile)
    else:
        sub_dict['logfile'] = logfile
        bsub_template = build_bsub_command(link.command_template(),
                                           self._lsf_args)
        full_command = bsub_template.format(**sub_dict)

    if self._dry_run:
        # Just echo what would have been submitted.
        stream.write("%s\n" % full_command)
        return 0

    # Make sure the directory holding the logfile exists.
    try:
        os.makedirs(os.path.dirname(logfile))
    except OSError:
        pass

    proc = subprocess.Popen(full_command.split(),
                            stderr=stream,
                            stdout=stream)
    proc.communicate()
    return proc.returncode
def submit_jobs(self, link, job_dict=None, job_archive=None,
                stream=sys.stdout):
    """Submit all the jobs in job_dict.

    Jobs are submitted in cycles of at most ``jobs_per_cycle``,
    sleeping ``time_per_cycle`` seconds between cycles, so that no
    more than ``max_jobs`` jobs are queued at once.

    Returns a `JobStatus` value: ``no_job`` when ``link`` is None,
    ``failed`` when any submission failed, ``done`` otherwise.
    """
    if link is None:
        return JobStatus.no_job
    if job_dict is None:
        job_keys = list(link.jobs.keys())
    else:
        job_keys = sorted(job_dict.keys())

    # copy & reverse the keys b/c we will be popping item off the back of
    # the list.
    # BUGFIX: under Python 3, dict.keys() returns a view that has no
    # reverse() method; make an explicit list copy (this also avoids
    # reversing the sorted() result through an alias).
    unsubmitted_jobs = list(job_keys)
    unsubmitted_jobs.reverse()

    failed = False
    if unsubmitted_jobs:
        if stream != sys.stdout:
            sys.stdout.write('Submitting jobs (%i): ' %
                             len(unsubmitted_jobs))
            sys.stdout.flush()

    while unsubmitted_jobs:
        # Throttle against the number of jobs already queued.
        status = get_lsf_status()
        njob_to_submit = min(self._max_jobs - status['NJOB'],
                             self._jobs_per_cycle,
                             len(unsubmitted_jobs))

        if self._dry_run:
            njob_to_submit = len(unsubmitted_jobs)

        for i in range(njob_to_submit):
            job_key = unsubmitted_jobs.pop()

            # job_details = job_dict[job_key]
            job_details = link.jobs[job_key]
            job_config = job_details.job_config
            if job_details.status == JobStatus.failed:
                # Remove the stale logfile before resubmission.
                clean_job(job_details.logfile, {}, self._dry_run)
                # clean_job(job_details.logfile,
                #          job_details.outfiles, self.args['dry_run'])

            job_config['logfile'] = job_details.logfile
            new_job_details = self.dispatch_job(
                link, job_key, job_archive, stream)
            if new_job_details.status == JobStatus.failed:
                failed = True
                clean_job(new_job_details.logfile,
                          new_job_details.outfiles, self._dry_run)
            link.jobs[job_key] = new_job_details

        if unsubmitted_jobs:
            if stream != sys.stdout:
                sys.stdout.write('.')
                sys.stdout.flush()
            stream.write('Sleeping %.0f seconds between submission cycles\n' %
                         self._time_per_cycle)
            time.sleep(self._time_per_cycle)

    if failed:
        return JobStatus.failed

    if stream != sys.stdout:
        sys.stdout.write('!\n')
    return JobStatus.done
def create_sc_table(scfile, colnames=None):
    """Load an FT2 file from a file or list of files.

    Parameters
    ----------
    scfile : str
        Path to a FITS FT2 file or to a plain-text file listing one
        FT2 path per line.
    colnames : list
        Optional subset of columns to load.

    Returns
    -------
    tab : `~astropy.table.Table`
        Stacked SC_DATA table.
    """
    # Fast path: a single FITS file with all columns requested.
    if utils.is_fits_file(scfile) and colnames is None:
        return create_table_from_fits(scfile, 'SC_DATA')

    if utils.is_fits_file(scfile):
        files = [scfile]
    else:
        # BUGFIX: close the list file after reading it (the handle
        # from open() was previously leaked).
        with open(scfile, 'r') as fh:
            files = [line.strip() for line in fh]

    tables = [create_table_from_fits(f, 'SC_DATA', colnames)
              for f in files]
    return vstack(tables)
def create_table_from_fits(fitsfile, hduname, colnames=None):
    """Memory efficient function for loading a table from a FITS
    file.

    When ``colnames`` is given, only those columns are read (via a
    memory-mapped open); otherwise the whole HDU is loaded.
    """
    if colnames is None:
        return Table.read(fitsfile, hduname)

    with fits.open(fitsfile, memmap=True) as hdulist:
        data = hdulist[hduname].data
        cols = [Column(name=c, data=data.field(c)) for c in colnames]
    return Table(cols)
def get_spectral_index(src, egy):
    """Compute the local spectral index of a source.

    The index is estimated from a two-point finite difference of the
    spectral model in log-log space around ``egy``.  Returns NaN when
    either flux evaluation is non-positive.
    """
    delta = 1E-5
    flux_lo = src.spectrum()(pyLike.dArg(egy * (1 - delta)))
    flux_hi = src.spectrum()(pyLike.dArg(egy * (1 + delta)))
    if flux_lo > 0 and flux_hi > 0:
        return np.log10(flux_lo / flux_hi) / np.log10((1 - delta) /
                                                      (1 + delta))
    return np.nan
def create(cls, infile, config=None, params=None, mask=None):
    """Create a new instance of GTAnalysis from an analysis output
    file generated with `~fermipy.GTAnalysis.write_roi`.  By default
    the new instance will inherit the configuration of the saved
    analysis instance.  The configuration may be overriden by passing
    a configuration file path with the ``config`` argument.

    Parameters
    ----------
    infile : str
        Path to the ROI results file.
    config : str
        Path to a configuration file.  This will override the
        configuration in the ROI results file.
    params : str
        Path to a yaml file with updated parameter values.
    mask : str
        Path to a fits file with an updated mask.
    """
    infile = os.path.abspath(infile)
    roi_file, roi_data = utils.load_data(infile)

    # Only validate when an explicit configuration is supplied; the
    # stored configuration was validated when it was written.
    validate = config is not None
    if config is None:
        config = roi_data['config']

    gta = cls(config, validate=validate)
    gta.setup(init_sources=False)
    gta.load_roi(infile, params=params, mask=mask)
    return gta
def clone(self, config, **kwargs):
    """Make a clone of this analysis instance with the given
    configuration, copying the current ROI model."""
    new_gta = GTAnalysis(config, **kwargs)
    new_gta._roi = copy.deepcopy(self.roi)
    return new_gta
def set_random_seed(self, seed):
    """Set the seed for the random number generator.

    Seeds numpy's global RNG and records the seed in the 'mc'
    section of the configuration.
    """
    np.random.seed(seed)
    self.config['mc']['seed'] = seed
def reload_source(self, name, init_source=True):
    """Delete and reload a source in the model.

    This will update the spatial model of this source to the one
    defined in the XML model.
    """
    for comp in self.components:
        comp.reload_source(name)

    if init_source:
        self._init_source(name)

    # Re-sync the composite model with the first component.
    self.like.model = self.like.components[0].model
def set_source_morphology(self, name, **kwargs):
    """Set the spatial model of a source.

    Parameters
    ----------
    name : str
        Source name.
    spatial_model : str
        Spatial model name (PointSource, RadialGaussian, etc.).
    spatial_pars : dict
        Dictionary of spatial parameters (optional).
    use_pylike : bool
        Update the source maps with pyLikelihood rather than in
        place.
    """
    name = self.roi.get_source_by_name(name).name
    src = self.roi[name]

    spatial_model = kwargs.get('spatial_model', src['SpatialModel'])
    spatial_pars = kwargs.get('spatial_pars', {})
    use_pylike = kwargs.get('use_pylike', True)
    psf_scale_fn = kwargs.get('psf_scale_fn', None)
    update_source = kwargs.get('update_source', False)

    can_set_srcmap = hasattr(pyLike.BinnedLikelihood,
                             'setSourceMapImage')
    if can_set_srcmap and not use_pylike:
        # Update the cached source map in place.
        src.set_spatial_model(spatial_model, spatial_pars)
        self._update_srcmap(src.name, src, psf_scale_fn=psf_scale_fn)
    else:
        # Remove and re-add the source with the new spatial model.
        src = self.delete_source(name, loglevel=logging.DEBUG,
                                 save_template=False)
        src.set_spatial_model(spatial_model, spatial_pars)
        self.add_source(src.name, src, init_source=False,
                        use_pylike=use_pylike, loglevel=logging.DEBUG)

    if update_source:
        self.update_source(name)
def set_source_spectrum(self, name, spectrum_type='PowerLaw',
                        spectrum_pars=None, update_source=True):
    """Set the spectral model of a source.

    This function can be used to change the spectral type of a source
    or modify its spectral parameters.  If called with
    spectrum_type='FileFunction' and spectrum_pars=None, the source
    spectrum will be replaced with a FileFunction with the same
    differential flux distribution as the original spectrum.

    Parameters
    ----------
    name : str
        Source name.
    spectrum_type : str
        Spectrum type (PowerLaw, etc.).
    spectrum_pars : dict
        Dictionary of spectral parameters (optional).
    update_source : bool
        Recompute all source characteristics (flux, TS, NPred) using
        the new spectral model of the source.
    """
    name = self.roi.get_source_by_name(name).name
    src = self.roi[name]
    spectrum_pars = {} if spectrum_pars is None else spectrum_pars

    if (self.roi[name]['SpectrumType'] == 'PowerLaw' and
            spectrum_type == 'LogParabola'):
        # Translate PowerLaw parameters into LogParabola equivalents.
        spectrum_pars.setdefault('beta', {'value': 0.0, 'scale': 1.0,
                                          'min': 0.0, 'max': 1.0})
        spectrum_pars.setdefault('Eb', src.spectral_pars['Scale'])
        spectrum_pars.setdefault('norm', src.spectral_pars['Prefactor'])

        if 'alpha' not in spectrum_pars:
            # BUGFIX: deep-copy the parameter dict before flipping the
            # sign.  Previously the source's own 'Index' parameter
            # dict was assigned by reference and then mutated in
            # place.
            spectrum_pars['alpha'] = copy.deepcopy(
                src.spectral_pars['Index'])
            spectrum_pars['alpha']['value'] *= -1.0
        if spectrum_pars['alpha']['scale'] == -1.0:
            # Normalize to a positive scale convention.
            spectrum_pars['alpha']['value'] *= -1.0
            spectrum_pars['alpha']['scale'] *= -1.0

    if spectrum_type == 'FileFunction':
        self._create_filefunction(name, spectrum_pars)
    else:
        fn = gtutils.create_spectrum_from_dict(spectrum_type,
                                               spectrum_pars)
        self.like.setSpectrum(str(name), fn)

    # Get parameters back from the underlying likelihood object.
    src = self.components[0].like.logLike.getSource(str(name))
    pars_dict = gtutils.get_function_pars_dict(src.spectrum())

    self.roi[name]['SpectrumType'] = spectrum_type
    self.roi[name].set_spectral_pars(pars_dict)
    for c in self.components:
        c.roi[name]['SpectrumType'] = spectrum_type
        c.roi[name].set_spectral_pars(pars_dict)

    if update_source:
        self.update_source(name)
def set_source_dnde(self, name, dnde, update_source=True):
    """Set the differential flux distribution of a source with the
    FileFunction spectral type.

    Parameters
    ----------
    name : str
        Source name.
    dnde : `~numpy.ndarray`
        Array of differential flux values (cm^{-2} s^{-1} MeV^{-1}).
    update_source : bool
        Recompute the source characteristics after the update.
    """
    name = self.roi.get_source_by_name(name).name

    if self.roi[name]['SpectrumType'] != 'FileFunction':
        msg = 'Wrong spectral type: %s' % self.roi[name]['SpectrumType']
        self.logger.error(msg)
        raise Exception(msg)

    # New values must line up with the existing energy grid.
    energies = self.get_source_dnde(name)[0]
    if len(dnde) != len(energies):
        msg = 'Wrong length for dnde array: %i' % len(dnde)
        self.logger.error(msg)
        raise Exception(msg)

    for comp in self.components:
        spectrum = comp.like.logLike.getSource(str(name)).spectrum()
        pyLike.FileFunction_cast(spectrum).setSpectrum(10 ** energies,
                                                       dnde)

    if update_source:
        self.update_source(name)
def get_source_dnde(self, name):
    """Return differential flux distribution of a source.  For
    sources with FileFunction spectral type this returns the internal
    differential flux array.

    Returns
    -------
    loge : `~numpy.ndarray`
        Array of energies at which the differential flux is evaluated
        (log10(E/MeV)).
    dnde : `~numpy.ndarray`
        Array of differential flux values (cm^{-2} s^{-1} MeV^{-1})
        evaluated at energies in ``loge``.
    """
    name = self.roi.get_source_by_name(name).name

    # BUGFIX: the condition was previously inverted ('!='), so the
    # FileFunction internals were read for sources that were NOT of
    # FileFunction type (contrast set_source_dnde, which requires
    # FileFunction for the internal-array path).
    if self.roi[name]['SpectrumType'] == 'FileFunction':
        src = self.components[0].like.logLike.getSource(str(name))
        spectrum = src.spectrum()
        file_function = pyLike.FileFunction_cast(spectrum)
        loge = file_function.log_energy()
        logdnde = file_function.log_dnde()
        # Internal arrays are in natural log; convert to log10 / linear.
        loge = np.log10(np.exp(loge))
        dnde = np.exp(logdnde)
        return loge, dnde
    else:
        # Evaluate the spectral model on an extended energy grid.
        ebinsz = (self.log_energies[-1] -
                  self.log_energies[0]) / self.enumbins
        loge = utils.extend_array(self.log_energies, ebinsz, 0.5, 6.5)
        dnde = np.array([self.like[name].spectrum()(pyLike.dArg(10 ** egy))
                         for egy in loge])
        return loge, dnde
def _create_filefunction(self, name, spectrum_pars):
    """Replace the spectrum of an existing source with a
    FileFunction.

    The (energy, dnde) grid may be supplied via 'loge'/'dnde' in
    ``spectrum_pars``; otherwise it is computed from the current
    spectral model on an extended energy grid.
    """
    spectrum_pars = {} if spectrum_pars is None else spectrum_pars

    if 'loge' in spectrum_pars:
        loge = spectrum_pars.get('loge')
    else:
        ebinsz = (self.log_energies[-1] -
                  self.log_energies[0]) / self.enumbins
        loge = utils.extend_array(self.log_energies, ebinsz, 0.5, 6.5)

    # Get the values
    if 'dnde' in spectrum_pars:
        dnde = spectrum_pars.get('dnde')
    else:
        dnde = np.array([self.like[name].spectrum()(pyLike.dArg(10 ** egy))
                         for egy in loge])

    filename = os.path.join(
        self.workdir,
        '%s_filespectrum.txt' % (name.lower().replace(' ', '_')))

    # Create file spectrum txt file
    np.savetxt(filename, np.vstack((10 ** loge, dnde)).T)
    self.like.setSpectrum(name, str('FileFunction'))
    self.roi[name]['Spectrum_Filename'] = filename

    # Update each component's spectrum from the file just written.
    for comp in self.components:
        spectrum = comp.like.logLike.getSource(str(name)).spectrum()
        spectrum.getParam(str('Normalization')).setBounds(1E-3, 1E3)
        pyLike.FileFunction_cast(spectrum).readFunction(str(filename))
        comp.roi[name]['Spectrum_Filename'] = filename
def stage_output(self):
    """Copy data products to final output directory."""
    if self.workdir == self.outdir:
        return
    elif not os.path.isdir(self.workdir):
        self.logger.error('Working directory does not exist.')
        return

    regex = self.config['fileio']['outdir_regex']
    savefits = self.config['fileio']['savefits']
    files = os.listdir(self.workdir)
    self.logger.info('Staging files to %s', self.outdir)

    # Per-component FITS products are only staged when 'savefits' is
    # enabled.
    fitsfiles = []
    for c in self.components:
        for f in c.files.values():
            if f is None:
                continue
            fitsfiles += [os.path.basename(f)]

    for f in files:
        wpath = os.path.join(self.workdir, f)
        opath = os.path.join(self.outdir, f)

        if not utils.match_regex_list(regex, os.path.basename(f)):
            continue
        # Skip files that are already up to date in the output dir.
        if os.path.isfile(opath) and filecmp.cmp(wpath, opath, False):
            continue
        if not savefits and f in fitsfiles:
            continue

        # BUGFIX: each file was previously logged twice (debug and
        # info); log once at debug level, consistent with
        # stage_input().
        self.logger.debug('Copying ' + f)
        shutil.copy(wpath, self.outdir)

    self.logger.info('Finished.')
def stage_input(self):
    """Copy input files to working directory."""
    if self.workdir == self.outdir:
        return
    if not os.path.isdir(self.workdir):
        self.logger.error('Working directory does not exist.')
        return

    self.logger.info('Staging files to %s', self.workdir)

    # Copy files from the output directory that match the workdir
    # regex list.
    regex = copy.deepcopy(self.config['fileio']['workdir_regex'])
    for fname in os.listdir(self.outdir):
        fpath = os.path.join(self.outdir, fname)
        if not os.path.isfile(fpath):
            continue
        if not utils.match_regex_list(regex, fname):
            continue
        self.logger.debug('Copying ' + fname)
        shutil.copy(fpath, self.workdir)

    # Copy per-component data products that are missing from the
    # working directory.
    for comp in self.components:
        for fpath in comp.files.values():
            if fpath is None:
                continue
            base = os.path.basename(fpath)
            wpath = os.path.join(self.workdir, base)
            opath = os.path.join(self.outdir, base)
            if os.path.isfile(wpath):
                continue
            elif os.path.isfile(opath):
                self.logger.debug('Copying ' + base)
                shutil.copy(opath, self.workdir)

    self.logger.info('Finished.')
def setup(self, init_sources=True, overwrite=False, **kwargs):
    """Run pre-processing for each analysis component and construct a
    joint likelihood object.  This function performs the following
    tasks: data selection (gtselect, gtmktime), data binning (gtbin),
    and model generation (gtexpcube2, gtsrcmaps).

    Parameters
    ----------
    init_sources : bool
        Choose whether to compute properties (flux, TS, etc.) for
        individual sources.
    overwrite : bool
        Run all pre-processing steps even if the output file of that
        step is present in the working directory.  By default this
        function will skip any steps for which the output file
        already exists.
    """
    loglevel = kwargs.get('loglevel', self.loglevel)
    self.logger.log(loglevel, 'Running setup.')

    # Make spatial maps for extended sources
    for s in self.roi.sources:
        if s.diffuse:
            continue
        if not s.extended:
            continue
        self.make_template(s)

    # Run setup for each component
    for i, c in enumerate(self.components):
        c.setup(overwrite=overwrite)

    # Create likelihood
    self._create_likelihood()

    # Determine tmin, tmax
    # NOTE(review): both bounds are accumulated with min(); for
    # self._tmax a max() may have been intended -- confirm against
    # callers before changing.
    for i, c in enumerate(self._components):
        self._tmin = (c.tmin if self._tmin is None
                      else min(self._tmin, c.tmin))
        self._tmax = (c.tmax if self._tmax is None
                      else min(self._tmax, c.tmax))

    if init_sources:
        self.logger.log(loglevel, 'Initializing source properties')
        for name in self.like.sourceNames():
            self.logger.debug('Initializing source %s', name)
            self._init_source(name)
        self._update_roi()

    self.logger.log(loglevel, 'Finished setup.')
def _create_likelihood(self, srcmdl=None):
    """Instantiate the likelihood object for each component and
    create a SummedLikelihood."""
    self._like = SummedLikelihood()
    for comp in self.components:
        comp._create_binned_analysis(srcmdl)
        self._like.addComponent(comp.like)

    # Sync the composite model and reset the fit cache.
    self.like.model = self.like.components[0].model
    self._fitcache = None
    self._init_roi_model()
def generate_model(self, model_name=None):
    """Generate model maps for all components.

    model_name should be a unique identifier for the model.  If
    model_name is None then the model maps will be generated using
    the current parameters of the ROI.
    """
    for comp in self._components:
        comp.generate_model(model_name=model_name)
def set_energy_range(self, logemin, logemax):
    """Set the energy bounds of the analysis.  This restricts the
    evaluation of the likelihood to the data that falls in this
    range.  Input values will be rounded to the closest bin edge
    value.  If either argument is None then the lower or upper bound
    of the analysis instance will be used.

    Parameters
    ----------
    logemin : float
        Lower energy bound in log10(E/MeV).
    logemax : float
        Upper energy bound in log10(E/MeV).

    Returns
    -------
    eminmax : array
        Minimum and maximum energy in log10(E/MeV).
    """
    def snap_to_edge(value, default):
        # Round the requested bound to the closest bin edge.
        if value is None:
            return default
        idx = int(utils.val_to_edge(self.log_energies, value)[0])
        return self.log_energies[idx]

    logemin = snap_to_edge(logemin, self.log_energies[0])
    logemax = snap_to_edge(logemax, self.log_energies[-1])

    self._loge_bounds = np.array([logemin, logemax])
    self._roi_data['loge_bounds'] = np.copy(self.loge_bounds)
    for comp in self.components:
        comp.set_energy_range(logemin, logemax)

    return self._loge_bounds
def model_counts_map(self, name=None, exclude=None, use_mask=False):
    """Return the model counts map for a single source, a list of
    sources, or for the sum of all sources in the ROI.

    Parameters
    ----------
    name : str or list of str
        If None the model map is generated for all sources in the
        ROI.
    exclude : str or list of str
        Sources excluded when calculating the model map.
    use_mask : bool
        Whether the model counts map should include mask pixels
        (i.e., ones whose weights are <= 0).

    Returns
    -------
    map : `~gammapy.maps.Map`
    """
    component_maps = [comp.model_counts_map(name, exclude,
                                            use_mask=use_mask)
                      for comp in self.components]
    return skymap.coadd_maps(self.geom, component_maps)
def model_counts_spectrum(self, name, logemin=None, logemax=None,
                          summed=False, weighted=False):
    """Return the predicted number of model counts versus energy for
    a given source and energy range.  If summed=True return the
    counts spectrum summed over all components otherwise return a
    list of model spectra.  If weighted=True return the weighted
    version of the counts spectrum.
    """
    if logemin is None:
        logemin = self.log_energies[0]
    if logemax is None:
        logemax = self.log_energies[-1]

    if summed:
        cs = np.zeros(self.enumbins)
        # Small offsets keep bounds that fall exactly on a bin edge
        # inside the intended bin.
        imin = utils.val_to_bin_bounded(self.log_energies,
                                        logemin + 1E-7)[0]
        imax = utils.val_to_bin_bounded(self.log_energies,
                                        logemax - 1E-7)[0] + 1

        for c in self.components:
            # Evaluate the component over the full energy range, then
            # rebin its spectrum onto the joint energy grid using the
            # component bin centers.
            ecenter = 0.5 * (c.log_energies[:-1] + c.log_energies[1:])
            counts = c.model_counts_spectrum(name, self.log_energies[0],
                                             self.log_energies[-1],
                                             weighted)

            cs += np.histogram(ecenter,
                               weights=counts,
                               bins=self.log_energies)[0]

        return cs[imin:imax]
    else:
        cs = []
        for c in self.components:
            cs += [c.model_counts_spectrum(name, logemin, logemax,
                                           weighted=weighted)]
        return cs
def get_sources(self, cuts=None, distance=None, skydir=None,
                minmax_ts=None, minmax_npred=None, exclude=None,
                square=False):
    """Retrieve list of sources in the ROI satisfying the given
    selections.

    Returns
    -------
    srcs : list
        A list of `~fermipy.roi_model.Model` objects.
    """
    return self.roi.get_sources(
        skydir, distance, cuts, minmax_ts, minmax_npred, exclude,
        square, coordsys=self.config['binning']['coordsys'])
def add_source(self, name, src_dict, free=None, init_source=True,
               save_source_maps=True, use_pylike=True,
               use_single_psf=False, **kwargs):
    """Add a source to the ROI model.  This function may be called
    either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.

    Parameters
    ----------
    name : str
        Source name.
    src_dict : dict or `~fermipy.roi_model.Source` object
        Dictionary or source object defining the source properties
        (coordinates, spectral parameters, etc.).
    free : bool
        Initialize the source with a free normalization parameter.
    use_pylike : bool
        Create source maps with pyLikelihood.
    use_single_psf : bool
        Use the PSF model calculated for the ROI center.  If false
        then a new model will be generated using the position of the
        source.
    """
    if self.roi.has_source(name):
        msg = 'Source %s already exists.' % name
        self.logger.error(msg)
        raise Exception(msg)

    loglevel = kwargs.pop('loglevel', self.loglevel)
    self.logger.log(loglevel, 'Adding source ' + name)

    src = self.roi.create_source(name, src_dict, rescale=True)
    self.make_template(src)

    for c in self.components:
        c.add_source(name, src_dict, free=free,
                     save_source_maps=save_source_maps,
                     use_pylike=use_pylike,
                     use_single_psf=use_single_psf)

    # If the likelihood has not been constructed yet there is nothing
    # more to synchronize.
    if self._like is None:
        return

    if self.config['gtlike']['edisp'] and src.name not in \
            self.config['gtlike']['edisp_disable']:
        self.set_edisp_flag(src.name, True)

    # Propagate the new source into the composite model.
    self.like.syncSrcParams(str(name))
    self.like.model = self.like.components[0].model
    # if free is not None:
    #    self.free_norm(name, free, loglevel=logging.DEBUG)

    if init_source:
        self._init_source(name)
        self._update_roi()

    if self._fitcache is not None:
        self._fitcache.update_source(name)
def add_sources_from_roi(self, names, roi, free=False, **kwargs):
    """Add multiple sources to the current ROI model copied from
    another ROI model.

    Parameters
    ----------
    names : list
        List of str source names to add.
    roi : `~fermipy.roi_model.ROIModel` object
        The roi model from which to add sources.
    free : bool
        Initialize the source with a free normalization parameter.
    """
    for src_name in names:
        self.add_source(src_name, roi[src_name].data, free=free,
                        **kwargs)
def delete_source(self, name, save_template=True,
                  delete_source_map=False, build_fixed_wts=True,
                  **kwargs):
    """Delete a source from the ROI model.

    Parameters
    ----------
    name : str
        Source name.
    save_template : bool
        Keep the SpatialMap FITS template associated with this
        source.
    delete_source_map : bool
        Delete the source map associated with this source from the
        source maps file.

    Returns
    -------
    src : `~fermipy.roi_model.Model`
        The deleted source object.
    """
    if not self.roi.has_source(name):
        self.logger.error('No source with name: %s', name)
        return

    loglevel = kwargs.pop('loglevel', self.loglevel)
    self.logger.log(loglevel, 'Deleting source %s', name)

    # STs require a source to be freed before deletion.
    if self.like is not None:
        self.free_norm(name, loglevel=logging.DEBUG)

    for comp in self.components:
        comp.delete_source(name, save_template=save_template,
                           delete_source_map=delete_source_map,
                           build_fixed_wts=build_fixed_wts)

    src = self.roi.get_source_by_name(name)
    self.roi.delete_sources([src])
    if self.like is not None:
        self.like.model = self.like.components[0].model
    self._update_roi()
    return src
def delete_sources(self, cuts=None, distance=None, skydir=None,
                   minmax_ts=None, minmax_npred=None, exclude=None,
                   square=False, names=None):
    """Delete sources in the ROI model satisfying the given selection
    criteria.

    Parameters
    ----------
    cuts : dict
        Dictionary of [min,max] selections on source properties.
    distance : float
        Cut on angular distance from ``skydir``.
    skydir : `~astropy.coordinates.SkyCoord`
        Reference sky coordinate for ``distance`` selection.
    minmax_ts : list
        Select sources with TS in the range [min,max].
    minmax_npred : list
        Select sources with npred in the range [min,max].
    square : bool
        Apply a square (ROI-like) rather than circular selection.
    names : list
        Select sources matching a name in this list.

    Returns
    -------
    srcs : list
        A list of `~fermipy.roi_model.Model` objects.
    """
    coordsys = self.config['binning']['coordsys']
    srcs = self.roi.get_sources(skydir=skydir, distance=distance,
                                cuts=cuts, minmax_ts=minmax_ts,
                                minmax_npred=minmax_npred,
                                exclude=exclude, square=square,
                                coordsys=coordsys, names=names)

    # Defer rebuilding the fixed model weights until all sources have
    # been removed.
    for src in srcs:
        self.delete_source(src.name, build_fixed_wts=False)

    if self.like is not None:
        # Build fixed model weights in one pass
        for comp in self.components:
            comp.like.logLike.buildFixedModelWts()

    self._update_roi()
    return srcs
def free_sources_by_name(self, names, free=True, pars=None, **kwargs):
    """Free all sources with names matching ``names``.

    Parameters
    ----------
    names : list
        List of source names.
    free : bool
        Choose whether to free (free=True) or fix (free=False)
        source parameters.
    pars : list
        Set a list of parameters to be freed/fixed for each source.
        If None then all source parameters will be freed/fixed.  If
        pars='norm' then only normalization parameters will be freed.

    Returns
    -------
    srcs : list
        A list of `~fermipy.roi_model.Model` objects.
    """
    if names is None:
        return

    if not isinstance(names, list):
        names = [names]

    # Resolve aliases to canonical source names before matching.
    resolved = [self.roi.get_source_by_name(t).name for t in names]
    matched = [s for s in self.roi.sources if s.name in resolved]
    for src in matched:
        self.free_source(src.name, free=free, pars=pars, **kwargs)
    return matched
2.616518
2.794681
0.936249
def free_sources(self, free=True, pars=None, cuts=None, distance=None,
                 skydir=None, minmax_ts=None, minmax_npred=None,
                 exclude=None, square=False, **kwargs):
    """Free or fix sources in the ROI model satisfying the given
    selection.  When multiple selections are defined the selected
    sources are those satisfying the logical AND of all selections.

    Parameters
    ----------
    free : bool
        Choose whether to free (free=True) or fix (free=False)
        source parameters.
    pars : list
        Set a list of parameters to be freed/fixed for each source.
        If None then all source parameters will be freed/fixed.  If
        pars='norm' then only normalization parameters will be freed.
    cuts : dict
        Dictionary of [min,max] selections on source properties.
    distance : float
        Cut on angular distance from ``skydir``.  If None then no
        selection will be applied.
    skydir : `~astropy.coordinates.SkyCoord`
        Reference sky coordinate for ``distance`` selection.  If None
        then the ROI center is used.
    minmax_ts : list
        Free sources with TS in the range [min,max].  None bounds are
        open-ended; None disables the selection.
    minmax_npred : list
        Free sources with npred in the range [min,max].  None bounds
        are open-ended; None disables the selection.
    exclude : list
        Names of sources that will be excluded from the selection.
    square : bool
        Switch between applying a circular or square (ROI-like)
        selection on the maximum projected distance from the ROI
        center.

    Returns
    -------
    srcs : list
        A list of `~fermipy.roi_model.Model` objects.
    """
    coordsys = self.config['binning']['coordsys']
    matched = self.roi.get_sources(skydir=skydir, distance=distance,
                                   cuts=cuts, minmax_ts=minmax_ts,
                                   minmax_npred=minmax_npred,
                                   exclude=exclude, square=square,
                                   coordsys=coordsys)
    for src in matched:
        self.free_source(src.name, free=free, pars=pars, **kwargs)
    return matched
2.489959
3.125455
0.796671
def set_parameter(self, name, par, value, true_value=True, scale=None,
                  bounds=None, error=None, update_source=True):
    """Update the value of a parameter.  Parameter bounds will
    automatically be adjusted to encompass the new parameter value.

    Parameters
    ----------
    name : str
        Source name.
    par : str
        Parameter name.
    value : float
        Parameter value.  By default this argument should be the
        unscaled (True) parameter value.
    scale : float
        Parameter scale (optional).  Value argument is interpreted
        with respect to the scale parameter if it is provided.
    bounds : list
        Parameter bounds (optional).  Copied internally so the
        caller's list is never mutated.
    error : float
        Parameter error (optional).  By default this argument should
        be the unscaled (True) parameter value.
    update_source : bool
        Update the source dictionary for the object.
    """
    name = self.roi.get_source_by_name(name).name
    idx = self.like.par_index(name, par)
    current_bounds = list(self.like.model[idx].getBounds())

    if scale is not None:
        self.like[idx].setScale(scale)
    else:
        scale = self.like.model[idx].getScale()

    if true_value:
        current_bounds[0] = min(current_bounds[0], value / scale)
        current_bounds[1] = max(current_bounds[1], value / scale)
        if error is not None:
            error = error / scale
    else:
        current_bounds[0] = min(current_bounds[0], value)
        current_bounds[1] = max(current_bounds[1], value)

    # update current bounds to encompass new value
    self.like[idx].setBounds(*current_bounds)

    if true_value:
        for p in self.like[idx].pars:
            p.setTrueValue(value)
    else:
        self.like[idx].setValue(value)

    if bounds is not None:
        # FIX: copy before widening so the caller's list is not
        # mutated in place.
        bounds = list(bounds)
        if true_value:
            bounds[0] = min(bounds[0], value / scale)
            bounds[1] = max(bounds[1], value / scale)
        else:
            bounds[0] = min(bounds[0], value)
            bounds[1] = max(bounds[1], value)

        # For some reason the numerical accuracy is causing this to
        # throw exceptions.
        try:
            self.like[idx].setBounds(*bounds)
        except RuntimeError:
            self.logger.warning(
                "Caught failure on setBounds for %s::%s." % (name, par))

    if error is not None:
        self.like[idx].setError(error)

    self._sync_params(name)

    if update_source:
        self.update_source(name)
2.606494
2.608965
0.999053
def set_parameter_scale(self, name, par, scale):
    """Update the scale of a parameter while keeping its value constant."""
    name = self.roi.get_source_by_name(name).name
    idx = self.like.par_index(name, par)
    old_bounds = list(self.like.model[idx].getBounds())
    old_scale = self.like.model[idx].getScale()
    old_value = self.like[idx].getValue()

    # Rescale value and bounds by the ratio of old to new scale so the
    # true (scale * value) parameter is unchanged.
    ratio = old_scale / scale
    self.like[idx].setScale(scale)
    self.like[idx].setValue(old_value * ratio)
    self.like[idx].setBounds(old_bounds[0] * ratio, old_bounds[1] * ratio)
    self._sync_params(name)
3.248414
3.163765
1.026756
def set_parameter_bounds(self, name, par, bounds):
    """Set the bounds on the scaled value of a parameter.

    Parameters
    ----------
    name : str
        Source name.
    par : str
        Parameter name.
    bounds : list
        Upper and lower bound.
    """
    par_idx = self.like.par_index(name, par)
    lo, hi = bounds
    self.like[par_idx].setBounds(lo, hi)
    self._sync_params(name)
9.571791
13.085155
0.7315
def set_parameter_error(self, name, par, error):
    """Set the error on the value of a parameter.

    Parameters
    ----------
    name : str
        Source name.
    par : str
        Parameter name.
    error : float
        The value for the parameter error.
    """
    par_idx = self.like.par_index(name, par)
    self.like[par_idx].setError(error)
    self._sync_params(name)
9.275969
14.994158
0.618639
def lock_parameter(self, name, par, lock=True):
    """Set parameter to locked/unlocked state.  A locked parameter
    will be ignored when running methods that free/fix sources or
    parameters.

    Parameters
    ----------
    name : str
        Source name.
    par : str
        Parameter name.
    lock : bool
        Set parameter to locked (True) or unlocked (False) state.
    """
    name = self.roi.get_source_by_name(name).name
    lck_params = self._lck_params.setdefault(name, [])

    if lock:
        # Fix the parameter before recording it as locked.
        self.free_parameter(name, par, False)
        # FIX: idiomatic membership test and append (was `not par in`
        # and `lck_params += [par]`).
        if par not in lck_params:
            lck_params.append(par)
    else:
        if par in lck_params:
            lck_params.remove(par)
3.551946
3.560699
0.997542
def free_parameter(self, name, par, free=True):
    """Free/Fix a parameter of a source by name.

    Parameters
    ----------
    name : str
        Source name.
    par : str
        Parameter name.
    free : bool
        Choose whether to free (free=True) or fix (free=False).
    """
    name = self.get_source_name(name)
    # Locked parameters are never toggled.
    if par in self._lck_params.get(name, []):
        return
    par_idx = self.like.par_index(name, par)
    self.like[par_idx].setFree(free)
    self._sync_params(name)
6.201579
7.502326
0.826621
def lock_source(self, name, lock=True):
    """Set all parameters of a source to a locked/unlocked state.
    Locked parameters will be ignored when running methods that
    free/fix sources or parameters.

    Parameters
    ----------
    name : str
        Source name.
    lock : bool
        Set source parameters to locked (True) or unlocked (False)
        state.
    """
    name = self.get_source_name(name)
    if not lock:
        self._lck_params[name] = []
        return
    # Fix every parameter first, then record the locked set.
    par_names = self.get_source_params(name)
    self.free_source(name, False, pars=par_names)
    self._lck_params[name] = par_names
3.799324
3.881001
0.978955
def free_source(self, name, free=True, pars=None, **kwargs):
    """Free/Fix parameters of a source.

    Parameters
    ----------
    name : str
        Source name.
    free : bool
        Choose whether to free (free=True) or fix (free=False)
        source parameters.
    pars : list
        Set a list of parameters to be freed/fixed for this source.
        If None then all source parameters will be freed/fixed.
        May also be 'norm' or 'shape' to select only normalization
        or shape parameters.

    Raises
    ------
    ValueError
        If ``pars`` is neither None, 'norm', 'shape', nor a list.
    """
    free_pars = self.get_free_param_vector()
    loglevel = kwargs.pop('loglevel', self.loglevel)

    # Find the source
    src = self.roi.get_source_by_name(name)
    name = src.name

    spectrum_type = src['SpectrumType']
    if pars is None or (isinstance(pars, list) and not pars):
        pars = (norm_parameters.get(spectrum_type, []) +
                shape_parameters.get(spectrum_type, []))
    elif pars == 'norm':
        pars = list(norm_parameters.get(spectrum_type, []))
    elif pars == 'shape':
        pars = list(shape_parameters.get(spectrum_type, []))
    elif isinstance(pars, list):
        pass
    else:
        # FIX: raise ValueError instead of bare Exception (still caught
        # by any caller catching Exception).
        raise ValueError('Invalid parameter list.')

    # Remove locked parameters
    lck_params = self._lck_params.get(name, [])
    pars = [p for p in pars if p not in lck_params]

    # Deduce here the names of all parameters from the spectral type
    src_par_names = pyLike.StringVector()
    self.like[name].src.spectrum().getParamNames(src_par_names)

    par_indices = []
    par_names = []
    for p in src_par_names:
        # pars is always a list at this point; skip parameters not
        # selected or already in the requested state.
        if p not in pars:
            continue
        idx = self.like.par_index(name, p)
        if free == free_pars[idx]:
            continue
        par_indices.append(idx)
        par_names.append(p)

    if not par_names:
        return

    if free:
        self.logger.log(loglevel, 'Freeing parameters for %-22s: %s',
                        name, par_names)
    else:
        self.logger.log(loglevel, 'Fixing parameters for %-22s: %s',
                        name, par_names)

    for idx in par_indices:
        self.like[idx].setFree(free)

    self._sync_params_state(name)
2.967726
3.028254
0.980012
def free_norm(self, name, free=True, **kwargs):
    """Free/Fix normalization of a source.

    Parameters
    ----------
    name : str
        Source name.
    free : bool
        Choose whether to free (free=True) or fix (free=False).
    """
    name = self.get_source_name(name)
    norm_par = self.like.normPar(name).getName()
    self.free_source(name, pars=[norm_par], free=free, **kwargs)
7.557634
7.946286
0.95109
def free_index(self, name, free=True, **kwargs):
    """Free/Fix index of a source.

    Parameters
    ----------
    name : str
        Source name.
    free : bool
        Choose whether to free (free=True) or fix (free=False).
    """
    src = self.roi.get_source_by_name(name)
    # Unknown spectrum types have no registered index parameters.
    idx_pars = index_parameters.get(src['SpectrumType'], [])
    self.free_source(name, free=free, pars=idx_pars, **kwargs)
9.6516
10.299415
0.937102
def free_shape(self, name, free=True, **kwargs):
    """Free/Fix shape parameters of a source.

    Parameters
    ----------
    name : str
        Source name.
    free : bool
        Choose whether to free (free=True) or fix (free=False).
    """
    src = self.roi.get_source_by_name(name)
    # FIX: use .get() with a default (consistent with free_index) so a
    # spectrum type with no registered shape parameters frees nothing
    # instead of raising KeyError.
    self.free_source(name, free=free,
                     pars=shape_parameters.get(src['SpectrumType'], []),
                     **kwargs)
9.75115
10.177446
0.958114
def get_source_name(self, name):
    """Return the name of a source as it is defined in the
    pyLikelihood model object."""
    # If the likelihood object already knows this name, use it;
    # otherwise resolve the alias through the ROI model.
    if name in self.like.sourceNames():
        return name
    return self.roi.get_source_by_name(name).name
9.41761
6.919186
1.361087
def profile_norm(self, name, logemin=None, logemax=None, reoptimize=False,
                 xvals=None, npts=None, fix_shape=True, savestate=True,
                 **kwargs):
    """Profile the normalization of a source.

    Parameters
    ----------
    name : str
        Source name.
    reoptimize : bool
        Re-optimize free parameters in the model at each point in
        the profile likelihood scan.
    """
    self.logger.debug('Profiling %s', name)

    if savestate:
        saved_state = LikelihoodState(self.like)

    if fix_shape:
        self.free_sources(False, pars='shape', loglevel=logging.DEBUG)

    if npts is None:
        npts = self.config['gtlike']['llscan_npts']

    # Find the source
    name = self.roi.get_source_by_name(name).name
    norm_par_name = self.like.normPar(name).getName()

    loge_bounds = self.loge_bounds
    if logemin is not None or logemax is not None:
        self.set_energy_range(logemin, logemax)

    # Find a sequence of values for the normalization scan
    if xvals is None:
        if reoptimize:
            xvals = self._find_scan_pts_reopt(name, npts=npts, **kwargs)
        else:
            xvals = self._find_scan_pts(name, npts=9)
            lnlp = self.profile(name, norm_par_name,
                                reoptimize=False, xvals=xvals)
            lims = utils.get_parameter_limits(lnlp['xvals'],
                                              lnlp['dloglike'],
                                              cl_limit=0.99)

            if not np.isfinite(lims['ul']):
                self.logger.warning('Upper limit not found. '
                                    'Refitting normalization.')
                self.like.optimize(0)
                xvals = self._find_scan_pts(name, npts=npts)
                lnlp = self.profile(name, norm_par_name,
                                    reoptimize=False, xvals=xvals)
                lims = utils.get_parameter_limits(lnlp['xvals'],
                                                  lnlp['dloglike'],
                                                  cl_limit=0.99)

            if np.isfinite(lims['ll']):
                # Two-sided interval: sample between the limits and
                # prepend zero.
                xhi = np.linspace(lims['x0'], lims['ul'], npts - npts // 2)
                xlo = np.linspace(lims['ll'], lims['x0'], npts // 2)
                xvals = np.concatenate((xlo[:-1], xhi))
                xvals = np.insert(xvals, 0, 0.0)
            elif np.abs(lnlp['dloglike'][0] - lims['lnlmax']) > 0.1:
                lims['ll'] = 0.0
                xhi = np.linspace(lims['x0'], lims['ul'],
                                  (npts + 1) - (npts + 1) // 2)
                xlo = np.linspace(lims['ll'], lims['x0'], (npts + 1) // 2)
                xvals = np.concatenate((xlo[:-1], xhi))
            else:
                xvals = np.linspace(0, lims['ul'], npts)

    o = self.profile(name, norm_par_name, reoptimize=reoptimize,
                     xvals=xvals, savestate=savestate, **kwargs)

    if savestate:
        saved_state.restore()

    if logemin is not None or logemax is not None:
        self.set_energy_range(*loge_bounds)

    self.logger.debug('Finished')
    return o
2.957926
2.894189
1.022022
def constrain_norms(self, srcNames, cov_scale=1.0):
    """Constrain the normalizations of one or more sources by adding
    gaussian priors with sigma equal to the parameter error times a
    scaling factor."""
    # Add a gaussian prior centered on the current value of each free
    # normalization parameter that has a non-zero error.
    for src_name in srcNames:
        norm_par = self.like.normPar(src_name)
        if norm_par.error() == 0.0 or not norm_par.isFree():
            continue
        self.add_gauss_prior(src_name, norm_par.getName(),
                             norm_par.getValue(),
                             norm_par.error() * cov_scale)
6.244141
5.859744
1.0656
def remove_priors(self):
    """Clear all priors."""
    # Strip the prior from every spectral parameter of every source.
    for src in self.roi.sources:
        spectrum_pars = self.like[src.name].funcs["Spectrum"].params
        for par in spectrum_pars.values():
            par.removePrior()
21.539072
18.104424
1.189713
def _create_optObject(self, **kwargs):
    """Make MINUIT or NewMinuit type optimizer object."""
    optimizer = kwargs.get('optimizer',
                           self.config['optimizer']['optimizer'])
    opt_name = optimizer.upper()
    if opt_name == 'MINUIT':
        return pyLike.Minuit(self.like.logLike)
    if opt_name == 'NEWMINUIT':
        return pyLike.NewMinuit(self.like.logLike)
    # Fall back to the generic optimizer factory for anything else.
    factory = pyLike.OptimizerFactory_instance()
    return factory.create(str(optimizer), self.like.logLike)
3.820785
3.223979
1.185114
def fit(self, update=True, **kwargs):
    """Run the likelihood optimization.  This will execute a fit of
    all parameters that are currently free in the model and update
    the characteristics of the corresponding model components (TS,
    npred, etc.).  If the fit fails, all parameter values are
    restored to their state prior to the fit.

    Parameters
    ----------
    update : bool
        Update the model dictionary for all sources with free
        parameters.

    Returns
    -------
    fit : dict
        Dictionary containing diagnostic information from the fit
        (fit quality, parameter covariances, etc.).
    """
    loglevel = kwargs.pop('loglevel', self.loglevel)
    self.logger.log(loglevel, "Starting fit.")

    # Extract options from kwargs
    fit_config = copy.deepcopy(self.config['optimizer'])
    fit_config.setdefault('covar', True)
    fit_config.setdefault('reoptimize', False)
    fit_config = utils.merge_dict(fit_config, kwargs)

    num_free = self.like.nFreeParams()
    loglike_init = -self.like()

    # Initialize output dict
    results = {'fit_quality': 3,
               'fit_status': 0,
               'fit_success': True,
               'dloglike': 0.0,
               'edm': 0.0,
               'loglike': loglike_init,
               'covariance': None,
               'correlation': None,
               'values': np.ones(num_free) * np.nan,
               'errors': np.ones(num_free) * np.nan,
               'indices': np.zeros(num_free, dtype=int),
               'is_norm': np.empty(num_free, dtype=bool),
               'src_names': num_free * [None],
               'par_names': num_free * [None],
               'config': fit_config}

    if not num_free:
        self.logger.log(loglevel, "Skipping fit. No free parameters.")
        return results

    state_snapshot = LikelihoodState(self.like)
    results.update(self._fit(**fit_config))
    self.logger.debug("Fit complete.")
    results['dloglike'] = results['loglike'] - loglike_init

    if not results['fit_success']:
        self.logger.error('%s failed with status code %i fit quality %i',
                          fit_config['optimizer'],
                          results['fit_status'],
                          results['fit_quality'])
        # Revert to the pre-fit parameter state on failure.
        state_snapshot.restore()
        return results

    if update:
        free_params = self.get_params(True)
        self._extract_correlation(results, free_params)
        for src_name in self.like.sourceNames():
            if not self.get_free_source_params(src_name):
                continue
            self.update_source(src_name,
                               reoptimize=fit_config['reoptimize'])
        # Update roi model counts
        self._update_roi()

    self.logger.log(loglevel,
                    "Fit returned successfully. Quality: %3i Status: %3i",
                    results['fit_quality'], results['fit_status'])
    self.logger.log(loglevel, "LogLike: %12.3f DeltaLogLike: %12.3f ",
                    results['loglike'], results['dloglike'])
    return results
3.639282
3.429083
1.061299
def load_xml(self, xmlfile):
    """Load model definition from XML.

    Parameters
    ----------
    xmlfile : str
        Name of the input XML file.
    """
    self.logger.info('Loading XML')
    for comp in self.components:
        comp.load_xml(xmlfile)
    for src_name in self.like.sourceNames():
        self.update_source(src_name)
    # Invalidate the fit cache since the model has changed.
    self._fitcache = None
    self.logger.info('Finished Loading XML')
6.539829
6.542411
0.999605
def load_parameters_from_yaml(self, yamlfile, update_sources=False):
    """Load model parameters from yaml.

    Parameters
    ----------
    yamlfile : str
        Name of the input yaml file.
    update_sources : bool
        Update the source dictionary after each parameter is set.
    """
    d = utils.load_yaml(yamlfile)
    for src, src_pars in d.items():
        for par_name, par_dict in src_pars.items():
            # Skip non-parameter entries.
            if par_name in ['SpectrumType']:
                continue
            par_value = par_dict.get('value', None)
            par_error = par_dict.get('error', None)
            par_scale = par_dict.get('scale', None)
            par_min = par_dict.get('min', None)
            par_max = par_dict.get('max', None)
            par_free = par_dict.get('free', None)
            if par_min is not None and par_max is not None:
                par_bounds = [par_min, par_max]
            else:
                par_bounds = None
            try:
                self.set_parameter(src, par_name, par_value,
                                   true_value=False,
                                   scale=par_scale,
                                   bounds=par_bounds,
                                   error=par_error,
                                   update_source=update_sources)
            except RuntimeError as msg:
                # FIX: logger.warn is a deprecated alias of warning;
                # use lazy %-args instead of eager string formatting.
                self.logger.warning(msg)
                self.logger.warning("Did not set parameter %s:%s",
                                    src, par_name)
                continue
            except Exception as msg:
                # Best-effort: log and move on to the next parameter.
                self.logger.warning(msg)
                continue
            if par_free is not None:
                self.free_parameter(src, par_name, par_free)
    self._sync_params_state()
2.177343
2.293349
0.949416
def _restore_counts_maps(self):
    """Revert counts maps to their state prior to injecting any
    simulated components."""
    for comp in self.components:
        comp.restore_counts_maps()

    if hasattr(self.like.components[0].logLike, 'setCountsMap'):
        self._init_roi_model()
    else:
        # Older ST versions: rebuild the summed likelihood from
        # scratch using a temporary XML model.
        self.write_xml('tmp')
        self._like = SummedLikelihood()
        for comp in self._components:
            comp._create_binned_analysis()
            self._like.addComponent(comp.like)
        self._init_roi_model()
        self.load_xml('tmp')
6.555533
6.169822
1.062516
def simulate_source(self, src_dict=None):
    """Inject simulated source counts into the data.

    Parameters
    ----------
    src_dict : dict
        Dictionary defining the spatial and spectral properties of
        the source that will be injected.
    """
    self._fitcache = None

    # Work on a copy so the caller's dict is untouched.
    src_dict = {} if src_dict is None else copy.deepcopy(src_dict)

    skydir = wcs_utils.get_target_skydir(src_dict, self.roi.skydir)
    src_dict.setdefault('ra', skydir.ra.deg)
    src_dict.setdefault('dec', skydir.dec.deg)
    src_dict.setdefault('SpatialModel', 'PointSource')
    src_dict.setdefault('SpatialWidth', 0.3)
    src_dict.setdefault('Index', 2.0)
    src_dict.setdefault('Prefactor', 1E-13)

    # Temporarily add the source, inject its counts, then remove it.
    self.add_source('mcsource', src_dict, free=True,
                    init_source=False)
    for comp in self.components:
        comp.simulate_roi('mcsource', clear=False)

    self.delete_source('mcsource')

    if hasattr(self.like.components[0].logLike, 'setCountsMap'):
        self._init_roi_model()
    else:
        # Older ST versions: rebuild the summed likelihood from a
        # temporary XML model.
        self.write_xml('tmp')
        self._like = SummedLikelihood()
        for comp in self._components:
            comp._create_binned_analysis('tmp.xml')
            self._like.addComponent(comp.like)
        self._init_roi_model()
        self.load_xml('tmp')
4.429684
4.5396
0.975787
def simulate_roi(self, name=None, randomize=True, restore=False):
    """Generate a simulation of the ROI using the current best-fit
    model and replace the data counts cube with this simulation.
    The simulation is created by generating an array of Poisson
    random numbers with expectation values drawn from the model cube
    of the binned analysis instance.  This function will update the
    counts cube both in memory and in the source map file.  The
    counts cube can be restored to its original state by calling
    this method with ``restore`` = True.

    Parameters
    ----------
    name : str
        Name of the model component to be simulated.  If None then
        the whole ROI will be simulated.
    restore : bool
        Restore the data counts cube to its original state.
    """
    self.logger.info('Simulating ROI')

    self._fitcache = None

    if restore:
        self.logger.info('Restoring')
        self._restore_counts_maps()
        self.logger.info('Finished')
        return

    for comp in self.components:
        comp.simulate_roi(name=name, clear=True, randomize=randomize)

    if hasattr(self.like.components[0].logLike, 'setCountsMap'):
        self._init_roi_model()
    else:
        # Older ST versions: rebuild the summed likelihood from a
        # temporary XML model.
        self.write_xml('tmp')
        self._like = SummedLikelihood()
        for comp in self._components:
            comp._create_binned_analysis('tmp.xml')
            self._like.addComponent(comp.like)
        self._init_roi_model()
        self.load_xml('tmp')

    self.logger.info('Finished')
5.128649
4.563069
1.123947
def write_model_map(self, model_name, name=None):
    """Save the counts model map to a FITS file.

    Parameters
    ----------
    model_name : str
        String that will be appended to the name of the output file.
    name : str
        Name of the component.

    Returns
    -------
    maps : list
        The summed model map followed by the per-component maps.
    """
    comp_maps = [c.write_model_map(model_name, name)
                 for c in self.components]

    outfile = os.path.join(self.workdir, 'mcube_%s.fits' % (model_name))

    # Coadd the per-component maps into a single summed map.
    summed = Map.from_geom(self.geom)
    for comp_map in comp_maps:
        summed.coadd(comp_map)
    summed.write(outfile, overwrite=True, conv='fgst-ccube')
    return [summed] + comp_maps
5.31489
5.638204
0.942657
def write_weight_map(self, model_name):
    """Save the weights map to a FITS file.

    Parameters
    ----------
    model_name : str
        String that will be appended to the name of the output file.

    Returns
    -------
    maps : list
        The summed weights map followed by the per-component maps.
    """
    comp_maps = [c.write_weight_map(model_name) for c in self.components]

    outfile = os.path.join(self.workdir, 'wcube_%s.fits' % (model_name))

    # FIXME: Should we average weights maps rather than coadding?
    summed = Map.from_geom(self.geom)
    for comp_map in comp_maps:
        summed.coadd(comp_map)
    summed.write(outfile, overwrite=True, conv='fgst-ccube')
    return [summed] + comp_maps
6.405684
6.690164
0.957478